author     Jens Axboe <axboe@kernel.dk>    2018-10-16 22:23:06 +0200
committer  Jens Axboe <axboe@kernel.dk>    2018-11-07 21:44:59 +0100
commit     a8908939af569ce2419f43fd56eeaf003bc3d85d
tree       9940fd37b26a26ae1d63f94365fe97c862fa65d9 /block
parent     Merge branch 'irq/for-block' of git://git.kernel.org/pub/scm/linux/kernel/git...
blk-mq: kill q->mq_map
It's just a pointer to set->mq_map; use that instead. Move the
q->tag_set assignment a bit earlier, so we always know it's valid.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
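
The indirection being removed is easy to picture: q->mq_map was only ever a copy of set->mq_map, so the CPU-to-hardware-queue lookup can go through q->tag_set instead. The sketch below is a simplified, hypothetical illustration of that lookup path (reduced stand-in structs and a renamed helper, not the real blk-mq definitions), not part of the commit itself.

	/* Reduced stand-ins for the real blk-mq structures, just to show the
	 * CPU -> hardware-queue lookup that the patch reroutes. */
	struct blk_mq_hw_ctx;

	struct blk_mq_tag_set {
		unsigned int *mq_map;			/* per-CPU hctx index */
	};

	struct request_queue {
		struct blk_mq_tag_set *tag_set;		/* must be valid before any lookup */
		struct blk_mq_hw_ctx **queue_hw_ctx;	/* hctx array, indexed via mq_map */
	};

	/* Post-patch shape of the lookup: dereference the tag set's map
	 * directly instead of a per-queue q->mq_map alias. */
	static inline struct blk_mq_hw_ctx *cpu_to_hctx(struct request_queue *q, int cpu)
	{
		return q->queue_hw_ctx[q->tag_set->mq_map[cpu]];
	}

Because the per-queue alias is gone, q->tag_set has to be valid before the first blk_mq_map_queue() call, which is why the assignment now happens in blk_mq_init_allocated_queue() rather than in blk_mq_add_queue_tag_set().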
Diffstat (limited to 'block')
 block/blk-mq.c | 13 ++++---------
 block/blk-mq.h |  4 +++-
 2 files changed, 7 insertions, 10 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 67a2bafd4b29..766facfa1f08 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2322,7 +2322,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = q->mq_map[i];
+		hctx_idx = set->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			q->mq_map[i] = 0;
+			set->mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2430,8 +2430,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 				     struct request_queue *q)
 {
-	q->tag_set = set;
-
 	mutex_lock(&set->tag_list_lock);
 
 	/*
@@ -2468,8 +2466,6 @@ void blk_mq_release(struct request_queue *q)
 		kobject_put(&hctx->kobj);
 	}
 
-	q->mq_map = NULL;
-
 	kfree(q->queue_hw_ctx);
 
 	/*
@@ -2589,7 +2585,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		int node;
 		struct blk_mq_hw_ctx *hctx;
 
-		node = blk_mq_hw_queue_to_node(q->mq_map, i);
+		node = blk_mq_hw_queue_to_node(set->mq_map, i);
 		/*
 		 * If the hw queue has been mapped to another numa node,
 		 * we need to realloc the hctx. If allocation fails, fallback
@@ -2666,8 +2662,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!q->queue_hw_ctx)
 		goto err_percpu;
 
-	q->mq_map = set->mq_map;
-
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
@@ -2676,6 +2670,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
 	q->nr_queues = nr_cpu_ids;
+	q->tag_set = set;
 
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9497b47e2526..9536be06d022 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -75,7 +75,9 @@ extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 		int cpu)
 {
-	return q->queue_hw_ctx[q->mq_map[cpu]];
+	struct blk_mq_tag_set *set = q->tag_set;
+
+	return q->queue_hw_ctx[set->mq_map[cpu]];
 }
 
 /*