author    | Jianchao Wang <jianchao.w.wang@oracle.com> | 2018-10-12 12:07:28 +0200
committer | Jens Axboe <axboe@kernel.dk>               | 2018-10-13 23:42:04 +0200
commit    | e01ad46d53b59720c6ae69963ee1756506954c85
tree      | d84fbc92eacf1b2341032ae25dd93a3dc3dd1a3d /block
parent    | blk-mq: realloc hctx when hw queue is mapped to another node
blk-mq: fallback to previous nr_hw_queues when updating fails
When we try to increase nr_hw_queues, we may fail due to memory
shortage or some other reason. blk_mq_realloc_hw_ctxs then stops
early and leaves some entries in q->queue_hw_ctx set to NULL. However,
because the queue map has already been updated for the new
nr_hw_queues, some CPUs end up mapped to a hw queue whose allocation
just failed, so blk_mq_map_queue can return NULL. This causes a panic
in the following blk_mq_map_swqueue.
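
A minimal user-space C sketch of that failure mode (illustration only, not kernel code; fake_hctx, cpu_map and the constants are hypothetical stand-ins for q->queue_hw_ctx and the updated queue map):

```c
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct fake_hctx { int idx; };

int main(void)
{
	struct fake_hctx *hctxs[NR_CPUS] = { NULL };
	int cpu_map[NR_CPUS];
	int old_nr = 2, new_nr = 4, i;

	/* old_nr hw queues already exist and work fine */
	for (i = 0; i < old_nr; i++) {
		hctxs[i] = malloc(sizeof(*hctxs[i]));
		hctxs[i]->idx = i;
	}

	/* the CPU-to-queue map is updated for new_nr queues first ... */
	for (i = 0; i < NR_CPUS; i++)
		cpu_map[i] = i % new_nr;

	/* ... but allocating the extra queues fails partway through */
	for (i = old_nr; i < new_nr; i++) {
		if (i == 3)		/* simulated allocation failure */
			break;
		hctxs[i] = malloc(sizeof(*hctxs[i]));
		hctxs[i]->idx = i;
	}

	/* analogous to blk_mq_map_swqueue(): each CPU looks up its queue */
	for (i = 0; i < NR_CPUS; i++) {
		struct fake_hctx *h = hctxs[cpu_map[i]];
		if (!h) {
			printf("cpu %d -> hctx %d is NULL (kernel would oops here)\n",
			       i, cpu_map[i]);
			continue;
		}
		printf("cpu %d -> hctx %d\n", i, h->idx);
	}
	return 0;
}
```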
To fix this, when increasing nr_hw_queues fails, fall back to the
previous nr_hw_queues and print a warning. In addition, a driver's
.map_queues callback usually uses the completion irq affinity to map
hw queues to CPUs; after falling back to the old nr_hw_queues, some
CPUs would be left without a mapping to a hw queue, so use the default
blk_mq_map_queues to redo the mapping.
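
A rough user-space sketch of this fallback flow, assuming simplified stand-ins realloc_hw_ctxs() and map_queues() in place of the real blk_mq_realloc_hw_ctxs() and blk_mq_map_queues(); it models the retry-with-previous-count pattern rather than the actual kernel code paths:

```c
#include <stdio.h>
#include <stdbool.h>

static int nr_allocated = 2;		/* hw queues currently allocated */

static bool realloc_hw_ctxs(int want)
{
	/* pretend anything above 3 queues cannot be allocated */
	if (want > 3)
		return false;
	nr_allocated = want;
	return true;
}

static void map_queues(int nr)
{
	printf("remapping CPUs over %d hw queue(s)\n", nr);
}

static void update_nr_hw_queues(int *cur, int new_nr)
{
	int prev = *cur;

	*cur = new_nr;
	map_queues(*cur);
fallback:
	if (!realloc_hw_ctxs(*cur)) {
		fprintf(stderr,
			"Increasing nr_hw_queues to %d fails, fallback to %d\n",
			new_nr, prev);
		*cur = prev;
		map_queues(*cur);	/* default mapping, like blk_mq_map_queues() */
		goto fallback;		/* retry with the previous count */
	}
}

int main(void)
{
	int nr_hw_queues = 2;

	update_nr_hw_queues(&nr_hw_queues, 4);
	printf("final nr_hw_queues = %d (allocated %d)\n",
	       nr_hw_queues, nr_allocated);
	return 0;
}
```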
Reported-by: syzbot+83e8cbe702263932d9d4@syzkaller.appspotmail.com
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 27
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 941f51380077..c2ecd64a2403 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2557,7 +2557,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                                                 struct request_queue *q)
 {
-        int i, j;
+        int i, j, end;
         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
         /* protect against switching io scheduler */
@@ -2591,8 +2591,20 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                         break;
                 }
         }
+        /*
+         * Increasing nr_hw_queues fails. Free the newly allocated
+         * hctxs and keep the previous q->nr_hw_queues.
+         */
+        if (i != set->nr_hw_queues) {
+                j = q->nr_hw_queues;
+                end = i;
+        } else {
+                j = i;
+                end = q->nr_hw_queues;
+                q->nr_hw_queues = set->nr_hw_queues;
+        }
 
-        for (j = i; j < q->nr_hw_queues; j++) {
+        for (; j < end; j++) {
                 struct blk_mq_hw_ctx *hctx = hctxs[j];
 
                 if (hctx) {
@@ -2604,7 +2616,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                 }
 
         }
-        q->nr_hw_queues = i;
         mutex_unlock(&q->sysfs_lock);
 }
 
@@ -2989,6 +3000,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
         struct request_queue *q;
         LIST_HEAD(head);
+        int prev_nr_hw_queues;
 
         lockdep_assert_held(&set->tag_list_lock);
 
@@ -3017,10 +3029,19 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                 blk_mq_sysfs_unregister(q);
         }
 
+        prev_nr_hw_queues = set->nr_hw_queues;
         set->nr_hw_queues = nr_hw_queues;
         blk_mq_update_queue_map(set);
+fallback:
         list_for_each_entry(q, &set->tag_list, tag_set_list) {
                 blk_mq_realloc_hw_ctxs(set, q);
+                if (q->nr_hw_queues != set->nr_hw_queues) {
+                        pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
+                                        nr_hw_queues, prev_nr_hw_queues);
+                        set->nr_hw_queues = prev_nr_hw_queues;
+                        blk_mq_map_queues(set);
+                        goto fallback;
+                }
                 blk_mq_map_swqueue(q);
         }
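
For reference, a small user-space sketch of the range logic that the first hunk adds to blk_mq_realloc_hw_ctxs(), i.e. which hctxs get freed and what nr_hw_queues ends up as; pick_free_range() is a hypothetical helper used purely for illustration:

```c
#include <stdio.h>

/*
 * Mirror of the range selection added to blk_mq_realloc_hw_ctxs():
 *   - on failure (i != new_nr), free the partially allocated new hctxs
 *     in [old_nr, i) and keep old_nr;
 *   - on success, free the now-unused old hctxs in [i, old_nr) (only
 *     relevant when shrinking) and adopt new_nr.
 */
static void pick_free_range(int i, int old_nr, int new_nr,
			    int *start, int *end, int *result_nr)
{
	if (i != new_nr) {		/* allocation stopped early */
		*start = old_nr;
		*end = i;
		*result_nr = old_nr;	/* q->nr_hw_queues left untouched */
	} else {			/* all requested hctxs exist */
		*start = i;
		*end = old_nr;
		*result_nr = new_nr;
	}
}

int main(void)
{
	int start, end, result;

	/* growing 2 -> 4, but allocation stops at i == 3 */
	pick_free_range(3, 2, 4, &start, &end, &result);
	printf("failure: free [%d, %d), nr_hw_queues stays %d\n",
	       start, end, result);

	/* shrinking 4 -> 2, all 2 hctxs allocated (i == 2) */
	pick_free_range(2, 4, 2, &start, &end, &result);
	printf("success: free [%d, %d), nr_hw_queues becomes %d\n",
	       start, end, result);
	return 0;
}
```

In short, a failed increase frees only the partially allocated new entries and keeps the old count, while a successful update frees any leftover old entries and adopts the new count.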