author	Ming Lei <ming.lei@redhat.com>	2019-09-27 09:24:30 +0200
committer	Jens Axboe <axboe@kernel.dk>	2019-09-27 19:38:28 +0200
commit	a12de1d42d74ef3c80e9fb9a2da94daaef747869 (patch)
tree	730e2f9be02fa7087af02e7b9d825dc7a3b2b3b0 /block
parent	block: fix null pointer dereference in blk_mq_rq_timed_out() (diff)
download	linux-a12de1d42d74ef3c80e9fb9a2da94daaef747869.tar.xz
	linux-a12de1d42d74ef3c80e9fb9a2da94daaef747869.zip
blk-mq: honor IO scheduler for multiqueue devices
If a device is using multiple queues, the IO scheduler may be bypassed. This may hurt performance for some slow MQ devices, and it also breaks zoned devices, which depend on mq-deadline to respect the write order within a zone.

Don't bypass the IO scheduler if one is set up.

This patch can basically double sequential write performance on MQ scsi_debug when mq-deadline is applied.

Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Javier González <javier@javigon.com>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
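As a rough illustration of what the patch changes, the following is a small user-space model of how a request that is not absorbed by the plugging paths gets routed in blk_mq_make_request(), before and after the change. It is a sketch only, not kernel code: struct model_queue and the route_*() helpers are names invented for this example, and the conditions are simplified from the diff below.

/*
 * User-space model (illustrative sketch, not kernel code) of the routing
 * decision in blk_mq_make_request() for a request that is not plugged.
 * struct model_queue and route_*() are invented names for this example.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_queue {
	bool has_elevator;	/* q->elevator set, e.g. mq-deadline */
	int  nr_hw_queues;	/* number of hardware queues */
	bool dispatch_busy;	/* data.hctx->dispatch_busy */
};

/* Before this patch: sync IO on a multiqueue device could bypass the scheduler. */
static const char *route_before(const struct model_queue *q, bool is_sync)
{
	if ((q->nr_hw_queues > 1 && is_sync) ||
	    (!q->has_elevator && !q->dispatch_busy))
		return "blk_mq_try_issue_directly (scheduler bypassed)";
	return "blk_mq_sched_insert_request";
}

/* After this patch: an attached scheduler is always honored. */
static const char *route_after(const struct model_queue *q, bool is_sync)
{
	if (q->has_elevator)
		return "blk_mq_sched_insert_request (scheduler honored)";
	if ((q->nr_hw_queues > 1 && is_sync) || !q->dispatch_busy)
		return "blk_mq_try_issue_directly";
	return "blk_mq_sched_insert_request";
}

int main(void)
{
	/* A zoned multiqueue device relying on mq-deadline for write order. */
	struct model_queue zoned = {
		.has_elevator = true, .nr_hw_queues = 4, .dispatch_busy = false,
	};

	printf("sync write, before: %s\n", route_before(&zoned, true));
	printf("sync write, after : %s\n", route_after(&zoned, true));
	return 0;
}

Compiled with an ordinary C compiler, the model prints that the same sync write is issued directly (bypassing mq-deadline) before the patch and inserted through the scheduler afterwards, which is the behavior the zoned-device write-order requirement depends on.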
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	6
1 file changed, 4 insertions, 2 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6e3b15f70cd7..5d4bef3c378d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2012,6 +2012,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 
 		blk_add_rq_to_plug(plug, rq);
+	} else if (q->elevator) {
+		blk_mq_sched_insert_request(rq, false, true, true);
 	} else if (plug && !blk_queue_nomerges(q)) {
 		/*
 		 * We do limited plugging. If the bio can be merged, do that.
@@ -2035,8 +2037,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 					&cookie);
 		}
-	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
-			!data.hctx->dispatch_busy)) {
+	} else if ((q->nr_hw_queues > 1 && is_sync) ||
+			!data.hctx->dispatch_busy) {
 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
 		blk_mq_sched_insert_request(rq, false, true, true);