author    | Ming Lei <ming.lei@redhat.com> | 2021-12-03 14:15:34 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2021-12-03 22:51:29 +0100
commit    | 4cafe86c9267f9dd5819df946ba8c038ba958370 (patch)
tree      | d5c2871f88972a6f2a893a4add72818d9ea9110c /block
parent    | blk-mq: pass request queue to blk_mq_run_dispatch_ops (diff)
download  | linux-4cafe86c9267f9dd5819df946ba8c038ba958370.tar.xz
          | linux-4cafe86c9267f9dd5819df946ba8c038ba958370.zip
blk-mq: run dispatch lock once in case of issuing from list
It isn't necessary to call blk_mq_run_dispatch_ops() once per request
when issuing requests directly; entering it a single time around the
whole list is enough.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
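
[Editor's note] To make the change concrete, the sketch below condenses the
before/after pattern. It is an illustration, not the kernel source: the list
walk and its bookkeeping are simplified, and surrounding declarations are
omitted. blk_mq_run_dispatch_ops(), __blk_mq_try_issue_directly() and
blk_mq_try_issue_list_directly() are the real identifiers from the diff below.

/*
 * Before: issuing from a list went through
 * blk_mq_request_issue_directly(), which entered the dispatch_ops
 * guard once for every request pulled off the list.
 */
while (!list_empty(list)) {
	struct request *rq = list_first_entry(list, struct request,
					      queuelist);

	blk_mq_run_dispatch_ops(rq->q,	/* entered once per request */
		ret = __blk_mq_try_issue_directly(rq->mq_hctx, rq,
						  true, last));
	/* ... simplified: error handling and list bookkeeping ... */
}

/*
 * After: the caller enters the guard a single time around the whole
 * list walk, and the per-request helper issues directly without
 * re-entering it.
 */
blk_mq_run_dispatch_ops(hctx->queue,	/* entered once per list */
	blk_mq_try_issue_list_directly(hctx, list));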
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq-sched.c | 3 ++-
-rw-r--r-- | block/blk-mq.c | 14 ++++++--------
2 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0d7257848f7e..55488ba97823 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,7 +475,8 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 		 * us one extra enqueue & dequeue to sw queue.
 		 */
 		if (!hctx->dispatch_busy && !run_queue_async) {
-			blk_mq_try_issue_list_directly(hctx, list);
+			blk_mq_run_dispatch_ops(hctx->queue,
+				blk_mq_try_issue_list_directly(hctx, list));
 			if (list_empty(list))
 				goto out;
 		}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 24c65bb8719b..22ec21aa0c22 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2464,12 +2464,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
-	blk_status_t ret;
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
-	blk_mq_run_dispatch_ops(rq->q,
-			ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
-	return ret;
+	return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
 }
 
 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
@@ -2526,7 +2521,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	plug->rq_count = 0;
 
 	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
-		blk_mq_plug_issue_direct(plug, false);
+		blk_mq_run_dispatch_ops(plug->mq_list->q,
+			blk_mq_plug_issue_direct(plug, false));
 		if (rq_list_empty(plug->mq_list))
 			return;
 	}
@@ -2867,7 +2863,9 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 	 * bypass a potential scheduler on the bottom device for
 	 * insert.
 	 */
-	return blk_mq_request_issue_directly(rq, true);
+	blk_mq_run_dispatch_ops(rq->q,
+			ret = blk_mq_request_issue_directly(rq, true));
+	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);