author | Christoph Hellwig <hch@lst.de> | 2023-04-13 08:06:47 +0200
committer | Jens Axboe <axboe@kernel.dk> | 2023-04-13 14:57:18 +0200
commit | 89ea5ceb53d14f52ecbad8393be47f382c47c37d (patch)
tree | 1c95c5ce7fc34eff89bd471d89ccfb2c081fc41d /block
parent | blk-mq: pass a flags argument to blk_mq_add_to_requeue_list (diff)
download | linux-89ea5ceb53d14f52ecbad8393be47f382c47c37d.tar.xz linux-89ea5ceb53d14f52ecbad8393be47f382c47c37d.zip
blk-mq: cleanup __blk_mq_sched_dispatch_requests
__blk_mq_sched_dispatch_requests currently duplicates its dispatch
logic between the case where requests sit on the hctx dispatch list
and the case where they do not. Merge the two paths using a new
need_dispatch variable and remove a few pointless local variables.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413060651.694656-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq-sched.c | 31
1 file changed, 14 insertions(+), 17 deletions(-)
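For orientation, the refactored function body reads roughly as follows. This is a sketch assembled from the context and added lines of the diff below, with line breaks and indentation restored; the unchanged code between the two hunks is elided rather than reproduced.

static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	bool need_dispatch = false;
	LIST_HEAD(rq_list);

	/* ... unchanged code between the two hunks omitted ... */

	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
			return 0;
		need_dispatch = true;
	} else {
		need_dispatch = hctx->dispatch_busy;
	}

	/* shared dispatch decision for both branches above */
	if (hctx->queue->elevator)
		return blk_mq_do_dispatch_sched(hctx);

	/* dequeue request one by one from sw queue if queue is busy */
	if (need_dispatch)
		return blk_mq_do_dispatch_ctx(hctx);
	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	return 0;
}

Both the dispatch-list and the empty-list paths now funnel into the same tail, with need_dispatch as the only state distinguishing them; that shared tail is the merge described in the commit message.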
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index f90fc42a88ca..67c95f31b15b 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -269,9 +269,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
 
 static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
-	struct request_queue *q = hctx->queue;
-	const bool has_sched = q->elevator;
-	int ret = 0;
+	bool need_dispatch = false;
 	LIST_HEAD(rq_list);
 
 	/*
@@ -300,23 +298,22 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 */
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
-		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
-			if (has_sched)
-				ret = blk_mq_do_dispatch_sched(hctx);
-			else
-				ret = blk_mq_do_dispatch_ctx(hctx);
-		}
-	} else if (has_sched) {
-		ret = blk_mq_do_dispatch_sched(hctx);
-	} else if (hctx->dispatch_busy) {
-		/* dequeue request one by one from sw queue if queue is busy */
-		ret = blk_mq_do_dispatch_ctx(hctx);
+		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
+			return 0;
+		need_dispatch = true;
 	} else {
-		blk_mq_flush_busy_ctxs(hctx, &rq_list);
-		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
+		need_dispatch = hctx->dispatch_busy;
 	}
 
-	return ret;
+	if (hctx->queue->elevator)
+		return blk_mq_do_dispatch_sched(hctx);
+
+	/* dequeue request one by one from sw queue if queue is busy */
+	if (need_dispatch)
+		return blk_mq_do_dispatch_ctx(hctx);
+	blk_mq_flush_busy_ctxs(hctx, &rq_list);
+	blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
+	return 0;
 }
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)