| author | Ming Lei <ming.lei@redhat.com> | 2021-12-03 14:15:31 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2021-12-03 22:51:29 +0100 |
| commit | 2a904d00855f94cb85751e45fa494f225d44ae0d | |
| tree | 0bcc141b19e61b4d536115e274c68da183ae9ff2 /block | |
| parent | block: switch to atomic_t for request references | |
blk-mq: remove hctx_lock and hctx_unlock
Remove hctx_lock() and hctx_unlock(), and add a helper, blk_mq_run_dispatch_ops(),
that runs the code block passed in dispatch_ops with the rcu/srcu read lock held.
Compared with hctx_lock()/hctx_unlock():
1) two branches are reduced to one, so (hctx->flags & BLK_MQ_F_BLOCKING) only
needs to be checked once per dispatch_ops run
2) srcu_idx need not be touched in the non-blocking case
3) might_sleep_if() can move into the blocking branch as a plain might_sleep()
Also put the added blk_mq_run_dispatch_ops() in a private header, so that the
following patch can use it outside of blk-mq.c.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
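[Editor's note] Before the hunks themselves, here is the caller-side shape of the change, condensed from the __blk_mq_run_hw_queue() hunk in blk-mq.c below (the WARN_ON_ONCE(in_interrupt()) check and its comment are elided for brevity):

```c
/* Before: each caller did the locking dance itself. */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

/* After: the BLK_MQ_F_BLOCKING branch and the srcu_idx bookkeeping
 * are hidden inside one macro invocation.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_run_dispatch_ops(hctx, blk_mq_sched_dispatch_requests(hctx));
}
```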
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-mq.c | 57 |
| -rw-r--r-- | block/blk-mq.h | 16 |

2 files changed, 26 insertions(+), 47 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8c7cab75229e..494da31dc1a5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1071,26 +1071,6 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
-	__releases(hctx->srcu)
-{
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
-		rcu_read_unlock();
-	else
-		srcu_read_unlock(hctx->srcu, srcu_idx);
-}
-
-static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
-	__acquires(hctx->srcu)
-{
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
-		/* shut up gcc false positive */
-		*srcu_idx = 0;
-		rcu_read_lock();
-	} else
-		*srcu_idx = srcu_read_lock(hctx->srcu);
-}
-
 /**
  * blk_mq_start_request - Start processing a request
  * @rq: Pointer to request to be started
@@ -1947,19 +1927,13 @@ out:
  */
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-	int srcu_idx;
-
 	/*
 	 * We can't run the queue inline with ints disabled. Ensure that
 	 * we catch bad users of this early.
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
-	hctx_lock(hctx, &srcu_idx);
-	blk_mq_sched_dispatch_requests(hctx);
-	hctx_unlock(hctx, srcu_idx);
+	blk_mq_run_dispatch_ops(hctx, blk_mq_sched_dispatch_requests(hctx));
 }
 
 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
@@ -2071,7 +2045,6 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
  */
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
-	int srcu_idx;
 	bool need_run;
 
 	/*
@@ -2082,10 +2055,9 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 	 * quiesced.
 	 */
-	hctx_lock(hctx, &srcu_idx);
-	need_run = !blk_queue_quiesced(hctx->queue) &&
-		blk_mq_hctx_has_pending(hctx);
-	hctx_unlock(hctx, srcu_idx);
+	blk_mq_run_dispatch_ops(hctx,
+		need_run = !blk_queue_quiesced(hctx->queue) &&
+		blk_mq_hctx_has_pending(hctx));
 
 	if (need_run)
 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -2488,32 +2460,22 @@ insert:
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		struct request *rq)
 {
-	blk_status_t ret;
-	int srcu_idx;
-
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+	blk_status_t ret =
+		__blk_mq_try_issue_directly(hctx, rq, false, true);
 
-	hctx_lock(hctx, &srcu_idx);
-
-	ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
 		blk_mq_request_bypass_insert(rq, false, true);
 	else if (ret != BLK_STS_OK)
 		blk_mq_end_request(rq, ret);
-
-	hctx_unlock(hctx, srcu_idx);
 }
 
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
 	blk_status_t ret;
-	int srcu_idx;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-	hctx_lock(hctx, &srcu_idx);
-	ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
-	hctx_unlock(hctx, srcu_idx);
-
+	blk_mq_run_dispatch_ops(hctx,
+		ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
 	return ret;
 }
 
@@ -2826,7 +2788,8 @@ void blk_mq_submit_bio(struct bio *bio)
 			(q->nr_hw_queues == 1 || !is_sync)))
 		blk_mq_sched_insert_request(rq, false, true, true);
 	else
-		blk_mq_try_issue_directly(rq->mq_hctx, rq);
+		blk_mq_run_dispatch_ops(rq->mq_hctx,
+				blk_mq_try_issue_directly(rq->mq_hctx, rq));
 }
 
 /**
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d516c7a46f57..e4c396204928 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -374,5 +374,21 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return __blk_mq_active_requests(hctx) < depth;
 }
 
+/* run the code block in @dispatch_ops with rcu/srcu read lock held */
+#define blk_mq_run_dispatch_ops(hctx, dispatch_ops)	\
+do {							\
+	if (!((hctx)->flags & BLK_MQ_F_BLOCKING)) {	\
+		rcu_read_lock();			\
+		(dispatch_ops);				\
+		rcu_read_unlock();			\
+	} else {					\
+		int srcu_idx;				\
+							\
+		might_sleep();				\
+		srcu_idx = srcu_read_lock((hctx)->srcu);	\
+		(dispatch_ops);				\
+		srcu_read_unlock((hctx)->srcu, srcu_idx);	\
+	}						\
+} while (0)
 #endif
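[Editor's note] The macro added to blk-mq.h is the whole mechanism, so a self-contained analogue may help show the shape of the technique outside the kernel: a do/while(0) macro that picks one of two read-side protections by a flag and expands the caller's code block inline under it. The sketch below is purely illustrative — demo_hctx, demo_run_dispatch_ops(), dispatch(), and the pthread locks are hypothetical stand-ins for the hctx, the RCU read side, and the SRCU read side, not kernel APIs. It builds with `cc -pthread demo.c`:

```c
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for a hardware queue context. */
struct demo_hctx {
	int blocking;			/* stands in for hctx->flags & BLK_MQ_F_BLOCKING */
	pthread_rwlock_t fast_lock;	/* stands in for the RCU read side */
	pthread_mutex_t sleep_lock;	/* stands in for the SRCU read side */
};

/*
 * Analogue of blk_mq_run_dispatch_ops(): test the flag once, then run
 * the caller-supplied code block under the matching read-side lock.
 * The caller never sees the branch or any lock bookkeeping.
 */
#define demo_run_dispatch_ops(hctx, dispatch_ops)		\
do {								\
	if (!(hctx)->blocking) {				\
		pthread_rwlock_rdlock(&(hctx)->fast_lock);	\
		(dispatch_ops);					\
		pthread_rwlock_unlock(&(hctx)->fast_lock);	\
	} else {						\
		pthread_mutex_lock(&(hctx)->sleep_lock);	\
		(dispatch_ops);					\
		pthread_mutex_unlock(&(hctx)->sleep_lock);	\
	}							\
} while (0)

static void dispatch(struct demo_hctx *hctx)
{
	printf("dispatching, blocking=%d\n", hctx->blocking);
}

int main(void)
{
	struct demo_hctx hctx = {
		.blocking = 0,
		.fast_lock = PTHREAD_RWLOCK_INITIALIZER,
		.sleep_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* The code block is passed as a macro argument, as in the patch. */
	demo_run_dispatch_ops(&hctx, dispatch(&hctx));	/* non-blocking path */

	hctx.blocking = 1;
	demo_run_dispatch_ops(&hctx, dispatch(&hctx));	/* blocking path */
	return 0;
}
```

Because the code block expands inline under exactly one branch, the flag is tested once per invocation and no lock index ever escapes to the caller — which is precisely points 1) and 2) of the commit message.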