path: root/block/blk-mq.c
author     Christoph Hellwig <hch@lst.de>  2020-05-16 20:28:01 +0200
committer  Jens Axboe <axboe@kernel.dk>    2020-05-19 17:34:29 +0200
commit     ac7c5675fa45a372fab27d78a72d2e10e4734959 (patch)
tree       7add7a513b2d64e636acf3d3912f8cf292253ad7 /block/blk-mq.c
parent     blk-mq: remove a pointless queue enter pair in blk_mq_alloc_request_hctx (diff)
download   linux-ac7c5675fa45a372fab27d78a72d2e10e4734959.tar.xz
           linux-ac7c5675fa45a372fab27d78a72d2e10e4734959.zip
blk-mq: allow blk_mq_make_request to consume the q_usage_counter reference
blk_mq_make_request currently needs to grab a q_usage_counter reference when allocating a request. This is because the block layer grabs one before calling blk_mq_make_request, but also releases it as soon as blk_mq_make_request returns. Remove the blk_queue_exit call after blk_mq_make_request returns and instead let blk_mq_make_request consume the reference. This works perfectly fine for the block layer caller; only device mapper needs an extra reference, as the old problem still persists there. Open code blk_queue_enter_live in device mapper, as there should be no other callers and this allows better documenting why we do a non-try get.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
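The device mapper half of the change is outside this diffstat (which is limited to block/blk-mq.c). As a rough, illustrative sketch of what "open code blk_queue_enter_live" means there, the function name below is hypothetical and only the non-try percpu_ref_get() on q_usage_counter is the point:

/*
 * Illustrative sketch, not taken from this diff: request-based device
 * mapper takes its own q_usage_counter reference across the call,
 * because blk_mq_make_request() now consumes one.
 */
static blk_qc_t dm_submit_bio_sketch(struct mapped_device *md, struct bio *bio)
{
        /*
         * We are called with a live q_usage_counter reference, but it is
         * dropped as soon as we return.  Grab an extra, non-try reference:
         * the queue cannot go away while the caller still holds one, and
         * blk_mq_make_request() expects to be able to consume a reference
         * (which lives until the request is freed if one gets allocated).
         */
        percpu_ref_get(&md->queue->q_usage_counter);
        return blk_mq_make_request(md->queue, bio);
}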
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b1c12de8926e..cac11945f602 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2028,26 +2028,24 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	__blk_queue_split(q, &bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
 
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
 
 	if (blk_mq_sched_bio_merge(q, bio, nr_segs))
-		return BLK_QC_T_NONE;
+		goto queue_exit;
 
 	rq_qos_throttle(q, bio);
 
 	data.cmd_flags = bio->bi_opf;
-	blk_queue_enter_live(q);
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
-		blk_queue_exit(q);
 		rq_qos_cleanup(q, bio);
 		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return BLK_QC_T_NONE;
+		goto queue_exit;
 	}
 
 	trace_block_getrq(q, bio, bio->bi_opf);
@@ -2134,6 +2132,9 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	return cookie;
+queue_exit:
+	blk_queue_exit(q);
+	return BLK_QC_T_NONE;
 }
 EXPORT_SYMBOL_GPL(blk_mq_make_request); /* only for request based dm */
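For context, the caller side of the change lives in block/blk-core.c, also outside this diffstat: the blk_queue_exit() that used to follow the call is dropped on the blk-mq path, since blk_mq_make_request() now consumes the reference taken by blk_queue_enter(). A simplified sketch of that shape, with the function name and exact surrounding code assumed rather than taken from the patch:

/*
 * Simplified, assumed sketch of the caller in block/blk-core.c (not part
 * of this diff).  After this patch the blk-mq path no longer pairs
 * blk_queue_enter() with blk_queue_exit() in the caller, because
 * blk_mq_make_request() consumes (and on its error paths drops) the
 * q_usage_counter reference itself.  Legacy ->make_request_fn drivers do
 * not consume it, so that path keeps the paired exit.
 */
static blk_qc_t do_make_request_sketch(struct request_queue *q, struct bio *bio)
{
        blk_qc_t ret = BLK_QC_T_NONE;

        if (likely(blk_queue_enter(q, 0) == 0)) {
                if (q->mq_ops) {
                        /* blk-mq consumes the reference taken above */
                        ret = blk_mq_make_request(q, bio);
                } else {
                        ret = q->make_request_fn(q, bio);
                        /* non-mq drivers do not consume it, drop it here */
                        blk_queue_exit(q);
                }
        }
        return ret;
}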