author    Pavel Begunkov <asml.silence@gmail.com>    2023-01-17 12:42:15 +0100
committer Jens Axboe <axboe@kernel.dk>               2023-01-17 17:56:52 +0100
commit    7746564793978fe2f43b18a302b22dca0ad3a0e8 (patch)
tree      95ce46a90bfbd263b9938dca5aa7b63b094d1641 /block
parent    block/rnbd-clt: fix wrong max ID in ida_alloc_max (diff)
block: fix hctx checks for batch allocation
When there are no read queues, read requests will be assigned a default queue on allocation. However, blk_mq_get_cached_request() is not prepared for that and will fail all attempts to grab read requests from the cache. Worst case, it doubles the number of requests allocated, roughly half of which will be returned by blk_mq_free_plug_rqs().

It only affects batched allocations and so is io_uring specific. For reference, the QD8 t/io_uring benchmark improves by 20-35%.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/80d4511011d7d4751b4cf6375c4e38f237d935e3.1673955390.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
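The following is a standalone illustrative sketch, not kernel code: the enum values mirror the kernel's enum hctx_type, but wanted_type(), allocated_type() and cached_rq_usable() are hypothetical helpers that condense, respectively, the classification done by blk_mq_get_hctx_type(), the read-to-default fallback used when a device exposes no dedicated read queues, and the predicate introduced by this patch. It shows why a cached read request sitting on a HCTX_TYPE_DEFAULT hardware context must still be considered usable.

/* Illustrative sketch only; names and logic are simplified for demonstration. */
#include <stdbool.h>
#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL };

/* Simplified stand-in for blk_mq_get_hctx_type(): classify by operation. */
static enum hctx_type wanted_type(bool is_read, bool is_polled)
{
	if (is_polled)
		return HCTX_TYPE_POLL;
	return is_read ? HCTX_TYPE_READ : HCTX_TYPE_DEFAULT;
}

/*
 * Simplified stand-in for the queue-map fallback: with zero read queues,
 * read requests are allocated from the default map.
 */
static enum hctx_type allocated_type(enum hctx_type wanted, int nr_read_queues)
{
	if (wanted == HCTX_TYPE_READ && nr_read_queues == 0)
		return HCTX_TYPE_DEFAULT;
	return wanted;
}

/* The corrected predicate: exact match, or a read bio on a default hctx. */
static bool cached_rq_usable(enum hctx_type type, enum hctx_type hctx_type)
{
	return type == hctx_type ||
	       (type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT);
}

int main(void)
{
	enum hctx_type want = wanted_type(true, false);  /* a read bio */
	enum hctx_type have = allocated_type(want, 0);   /* no read queues */

	/* The old check (want != have) would have rejected this cached request. */
	printf("cached request usable: %s\n",
	       cached_rq_usable(want, have) ? "yes" : "no");
	return 0;
}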
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c  6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2c49b4151da1..9d463f7563bc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2890,6 +2890,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
 	struct request *rq;
+	enum hctx_type type, hctx_type;
 
 	if (!plug)
 		return NULL;
@@ -2902,7 +2903,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		return NULL;
 	}
 
-	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+	type = blk_mq_get_hctx_type((*bio)->bi_opf);
+	hctx_type = rq->mq_hctx->type;
+	if (type != hctx_type &&
+	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
 		return NULL;
 	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
 		return NULL;