author     Christoph Hellwig <hch@lst.de>  2021-10-12 12:40:44 +0200
committer  Jens Axboe <axboe@kernel.dk>    2021-10-18 14:17:03 +0200
commit     b90cfaed3789ecdc5580027fc91e3056bc6b3216 (patch)
tree       dae4568a3dcb4d34a91be44d66cb645c6bed8a7b /block/blk-mq.c
parent     block: pre-allocate requests if plug is started and is a batch (diff)
blk-mq: cleanup and rename __blk_mq_alloc_request
The newly added loop for the cached requests in __blk_mq_alloc_request is
a little too convoluted for my taste, so unwind it a bit.  Also rename the
function to __blk_mq_alloc_requests now that it can allocate more than a
single request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211012104045.658051-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
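The cached-request handling the patch unwinds is, at its core, a singly-linked
push/pop on the plug's request cache via rq_next. The sketch below is only an
illustration of that linking convention as it appears in the diff; the types
and the cache_push/cache_pop helpers are simplified stand-ins, not real kernel
structures or functions.

	/*
	 * Minimal standalone sketch of the rq_next caching scheme; the
	 * struct definitions are simplified stand-ins for the kernel's
	 * struct request and struct blk_mq_alloc_data.
	 */
	#include <stddef.h>

	struct request {
		struct request *rq_next;	/* links requests in the plug cache */
		int tag;
	};

	struct alloc_data {
		struct request **cached_rq;	/* head of the singly-linked cache */
	};

	/* Push a freshly allocated request onto the front of the cache. */
	static void cache_push(struct alloc_data *data, struct request *rq)
	{
		rq->rq_next = *data->cached_rq;
		*data->cached_rq = rq;
	}

	/* Pop one cached request, or return NULL if the cache is empty. */
	static struct request *cache_pop(struct alloc_data *data)
	{
		struct request *rq = *data->cached_rq;

		if (rq)
			*data->cached_rq = rq->rq_next;
		return rq;
	}

After the rework, the allocation loop pushes extra requests with the first
pattern and the fallback path at the end of __blk_mq_alloc_requests pops one
with the second, which is what the restructured tail of the function expresses.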
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d9f14d3c2b8c..98a5d0850b95 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -354,7 +354,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
return rq;
}
-static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
+static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
struct request_queue *q = data->q;
struct elevator_queue *e = q->elevator;
@@ -395,36 +395,36 @@ retry:
*/
do {
tag = blk_mq_get_tag(data);
- if (tag != BLK_MQ_NO_TAG) {
- rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
- if (!--data->nr_tags)
- return rq;
- if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
- return rq;
- rq->rq_next = *data->cached_rq;
- *data->cached_rq = rq;
- data->flags |= BLK_MQ_REQ_NOWAIT;
- continue;
+ if (tag == BLK_MQ_NO_TAG) {
+ if (data->flags & BLK_MQ_REQ_NOWAIT)
+ break;
+ /*
+ * Give up the CPU and sleep for a random short time to
+ * ensure that thread using a realtime scheduling class
+ * are migrated off the CPU, and thus off the hctx that
+ * is going away.
+ */
+ msleep(3);
+ goto retry;
}
- if (data->flags & BLK_MQ_REQ_NOWAIT)
- break;
- /*
- * Give up the CPU and sleep for a random short time to ensure
- * that thread using a realtime scheduling class are migrated
- * off the CPU, and thus off the hctx that is going away.
- */
- msleep(3);
- goto retry;
+ rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
+ if (!--data->nr_tags || e ||
+ (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+ return rq;
+
+ /* link into the cached list */
+ rq->rq_next = *data->cached_rq;
+ *data->cached_rq = rq;
+ data->flags |= BLK_MQ_REQ_NOWAIT;
} while (1);
- if (data->cached_rq) {
- rq = *data->cached_rq;
- *data->cached_rq = rq->rq_next;
- return rq;
- }
+ if (!data->cached_rq)
+ return NULL;
- return NULL;
+ rq = *data->cached_rq;
+ *data->cached_rq = rq->rq_next;
+ return rq;
}
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
@@ -443,7 +443,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
if (ret)
return ERR_PTR(ret);
- rq = __blk_mq_alloc_request(&data);
+ rq = __blk_mq_alloc_requests(&data);
if (!rq)
goto out_queue_exit;
rq->__data_len = 0;
@@ -2258,7 +2258,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
plug->nr_ios = 1;
data.cached_rq = &plug->cached_rq;
}
- rq = __blk_mq_alloc_request(&data);
+ rq = __blk_mq_alloc_requests(&data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)