author: Bart Van Assche <bvanassche@acm.org> 2021-09-28 00:03:25 +0200
committer: Jens Axboe <axboe@kernel.dk> 2021-10-18 14:17:02 +0200
commit: e2c7275dc0fe3cba6a3de5a5f4c98f923e3d9d14 (patch)
tree: a1e775a7dbdfe1b11947fbb8cf8029c8d1495a56 /block/mq-deadline.c
parent: block: move struct request to blk-mq.h (diff)
download: linux-e2c7275dc0fe3cba6a3de5a5f4c98f923e3d9d14.tar.xz
          linux-e2c7275dc0fe3cba6a3de5a5f4c98f923e3d9d14.zip
block/mq-deadline: Improve request accounting further
The scheduler .insert_requests() callback is called when a request is queued for the first time and also when it is requeued. Only count a request the first time it is queued. Additionally, since the mq-deadline scheduler only performs zone locking for requests that have been inserted, skip the zone unlock code for requests that have not been inserted into the mq-deadline scheduler.

Fixes: 38ba64d12d4c ("block/mq-deadline: Track I/O statistics")
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Niklas Cassel <Niklas.Cassel@wdc.com>
Cc: Hannes Reinecke <hare@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20210927220328.1410161-2-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
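For illustration only, here is a minimal user-space sketch of the accounting pattern described above, assuming a simplified struct fake_request whose boolean inserted field stands in for rq->elv.priv[0]; the function and counter names (insert_request, finish_request, inserted_count, completed_count) are hypothetical and merely mirror the guard logic in the diff below, not the kernel API.

/* Sketch: the insert hook can run more than once for the same request
 * (initial queueing plus requeues), and the finish hook can run for
 * requests that bypassed the scheduler entirely. A per-request marker
 * keeps the "inserted" and "completed" counters paired.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
	bool inserted;		/* stands in for rq->elv.priv[0] */
};

static unsigned int inserted_count;
static unsigned int completed_count;

static void insert_request(struct fake_request *rq)
{
	/* Count a request only the first time it is queued, not on requeue. */
	if (!rq->inserted) {
		inserted_count++;
		rq->inserted = true;
	}
}

static void finish_request(struct fake_request *rq)
{
	/* Skip requests that were never inserted into the scheduler. */
	if (!rq->inserted)
		return;
	completed_count++;
}

int main(void)
{
	struct fake_request a = { 0 }, bypass = { 0 };

	insert_request(&a);
	insert_request(&a);		/* requeue: must not be counted twice */
	finish_request(&a);
	finish_request(&bypass);	/* bypassed insertion: not counted */

	assert(inserted_count == 1 && completed_count == 1);
	printf("inserted=%u completed=%u\n", inserted_count, completed_count);
	return 0;
}

With the marker in place, the two counters stay balanced even when requests are requeued or bypass insertion, which is the invariant the patch restores for the mq-deadline statistics.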
Diffstat (limited to 'block/mq-deadline.c')
-rw-r--r--  block/mq-deadline.c  17
1 file changed, 10 insertions, 7 deletions
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 47f042fa6a68..c27b4347ca91 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -677,8 +677,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
blk_req_zone_write_unlock(rq);
prio = ioprio_class_to_prio[ioprio_class];
- dd_count(dd, inserted, prio);
- rq->elv.priv[0] = (void *)(uintptr_t)1;
+ if (!rq->elv.priv[0]) {
+ dd_count(dd, inserted, prio);
+ rq->elv.priv[0] = (void *)(uintptr_t)1;
+ }
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
blk_mq_free_requests(&free);
@@ -759,12 +761,13 @@ static void dd_finish_request(struct request *rq)
/*
* The block layer core may call dd_finish_request() without having
- * called dd_insert_requests(). Hence only update statistics for
- * requests for which dd_insert_requests() has been called. See also
- * blk_mq_request_bypass_insert().
+ * called dd_insert_requests(). Skip requests that bypassed I/O
+ * scheduling. See also blk_mq_request_bypass_insert().
*/
- if (rq->elv.priv[0])
- dd_count(dd, completed, prio);
+ if (!rq->elv.priv[0])
+ return;
+
+ dd_count(dd, completed, prio);
if (blk_queue_is_zoned(q)) {
unsigned long flags;