author      Jens Axboe <axboe@kernel.dk>    2018-05-30 09:26:07 +0200
committer   Jens Axboe <axboe@kernel.dk>    2018-05-30 18:43:58 +0200
commit      9c5587346490ad4355e8de6ae402b76e55c411d5 (patch)
tree        c3d4ef3bb13f175030152a1721ad1d33c0aa7389 /block
parent      block: remove parent device reference from struct bsg_class_device (diff)
blk-mq: abstract out blk-mq-sched rq list iteration bio merge helper
No functional changes in this patch; it is just a prep patch for
utilizing this helper in an IO scheduler.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq-sched.c   34
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 25c14c58385c..b0f2c2a40a0c 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -268,19 +268,16 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
 /*
- * Reverse check our software queue for entries that we could potentially
- * merge with. Currently includes a hand-wavy stop count of 8, to not spend
- * too much time checking for merges.
+ * Iterate list of requests and see if we can merge this bio with any
+ * of them.
  */
-static bool blk_mq_attempt_merge(struct request_queue *q,
-				 struct blk_mq_ctx *ctx, struct bio *bio)
+bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
+			   struct bio *bio)
 {
 	struct request *rq;
 	int checked = 8;
 
-	lockdep_assert_held(&ctx->lock);
-
-	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+	list_for_each_entry_reverse(rq, list, queuelist) {
 		bool merged = false;
 
 		if (!checked--)
@@ -305,13 +302,30 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 			continue;
 		}
 
-		if (merged)
-			ctx->rq_merged++;
 		return merged;
 	}
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
+
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+				 struct blk_mq_ctx *ctx, struct bio *bio)
+{
+	lockdep_assert_held(&ctx->lock);
+
+	if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
+		ctx->rq_merged++;
+		return true;
+	}
+
+	return false;
+}
 
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
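As a rough illustration of why exporting blk_mq_bio_list_merge() is useful, below is a hedged sketch of how an mq IO scheduler that keeps its own request list might call the helper from its bio-merge path. Everything named example_* (the per-scheduler data, its lock and list, and the hook function) is hypothetical and not part of this patch; only blk_mq_bio_list_merge() itself comes from the change above, and the hook signature is illustrative.

#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/spinlock.h>

/* Hypothetical per-scheduler data; not part of this patch. */
struct example_sched_data {
	spinlock_t lock;
	struct list_head rq_list;	/* requests held by the scheduler */
};

/*
 * Hypothetical bio-merge hook: try to merge @bio into one of the requests
 * the scheduler is holding, reusing the exported helper instead of
 * open-coding the reverse list walk and the stop count.
 */
static bool example_sched_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
	struct example_sched_data *sd = hctx->queue->elevator->elevator_data;
	bool merged;

	spin_lock(&sd->lock);
	merged = blk_mq_bio_list_merge(hctx->queue, &sd->rq_list, bio);
	spin_unlock(&sd->lock);

	return merged;
}

The split in the patch keeps the generic list walk free of blk_mq_ctx specifics: the ctx->rq_merged accounting and the ctx->lock lockdep assertion stay in blk_mq_attempt_merge(), so an external caller such as the sketch above only has to provide its own list and locking.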