author     Ming Lei <ming.lei@redhat.com>        2017-11-02 16:24:36 +0100
committer  Jens Axboe <axboe@kernel.dk>          2017-11-04 19:38:50 +0100
commit     a6a252e6491443c1c18eab7e254daee63d4a7a04
parent     blk-flush: use blk_mq_request_bypass_insert()
blk-mq-sched: decide how to handle flush rq via RQF_FLUSH_SEQ
In the I/O scheduler case we always pre-allocate one driver tag before
calling blk_insert_flush(), and the flush request is marked RQF_FLUSH_SEQ
once it enters the flush machinery.
So if RQF_FLUSH_SEQ isn't set, we call blk_insert_flush() to handle the
request; otherwise the flush request is dispatched directly to the
->dispatch list.
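As a rough sketch of that decision, here is a small standalone C model for
illustration only, not kernel code: the struct, the flag bit values and the
returned strings are simplified stand-ins for struct request, RQF_FLUSH_SEQ,
REQ_PREFLUSH/REQ_FUA, blk_insert_flush() and the hctx->dispatch list.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the real kernel definitions; bit values are arbitrary. */
#define RQF_FLUSH_SEQ (1u << 0)  /* request is already inside the flush machinery */
#define REQ_PREFLUSH  (1u << 1)  /* op_is_flush() is true for these ops */
#define REQ_FUA       (1u << 2)

struct request {
	unsigned int rq_flags;   /* RQF_* state flags */
	unsigned int cmd_flags;  /* REQ_* operation flags */
};

static bool op_is_flush(unsigned int cmd_flags)
{
	return cmd_flags & (REQ_PREFLUSH | REQ_FUA);
}

/* Models the insert-side decision described above. */
static const char *insert_decision(const struct request *rq)
{
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags))
		return "blk_insert_flush(): hand the request to the flush machinery";

	if (rq->rq_flags & RQF_FLUSH_SEQ)
		return "bypass the scheduler: add to the ->dispatch list directly";

	return "normal path: insert via the I/O scheduler";
}

int main(void)
{
	struct request fresh_flush  = { .rq_flags = 0,             .cmd_flags = REQ_PREFLUSH };
	struct request in_flush_seq = { .rq_flags = RQF_FLUSH_SEQ, .cmd_flags = REQ_PREFLUSH };
	struct request plain_write  = { .rq_flags = 0,             .cmd_flags = 0 };

	printf("fresh flush      : %s\n", insert_decision(&fresh_flush));
	printf("RQF_FLUSH_SEQ set: %s\n", insert_decision(&in_flush_seq));
	printf("plain write      : %s\n", insert_decision(&plain_write));
	return 0;
}

Running the model prints which of the three paths each example request would
take; the real routing in the patch below follows the same shape.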
This is a preparation patch for no longer pre-allocating a driver tag for
flush requests and for no longer treating flush requests as a special case,
similar to what the legacy path does.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat
-rw-r--r--   block/blk-mq-sched.c | 29
1 file changed, 16 insertions, 13 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 13a27d4d1671..e7094f44afaf 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -345,21 +345,23 @@ void blk_mq_sched_request_inserted(struct request *rq)
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
 
 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+				       bool has_sched,
 				       struct request *rq)
 {
-	if (rq->tag == -1) {
+	/* dispatch flush rq directly */
+	if (rq->rq_flags & RQF_FLUSH_SEQ) {
+		spin_lock(&hctx->lock);
+		list_add(&rq->queuelist, &hctx->dispatch);
+		spin_unlock(&hctx->lock);
+		return true;
+	}
+
+	if (has_sched) {
 		rq->rq_flags |= RQF_SORTED;
-		return false;
+		WARN_ON(rq->tag != -1);
 	}
 
-	/*
-	 * If we already have a real request tag, send directly to
-	 * the dispatch list.
-	 */
-	spin_lock(&hctx->lock);
-	list_add(&rq->queuelist, &hctx->dispatch);
-	spin_unlock(&hctx->lock);
-	return true;
+	return false;
 }
 
 /*
@@ -385,12 +387,13 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
-	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
+	/* flush rq in flush machinery need to be dispatched directly */
+	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
 		blk_mq_sched_insert_flush(hctx, rq, can_block);
 		return;
 	}
 
-	if (e && blk_mq_sched_bypass_insert(hctx, rq))
+	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
 		goto run;
 
 	if (e && e->type->ops.mq.insert_requests) {
@@ -428,7 +431,7 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 	list_for_each_entry_safe(rq, next, list, queuelist) {
 		if (WARN_ON_ONCE(rq->tag != -1)) {
 			list_del_init(&rq->queuelist);
-			blk_mq_sched_bypass_insert(hctx, rq);
+			blk_mq_sched_bypass_insert(hctx, true, rq);
 		}
 	}
 }