author     Christoph Hellwig <hch@lst.de>       2017-06-16 18:15:26 +0200
committer  Jens Axboe <axboe@kernel.dk>         2017-06-18 18:08:55 +0200
commit     5bbf4e5a8e3a780874b2ed77bd1bd57850f3f6da
tree       06835c621f167a8cc563d57a6d6e29d127078967 /block/bfq-iosched.c
parent     blk-mq: refactor blk_mq_sched_assign_ioc
blk-mq-sched: unify request prepare methods
This patch makes sure we always allocate requests in the core blk-mq
code and use a common prepare_request method to initialize them for
both mq I/O schedulers. For Kyber, an additional limit_depth method
is added that is called before allocating the request.
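
For reference, the two hooks look roughly like this from a scheduler's
point of view. This is an illustrative sketch, not code from the patch:
the my_sched_* names and the async depth of 64 are made up, while the
ops.mq signatures follow this series.

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include "blk-mq.h"	/* struct blk_mq_alloc_data */

/* May shrink the tag depth before the request is allocated. */
static void my_sched_limit_depth(unsigned int op,
				 struct blk_mq_alloc_data *data)
{
	if (!op_is_sync(op))
		data->shallow_depth = 64;	/* hypothetical async limit */
}

/* Runs after allocation; initializes per-request state, cannot fail. */
static void my_sched_prepare_request(struct request *rq, struct bio *bio)
{
	rq->elv.priv[0] = NULL;	/* e.g. set up lazily later */
}

static void my_sched_finish_request(struct request *rq)
{
	/* tear down whatever prepare_request set up */
}

static struct elevator_type my_sched = {
	.ops.mq = {
		.limit_depth		= my_sched_limit_depth,
		.prepare_request	= my_sched_prepare_request,
		.finish_request		= my_sched_finish_request,
	},
};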
Also, because none of the initializations can really fail, the new method
does not return an error - instead the bfq finish method is hardened
to deal with the no-IOC case.
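
The reason the finish method must cope with a missing icq: after this
series the core frees requests based only on RQF_ELVPRIV, so
finish_request also runs for requests whose icq was never assigned.
Roughly, as a paraphrased sketch of the core free path (not the
verbatim kernel code):

void blk_mq_free_request(struct request *rq)
{
	struct elevator_queue *e = rq->q->elevator;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}
	/* tag release and queue accounting elided */
}

This is why bfq_finish_request in the diff below starts with an early
return when rq->elv.icq is NULL.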
Last but not least, this removes the abuse of RQF_QUEUED by the blk-mq
scheduling code, as RQF_ELVPRIV is all that is needed now.
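
On the allocation side, the core path ends up looking approximately like
this (a paraphrase of blk_mq_get_request() after this patch, with ctx/hctx
setup and error handling elided):

static struct request *blk_mq_get_request(struct request_queue *q,
					  struct bio *bio, unsigned int op,
					  struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;

	/* give the scheduler a chance to restrict the tag depth */
	if (e && e->type->ops.mq.limit_depth)
		e->type->ops.mq.limit_depth(op, data);

	rq = __blk_mq_alloc_request(data, op);
	if (!rq)
		return NULL;

	if (e && e->type->ops.mq.prepare_request) {
		if (e->type->icq_cache && rq_ioc(bio))
			blk_mq_sched_assign_ioc(rq, bio);

		e->type->ops.mq.prepare_request(rq, bio);
		rq->rq_flags |= RQF_ELVPRIV;	/* was RQF_QUEUED before */
	}
	return rq;
}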
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/bfq-iosched.c')
 block/bfq-iosched.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f037b005faa1..60d32700f104 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4292,8 +4292,14 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
 
 static void bfq_finish_request(struct request *rq)
 {
-	struct bfq_queue *bfqq = RQ_BFQQ(rq);
-	struct bfq_data *bfqd = bfqq->bfqd;
+	struct bfq_queue *bfqq;
+	struct bfq_data *bfqd;
+
+	if (!rq->elv.icq)
+		return;
+
+	bfqq = RQ_BFQQ(rq);
+	bfqd = bfqq->bfqd;
 
 	if (rq->rq_flags & RQF_STARTED)
 		bfqg_stats_update_completion(bfqq_group(bfqq),
@@ -4394,9 +4400,9 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
 /*
  * Allocate bfq data structures associated with this request.
  */
-static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
-			      struct bio *bio)
+static void bfq_prepare_request(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
 	struct bfq_io_cq *bic;
 	const int is_sync = rq_is_sync(rq);
@@ -4405,7 +4411,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 	bool split = false;
 
 	if (!rq->elv.icq)
-		return 1;
+		return;
 	bic = icq_to_bic(rq->elv.icq);
 
 	spin_lock_irq(&bfqd->lock);
@@ -4466,7 +4472,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 	bfq_handle_burst(bfqd, bfqq);
 
 	spin_unlock_irq(&bfqd->lock);
-	return 0;
 }
 
 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
@@ -4945,7 +4950,7 @@ static struct elv_fs_entry bfq_attrs[] = {
 
 static struct elevator_type iosched_bfq_mq = {
 	.ops.mq = {
-		.get_rq_priv		= bfq_get_rq_private,
+		.prepare_request	= bfq_prepare_request,
 		.finish_request		= bfq_finish_request,
 		.exit_icq		= bfq_exit_icq,
 		.insert_requests	= bfq_insert_requests,