author:    Jens Axboe <jaxboe@fusionio.com>  2011-03-21 10:14:27 +0100
committer: Jens Axboe <jaxboe@fusionio.com>  2011-03-21 10:14:27 +0100
commit:    5e84ea3a9c662dc2d7a48703a4468fad954a3b7f
tree:      3fa0fb26a7c8a970213584104cc2498ef46d60a3
parent:    block: NULL dereference on error path in __blkdev_get()
block: attempt to merge with existing requests on plug flush
One of the disadvantages of on-stack plugging is that we potentially
lose out on merging since all pending IO isn't always visible to
everybody. When we flush the on-stack plugs, we currently don't check
whether the flushed requests could be merged with requests that are
already on the queue.
Correct this by adding a new insert variant, ELEVATOR_INSERT_SORT_MERGE.
It works just like ELEVATOR_INSERT_SORT, but first checks whether we
can merge with an existing request before doing the insertion; the
sorted insertion only happens if that merge attempt fails.
This fixes a regression with multiple processes issuing IO that
can be merged.
Thanks to Shaohua Li <shaohua.li@intel.com> for testing and fixing
an accounting bug.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
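
In outline, the new variant tries a cheap back merge before falling
through to the ordinary sorted insertion: first against the queue's
one-hit merge cache (q->last_merge), then via a hash lookup on the
request's start sector. Condensed from the block/elevator.c hunks
below (a restatement of the patch, not a literal copy):

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * Try appending rq to q->last_merge, then to a request
		 * found by elv_rqhash_find(q, blk_rq_pos(rq)).  On
		 * success rq has been merged and freed, so stop here.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* merge failed: fall through to the sorted insertion */
	case ELEVATOR_INSERT_SORT:
		...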
-rw-r--r--  block/blk-core.c          |  2
-rw-r--r--  block/blk-merge.c         |  6
-rw-r--r--  block/blk.h               |  2
-rw-r--r--  block/elevator.c          | 52
-rw-r--r--  include/linux/elevator.h  |  1

5 files changed, 59 insertions(+), 4 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e1fcf7a24668..525693237a4a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2685,7 +2685,7 @@ static void flush_plug_list(struct blk_plug *plug)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
 	}
 
 	if (q) {
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ea85e20d5e94..cfcc37cb222b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -465,3 +465,9 @@ int attempt_front_merge(struct request_queue *q, struct request *rq)
 
 	return 0;
 }
+
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			  struct request *next)
+{
+	return attempt_merge(q, rq, next);
+}
diff --git a/block/blk.h b/block/blk.h
index 49d21af81d07..c8db371a921d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -103,6 +103,8 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio);
 int attempt_back_merge(struct request_queue *q, struct request *rq);
 int attempt_front_merge(struct request_queue *q, struct request *rq);
+int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			  struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
diff --git a/block/elevator.c b/block/elevator.c
index 542ce826b401..c387d3168734 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -521,6 +521,40 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	return ELEVATOR_NO_MERGE;
 }
 
+/*
+ * Attempt to do an insertion back merge. Only check for the case where
+ * we can append 'rq' to an existing request, so we can throw 'rq' away
+ * afterwards.
+ *
+ * Returns true if we merged, false otherwise
+ */
+static bool elv_attempt_insert_merge(struct request_queue *q,
+				     struct request *rq)
+{
+	struct request *__rq;
+
+	if (blk_queue_nomerges(q))
+		return false;
+
+	/*
+	 * First try one-hit cache.
+	 */
+	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+		return true;
+
+	if (blk_queue_noxmerges(q))
+		return false;
+
+	/*
+	 * See if our hash lookup can find a potential backmerge.
+	 */
+	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+	if (__rq && blk_attempt_req_merge(q, __rq, rq))
+		return true;
+
+	return false;
+}
+
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
 	struct elevator_queue *e = q->elevator;
@@ -538,14 +572,18 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 			     struct request *next)
 {
 	struct elevator_queue *e = q->elevator;
+	const int next_sorted = next->cmd_flags & REQ_SORTED;
 
-	if (e->ops->elevator_merge_req_fn)
+	if (next_sorted && e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
 
 	elv_rqhash_reposition(q, rq);
-	elv_rqhash_del(q, next);
 
-	q->nr_sorted--;
+	if (next_sorted) {
+		elv_rqhash_del(q, next);
+		q->nr_sorted--;
+	}
+
 	q->last_merge = rq;
 }
 
@@ -647,6 +685,14 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		__blk_run_queue(q, false);
 		break;
 
+	case ELEVATOR_INSERT_SORT_MERGE:
+		/*
+		 * If we succeed in merging this request with one in the
+		 * queue already, we are done - rq has now been freed,
+		 * so no need to do anything further.
+		 */
+		if (elv_attempt_insert_merge(q, rq))
+			break;
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
 		       !(rq->cmd_flags & REQ_DISCARD));
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ec6f72b84477..d93efcc44570 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -166,6 +166,7 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
 #define ELEVATOR_INSERT_SORT	3
 #define ELEVATOR_INSERT_REQUEUE	4
 #define ELEVATOR_INSERT_FLUSH	5
+#define ELEVATOR_INSERT_SORT_MERGE	6
 
 /*
  * return values from elevator_may_queue_fn
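
For context, the on-stack plug that this flush path drains is set up
with blk_start_plug()/blk_finish_plug(); the merge attempt added above
runs when the plug is flushed. A minimal sketch of a submitter batching
bios under a plug — the submit_bio_batch() helper is hypothetical, and
it assumes the 2.6.39-era submit_bio(rw, bio) signature:

#include <linux/blkdev.h>
#include <linux/fs.h>

/*
 * Hypothetical helper: submit a batch of already-built bios under one
 * on-stack plug.  The requests gather on the plug list while we submit;
 * blk_finish_plug() flushes them to the request queue, where
 * flush_plug_list() now inserts each one with ELEVATOR_INSERT_SORT_MERGE,
 * so contiguous requests from this batch (or from other tasks) still
 * get merged.
 */
static void submit_bio_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(WRITE, bios[i]);
	blk_finish_plug(&plug);
}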