author    | Shaohua Li <shaohua.li@intel.com> | 2011-12-16 14:00:31 +0100
committer | Jens Axboe <axboe@kernel.dk>      | 2011-12-16 14:00:31 +0100
commit    | 274193224cdabd687d804a26e0150bb20f2dd52c (patch)
tree      | f07a788183f2ac91b9b16295f8f146bd5b88fb96 /block/elevator.c
parent    | block, cfq: fix empty queue crash caused by request merge (diff)
block: recursive merge requests
In my workload, thread 1 accesses sectors a, a+2, ..., and thread 2 accesses
a+1, a+3, .... When the requests are flushed to the queue, a and a+1 are
merged into (a, a+1), and a+2 and a+3 into (a+2, a+3), but (a, a+1) and
(a+2, a+3) are not merged with each other.
With the recursive merge below, workload throughput improves by 20% and
context switches drop by 60%.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/elevator.c')
-rw-r--r-- | block/elevator.c | 16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 91e18f8af9be..99838f460b44 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -515,6 +515,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 				     struct request *rq)
 {
 	struct request *__rq;
+	bool ret;
 
 	if (blk_queue_nomerges(q))
 		return false;
@@ -528,14 +529,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 	if (blk_queue_noxmerges(q))
 		return false;
 
+	ret = false;
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
-	if (__rq && blk_attempt_req_merge(q, __rq, rq))
-		return true;
+	while (1) {
+		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+			break;
 
-	return false;
+		/* The merged request could be merged with others, try again */
+		ret = true;
+		rq = __rq;
+	}
+
+	return ret;
 }
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
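
For readers outside the kernel tree, here is a minimal user-space sketch of the same
back-merge loop, taking a = 0 and modelling requests as plain (start, length) sector
ranges. find_backmerge() and try_merge() are simplified stand-ins invented here for
elv_rqhash_find() and blk_attempt_req_merge(); nothing in this sketch is kernel code.

/*
 * Minimal user-space sketch of the recursive back-merge idea (not kernel code).
 * Requests are just (start, len) sector ranges; two already-merged pairs sit in
 * the "queue" and a third pair is being inserted, as in the changelog example.
 */
#include <stdio.h>
#include <stdbool.h>

struct req { unsigned long start, len; bool live; };

static struct req reqs[] = {
	{ 0, 2, true },		/* (a, a+1)     */
	{ 2, 2, true },		/* (a+2, a+3)   */
	{ 4, 2, true },		/* (a+4, a+5), the request being inserted */
};
#define NREQ (sizeof(reqs) / sizeof(reqs[0]))

/* find a live request that ends exactly where 'rq' begins (a back-merge candidate) */
static struct req *find_backmerge(struct req *rq)
{
	for (unsigned i = 0; i < NREQ; i++)
		if (reqs[i].live && &reqs[i] != rq &&
		    reqs[i].start + reqs[i].len == rq->start)
			return &reqs[i];
	return NULL;
}

/* fold 'rq' into '__rq' and retire 'rq' */
static void try_merge(struct req *__rq, struct req *rq)
{
	__rq->len += rq->len;
	rq->live = false;
}

int main(void)
{
	struct req *rq = &reqs[2];	/* newest request */
	bool ret = false;

	/* the loop from the patch: keep retrying with the merged request */
	while (1) {
		struct req *__rq = find_backmerge(rq);
		if (!__rq)
			break;
		try_merge(__rq, rq);
		ret = true;
		rq = __rq;		/* the merged request may merge again */
	}

	for (unsigned i = 0; i < NREQ; i++)
		if (reqs[i].live)
			printf("req: start=%lu len=%lu\n", reqs[i].start, reqs[i].len);
	printf("merged=%d\n", ret);
	return 0;
}

A single back-merge pass would only fold (a+4, a+5) into (a+2, a+3) and leave (a, a+1)
on its own; the loop retries with the merged request and collapses all three into one
request covering the whole range.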