author | Shaohua Li <shaohua.li@intel.com> | 2011-07-12 14:24:56 +0200 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2011-07-12 14:24:56 +0200 |
commit | 7700fc4f675fa38094e78e345b594363a2fd895b (patch) | |
tree | 000ae1fb3d825240b7e78ad8d3db66be168ffe94 /block/cfq-iosched.c | |
parent | CFQ: add think time check for service tree (diff) | |
CFQ: add think time check for group
Currently, when the last queue of a group has no request, we don't expire
the queue, in the hope that a request from the group arrives soon, so the
group doesn't lose its share. But if the group's think time is big, that
assumption doesn't hold and we just waste bandwidth. In that case, don't idle.
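The "think time is big" test uses the per-group think time statistics added below together with the cfq_io_thinktime_big() helper from the parent patch ("CFQ: add think time check for service tree"). Roughly, and paraphrasing that helper rather than quoting it, the check looks like this:

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
                                        struct cfq_ttime *ttime, bool group_idle)
{
        unsigned long slice;

        /* don't trust the mean until enough samples have been collected */
        if (!sample_valid(ttime->ttime_samples))
                return false;

        /* groups are compared against group_idle, queues against slice_idle */
        slice = group_idle ? cfqd->cfq_group_idle : cfqd->cfq_slice_idle;

        return ttime->ttime_mean > slice;
}

If the group's mean think time already exceeds the group idle window, idling can't pay off, so the queue is expired instead of held. The fio job file used for the test: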
[global]
runtime=30
direct=1
[test1]
cgroup=test1
cgroup_weight=1000
rw=randread
ioengine=libaio
size=500m
runtime=30
directory=/mnt
filename=file1
thinktime=9000
[test2]
cgroup=test2
cgroup_weight=1000
rw=randread
ioengine=libaio
size=500m
runtime=30
directory=/mnt
filename=file2
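Both jobs do 30 seconds of direct random reads against equally weighted cgroups (cgroup_weight=1000), so each group nominally deserves half the disk time. The only difference is that test1 pauses between I/Os (thinktime=9000, i.e. roughly 9 ms, as fio takes thinktime in microseconds) while test2 issues requests back to back. Resulting bandwidth, patched vs. base: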
| | patched | base |
|---|---|---|
| test1 | 64k | 39k |
| test2 | 548k | 540k |
| total | 604k | 578k |
test1's group gets much better throughput because it spends less time waiting.
To check whether the patch changes the behavior of queues without significant
think time, I also ran test1 with a 2ms think time and with no think time. The
results are stable: throughput doesn't change with or without the patch.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r-- | block/cfq-iosched.c | 19 |
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index baa90606f01a..1f96ad6254f1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -211,6 +211,7 @@ struct cfq_group {
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
+	struct cfq_ttime ttime;
 };
 
 /*
@@ -1067,6 +1068,8 @@ static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
 		*st = CFQ_RB_ROOT;
 	RB_CLEAR_NODE(&cfqg->rb_node);
 
+	cfqg->ttime.last_end_request = jiffies;
+
 	/*
 	 * Take the initial reference that will be released on destroy
 	 * This can be thought of a joint reference by cgroup and
@@ -2381,8 +2384,9 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	 * this group, wait for requests to complete.
 	 */
 check_group_idle:
-	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
-	    && cfqq->cfqg->dispatched) {
+	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
+	    cfqq->cfqg->dispatched &&
+	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
 		cfqq = NULL;
 		goto keep_queue;
 	}
@@ -3239,6 +3243,9 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
 			cfqd->cfq_slice_idle);
 	}
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
+#endif
 }
 
 static void
@@ -3521,6 +3528,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	if (cfqq->cfqg->nr_cfqq > 1)
 		return false;
 
+	/* the only queue in the group, but think time is big */
+	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
+		return false;
+
 	if (cfq_slice_used(cfqq))
 		return true;
 
@@ -3581,6 +3592,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 			cfqd->last_delayed_sync = now;
 	}
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	cfqq->cfqg->ttime.last_end_request = now;
+#endif
+
 	/*
 	 * If this is the active queue, check if it needs to be expired,
 	 * or if we want to idle in case it has no pending requests.
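For context (this helper is not part of the diff above), the ttime fields the new checks read are maintained by __cfq_update_io_thinktime(), which the hunk in cfq_update_io_thinktime() now also calls for the group's ttime. Paraphrasing the helper from the parent patch, it keeps a decaying average of the gap between the last completion and the next request:

static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
        /* time since the last request from this entity completed */
        unsigned long elapsed = jiffies - ttime->last_end_request;

        /* clamp outliers so a single long pause doesn't dominate the mean */
        elapsed = min(elapsed, 2UL * slice_idle);

        /* exponentially decaying samples/total/mean: keep 7/8 old, add 1/8 new */
        ttime->ttime_samples = (7 * ttime->ttime_samples + 256) / 8;
        ttime->ttime_total = (7 * ttime->ttime_total + 256 * elapsed) / 8;
        ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}

The rest of the patch wires the group into this machinery: struct cfq_group gains a ttime field, last_end_request is initialized at group allocation and refreshed on every request completion, and both the select-queue and wait-busy paths skip group idling once the group's mean think time exceeds cfq_group_idle.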