author    Michael Lyle <mlyle@lyle.org>    2017-10-14 01:35:39 +0200
committer Jens Axboe <axboe@kernel.dk>     2017-10-16 17:07:26 +0200
commit    a8500fc816b19795756d27c762daa5e19f5e1b6f (patch)
tree      38d73cfab43258b7d24a1f9ea7502dc87e93c425
parent    bcache: writeback rate shouldn't artifically clamp (diff)
bcache: rearrange writeback main thread ratelimit
The time spent searching for things to write back "counts" for the
actual rate achieved, so don't flush the accumulated rate with each
chunk.

This will maintain better fidelity to user-commanded rates, but it
may slightly increase the burstiness of writeback.  The writeback
lock needs improvement to help mitigate this.

Signed-off-by: Michael Lyle <mlyle@lyle.org>
Reviewed-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
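As background for the reasoning above, here is a minimal, self-contained
sketch of the pacing scheme this patch adjusts (user-space C for
illustration only; the struct and function names below are hypothetical,
not the bcache API).  A "reset" stamps the clock, and a "delay" charges
completed work against a commanded rate; if the clock is reset right
before the timed work, any time already spent elsewhere (here, searching
the btree for dirty keys) is discarded:

	/* Hypothetical pacing helpers, for illustration only. */
	#include <stdint.h>
	#include <time.h>

	struct ratelimit {
		uint64_t next_ns;      /* time at which the next unit of work is due */
		uint64_t ns_per_unit;  /* commanded pace: nanoseconds per unit of work */
	};

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
	}

	/* Restart pacing from "now"; elapsed time before this is forgotten. */
	static void ratelimit_reset(struct ratelimit *r)
	{
		r->next_ns = now_ns();
	}

	/* Charge `units` of completed work; return how long to sleep, in ns. */
	static uint64_t ratelimit_delay(struct ratelimit *r, uint64_t units)
	{
		uint64_t now = now_ns();

		r->next_ns += units * r->ns_per_unit;
		return r->next_ns > now ? r->next_ns - now : 0;
	}

With this shape, resetting at the top of every loop iteration zeroes the
credit earned while searching; moving the reset out of the hot path, as
the hunks below do with bch_ratelimit_reset(), lets that search time
count toward the commanded rate, at the cost of somewhat burstier I/O.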
-rw-r--r--  drivers/md/bcache/writeback.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 897d28050656..9b770b13bdf6 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -440,6 +440,8 @@ static int bch_writeback_thread(void *arg)
 	struct cached_dev *dc = arg;
 	bool searched_full_index;
 
+	bch_ratelimit_reset(&dc->writeback_rate);
+
 	while (!kthread_should_stop()) {
 		down_write(&dc->writeback_lock);
 		if (!atomic_read(&dc->has_dirty) ||
@@ -467,7 +469,6 @@ static int bch_writeback_thread(void *arg)
 
 		up_write(&dc->writeback_lock);
 
-		bch_ratelimit_reset(&dc->writeback_rate);
 		read_dirty(dc);
 
 		if (searched_full_index) {
@@ -477,6 +478,8 @@ static int bch_writeback_thread(void *arg)
 			while (delay &&
 			       !kthread_should_stop() &&
 			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
 				delay = schedule_timeout_interruptible(delay);
+
+			bch_ratelimit_reset(&dc->writeback_rate);
 		}
 	}
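For orientation, the writeback main thread after this patch is shaped
roughly as follows (a simplified sketch of bch_writeback_thread() with
error handling, detachment checks, and most locking detail elided; not
the verbatim kernel source):

	static int bch_writeback_thread(void *arg)
	{
		struct cached_dev *dc = arg;
		bool searched_full_index;

		bch_ratelimit_reset(&dc->writeback_rate);	/* start the clock once */

		while (!kthread_should_stop()) {
			down_write(&dc->writeback_lock);
			searched_full_index = refill_dirty(dc);	/* search for dirty keys */
			up_write(&dc->writeback_lock);

			/* No reset here: the search above now counts toward the rate. */
			read_dirty(dc);

			if (searched_full_index) {
				/* ... sleep for dc->writeback_delay ... */
				/* Restart pacing so idle time is not credited. */
				bch_ratelimit_reset(&dc->writeback_rate);
			}
		}

		return 0;
	}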