author    Kent Overstreet <koverstreet@google.com>  2013-06-05 15:24:39 +0200
committer Kent Overstreet <koverstreet@google.com>  2013-06-27 06:58:04 +0200
commit    72c270612bd33192fa836ad0f2939af1ca218292 (patch)
tree      344129d75f3b5c0abcf77dd4b6340783a126cde8 /drivers/md/bcache/writeback.h
parent    bcache: Track dirty data by stripe (diff)
download  linux-72c270612bd33192fa836ad0f2939af1ca218292.tar.xz
          linux-72c270612bd33192fa836ad0f2939af1ca218292.zip
bcache: Write out full stripes
Now that we're tracking dirty data per stripe, we can add two
optimizations for raid5/6:

 * If a stripe is already dirty, force writes to that stripe to
   writeback mode - to help build up full stripes of dirty data

 * When flushing dirty data, preferentially write out full stripes
   first if there are any.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Diffstat (limited to 'drivers/md/bcache/writeback.h')
-rw-r--r--  drivers/md/bcache/writeback.h  43
1 file changed, 43 insertions(+), 0 deletions(-)
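The helper added below, bcache_dev_stripe_dirty(), implements the first
optimization: it walks every stripe a request touches and reports whether
any of them already holds dirty data. As a reading aid for the diff, here
is a minimal userspace sketch of that stripe arithmetic; the array, the
stripe size, and every name in it are illustrative stand-ins, not bcache's
actual structures.

/* Userspace model: a device is an array of per-stripe dirty-sector
 * counts; STRIPE_SIZE_BITS stands in for d->stripe_size_bits. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STRIPE_SIZE_BITS 4	/* 16-sector stripes, for illustration */
#define NR_STRIPES       8

static int stripe_sectors_dirty[NR_STRIPES];

/* Mirrors the loop in bcache_dev_stripe_dirty(): start at the stripe
 * containing 'offset' and visit each stripe the request touches. */
static bool stripe_range_dirty(uint64_t offset, unsigned nr_sectors)
{
	uint64_t stripe = offset >> STRIPE_SIZE_BITS;

	while (1) {
		if (stripe_sectors_dirty[stripe])
			return true;

		if (nr_sectors <= 1U << STRIPE_SIZE_BITS)
			return false;

		nr_sectors -= 1U << STRIPE_SIZE_BITS;
		stripe++;
	}
}

int main(void)
{
	stripe_sectors_dirty[2] = 5;	/* pretend stripe 2 has dirty sectors */

	/* A 32-sector write at sector 24 spans stripes 1 and 2, sees the
	 * dirty stripe, and would be forced to writeback mode. */
	printf("%d\n", stripe_range_dirty(24, 32));	/* prints 1 */
	printf("%d\n", stripe_range_dirty(48, 16));	/* stripe 3 is clean: 0 */
	return 0;
}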
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 5ce9771df047..c91f61bb95b6 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -1,6 +1,9 @@
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
+#define CUTOFF_WRITEBACK 40
+#define CUTOFF_WRITEBACK_SYNC 70
+
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
uint64_t i, ret = 0;
@@ -11,6 +14,46 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
+static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
+ uint64_t offset,
+ unsigned nr_sectors)
+{
+ uint64_t stripe = offset >> d->stripe_size_bits;
+
+ while (1) {
+ if (atomic_read(d->stripe_sectors_dirty + stripe))
+ return true;
+
+ if (nr_sectors <= 1 << d->stripe_size_bits)
+ return false;
+
+ nr_sectors -= 1 << d->stripe_size_bits;
+ stripe++;
+ }
+}
+
+static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+ unsigned cache_mode, bool would_skip)
+{
+ unsigned in_use = dc->disk.c->gc_stats.in_use;
+
+ if (cache_mode != CACHE_MODE_WRITEBACK ||
+ atomic_read(&dc->disk.detaching) ||
+ in_use > CUTOFF_WRITEBACK_SYNC)
+ return false;
+
+ if (dc->partial_stripes_expensive &&
+ bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
+ bio_sectors(bio)))
+ return true;
+
+ if (would_skip)
+ return false;
+
+ return bio->bi_rw & REQ_SYNC ||
+ in_use <= CUTOFF_WRITEBACK;
+}
+
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
void bch_writeback_queue(struct cached_dev *);
void bch_writeback_add(struct cached_dev *);
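For context on the two cutoffs: gc_stats.in_use is the percentage of the
cache currently in use, so with the defaults above an ordinary write is
cached dirty only while the cache is at most 40% full, a REQ_SYNC write up
to 70%, and past 70% every write falls back to writethrough. Below is a
hedged sketch of how a write-path caller might consult should_writeback();
the real decision point lives in bcache's request code, and handle_write()
and its 'skip' flag are hypothetical names, not the kernel's.

/* Hypothetical caller, for illustration only -- not bcache's actual
 * request path. 'skip' means the request would otherwise bypass the
 * cache (e.g. it looks sequential). */
static void handle_write(struct cached_dev *dc, struct bio *bio,
			 unsigned cache_mode, bool skip)
{
	if (should_writeback(dc, bio, cache_mode, skip)) {
		/* Cache the data dirty now; the writeback thread flushes
		 * it to the backing device later, full stripes first. */
		bch_writeback_add(dc);
	} else {
		/* Writethrough or bypass: the write goes straight to the
		 * backing device (omitted in this sketch). */
	}
}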