Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/Kconfig | 22
-rw-r--r--  drivers/md/bcache/Kconfig | 11
-rw-r--r--  drivers/md/bcache/alloc.c | 385
-rw-r--r--  drivers/md/bcache/bcache.h | 339
-rw-r--r--  drivers/md/bcache/bset.c | 289
-rw-r--r--  drivers/md/bcache/bset.h | 93
-rw-r--r--  drivers/md/bcache/btree.c | 1419
-rw-r--r--  drivers/md/bcache/btree.h | 195
-rw-r--r--  drivers/md/bcache/closure.c | 103
-rw-r--r--  drivers/md/bcache/closure.h | 183
-rw-r--r--  drivers/md/bcache/debug.c | 185
-rw-r--r--  drivers/md/bcache/debug.h | 50
-rw-r--r--  drivers/md/bcache/journal.c | 293
-rw-r--r--  drivers/md/bcache/journal.h | 52
-rw-r--r--  drivers/md/bcache/movinggc.c | 108
-rw-r--r--  drivers/md/bcache/request.c | 1102
-rw-r--r--  drivers/md/bcache/request.h | 43
-rw-r--r--  drivers/md/bcache/stats.c | 26
-rw-r--r--  drivers/md/bcache/stats.h | 13
-rw-r--r--  drivers/md/bcache/super.c | 192
-rw-r--r--  drivers/md/bcache/sysfs.c | 92
-rw-r--r--  drivers/md/bcache/trace.c | 1
-rw-r--r--  drivers/md/bcache/util.c | 20
-rw-r--r--  drivers/md/bcache/util.h | 17
-rw-r--r--  drivers/md/bcache/writeback.c | 500
-rw-r--r--  drivers/md/bcache/writeback.h | 46
-rw-r--r--  drivers/md/bitmap.c | 4
-rw-r--r--  drivers/md/dm-bufio.c | 5
-rw-r--r--  drivers/md/dm-cache-metadata.c | 104
-rw-r--r--  drivers/md/dm-cache-metadata.h | 5
-rw-r--r--  drivers/md/dm-cache-policy-internal.h | 7
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 694
-rw-r--r--  drivers/md/dm-cache-policy.c | 4
-rw-r--r--  drivers/md/dm-cache-policy.h | 21
-rw-r--r--  drivers/md/dm-cache-target.c | 687
-rw-r--r--  drivers/md/dm-crypt.c | 216
-rw-r--r--  drivers/md/dm-delay.c | 23
-rw-r--r--  drivers/md/dm-ioctl.c | 36
-rw-r--r--  drivers/md/dm-mpath.c | 34
-rw-r--r--  drivers/md/dm-snap.c | 71
-rw-r--r--  drivers/md/dm-stats.c | 1
-rw-r--r--  drivers/md/dm-table.c | 28
-rw-r--r--  drivers/md/dm-thin-metadata.c | 8
-rw-r--r--  drivers/md/dm-thin-metadata.h | 1
-rw-r--r--  drivers/md/dm-thin.c | 66
-rw-r--r--  drivers/md/dm.c | 47
-rw-r--r--  drivers/md/dm.h | 13
-rw-r--r--  drivers/md/md.c | 207
-rw-r--r--  drivers/md/md.h | 6
-rw-r--r--  drivers/md/persistent-data/dm-array.c | 15
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.c | 6
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.h | 7
-rw-r--r--  drivers/md/persistent-data/dm-space-map-common.c | 32
-rw-r--r--  drivers/md/persistent-data/dm-space-map-disk.c | 18
-rw-r--r--  drivers/md/persistent-data/dm-space-map-metadata.c | 8
-rw-r--r--  drivers/md/raid1.c | 171
-rw-r--r--  drivers/md/raid1.h | 15
-rw-r--r--  drivers/md/raid10.c | 36
-rw-r--r--  drivers/md/raid5.c | 432
-rw-r--r--  drivers/md/raid5.h | 18
60 files changed, 4956 insertions(+), 3869 deletions(-)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 30b426ed744b..f2ccbc3b9fe4 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -297,6 +297,17 @@ config DM_MIRROR
Allow volume managers to mirror logical volumes, also
needed for live data migration tools such as 'pvmove'.
+config DM_LOG_USERSPACE
+ tristate "Mirror userspace logging"
+ depends on DM_MIRROR && NET
+ select CONNECTOR
+ ---help---
+ The userspace logging module provides a mechanism for
+ relaying the dm-dirty-log API to userspace. Log designs
+ which are more suited to userspace implementation (e.g.
+ shared storage logs) or experimental logs can be implemented
+ by leveraging this framework.
+
config DM_RAID
tristate "RAID 1/4/5/6/10 target"
depends on BLK_DEV_DM
@@ -323,17 +334,6 @@ config DM_RAID
RAID-5, RAID-6 distributes the syndromes across the drives
in one of the available parity distribution methods.
-config DM_LOG_USERSPACE
- tristate "Mirror userspace logging"
- depends on DM_MIRROR && NET
- select CONNECTOR
- ---help---
- The userspace logging module provides a mechanism for
- relaying the dm-dirty-log API to userspace. Log designs
- which are more suited to userspace implementation (e.g.
- shared storage logs) or experimental logs can be implemented
- by leveraging this framework.
-
config DM_ZERO
tristate "Zero target"
depends on BLK_DEV_DM
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index f950c9d29f3e..2638417b19aa 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -13,15 +13,8 @@ config BCACHE_DEBUG
---help---
Don't select this option unless you're a developer
- Enables extra debugging tools (primarily a fuzz tester)
-
-config BCACHE_EDEBUG
- bool "Extended runtime checks"
- depends on BCACHE
- ---help---
- Don't select this option unless you're a developer
-
- Enables extra runtime checks which significantly affect performance
+ Enables extra debugging tools and allows expensive runtime checks to be
+ turned on.
config BCACHE_CLOSURES_DEBUG
bool "Debug closures"
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index e45f5575fd4d..4c9852d92b0a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,13 +63,12 @@
#include "bcache.h"
#include "btree.h"
+#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>
-#define MAX_IN_FLIGHT_DISCARDS 8U
-
/* Bucket heap / gen */
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
@@ -121,75 +120,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
mutex_unlock(&c->bucket_lock);
}
-/* Discard/TRIM */
-
-struct discard {
- struct list_head list;
- struct work_struct work;
- struct cache *ca;
- long bucket;
-
- struct bio bio;
- struct bio_vec bv;
-};
-
-static void discard_finish(struct work_struct *w)
-{
- struct discard *d = container_of(w, struct discard, work);
- struct cache *ca = d->ca;
- char buf[BDEVNAME_SIZE];
-
- if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
- pr_notice("discard error on %s, disabling",
- bdevname(ca->bdev, buf));
- d->ca->discard = 0;
- }
-
- mutex_lock(&ca->set->bucket_lock);
-
- fifo_push(&ca->free, d->bucket);
- list_add(&d->list, &ca->discards);
- atomic_dec(&ca->discards_in_flight);
-
- mutex_unlock(&ca->set->bucket_lock);
-
- closure_wake_up(&ca->set->bucket_wait);
- wake_up_process(ca->alloc_thread);
-
- closure_put(&ca->set->cl);
-}
-
-static void discard_endio(struct bio *bio, int error)
-{
- struct discard *d = container_of(bio, struct discard, bio);
- schedule_work(&d->work);
-}
-
-static void do_discard(struct cache *ca, long bucket)
-{
- struct discard *d = list_first_entry(&ca->discards,
- struct discard, list);
-
- list_del(&d->list);
- d->bucket = bucket;
-
- atomic_inc(&ca->discards_in_flight);
- closure_get(&ca->set->cl);
-
- bio_init(&d->bio);
-
- d->bio.bi_sector = bucket_to_sector(ca->set, d->bucket);
- d->bio.bi_bdev = ca->bdev;
- d->bio.bi_rw = REQ_WRITE|REQ_DISCARD;
- d->bio.bi_max_vecs = 1;
- d->bio.bi_io_vec = d->bio.bi_inline_vecs;
- d->bio.bi_size = bucket_bytes(ca);
- d->bio.bi_end_io = discard_endio;
- bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
-
- submit_bio(0, &d->bio);
-}
-
/* Allocation */
static inline bool can_inc_bucket_gen(struct bucket *b)
@@ -280,7 +210,7 @@ static void invalidate_buckets_lru(struct cache *ca)
* multiple times when it can't do anything
*/
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
@@ -305,7 +235,7 @@ static void invalidate_buckets_fifo(struct cache *ca)
if (++checked >= ca->sb.nbuckets) {
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
}
@@ -330,7 +260,7 @@ static void invalidate_buckets_random(struct cache *ca)
if (++checked >= ca->sb.nbuckets / 2) {
ca->invalidate_needs_gc = 1;
- bch_queue_gc(ca->set);
+ wake_up_gc(ca->set);
return;
}
}
@@ -398,16 +328,18 @@ static int bch_allocator_thread(void *arg)
else
break;
- allocator_wait(ca, (int) fifo_free(&ca->free) >
- atomic_read(&ca->discards_in_flight));
-
if (ca->discard) {
- allocator_wait(ca, !list_empty(&ca->discards));
- do_discard(ca, bucket);
- } else {
- fifo_push(&ca->free, bucket);
- closure_wake_up(&ca->set->bucket_wait);
+ mutex_unlock(&ca->set->bucket_lock);
+ blkdev_issue_discard(ca->bdev,
+ bucket_to_sector(ca->set, bucket),
+ ca->sb.block_size, GFP_KERNEL, 0);
+ mutex_lock(&ca->set->bucket_lock);
}
+
+ allocator_wait(ca, !fifo_full(&ca->free));
+
+ fifo_push(&ca->free, bucket);
+ wake_up(&ca->set->bucket_wait);
}
/*
@@ -433,16 +365,40 @@ static int bch_allocator_thread(void *arg)
}
}
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
{
- long r = -1;
-again:
+ DEFINE_WAIT(w);
+ struct bucket *b;
+ long r;
+
+ /* fastpath */
+ if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+ fifo_pop(&ca->free, r);
+ goto out;
+ }
+
+ if (!wait)
+ return -1;
+
+ while (1) {
+ if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+ fifo_pop(&ca->free, r);
+ break;
+ }
+
+ prepare_to_wait(&ca->set->bucket_wait, &w,
+ TASK_UNINTERRUPTIBLE);
+
+ mutex_unlock(&ca->set->bucket_lock);
+ schedule();
+ mutex_lock(&ca->set->bucket_lock);
+ }
+
+ finish_wait(&ca->set->bucket_wait, &w);
+out:
wake_up_process(ca->alloc_thread);
- if (fifo_used(&ca->free) > ca->watermark[watermark] &&
- fifo_pop(&ca->free, r)) {
- struct bucket *b = ca->buckets + r;
-#ifdef CONFIG_BCACHE_EDEBUG
+ if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
@@ -455,36 +411,25 @@ again:
BUG_ON(i == r);
fifo_for_each(i, &ca->unused, iter)
BUG_ON(i == r);
-#endif
- BUG_ON(atomic_read(&b->pin) != 1);
-
- SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
-
- if (watermark <= WATERMARK_METADATA) {
- SET_GC_MARK(b, GC_MARK_METADATA);
- b->prio = BTREE_PRIO;
- } else {
- SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
- b->prio = INITIAL_PRIO;
- }
-
- return r;
}
- trace_bcache_alloc_fail(ca);
+ b = ca->buckets + r;
- if (cl) {
- closure_wait(&ca->set->bucket_wait, cl);
+ BUG_ON(atomic_read(&b->pin) != 1);
- if (closure_blocking(cl)) {
- mutex_unlock(&ca->set->bucket_lock);
- closure_sync(cl);
- mutex_lock(&ca->set->bucket_lock);
- goto again;
- }
+ SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+ if (watermark <= WATERMARK_METADATA) {
+ SET_GC_MARK(b, GC_MARK_METADATA);
+ SET_GC_MOVE(b, 0);
+ b->prio = BTREE_PRIO;
+ } else {
+ SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_MOVE(b, 0);
+ b->prio = INITIAL_PRIO;
}
- return -1;
+ return r;
}
void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -501,7 +446,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
- struct bkey *k, int n, struct closure *cl)
+ struct bkey *k, int n, bool wait)
{
int i;
@@ -514,7 +459,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
for (i = 0; i < n; i++) {
struct cache *ca = c->cache_by_alloc[i];
- long b = bch_bucket_alloc(ca, watermark, cl);
+ long b = bch_bucket_alloc(ca, watermark, wait);
if (b == -1)
goto err;
@@ -529,22 +474,202 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
return 0;
err:
bch_bucket_free(c, k);
- __bkey_put(c, k);
+ bkey_put(c, k);
return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
- struct bkey *k, int n, struct closure *cl)
+ struct bkey *k, int n, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
- ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+ ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
mutex_unlock(&c->bucket_lock);
return ret;
}
+/* Sector allocator */
+
+struct open_bucket {
+ struct list_head list;
+ unsigned last_write_point;
+ unsigned sectors_free;
+ BKEY_PADDED(key);
+};
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we look for a bucket where
+ * the last write to it was sequential with the current write, and failing that
+ * we look for a bucket that was last used by the same task.
+ *
+ * The idea is that if you've got multiple tasks pulling data into the cache at the
+ * same time, you'll get better cache utilization if you try to segregate their
+ * data and preserve locality.
+ *
+ * For example, say you've started Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache awhile, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic.
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ const struct bkey *search,
+ unsigned write_point,
+ struct bkey *alloc)
+{
+ struct open_bucket *ret, *ret_task = NULL;
+
+ list_for_each_entry_reverse(ret, &c->data_buckets, list)
+ if (!bkey_cmp(&ret->key, search))
+ goto found;
+ else if (ret->last_write_point == write_point)
+ ret_task = ret;
+
+ ret = ret_task ?: list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+found:
+ if (!ret->sectors_free && KEY_PTRS(alloc)) {
+ ret->sectors_free = c->sb.bucket_size;
+ bkey_copy(&ret->key, alloc);
+ bkey_init(alloc);
+ }
+
+ if (!ret->sectors_free)
+ ret = NULL;
+
+ return ret;
+}
+
+/*
+ * Allocates some space in the cache to write to, sets k to point to the newly
+ * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
+ * end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If wait is true, this will not fail.
+ */
+bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
+ unsigned write_point, unsigned write_prio, bool wait)
+{
+ struct open_bucket *b;
+ BKEY_PADDED(key) alloc;
+ unsigned i;
+
+ /*
+ * We might have to allocate a new bucket, which we can't do with a
+ * spinlock held. So if we have to allocate, we drop the lock, allocate
+ * and then retry. KEY_PTRS() indicates whether alloc points to
+ * allocated bucket(s).
+ */
+
+ bkey_init(&alloc.key);
+ spin_lock(&c->data_bucket_lock);
+
+ while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
+ unsigned watermark = write_prio
+ ? WATERMARK_MOVINGGC
+ : WATERMARK_NONE;
+
+ spin_unlock(&c->data_bucket_lock);
+
+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+ return false;
+
+ spin_lock(&c->data_bucket_lock);
+ }
+
+ /*
+ * If we had to allocate, we might race and not need to allocate the
+ * second time we call pick_data_bucket(). If we allocated a bucket but
+ * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+ */
+ if (KEY_PTRS(&alloc.key))
+ bkey_put(c, &alloc.key);
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ EBUG_ON(ptr_stale(c, &b->key, i));
+
+ /* Set up the pointer to the space we're allocating: */
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ k->ptr[i] = b->key.ptr[i];
+
+ sectors = min(sectors, b->sectors_free);
+
+ SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+ SET_KEY_SIZE(k, sectors);
+ SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+ /*
+ * Move b to the end of the lru, and keep track of what this bucket was
+ * last used for:
+ */
+ list_move_tail(&b->list, &c->data_buckets);
+ bkey_copy_key(&b->key, k);
+ b->last_write_point = write_point;
+
+ b->sectors_free -= sectors;
+
+ for (i = 0; i < KEY_PTRS(&b->key); i++) {
+ SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+ atomic_long_add(sectors,
+ &PTR_CACHE(c, &b->key, i)->sectors_written);
+ }
+
+ if (b->sectors_free < c->sb.block_size)
+ b->sectors_free = 0;
+
+ /*
+ * k takes refcounts on the buckets it points to until it's inserted
+ * into the btree, but if we're done with this bucket we just transfer
+ * the refcount that bch_bucket_alloc_set() took.
+ */
+ if (b->sectors_free)
+ for (i = 0; i < KEY_PTRS(&b->key); i++)
+ atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+ spin_unlock(&c->data_bucket_lock);
+ return true;
+}
+
/* Init */
+void bch_open_buckets_free(struct cache_set *c)
+{
+ struct open_bucket *b;
+
+ while (!list_empty(&c->data_buckets)) {
+ b = list_first_entry(&c->data_buckets,
+ struct open_bucket, list);
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+ int i;
+
+ spin_lock_init(&c->data_bucket_lock);
+
+ for (i = 0; i < 6; i++) {
+ struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ list_add(&b->list, &c->data_buckets);
+ }
+
+ return 0;
+}
+
int bch_cache_allocator_start(struct cache *ca)
{
struct task_struct *k = kthread_run(bch_allocator_thread,
@@ -556,22 +681,8 @@ int bch_cache_allocator_start(struct cache *ca)
return 0;
}
-void bch_cache_allocator_exit(struct cache *ca)
-{
- struct discard *d;
-
- while (!list_empty(&ca->discards)) {
- d = list_first_entry(&ca->discards, struct discard, list);
- cancel_work_sync(&d->work);
- list_del(&d->list);
- kfree(d);
- }
-}
-
int bch_cache_allocator_init(struct cache *ca)
{
- unsigned i;
-
/*
* Reserve:
* Prio/gen writes first
@@ -589,15 +700,5 @@ int bch_cache_allocator_init(struct cache *ca)
ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
ca->watermark[WATERMARK_MOVINGGC];
- for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
- struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- d->ca = ca;
- INIT_WORK(&d->work, discard_finish);
- list_add(&d->list, &ca->discards);
- }
-
return 0;
}
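
The alloc.c rework above replaces closure-based waiting with a bare wait queue: bch_bucket_alloc() now drops bucket_lock, sleeps on bucket_wait, retakes the lock, and rechecks the watermark after each wakeup. Here is a minimal userspace sketch of that drop-lock/sleep/recheck pattern (illustrative only, not bcache code): a pthread condition variable stands in for the kernel wait queue, and a plain counter stands in for fifo_used(&ca->free).

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins: a counter for fifo_used(&ca->free), a mutex for
 * ca->set->bucket_lock, and a condvar for ca->set->bucket_wait. */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  bucket_wait = PTHREAD_COND_INITIALIZER;
static long free_buckets;

/* Analogue of bch_bucket_alloc(): take a bucket if we're above the
 * watermark, optionally sleeping until the allocator refills the fifo. */
long bucket_alloc(long watermark, bool wait)
{
	long r = -1;

	pthread_mutex_lock(&bucket_lock);
	while (free_buckets <= watermark) {
		if (!wait)
			goto out;
		/* Like prepare_to_wait() + schedule(): atomically release
		 * the lock, sleep until woken, retake the lock, recheck. */
		pthread_cond_wait(&bucket_wait, &bucket_lock);
	}
	r = --free_buckets;
out:
	pthread_mutex_unlock(&bucket_lock);
	return r;
}

/* Analogue of the allocator thread pushing a bucket and waking waiters. */
void bucket_push(void)
{
	pthread_mutex_lock(&bucket_lock);
	free_buckets++;
	pthread_cond_broadcast(&bucket_wait);	/* wake_up(&...->bucket_wait) */
	pthread_mutex_unlock(&bucket_lock);
}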
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0f12382aa35d..754f43177483 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -177,6 +177,7 @@
#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -196,7 +197,7 @@ struct bucket {
uint8_t disk_gen;
uint8_t last_gc; /* Most out of date gen in the btree */
uint8_t gc_gen;
- uint16_t gc_mark;
+ uint16_t gc_mark; /* Bitfield used by GC. See below for field layout */
};
/*
@@ -208,169 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
-
-struct bkey {
- uint64_t high;
- uint64_t low;
- uint64_t ptr[];
-};
-
-/* Enough for a key with 6 pointers */
-#define BKEY_PAD 8
-
-#define BKEY_PADDED(key) \
- union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
-
-/* Version 0: Cache device
- * Version 1: Backing device
- * Version 2: Seed pointer into btree node checksum
- * Version 3: Cache device with new UUID format
- * Version 4: Backing device with data offset
- */
-#define BCACHE_SB_VERSION_CDEV 0
-#define BCACHE_SB_VERSION_BDEV 1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
-#define BCACHE_SB_MAX_VERSION 4
-
-#define SB_SECTOR 8
-#define SB_SIZE 4096
-#define SB_LABEL_SIZE 32
-#define SB_JOURNAL_BUCKETS 256U
-/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
-#define MAX_CACHES_PER_SET 8
-
-#define BDEV_DATA_START_DEFAULT 16 /* sectors */
-
-struct cache_sb {
- uint64_t csum;
- uint64_t offset; /* sector where this sb was written */
- uint64_t version;
-
- uint8_t magic[16];
-
- uint8_t uuid[16];
- union {
- uint8_t set_uuid[16];
- uint64_t set_magic;
- };
- uint8_t label[SB_LABEL_SIZE];
-
- uint64_t flags;
- uint64_t seq;
- uint64_t pad[8];
-
- union {
- struct {
- /* Cache devices */
- uint64_t nbuckets; /* device size */
-
- uint16_t block_size; /* sectors */
- uint16_t bucket_size; /* sectors */
-
- uint16_t nr_in_set;
- uint16_t nr_this_dev;
- };
- struct {
- /* Backing devices */
- uint64_t data_offset;
-
- /*
- * block_size from the cache device section is still used by
- * backing devices, so don't add anything here until we fix
- * things to not need it for backing devices anymore
- */
- };
- };
-
- uint32_t last_mount; /* time_t */
-
- uint16_t first_bucket;
- union {
- uint16_t njournal_buckets;
- uint16_t keys;
- };
- uint64_t d[SB_JOURNAL_BUCKETS]; /* journal buckets */
-};
-
-BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
-BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
-BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
-#define CACHE_REPLACEMENT_LRU 0U
-#define CACHE_REPLACEMENT_FIFO 1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-
-BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
-#define CACHE_MODE_WRITETHROUGH 0U
-#define CACHE_MODE_WRITEBACK 1U
-#define CACHE_MODE_WRITEAROUND 2U
-#define CACHE_MODE_NONE 3U
-BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
-#define BDEV_STATE_NONE 0U
-#define BDEV_STATE_CLEAN 1U
-#define BDEV_STATE_DIRTY 2U
-#define BDEV_STATE_STALE 3U
-
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_VERSION 1
-
-/*
- * This is the on disk format for btree nodes - a btree node on disk is a list
- * of these; within each set the keys are sorted
- */
-struct bset {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t keys;
-
- union {
- struct bkey start[0];
- uint64_t d[0];
- };
-};
-
-/*
- * On disk format for priorities and gens - see super.c near prio_write() for
- * more.
- */
-struct prio_set {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t pad;
-
- uint64_t next_bucket;
-
- struct bucket_disk {
- uint16_t prio;
- uint8_t gen;
- } __attribute((packed)) data[];
-};
-
-struct uuid_entry {
- union {
- struct {
- uint8_t uuid[16];
- uint8_t label[32];
- uint32_t first_reg;
- uint32_t last_reg;
- uint32_t invalidated;
-
- uint32_t flags;
- /* Size of flash only volumes */
- uint64_t sectors;
- };
-
- uint8_t pad[128];
- };
-};
-
-BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
#include "stats.h"
@@ -384,8 +224,6 @@ struct keybuf_key {
void *private;
};
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
struct keybuf {
struct bkey last_scanned;
spinlock_t lock;
@@ -400,7 +238,7 @@ struct keybuf {
struct rb_root keys;
-#define KEYBUF_NR 100
+#define KEYBUF_NR 500
DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
@@ -429,16 +267,15 @@ struct bcache_device {
struct gendisk *disk;
- /* If nonzero, we're closing */
- atomic_t closing;
-
- /* If nonzero, we're detaching/unregistering from cache set */
- atomic_t detaching;
- int flush_done;
+ unsigned long flags;
+#define BCACHE_DEV_CLOSING 0
+#define BCACHE_DEV_DETACHING 1
+#define BCACHE_DEV_UNLINK_DONE 2
- uint64_t nr_stripes;
- unsigned stripe_size_bits;
+ unsigned nr_stripes;
+ unsigned stripe_size;
atomic_t *stripe_sectors_dirty;
+ unsigned long *full_dirty_stripes;
unsigned long sectors_dirty_last;
long sectors_dirty_derivative;
@@ -509,7 +346,7 @@ struct cached_dev {
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
- struct closure_with_timer writeback;
+ struct task_struct *writeback_thread;
struct keybuf writeback_keys;
@@ -527,8 +364,8 @@ struct cached_dev {
unsigned sequential_cutoff;
unsigned readahead;
- unsigned sequential_merge:1;
unsigned verify:1;
+ unsigned bypass_torture_test:1;
unsigned partial_stripes_expensive:1;
unsigned writeback_metadata:1;
@@ -536,14 +373,14 @@ struct cached_dev {
unsigned char writeback_percent;
unsigned writeback_delay;
- int writeback_rate_change;
- int64_t writeback_rate_derivative;
uint64_t writeback_rate_target;
+ int64_t writeback_rate_proportional;
+ int64_t writeback_rate_derivative;
+ int64_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
unsigned writeback_rate_d_term;
unsigned writeback_rate_p_term_inverse;
- unsigned writeback_rate_d_smooth;
};
enum alloc_watermarks {
@@ -609,7 +446,6 @@ struct cache {
* call prio_write() to keep gens from wrapping.
*/
uint8_t need_save_prio;
- unsigned gc_move_threshold;
/*
* If nonzero, we know we aren't going to find any buckets to invalidate
@@ -620,15 +456,6 @@ struct cache {
bool discard; /* Get rid of? */
- /*
- * We preallocate structs for issuing discards to buckets, and keep them
- * on this list when they're not in use; do_discard() issues discards
- * whenever there's work to do and is called by free_some_buckets() and
- * when a discard finishes.
- */
- atomic_t discards_in_flight;
- struct list_head discards;
-
struct journal_device journal;
/* The rest of this all shows up in sysfs */
@@ -649,7 +476,6 @@ struct gc_stat {
size_t nkeys;
uint64_t data; /* sectors */
- uint64_t dirty; /* sectors */
unsigned in_use; /* percent */
};
@@ -744,8 +570,8 @@ struct cache_set {
* basically a lock for this that we can wait on asynchronously. The
* btree_root() macro releases the lock when it returns.
*/
- struct closure *try_harder;
- struct closure_waitlist try_wait;
+ struct task_struct *try_harder;
+ wait_queue_head_t try_wait;
uint64_t try_harder_start;
/*
@@ -759,7 +585,7 @@ struct cache_set {
* written.
*/
atomic_t prio_blocked;
- struct closure_waitlist bucket_wait;
+ wait_queue_head_t bucket_wait;
/*
* For any bio we don't skip we subtract the number of sectors from
@@ -782,7 +608,7 @@ struct cache_set {
struct gc_stat gc_stats;
size_t nbuckets;
- struct closure_with_waitlist gc;
+ struct task_struct *gc_thread;
/* Where in the btree gc currently is */
struct bkey gc_done;
@@ -795,11 +621,10 @@ struct cache_set {
/* Counts how many sectors bio_insert has added to the cache */
atomic_t sectors_to_gc;
- struct closure moving_gc;
- struct closure_waitlist moving_gc_wait;
+ wait_queue_head_t moving_gc_wait;
struct keybuf moving_gc_keys;
/* Number of moving GC bios in flight */
- atomic_t in_flight;
+ struct semaphore moving_in_flight;
struct btree *root;
@@ -841,22 +666,27 @@ struct cache_set {
unsigned congested_read_threshold_us;
unsigned congested_write_threshold_us;
- spinlock_t sort_time_lock;
struct time_stats sort_time;
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
- spinlock_t btree_read_time_lock;
struct time_stats btree_read_time;
struct time_stats try_harder_time;
atomic_long_t cache_read_races;
atomic_long_t writeback_keys_done;
atomic_long_t writeback_keys_failed;
+
+ enum {
+ ON_ERROR_UNREGISTER,
+ ON_ERROR_PANIC,
+ } on_error;
unsigned error_limit;
unsigned error_decay;
+
unsigned short journal_delay_ms;
unsigned verify:1;
unsigned key_merging_disabled:1;
+ unsigned expensive_debug_checks:1;
unsigned gc_always_rewrite:1;
unsigned shrinker_disabled:1;
unsigned copy_gc_enabled:1;
@@ -865,21 +695,6 @@ struct cache_set {
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
};
-static inline bool key_merging_disabled(struct cache_set *c)
-{
-#ifdef CONFIG_BCACHE_DEBUG
- return c->key_merging_disabled;
-#else
- return 0;
-#endif
-}
-
-static inline bool SB_IS_BDEV(const struct cache_sb *sb)
-{
- return sb->version == BCACHE_SB_VERSION_BDEV
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
-}
-
struct bbio {
unsigned submit_time_us;
union {
@@ -933,59 +748,6 @@ static inline unsigned local_clock_us(void)
#define prio_buckets(c) \
DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
-#define JSET_MAGIC 0x245235c1a3625032ULL
-#define PSET_MAGIC 0x6750e15f87337f91ULL
-#define BSET_MAGIC 0x90135c78b99e07f5ULL
-
-#define jset_magic(c) ((c)->sb.set_magic ^ JSET_MAGIC)
-#define pset_magic(c) ((c)->sb.set_magic ^ PSET_MAGIC)
-#define bset_magic(c) ((c)->sb.set_magic ^ BSET_MAGIC)
-
-/* Bkey fields: all units are in sectors */
-
-#define KEY_FIELD(name, field, offset, size) \
- BITMASK(name, struct bkey, field, offset, size)
-
-#define PTR_FIELD(name, offset, size) \
- static inline uint64_t name(const struct bkey *k, unsigned i) \
- { return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); } \
- \
- static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
- { \
- k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset); \
- k->ptr[i] |= v << offset; \
- }
-
-KEY_FIELD(KEY_PTRS, high, 60, 3)
-KEY_FIELD(HEADER_SIZE, high, 58, 2)
-KEY_FIELD(KEY_CSUM, high, 56, 2)
-KEY_FIELD(KEY_PINNED, high, 55, 1)
-KEY_FIELD(KEY_DIRTY, high, 36, 1)
-
-KEY_FIELD(KEY_SIZE, high, 20, 16)
-KEY_FIELD(KEY_INODE, high, 0, 20)
-
-/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
-
-static inline uint64_t KEY_OFFSET(const struct bkey *k)
-{
- return k->low;
-}
-
-static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
-{
- k->low = v;
-}
-
-PTR_FIELD(PTR_DEV, 51, 12)
-PTR_FIELD(PTR_OFFSET, 8, 43)
-PTR_FIELD(PTR_GEN, 0, 8)
-
-#define PTR_CHECK_DEV ((1 << 12) - 1)
-
-#define PTR(gen, offset, dev) \
- ((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
-
static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
return s >> c->bucket_bits;
@@ -1024,27 +786,11 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
/* Btree key macros */
-/*
- * The high bit being set is a relic from when we used it to do binary
- * searches - it told you where a key started. It's not used anymore,
- * and can probably be safely dropped.
- */
-#define KEY(dev, sector, len) \
-((struct bkey) { \
- .high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \
- .low = (sector) \
-})
-
static inline void bkey_init(struct bkey *k)
{
- *k = KEY(0, 0, 0);
+ *k = ZERO_KEY;
}
-#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
-#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
-#define MAX_KEY KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
-#define ZERO_KEY KEY(0, 0, 0)
-
/*
* This is used for various on disk data structures - cache_sb, prio_set, bset,
* jset: The checksum is _always_ the first 8 bytes of these structs
@@ -1094,14 +840,6 @@ do { \
for (b = (ca)->buckets + (ca)->sb.first_bucket; \
b < (ca)->buckets + (ca)->sb.nbuckets; b++)
-static inline void __bkey_put(struct cache_set *c, struct bkey *k)
-{
- unsigned i;
-
- for (i = 0; i < KEY_PTRS(k); i++)
- atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
-}
-
static inline void cached_dev_put(struct cached_dev *dc)
{
if (atomic_dec_and_test(&dc->count))
@@ -1173,13 +911,15 @@ uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);
-long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
void bch_bucket_free(struct cache_set *, struct bkey *);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
- struct bkey *, int, struct closure *);
+ struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
- struct bkey *, int, struct closure *);
+ struct bkey *, int, bool);
+bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
+ unsigned, unsigned, bool);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);
@@ -1187,7 +927,7 @@ bool bch_cache_set_error(struct cache_set *, const char *, ...);
void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);
-extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
+extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
@@ -1220,15 +960,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+void bch_open_buckets_free(struct cache_set *);
int bch_cache_allocator_start(struct cache *ca);
-void bch_cache_allocator_exit(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);
void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
-void bch_writeback_exit(void);
-int bch_writeback_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
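
The bcache.h hunk above repacks struct bucket's 16-bit gc_mark: GC_SECTORS_USED shrinks from 14 bits to 13 to make room for the new GC_MOVE bit at bit 15. The following is a hedged stand-in for the BITMASK() accessor generator (the real macro lives elsewhere in bcache.h and is not part of this diff), demonstrating the post-patch layout:

#include <assert.h>
#include <stdint.h>

/* Stand-in for bcache's BITMASK(): generate get/set accessors for an
 * <offset, size> slice of an integer struct member. */
#define BITMASK(name, type, field, offset, size)			\
static inline uint64_t name(const type *k)				\
{ return (k->field >> (offset)) & ~(~0ULL << (size)); }		\
									\
static inline void SET_##name(type *k, uint64_t v)			\
{									\
	k->field &= ~(~(~0ULL << (size)) << (offset));			\
	k->field |= (v & ~(~0ULL << (size))) << (offset);		\
}

struct bucket { uint16_t gc_mark; };

/* Post-patch layout: 2 mark bits, 13 sectors-used bits, 1 move bit. */
BITMASK(GC_MARK,         struct bucket, gc_mark, 0,  2)
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2,  13)
BITMASK(GC_MOVE,         struct bucket, gc_mark, 15, 1)

int main(void)
{
	struct bucket b = { 0 };

	SET_GC_MARK(&b, 2);		/* GC_MARK_METADATA */
	SET_GC_SECTORS_USED(&b, 1024);
	SET_GC_MOVE(&b, 1);
	assert(GC_MARK(&b) == 2);
	assert(GC_SECTORS_USED(&b) == 1024);
	assert(GC_MOVE(&b) == 1);
	return 0;
}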
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 22d1ae72c282..7d388b8bb50e 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -14,22 +14,12 @@
/* Keylists */
-void bch_keylist_copy(struct keylist *dest, struct keylist *src)
-{
- *dest = *src;
-
- if (src->list == src->d) {
- size_t n = (uint64_t *) src->top - src->d;
- dest->top = (struct bkey *) &dest->d[n];
- dest->list = dest->d;
- }
-}
-
int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{
- unsigned oldsize = (uint64_t *) l->top - l->list;
- unsigned newsize = oldsize + 2 + nptrs;
- uint64_t *new;
+ size_t oldsize = bch_keylist_nkeys(l);
+ size_t newsize = oldsize + 2 + nptrs;
+ uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
+ uint64_t *new_keys;
/* The journalling code doesn't handle the case where the keys to insert
* are bigger than an empty write: if we just return -ENOMEM here,
@@ -45,24 +35,23 @@ int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
roundup_pow_of_two(oldsize) == newsize)
return 0;
- new = krealloc(l->list == l->d ? NULL : l->list,
- sizeof(uint64_t) * newsize, GFP_NOIO);
+ new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);
- if (!new)
+ if (!new_keys)
return -ENOMEM;
- if (l->list == l->d)
- memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);
+ if (!old_keys)
+ memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);
- l->list = new;
- l->top = (struct bkey *) (&l->list[oldsize]);
+ l->keys_p = new_keys;
+ l->top_p = new_keys + oldsize;
return 0;
}
struct bkey *bch_keylist_pop(struct keylist *l)
{
- struct bkey *k = l->bottom;
+ struct bkey *k = l->keys;
if (k == l->top)
return NULL;
@@ -73,21 +62,20 @@ struct bkey *bch_keylist_pop(struct keylist *l)
return l->top = k;
}
-/* Pointer validation */
-
-bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
+void bch_keylist_pop_front(struct keylist *l)
{
- unsigned i;
- char buf[80];
+ l->top_p -= bkey_u64s(l->keys);
- if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
- goto bad;
+ memmove(l->keys,
+ bkey_next(l->keys),
+ bch_keylist_bytes(l));
+}
- if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
- goto bad;
+/* Pointer validation */
- if (!KEY_SIZE(k))
- return true;
+static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ unsigned i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@@ -98,13 +86,83 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
if (KEY_SIZE(k) + r > c->sb.bucket_size ||
bucket < ca->sb.first_bucket ||
bucket >= ca->sb.nbuckets)
- goto bad;
+ return true;
}
return false;
+}
+
+bool bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
+bad:
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+bool bch_extent_ptr_invalid(struct cache_set *c, const struct bkey *k)
+{
+ char buf[80];
+
+ if (!KEY_SIZE(k))
+ return true;
+
+ if (KEY_SIZE(k) > KEY_OFFSET(k))
+ goto bad;
+
+ if (__ptr_invalid(c, k))
+ goto bad;
+
+ return false;
bad:
bch_bkey_to_text(buf, sizeof(buf), k);
- cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
+ cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
+ return true;
+}
+
+static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
+ unsigned ptr)
+{
+ struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+ char buf[80];
+
+ if (mutex_trylock(&b->c->bucket_lock)) {
+ if (b->level) {
+ if (KEY_DIRTY(k) ||
+ g->prio != BTREE_PRIO ||
+ (b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_METADATA))
+ goto err;
+
+ } else {
+ if (g->prio == BTREE_PRIO)
+ goto err;
+
+ if (KEY_DIRTY(k) &&
+ b->c->gc_mark_valid &&
+ GC_MARK(g) != GC_MARK_DIRTY)
+ goto err;
+ }
+ mutex_unlock(&b->c->bucket_lock);
+ }
+
+ return false;
+err:
+ mutex_unlock(&b->c->bucket_lock);
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+ buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true;
}
@@ -118,64 +176,29 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
bch_ptr_invalid(b, k))
return true;
- if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
- return true;
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ if (!ptr_available(b->c, k, i))
+ return true;
- for (i = 0; i < KEY_PTRS(k); i++)
- if (ptr_available(b->c, k, i)) {
- g = PTR_BUCKET(b->c, k, i);
- stale = ptr_stale(b->c, k, i);
+ g = PTR_BUCKET(b->c, k, i);
+ stale = ptr_stale(b->c, k, i);
- btree_bug_on(stale > 96, b,
- "key too stale: %i, need_gc %u",
- stale, b->c->need_gc);
+ btree_bug_on(stale > 96, b,
+ "key too stale: %i, need_gc %u",
+ stale, b->c->need_gc);
- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
- b, "stale dirty pointer");
+ btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+ b, "stale dirty pointer");
- if (stale)
- return true;
+ if (stale)
+ return true;
-#ifdef CONFIG_BCACHE_EDEBUG
- if (!mutex_trylock(&b->c->bucket_lock))
- continue;
-
- if (b->level) {
- if (KEY_DIRTY(k) ||
- g->prio != BTREE_PRIO ||
- (b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_METADATA))
- goto bug;
-
- } else {
- if (g->prio == BTREE_PRIO)
- goto bug;
-
- if (KEY_DIRTY(k) &&
- b->c->gc_mark_valid &&
- GC_MARK(g) != GC_MARK_DIRTY)
- goto bug;
- }
- mutex_unlock(&b->c->bucket_lock);
-#endif
- }
+ if (expensive_debug_checks(b->c) &&
+ ptr_bad_expensive_checks(b, k, i))
+ return true;
+ }
return false;
-#ifdef CONFIG_BCACHE_EDEBUG
-bug:
- mutex_unlock(&b->c->bucket_lock);
-
- {
- char buf[80];
-
- bch_bkey_to_text(buf, sizeof(buf), k);
- btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
- buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
- }
- return true;
-#endif
}
/* Key/pointer manipulation */
@@ -458,16 +481,8 @@ static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
-#ifdef CONFIG_X86_64
- asm("shrd %[shift],%[high],%[low]"
- : [low] "+Rm" (low)
- : [high] "R" (high),
- [shift] "ci" (shift)
- : "cc");
-#else
low >>= shift;
low |= (high << 1) << (63U - shift);
-#endif
return low;
}
@@ -686,7 +701,7 @@ void bch_bset_init_next(struct btree *b)
} else
get_random_bytes(&i->seq, sizeof(uint64_t));
- i->magic = bset_magic(b->c);
+ i->magic = bset_magic(&b->c->sb);
i->version = 0;
i->keys = 0;
@@ -824,16 +839,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
} else
i = bset_search_write_set(b, t, search);
-#ifdef CONFIG_BCACHE_EDEBUG
- BUG_ON(bset_written(b, t) &&
- i.l != t->data->start &&
- bkey_cmp(tree_to_prev_bkey(t,
- inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
- search) > 0);
+ if (expensive_debug_checks(b->c)) {
+ BUG_ON(bset_written(b, t) &&
+ i.l != t->data->start &&
+ bkey_cmp(tree_to_prev_bkey(t,
+ inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+ search) > 0);
- BUG_ON(i.r != end(t->data) &&
- bkey_cmp(i.r, search) <= 0);
-#endif
+ BUG_ON(i.r != end(t->data) &&
+ bkey_cmp(i.r, search) <= 0);
+ }
while (likely(i.l != i.r) &&
bkey_cmp(i.l, search) <= 0)
@@ -844,6 +859,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
/* Btree iterator */
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
static inline bool btree_iter_cmp(struct btree_iter_set l,
struct btree_iter_set r)
{
@@ -867,12 +889,16 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
}
struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
- struct bkey *search, struct bset_tree *start)
+ struct bkey *search, struct bset_tree *start)
{
struct bkey *ret = NULL;
iter->size = ARRAY_SIZE(iter->data);
iter->used = 0;
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->b = b;
+#endif
+
for (; start <= &b->sets[b->nsets]; start++) {
ret = bch_bset_search(b, start, search);
bch_btree_iter_push(iter, ret, end(start->data));
@@ -887,6 +913,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
struct bkey *ret = NULL;
if (!btree_iter_end(iter)) {
+ bch_btree_iter_next_check(iter);
+
ret = iter->data->k;
iter->data->k = bkey_next(iter->data->k);
@@ -916,14 +944,6 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
return ret;
}
-struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
-{
- struct btree_iter iter;
-
- bch_btree_iter_init(b, &iter, search);
- return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-}
-
/* Mergesort */
static void sort_key_next(struct btree_iter *iter,
@@ -998,7 +1018,6 @@ static void btree_mergesort(struct btree *b, struct bset *out,
out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
pr_debug("sorted %i keys", out->keys);
- bch_check_key_order(b, out);
}
static void __btree_sort(struct btree *b, struct btree_iter *iter,
@@ -1029,7 +1048,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
* memcpy()
*/
- out->magic = bset_magic(b->c);
+ out->magic = bset_magic(&b->c->sb);
out->seq = b->sets[0].data->seq;
out->version = b->sets[0].data->version;
swap(out, b->sets[0].data);
@@ -1050,24 +1069,21 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
if (b->written)
bset_build_written_tree(b);
- if (!start) {
- spin_lock(&b->c->sort_time_lock);
+ if (!start)
bch_time_stats_update(&b->c->sort_time, start_time);
- spin_unlock(&b->c->sort_time_lock);
- }
}
void bch_btree_sort_partial(struct btree *b, unsigned start)
{
- size_t oldsize = 0, order = b->page_order, keys = 0;
+ size_t order = b->page_order, keys = 0;
struct btree_iter iter;
+ int oldsize = bch_count_data(b);
+
__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
BUG_ON(b->sets[b->nsets].data == write_block(b) &&
(b->sets[b->nsets].size || b->nsets));
- if (b->written)
- oldsize = bch_count_data(b);
if (start) {
unsigned i;
@@ -1083,7 +1099,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start)
__btree_sort(b, &iter, start, order, false);
- EBUG_ON(b->written && bch_count_data(b) != oldsize);
+ EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
}
void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
@@ -1101,9 +1117,7 @@ void bch_btree_sort_into(struct btree *b, struct btree *new)
btree_mergesort(b, new->sets->data, &iter, false, true);
- spin_lock(&b->c->sort_time_lock);
bch_time_stats_update(&b->c->sort_time, start_time);
- spin_unlock(&b->c->sort_time_lock);
bkey_copy_key(&new->key, &b->key);
new->sets->size = 0;
@@ -1148,16 +1162,16 @@ out:
/* Sysfs stuff */
struct bset_stats {
+ struct btree_op op;
size_t nodes;
size_t sets_written, sets_unwritten;
size_t bytes_written, bytes_unwritten;
size_t floats, failed;
};
-static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
- struct bset_stats *stats)
+static int btree_bset_stats(struct btree_op *op, struct btree *b)
{
- struct bkey *k;
+ struct bset_stats *stats = container_of(op, struct bset_stats, op);
unsigned i;
stats->nodes++;
@@ -1182,30 +1196,19 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
}
}
- if (b->level) {
- struct btree_iter iter;
-
- for_each_key_filter(b, k, &iter, bch_ptr_bad) {
- int ret = btree(bset_stats, k, b, op, stats);
- if (ret)
- return ret;
- }
- }
-
- return 0;
+ return MAP_CONTINUE;
}
int bch_bset_print_stats(struct cache_set *c, char *buf)
{
- struct btree_op op;
struct bset_stats t;
int ret;
- bch_btree_op_init_stack(&op);
memset(&t, 0, sizeof(struct bset_stats));
+ bch_btree_op_init(&t.op, -1);
- ret = btree_root(bset_stats, c, &op, &t);
- if (ret)
+ ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
+ if (ret < 0)
return ret;
return snprintf(buf, PAGE_SIZE,
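
The bset.c hunk above drops the x86-64 shrd inline assembly from shrd128() and keeps only the portable C fallback; for shifts in 0..63 the two compute the same value. Below is a standalone sanity check of the portable version against an unsigned __int128 reference (a sketch; assumes a GCC/Clang host with __int128 support):

#include <assert.h>
#include <stdint.h>

/* Portable shrd128 as kept by the patch: the low 64 bits of the 128-bit
 * value (high:low) shifted right by shift (0..63). */
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low |= (high << 1) << (63U - shift);
	return low;
}

int main(void)
{
	uint64_t high = 0xdeadbeefcafebabeULL;
	uint64_t low  = 0x0123456789abcdefULL;
	unsigned __int128 v = ((unsigned __int128) high << 64) | low;
	unsigned s;

	for (s = 0; s < 64; s++)
		assert(shrd128(high, low, s) == (uint64_t) (v >> s));
	return 0;
}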
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index ae115a253d73..1d3c24f9fa0e 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -148,6 +148,9 @@
struct btree_iter {
size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+ struct btree *b;
+#endif
struct btree_iter_set {
struct bkey *k, *end;
} data[MAX_BSETS];
@@ -193,54 +196,26 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
-static inline size_t bkey_u64s(const struct bkey *k)
-{
- BUG_ON(KEY_CSUM(k) > 1);
- return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
-}
-
-static inline size_t bkey_bytes(const struct bkey *k)
-{
- return bkey_u64s(k) * sizeof(uint64_t);
-}
-
-static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
-{
- memcpy(dest, src, bkey_bytes(src));
-}
-
-static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
-{
- if (!src)
- src = &KEY(0, 0, 0);
-
- SET_KEY_INODE(dest, KEY_INODE(src));
- SET_KEY_OFFSET(dest, KEY_OFFSET(src));
-}
-
-static inline struct bkey *bkey_next(const struct bkey *k)
-{
- uint64_t *d = (void *) k;
- return (struct bkey *) (d + bkey_u64s(k));
-}
-
/* Keylists */
struct keylist {
- struct bkey *top;
union {
- uint64_t *list;
- struct bkey *bottom;
+ struct bkey *keys;
+ uint64_t *keys_p;
+ };
+ union {
+ struct bkey *top;
+ uint64_t *top_p;
};
/* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE 16
- uint64_t d[KEYLIST_INLINE];
+ uint64_t inline_keys[KEYLIST_INLINE];
};
static inline void bch_keylist_init(struct keylist *l)
{
- l->top = (void *) (l->list = l->d);
+ l->top_p = l->keys_p = l->inline_keys;
}
static inline void bch_keylist_push(struct keylist *l)
@@ -256,17 +231,32 @@ static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
static inline bool bch_keylist_empty(struct keylist *l)
{
- return l->top == (void *) l->list;
+ return l->top == l->keys;
+}
+
+static inline void bch_keylist_reset(struct keylist *l)
+{
+ l->top = l->keys;
}
static inline void bch_keylist_free(struct keylist *l)
{
- if (l->list != l->d)
- kfree(l->list);
+ if (l->keys_p != l->inline_keys)
+ kfree(l->keys_p);
+}
+
+static inline size_t bch_keylist_nkeys(struct keylist *l)
+{
+ return l->top_p - l->keys_p;
+}
+
+static inline size_t bch_keylist_bytes(struct keylist *l)
+{
+ return bch_keylist_nkeys(l) * sizeof(uint64_t);
}
-void bch_keylist_copy(struct keylist *, struct keylist *);
struct bkey *bch_keylist_pop(struct keylist *);
+void bch_keylist_pop_front(struct keylist *);
int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
@@ -287,7 +277,9 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
}
const char *bch_ptr_status(struct cache_set *, const struct bkey *);
-bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
+bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
+bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
+
bool bch_ptr_bad(struct btree *, const struct bkey *);
static inline uint8_t gen_after(uint8_t a, uint8_t b)
@@ -311,7 +303,6 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
struct btree *, ptr_filter_fn);
@@ -361,12 +352,30 @@ void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
const struct bkey *);
+/*
+ * Returns the first key that is strictly greater than search
+ */
static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
const struct bkey *search)
{
return search ? __bch_bset_search(b, t, search) : t->data->start;
}
+#define PRECEDING_KEY(_k) \
+({ \
+ struct bkey *_ret = NULL; \
+ \
+ if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
+ _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
+ \
+ if (!_ret->low) \
+ _ret->high--; \
+ _ret->low--; \
+ } \
+ \
+ _ret; \
+})
+
bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
void bch_btree_sort_lazy(struct btree *);
void bch_btree_sort_into(struct btree *, struct btree *);
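
The reworked struct keylist above keeps keys in inline_keys until they outgrow it, then spills to the heap; on the first spill the inline contents must be copied by hand, since krealloc() is passed NULL rather than the inline buffer. A hedged userspace sketch of the same small-buffer pattern, with realloc() standing in for krealloc():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define KEYLIST_INLINE 16

struct keylist {
	uint64_t *keys_p;	/* current storage (inline or heap) */
	uint64_t *top_p;	/* one past the last used u64 */
	uint64_t  inline_keys[KEYLIST_INLINE];
};

static void keylist_init(struct keylist *l)
{
	l->top_p = l->keys_p = l->inline_keys;
}

static int keylist_realloc(struct keylist *l, size_t u64s_needed)
{
	size_t oldsize = l->top_p - l->keys_p;
	size_t newsize = oldsize + u64s_needed;
	uint64_t *old = l->keys_p == l->inline_keys ? NULL : l->keys_p;
	uint64_t *new = realloc(old, newsize * sizeof(uint64_t));

	if (!new)
		return -1;
	if (!old)	/* first spill: copy the inline keys over */
		memcpy(new, l->inline_keys, oldsize * sizeof(uint64_t));

	l->keys_p = new;
	l->top_p = new + oldsize;
	return 0;
}

int main(void)
{
	struct keylist l;
	uint64_t i;

	keylist_init(&l);
	/* Append 64 u64s; crossing KEYLIST_INLINE spills to the heap. */
	for (i = 0; i < 64; i++) {
		if (i >= KEYLIST_INLINE && keylist_realloc(&l, 1))
			return 1;
		*l.top_p++ = i;
	}
	if (l.keys_p != l.inline_keys)
		free(l.keys_p);
	return 0;
}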
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f42fc7ed9cd6..31bb53fcc67a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -23,12 +23,13 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include "writeback.h"
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/freezer.h>
#include <linux/hash.h>
+#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
@@ -88,15 +89,13 @@
* Test module load/unload
*/
-static const char * const op_types[] = {
- "insert", "replace"
+enum {
+ BTREE_INSERT_STATUS_INSERT,
+ BTREE_INSERT_STATUS_BACK_MERGE,
+ BTREE_INSERT_STATUS_OVERWROTE,
+ BTREE_INSERT_STATUS_FRONT_MERGE,
};
-static const char *op_type(struct btree_op *op)
-{
- return op_types[op->type];
-}
-
#define MAX_NEED_GC 64
#define MAX_SAVE_PRIO 72
@@ -105,23 +104,89 @@ static const char *op_type(struct btree_op *op)
#define PTR_HASH(c, k) \
(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
-struct workqueue_struct *bch_gc_wq;
static struct workqueue_struct *btree_io_wq;
-void bch_btree_op_init_stack(struct btree_op *op)
+static inline bool should_split(struct btree *b)
{
- memset(op, 0, sizeof(struct btree_op));
- closure_init_stack(&op->cl);
- op->lock = -1;
- bch_keylist_init(&op->keys);
+ struct bset *i = write_block(b);
+ return b->written >= btree_blocks(b) ||
+ (b->written + __set_blocks(i, i->keys + 15, b->c)
+ > btree_blocks(b));
}
+#define insert_lock(s, b) ((b)->level <= (s)->lock)
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...) \
+({ \
+ int _r, l = (b)->level - 1; \
+ bool _w = l <= (op)->lock; \
+ struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
+ if (!IS_ERR(_child)) { \
+ _child->parent = (b); \
+ _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
+ rw_unlock(_w, _child); \
+ } else \
+ _r = PTR_ERR(_child); \
+ _r; \
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...) \
+({ \
+ int _r = -EINTR; \
+ do { \
+ struct btree *_b = (c)->root; \
+ bool _w = insert_lock(op, _b); \
+ rw_lock(_w, _b, _b->level); \
+ if (_b == (c)->root && \
+ _w == insert_lock(op, _b)) { \
+ _b->parent = NULL; \
+ _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+ } \
+ rw_unlock(_w, _b); \
+ bch_cannibalize_unlock(c); \
+ if (_r == -ENOSPC) { \
+ wait_event((c)->try_wait, \
+ !(c)->try_harder); \
+ _r = -EINTR; \
+ } \
+ } while (_r == -EINTR); \
+ \
+ _r; \
+})
+
/* Btree key manipulation */
-static void bkey_put(struct cache_set *c, struct bkey *k, int level)
+void bkey_put(struct cache_set *c, struct bkey *k)
{
- if ((level && KEY_OFFSET(k)) || !level)
- __bkey_put(c, k);
+ unsigned i;
+
+ for (i = 0; i < KEY_PTRS(k); i++)
+ if (ptr_available(c, k, i))
+ atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
/* Btree IO */
@@ -145,6 +210,10 @@ static void bch_btree_node_read_done(struct btree *b)
iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
iter->used = 0;
+#ifdef CONFIG_BCACHE_DEBUG
+ iter->b = b;
+#endif
+
if (!i->seq)
goto err;
@@ -160,7 +229,7 @@ static void bch_btree_node_read_done(struct btree *b)
goto err;
err = "bad magic";
- if (i->magic != bset_magic(b->c))
+ if (i->magic != bset_magic(&b->c->sb))
goto err;
err = "bad checksum";
@@ -248,10 +317,7 @@ void bch_btree_node_read(struct btree *b)
goto err;
bch_btree_node_read_done(b);
-
- spin_lock(&b->c->btree_read_time_lock);
bch_time_stats_update(&b->c->btree_read_time, start_time);
- spin_unlock(&b->c->btree_read_time_lock);
return;
err:
@@ -327,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
b->bio = bch_bbio_alloc(b->c);
b->bio->bi_end_io = btree_node_write_endio;
- b->bio->bi_private = &b->io.cl;
+ b->bio->bi_private = cl;
b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
bch_bio_map(b->bio, i);
@@ -383,7 +449,7 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
BUG_ON(b->written >= btree_blocks(b));
BUG_ON(b->written && !i->keys);
BUG_ON(b->sets->data->seq != i->seq);
- bch_check_key_order(b, i);
+ bch_check_keys(b, "writing");
cancel_delayed_work(&b->work);
@@ -405,6 +471,15 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
bch_bset_init_next(b);
}
+static void bch_btree_node_write_sync(struct btree *b)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch_btree_node_write(b, &cl);
+ closure_sync(&cl);
+}
+
static void btree_node_write_work(struct work_struct *w)
{
struct btree *b = container_of(to_delayed_work(w), struct btree, work);
@@ -416,7 +491,7 @@ static void btree_node_write_work(struct work_struct *w)
rw_unlock(true, b);
}
-static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
struct bset *i = b->sets[b->nsets].data;
struct btree_write *w = btree_current_write(b);
@@ -429,15 +504,15 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
set_btree_node_dirty(b);
- if (op && op->journal) {
+ if (journal_ref) {
if (w->journal &&
- journal_pin_cmp(b->c, w, op)) {
+ journal_pin_cmp(b->c, w->journal, journal_ref)) {
atomic_dec_bug(w->journal);
w->journal = NULL;
}
if (!w->journal) {
- w->journal = op->journal;
+ w->journal = journal_ref;
atomic_inc(w->journal);
}
}
@@ -566,33 +641,32 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
return b;
}
-static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
+static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
+ struct closure cl;
+
+ closure_init_stack(&cl);
lockdep_assert_held(&b->c->bucket_lock);
if (!down_write_trylock(&b->lock))
return -ENOMEM;
- if (b->page_order < min_order) {
+ BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+
+ if (b->page_order < min_order ||
+ (!flush &&
+ (btree_node_dirty(b) ||
+ atomic_read(&b->io.cl.remaining) != -1))) {
rw_unlock(true, b);
return -ENOMEM;
}
- BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
-
- if (cl && btree_node_dirty(b))
- bch_btree_node_write(b, NULL);
-
- if (cl)
- closure_wait_event_async(&b->io.wait, cl,
- atomic_read(&b->io.cl.remaining) == -1);
+ if (btree_node_dirty(b))
+ bch_btree_node_write_sync(b);
- if (btree_node_dirty(b) ||
- !closure_is_unlocked(&b->io.cl) ||
- work_pending(&b->work.work)) {
- rw_unlock(true, b);
- return -EAGAIN;
- }
+ /* wait for any in flight btree write */
+ closure_wait_event(&b->io.wait, &cl,
+ atomic_read(&b->io.cl.remaining) == -1);
return 0;
}
@@ -633,7 +707,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
break;
if (++i > 3 &&
- !mca_reap(b, NULL, 0)) {
+ !mca_reap(b, 0, false)) {
mca_data_free(b);
rw_unlock(true, b);
freed++;
@@ -652,7 +726,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
list_rotate_left(&c->btree_cache);
if (!b->accessed &&
- !mca_reap(b, NULL, 0)) {
+ !mca_reap(b, 0, false)) {
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
@@ -723,12 +797,9 @@ int bch_btree_cache_alloc(struct cache_set *c)
{
unsigned i;
- /* XXX: doesn't check for errors */
-
- closure_init_unlocked(&c->gc);
-
for (i = 0; i < mca_reserve(c); i++)
- mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+ if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
+ return -ENOMEM;
list_splice_init(&c->btree_cache,
&c->btree_cache_freeable);
@@ -775,52 +846,27 @@ out:
return b;
}
-static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
- int level, struct closure *cl)
+static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
{
- int ret = -ENOMEM;
- struct btree *i;
+ struct btree *b;
trace_bcache_btree_cache_cannibalize(c);
- if (!cl)
- return ERR_PTR(-ENOMEM);
-
- /*
- * Trying to free up some memory - i.e. reuse some btree nodes - may
- * require initiating IO to flush the dirty part of the node. If we're
- * running under generic_make_request(), that IO will never finish and
- * we would deadlock. Returning -EAGAIN causes the cache lookup code to
- * punt to workqueue and retry.
- */
- if (current->bio_list)
- return ERR_PTR(-EAGAIN);
-
- if (c->try_harder && c->try_harder != cl) {
- closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
- return ERR_PTR(-EAGAIN);
- }
+ if (!c->try_harder) {
+ c->try_harder = current;
+ c->try_harder_start = local_clock();
+ } else if (c->try_harder != current)
+ return ERR_PTR(-ENOSPC);
- c->try_harder = cl;
- c->try_harder_start = local_clock();
-retry:
- list_for_each_entry_reverse(i, &c->btree_cache, list) {
- int r = mca_reap(i, cl, btree_order(k));
- if (!r)
- return i;
- if (r != -ENOMEM)
- ret = r;
- }
+ list_for_each_entry_reverse(b, &c->btree_cache, list)
+ if (!mca_reap(b, btree_order(k), false))
+ return b;
- if (ret == -EAGAIN &&
- closure_blocking(cl)) {
- mutex_unlock(&c->bucket_lock);
- closure_sync(cl);
- mutex_lock(&c->bucket_lock);
- goto retry;
- }
+ list_for_each_entry_reverse(b, &c->btree_cache, list)
+ if (!mca_reap(b, btree_order(k), true))
+ return b;
- return ERR_PTR(ret);
+ return ERR_PTR(-ENOMEM);
}
/*
@@ -829,20 +875,21 @@ retry:
* cannibalize_bucket() will take. This means every time we unlock the root of
* the btree, we need to release this lock if we have it held.
*/
-void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
+static void bch_cannibalize_unlock(struct cache_set *c)
{
- if (c->try_harder == cl) {
+ if (c->try_harder == current) {
bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
c->try_harder = NULL;
- __closure_wake_up(&c->try_wait);
+ wake_up(&c->try_wait);
}
}
-static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
- int level, struct closure *cl)
+static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
{
struct btree *b;
+ BUG_ON(current->bio_list);
+
lockdep_assert_held(&c->bucket_lock);
if (mca_find(c, k))
@@ -852,14 +899,14 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
* the list. Check if there's any freed nodes there:
*/
list_for_each_entry(b, &c->btree_cache_freeable, list)
- if (!mca_reap(b, NULL, btree_order(k)))
+ if (!mca_reap(b, btree_order(k), false))
goto out;
/* We never free struct btree itself, just the memory that holds the on
* disk node. Check the freed list before allocating a new one:
*/
list_for_each_entry(b, &c->btree_cache_freed, list)
- if (!mca_reap(b, NULL, 0)) {
+ if (!mca_reap(b, 0, false)) {
mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
if (!b->sets[0].data)
goto err;
@@ -884,6 +931,7 @@ out:
lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
b->level = level;
+ b->parent = (void *) ~0UL;
mca_reinit(b);
@@ -892,7 +940,7 @@ err:
if (b)
rw_unlock(true, b);
- b = mca_cannibalize(c, k, level, cl);
+ b = mca_cannibalize(c, k);
if (!IS_ERR(b))
goto out;
@@ -903,17 +951,15 @@ err:
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary.
*
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and we are running under generic_make_request(), returns -EAGAIN.
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
*/
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
- int level, struct btree_op *op)
+ int level, bool write)
{
int i = 0;
- bool write = level <= op->lock;
struct btree *b;
BUG_ON(level < 0);
@@ -925,7 +971,7 @@ retry:
return ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level, &op->cl);
+ b = mca_alloc(c, k, level);
mutex_unlock(&c->bucket_lock);
if (!b)
@@ -971,7 +1017,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
struct btree *b;
mutex_lock(&c->bucket_lock);
- b = mca_alloc(c, k, level, NULL);
+ b = mca_alloc(c, k, level);
mutex_unlock(&c->bucket_lock);
if (!IS_ERR_OR_NULL(b)) {
@@ -982,17 +1028,12 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
/* Btree alloc */
-static void btree_node_free(struct btree *b, struct btree_op *op)
+static void btree_node_free(struct btree *b)
{
unsigned i;
trace_bcache_btree_node_free(b);
- /*
- * The BUG_ON() in btree_node_get() implies that we must have a write
- * lock on parent to free or even invalidate a node
- */
- BUG_ON(op->lock <= b->level);
BUG_ON(b == b->c->root);
if (btree_node_dirty(b))
@@ -1015,27 +1056,26 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
mutex_unlock(&b->c->bucket_lock);
}
-struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
- struct closure *cl)
+struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
{
BKEY_PADDED(key) k;
struct btree *b = ERR_PTR(-EAGAIN);
mutex_lock(&c->bucket_lock);
retry:
- if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
+ if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
goto err;
+ bkey_put(c, &k.key);
SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
- b = mca_alloc(c, &k.key, level, cl);
+ b = mca_alloc(c, &k.key, level);
if (IS_ERR(b))
goto err_free;
if (!b) {
cache_bug(c,
"Tried to allocate bucket that was in btree cache");
- __bkey_put(c, &k.key);
goto retry;
}
@@ -1048,7 +1088,6 @@ retry:
return b;
err_free:
bch_bucket_free(c, &k.key);
- __bkey_put(c, &k.key);
err:
mutex_unlock(&c->bucket_lock);
@@ -1056,16 +1095,31 @@ err:
return b;
}
-static struct btree *btree_node_alloc_replacement(struct btree *b,
- struct closure *cl)
+static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
{
- struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
+ struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
if (!IS_ERR_OR_NULL(n))
bch_btree_sort_into(b, n);
return n;
}
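+/*
+ * Build the key used to free @b: a copy of b->key that compares equal
+ * to ZERO_KEY, with each pointer's gen bumped so pointers to the old
+ * node become stale once the key is inserted.
+ */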
+static void make_btree_freeing_key(struct btree *b, struct bkey *k)
+{
+ unsigned i;
+
+ bkey_copy(k, &b->key);
+ bkey_copy_key(k, &ZERO_KEY);
+
+ for (i = 0; i < KEY_PTRS(k); i++) {
+ uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
+
+ SET_PTR_GEN(k, i, g);
+ }
+
+ atomic_inc(&b->c->prio_blocked);
+}
+
/* Garbage collection */
uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1119,12 +1173,10 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
-static int btree_gc_mark_node(struct btree *b, unsigned *keys,
- struct gc_stat *gc)
+static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
- unsigned last_dev = -1;
- struct bcache_device *d = NULL;
+ unsigned keys = 0, good_keys = 0;
struct bkey *k;
struct btree_iter iter;
struct bset_tree *t;
@@ -1132,27 +1184,17 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
gc->nodes++;
for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
- if (last_dev != KEY_INODE(k)) {
- last_dev = KEY_INODE(k);
-
- d = KEY_INODE(k) < b->c->nr_uuids
- ? b->c->devices[last_dev]
- : NULL;
- }
-
stale = max(stale, btree_mark_key(b, k));
+ keys++;
if (bch_ptr_bad(b, k))
continue;
- *keys += bkey_u64s(k);
-
gc->key_bytes += bkey_u64s(k);
gc->nkeys++;
+ good_keys++;
gc->data += KEY_SIZE(k);
- if (KEY_DIRTY(k))
- gc->dirty += KEY_SIZE(k);
}
for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1161,78 +1203,74 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
bkey_cmp(&b->key, &t->end) < 0,
b, "found short btree key in gc");
- return stale;
-}
-
-static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
- struct btree_op *op)
-{
- /*
- * We block priorities from being written for the duration of garbage
- * collection, so we can't sleep in btree_alloc() ->
- * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
- * our closure.
- */
- struct btree *n = btree_node_alloc_replacement(b, NULL);
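+ /*
+ * Decide whether gc should rewrite this node: always if the
+ * cache set requests it, if too many pointers are stale, or
+ * if more than half the keys are bad.
+ */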
+ if (b->c->gc_always_rewrite)
+ return true;
- if (!IS_ERR_OR_NULL(n)) {
- swap(b, n);
- __bkey_put(b->c, &b->key);
+ if (stale > 10)
+ return true;
- memcpy(k->ptr, b->key.ptr,
- sizeof(uint64_t) * KEY_PTRS(&b->key));
+ if ((keys - good_keys) * 2 > keys)
+ return true;
- btree_node_free(n, op);
- up_write(&n->lock);
- }
-
- return b;
+ return false;
}
-/*
- * Leaving this at 2 until we've got incremental garbage collection done; it
- * could be higher (and has been tested with 4) except that garbage collection
- * could take much longer, adversely affecting latency.
- */
-#define GC_MERGE_NODES 2U
+#define GC_MERGE_NODES 4U
struct gc_merge_info {
struct btree *b;
- struct bkey *k;
unsigned keys;
};
-static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
- struct gc_stat *gc, struct gc_merge_info *r)
+static int bch_btree_insert_node(struct btree *, struct btree_op *,
+ struct keylist *, atomic_t *, struct bkey *);
+
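+/*
+ * Try to coalesce up to GC_MERGE_NODES adjacent nodes into fewer
+ * replacement nodes. Returns -EINTR on success, since the caller's
+ * iterator is invalidated; 0 if nothing was coalesced.
+ */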
+static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
+ struct keylist *keylist, struct gc_stat *gc,
+ struct gc_merge_info *r)
{
- unsigned nodes = 0, keys = 0, blocks;
- int i;
+ unsigned i, nodes = 0, keys = 0, blocks;
+ struct btree *new_nodes[GC_MERGE_NODES];
+ struct closure cl;
+ struct bkey *k;
- while (nodes < GC_MERGE_NODES && r[nodes].b)
+ memset(new_nodes, 0, sizeof(new_nodes));
+ closure_init_stack(&cl);
+
+ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3;
if (nodes < 2 ||
__set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
- return;
-
- for (i = nodes - 1; i >= 0; --i) {
- if (r[i].b->written)
- r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);
+ return 0;
- if (r[i].b->written)
- return;
+ for (i = 0; i < nodes; i++) {
+ new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
+ if (IS_ERR_OR_NULL(new_nodes[i]))
+ goto out_nocoalesce;
}
for (i = nodes - 1; i > 0; --i) {
- struct bset *n1 = r[i].b->sets->data;
- struct bset *n2 = r[i - 1].b->sets->data;
+ struct bset *n1 = new_nodes[i]->sets->data;
+ struct bset *n2 = new_nodes[i - 1]->sets->data;
struct bkey *k, *last = NULL;
keys = 0;
- if (i == 1) {
+ if (i > 1) {
+ for (k = n2->start;
+ k < end(n2);
+ k = bkey_next(k)) {
+ if (__set_blocks(n1, n1->keys + keys +
+ bkey_u64s(k), b->c) > blocks)
+ break;
+
+ last = k;
+ keys += bkey_u64s(k);
+ }
+ } else {
/*
* Last node we're not getting rid of - we're getting
* rid of the node at r[0]. Have to try and fit all of
@@ -1241,37 +1279,27 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
* length keys (shouldn't be possible in practice,
* though)
*/
- if (__set_blocks(n1, n1->keys + r->keys,
- b->c) > btree_blocks(r[i].b))
- return;
+ if (__set_blocks(n1, n1->keys + n2->keys,
+ b->c) > btree_blocks(new_nodes[i]))
+ goto out_nocoalesce;
keys = n2->keys;
+ /* Take the key of the node we're getting rid of */
last = &r->b->key;
- } else
- for (k = n2->start;
- k < end(n2);
- k = bkey_next(k)) {
- if (__set_blocks(n1, n1->keys + keys +
- bkey_u64s(k), b->c) > blocks)
- break;
-
- last = k;
- keys += bkey_u64s(k);
- }
+ }
BUG_ON(__set_blocks(n1, n1->keys + keys,
- b->c) > btree_blocks(r[i].b));
+ b->c) > btree_blocks(new_nodes[i]));
- if (last) {
- bkey_copy_key(&r[i].b->key, last);
- bkey_copy_key(r[i].k, last);
- }
+ if (last)
+ bkey_copy_key(&new_nodes[i]->key, last);
memcpy(end(n1),
n2->start,
(void *) node(n2, keys) - (void *) n2->start);
n1->keys += keys;
+ r[i].keys = n1->keys;
memmove(n2->start,
node(n2, keys),
@@ -1279,95 +1307,176 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
n2->keys -= keys;
- r[i].keys = n1->keys;
- r[i - 1].keys = n2->keys;
+ if (bch_keylist_realloc(keylist,
+ KEY_PTRS(&new_nodes[i]->key), b->c))
+ goto out_nocoalesce;
+
+ bch_btree_node_write(new_nodes[i], &cl);
+ bch_keylist_add(keylist, &new_nodes[i]->key);
}
- btree_node_free(r->b, op);
- up_write(&r->b->lock);
+ for (i = 0; i < nodes; i++) {
+ if (bch_keylist_realloc(keylist, KEY_PTRS(&r[i].b->key), b->c))
+ goto out_nocoalesce;
- trace_bcache_btree_gc_coalesce(nodes);
+ make_btree_freeing_key(r[i].b, keylist->top);
+ bch_keylist_push(keylist);
+ }
+
+ /* We emptied out this node */
+ BUG_ON(new_nodes[0]->sets->data->keys);
+ btree_node_free(new_nodes[0]);
+ rw_unlock(true, new_nodes[0]);
+
+ closure_sync(&cl);
+
+ for (i = 0; i < nodes; i++) {
+ btree_node_free(r[i].b);
+ rw_unlock(true, r[i].b);
+
+ r[i].b = new_nodes[i];
+ }
+
+ bch_btree_insert_node(b, op, keylist, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(keylist));
+
+ memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
+ r[nodes - 1].b = ERR_PTR(-EINTR);
+ trace_bcache_btree_gc_coalesce(nodes);
gc->nodes--;
- nodes--;
- memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
- memset(&r[nodes], 0, sizeof(struct gc_merge_info));
+ /* Invalidated our iterator */
+ return -EINTR;
+
+out_nocoalesce:
+ closure_sync(&cl);
+
+ while ((k = bch_keylist_pop(keylist)))
+ if (!bkey_cmp(k, &ZERO_KEY))
+ atomic_dec(&b->c->prio_blocked);
+
+ for (i = 0; i < nodes; i++)
+ if (!IS_ERR_OR_NULL(new_nodes[i])) {
+ btree_node_free(new_nodes[i]);
+ rw_unlock(true, new_nodes[i]);
+ }
+ return 0;
}
-static int btree_gc_recurse(struct btree *b, struct btree_op *op,
- struct closure *writes, struct gc_stat *gc)
+static unsigned btree_gc_count_keys(struct btree *b)
{
- void write(struct btree *r)
- {
- if (!r->written)
- bch_btree_node_write(r, &op->cl);
- else if (btree_node_dirty(r))
- bch_btree_node_write(r, writes);
+ struct bkey *k;
+ struct btree_iter iter;
+ unsigned ret = 0;
- up_write(&r->lock);
- }
+ for_each_key_filter(b, k, &iter, bch_ptr_bad)
+ ret += bkey_u64s(k);
- int ret = 0, stale;
+ return ret;
+}
+
+static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+ struct closure *writes, struct gc_stat *gc)
+{
unsigned i;
+ int ret = 0;
+ bool should_rewrite;
+ struct btree *n;
+ struct bkey *k;
+ struct keylist keys;
+ struct btree_iter iter;
struct gc_merge_info r[GC_MERGE_NODES];
+ struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
- memset(r, 0, sizeof(r));
+ bch_keylist_init(&keys);
+ bch_btree_iter_init(b, &iter, &b->c->gc_done);
- while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
- r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);
+ for (i = 0; i < GC_MERGE_NODES; i++)
+ r[i].b = ERR_PTR(-EINTR);
- if (IS_ERR(r->b)) {
- ret = PTR_ERR(r->b);
- break;
+ while (1) {
+ k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ if (k) {
+ r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
+ if (IS_ERR(r->b)) {
+ ret = PTR_ERR(r->b);
+ break;
+ }
+
+ r->keys = btree_gc_count_keys(r->b);
+
+ ret = btree_gc_coalesce(b, op, &keys, gc, r);
+ if (ret)
+ break;
}
- r->keys = 0;
- stale = btree_gc_mark_node(r->b, &r->keys, gc);
+ if (!last->b)
+ break;
- if (!b->written &&
- (r->b->level || stale > 10 ||
- b->c->gc_always_rewrite))
- r->b = btree_gc_alloc(r->b, r->k, op);
+ if (!IS_ERR(last->b)) {
+ should_rewrite = btree_gc_mark_node(last->b, gc);
+ if (should_rewrite) {
+ n = btree_node_alloc_replacement(last->b,
+ false);
- if (r->b->level)
- ret = btree_gc_recurse(r->b, op, writes, gc);
+ if (!IS_ERR_OR_NULL(n)) {
+ bch_btree_node_write_sync(n);
+ bch_keylist_add(&keys, &n->key);
- if (ret) {
- write(r->b);
- break;
- }
+ make_btree_freeing_key(last->b,
+ keys.top);
+ bch_keylist_push(&keys);
- bkey_copy_key(&b->c->gc_done, r->k);
+ btree_node_free(last->b);
- if (!b->written)
- btree_gc_coalesce(b, op, gc, r);
+ bch_btree_insert_node(b, op, &keys,
+ NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&keys));
- if (r[GC_MERGE_NODES - 1].b)
- write(r[GC_MERGE_NODES - 1].b);
+ rw_unlock(true, last->b);
+ last->b = n;
- memmove(&r[1], &r[0],
- sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));
+ /* Invalidated our iterator */
+ ret = -EINTR;
+ break;
+ }
+ }
+
+ if (last->b->level) {
+ ret = btree_gc_recurse(last->b, op, writes, gc);
+ if (ret)
+ break;
+ }
+
+ bkey_copy_key(&b->c->gc_done, &last->b->key);
+
+ /*
+ * Must flush leaf nodes before gc ends, since replace
+ * operations aren't journalled
+ */
+ if (btree_node_dirty(last->b))
+ bch_btree_node_write(last->b, writes);
+ rw_unlock(true, last->b);
+ }
+
+ memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
+ r->b = NULL;
- /* When we've got incremental GC working, we'll want to do
- * if (should_resched())
- * return -EAGAIN;
- */
- cond_resched();
-#if 0
if (need_resched()) {
ret = -EAGAIN;
break;
}
-#endif
}
- for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
- write(r[i].b);
+ for (i = 0; i < GC_MERGE_NODES; i++)
+ if (!IS_ERR_OR_NULL(r[i].b)) {
+ if (btree_node_dirty(r[i].b))
+ bch_btree_node_write(r[i].b, writes);
+ rw_unlock(true, r[i].b);
+ }
- /* Might have freed some children, must remove their keys */
- if (!b->written)
- bch_btree_sort(b);
+ bch_keylist_free(&keys);
return ret;
}
@@ -1376,29 +1485,31 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
struct closure *writes, struct gc_stat *gc)
{
struct btree *n = NULL;
- unsigned keys = 0;
- int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
-
- if (b->level || stale > 10)
- n = btree_node_alloc_replacement(b, NULL);
+ int ret = 0;
+ bool should_rewrite;
- if (!IS_ERR_OR_NULL(n))
- swap(b, n);
+ should_rewrite = btree_gc_mark_node(b, gc);
+ if (should_rewrite) {
+ n = btree_node_alloc_replacement(b, false);
- if (b->level)
- ret = btree_gc_recurse(b, op, writes, gc);
+ if (!IS_ERR_OR_NULL(n)) {
+ bch_btree_node_write_sync(n);
+ bch_btree_set_root(n);
+ btree_node_free(b);
+ rw_unlock(true, n);
- if (!b->written || btree_node_dirty(b)) {
- bch_btree_node_write(b, n ? &op->cl : NULL);
+ return -EINTR;
+ }
}
- if (!IS_ERR_OR_NULL(n)) {
- closure_sync(&op->cl);
- bch_btree_set_root(b);
- btree_node_free(n, op);
- rw_unlock(true, b);
+ if (b->level) {
+ ret = btree_gc_recurse(b, op, writes, gc);
+ if (ret)
+ return ret;
}
+ bkey_copy_key(&b->c->gc_done, &b->key);
+
return ret;
}
@@ -1450,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
GC_MARK_METADATA);
+ /* don't reclaim buckets to which writeback keys point */
+ rcu_read_lock();
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+ unsigned j;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ dc = container_of(d, struct cached_dev, disk);
+
+ spin_lock(&dc->writeback_keys.lock);
+ rbtree_postorder_for_each_entry_safe(w, n,
+ &dc->writeback_keys.keys, node)
+ for (j = 0; j < KEY_PTRS(&w->key); j++)
+ SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+ GC_MARK_DIRTY);
+ spin_unlock(&dc->writeback_keys.lock);
+ }
+ rcu_read_unlock();
+
for_each_cache(ca, c, i) {
uint64_t *i;
@@ -1479,9 +1612,8 @@ size_t bch_btree_gc_finish(struct cache_set *c)
return available;
}
-static void bch_btree_gc(struct closure *cl)
+static void bch_btree_gc(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
int ret;
unsigned long available;
struct gc_stat stats;
@@ -1493,47 +1625,73 @@ static void bch_btree_gc(struct closure *cl)
memset(&stats, 0, sizeof(struct gc_stat));
closure_init_stack(&writes);
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ bch_btree_op_init(&op, SHRT_MAX);
btree_gc_start(c);
- atomic_inc(&c->prio_blocked);
-
- ret = btree_root(gc_root, c, &op, &writes, &stats);
- closure_sync(&op.cl);
- closure_sync(&writes);
-
- if (ret) {
- pr_warn("gc failed!");
- continue_at(cl, bch_btree_gc, bch_gc_wq);
- }
+ do {
+ ret = btree_root(gc_root, c, &op, &writes, &stats);
+ closure_sync(&writes);
- /* Possibly wait for new UUIDs or whatever to hit disk */
- bch_journal_meta(c, &op.cl);
- closure_sync(&op.cl);
+ if (ret && ret != -EAGAIN)
+ pr_warn("gc failed!");
+ } while (ret);
available = bch_btree_gc_finish(c);
-
- atomic_dec(&c->prio_blocked);
wake_up_allocators(c);
bch_time_stats_update(&c->btree_gc_time, start_time);
stats.key_bytes *= sizeof(uint64_t);
- stats.dirty <<= 9;
stats.data <<= 9;
stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
trace_bcache_gc_end(c);
- continue_at(cl, bch_moving_gc, bch_gc_wq);
+ bch_moving_gc(c);
}
-void bch_queue_gc(struct cache_set *c)
+static int bch_gc_thread(void *arg)
{
- closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
+ struct cache_set *c = arg;
+ struct cache *ca;
+ unsigned i;
+
+ while (1) {
+again:
+ bch_btree_gc(c);
+
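+ /*
+ * Set TASK_INTERRUPTIBLE before re-checking the wakeup
+ * conditions, so a wake_up_gc() between the check and
+ * schedule() isn't lost.
+ */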
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&c->bucket_lock);
+
+ for_each_cache(ca, c, i)
+ if (ca->invalidate_needs_gc) {
+ mutex_unlock(&c->bucket_lock);
+ set_current_state(TASK_RUNNING);
+ goto again;
+ }
+
+ mutex_unlock(&c->bucket_lock);
+
+ try_to_freeze();
+ schedule();
+ }
+
+ return 0;
+}
+
+int bch_gc_thread_start(struct cache_set *c)
+{
+ c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+ if (IS_ERR(c->gc_thread))
+ return PTR_ERR(c->gc_thread);
+
+ set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
+ return 0;
}
/* Initial partial gc */
@@ -1541,9 +1699,9 @@ void bch_queue_gc(struct cache_set *c)
static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
unsigned long **seen)
{
- int ret;
+ int ret = 0;
unsigned i;
- struct bkey *k;
+ struct bkey *k, *p = NULL;
struct bucket *g;
struct btree_iter iter;
@@ -1570,31 +1728,32 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
}
if (b->level) {
- k = bch_next_recurse_key(b, &ZERO_KEY);
+ bch_btree_iter_init(b, &iter, NULL);
- while (k) {
- struct bkey *p = bch_next_recurse_key(b, k);
- if (p)
- btree_node_prefetch(b->c, p, b->level - 1);
+ do {
+ k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+ if (k)
+ btree_node_prefetch(b->c, k, b->level - 1);
- ret = btree(check_recurse, k, b, op, seen);
- if (ret)
- return ret;
+ if (p)
+ ret = btree(check_recurse, p, b, op, seen);
- k = p;
- }
+ p = k;
+ } while (p && !ret);
}
return 0;
}
-int bch_btree_check(struct cache_set *c, struct btree_op *op)
+int bch_btree_check(struct cache_set *c)
{
int ret = -ENOMEM;
unsigned i;
unsigned long *seen[MAX_CACHES_PER_SET];
+ struct btree_op op;
memset(seen, 0, sizeof(seen));
+ bch_btree_op_init(&op, SHRT_MAX);
for (i = 0; c->cache[i]; i++) {
size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1606,7 +1765,7 @@ int bch_btree_check(struct cache_set *c, struct btree_op *op)
memset(seen[i], 0xFF, n);
}
- ret = btree_root(check_recurse, c, op, seen);
+ ret = btree_root(check_recurse, c, &op, seen);
err:
for (i = 0; i < MAX_CACHES_PER_SET; i++)
kfree(seen[i]);
@@ -1628,10 +1787,9 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
bch_bset_fix_lookup_table(b, where);
}
-static bool fix_overlapping_extents(struct btree *b,
- struct bkey *insert,
+static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
struct btree_iter *iter,
- struct btree_op *op)
+ struct bkey *replace_key)
{
void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
{
@@ -1659,39 +1817,39 @@ static bool fix_overlapping_extents(struct btree *b,
* We might overlap with 0 size extents; we can't skip these
* because if they're in the set we're inserting to we have to
* adjust them so they don't overlap with the key we're
- * inserting. But we don't want to check them for BTREE_REPLACE
+ * inserting. But we don't want to check them for replace
* operations.
*/
- if (op->type == BTREE_REPLACE &&
- KEY_SIZE(k)) {
+ if (replace_key && KEY_SIZE(k)) {
/*
* k might have been split since we inserted/found the
* key we're replacing
*/
unsigned i;
uint64_t offset = KEY_START(k) -
- KEY_START(&op->replace);
+ KEY_START(replace_key);
/* But it must be a subset of the replace key */
- if (KEY_START(k) < KEY_START(&op->replace) ||
- KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+ if (KEY_START(k) < KEY_START(replace_key) ||
+ KEY_OFFSET(k) > KEY_OFFSET(replace_key))
goto check_failed;
/* We didn't find a key that we were supposed to */
if (KEY_START(k) > KEY_START(insert) + sectors_found)
goto check_failed;
- if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+ if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+ KEY_DIRTY(k) != KEY_DIRTY(replace_key))
goto check_failed;
/* skip past gen */
offset <<= 8;
- BUG_ON(!KEY_PTRS(&op->replace));
+ BUG_ON(!KEY_PTRS(replace_key));
- for (i = 0; i < KEY_PTRS(&op->replace); i++)
- if (k->ptr[i] != op->replace.ptr[i] + offset)
+ for (i = 0; i < KEY_PTRS(replace_key); i++)
+ if (k->ptr[i] != replace_key->ptr[i] + offset)
goto check_failed;
sectors_found = KEY_OFFSET(k) - KEY_START(insert);
@@ -1742,6 +1900,9 @@ static bool fix_overlapping_extents(struct btree *b,
if (bkey_cmp(insert, k) < 0) {
bch_cut_front(insert, k);
} else {
+ if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+ old_offset = KEY_START(insert);
+
if (bkey_written(b, k) &&
bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
/*
@@ -1759,9 +1920,8 @@ static bool fix_overlapping_extents(struct btree *b,
}
check_failed:
- if (op->type == BTREE_REPLACE) {
+ if (replace_key) {
if (!sectors_found) {
- op->insert_collision = true;
return true;
} else if (sectors_found < KEY_SIZE(insert)) {
SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
@@ -1774,7 +1934,7 @@ check_failed:
}
static bool btree_insert_key(struct btree *b, struct btree_op *op,
- struct bkey *k)
+ struct bkey *k, struct bkey *replace_key)
{
struct bset *i = b->sets[b->nsets].data;
struct bkey *m, *prev;
@@ -1786,22 +1946,23 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
if (!b->level) {
struct btree_iter iter;
- struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0);
/*
* bset_search() returns the first key that is strictly greater
* than the search key - but for back merging, we want to find
- * the first key that is greater than or equal to KEY_START(k) -
- * unless KEY_START(k) is 0.
+ * the previous key.
*/
- if (KEY_OFFSET(&search))
- SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1);
-
prev = NULL;
- m = bch_btree_iter_init(b, &iter, &search);
+ m = bch_btree_iter_init(b, &iter, PRECEDING_KEY(&START_KEY(k)));
- if (fix_overlapping_extents(b, k, &iter, op))
+ if (fix_overlapping_extents(b, k, &iter, replace_key)) {
+ op->insert_collision = true;
return false;
+ }
+
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+ KEY_START(k), KEY_SIZE(k));
while (m != end(i) &&
bkey_cmp(k, &START_KEY(m)) > 0)
@@ -1825,84 +1986,80 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
if (m != end(i) &&
bch_bkey_try_merge(b, k, m))
goto copy;
- } else
+ } else {
+ BUG_ON(replace_key);
m = bch_bset_search(b, &b->sets[b->nsets], k);
+ }
insert: shift_keys(b, m, k);
copy: bkey_copy(m, k);
merged:
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
- KEY_START(k), KEY_SIZE(k));
-
- bch_check_keys(b, "%u for %s", status, op_type(op));
+ bch_check_keys(b, "%u for %s", status,
+ replace_key ? "replace" : "insert");
if (b->level && !KEY_OFFSET(k))
btree_current_write(b)->prio_blocked++;
- trace_bcache_btree_insert_key(b, k, op->type, status);
+ trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
return true;
}
-static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
+static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ struct bkey *replace_key)
{
bool ret = false;
- struct bkey *k;
- unsigned oldsize = bch_count_data(b);
+ int oldsize = bch_count_data(b);
- while ((k = bch_keylist_pop(&op->keys))) {
- bkey_put(b->c, k, b->level);
- ret |= btree_insert_key(b, op, k);
- }
+ while (!bch_keylist_empty(insert_keys)) {
+ struct bset *i = write_block(b);
+ struct bkey *k = insert_keys->keys;
- BUG_ON(bch_count_data(b) < oldsize);
- return ret;
-}
-
-bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
- struct bio *bio)
-{
- bool ret = false;
- uint64_t btree_ptr = b->key.ptr[0];
- unsigned long seq = b->seq;
- BKEY_PADDED(k) tmp;
-
- rw_unlock(false, b);
- rw_lock(true, b, b->level);
+ if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
+ > btree_blocks(b))
+ break;
- if (b->key.ptr[0] != btree_ptr ||
- b->seq != seq + 1 ||
- should_split(b))
- goto out;
+ if (bkey_cmp(k, &b->key) <= 0) {
+ if (!b->level)
+ bkey_put(b->c, k);
- op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));
+ ret |= btree_insert_key(b, op, k, replace_key);
+ bch_keylist_pop_front(insert_keys);
+ } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
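+ /*
+ * Key straddles the end of this node: insert the
+ * front half here, trim the remainder to start at
+ * b->key and leave it on the keylist.
+ */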
+ BKEY_PADDED(key) temp;
+ bkey_copy(&temp.key, insert_keys->keys);
- SET_KEY_PTRS(&op->replace, 1);
- get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
+ bch_cut_back(&b->key, &temp.key);
+ bch_cut_front(&b->key, insert_keys->keys);
- SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV);
+ ret |= btree_insert_key(b, op, &temp.key, replace_key);
+ break;
+ } else {
+ break;
+ }
+ }
- bkey_copy(&tmp.k, &op->replace);
+ BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
- BUG_ON(op->type != BTREE_INSERT);
- BUG_ON(!btree_insert_key(b, op, &tmp.k));
- ret = true;
-out:
- downgrade_write(&b->lock);
+ BUG_ON(bch_count_data(b) < oldsize);
return ret;
}
-static int btree_split(struct btree *b, struct btree_op *op)
+static int btree_split(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ struct bkey *replace_key)
{
- bool split, root = b == b->c->root;
+ bool split;
struct btree *n1, *n2 = NULL, *n3 = NULL;
uint64_t start_time = local_clock();
+ struct closure cl;
+ struct keylist parent_keys;
- if (b->level)
- set_closure_blocking(&op->cl);
+ closure_init_stack(&cl);
+ bch_keylist_init(&parent_keys);
- n1 = btree_node_alloc_replacement(b, &op->cl);
+ n1 = btree_node_alloc_replacement(b, true);
if (IS_ERR(n1))
goto err;
@@ -1913,19 +2070,20 @@ static int btree_split(struct btree *b, struct btree_op *op)
trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
- n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
+ n2 = bch_btree_node_alloc(b->c, b->level, true);
if (IS_ERR(n2))
goto err_free1;
- if (root) {
- n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
+ if (!b->parent) {
+ n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
if (IS_ERR(n3))
goto err_free2;
}
- bch_btree_insert_keys(n1, op);
+ bch_btree_insert_keys(n1, op, insert_keys, replace_key);
- /* Has to be a linear search because we don't have an auxiliary
+ /*
+ * Has to be a linear search because we don't have an auxiliary
* search tree yet
*/
@@ -1944,60 +2102,57 @@ static int btree_split(struct btree *b, struct btree_op *op)
bkey_copy_key(&n2->key, &b->key);
- bch_keylist_add(&op->keys, &n2->key);
- bch_btree_node_write(n2, &op->cl);
+ bch_keylist_add(&parent_keys, &n2->key);
+ bch_btree_node_write(n2, &cl);
rw_unlock(true, n2);
} else {
trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
- bch_btree_insert_keys(n1, op);
+ bch_btree_insert_keys(n1, op, insert_keys, replace_key);
}
- bch_keylist_add(&op->keys, &n1->key);
- bch_btree_node_write(n1, &op->cl);
+ bch_keylist_add(&parent_keys, &n1->key);
+ bch_btree_node_write(n1, &cl);
if (n3) {
+ /* Depth increases, make a new root */
bkey_copy_key(&n3->key, &MAX_KEY);
- bch_btree_insert_keys(n3, op);
- bch_btree_node_write(n3, &op->cl);
+ bch_btree_insert_keys(n3, op, &parent_keys, NULL);
+ bch_btree_node_write(n3, &cl);
- closure_sync(&op->cl);
+ closure_sync(&cl);
bch_btree_set_root(n3);
rw_unlock(true, n3);
- } else if (root) {
- op->keys.top = op->keys.bottom;
- closure_sync(&op->cl);
- bch_btree_set_root(n1);
- } else {
- unsigned i;
- bkey_copy(op->keys.top, &b->key);
- bkey_copy_key(op->keys.top, &ZERO_KEY);
+ btree_node_free(b);
+ } else if (!b->parent) {
+ /* Root filled up but didn't need to be split */
+ closure_sync(&cl);
+ bch_btree_set_root(n1);
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
- uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1;
+ btree_node_free(b);
+ } else {
+ /* Split a non root node */
+ closure_sync(&cl);
+ make_btree_freeing_key(b, parent_keys.top);
+ bch_keylist_push(&parent_keys);
- SET_PTR_GEN(op->keys.top, i, g);
- }
+ btree_node_free(b);
- bch_keylist_push(&op->keys);
- closure_sync(&op->cl);
- atomic_inc(&b->c->prio_blocked);
+ bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
+ BUG_ON(!bch_keylist_empty(&parent_keys));
}
rw_unlock(true, n1);
- btree_node_free(b, op);
bch_time_stats_update(&b->c->btree_split_time, start_time);
return 0;
err_free2:
- __bkey_put(n2->c, &n2->key);
- btree_node_free(n2, op);
+ btree_node_free(n2);
rw_unlock(true, n2);
err_free1:
- __bkey_put(n1->c, &n1->key);
- btree_node_free(n1, op);
+ btree_node_free(n1);
rw_unlock(true, n1);
err:
if (n3 == ERR_PTR(-EAGAIN) ||
@@ -2009,116 +2164,126 @@ err:
return -ENOMEM;
}
-static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
- struct keylist *stack_keys)
+static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
+ struct keylist *insert_keys,
+ atomic_t *journal_ref,
+ struct bkey *replace_key)
{
- if (b->level) {
- int ret;
- struct bkey *insert = op->keys.bottom;
- struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert));
+ BUG_ON(b->level && replace_key);
- if (!k) {
- btree_bug(b, "no key to recurse on at level %i/%i",
- b->level, b->c->root->level);
-
- op->keys.top = op->keys.bottom;
- return -EIO;
+ if (should_split(b)) {
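+ /*
+ * Splitting may block on IO, which would deadlock under
+ * generic_make_request(): -EAGAIN punts the insert to
+ * process context, while -EINTR retries from the root
+ * once op->lock is raised high enough for write locks.
+ */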
+ if (current->bio_list) {
+ op->lock = b->c->root->level + 1;
+ return -EAGAIN;
+ } else if (op->lock <= b->c->root->level) {
+ op->lock = b->c->root->level + 1;
+ return -EINTR;
+ } else {
+ /* Invalidated all iterators */
+ return btree_split(b, op, insert_keys, replace_key) ?:
+ -EINTR;
}
+ } else {
+ BUG_ON(write_block(b) != b->sets[b->nsets].data);
- if (bkey_cmp(insert, k) > 0) {
- unsigned i;
-
- if (op->type == BTREE_REPLACE) {
- __bkey_put(b->c, insert);
- op->keys.top = op->keys.bottom;
- op->insert_collision = true;
- return 0;
- }
+ if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
+ if (!b->level)
+ bch_btree_leaf_dirty(b, journal_ref);
+ else
+ bch_btree_node_write_sync(b);
+ }
- for (i = 0; i < KEY_PTRS(insert); i++)
- atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin);
+ return 0;
+ }
+}
- bkey_copy(stack_keys->top, insert);
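+/*
+ * Insert a placeholder "check" key (device PTR_CHECK_DEV, random
+ * pointer), used by the cache miss path to detect races with writes:
+ * if we held only a read lock, upgrade it, and bail out with -EINTR
+ * if the node changed while the lock was dropped.
+ */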
+int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ struct bkey *check_key)
+{
+ int ret = -EINTR;
+ uint64_t btree_ptr = b->key.ptr[0];
+ unsigned long seq = b->seq;
+ struct keylist insert;
+ bool upgrade = op->lock == -1;
- bch_cut_back(k, insert);
- bch_cut_front(k, stack_keys->top);
+ bch_keylist_init(&insert);
- bch_keylist_push(stack_keys);
- }
+ if (upgrade) {
+ rw_unlock(false, b);
+ rw_lock(true, b, b->level);
- ret = btree(insert_recurse, k, b, op, stack_keys);
- if (ret)
- return ret;
+ if (b->key.ptr[0] != btree_ptr ||
+ b->seq != seq + 1)
+ goto out;
}
- if (!bch_keylist_empty(&op->keys)) {
- if (should_split(b)) {
- if (op->lock <= b->c->root->level) {
- BUG_ON(b->level);
- op->lock = b->c->root->level + 1;
- return -EINTR;
- }
- return btree_split(b, op);
- }
+ SET_KEY_PTRS(check_key, 1);
+ get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
- BUG_ON(write_block(b) != b->sets[b->nsets].data);
+ SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
- if (bch_btree_insert_keys(b, op)) {
- if (!b->level)
- bch_btree_leaf_dirty(b, op);
- else
- bch_btree_node_write(b, &op->cl);
- }
- }
+ bch_keylist_add(&insert, check_key);
- return 0;
+ ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
+
+ BUG_ON(!ret && !bch_keylist_empty(&insert));
+out:
+ if (upgrade)
+ downgrade_write(&b->lock);
+ return ret;
}
-int bch_btree_insert(struct btree_op *op, struct cache_set *c)
+struct btree_insert_op {
+ struct btree_op op;
+ struct keylist *keys;
+ atomic_t *journal_ref;
+ struct bkey *replace_key;
+};
+
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
- int ret = 0;
- struct keylist stack_keys;
+ struct btree_insert_op *op = container_of(b_op,
+ struct btree_insert_op, op);
- /*
- * Don't want to block with the btree locked unless we have to,
- * otherwise we get deadlocks with try_harder and between split/gc
- */
- clear_closure_blocking(&op->cl);
-
- BUG_ON(bch_keylist_empty(&op->keys));
- bch_keylist_copy(&stack_keys, &op->keys);
- bch_keylist_init(&op->keys);
-
- while (!bch_keylist_empty(&stack_keys) ||
- !bch_keylist_empty(&op->keys)) {
- if (bch_keylist_empty(&op->keys)) {
- bch_keylist_add(&op->keys,
- bch_keylist_pop(&stack_keys));
- op->lock = 0;
- }
+ int ret = bch_btree_insert_node(b, &op->op, op->keys,
+ op->journal_ref, op->replace_key);
+ if (ret && !bch_keylist_empty(op->keys))
+ return ret;
+ else
+ return MAP_DONE;
+}
- ret = btree_root(insert_recurse, c, op, &stack_keys);
+int bch_btree_insert(struct cache_set *c, struct keylist *keys,
+ atomic_t *journal_ref, struct bkey *replace_key)
+{
+ struct btree_insert_op op;
+ int ret = 0;
- if (ret == -EAGAIN) {
- ret = 0;
- closure_sync(&op->cl);
- } else if (ret) {
- struct bkey *k;
+ BUG_ON(current->bio_list);
+ BUG_ON(bch_keylist_empty(keys));
+
+ bch_btree_op_init(&op.op, 0);
+ op.keys = keys;
+ op.journal_ref = journal_ref;
+ op.replace_key = replace_key;
+
+ while (!ret && !bch_keylist_empty(keys)) {
+ op.op.lock = 0;
+ ret = bch_btree_map_leaf_nodes(&op.op, c,
+ &START_KEY(keys->keys),
+ btree_insert_fn);
+ }
- pr_err("error %i trying to insert key for %s",
- ret, op_type(op));
+ if (ret) {
+ struct bkey *k;
- while ((k = bch_keylist_pop(&stack_keys) ?:
- bch_keylist_pop(&op->keys)))
- bkey_put(c, k, 0);
- }
- }
+ pr_err("error %i", ret);
- bch_keylist_free(&stack_keys);
+ while ((k = bch_keylist_pop(keys)))
+ bkey_put(c, k);
+ } else if (op.op.insert_collision)
+ ret = -ESRCH;
- if (op->journal)
- atomic_dec_bug(op->journal);
- op->journal = NULL;
return ret;
}
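+
+/*
+ * Typical caller pattern (illustrative sketch only; 'k' stands in for
+ * a key built by the caller):
+ *
+ *	struct keylist keys;
+ *
+ *	bch_keylist_init(&keys);
+ *	bch_keylist_add(&keys, &k);
+ *	ret = bch_btree_insert(c, &keys, NULL, NULL);
+ */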
@@ -2141,132 +2306,81 @@ void bch_btree_set_root(struct btree *b)
mutex_unlock(&b->c->bucket_lock);
b->c->root = b;
- __bkey_put(b->c, &b->key);
bch_journal_meta(b->c, &cl);
closure_sync(&cl);
}
-/* Cache lookup */
+/* Map across nodes or keys */
-static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
- struct bkey *k)
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from,
+ btree_map_nodes_fn *fn, int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
- int ret = 0;
+ int ret = MAP_CONTINUE;
+
+ if (b->level) {
+ struct bkey *k;
+ struct btree_iter iter;
- while (!ret &&
- !op->lookup_done) {
- unsigned sectors = INT_MAX;
+ bch_btree_iter_init(b, &iter, from);
- if (KEY_INODE(k) == op->inode) {
- if (KEY_START(k) <= bio->bi_sector)
- break;
+ while ((k = bch_btree_iter_next_filter(&iter, b,
+ bch_ptr_bad))) {
+ ret = btree(map_nodes_recurse, k, b,
+ op, from, fn, flags);
+ from = NULL;
- sectors = min_t(uint64_t, sectors,
- KEY_START(k) - bio->bi_sector);
+ if (ret != MAP_CONTINUE)
+ return ret;
}
-
- ret = s->d->cache_miss(b, s, bio, sectors);
}
+ if (!b->level || flags == MAP_ALL_NODES)
+ ret = fn(op, b);
+
return ret;
}
-/*
- * Read from a single key, handling the initial cache miss if the key starts in
- * the middle of the bio
- */
-static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
- struct bkey *k)
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
- unsigned ptr;
- struct bio *n;
-
- int ret = submit_partial_cache_miss(b, op, k);
- if (ret || op->lookup_done)
- return ret;
-
- /* XXX: figure out best pointer - for multiple cache devices */
- ptr = 0;
-
- PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
-
- while (!op->lookup_done &&
- KEY_INODE(k) == op->inode &&
- bio->bi_sector < KEY_OFFSET(k)) {
- struct bkey *bio_key;
- sector_t sector = PTR_OFFSET(k, ptr) +
- (bio->bi_sector - KEY_START(k));
- unsigned sectors = min_t(uint64_t, INT_MAX,
- KEY_OFFSET(k) - bio->bi_sector);
-
- n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
- if (n == bio)
- op->lookup_done = true;
-
- bio_key = &container_of(n, struct bbio, bio)->key;
-
- /*
- * The bucket we're reading from might be reused while our bio
- * is in flight, and we could then end up reading the wrong
- * data.
- *
- * We guard against this by checking (in cache_read_endio()) if
- * the pointer is stale again; if so, we treat it as an error
- * and reread from the backing device (but we don't pass that
- * error up anywhere).
- */
-
- bch_bkey_copy_single_ptr(bio_key, k, ptr);
- SET_PTR_OFFSET(bio_key, 0, sector);
-
- n->bi_end_io = bch_cache_read_endio;
- n->bi_private = &s->cl;
-
- __bch_submit_bbio(n, b->c);
- }
-
- return 0;
+ return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}
-int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
+static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+ struct bkey *from, btree_map_keys_fn *fn,
+ int flags)
{
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = &s->bio.bio;
-
- int ret = 0;
+ int ret = MAP_CONTINUE;
struct bkey *k;
struct btree_iter iter;
- bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
- do {
- k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
- if (!k) {
- /*
- * b->key would be exactly what we want, except that
- * pointers to btree nodes have nonzero size - we
- * wouldn't go far enough
- */
+ bch_btree_iter_init(b, &iter, from);
- ret = submit_partial_cache_miss(b, op,
- &KEY(KEY_INODE(&b->key),
- KEY_OFFSET(&b->key), 0));
- break;
- }
+ while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+ ret = !b->level
+ ? fn(op, b, k)
+ : btree(map_keys_recurse, k, b, op, from, fn, flags);
+ from = NULL;
+
+ if (ret != MAP_CONTINUE)
+ return ret;
+ }
- ret = b->level
- ? btree(search_recurse, k, b, op)
- : submit_partial_cache_hit(b, op, k);
- } while (!ret &&
- !op->lookup_done);
+ if (!b->level && (flags & MAP_END_KEY))
+ ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+ KEY_OFFSET(&b->key), 0));
return ret;
}
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+ return btree_root(map_keys_recurse, c, op, from, fn, flags);
+}
+
/* Keybuf code */
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
@@ -2285,80 +2399,79 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}
-static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
- struct keybuf *buf, struct bkey *end,
- keybuf_pred_fn *pred)
-{
- struct btree_iter iter;
- bch_btree_iter_init(b, &iter, &buf->last_scanned);
-
- while (!array_freelist_empty(&buf->freelist)) {
- struct bkey *k = bch_btree_iter_next_filter(&iter, b,
- bch_ptr_bad);
-
- if (!b->level) {
- if (!k) {
- buf->last_scanned = b->key;
- break;
- }
+struct refill {
+ struct btree_op op;
+ unsigned nr_found;
+ struct keybuf *buf;
+ struct bkey *end;
+ keybuf_pred_fn *pred;
+};
- buf->last_scanned = *k;
- if (bkey_cmp(&buf->last_scanned, end) >= 0)
- break;
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+ struct bkey *k)
+{
+ struct refill *refill = container_of(op, struct refill, op);
+ struct keybuf *buf = refill->buf;
+ int ret = MAP_CONTINUE;
- if (pred(buf, k)) {
- struct keybuf_key *w;
+ if (bkey_cmp(k, refill->end) >= 0) {
+ ret = MAP_DONE;
+ goto out;
+ }
- spin_lock(&buf->lock);
+ if (!KEY_SIZE(k)) /* end key */
+ goto out;
- w = array_alloc(&buf->freelist);
+ if (refill->pred(buf, k)) {
+ struct keybuf_key *w;
- w->private = NULL;
- bkey_copy(&w->key, k);
+ spin_lock(&buf->lock);
- if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
- array_free(&buf->freelist, w);
+ w = array_alloc(&buf->freelist);
+ if (!w) {
+ spin_unlock(&buf->lock);
+ return MAP_DONE;
+ }
- spin_unlock(&buf->lock);
- }
- } else {
- if (!k)
- break;
+ w->private = NULL;
+ bkey_copy(&w->key, k);
- btree(refill_keybuf, k, b, op, buf, end, pred);
- /*
- * Might get an error here, but can't really do anything
- * and it'll get logged elsewhere. Just read what we
- * can.
- */
+ if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+ array_free(&buf->freelist, w);
+ else
+ refill->nr_found++;
- if (bkey_cmp(&buf->last_scanned, end) >= 0)
- break;
+ if (array_freelist_empty(&buf->freelist))
+ ret = MAP_DONE;
- cond_resched();
- }
+ spin_unlock(&buf->lock);
}
-
- return 0;
+out:
+ buf->last_scanned = *k;
+ return ret;
}
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
struct bkey *end, keybuf_pred_fn *pred)
{
struct bkey start = buf->last_scanned;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
+ struct refill refill;
cond_resched();
- btree_root(refill_keybuf, c, &op, buf, end, pred);
- closure_sync(&op.cl);
+ bch_btree_op_init(&refill.op, -1);
+ refill.nr_found = 0;
+ refill.buf = buf;
+ refill.end = end;
+ refill.pred = pred;
+
+ bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+ refill_keybuf_fn, MAP_END_KEY);
- pr_debug("found %s keys from %llu:%llu to %llu:%llu",
- RB_EMPTY_ROOT(&buf->keys) ? "no" :
- array_freelist_empty(&buf->freelist) ? "some" : "a few",
- KEY_INODE(&start), KEY_OFFSET(&start),
- KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned));
+ trace_bcache_keyscan(refill.nr_found,
+ KEY_INODE(&start), KEY_OFFSET(&start),
+ KEY_INODE(&buf->last_scanned),
+ KEY_OFFSET(&buf->last_scanned));
spin_lock(&buf->lock);
@@ -2436,9 +2549,9 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
}
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
- struct keybuf *buf,
- struct bkey *end,
- keybuf_pred_fn *pred)
+ struct keybuf *buf,
+ struct bkey *end,
+ keybuf_pred_fn *pred)
{
struct keybuf_key *ret;
@@ -2471,14 +2584,12 @@ void bch_btree_exit(void)
{
if (btree_io_wq)
destroy_workqueue(btree_io_wq);
- if (bch_gc_wq)
- destroy_workqueue(bch_gc_wq);
}
int __init bch_btree_init(void)
{
- if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
- !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
+ btree_io_wq = create_singlethread_workqueue("bch_btree_io");
+ if (!btree_io_wq)
return -ENOMEM;
return 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 3333d3723633..767e75570896 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -125,6 +125,7 @@ struct btree {
unsigned long seq;
struct rw_semaphore lock;
struct cache_set *c;
+ struct btree *parent;
unsigned long flags;
uint16_t written; /* would be nice to kill */
@@ -200,12 +201,7 @@ static inline bool bkey_written(struct btree *b, struct bkey *k)
static inline void set_gc_sectors(struct cache_set *c)
{
- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
- return __bch_ptr_invalid(b->c, b->level, k);
+ atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}
static inline struct bkey *bch_btree_iter_init(struct btree *b,
@@ -215,6 +211,16 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
return __bch_btree_iter_init(b, iter, search, b->sets);
}
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+ if (b->level)
+ return bch_btree_ptr_invalid(b->c, k);
+ else
+ return bch_extent_ptr_invalid(b->c, k);
+}
+
+void bkey_put(struct cache_set *c, struct bkey *k);
+
/* Looping macros */
#define for_each_cached_btree(b, c, iter) \
@@ -234,51 +240,17 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
/* Recursing down the btree */
struct btree_op {
- struct closure cl;
- struct cache_set *c;
-
- /* Journal entry we have a refcount on */
- atomic_t *journal;
-
- /* Bio to be inserted into the cache */
- struct bio *cache_bio;
-
- unsigned inode;
-
- uint16_t write_prio;
-
/* Btree level at which we start taking write locks */
short lock;
- /* Btree insertion type */
- enum {
- BTREE_INSERT,
- BTREE_REPLACE
- } type:8;
-
- unsigned csum:1;
- unsigned skip:1;
- unsigned flush_journal:1;
-
- unsigned insert_data_done:1;
- unsigned lookup_done:1;
unsigned insert_collision:1;
-
- /* Anything after this point won't get zeroed in do_bio_hook() */
-
- /* Keys to be inserted */
- struct keylist keys;
- BKEY_PADDED(replace);
};
-enum {
- BTREE_INSERT_STATUS_INSERT,
- BTREE_INSERT_STATUS_BACK_MERGE,
- BTREE_INSERT_STATUS_OVERWROTE,
- BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+ memset(op, 0, sizeof(struct btree_op));
+ op->lock = write_lock_level;
+}
static inline void rw_lock(bool w, struct btree *b, int level)
{
@@ -290,108 +262,71 @@ static inline void rw_lock(bool w, struct btree *b, int level)
static inline void rw_unlock(bool w, struct btree *b)
{
-#ifdef CONFIG_BCACHE_EDEBUG
- unsigned i;
-
- if (w && b->key.ptr[0])
- for (i = 0; i <= b->nsets; i++)
- bch_check_key_order(b, b->sets[i].data);
-#endif
-
if (w)
b->seq++;
(w ? up_write : up_read)(&b->lock);
}
-#define insert_lock(s, b) ((b)->level <= (s)->lock)
+void bch_btree_node_read(struct btree *);
+void bch_btree_node_write(struct btree *, struct closure *);
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
+void bch_btree_set_root(struct btree *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
-/**
- * btree - recurse down the btree on a specified key
- * @fn: function to call, which will be passed the child node
- * @key: key to recurse on
- * @b: parent btree node
- * @op: pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...) \
-({ \
- int _r, l = (b)->level - 1; \
- bool _w = l <= (op)->lock; \
- struct btree *_b = bch_btree_node_get((b)->c, key, l, op); \
- if (!IS_ERR(_b)) { \
- _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
- rw_unlock(_w, _b); \
- } else \
- _r = PTR_ERR(_b); \
- _r; \
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn: function to call, which will be passed the child node
- * @c: cache set
- * @op: pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...) \
-({ \
- int _r = -EINTR; \
- do { \
- struct btree *_b = (c)->root; \
- bool _w = insert_lock(op, _b); \
- rw_lock(_w, _b, _b->level); \
- if (_b == (c)->root && \
- _w == insert_lock(op, _b)) \
- _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
- rw_unlock(_w, _b); \
- bch_cannibalize_unlock(c, &(op)->cl); \
- } while (_r == -EINTR); \
- \
- _r; \
-})
+int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+ struct bkey *);
+int bch_btree_insert(struct cache_set *, struct keylist *,
+ atomic_t *, struct bkey *);
+
+int bch_gc_thread_start(struct cache_set *);
+size_t bch_btree_gc_finish(struct cache_set *);
+void bch_moving_gc(struct cache_set *);
+int bch_btree_check(struct cache_set *);
+uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
-static inline bool should_split(struct btree *b)
+static inline void wake_up_gc(struct cache_set *c)
{
- struct bset *i = write_block(b);
- return b->written >= btree_blocks(b) ||
- (i->seq == b->sets[0].data->seq &&
- b->written + __set_blocks(i, i->keys + 15, b->c)
- > btree_blocks(b));
+ if (c->gc_thread)
+ wake_up_process(c->gc_thread);
}
-void bch_btree_node_read(struct btree *);
-void bch_btree_node_write(struct btree *, struct closure *);
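+/*
+ * Return values for btree_map_nodes_fn/btree_map_keys_fn callbacks:
+ * MAP_DONE stops the traversal, MAP_CONTINUE moves on to the next
+ * node or key.
+ */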
+#define MAP_DONE 0
+#define MAP_CONTINUE 1
-void bch_cannibalize_unlock(struct cache_set *, struct closure *);
-void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
- int, struct btree_op *);
+#define MAP_ALL_NODES 0
+#define MAP_LEAF_NODES 1
-bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
- struct bio *);
-int bch_btree_insert(struct btree_op *, struct cache_set *);
+#define MAP_END_KEY 1
-int bch_btree_search_recurse(struct btree *, struct btree_op *);
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+ struct bkey *, btree_map_nodes_fn *, int);
-void bch_queue_gc(struct cache_set *);
-size_t bch_btree_gc_finish(struct cache_set *);
-void bch_moving_gc(struct closure *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
-uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+ struct bkey *from, btree_map_nodes_fn *fn)
+{
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+ struct cache_set *c,
+ struct bkey *from,
+ btree_map_nodes_fn *fn)
+{
+ return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+ struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+ struct bkey *, btree_map_keys_fn *, int);
+
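+/*
+ * Callbacks embed struct btree_op in a larger context and recover it
+ * with container_of() (illustrative sketch, modelled on the refill
+ * code in btree.c):
+ *
+ *	struct count_op {
+ *		struct btree_op	op;
+ *		unsigned	count;
+ *	};
+ *
+ *	static int count_fn(struct btree_op *b_op, struct btree *b,
+ *			    struct bkey *k)
+ *	{
+ *		struct count_op *op = container_of(b_op, struct count_op, op);
+ *
+ *		op->count++;
+ *		return MAP_CONTINUE;
+ *	}
+ */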
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
- keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+ struct bkey *, keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 9aba2017f0d1..dfff2410322e 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -11,17 +11,6 @@
#include "closure.h"
-void closure_queue(struct closure *cl)
-{
- struct workqueue_struct *wq = cl->wq;
- if (wq) {
- INIT_WORK(&cl->work, cl->work.func);
- BUG_ON(!queue_work(wq, &cl->work));
- } else
- cl->fn(cl);
-}
-EXPORT_SYMBOL_GPL(closure_queue);
-
#define CL_FIELD(type, field) \
case TYPE_ ## type: \
return &container_of(cl, struct type, cl)->field
@@ -30,17 +19,6 @@ static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
switch (cl->type) {
CL_FIELD(closure_with_waitlist, wait);
- CL_FIELD(closure_with_waitlist_and_timer, wait);
- default:
- return NULL;
- }
-}
-
-static struct timer_list *closure_timer(struct closure *cl)
-{
- switch (cl->type) {
- CL_FIELD(closure_with_timer, timer);
- CL_FIELD(closure_with_waitlist_and_timer, timer);
default:
return NULL;
}
@@ -51,7 +29,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
int r = flags & CLOSURE_REMAINING_MASK;
BUG_ON(flags & CLOSURE_GUARD_MASK);
- BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+ BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
/* Must deliver precisely one wakeup */
if (r == 1 && (flags & CLOSURE_SLEEPING))
@@ -59,7 +37,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
- /* CLOSURE_BLOCKING might be set - clear it */
atomic_set(&cl->remaining,
CLOSURE_REMAINING_INITIALIZER);
closure_queue(cl);
@@ -90,13 +67,13 @@ void closure_sub(struct closure *cl, int v)
{
closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
-EXPORT_SYMBOL_GPL(closure_sub);
+EXPORT_SYMBOL(closure_sub);
void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
-EXPORT_SYMBOL_GPL(closure_put);
+EXPORT_SYMBOL(closure_put);
static void set_waiting(struct closure *cl, unsigned long f)
{
@@ -133,7 +110,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
-EXPORT_SYMBOL_GPL(__closure_wake_up);
+EXPORT_SYMBOL(__closure_wake_up);
bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{
@@ -146,7 +123,7 @@ bool closure_wait(struct closure_waitlist *list, struct closure *cl)
return true;
}
-EXPORT_SYMBOL_GPL(closure_wait);
+EXPORT_SYMBOL(closure_wait);
/**
* closure_sync() - sleep until a closure has nothing left to wait on
@@ -169,7 +146,7 @@ void closure_sync(struct closure *cl)
__closure_end_sleep(cl);
}
-EXPORT_SYMBOL_GPL(closure_sync);
+EXPORT_SYMBOL(closure_sync);
/**
* closure_trylock() - try to acquire the closure, without waiting
@@ -183,17 +160,17 @@ bool closure_trylock(struct closure *cl, struct closure *parent)
CLOSURE_REMAINING_INITIALIZER) != -1)
return false;
- closure_set_ret_ip(cl);
-
smp_mb();
+
cl->parent = parent;
if (parent)
closure_get(parent);
+ closure_set_ret_ip(cl);
closure_debug_create(cl);
return true;
}
-EXPORT_SYMBOL_GPL(closure_trylock);
+EXPORT_SYMBOL(closure_trylock);
void __closure_lock(struct closure *cl, struct closure *parent,
struct closure_waitlist *wait_list)
@@ -205,57 +182,11 @@ void __closure_lock(struct closure *cl, struct closure *parent,
if (closure_trylock(cl, parent))
return;
- closure_wait_event_sync(wait_list, &wait,
- atomic_read(&cl->remaining) == -1);
+ closure_wait_event(wait_list, &wait,
+ atomic_read(&cl->remaining) == -1);
}
}
-EXPORT_SYMBOL_GPL(__closure_lock);
-
-static void closure_delay_timer_fn(unsigned long data)
-{
- struct closure *cl = (struct closure *) data;
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-
-void do_closure_timer_init(struct closure *cl)
-{
- struct timer_list *timer = closure_timer(cl);
-
- init_timer(timer);
- timer->data = (unsigned long) cl;
- timer->function = closure_delay_timer_fn;
-}
-EXPORT_SYMBOL_GPL(do_closure_timer_init);
-
-bool __closure_delay(struct closure *cl, unsigned long delay,
- struct timer_list *timer)
-{
- if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
- return false;
-
- BUG_ON(timer_pending(timer));
-
- timer->expires = jiffies + delay;
-
- atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
- add_timer(timer);
- return true;
-}
-EXPORT_SYMBOL_GPL(__closure_delay);
-
-void __closure_flush(struct closure *cl, struct timer_list *timer)
-{
- if (del_timer(timer))
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush);
-
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
-{
- if (del_timer_sync(timer))
- closure_sub(cl, CLOSURE_TIMER + 1);
-}
-EXPORT_SYMBOL_GPL(__closure_flush_sync);
+EXPORT_SYMBOL(__closure_lock);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -273,7 +204,7 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL_GPL(closure_debug_create);
+EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -286,7 +217,7 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-EXPORT_SYMBOL_GPL(closure_debug_destroy);
+EXPORT_SYMBOL(closure_debug_destroy);
static struct dentry *debug;
@@ -304,14 +235,12 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s%s%s\n",
+ seq_printf(f, "%s%s%s%s\n",
test_bit(WORK_STRUCT_PENDING,
work_data_bits(&cl->work)) ? "Q" : "",
r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_BLOCKING ? "B" : "",
r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "",
- r & CLOSURE_TIMER ? "T" : "");
+ r & CLOSURE_SLEEPING ? "Sl" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 00039924ea9d..9762f1be3304 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -155,21 +155,6 @@
* delayed_work embeds a work item and a timer_list. The important thing is, use
* it exactly like you would a regular closure and closure_put() will magically
* handle everything for you.
- *
- * We've got closures that embed timers, too. They're called, appropriately
- * enough:
- * struct closure_with_timer;
- *
- * This gives you access to closure_delay(). It takes a refcount for a specified
- * number of jiffies - you could then call closure_sync() (for a slightly
- * convoluted version of msleep()) or continue_at() - which gives you the same
- * effect as using a delayed work item, except you can reuse the work_struct
- * already embedded in struct closure.
- *
- * Lastly, there's struct closure_with_waitlist_and_timer. It does what you
- * probably expect, if you happen to need the features of both. (You don't
- * really want to know how all this is implemented, but if I've done my job
- * right you shouldn't have to care).
*/
struct closure;
@@ -182,16 +167,11 @@ struct closure_waitlist {
enum closure_type {
TYPE_closure = 0,
TYPE_closure_with_waitlist = 1,
- TYPE_closure_with_timer = 2,
- TYPE_closure_with_waitlist_and_timer = 3,
- MAX_CLOSURE_TYPE = 3,
+ MAX_CLOSURE_TYPE = 1,
};
enum closure_state {
/*
- * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
- * waiting asynchronously
- *
* CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
@@ -200,10 +180,6 @@ enum closure_state {
* - indicates that cl->task is valid and closure_put() may wake it up.
* Only set or cleared by the thread that owns the closure.
*
- * CLOSURE_TIMER: Analagous to CLOSURE_WAITING, indicates that a closure
- * has an outstanding timer. Must be set by the thread that owns the
- * closure, and cleared by the timer function when the timer goes off.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -218,19 +194,17 @@ enum closure_state {
* closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 19),
- CLOSURE_DESTRUCTOR = (1 << 19),
- CLOSURE_BLOCKING = (1 << 21),
- CLOSURE_WAITING = (1 << 23),
- CLOSURE_SLEEPING = (1 << 25),
- CLOSURE_TIMER = (1 << 27),
+ CLOSURE_BITS_START = (1 << 23),
+ CLOSURE_DESTRUCTOR = (1 << 23),
+ CLOSURE_WAITING = (1 << 25),
+ CLOSURE_SLEEPING = (1 << 27),
CLOSURE_RUNNING = (1 << 29),
CLOSURE_STACK = (1 << 31),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING| \
- CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
+ CLOSURE_RUNNING|CLOSURE_STACK) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
@@ -268,17 +242,6 @@ struct closure_with_waitlist {
struct closure_waitlist wait;
};
-struct closure_with_timer {
- struct closure cl;
- struct timer_list timer;
-};
-
-struct closure_with_waitlist_and_timer {
- struct closure cl;
- struct closure_waitlist wait;
- struct timer_list timer;
-};
-
extern unsigned invalid_closure_type(void);
#define __CLOSURE_TYPE(cl, _t) \
@@ -289,14 +252,11 @@ extern unsigned invalid_closure_type(void);
( \
__CLOSURE_TYPE(cl, closure) \
__CLOSURE_TYPE(cl, closure_with_waitlist) \
- __CLOSURE_TYPE(cl, closure_with_timer) \
- __CLOSURE_TYPE(cl, closure_with_waitlist_and_timer) \
invalid_closure_type() \
)
void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
-void closure_queue(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void closure_sync(struct closure *cl);
@@ -305,12 +265,6 @@ bool closure_trylock(struct closure *cl, struct closure *parent);
void __closure_lock(struct closure *cl, struct closure *parent,
struct closure_waitlist *wait_list);
-void do_closure_timer_init(struct closure *cl);
-bool __closure_delay(struct closure *cl, unsigned long delay,
- struct timer_list *timer);
-void __closure_flush(struct closure *cl, struct timer_list *timer);
-void __closure_flush_sync(struct closure *cl, struct timer_list *timer);
-
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
void closure_debug_init(void);
@@ -354,11 +308,6 @@ static inline void closure_set_stopped(struct closure *cl)
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}
-static inline bool closure_is_stopped(struct closure *cl)
-{
- return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
-}
-
static inline bool closure_is_unlocked(struct closure *cl)
{
return atomic_read(&cl->remaining) == -1;
@@ -367,14 +316,6 @@ static inline bool closure_is_unlocked(struct closure *cl)
static inline void do_closure_init(struct closure *cl, struct closure *parent,
bool running)
{
- switch (cl->type) {
- case TYPE_closure_with_timer:
- case TYPE_closure_with_waitlist_and_timer:
- do_closure_timer_init(cl);
- default:
- break;
- }
-
cl->parent = parent;
if (parent)
closure_get(parent);
@@ -429,8 +370,7 @@ do { \
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
- CLOSURE_BLOCKING|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
}
/**
@@ -461,24 +401,6 @@ do { \
#define closure_lock(cl, parent) \
__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
-/**
- * closure_delay() - delay some number of jiffies
- * @cl: the closure that will sleep
- * @delay: the delay in jiffies
- *
- * Takes a refcount on @cl which will be released after @delay jiffies; this may
- * be used to have a function run after a delay with continue_at(), or
- * closure_sync() may be used for a convoluted version of msleep().
- */
-#define closure_delay(cl, delay) \
- __closure_delay(__to_internal_closure(cl), delay, &(cl)->timer)
-
-#define closure_flush(cl) \
- __closure_flush(__to_internal_closure(cl), &(cl)->timer)
-
-#define closure_flush_sync(cl) \
- __closure_flush_sync(__to_internal_closure(cl), &(cl)->timer)
-
static inline void __closure_end_sleep(struct closure *cl)
{
__set_current_state(TASK_RUNNING);
@@ -498,40 +420,6 @@ static inline void __closure_start_sleep(struct closure *cl)
}
/**
- * closure_blocking() - returns true if the closure is in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- */
-static inline bool closure_blocking(struct closure *cl)
-{
- return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
-}
-
-/**
- * set_closure_blocking() - put a closure in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- *
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void set_closure_blocking(struct closure *cl)
-{
- if (!closure_blocking(cl))
- atomic_add(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/*
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void clear_closure_blocking(struct closure *cl)
-{
- if (closure_blocking(cl))
- atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/**
* closure_wake_up() - wake up all closures on a wait list.
*/
static inline void closure_wake_up(struct closure_waitlist *list)
@@ -561,63 +449,36 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* refcount on our closure. If this was a stack allocated closure, that would be
* bad.
*/
-#define __closure_wait_event(list, cl, condition, _block) \
+#define closure_wait_event(list, cl, condition) \
({ \
- bool block = _block; \
typeof(condition) ret; \
\
while (1) { \
ret = (condition); \
if (ret) { \
__closure_wake_up(list); \
- if (block) \
- closure_sync(cl); \
- \
+ closure_sync(cl); \
break; \
} \
\
- if (block) \
- __closure_start_sleep(cl); \
- \
- if (!closure_wait(list, cl)) { \
- if (!block) \
- break; \
+ __closure_start_sleep(cl); \
\
+ if (!closure_wait(list, cl)) \
schedule(); \
- } \
} \
\
ret; \
})
-/**
- * closure_wait_event() - wait on a condition, synchronously or asynchronously.
- * @list: the wait list to wait on
- * @cl: the closure that is doing the waiting
- * @condition: a C expression for the event to wait for
- *
- * If the closure is in blocking mode, sleeps until the @condition evaluates to
- * true - exactly like wait_event().
- *
- * If the closure is not in blocking mode, waits asynchronously; if the
- * condition is currently false the @cl is put onto @list and returns. @list
- * owns a refcount on @cl; closure_sync() or continue_at() may be used later to
- * wait for another thread to wake up @list, which drops the refcount on @cl.
- *
- * Returns the value of @condition; @cl will be on @list iff @condition was
- * false.
- *
- * closure_wake_up(@list) must be called after changing any variable that could
- * cause @condition to become true.
- */
-#define closure_wait_event(list, cl, condition) \
- __closure_wait_event(list, cl, condition, closure_blocking(cl))
-
-#define closure_wait_event_async(list, cl, condition) \
- __closure_wait_event(list, cl, condition, false)
-
-#define closure_wait_event_sync(list, cl, condition) \
- __closure_wait_event(list, cl, condition, true)
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
@@ -642,7 +503,7 @@ do { \
#define continue_at_nobarrier(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
- closure_queue(cl); \
+ closure_queue(_cl); \
return; \
} while (0)
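
With CLOSURE_BLOCKING gone, the closure_wait_event() above is always
synchronous: test the condition, enqueue on the wait list, re-test, and sleep,
relying on the waker to call closure_wake_up() after it changes any state the
condition reads. The shape of that lost-wakeup-safe loop, sketched in
userspace with a condition variable standing in for the closure's sleep/wake
machinery (all names below are illustrative):

    /* Sketch only: the kernel parks on the closure refcount, this toy
     * parks on a pthread condvar, but the re-check loop is the same. */
    #include <pthread.h>
    #include <stdbool.h>

    struct toy_waitlist {
            pthread_mutex_t lock;   /* init with PTHREAD_MUTEX_INITIALIZER */
            pthread_cond_t  cond;   /* init with PTHREAD_COND_INITIALIZER  */
    };

    static void toy_wait_event(struct toy_waitlist *w,
                               bool (*cond)(void *), void *arg)
    {
            pthread_mutex_lock(&w->lock);
            while (!cond(arg))      /* re-test: wake-ups may be spurious */
                    pthread_cond_wait(&w->cond, &w->lock);
            pthread_mutex_unlock(&w->lock);
    }

    /* Analogous to closure_wake_up(): call after changing any state
     * the condition depends on. */
    static void toy_wake_up(struct toy_waitlist *w)
    {
            pthread_mutex_lock(&w->lock);
            pthread_cond_broadcast(&w->cond);
            pthread_mutex_unlock(&w->lock);
    }
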
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 88e6411eab4f..264fcfbd6290 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,7 +8,6 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include <linux/console.h>
#include <linux/debugfs.h>
@@ -77,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
return out - buf;
}
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
-{
- return scnprintf(buf, size, "%zu level %i/%i",
- PTR_BUCKET_NR(b->c, &b->key, 0),
- b->level, b->c->root ? b->c->root->level : -1);
-}
-
-#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
-
-static bool skipped_backwards(struct btree *b, struct bkey *k)
-{
- return bkey_cmp(k, (!b->level)
- ? &START_KEY(bkey_next(k))
- : bkey_next(k)) > 0;
-}
+#ifdef CONFIG_BCACHE_DEBUG
static void dump_bset(struct btree *b, struct bset *i)
{
- struct bkey *k;
+ struct bkey *k, *next;
unsigned j;
char buf[80];
- for (k = i->start; k < end(i); k = bkey_next(k)) {
+ for (k = i->start; k < end(i); k = next) {
+ next = bkey_next(k);
+
bch_bkey_to_text(buf, sizeof(buf), k);
printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
(uint64_t *) k - i->d, i->keys, buf);
@@ -115,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i)
printk(" %s\n", bch_ptr_status(b->c, k));
- if (bkey_next(k) < end(i) &&
- skipped_backwards(b, k))
+ if (next < end(i) &&
+ bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
printk(KERN_ERR "Key skipped backwards\n");
}
}
-#endif
+static void bch_dump_bucket(struct btree *b)
+{
+ unsigned i;
-#ifdef CONFIG_BCACHE_DEBUG
+ console_lock();
+ for (i = 0; i <= b->nsets; i++)
+ dump_bset(b, b->sets[i].data);
+ console_unlock();
+}
void bch_btree_verify(struct btree *b, struct bset *new)
{
@@ -176,66 +169,44 @@ void bch_btree_verify(struct btree *b, struct bset *new)
mutex_unlock(&b->c->verify_lock);
}
-static void data_verify_endio(struct bio *bio, int error)
-{
- struct closure *cl = bio->bi_private;
- closure_put(cl);
-}
-
-void bch_data_verify(struct search *s)
+void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- struct closure *cl = &s->cl;
struct bio *check;
struct bio_vec *bv;
int i;
- if (!s->unaligned_bvec)
- bio_for_each_segment(bv, s->orig_bio, i)
- bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-
- check = bio_clone(s->orig_bio, GFP_NOIO);
+ check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
- check->bi_rw = READ_SYNC;
- check->bi_private = cl;
- check->bi_end_io = data_verify_endio;
-
- closure_bio_submit(check, cl, &dc->disk);
- closure_sync(cl);
+ submit_bio_wait(READ_SYNC, check);
- bio_for_each_segment(bv, s->orig_bio, i) {
- void *p1 = kmap(bv->bv_page);
- void *p2 = kmap(check->bi_io_vec[i].bv_page);
+ bio_for_each_segment(bv, bio, i) {
+ void *p1 = kmap_atomic(bv->bv_page);
+ void *p2 = page_address(check->bi_io_vec[i].bv_page);
- if (memcmp(p1 + bv->bv_offset,
- p2 + bv->bv_offset,
- bv->bv_len))
- printk(KERN_ERR
- "bcache (%s): verify failed at sector %llu\n",
- bdevname(dc->bdev, name),
- (uint64_t) s->orig_bio->bi_sector);
+ cache_set_err_on(memcmp(p1 + bv->bv_offset,
+ p2 + bv->bv_offset,
+ bv->bv_len),
+ dc->disk.c,
+ "verify failed at dev %s sector %llu",
+ bdevname(dc->bdev, name),
+ (uint64_t) bio->bi_sector);
- kunmap(bv->bv_page);
- kunmap(check->bi_io_vec[i].bv_page);
+ kunmap_atomic(p1);
}
- __bio_for_each_segment(bv, check, i, 0)
+ bio_for_each_segment_all(bv, check, i)
__free_page(bv->bv_page);
out_put:
bio_put(check);
}
-#endif
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *b)
+int __bch_count_data(struct btree *b)
{
unsigned ret = 0;
struct btree_iter iter;
@@ -247,72 +218,60 @@ unsigned bch_count_data(struct btree *b)
return ret;
}
-static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
- va_list args)
-{
- unsigned i;
- char buf[80];
-
- console_lock();
-
- for (i = 0; i <= b->nsets; i++)
- dump_bset(b, b->sets[i].data);
-
- vprintk(fmt, args);
-
- console_unlock();
-
- bch_btree_to_text(buf, sizeof(buf), b);
- panic("at %s\n", buf);
-}
-
-void bch_check_key_order_msg(struct btree *b, struct bset *i,
- const char *fmt, ...)
-{
- struct bkey *k;
-
- if (!i->keys)
- return;
-
- for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
- if (skipped_backwards(b, k)) {
- va_list args;
- va_start(args, fmt);
-
- vdump_bucket_and_panic(b, fmt, args);
- va_end(args);
- }
-}
-
-void bch_check_keys(struct btree *b, const char *fmt, ...)
+void __bch_check_keys(struct btree *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
struct btree_iter iter;
-
- if (b->level)
- return;
+ const char *err;
for_each_key(b, k, &iter) {
- if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
- printk(KERN_ERR "Keys out of order:\n");
- goto bug;
- }
-
- if (bch_ptr_invalid(b, k))
- continue;
-
- if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
- printk(KERN_ERR "Overlapping keys:\n");
- goto bug;
+ if (!b->level) {
+ err = "Keys out of order";
+ if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+ goto bug;
+
+ if (bch_ptr_invalid(b, k))
+ continue;
+
+ err = "Overlapping keys";
+ if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+ goto bug;
+ } else {
+ if (bch_ptr_bad(b, k))
+ continue;
+
+ err = "Duplicate keys";
+ if (p && !bkey_cmp(p, k))
+ goto bug;
}
p = k;
}
+
+ err = "Key larger than btree node key";
+ if (p && bkey_cmp(p, &b->key) > 0)
+ goto bug;
+
return;
bug:
+ bch_dump_bucket(b);
+
va_start(args, fmt);
- vdump_bucket_and_panic(b, fmt, args);
+ vprintk(fmt, args);
va_end(args);
+
+ panic("bcache error: %s:\n", err);
+}
+
+void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+ struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+ if (next < iter->data->end &&
+ bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
+ bch_dump_bucket(iter->b);
+ panic("Key skipped backwards\n");
+ }
}
#endif
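
The rewritten __bch_check_keys() above folds the old EDEBUG checks into one
pass over a btree node: leaf keys must be sorted and non-overlapping, interior
keys must not repeat, and no key may extend past the node's own key. A
simplified sketch of the leaf-node invariants over plain [start, end) extents
(bkeys are keyed by their end offset; the names below are illustrative):

    #include <stdint.h>

    struct extent { uint64_t start, end; };   /* end plays the bkey offset */

    /* Returns NULL if the invariants hold, else the failing check --
     * the same three error strings the kernel panics with. */
    static const char *check_extents(const struct extent *k, int n,
                                     uint64_t node_end)
    {
            for (int i = 1; i < n; i++) {
                    if (k[i].start < k[i - 1].start)
                            return "Keys out of order";
                    if (k[i].start < k[i - 1].end)
                            return "Overlapping keys";
            }
            if (n && k[n - 1].end > node_end)
                    return "Key larger than btree node key";
            return NULL;
    }
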
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 1c39b5a2489b..2ede60e31874 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -4,40 +4,44 @@
/* Btree/bkey debug printing */
int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *);
-void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...);
-void bch_check_keys(struct btree *, const char *, ...);
-
-#define bch_check_key_order(b, i) \
- bch_check_key_order_msg(b, i, "keys out of order")
-#define EBUG_ON(cond) BUG_ON(cond)
-
-#else /* EDEBUG */
-
-#define bch_count_data(b) 0
-#define bch_check_key_order(b, i) do {} while (0)
-#define bch_check_key_order_msg(b, i, ...) do {} while (0)
-#define bch_check_keys(b, ...) do {} while (0)
-#define EBUG_ON(cond) do {} while (0)
-
-#endif
#ifdef CONFIG_BCACHE_DEBUG
void bch_btree_verify(struct btree *, struct bset *);
-void bch_data_verify(struct search *);
+void bch_data_verify(struct cached_dev *, struct bio *);
+int __bch_count_data(struct btree *);
+void __bch_check_keys(struct btree *, const char *, ...);
+void bch_btree_iter_next_check(struct btree_iter *);
+
+#define EBUG_ON(cond) BUG_ON(cond)
+#define expensive_debug_checks(c) ((c)->expensive_debug_checks)
+#define key_merging_disabled(c) ((c)->key_merging_disabled)
+#define bypass_torture_test(d) ((d)->bypass_torture_test)
#else /* DEBUG */
static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
-static inline void bch_data_verify(struct search *s) {};
+static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
+static inline int __bch_count_data(struct btree *b) { return -1; }
+static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#define EBUG_ON(cond) do { if (cond); } while (0)
+#define expensive_debug_checks(c) 0
+#define key_merging_disabled(c) 0
+#define bypass_torture_test(d) 0
#endif
+#define bch_count_data(b) \
+ (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
+
+#define bch_check_keys(b, ...) \
+do { \
+ if (expensive_debug_checks((b)->c)) \
+ __bch_check_keys(b, __VA_ARGS__); \
+} while (0)
+
#ifdef CONFIG_DEBUG_FS
void bch_debug_init_cache_set(struct cache_set *);
#else
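
The new debug.h trades the CONFIG_BCACHE_EDEBUG compile-time switch for a
per-cache-set runtime flag: bch_count_data() and bch_check_keys() only call
the expensive __bch_* versions when expensive_debug_checks(c) is set, and with
CONFIG_BCACHE_DEBUG off they compile away entirely. The same gating pattern in
generic form (MYDEBUG and every name below are illustrative, not bcache's):

    #include <stdbool.h>

    struct cfg { bool expensive_checks; };

    #ifdef MYDEBUG
    #define expensive_checks_enabled(c)  ((c)->expensive_checks)
    static void __check_invariants(void)
    {
            /* the slow, exhaustive consistency walk would live here */
    }
    #else
    #define expensive_checks_enabled(c)  0
    static inline void __check_invariants(void) {}
    #endif

    /* Callers always use the wrapper: with MYDEBUG off it is a no-op,
     * with it on the cost is one branch unless the flag is set. */
    #define check_invariants(c)                       \
    do {                                              \
            if (expensive_checks_enabled(c))          \
                    __check_invariants();             \
    } while (0)
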
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8435f81e5d85..ecdaa671bd50 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -7,7 +7,6 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
-#include "request.h"
#include <trace/events/bcache.h>
@@ -31,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error)
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
- struct btree_op *op, unsigned bucket_index)
+ unsigned bucket_index)
{
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio;
struct journal_replay *i;
struct jset *j, *data = ca->set->journal.w[0].data;
+ struct closure cl;
unsigned len, left, offset = 0;
int ret = 0;
sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
+ closure_init_stack(&cl);
+
pr_debug("reading %llu", (uint64_t) bucket);
while (offset < ca->sb.bucket_size) {
@@ -55,11 +57,11 @@ reread: left = ca->sb.bucket_size - offset;
bio->bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
- bio->bi_private = &op->cl;
+ bio->bi_private = &cl;
bch_bio_map(bio, data);
- closure_bio_submit(bio, &op->cl, ca);
- closure_sync(&op->cl);
+ closure_bio_submit(bio, &cl, ca);
+ closure_sync(&cl);
/* This function could be simpler now since we no longer write
* journal entries that overlap bucket boundaries; this means
@@ -72,7 +74,7 @@ reread: left = ca->sb.bucket_size - offset;
struct list_head *where;
size_t blocks, bytes = set_bytes(j);
- if (j->magic != jset_magic(ca->set))
+ if (j->magic != jset_magic(&ca->sb))
return ret;
if (bytes > left << 9)
@@ -129,12 +131,11 @@ next_set:
return ret;
}
-int bch_journal_read(struct cache_set *c, struct list_head *list,
- struct btree_op *op)
+int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b) \
({ \
- int ret = journal_read_bucket(ca, list, op, b); \
+ int ret = journal_read_bucket(ca, list, b); \
__set_bit(b, bitmap); \
if (ret < 0) \
return ret; \
@@ -292,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
}
-int bch_journal_replay(struct cache_set *s, struct list_head *list,
- struct btree_op *op)
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
int ret = 0, keys = 0, entries = 0;
struct bkey *k;
@@ -301,31 +301,30 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
list_entry(list->prev, struct journal_replay, list);
uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
+ struct keylist keylist;
+
+ bch_keylist_init(&keylist);
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
- if (n != i->j.seq)
- pr_err(
- "journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
- n, i->j.seq - 1, start, end);
+ cache_set_err_on(n != i->j.seq, s,
+"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
for (k = i->j.start;
k < end(&i->j);
k = bkey_next(k)) {
trace_bcache_journal_replay_key(k);
- bkey_copy(op->keys.top, k);
- bch_keylist_push(&op->keys);
-
- op->journal = i->pin;
- atomic_inc(op->journal);
+ bkey_copy(keylist.top, k);
+ bch_keylist_push(&keylist);
- ret = bch_btree_insert(op, s);
+ ret = bch_btree_insert(s, &keylist, i->pin, NULL);
if (ret)
goto err;
- BUG_ON(!bch_keylist_empty(&op->keys));
+ BUG_ON(!bch_keylist_empty(&keylist));
keys++;
cond_resched();
@@ -339,14 +338,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
pr_info("journal replay done, %i keys in %i entries, seq %llu",
keys, entries, end);
-
+err:
while (!list_empty(list)) {
i = list_first_entry(list, struct journal_replay, list);
list_del(&i->list);
kfree(i);
}
-err:
- closure_sync(&op->cl);
+
return ret;
}
@@ -358,48 +356,35 @@ static void btree_flush_write(struct cache_set *c)
 * Try to find the btree node that references the oldest journal
 * entry; best is our current candidate, and is locked if non-NULL:
*/
- struct btree *b, *best = NULL;
- unsigned iter;
+ struct btree *b, *best;
+ unsigned i;
+retry:
+ best = NULL;
+
+ for_each_cached_btree(b, c, i)
+ if (btree_current_write(b)->journal) {
+ if (!best)
+ best = b;
+ else if (journal_pin_cmp(c,
+ btree_current_write(best)->journal,
+ btree_current_write(b)->journal)) {
+ best = b;
+ }
+ }
- for_each_cached_btree(b, c, iter) {
- if (!down_write_trylock(&b->lock))
- continue;
+ b = best;
+ if (b) {
+ rw_lock(true, b, b->level);
- if (!btree_node_dirty(b) ||
- !btree_current_write(b)->journal) {
+ if (!btree_current_write(b)->journal) {
rw_unlock(true, b);
- continue;
+ /* We raced */
+ goto retry;
}
- if (!best)
- best = b;
- else if (journal_pin_cmp(c,
- btree_current_write(best),
- btree_current_write(b))) {
- rw_unlock(true, best);
- best = b;
- } else
- rw_unlock(true, b);
+ bch_btree_node_write(b, NULL);
+ rw_unlock(true, b);
}
-
- if (best)
- goto out;
-
- /* We can't find the best btree node, just pick the first */
- list_for_each_entry(b, &c->btree_cache, list)
- if (!b->level && btree_node_dirty(b)) {
- best = b;
- rw_lock(true, best, best->level);
- goto found;
- }
-
-out:
- if (!best)
- return;
-found:
- if (btree_node_dirty(best))
- bch_btree_node_write(best, NULL);
- rw_unlock(true, best);
}
#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
@@ -495,7 +480,7 @@ static void journal_reclaim(struct cache_set *c)
do_journal_discard(ca);
if (c->journal.blocks_free)
- return;
+ goto out;
/*
* Allocate:
@@ -521,7 +506,7 @@ static void journal_reclaim(struct cache_set *c)
if (n)
c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-
+out:
if (!journal_full(&c->journal))
__closure_wake_up(&c->journal.wait);
}
@@ -554,32 +539,26 @@ static void journal_write_endio(struct bio *bio, int error)
struct journal_write *w = bio->bi_private;
cache_set_err_on(error, w->c, "journal io error");
- closure_put(&w->c->journal.io.cl);
+ closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *);
static void journal_write_done(struct closure *cl)
{
- struct journal *j = container_of(cl, struct journal, io.cl);
- struct cache_set *c = container_of(j, struct cache_set, journal);
-
+ struct journal *j = container_of(cl, struct journal, io);
struct journal_write *w = (j->cur == j->w)
? &j->w[1]
: &j->w[0];
__closure_wake_up(&w->wait);
-
- if (c->journal_delay_ms)
- closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));
-
- continue_at(cl, journal_write, system_wq);
+ continue_at_nobarrier(cl, journal_write, system_wq);
}
static void journal_write_unlocked(struct closure *cl)
__releases(c->journal.lock)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
@@ -617,7 +596,7 @@ static void journal_write_unlocked(struct closure *cl)
for_each_cache(ca, c, i)
w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
- w->data->magic = jset_magic(c);
+ w->data->magic = jset_magic(&c->sb);
w->data->version = BCACHE_JSET_VERSION;
w->data->last_seq = last_seq(&c->journal);
w->data->csum = csum_set(w->data);
@@ -660,121 +639,134 @@ static void journal_write_unlocked(struct closure *cl)
static void journal_write(struct closure *cl)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+ struct cache_set *c = container_of(cl, struct cache_set, journal.io);
spin_lock(&c->journal.lock);
journal_write_unlocked(cl);
}
-static void __journal_try_write(struct cache_set *c, bool noflush)
+static void journal_try_write(struct cache_set *c)
__releases(c->journal.lock)
{
- struct closure *cl = &c->journal.io.cl;
+ struct closure *cl = &c->journal.io;
+ struct journal_write *w = c->journal.cur;
- if (!closure_trylock(cl, &c->cl))
- spin_unlock(&c->journal.lock);
- else if (noflush && journal_full(&c->journal)) {
- spin_unlock(&c->journal.lock);
- continue_at(cl, journal_write, system_wq);
- } else
+ w->need_write = true;
+
+ if (closure_trylock(cl, &c->cl))
journal_write_unlocked(cl);
+ else
+ spin_unlock(&c->journal.lock);
}
-#define journal_try_write(c) __journal_try_write(c, false)
-
-void bch_journal_meta(struct cache_set *c, struct closure *cl)
+static struct journal_write *journal_wait_for_write(struct cache_set *c,
+ unsigned nkeys)
{
- struct journal_write *w;
+ size_t sectors;
+ struct closure cl;
- if (CACHE_SYNC(&c->sb)) {
- spin_lock(&c->journal.lock);
+ closure_init_stack(&cl);
+
+ spin_lock(&c->journal.lock);
- w = c->journal.cur;
- w->need_write = true;
+ while (1) {
+ struct journal_write *w = c->journal.cur;
- if (cl)
- BUG_ON(!closure_wait(&w->wait, cl));
+ sectors = __set_blocks(w->data, w->data->keys + nkeys,
+ c) * c->sb.block_size;
- closure_flush(&c->journal.io);
- __journal_try_write(c, true);
+ if (sectors <= min_t(size_t,
+ c->journal.blocks_free * c->sb.block_size,
+ PAGE_SECTORS << JSET_BITS))
+ return w;
+
+ /* XXX: tracepoint */
+ if (!journal_full(&c->journal)) {
+ trace_bcache_journal_entry_full(c);
+
+ /*
+ * XXX: If we were inserting so many keys that they
+ * won't fit in an _empty_ journal write, we'll
+ * deadlock. For now, handle this in
+ * bch_keylist_realloc() - but something to think about.
+ */
+ BUG_ON(!w->data->keys);
+
+ closure_wait(&w->wait, &cl);
+ journal_try_write(c); /* unlocks */
+ } else {
+ trace_bcache_journal_full(c);
+
+ closure_wait(&c->journal.wait, &cl);
+ journal_reclaim(c);
+ spin_unlock(&c->journal.lock);
+
+ btree_flush_write(c);
+ }
+
+ closure_sync(&cl);
+ spin_lock(&c->journal.lock);
}
}
+static void journal_write_work(struct work_struct *work)
+{
+ struct cache_set *c = container_of(to_delayed_work(work),
+ struct cache_set,
+ journal.work);
+ spin_lock(&c->journal.lock);
+ journal_try_write(c);
+}
+
/*
* Entry point to the journalling code - bio_insert() and btree_invalidate()
* pass bch_journal() a list of keys to be journalled, and then
* bch_journal() hands those same keys off to btree_insert_async()
*/
-void bch_journal(struct closure *cl)
+atomic_t *bch_journal(struct cache_set *c,
+ struct keylist *keys,
+ struct closure *parent)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct cache_set *c = op->c;
struct journal_write *w;
- size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;
-
- if (op->type != BTREE_INSERT ||
- !CACHE_SYNC(&c->sb))
- goto out;
+ atomic_t *ret;
- /*
- * If we're looping because we errored, might already be waiting on
- * another journal write:
- */
- while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
- closure_sync(cl->parent);
+ if (!CACHE_SYNC(&c->sb))
+ return NULL;
- spin_lock(&c->journal.lock);
+ w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
- if (journal_full(&c->journal)) {
- trace_bcache_journal_full(c);
+ memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+ w->data->keys += bch_keylist_nkeys(keys);
- closure_wait(&c->journal.wait, cl);
+ ret = &fifo_back(&c->journal.pin);
+ atomic_inc(ret);
- journal_reclaim(c);
+ if (parent) {
+ closure_wait(&w->wait, parent);
+ journal_try_write(c);
+ } else if (!w->need_write) {
+ schedule_delayed_work(&c->journal.work,
+ msecs_to_jiffies(c->journal_delay_ms));
+ spin_unlock(&c->journal.lock);
+ } else {
spin_unlock(&c->journal.lock);
-
- btree_flush_write(c);
- continue_at(cl, bch_journal, bcache_wq);
}
- w = c->journal.cur;
- w->need_write = true;
- b = __set_blocks(w->data, w->data->keys + n, c);
-
- if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
- b > c->journal.blocks_free) {
- trace_bcache_journal_entry_full(c);
-
- /*
- * XXX: If we were inserting so many keys that they won't fit in
- * an _empty_ journal write, we'll deadlock. For now, handle
- * this in bch_keylist_realloc() - but something to think about.
- */
- BUG_ON(!w->data->keys);
-
- BUG_ON(!closure_wait(&w->wait, cl));
-
- closure_flush(&c->journal.io);
- journal_try_write(c);
- continue_at(cl, bch_journal, bcache_wq);
- }
-
- memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
- w->data->keys += n;
+ return ret;
+}
- op->journal = &fifo_back(&c->journal.pin);
- atomic_inc(op->journal);
+void bch_journal_meta(struct cache_set *c, struct closure *cl)
+{
+ struct keylist keys;
+ atomic_t *ref;
- if (op->flush_journal) {
- closure_flush(&c->journal.io);
- closure_wait(&w->wait, cl->parent);
- }
+ bch_keylist_init(&keys);
- journal_try_write(c);
-out:
- bch_btree_insert_async(cl);
+ ref = bch_journal(c, &keys, cl);
+ if (ref)
+ atomic_dec_bug(ref);
}
void bch_journal_free(struct cache_set *c)
@@ -790,6 +782,7 @@ int bch_journal_alloc(struct cache_set *c)
closure_init_unlocked(&j->io);
spin_lock_init(&j->lock);
+ INIT_DELAYED_WORK(&j->work, journal_write_work);
c->journal_delay_ms = 100;
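
The tail of the new bch_journal() decides how urgently the open journal entry
is written: a caller that passed a parent closure wants durability now, the
first asynchronous key into an idle entry arms the journal_delay_ms delayed
work (100 ms by default) so small writes batch, and otherwise a flush is
already marked pending and the lock is simply dropped. A sketch of just that
branch structure; the enum and names are illustrative:

    #include <stdbool.h>

    enum journal_action {
            WRITE_NOW,              /* -> journal_try_write()       */
            ARM_DELAYED_FLUSH,      /* -> schedule_delayed_work()   */
            ALREADY_PENDING,        /* flush queued; just unlock    */
    };

    static enum journal_action
    journal_commit_policy(bool have_parent, bool entry_marked_for_write)
    {
            if (have_parent)
                    return WRITE_NOW;
            if (!entry_marked_for_write)
                    return ARM_DELAYED_FLUSH;
            return ALREADY_PENDING;
    }
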
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index 3d7851274b04..a6472fda94b2 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -75,43 +75,6 @@
* nodes that are pinning the oldest journal entries first.
*/
-#define BCACHE_JSET_VERSION_UUIDv1 1
-/* Always latest UUID format */
-#define BCACHE_JSET_VERSION_UUID 1
-#define BCACHE_JSET_VERSION 1
-
-/*
- * On disk format for a journal entry:
- * seq is monotonically increasing; every journal entry has its own unique
- * sequence number.
- *
- * last_seq is the oldest journal entry that still has keys the btree hasn't
- * flushed to disk yet.
- *
- * version is for on disk format changes.
- */
-struct jset {
- uint64_t csum;
- uint64_t magic;
- uint64_t seq;
- uint32_t version;
- uint32_t keys;
-
- uint64_t last_seq;
-
- BKEY_PADDED(uuid_bucket);
- BKEY_PADDED(btree_root);
- uint16_t btree_level;
- uint16_t pad[3];
-
- uint64_t prio_bucket[MAX_CACHES_PER_SET];
-
- union {
- struct bkey start[0];
- uint64_t d[0];
- };
-};
-
/*
* Only used for holding the journal entries we read in btree_journal_read()
* during cache_registration
@@ -140,7 +103,8 @@ struct journal {
spinlock_t lock;
/* used when waiting because the journal was full */
struct closure_waitlist wait;
- struct closure_with_timer io;
+ struct closure io;
+ struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */
unsigned blocks_free;
@@ -188,8 +152,7 @@ struct journal_device {
};
#define journal_pin_cmp(c, l, r) \
- (fifo_idx(&(c)->journal.pin, (l)->journal) > \
- fifo_idx(&(c)->journal.pin, (r)->journal))
+ (fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
#define JOURNAL_PIN 20000
@@ -199,15 +162,14 @@ struct journal_device {
struct closure;
struct cache_set;
struct btree_op;
+struct keylist;
-void bch_journal(struct closure *);
+atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
void bch_journal_next(struct journal *);
void bch_journal_mark(struct cache_set *, struct list_head *);
void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *,
- struct btree_op *);
-int bch_journal_replay(struct cache_set *, struct list_head *,
- struct btree_op *);
+int bch_journal_read(struct cache_set *, struct list_head *);
+int bch_journal_replay(struct cache_set *, struct list_head *);
void bch_journal_free(struct cache_set *);
int bch_journal_alloc(struct cache_set *);
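
The journal's last_seq() falls straight out of the pin FIFO: every journal
entry that still has unflushed keys in the btree occupies one FIFO slot, so
the oldest entry that must be kept is seq - fifo_used + 1. A small worked
example of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* last_seq(j) == (j)->seq - fifo_used(&(j)->pin) + 1 */
    static uint64_t toy_last_seq(uint64_t seq, unsigned pin_fifo_used)
    {
            return seq - pin_fifo_used + 1;
    }

    int main(void)
    {
            assert(toy_last_seq(130, 4) == 127); /* 127..130 still pinned */
            assert(toy_last_seq(130, 1) == 130); /* only the open entry   */
            return 0;
    }
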
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 1a3b4f4786c3..f2f0998c4a91 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -12,8 +12,9 @@
#include <trace/events/bcache.h>
struct moving_io {
+ struct closure cl;
struct keybuf_key *w;
- struct search s;
+ struct data_insert_op op;
struct bbio bio;
};
@@ -24,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
unsigned i;
for (i = 0; i < KEY_PTRS(k); i++) {
- struct cache *ca = PTR_CACHE(c, k, i);
struct bucket *g = PTR_BUCKET(c, k, i);
- if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+ if (GC_MOVE(g))
return true;
}
@@ -38,13 +38,13 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
static void moving_io_destructor(struct closure *cl)
{
- struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
kfree(io);
}
static void write_moving_finish(struct closure *cl)
{
- struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
struct bio_vec *bv;
int i;
@@ -52,26 +52,30 @@ static void write_moving_finish(struct closure *cl)
bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page);
- if (io->s.op.insert_collision)
+ if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key);
- bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+ bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);
- atomic_dec_bug(&io->s.op.c->in_flight);
- closure_wake_up(&io->s.op.c->moving_gc_wait);
+ up(&io->op.c->moving_in_flight);
closure_return_with_destructor(cl, moving_io_destructor);
}
static void read_moving_endio(struct bio *bio, int error)
{
+ struct bbio *b = container_of(bio, struct bbio, bio);
struct moving_io *io = container_of(bio->bi_private,
- struct moving_io, s.cl);
+ struct moving_io, cl);
if (error)
- io->s.error = error;
+ io->op.error = error;
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(io->op.c, &b->key, 0)) {
+ io->op.error = -EINTR;
+ }
- bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+ bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
static void moving_init(struct moving_io *io)
@@ -85,54 +89,53 @@ static void moving_init(struct moving_io *io)
bio->bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS);
- bio->bi_private = &io->s.cl;
+ bio->bi_private = &io->cl;
bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
static void write_moving(struct closure *cl)
{
- struct search *s = container_of(cl, struct search, cl);
- struct moving_io *io = container_of(s, struct moving_io, s);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
+ struct data_insert_op *op = &io->op;
- if (!s->error) {
+ if (!op->error) {
moving_init(io);
- io->bio.bio.bi_sector = KEY_START(&io->w->key);
- s->op.lock = -1;
- s->op.write_prio = 1;
- s->op.cache_bio = &io->bio.bio;
+ io->bio.bio.bi_sector = KEY_START(&io->w->key);
+ op->write_prio = 1;
+ op->bio = &io->bio.bio;
- s->writeback = KEY_DIRTY(&io->w->key);
- s->op.csum = KEY_CSUM(&io->w->key);
+ op->writeback = KEY_DIRTY(&io->w->key);
+ op->csum = KEY_CSUM(&io->w->key);
- s->op.type = BTREE_REPLACE;
- bkey_copy(&s->op.replace, &io->w->key);
+ bkey_copy(&op->replace_key, &io->w->key);
+ op->replace = true;
- closure_init(&s->op.cl, cl);
- bch_insert_data(&s->op.cl);
+ closure_call(&op->cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, write_moving_finish, NULL);
+ continue_at(cl, write_moving_finish, system_wq);
}
static void read_moving_submit(struct closure *cl)
{
- struct search *s = container_of(cl, struct search, cl);
- struct moving_io *io = container_of(s, struct moving_io, s);
+ struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
- bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+ bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
- continue_at(cl, write_moving, bch_gc_wq);
+ continue_at(cl, write_moving, system_wq);
}
-static void read_moving(struct closure *cl)
+static void read_moving(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
struct keybuf_key *w;
struct moving_io *io;
struct bio *bio;
+ struct closure cl;
+
+ closure_init_stack(&cl);
/* XXX: if we error, background writeback could stall indefinitely */
@@ -142,6 +145,11 @@ static void read_moving(struct closure *cl)
if (!w)
break;
+ if (ptr_stale(c, &w->key, 0)) {
+ bch_keybuf_del(&c->moving_gc_keys, w);
+ continue;
+ }
+
io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
* DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
GFP_KERNEL);
@@ -150,8 +158,8 @@ static void read_moving(struct closure *cl)
w->private = io;
io->w = w;
- io->s.op.inode = KEY_INODE(&w->key);
- io->s.op.c = c;
+ io->op.inode = KEY_INODE(&w->key);
+ io->op.c = c;
moving_init(io);
bio = &io->bio.bio;
@@ -164,13 +172,8 @@ static void read_moving(struct closure *cl)
trace_bcache_gc_copy(&w->key);
- closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
-
- if (atomic_inc_return(&c->in_flight) >= 64) {
- closure_wait_event(&c->moving_gc_wait, cl,
- atomic_read(&c->in_flight) < 64);
- continue_at(cl, read_moving, bch_gc_wq);
- }
+ down(&c->moving_in_flight);
+ closure_call(&io->cl, read_moving_submit, NULL, &cl);
}
if (0) {
@@ -180,7 +183,7 @@ err: if (!IS_ERR_OR_NULL(w->private))
bch_keybuf_del(&c->moving_gc_keys, w);
}
- closure_return(cl);
+ closure_sync(&cl);
}
static bool bucket_cmp(struct bucket *l, struct bucket *r)
@@ -190,18 +193,18 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
static unsigned bucket_heap_top(struct cache *ca)
{
- return GC_SECTORS_USED(heap_peek(&ca->heap));
+ struct bucket *b;
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
-void bch_moving_gc(struct closure *cl)
+void bch_moving_gc(struct cache_set *c)
{
- struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
struct cache *ca;
struct bucket *b;
unsigned i;
if (!c->copy_gc_enabled)
- closure_return(cl);
+ return;
mutex_lock(&c->bucket_lock);
@@ -233,22 +236,19 @@ void bch_moving_gc(struct closure *cl)
sectors_to_move -= GC_SECTORS_USED(b);
}
- ca->gc_move_threshold = bucket_heap_top(ca);
-
- pr_debug("threshold %u", ca->gc_move_threshold);
+ while (heap_pop(&ca->heap, b, bucket_cmp))
+ SET_GC_MOVE(b, 1);
}
mutex_unlock(&c->bucket_lock);
c->moving_gc_keys.last_scanned = ZERO_KEY;
- closure_init(&c->moving_gc, cl);
- read_moving(&c->moving_gc);
-
- closure_return(cl);
+ read_moving(c);
}
void bch_moving_init_cache_set(struct cache_set *c)
{
bch_keybuf_init(&c->moving_gc_keys);
+ sema_init(&c->moving_in_flight, 64);
}
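
bch_moving_gc() above now tags candidate buckets with GC_MOVE instead of
publishing a per-cache gc_move_threshold, so moving_pred() selects keys by
bucket flag alone. The selection itself is a budgeted pick of the emptiest
partially-used buckets; a simplified sketch with an array and qsort in place
of the kernel's heap (the halved budget and all names are illustrative):

    #include <stdlib.h>

    struct toy_bucket { unsigned sectors_used; int move; };

    static int cmp_used(const void *a, const void *b)
    {
            const struct toy_bucket *l = a, *r = b;
            return (l->sectors_used > r->sectors_used) -
                   (l->sectors_used < r->sectors_used);
    }

    static void pick_buckets_to_move(struct toy_bucket *b, int n,
                                     unsigned reclaimable_sectors)
    {
            unsigned budget = reclaimable_sectors / 2;  /* leave slack */

            qsort(b, n, sizeof(*b), cmp_used);          /* emptiest first */

            for (int i = 0; i < n && b[i].sectors_used <= budget; i++) {
                    budget -= b[i].sectors_used;
                    b[i].move = 1;      /* analogous to SET_GC_MOVE(b, 1) */
            }
    }
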
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 2a7f0dd6abab..fbcc851ed5a5 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
struct kmem_cache *bch_search_cache;
-static void check_should_skip(struct cached_dev *, struct search *);
+static void bch_data_insert_start(struct closure *);
/* Cgroup interface */
@@ -213,221 +213,79 @@ static void bio_csum(struct bio *bio, struct bkey *k)
/* Insert data into cache */
-static void bio_invalidate(struct closure *cl)
+static void bch_data_insert_keys(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct bio *bio = op->cache_bio;
-
- pr_debug("invalidating %i sectors from %llu",
- bio_sectors(bio), (uint64_t) bio->bi_sector);
-
- while (bio_sectors(bio)) {
- unsigned len = min(bio_sectors(bio), 1U << 14);
-
- if (bch_keylist_realloc(&op->keys, 0, op->c))
- goto out;
-
- bio->bi_sector += len;
- bio->bi_size -= len << 9;
-
- bch_keylist_add(&op->keys,
- &KEY(op->inode, bio->bi_sector, len));
- }
-
- op->insert_data_done = true;
- bio_put(bio);
-out:
- continue_at(cl, bch_journal, bcache_wq);
-}
-
-struct open_bucket {
- struct list_head list;
- struct task_struct *last;
- unsigned sectors_free;
- BKEY_PADDED(key);
-};
-
-void bch_open_buckets_free(struct cache_set *c)
-{
- struct open_bucket *b;
-
- while (!list_empty(&c->data_buckets)) {
- b = list_first_entry(&c->data_buckets,
- struct open_bucket, list);
- list_del(&b->list);
- kfree(b);
- }
-}
-
-int bch_open_buckets_alloc(struct cache_set *c)
-{
- int i;
-
- spin_lock_init(&c->data_bucket_lock);
-
- for (i = 0; i < 6; i++) {
- struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
- if (!b)
- return -ENOMEM;
-
- list_add(&b->list, &c->data_buckets);
- }
-
- return 0;
-}
-
-/*
- * We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
- *
- * The ideas is if you've got multiple tasks pulling data into the cache at the
- * same time, you'll get better cache utilization if you try to segregate their
- * data and preserve locality.
- *
- * For example, say you've starting Firefox at the same time you're copying a
- * bunch of files. Firefox will likely end up being fairly hot and stay in the
- * cache awhile, but the data you copied might not be; if you wrote all that
- * data to the same buckets it'd get invalidated at the same time.
- *
- * Both of those tasks will be doing fairly random IO so we can't rely on
- * detecting sequential IO to segregate their data, but going off of the task
- * should be a sane heuristic.
- */
-static struct open_bucket *pick_data_bucket(struct cache_set *c,
- const struct bkey *search,
- struct task_struct *task,
- struct bkey *alloc)
-{
- struct open_bucket *ret, *ret_task = NULL;
-
- list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
- goto found;
- else if (ret->last == task)
- ret_task = ret;
-
- ret = ret_task ?: list_first_entry(&c->data_buckets,
- struct open_bucket, list);
-found:
- if (!ret->sectors_free && KEY_PTRS(alloc)) {
- ret->sectors_free = c->sb.bucket_size;
- bkey_copy(&ret->key, alloc);
- bkey_init(alloc);
- }
-
- if (!ret->sectors_free)
- ret = NULL;
-
- return ret;
-}
-
-/*
- * Allocates some space in the cache to write to, and k to point to the newly
- * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
- * end of the newly allocated space).
- *
- * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
- * sectors were actually allocated.
- *
- * If s->writeback is true, will not fail.
- */
-static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
- struct search *s)
-{
- struct cache_set *c = s->op.c;
- struct open_bucket *b;
- BKEY_PADDED(key) alloc;
- struct closure cl, *w = NULL;
- unsigned i;
-
- if (s->writeback) {
- closure_init_stack(&cl);
- w = &cl;
- }
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ atomic_t *journal_ref = NULL;
+ struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
+ int ret;
/*
- * We might have to allocate a new bucket, which we can't do with a
- * spinlock held. So if we have to allocate, we drop the lock, allocate
- * and then retry. KEY_PTRS() indicates whether alloc points to
- * allocated bucket(s).
+ * If we're looping, might already be waiting on
+ * another journal write - can't wait on more than one journal write at
+ * a time
+ *
+ * XXX: this looks wrong
*/
+#if 0
+ while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
+ closure_sync(&s->cl);
+#endif
- bkey_init(&alloc.key);
- spin_lock(&c->data_bucket_lock);
-
- while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
- unsigned watermark = s->op.write_prio
- ? WATERMARK_MOVINGGC
- : WATERMARK_NONE;
-
- spin_unlock(&c->data_bucket_lock);
-
- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
- return false;
+ if (!op->replace)
+ journal_ref = bch_journal(op->c, &op->insert_keys,
+ op->flush_journal ? cl : NULL);
- spin_lock(&c->data_bucket_lock);
+ ret = bch_btree_insert(op->c, &op->insert_keys,
+ journal_ref, replace_key);
+ if (ret == -ESRCH) {
+ op->replace_collision = true;
+ } else if (ret) {
+ op->error = -ENOMEM;
+ op->insert_data_done = true;
}
- /*
- * If we had to allocate, we might race and not need to allocate the
- * second time we call find_data_bucket(). If we allocated a bucket but
- * didn't use it, drop the refcount bch_bucket_alloc_set() took:
- */
- if (KEY_PTRS(&alloc.key))
- __bkey_put(c, &alloc.key);
-
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- EBUG_ON(ptr_stale(c, &b->key, i));
+ if (journal_ref)
+ atomic_dec_bug(journal_ref);
- /* Set up the pointer to the space we're allocating: */
+ if (!op->insert_data_done)
+ continue_at(cl, bch_data_insert_start, bcache_wq);
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- k->ptr[i] = b->key.ptr[i];
+ bch_keylist_free(&op->insert_keys);
+ closure_return(cl);
+}
- sectors = min(sectors, b->sectors_free);
+static void bch_data_invalidate(struct closure *cl)
+{
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio;
- SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
- SET_KEY_SIZE(k, sectors);
- SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+ pr_debug("invalidating %i sectors from %llu",
+ bio_sectors(bio), (uint64_t) bio->bi_sector);
- /*
- * Move b to the end of the lru, and keep track of what this bucket was
- * last used for:
- */
- list_move_tail(&b->list, &c->data_buckets);
- bkey_copy_key(&b->key, k);
- b->last = s->task;
+ while (bio_sectors(bio)) {
+ unsigned sectors = min(bio_sectors(bio),
+ 1U << (KEY_SIZE_BITS - 1));
- b->sectors_free -= sectors;
+ if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
+ goto out;
- for (i = 0; i < KEY_PTRS(&b->key); i++) {
- SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+ bio->bi_sector += sectors;
+ bio->bi_size -= sectors << 9;
- atomic_long_add(sectors,
- &PTR_CACHE(c, &b->key, i)->sectors_written);
+ bch_keylist_add(&op->insert_keys,
+ &KEY(op->inode, bio->bi_sector, sectors));
}
- if (b->sectors_free < c->sb.block_size)
- b->sectors_free = 0;
-
- /*
- * k takes refcounts on the buckets it points to until it's inserted
- * into the btree, but if we're done with this bucket we just transfer
- * get_data_bucket()'s refcount.
- */
- if (b->sectors_free)
- for (i = 0; i < KEY_PTRS(&b->key); i++)
- atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
-
- spin_unlock(&c->data_bucket_lock);
- return true;
+ op->insert_data_done = true;
+ bio_put(bio);
+out:
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
}
-static void bch_insert_data_error(struct closure *cl)
+static void bch_data_insert_error(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
/*
* Our data write just errored, which means we've got a bunch of keys to
@@ -438,35 +296,34 @@ static void bch_insert_data_error(struct closure *cl)
* from the keys we'll accomplish just that.
*/
- struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+ struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
- while (src != op->keys.top) {
+ while (src != op->insert_keys.top) {
struct bkey *n = bkey_next(src);
SET_KEY_PTRS(src, 0);
- bkey_copy(dst, src);
+ memmove(dst, src, bkey_bytes(src));
dst = bkey_next(dst);
src = n;
}
- op->keys.top = dst;
+ op->insert_keys.top = dst;
- bch_journal(cl);
+ bch_data_insert_keys(cl);
}
-static void bch_insert_data_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
if (error) {
/* TODO: We could try to recover from this. */
- if (s->writeback)
- s->error = error;
- else if (s->write)
- set_closure_fn(cl, bch_insert_data_error, bcache_wq);
+ if (op->writeback)
+ op->error = error;
+ else if (!op->replace)
+ set_closure_fn(cl, bch_data_insert_error, bcache_wq);
else
set_closure_fn(cl, NULL, NULL);
}
@@ -474,18 +331,17 @@ static void bch_insert_data_endio(struct bio *bio, int error)
bch_bbio_endio(op->c, bio, error, "writing data to cache");
}
-static void bch_insert_data_loop(struct closure *cl)
+static void bch_data_insert_start(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
- struct bio *bio = op->cache_bio, *n;
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio, *n;
- if (op->skip)
- return bio_invalidate(cl);
+ if (op->bypass)
+ return bch_data_invalidate(cl);
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
- bch_queue_gc(op->c);
+ wake_up_gc(op->c);
}
/*
@@ -497,29 +353,30 @@ static void bch_insert_data_loop(struct closure *cl)
do {
unsigned i;
struct bkey *k;
- struct bio_set *split = s->d
- ? s->d->bio_split : op->c->bio_split;
+ struct bio_set *split = op->c->bio_split;
/* 1 for the device pointer and 1 for the chksum */
- if (bch_keylist_realloc(&op->keys,
+ if (bch_keylist_realloc(&op->insert_keys,
1 + (op->csum ? 1 : 0),
op->c))
- continue_at(cl, bch_journal, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
- k = op->keys.top;
+ k = op->insert_keys.top;
bkey_init(k);
SET_KEY_INODE(k, op->inode);
SET_KEY_OFFSET(k, bio->bi_sector);
- if (!bch_alloc_sectors(k, bio_sectors(bio), s))
+ if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
+ op->write_point, op->write_prio,
+ op->writeback))
goto err;
n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
- n->bi_end_io = bch_insert_data_endio;
+ n->bi_end_io = bch_data_insert_endio;
n->bi_private = cl;
- if (s->writeback) {
+ if (op->writeback) {
SET_KEY_DIRTY(k, true);
for (i = 0; i < KEY_PTRS(k); i++)
@@ -532,17 +389,17 @@ static void bch_insert_data_loop(struct closure *cl)
bio_csum(n, k);
trace_bcache_cache_insert(k);
- bch_keylist_push(&op->keys);
+ bch_keylist_push(&op->insert_keys);
n->bi_rw |= REQ_WRITE;
bch_submit_bbio(n, op->c, k, 0);
} while (n != bio);
op->insert_data_done = true;
- continue_at(cl, bch_journal, bcache_wq);
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
/* bch_alloc_sectors() blocks if s->writeback = true */
- BUG_ON(s->writeback);
+ BUG_ON(op->writeback);
/*
* But if it's not a writeback write we'd rather just bail out if
@@ -550,15 +407,15 @@ err:
* we might be starving btree writes for gc or something.
*/
- if (s->write) {
+ if (!op->replace) {
/*
* Writethrough write: We can't complete the write until we've
* updated the index. But we don't want to delay the write while
* we wait for buckets to be freed up, so just invalidate the
* rest of the write.
*/
- op->skip = true;
- return bio_invalidate(cl);
+ op->bypass = true;
+ return bch_data_invalidate(cl);
} else {
/*
* From a cache miss, we can just insert the keys for the data
@@ -567,15 +424,15 @@ err:
op->insert_data_done = true;
bio_put(bio);
- if (!bch_keylist_empty(&op->keys))
- continue_at(cl, bch_journal, bcache_wq);
+ if (!bch_keylist_empty(&op->insert_keys))
+ continue_at(cl, bch_data_insert_keys, bcache_wq);
else
closure_return(cl);
}
}
/**
- * bch_insert_data - stick some data in the cache
+ * bch_data_insert - stick some data in the cache
*
* This is the starting point for any data to end up in a cache device; it could
* be from a normal write, or a writeback write, or a write to a flash only
@@ -587,56 +444,179 @@ err:
* data is written it calls bch_journal, and after the keys have been added to
* the next journal write they're inserted into the btree.
*
- * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
* and op->inode is used for the key inode.
*
- * If op->skip is true, instead of inserting the data it invalidates the region
- * of the cache represented by op->cache_bio and op->inode.
+ * If op->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by op->bio and op->inode.
*/
-void bch_insert_data(struct closure *cl)
+void bch_data_insert(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+
+ trace_bcache_write(op->bio, op->writeback, op->bypass);
- bch_keylist_init(&op->keys);
- bio_get(op->cache_bio);
- bch_insert_data_loop(cl);
+ bch_keylist_init(&op->insert_keys);
+ bio_get(op->bio);
+ bch_data_insert_start(cl);
}
-void bch_btree_insert_async(struct closure *cl)
+/* Congested? */
+
+unsigned bch_get_congested(struct cache_set *c)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ int i;
+ long rand;
- if (bch_btree_insert(op, op->c)) {
- s->error = -ENOMEM;
- op->insert_data_done = true;
- }
+ if (!c->congested_read_threshold_us &&
+ !c->congested_write_threshold_us)
+ return 0;
+
+ i = (local_clock_us() - c->congested_last_us) / 1024;
+ if (i < 0)
+ return 0;
+
+ i += atomic_read(&c->congested);
+ if (i >= 0)
+ return 0;
- if (op->insert_data_done) {
- bch_keylist_free(&op->keys);
- closure_return(cl);
- } else
- continue_at(cl, bch_insert_data_loop, bcache_wq);
+ i += CONGESTED_MAX;
+
+ if (i > 0)
+ i = fract_exp_two(i, 6);
+
+ rand = get_random_int();
+ i -= bitmap_weight(&rand, BITS_PER_LONG);
+
+ return i > 0 ? i : 1;
}
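/*
 * Illustrative sketch, not part of this patch: bch_get_congested() above
 * converts recent cache latency overruns into a sector threshold that
 * check_should_bypass() compares against a task's sequential run length -
 * the more congested the cache, the smaller the threshold and the more IO
 * gets bypassed. The userspace model below mirrors the arithmetic; the
 * exponential scaling (fract_exp_two) and the randomized dither are only
 * approximated, so treat the exact curve as an assumption.
 */
#include <stdint.h>
#include <stdlib.h>

#define CONGESTED_MAX 1024

static int congested_threshold_model(int congested, int64_t elapsed_us)
{
	/* congestion decays by ~1 unit per millisecond of quiet time */
	int i = (int)(elapsed_us / 1024);

	if (i < 0)
		return 0;
	i += congested;			/* counter is negative when congested */
	if (i >= 0)
		return 0;		/* fully decayed: no bypassing */
	i += CONGESTED_MAX;		/* now in (0, CONGESTED_MAX) */
	/* the real code expands i exponentially here via fract_exp_two() */
	i -= rand() % 64;		/* stand-in for bitmap_weight(random) */
	return i > 0 ? i : 1;		/* never 0: 0 means "not congested" */
}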
-/* Common code for the make_request functions */
+static void add_sequential(struct task_struct *t)
+{
+ ewma_add(t->sequential_io_avg,
+ t->sequential_io, 8, 0);
-static void request_endio(struct bio *bio, int error)
+ t->sequential_io = 0;
+}
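/*
 * Sketch, not part of this patch: ewma_add() folds the task's latest
 * sequential byte count into a moving average with weight 1/8. Assuming
 * bcache's usual fixed-point EWMA helper, the update is essentially
 *
 *	sequential_io_avg = (sequential_io_avg * 7 + sequential_io) / 8;
 *
 * so one long sequential streak keeps biasing this task toward bypass for
 * roughly the next eight IOs before it decays away.
 */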
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
- struct closure *cl = bio->bi_private;
+ return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
- if (error) {
- struct search *s = container_of(cl, struct search, cl);
- s->error = error;
- /* Only cache read errors are recoverable */
- s->recoverable = false;
+static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+{
+ struct cache_set *c = dc->disk.c;
+ unsigned mode = cache_mode(dc, bio);
+ unsigned sectors, congested = bch_get_congested(c);
+ struct task_struct *task = current;
+ struct io *i;
+
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+ (bio->bi_rw & REQ_DISCARD))
+ goto skip;
+
+ if (mode == CACHE_MODE_NONE ||
+ (mode == CACHE_MODE_WRITEAROUND &&
+ (bio->bi_rw & REQ_WRITE)))
+ goto skip;
+
+ if (bio->bi_sector & (c->sb.block_size - 1) ||
+ bio_sectors(bio) & (c->sb.block_size - 1)) {
+ pr_debug("skipping unaligned io");
+ goto skip;
}
- bio_put(bio);
- closure_put(cl);
+ if (bypass_torture_test(dc)) {
+ if ((get_random_int() & 3) == 3)
+ goto skip;
+ else
+ goto rescale;
+ }
+
+ if (!congested && !dc->sequential_cutoff)
+ goto rescale;
+
+ if (!congested &&
+ mode == CACHE_MODE_WRITEBACK &&
+ (bio->bi_rw & REQ_WRITE) &&
+ (bio->bi_rw & REQ_SYNC))
+ goto rescale;
+
+ spin_lock(&dc->io_lock);
+
+ hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
+ if (i->last == bio->bi_sector &&
+ time_before(jiffies, i->jiffies))
+ goto found;
+
+ i = list_first_entry(&dc->io_lru, struct io, lru);
+
+ add_sequential(task);
+ i->sequential = 0;
+found:
+ if (i->sequential + bio->bi_size > i->sequential)
+ i->sequential += bio->bi_size;
+
+ i->last = bio_end_sector(bio);
+ i->jiffies = jiffies + msecs_to_jiffies(5000);
+ task->sequential_io = i->sequential;
+
+ hlist_del(&i->hash);
+ hlist_add_head(&i->hash, iohash(dc, i->last));
+ list_move_tail(&i->lru, &dc->io_lru);
+
+ spin_unlock(&dc->io_lock);
+
+ sectors = max(task->sequential_io,
+ task->sequential_io_avg) >> 9;
+
+ if (dc->sequential_cutoff &&
+ sectors >= dc->sequential_cutoff >> 9) {
+ trace_bcache_bypass_sequential(bio);
+ goto skip;
+ }
+
+ if (congested && sectors >= congested) {
+ trace_bcache_bypass_congested(bio);
+ goto skip;
+ }
+
+rescale:
+ bch_rescale_priorities(c, bio_sectors(bio));
+ return false;
+skip:
+ bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
+ return true;
}
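/*
 * Illustrative sketch, not part of this patch: the block above detects
 * sequential streams with a small LRU hash keyed by "where did a recent
 * IO end?". If a new bio starts exactly where an earlier one finished,
 * its size is added to that stream's running total (the
 * i->sequential + bio->bi_size > i->sequential test guards against
 * 32-bit overflow). A self-contained model with invented names:
 */
#include <stdint.h>

#define NSTREAMS 8

static struct stream {
	uint64_t last;		/* sector one past the stream's last IO */
	unsigned bytes;		/* accumulated sequential bytes */
} streams[NSTREAMS];

static unsigned sequential_bytes_model(uint64_t sector, unsigned nsectors)
{
	unsigned j;

	for (j = 0; j < NSTREAMS; j++)
		if (streams[j].last == sector)	/* continues a stream */
			goto found;
	j = 0;					/* bcache evicts via an LRU */
	streams[j].bytes = 0;
found:
	streams[j].bytes += nsectors << 9;
	streams[j].last = sector + nsectors;
	return streams[j].bytes;
}
/*
 * bcache then compares max(this total, the task's EWMA) against
 * sequential_cutoff and against the congestion threshold.
 */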
-void bch_cache_read_endio(struct bio *bio, int error)
+/* Cache lookup */
+
+struct search {
+ /* Stack frame for bio_complete */
+ struct closure cl;
+
+ struct bcache_device *d;
+
+ struct bbio bio;
+ struct bio *orig_bio;
+ struct bio *cache_miss;
+
+ unsigned insert_bio_sectors;
+
+ unsigned recoverable:1;
+ unsigned unaligned_bvec:1;
+ unsigned write:1;
+ unsigned read_dirty_data:1;
+
+ unsigned long start_time;
+
+ struct btree_op op;
+ struct data_insert_op iop;
+};
+
+static void bch_cache_read_endio(struct bio *bio, int error)
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct closure *cl = bio->bi_private;
@@ -650,13 +630,113 @@ void bch_cache_read_endio(struct bio *bio, int error)
*/
if (error)
- s->error = error;
- else if (ptr_stale(s->op.c, &b->key, 0)) {
- atomic_long_inc(&s->op.c->cache_read_races);
- s->error = -EINTR;
+ s->iop.error = error;
+ else if (ptr_stale(s->iop.c, &b->key, 0)) {
+ atomic_long_inc(&s->iop.c->cache_read_races);
+ s->iop.error = -EINTR;
}
- bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+ bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
+{
+ struct search *s = container_of(op, struct search, op);
+ struct bio *n, *bio = &s->bio.bio;
+ struct bkey *bio_key;
+ unsigned ptr;
+
+ if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+ return MAP_CONTINUE;
+
+ if (KEY_INODE(k) != s->iop.inode ||
+ KEY_START(k) > bio->bi_sector) {
+ unsigned bio_sectors = bio_sectors(bio);
+ unsigned sectors = KEY_INODE(k) == s->iop.inode
+ ? min_t(uint64_t, INT_MAX,
+ KEY_START(k) - bio->bi_sector)
+ : INT_MAX;
+
+ int ret = s->d->cache_miss(b, s, bio, sectors);
+ if (ret != MAP_CONTINUE)
+ return ret;
+
+ /* if this was a complete miss we shouldn't get here */
+ BUG_ON(bio_sectors <= sectors);
+ }
+
+ if (!KEY_SIZE(k))
+ return MAP_CONTINUE;
+
+ /* XXX: figure out best pointer - for multiple cache devices */
+ ptr = 0;
+
+ PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+ if (KEY_DIRTY(k))
+ s->read_dirty_data = true;
+
+ n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
+ KEY_OFFSET(k) - bio->bi_sector),
+ GFP_NOIO, s->d->bio_split);
+
+ bio_key = &container_of(n, struct bbio, bio)->key;
+ bch_bkey_copy_single_ptr(bio_key, k, ptr);
+
+ bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+ bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
+
+ n->bi_end_io = bch_cache_read_endio;
+ n->bi_private = &s->cl;
+
+ /*
+ * The bucket we're reading from might be reused while our bio
+ * is in flight, and we could then end up reading the wrong
+ * data.
+ *
+ * We guard against this by checking (in cache_read_endio()) if
+ * the pointer is stale again; if so, we treat it as an error
+ * and reread from the backing device (but we don't pass that
+ * error up anywhere).
+ */
+
+ __bch_submit_bbio(n, b->c);
+ return n == bio ? MAP_DONE : MAP_CONTINUE;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, iop.cl);
+ struct bio *bio = &s->bio.bio;
+
+ int ret = bch_btree_map_keys(&s->op, s->iop.c,
+ &KEY(s->iop.inode, bio->bi_sector, 0),
+ cache_lookup_fn, MAP_END_KEY);
+ if (ret == -EAGAIN)
+ continue_at(cl, cache_lookup, bcache_wq);
+
+ closure_return(cl);
+}
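/*
 * Sketch, not part of this patch - the return-value protocol assumed by
 * bch_btree_map_keys() and cache_lookup_fn() above:
 *
 *	MAP_CONTINUE  keep iterating, more of the bio remains to be mapped
 *	MAP_DONE      the whole bio has been submitted, stop
 *	-EINTR        the btree iterator was invalidated (e.g. by the
 *	              check-key insert in cached_dev_cache_miss), bail out
 *	-EAGAIN       out of resources, retry later
 *
 * which is why cache_lookup() simply requeues itself with continue_at()
 * on -EAGAIN instead of blocking inside the btree code.
 */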
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+
+ if (error) {
+ struct search *s = container_of(cl, struct search, cl);
+ s->iop.error = error;
+ /* Only cache read errors are recoverable */
+ s->recoverable = false;
+ }
+
+ bio_put(bio);
+ closure_put(cl);
}
static void bio_complete(struct search *s)
@@ -670,8 +750,8 @@ static void bio_complete(struct search *s)
part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
part_stat_unlock();
- trace_bcache_request_end(s, s->orig_bio);
- bio_endio(s->orig_bio, s->error);
+ trace_bcache_request_end(s->d, s->orig_bio);
+ bio_endio(s->orig_bio, s->iop.error);
s->orig_bio = NULL;
}
}
@@ -691,8 +771,8 @@ static void search_free(struct closure *cl)
struct search *s = container_of(cl, struct search, cl);
bio_complete(s);
- if (s->op.cache_bio)
- bio_put(s->op.cache_bio);
+ if (s->iop.bio)
+ bio_put(s->iop.bio);
if (s->unaligned_bvec)
mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
@@ -703,21 +783,22 @@ static void search_free(struct closure *cl)
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
+ struct search *s;
struct bio_vec *bv;
- struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
- memset(s, 0, offsetof(struct search, op.keys));
+
+ s = mempool_alloc(d->c->search, GFP_NOIO);
+ memset(s, 0, offsetof(struct search, iop.insert_keys));
__closure_init(&s->cl, NULL);
- s->op.inode = d->id;
- s->op.c = d->c;
+ s->iop.inode = d->id;
+ s->iop.c = d->c;
s->d = d;
s->op.lock = -1;
- s->task = current;
+ s->iop.write_point = hash_long((unsigned long) current, 16);
s->orig_bio = bio;
s->write = (bio->bi_rw & REQ_WRITE) != 0;
- s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
- s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
s->recoverable = 1;
s->start_time = jiffies;
do_bio_hook(s);
@@ -734,18 +815,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
return s;
}
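/*
 * Sketch, not part of this patch: the memset() above deliberately clears
 * only the head of struct search - everything from iop.insert_keys onward
 * (the keylist and the padded replace_key, see request.h below) is
 * reinitialized explicitly instead. The pattern in plain C:
 */
#include <stddef.h>
#include <string.h>

struct frame {
	int must_be_zeroed;	/* everything before 'tail' is cleared... */
	void *also_zeroed;
	char tail[256];		/* ...but not this, it is reused as-is */
};

static void recycle(struct frame *f)
{
	memset(f, 0, offsetof(struct frame, tail));
}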
-static void btree_read_async(struct closure *cl)
-{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
-
- int ret = btree_root(search_recurse, op->c, op);
-
- if (ret == -EAGAIN)
- continue_at(cl, btree_read_async, bcache_wq);
-
- closure_return(cl);
-}
-
/* Cached devices */
static void cached_dev_bio_complete(struct closure *cl)
@@ -759,27 +828,28 @@ static void cached_dev_bio_complete(struct closure *cl)
/* Process reads */
-static void cached_dev_read_complete(struct closure *cl)
+static void cached_dev_cache_miss_done(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
- if (s->op.insert_collision)
- bch_mark_cache_miss_collision(s);
+ if (s->iop.replace_collision)
+ bch_mark_cache_miss_collision(s->iop.c, s->d);
- if (s->op.cache_bio) {
+ if (s->iop.bio) {
int i;
struct bio_vec *bv;
- __bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+ bio_for_each_segment_all(bv, s->iop.bio, i)
__free_page(bv->bv_page);
}
cached_dev_bio_complete(cl);
}
-static void request_read_error(struct closure *cl)
+static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
+ struct bio *bio = &s->bio.bio;
struct bio_vec *bv;
int i;
@@ -787,7 +857,7 @@ static void request_read_error(struct closure *cl)
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
- s->error = 0;
+ s->iop.error = 0;
bv = s->bio.bio.bi_io_vec;
do_bio_hook(s);
s->bio.bio.bi_io_vec = bv;
@@ -803,146 +873,148 @@ static void request_read_error(struct closure *cl)
/* XXX: invalidate cache */
- closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+ closure_bio_submit(bio, cl, s->d);
}
- continue_at(cl, cached_dev_read_complete, NULL);
+ continue_at(cl, cached_dev_cache_miss_done, NULL);
}
-static void request_read_done(struct closure *cl)
+static void cached_dev_read_done(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*
- * s->cache_bio != NULL implies that we had a cache miss; cache_bio now
- * contains data ready to be inserted into the cache.
+ * If we had a cache miss, cache_bio now contains data ready to be
+ * inserted into the cache.
*
* First, we copy the data we just read from cache_bio's bounce buffers
* to the buffers the original bio pointed to:
*/
- if (s->op.cache_bio) {
- bio_reset(s->op.cache_bio);
- s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
- s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
- s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
- bch_bio_map(s->op.cache_bio, NULL);
+ if (s->iop.bio) {
+ bio_reset(s->iop.bio);
+ s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+ s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+ s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+ bch_bio_map(s->iop.bio, NULL);
- bio_copy_data(s->cache_miss, s->op.cache_bio);
+ bio_copy_data(s->cache_miss, s->iop.bio);
bio_put(s->cache_miss);
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable)
- bch_data_verify(s);
+ if (verify(dc, &s->bio.bio) && s->recoverable &&
+ !s->unaligned_bvec && !s->read_dirty_data)
+ bch_data_verify(dc, s->orig_bio);
bio_complete(s);
- if (s->op.cache_bio &&
- !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
- s->op.type = BTREE_REPLACE;
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ if (s->iop.bio &&
+ !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
+ BUG_ON(!s->iop.replace);
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, cached_dev_read_complete, NULL);
+ continue_at(cl, cached_dev_cache_miss_done, NULL);
}
-static void request_read_done_bh(struct closure *cl)
+static void cached_dev_read_done_bh(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
- trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
+ bch_mark_cache_accounting(s->iop.c, s->d,
+ !s->cache_miss, s->iop.bypass);
+ trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
- if (s->error)
- continue_at_nobarrier(cl, request_read_error, bcache_wq);
- else if (s->op.cache_bio || verify(dc, &s->bio.bio))
- continue_at_nobarrier(cl, request_read_done, bcache_wq);
+ if (s->iop.error)
+ continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
+ else if (s->iop.bio || verify(dc, &s->bio.bio))
+ continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
else
- continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+ continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
- int ret = 0;
- unsigned reada;
+ int ret = MAP_CONTINUE;
+ unsigned reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- struct bio *miss;
-
- miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
- if (miss == bio)
- s->op.lookup_done = true;
+ struct bio *miss, *cache_bio;
- miss->bi_end_io = request_endio;
- miss->bi_private = &s->cl;
-
- if (s->cache_miss || s->op.skip)
+ if (s->cache_miss || s->iop.bypass) {
+ miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
goto out_submit;
-
- if (miss != bio ||
- (bio->bi_rw & REQ_RAHEAD) ||
- (bio->bi_rw & REQ_META) ||
- s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
- reada = 0;
- else {
- reada = min(dc->readahead >> 9,
- sectors - bio_sectors(miss));
-
- if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
- reada = bdev_sectors(miss->bi_bdev) -
- bio_end_sector(miss);
}
- s->cache_bio_sectors = bio_sectors(miss) + reada;
- s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
- DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
- dc->disk.bio_split);
+ if (!(bio->bi_rw & REQ_RAHEAD) &&
+ !(bio->bi_rw & REQ_META) &&
+ s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
+ reada = min_t(sector_t, dc->readahead >> 9,
+ bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
- if (!s->op.cache_bio)
- goto out_submit;
+ s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
- s->op.cache_bio->bi_sector = miss->bi_sector;
- s->op.cache_bio->bi_bdev = miss->bi_bdev;
- s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
+ s->iop.replace_key = KEY(s->iop.inode,
+ bio->bi_sector + s->insert_bio_sectors,
+ s->insert_bio_sectors);
- s->op.cache_bio->bi_end_io = request_endio;
- s->op.cache_bio->bi_private = &s->cl;
+ ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
+ if (ret)
+ return ret;
+
+ s->iop.replace = true;
+
+ miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
- ret = -EINTR;
- if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
- goto out_put;
+ ret = miss == bio ? MAP_DONE : -EINTR;
+
+ cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
+ dc->disk.bio_split);
+ if (!cache_bio)
+ goto out_submit;
+
+ cache_bio->bi_sector = miss->bi_sector;
+ cache_bio->bi_bdev = miss->bi_bdev;
+ cache_bio->bi_size = s->insert_bio_sectors << 9;
+
+ cache_bio->bi_end_io = request_endio;
+ cache_bio->bi_private = &s->cl;
- bch_bio_map(s->op.cache_bio, NULL);
- if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+ bch_bio_map(cache_bio, NULL);
+ if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
- s->cache_miss = miss;
- bio_get(s->op.cache_bio);
+ if (reada)
+ bch_mark_cache_readahead(s->iop.c, s->d);
- closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+ s->cache_miss = miss;
+ s->iop.bio = cache_bio;
+ bio_get(cache_bio);
+ closure_bio_submit(cache_bio, &s->cl, s->d);
return ret;
out_put:
- bio_put(s->op.cache_bio);
- s->op.cache_bio = NULL;
+ bio_put(cache_bio);
out_submit:
+ miss->bi_end_io = request_endio;
+ miss->bi_private = &s->cl;
closure_bio_submit(miss, &s->cl, s->d);
return ret;
}
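/*
 * Worked example with illustrative numbers, not part of this patch: for a
 * 4k (8 sector) read missing at the front of a 1024-sector hole, with
 * dc->readahead set to 512k and an idle, mostly-empty cache:
 *
 *	reada                 = min(512k >> 9, sectors to end of bdev)
 *	                      = 1024 sectors
 *	s->insert_bio_sectors = min(1024, 8 + 1024) = 1024
 *
 * so the replace_key and cache_bio cover the miss plus readahead. The
 * fallbacks are graceful: a failed check-key insert is returned to the
 * lookup machinery to retry, and cache_bio is allocated GFP_NOWAIT so an
 * allocation failure degrades to a plain backing-device read via
 * out_submit.
 */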
-static void request_read(struct cached_dev *dc, struct search *s)
+static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
- check_should_skip(dc, s);
- closure_call(&s->op.cl, btree_read_async, NULL, cl);
-
- continue_at(cl, request_read_done_bh, NULL);
+ closure_call(&s->iop.cl, cache_lookup, NULL, cl);
+ continue_at(cl, cached_dev_read_done_bh, NULL);
}
/* Process writes */
@@ -956,47 +1028,52 @@ static void cached_dev_write_complete(struct closure *cl)
cached_dev_bio_complete(cl);
}
-static void request_write(struct cached_dev *dc, struct search *s)
+static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
struct bio *bio = &s->bio.bio;
- struct bkey start, end;
- start = KEY(dc->disk.id, bio->bi_sector, 0);
- end = KEY(dc->disk.id, bio_end_sector(bio), 0);
+ struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+ struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
- bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+ bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
- check_should_skip(dc, s);
down_read_non_owner(&dc->writeback_lock);
-
if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
- s->op.skip = false;
- s->writeback = true;
+ /*
+ * We overlap with some dirty data undergoing background
+ * writeback, force this write to writeback
+ */
+ s->iop.bypass = false;
+ s->iop.writeback = true;
}
+ /*
+ * Discards aren't _required_ to do anything, so skipping if
+ * check_overlapping returned true is ok
+ *
+ * But check_overlapping drops dirty keys for which io hasn't started,
+ * so we still want to call it.
+ */
if (bio->bi_rw & REQ_DISCARD)
- goto skip;
+ s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
cache_mode(dc, bio),
- s->op.skip)) {
- s->op.skip = false;
- s->writeback = true;
+ s->iop.bypass)) {
+ s->iop.bypass = false;
+ s->iop.writeback = true;
}
- if (s->op.skip)
- goto skip;
-
- trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
+ if (s->iop.bypass) {
+ s->iop.bio = s->orig_bio;
+ bio_get(s->iop.bio);
- if (!s->writeback) {
- s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
- dc->disk.bio_split);
-
- closure_bio_submit(bio, cl, s->d);
- } else {
+ if (!(bio->bi_rw & REQ_DISCARD) ||
+ blk_queue_discard(bdev_get_queue(dc->bdev)))
+ closure_bio_submit(bio, cl, s->d);
+ } else if (s->iop.writeback) {
bch_writeback_add(dc);
- s->op.cache_bio = bio;
+ s->iop.bio = bio;
if (bio->bi_rw & REQ_FLUSH) {
/* Also need to send a flush to the backing device */
@@ -1010,36 +1087,26 @@ static void request_write(struct cached_dev *dc, struct search *s)
closure_bio_submit(flush, cl, s->d);
}
- }
-out:
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
- continue_at(cl, cached_dev_write_complete, NULL);
-skip:
- s->op.skip = true;
- s->op.cache_bio = s->orig_bio;
- bio_get(s->op.cache_bio);
+ } else {
+ s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
+ dc->disk.bio_split);
- if ((bio->bi_rw & REQ_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
- goto out;
+ closure_bio_submit(bio, cl, s->d);
+ }
- closure_bio_submit(bio, cl, s->d);
- goto out;
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
+ continue_at(cl, cached_dev_write_complete, NULL);
}
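/*
 * Sketch, not part of this patch: the rewritten write path is now a single
 * three-way dispatch instead of the old goto maze. In summary:
 *
 *	bypass:       data goes only to the backing device; iop.bio is set
 *	              to the original bio so bch_data_insert() invalidates
 *	              the now-stale range in the cache.
 *	writeback:    data goes only to the cache, marked dirty; a separate
 *	              flush bio is sent to the backing device for REQ_FLUSH.
 *	writethrough: (default) the bio is cloned; the original goes to the
 *	              backing device and the clone is inserted into the cache.
 *
 * Every branch ends the same way: kick off bch_data_insert() and finish
 * in cached_dev_write_complete().
 */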
-static void request_nodata(struct cached_dev *dc, struct search *s)
+static void cached_dev_nodata(struct closure *cl)
{
- struct closure *cl = &s->cl;
+ struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- if (bio->bi_rw & REQ_DISCARD) {
- request_write(dc, s);
- return;
- }
-
- if (s->op.flush_journal)
- bch_journal_meta(s->op.c, cl);
+ if (s->iop.flush_journal)
+ bch_journal_meta(s->iop.c, cl);
+ /* If it's a flush, we send the flush to the backing device too */
closure_bio_submit(bio, cl, s->d);
continue_at(cl, cached_dev_bio_complete, NULL);
@@ -1047,134 +1114,6 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
/* Cached devices - read & write stuff */
-unsigned bch_get_congested(struct cache_set *c)
-{
- int i;
- long rand;
-
- if (!c->congested_read_threshold_us &&
- !c->congested_write_threshold_us)
- return 0;
-
- i = (local_clock_us() - c->congested_last_us) / 1024;
- if (i < 0)
- return 0;
-
- i += atomic_read(&c->congested);
- if (i >= 0)
- return 0;
-
- i += CONGESTED_MAX;
-
- if (i > 0)
- i = fract_exp_two(i, 6);
-
- rand = get_random_int();
- i -= bitmap_weight(&rand, BITS_PER_LONG);
-
- return i > 0 ? i : 1;
-}
-
-static void add_sequential(struct task_struct *t)
-{
- ewma_add(t->sequential_io_avg,
- t->sequential_io, 8, 0);
-
- t->sequential_io = 0;
-}
-
-static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
-{
- return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
-}
-
-static void check_should_skip(struct cached_dev *dc, struct search *s)
-{
- struct cache_set *c = s->op.c;
- struct bio *bio = &s->bio.bio;
- unsigned mode = cache_mode(dc, bio);
- unsigned sectors, congested = bch_get_congested(c);
-
- if (atomic_read(&dc->disk.detaching) ||
- c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
- (bio->bi_rw & REQ_DISCARD))
- goto skip;
-
- if (mode == CACHE_MODE_NONE ||
- (mode == CACHE_MODE_WRITEAROUND &&
- (bio->bi_rw & REQ_WRITE)))
- goto skip;
-
- if (bio->bi_sector & (c->sb.block_size - 1) ||
- bio_sectors(bio) & (c->sb.block_size - 1)) {
- pr_debug("skipping unaligned io");
- goto skip;
- }
-
- if (!congested && !dc->sequential_cutoff)
- goto rescale;
-
- if (!congested &&
- mode == CACHE_MODE_WRITEBACK &&
- (bio->bi_rw & REQ_WRITE) &&
- (bio->bi_rw & REQ_SYNC))
- goto rescale;
-
- if (dc->sequential_merge) {
- struct io *i;
-
- spin_lock(&dc->io_lock);
-
- hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
- if (i->last == bio->bi_sector &&
- time_before(jiffies, i->jiffies))
- goto found;
-
- i = list_first_entry(&dc->io_lru, struct io, lru);
-
- add_sequential(s->task);
- i->sequential = 0;
-found:
- if (i->sequential + bio->bi_size > i->sequential)
- i->sequential += bio->bi_size;
-
- i->last = bio_end_sector(bio);
- i->jiffies = jiffies + msecs_to_jiffies(5000);
- s->task->sequential_io = i->sequential;
-
- hlist_del(&i->hash);
- hlist_add_head(&i->hash, iohash(dc, i->last));
- list_move_tail(&i->lru, &dc->io_lru);
-
- spin_unlock(&dc->io_lock);
- } else {
- s->task->sequential_io = bio->bi_size;
-
- add_sequential(s->task);
- }
-
- sectors = max(s->task->sequential_io,
- s->task->sequential_io_avg) >> 9;
-
- if (dc->sequential_cutoff &&
- sectors >= dc->sequential_cutoff >> 9) {
- trace_bcache_bypass_sequential(s->orig_bio);
- goto skip;
- }
-
- if (congested && sectors >= congested) {
- trace_bcache_bypass_congested(s->orig_bio);
- goto skip;
- }
-
-rescale:
- bch_rescale_priorities(c, bio_sectors(bio));
- return;
-skip:
- bch_mark_sectors_bypassed(s, bio_sectors(bio));
- s->op.skip = true;
-}
-
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
struct search *s;
@@ -1192,14 +1131,24 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
if (cached_dev_get(dc)) {
s = search_alloc(bio, d);
- trace_bcache_request_start(s, bio);
-
- if (!bio_has_data(bio))
- request_nodata(dc, s);
- else if (rw)
- request_write(dc, s);
- else
- request_read(dc, s);
+ trace_bcache_request_start(s->d, bio);
+
+ if (!bio->bi_size) {
+ /*
+ * can't call bch_journal_meta from under
+ * generic_make_request
+ */
+ continue_at_nobarrier(&s->cl,
+ cached_dev_nodata,
+ bcache_wq);
+ } else {
+ s->iop.bypass = check_should_bypass(dc, bio);
+
+ if (rw)
+ cached_dev_write(dc, s);
+ else
+ cached_dev_read(dc, s);
+ }
} else {
if ((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1274,9 +1223,19 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
bio_advance(bio, min(sectors << 9, bio->bi_size));
if (!bio->bi_size)
- s->op.lookup_done = true;
+ return MAP_DONE;
- return 0;
+ return MAP_CONTINUE;
+}
+
+static void flash_dev_nodata(struct closure *cl)
+{
+ struct search *s = container_of(cl, struct search, cl);
+
+ if (s->iop.flush_journal)
+ bch_journal_meta(s->iop.c, cl);
+
+ continue_at(cl, search_free, NULL);
}
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
@@ -1295,23 +1254,28 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
cl = &s->cl;
bio = &s->bio.bio;
- trace_bcache_request_start(s, bio);
+ trace_bcache_request_start(s->d, bio);
- if (bio_has_data(bio) && !rw) {
- closure_call(&s->op.cl, btree_read_async, NULL, cl);
- } else if (bio_has_data(bio) || s->op.skip) {
- bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
+ if (!bio->bi_size) {
+ /*
+ * can't call bch_journal_meta from under
+ * generic_make_request
+ */
+ continue_at_nobarrier(&s->cl,
+ flash_dev_nodata,
+ bcache_wq);
+ } else if (rw) {
+ bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
- s->writeback = true;
- s->op.cache_bio = bio;
+ s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.writeback = true;
+ s->iop.bio = bio;
- closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
} else {
- /* No data - probably a cache flush */
- if (s->op.flush_journal)
- bch_journal_meta(s->op.c, cl);
+ closure_call(&s->iop.cl, cache_lookup, NULL, cl);
}
continue_at(cl, search_free, NULL);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 57dc4784f4f4..2cd65bf073c2 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -3,40 +3,33 @@
#include <linux/cgroup.h>
-struct search {
- /* Stack frame for bio_complete */
+struct data_insert_op {
struct closure cl;
+ struct cache_set *c;
+ struct bio *bio;
- struct bcache_device *d;
- struct task_struct *task;
-
- struct bbio bio;
- struct bio *orig_bio;
- struct bio *cache_miss;
- unsigned cache_bio_sectors;
-
- unsigned recoverable:1;
- unsigned unaligned_bvec:1;
+ unsigned inode;
+ uint16_t write_point;
+ uint16_t write_prio;
+ short error;
- unsigned write:1;
+ unsigned bypass:1;
unsigned writeback:1;
+ unsigned flush_journal:1;
+ unsigned csum:1;
- /* IO error returned to s->bio */
- short error;
- unsigned long start_time;
+ unsigned replace:1;
+ unsigned replace_collision:1;
+
+ unsigned insert_data_done:1;
- /* Anything past op->keys won't get zeroed in do_bio_hook */
- struct btree_op op;
+ /* Anything past this point won't get zeroed in search_alloc() */
+ struct keylist insert_keys;
+ BKEY_PADDED(replace_key);
};
-void bch_cache_read_endio(struct bio *, int);
unsigned bch_get_congested(struct cache_set *);
-void bch_insert_data(struct closure *cl);
-void bch_btree_insert_async(struct closure *);
-void bch_cache_read_endio(struct bio *, int);
-
-void bch_open_buckets_free(struct cache_set *);
-int bch_open_buckets_alloc(struct cache_set *);
+void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index b8730e714d69..84d0782f702e 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -7,7 +7,6 @@
#include "bcache.h"
#include "stats.h"
#include "btree.h"
-#include "request.h"
#include "sysfs.h"
/*
@@ -196,35 +195,36 @@ static void mark_cache_stats(struct cache_stat_collector *stats,
atomic_inc(&stats->cache_bypass_misses);
}
-void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
+void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
+ bool hit, bool bypass)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass);
- mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+ mark_cache_stats(&c->accounting.collector, hit, bypass);
#ifdef CONFIG_CGROUP_BCACHE
mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
#endif
}
-void bch_mark_cache_readahead(struct search *s)
+void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_readaheads);
- atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+ atomic_inc(&c->accounting.collector.cache_readaheads);
}
-void bch_mark_cache_miss_collision(struct search *s)
+void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_miss_collisions);
- atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+ atomic_inc(&c->accounting.collector.cache_miss_collisions);
}
-void bch_mark_sectors_bypassed(struct search *s, int sectors)
+void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
+ int sectors)
{
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
- atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+ atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
}
void bch_cache_accounting_init(struct cache_accounting *acc,
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index c7c7a8fd29fe..adbff141c887 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -38,7 +38,9 @@ struct cache_accounting {
struct cache_stats day;
};
-struct search;
+struct cache_set;
+struct cached_dev;
+struct bcache_device;
void bch_cache_accounting_init(struct cache_accounting *acc,
struct closure *parent);
@@ -50,9 +52,10 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
void bch_cache_accounting_destroy(struct cache_accounting *acc);
-void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass);
-void bch_mark_cache_readahead(struct search *s);
-void bch_mark_cache_miss_collision(struct search *s);
-void bch_mark_sectors_bypassed(struct search *s, int sectors);
+void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
+ bool, bool);
+void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
+void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
+void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
#endif /* _BCACHE_STATS_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 547c4c57b052..c57bfa071a57 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -16,6 +16,7 @@
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
+#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
@@ -45,21 +46,13 @@ const char * const bch_cache_modes[] = {
NULL
};
-struct uuid_entry_v0 {
- uint8_t uuid[16];
- uint8_t label[32];
- uint32_t first_reg;
- uint32_t last_reg;
- uint32_t invalidated;
- uint32_t pad;
-};
-
static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);
-static int bcache_major, bcache_minor;
+static int bcache_major;
+static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
@@ -382,7 +375,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
struct bkey *k = &j->uuid_bucket;
- if (__bch_ptr_invalid(c, 1, k))
+ if (bch_btree_ptr_invalid(c, k))
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
@@ -427,7 +420,7 @@ static int __uuid_write(struct cache_set *c)
lockdep_assert_held(&bch_register_lock);
- if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
+ if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -435,7 +428,7 @@ static int __uuid_write(struct cache_set *c)
closure_sync(&cl);
bkey_copy(&c->uuid_bucket, &k.key);
- __bkey_put(c, &k.key);
+ bkey_put(c, &k.key);
return 0;
}
@@ -562,10 +555,10 @@ void bch_prio_write(struct cache *ca)
}
p->next_bucket = ca->prio_buckets[i + 1];
- p->magic = pset_magic(ca);
+ p->magic = pset_magic(&ca->sb);
p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
- bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
+ bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
@@ -613,7 +606,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
- if (p->magic != pset_magic(ca))
+ if (p->magic != pset_magic(&ca->sb))
pr_warn("bad magic reading priorities");
bucket = p->next_bucket;
@@ -630,7 +623,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
static int open_dev(struct block_device *b, fmode_t mode)
{
struct bcache_device *d = b->bd_disk->private_data;
- if (atomic_read(&d->closing))
+ if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
return -ENXIO;
closure_get(&d->cl);
@@ -659,20 +652,24 @@ static const struct block_device_operations bcache_ops = {
void bcache_device_stop(struct bcache_device *d)
{
- if (!atomic_xchg(&d->closing, 1))
+ if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
closure_queue(&d->cl);
}
static void bcache_device_unlink(struct bcache_device *d)
{
- unsigned i;
- struct cache *ca;
+ lockdep_assert_held(&bch_register_lock);
- sysfs_remove_link(&d->c->kobj, d->name);
- sysfs_remove_link(&d->kobj, "cache");
+ if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
+ unsigned i;
+ struct cache *ca;
- for_each_cache(ca, d->c, i)
- bd_unlink_disk_holder(ca->bdev, d->disk);
+ sysfs_remove_link(&d->c->kobj, d->name);
+ sysfs_remove_link(&d->kobj, "cache");
+
+ for_each_cache(ca, d->c, i)
+ bd_unlink_disk_holder(ca->bdev, d->disk);
+ }
}
static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
@@ -696,19 +693,16 @@ static void bcache_device_detach(struct bcache_device *d)
{
lockdep_assert_held(&bch_register_lock);
- if (atomic_read(&d->detaching)) {
+ if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
struct uuid_entry *u = d->c->uuids + d->id;
SET_UUID_FLASH_ONLY(u, 0);
memcpy(u->uuid, invalid_uuid, 16);
u->invalidated = cpu_to_le32(get_seconds());
bch_uuid_write(d->c);
-
- atomic_set(&d->detaching, 0);
}
- if (!d->flush_done)
- bcache_device_unlink(d);
+ bcache_device_unlink(d);
d->c->devices[d->id] = NULL;
closure_put(&d->c->caching);
@@ -739,14 +733,20 @@ static void bcache_device_free(struct bcache_device *d)
del_gendisk(d->disk);
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
- if (d->disk)
+ if (d->disk) {
+ ida_simple_remove(&bcache_minor, d->disk->first_minor);
put_disk(d->disk);
+ }
bio_split_pool_free(&d->bio_split_hook);
if (d->unaligned_bvec)
mempool_destroy(d->unaligned_bvec);
if (d->bio_split)
bioset_free(d->bio_split);
+ if (is_vmalloc_addr(d->full_dirty_stripes))
+ vfree(d->full_dirty_stripes);
+ else
+ kfree(d->full_dirty_stripes);
if (is_vmalloc_addr(d->stripe_sectors_dirty))
vfree(d->stripe_sectors_dirty);
else
@@ -760,15 +760,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
{
struct request_queue *q;
size_t n;
+ int minor;
- if (!d->stripe_size_bits)
- d->stripe_size_bits = 31;
+ if (!d->stripe_size)
+ d->stripe_size = 1 << 31;
- d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
- d->stripe_size_bits;
+ d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
- if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
+ if (!d->nr_stripes ||
+ d->nr_stripes > INT_MAX ||
+ d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
+ pr_err("nr_stripes too large");
return -ENOMEM;
+ }
n = d->nr_stripes * sizeof(atomic_t);
d->stripe_sectors_dirty = n < PAGE_SIZE << 6
@@ -777,22 +781,38 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->stripe_sectors_dirty)
return -ENOMEM;
+ n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
+ d->full_dirty_stripes = n < PAGE_SIZE << 6
+ ? kzalloc(n, GFP_KERNEL)
+ : vzalloc(n);
+ if (!d->full_dirty_stripes)
+ return -ENOMEM;
+
+ minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
+
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
bio_split_pool_init(&d->bio_split_hook) ||
- !(d->disk = alloc_disk(1)) ||
- !(q = blk_alloc_queue(GFP_KERNEL)))
+ !(d->disk = alloc_disk(1))) {
+ ida_simple_remove(&bcache_minor, minor);
return -ENOMEM;
+ }
set_capacity(d->disk, sectors);
- snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
+ snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
d->disk->major = bcache_major;
- d->disk->first_minor = bcache_minor++;
+ d->disk->first_minor = minor;
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
+ q = blk_alloc_queue(GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
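/*
 * Sketch, not part of this patch: moving from a monotonically increasing
 * "bcache_minor++" to an IDA lets minor numbers be reused after a device
 * is torn down, so repeated register/unregister cycles no longer exhaust
 * the minor space. The allocation pattern used above:
 */
#include <linux/idr.h>
#include <linux/kdev_t.h>

static DEFINE_IDA(example_minor_ida);

static int example_get_minor(void)
{
	/* smallest free id in [0, MINORMASK], or a negative errno */
	return ida_simple_get(&example_minor_ida, 0, MINORMASK + 1,
			      GFP_KERNEL);
}

static void example_put_minor(int minor)
{
	ida_simple_remove(&example_minor_ida, minor);	/* reusable again */
}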
@@ -874,7 +894,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
struct closure cl;
closure_init_stack(&cl);
- BUG_ON(!atomic_read(&dc->disk.detaching));
+ BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(atomic_read(&dc->count));
mutex_lock(&bch_register_lock);
@@ -888,6 +908,8 @@ static void cached_dev_detach_finish(struct work_struct *w)
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
+ clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
+
mutex_unlock(&bch_register_lock);
pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
@@ -900,10 +922,10 @@ void bch_cached_dev_detach(struct cached_dev *dc)
{
lockdep_assert_held(&bch_register_lock);
- if (atomic_read(&dc->disk.closing))
+ if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
return;
- if (atomic_xchg(&dc->disk.detaching, 1))
+ if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
return;
/*
@@ -1030,6 +1052,7 @@ static void cached_dev_free(struct closure *cl)
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
cancel_delayed_work_sync(&dc->writeback_rate_update);
+ kthread_stop(dc->writeback_thread);
mutex_lock(&bch_register_lock);
@@ -1058,11 +1081,7 @@ static void cached_dev_flush(struct closure *cl)
struct bcache_device *d = &dc->disk;
mutex_lock(&bch_register_lock);
- d->flush_done = 1;
-
- if (d->c)
- bcache_device_unlink(d);
-
+ bcache_device_unlink(d);
mutex_unlock(&bch_register_lock);
bch_cache_accounting_destroy(&dc->accounting);
@@ -1088,7 +1107,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
- dc->sequential_merge = true;
dc->sequential_cutoff = 4 << 20;
for (io = dc->io; io < dc->io + RECENT_IO; io++) {
@@ -1260,7 +1278,8 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
va_list args;
- if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ if (c->on_error != ON_ERROR_PANIC &&
+ test_bit(CACHE_SET_STOPPING, &c->flags))
return false;
/* XXX: we can be called from atomic context
@@ -1275,6 +1294,9 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
printk(", disabling caching\n");
+ if (c->on_error == ON_ERROR_PANIC)
+ panic("panic forced after error\n");
+
bch_cache_set_unregister(c);
return true;
}
@@ -1339,6 +1361,9 @@ static void cache_set_flush(struct closure *cl)
kobject_put(&c->internal);
kobject_del(&c->kobj);
+ if (c->gc_thread)
+ kthread_stop(c->gc_thread);
+
if (!IS_ERR_OR_NULL(c->root))
list_add(&c->root->list, &c->btree_cache);
@@ -1433,12 +1458,19 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->sort_crit_factor = int_sqrt(c->btree_pages);
- mutex_init(&c->bucket_lock);
- mutex_init(&c->sort_lock);
- spin_lock_init(&c->sort_time_lock);
closure_init_unlocked(&c->sb_write);
+ mutex_init(&c->bucket_lock);
+ init_waitqueue_head(&c->try_wait);
+ init_waitqueue_head(&c->bucket_wait);
closure_init_unlocked(&c->uuid_write);
- spin_lock_init(&c->btree_read_time_lock);
+ mutex_init(&c->sort_lock);
+
+ spin_lock_init(&c->sort_time.lock);
+ spin_lock_init(&c->btree_gc_time.lock);
+ spin_lock_init(&c->btree_split_time.lock);
+ spin_lock_init(&c->btree_read_time.lock);
+ spin_lock_init(&c->try_harder_time.lock);
+
bch_moving_init_cache_set(c);
INIT_LIST_HEAD(&c->list);
@@ -1483,11 +1515,10 @@ static void run_cache_set(struct cache_set *c)
const char *err = "cannot allocate memory";
struct cached_dev *dc, *t;
struct cache *ca;
+ struct closure cl;
unsigned i;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ closure_init_stack(&cl);
for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets;
@@ -1498,7 +1529,7 @@ static void run_cache_set(struct cache_set *c)
struct jset *j;
err = "cannot allocate memory for journal";
- if (bch_journal_read(c, &journal, &op))
+ if (bch_journal_read(c, &journal))
goto err;
pr_debug("btree_journal_read() done");
@@ -1522,23 +1553,23 @@ static void run_cache_set(struct cache_set *c)
k = &j->btree_root;
err = "bad btree root";
- if (__bch_ptr_invalid(c, j->btree_level + 1, k))
+ if (bch_btree_ptr_invalid(c, k))
goto err;
err = "error reading btree root";
- c->root = bch_btree_node_get(c, k, j->btree_level, &op);
+ c->root = bch_btree_node_get(c, k, j->btree_level, true);
if (IS_ERR_OR_NULL(c->root))
goto err;
list_del_init(&c->root->list);
rw_unlock(true, c->root);
- err = uuid_read(c, j, &op.cl);
+ err = uuid_read(c, j, &cl);
if (err)
goto err;
err = "error in recovery";
- if (bch_btree_check(c, &op))
+ if (bch_btree_check(c))
goto err;
bch_journal_mark(c, &journal);
@@ -1570,11 +1601,9 @@ static void run_cache_set(struct cache_set *c)
if (j->version < BCACHE_JSET_VERSION_UUID)
__uuid_write(c);
- bch_journal_replay(c, &journal, &op);
+ bch_journal_replay(c, &journal);
} else {
pr_notice("invalidating existing data");
- /* Don't want invalidate_buckets() to queue a gc yet */
- closure_lock(&c->gc, NULL);
for_each_cache(ca, c, i) {
unsigned j;
@@ -1600,15 +1629,15 @@ static void run_cache_set(struct cache_set *c)
err = "cannot allocate new UUID bucket";
if (__uuid_write(c))
- goto err_unlock_gc;
+ goto err;
err = "cannot allocate new btree root";
- c->root = bch_btree_node_alloc(c, 0, &op.cl);
+ c->root = bch_btree_node_alloc(c, 0, true);
if (IS_ERR_OR_NULL(c->root))
- goto err_unlock_gc;
+ goto err;
bkey_copy_key(&c->root->key, &MAX_KEY);
- bch_btree_node_write(c->root, &op.cl);
+ bch_btree_node_write(c->root, &cl);
bch_btree_set_root(c->root);
rw_unlock(true, c->root);
@@ -1621,14 +1650,14 @@ static void run_cache_set(struct cache_set *c)
SET_CACHE_SYNC(&c->sb, true);
bch_journal_next(&c->journal);
- bch_journal_meta(c, &op.cl);
-
- /* Unlock */
- closure_set_stopped(&c->gc.cl);
- closure_put(&c->gc.cl);
+ bch_journal_meta(c, &cl);
}
- closure_sync(&op.cl);
+ err = "error starting gc thread";
+ if (bch_gc_thread_start(c))
+ goto err;
+
+ closure_sync(&cl);
c->sb.last_mount = get_seconds();
bcache_write_super(c);
@@ -1638,19 +1667,16 @@ static void run_cache_set(struct cache_set *c)
flash_devs_run(c);
return;
-err_unlock_gc:
- closure_set_stopped(&c->gc.cl);
- closure_put(&c->gc.cl);
err:
- closure_sync(&op.cl);
+ closure_sync(&cl);
/* XXX: test this, it's broken */
- bch_cache_set_error(c, err);
+ bch_cache_set_error(c, "%s", err);
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
return ca->sb.block_size == c->sb.block_size &&
- ca->sb.bucket_size == c->sb.block_size &&
+ ca->sb.bucket_size == c->sb.bucket_size &&
ca->sb.nr_in_set == c->sb.nr_in_set;
}
@@ -1725,8 +1751,6 @@ void bch_cache_release(struct kobject *kobj)
if (ca->set)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
- bch_cache_allocator_exit(ca);
-
bio_split_pool_free(&ca->bio_split_hook);
free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
@@ -1758,8 +1782,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- INIT_LIST_HEAD(&ca->discards);
-
bio_init(&ca->journal.bio);
ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -2006,7 +2028,6 @@ static struct notifier_block reboot = {
static void bcache_exit(void)
{
bch_debug_exit();
- bch_writeback_exit();
bch_request_exit();
bch_btree_exit();
if (bcache_kobj)
@@ -2039,7 +2060,6 @@ static int __init bcache_init(void)
sysfs_create_files(bcache_kobj, files) ||
bch_btree_init() ||
bch_request_init() ||
- bch_writeback_init() ||
bch_debug_init(bcache_kobj))
goto err;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 924dcfdae111..a1f85612f0b3 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -21,6 +21,12 @@ static const char * const cache_replacement_policies[] = {
NULL
};
+static const char * const error_actions[] = {
+ "unregister",
+ "panic",
+ NULL
+};
+
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
@@ -66,7 +72,6 @@ rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff);
-rw_attribute(sequential_merge);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
@@ -78,7 +83,6 @@ rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);
read_attribute(stripe_size);
@@ -90,11 +94,14 @@ rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
+rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
+rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
+rw_attribute(expensive_debug_checks);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
@@ -116,44 +123,54 @@ SHOW(__bch_cached_dev)
sysfs_printf(data_csum, "%i", dc->disk.data_csum);
var_printf(verify, "%i");
+ var_printf(bypass_torture_test, "%i");
var_printf(writeback_metadata, "%i");
var_printf(writeback_running, "%i");
var_print(writeback_delay);
var_print(writeback_percent);
- sysfs_print(writeback_rate, dc->writeback_rate.rate);
+ sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
var_print(writeback_rate_update_seconds);
var_print(writeback_rate_d_term);
var_print(writeback_rate_p_term_inverse);
- var_print(writeback_rate_d_smooth);
if (attr == &sysfs_writeback_rate_debug) {
+ char rate[20];
char dirty[20];
- char derivative[20];
char target[20];
- bch_hprint(dirty,
- bcache_dev_sectors_dirty(&dc->disk) << 9);
- bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ char proportional[20];
+ char derivative[20];
+ char change[20];
+ s64 next_io;
+
+ bch_hprint(rate, dc->writeback_rate.rate << 9);
+ bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
+ bch_hprint(proportional, dc->writeback_rate_proportional << 9);
+ bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(change, dc->writeback_rate_change << 9);
+
+ next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+ NSEC_PER_MSEC);
return sprintf(buf,
- "rate:\t\t%u\n"
- "change:\t\t%i\n"
+ "rate:\t\t%s/sec\n"
"dirty:\t\t%s\n"
+ "target:\t\t%s\n"
+ "proportional:\t%s\n"
"derivative:\t%s\n"
- "target:\t\t%s\n",
- dc->writeback_rate.rate,
- dc->writeback_rate_change,
- dirty, derivative, target);
+ "change:\t\t%s/sec\n"
+ "next io:\t%llims\n",
+ rate, dirty, target, proportional,
+ derivative, change, next_io);
}
sysfs_hprint(dirty_data,
bcache_dev_sectors_dirty(&dc->disk) << 9);
- sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9);
+ sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
var_printf(partial_stripes_expensive, "%u");
- var_printf(sequential_merge, "%i");
var_hprint(sequential_cutoff);
var_hprint(readahead);
@@ -181,25 +198,25 @@ STORE(__cached_dev)
struct kobj_uevent_env *env;
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
sysfs_strtoul(data_csum, dc->disk.data_csum);
d_strtoul(verify);
+ d_strtoul(bypass_torture_test);
d_strtoul(writeback_metadata);
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
- sysfs_strtoul_clamp(writeback_rate,
- dc->writeback_rate.rate, 1, 1000000);
+
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
- d_strtoul(writeback_rate_update_seconds);
+ sysfs_strtoul_clamp(writeback_rate,
+ dc->writeback_rate.rate, 1, INT_MAX);
+
+ d_strtoul_nonzero(writeback_rate_update_seconds);
d_strtoul(writeback_rate_d_term);
- d_strtoul(writeback_rate_p_term_inverse);
- sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
- dc->writeback_rate_p_term_inverse, 1, INT_MAX);
- d_strtoul(writeback_rate_d_smooth);
+ d_strtoul_nonzero(writeback_rate_p_term_inverse);
- d_strtoul(sequential_merge);
d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead);
@@ -305,13 +322,11 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_update_seconds,
&sysfs_writeback_rate_d_term,
&sysfs_writeback_rate_p_term_inverse,
- &sysfs_writeback_rate_d_smooth,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
&sysfs_stripe_size,
&sysfs_partial_stripes_expensive,
&sysfs_sequential_cutoff,
- &sysfs_sequential_merge,
&sysfs_clear_stats,
&sysfs_running,
&sysfs_state,
@@ -319,6 +334,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
+ &sysfs_bypass_torture_test,
#endif
NULL
};
@@ -366,7 +382,7 @@ STORE(__bch_flash_dev)
}
if (attr == &sysfs_unregister) {
- atomic_set(&d->detaching, 1);
+ set_bit(BCACHE_DEV_DETACHING, &d->flags);
bcache_device_stop(d);
}
@@ -481,7 +497,6 @@ lock_root:
sysfs_print(btree_used_percent, btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes);
- sysfs_hprint(dirty_data, c->gc_stats.dirty);
sysfs_hprint(average_key_size, average_key_size(c));
sysfs_print(cache_read_races,
@@ -492,6 +507,10 @@ lock_root:
sysfs_print(writeback_keys_failed,
atomic_long_read(&c->writeback_keys_failed));
+ if (attr == &sysfs_errors)
+ return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
+ c->on_error);
+
/* See count_io_errors for why 88 */
sysfs_print(io_error_halflife, c->error_decay * 88);
sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);
@@ -506,6 +525,8 @@ lock_root:
sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
sysfs_printf(verify, "%i", c->verify);
sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
+ sysfs_printf(expensive_debug_checks,
+ "%i", c->expensive_debug_checks);
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
@@ -555,7 +576,7 @@ STORE(__bch_cache_set)
}
if (attr == &sysfs_trigger_gc)
- bch_queue_gc(c);
+ wake_up_gc(c);
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
@@ -569,6 +590,15 @@ STORE(__bch_cache_set)
sysfs_strtoul(congested_write_threshold_us,
c->congested_write_threshold_us);
+ if (attr == &sysfs_errors) {
+ ssize_t v = bch_read_string_list(buf, error_actions);
+
+ if (v < 0)
+ return v;
+
+ c->on_error = v;
+ }
+
if (attr == &sysfs_io_error_limit)
c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
@@ -579,6 +609,7 @@ STORE(__bch_cache_set)
sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
sysfs_strtoul(verify, c->verify);
sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
+ sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);
@@ -618,8 +649,8 @@ static struct attribute *bch_cache_set_files[] = {
&sysfs_cache_available_percent,
&sysfs_average_key_size,
- &sysfs_dirty_data,
+ &sysfs_errors,
&sysfs_io_error_limit,
&sysfs_io_error_halflife,
&sysfs_congested,
@@ -653,6 +684,7 @@ static struct attribute *bch_cache_set_internal_files[] = {
#ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify,
&sysfs_key_merging_disabled,
+ &sysfs_expensive_debug_checks,
#endif
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index f7b6c197f90f..adbc3df17a80 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -1,6 +1,5 @@
#include "bcache.h"
#include "btree.h"
-#include "request.h"
#include <linux/blktrace_api.h>
#include <linux/module.h>
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 420dad545c7d..bb37618e7664 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -168,10 +168,14 @@ int bch_parse_uuid(const char *s, char *uuid)
void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{
- uint64_t now = local_clock();
- uint64_t duration = time_after64(now, start_time)
+ uint64_t now, duration, last;
+
+ spin_lock(&stats->lock);
+
+ now = local_clock();
+ duration = time_after64(now, start_time)
? now - start_time : 0;
- uint64_t last = time_after64(now, stats->last)
+ last = time_after64(now, stats->last)
? now - stats->last : 0;
stats->max_duration = max(stats->max_duration, duration);
@@ -188,6 +192,8 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
}
stats->last = now ?: 1;
+
+ spin_unlock(&stats->lock);
}
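/*
 * Note, not part of this patch: bch_time_stats_update() can now be called
 * concurrently against the same stats struct (gc, btree splits, reads), so
 * the whole read-modify-write of last/max and the EWMAs must be one
 * critical section; sampling local_clock() inside the lock also keeps
 * 'last' monotonic with respect to the stored durations.
 */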
/**
@@ -203,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
uint64_t now = local_clock();
- d->next += div_u64(done, d->rate);
+ d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+ if (time_before64(now + NSEC_PER_SEC, d->next))
+ d->next = now + NSEC_PER_SEC;
+
+ if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+ d->next = now - NSEC_PER_SEC * 2;
return time_after64(d->next, now)
? div_u64(d->next - now, NSEC_PER_SEC / HZ)
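/*
 * Illustrative sketch, not part of this patch: bch_next_delay() is a
 * token-bucket style limiter, and d->rate now means "units per second"
 * rather than "units per nanosecond". Each completed chunk of work pushes
 * the next deadline forward by done * NSEC_PER_SEC / rate, and the two
 * new clamps bound the deadline to (now - 2s, now + 1s) so neither idle
 * credit nor backlog debt can grow without limit. A userspace model:
 */
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratelimit_model {
	uint64_t next;		/* ns timestamp work may next proceed */
	uint64_t rate;		/* units per second, must be nonzero */
};

/* returns how many ns the caller should wait before doing more work */
static uint64_t next_delay_model(struct ratelimit_model *d, uint64_t done,
				 uint64_t now)
{
	d->next += done * NSEC_PER_SEC / d->rate;

	if (d->next > now + NSEC_PER_SEC)
		d->next = now + NSEC_PER_SEC;	  /* cap future debt at 1s */
	if (now > d->next + 2 * NSEC_PER_SEC)
		d->next = now - 2 * NSEC_PER_SEC; /* cap banked credit at 2s */

	return d->next > now ? d->next - now : 0;
}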
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ea345c6896f4..1030c6020e98 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -15,28 +15,18 @@
struct closure;
-#ifdef CONFIG_BCACHE_EDEBUG
+#ifdef CONFIG_BCACHE_DEBUG
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)
-#else /* EDEBUG */
+#else /* DEBUG */
#define atomic_dec_bug(v) atomic_dec(v)
#define atomic_inc_bug(v, i) atomic_inc(v)
#endif
-#define BITMASK(name, type, field, offset, size) \
-static inline uint64_t name(const type *k) \
-{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); } \
- \
-static inline void SET_##name(type *k, uint64_t v) \
-{ \
- k->field &= ~(~((uint64_t) ~0 << size) << offset); \
- k->field |= v << offset; \
-}
-
#define DECLARE_HEAP(type, name) \
struct { \
size_t size, used; \
@@ -120,7 +110,7 @@ do { \
_r; \
})
-#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
#define heap_full(h) ((h)->used == (h)->size)
@@ -388,6 +378,7 @@ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[
ssize_t bch_read_string_list(const char *buf, const char * const list[]);
struct time_stats {
+ spinlock_t lock;
/*
* all fields are in nanoseconds, averages are ewmas stored left shifted
* by 8
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index ba3ee48320f2..6c44fe059c27 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -11,18 +11,11 @@
#include "debug.h"
#include "writeback.h"
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <trace/events/bcache.h>
-static struct workqueue_struct *dirty_wq;
-
-static void read_dirty(struct closure *);
-
-struct dirty_io {
- struct closure cl;
- struct cached_dev *dc;
- struct bio bio;
-};
-
/* Rate limiting */
static void __update_writeback_rate(struct cached_dev *dc)
@@ -37,44 +30,43 @@ static void __update_writeback_rate(struct cached_dev *dc)
/* PD controller */
- int change = 0;
- int64_t error;
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+ int64_t proportional = dirty - target;
+ int64_t change;
dc->disk.sectors_dirty_last = dirty;
- derivative *= dc->writeback_rate_d_term;
- derivative = clamp(derivative, -dirty, dirty);
+ /* Scale to sectors per second */
- derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
- dc->writeback_rate_d_smooth, 0);
+ proportional *= dc->writeback_rate_update_seconds;
+ proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
- /* Avoid divide by zero */
- if (!target)
- goto out;
+ derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
- error = div64_s64((dirty + derivative - target) << 8, target);
+ derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+ (dc->writeback_rate_d_term /
+ dc->writeback_rate_update_seconds) ?: 1, 0);
+
+ derivative *= dc->writeback_rate_d_term;
+ derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
- change = div_s64((dc->writeback_rate.rate * error) >> 8,
- dc->writeback_rate_p_term_inverse);
+ change = proportional + derivative;
/* Don't increase writeback rate if the device isn't keeping up */
if (change > 0 &&
time_after64(local_clock(),
- dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+ dc->writeback_rate.next + NSEC_PER_MSEC))
change = 0;
dc->writeback_rate.rate =
- clamp_t(int64_t, dc->writeback_rate.rate + change,
+ clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
1, NSEC_PER_MSEC);
-out:
+
+ dc->writeback_rate_proportional = proportional;
dc->writeback_rate_derivative = derivative;
dc->writeback_rate_change = change;
dc->writeback_rate_target = target;
-
- schedule_delayed_work(&dc->writeback_rate_update,
- dc->writeback_rate_update_seconds * HZ);
}
static void update_writeback_rate(struct work_struct *work)
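The rewritten controller is a plain proportional-derivative loop on the dirty-sector count: the proportional term is the distance from the target scaled by the update interval and divided by p_term_inverse, and the derivative term is a smoothed rate of change of that count. A userspace sketch of the integer arithmetic, using the new default constants and a simplified running average in place of the kernel's ewma_add() helper:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the PD update with the new defaults (update every 5s,
 * p_term_inverse 6000, d_term 30). All integer math, sectors/second. */
static int64_t pd_update(int64_t dirty, int64_t target,
			 int64_t *last_dirty, int64_t *deriv_ewma)
{
	const int64_t update_seconds = 5, p_inverse = 6000, d_term = 30;

	int64_t proportional = (dirty - target) * update_seconds / p_inverse;
	int64_t derivative = (dirty - *last_dirty) / update_seconds;

	*last_dirty = dirty;

	/* Smooth the derivative so one bursty interval doesn't whipsaw
	 * the rate; the ?: mirrors the kernel's divide-by-zero guard. */
	*deriv_ewma += (derivative - *deriv_ewma) /
		       ((d_term / update_seconds) ?: 1);
	derivative = *deriv_ewma * d_term / p_inverse;

	return proportional + derivative;   /* change to apply to the rate */
}

int main(void)
{
	int64_t last = 0, ewma = 0;
	printf("change: %lld\n", (long long)pd_update(120000, 100000, &last, &ewma));
	return 0;
}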
@@ -90,52 +82,25 @@ static void update_writeback_rate(struct work_struct *work)
__update_writeback_rate(dc);
up_read(&dc->writeback_lock);
+
+ schedule_delayed_work(&dc->writeback_rate_update,
+ dc->writeback_rate_update_seconds * HZ);
}
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
- uint64_t ret;
-
- if (atomic_read(&dc->disk.detaching) ||
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
return 0;
- ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
- return min_t(uint64_t, ret, HZ);
-}
-
-/* Background writeback */
-
-static bool dirty_pred(struct keybuf *buf, struct bkey *k)
-{
- return KEY_DIRTY(k);
+ return bch_next_delay(&dc->writeback_rate, sectors);
}
-static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
-{
- uint64_t stripe;
- unsigned nr_sectors = KEY_SIZE(k);
- struct cached_dev *dc = container_of(buf, struct cached_dev,
- writeback_keys);
- unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
-
- if (!KEY_DIRTY(k))
- return false;
-
- stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
- while (1) {
- if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
- stripe_size)
- return false;
-
- if (nr_sectors <= stripe_size)
- return true;
-
- nr_sectors -= stripe_size;
- stripe++;
- }
-}
+struct dirty_io {
+ struct closure cl;
+ struct cached_dev *dc;
+ struct bio bio;
+};
static void dirty_init(struct keybuf_key *w)
{
@@ -153,131 +118,6 @@ static void dirty_init(struct keybuf_key *w)
bch_bio_map(bio, NULL);
}
-static void refill_dirty(struct closure *cl)
-{
- struct cached_dev *dc = container_of(cl, struct cached_dev,
- writeback.cl);
- struct keybuf *buf = &dc->writeback_keys;
- bool searched_from_start = false;
- struct bkey end = MAX_KEY;
- SET_KEY_INODE(&end, dc->disk.id);
-
- if (!atomic_read(&dc->disk.detaching) &&
- !dc->writeback_running)
- closure_return(cl);
-
- down_write(&dc->writeback_lock);
-
- if (!atomic_read(&dc->has_dirty)) {
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
- bch_write_bdev_super(dc, NULL);
-
- up_write(&dc->writeback_lock);
- closure_return(cl);
- }
-
- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
- buf->last_scanned = KEY(dc->disk.id, 0, 0);
- searched_from_start = true;
- }
-
- if (dc->partial_stripes_expensive) {
- uint64_t i;
-
- for (i = 0; i < dc->disk.nr_stripes; i++)
- if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
- 1 << dc->disk.stripe_size_bits)
- goto full_stripes;
-
- goto normal_refill;
-full_stripes:
- bch_refill_keybuf(dc->disk.c, buf, &end,
- dirty_full_stripe_pred);
- } else {
-normal_refill:
- bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
- }
-
- if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
- /* Searched the entire btree - delay awhile */
-
- if (RB_EMPTY_ROOT(&buf->keys)) {
- atomic_set(&dc->has_dirty, 0);
- cached_dev_put(dc);
- }
-
- if (!atomic_read(&dc->disk.detaching))
- closure_delay(&dc->writeback, dc->writeback_delay * HZ);
- }
-
- up_write(&dc->writeback_lock);
-
- bch_ratelimit_reset(&dc->writeback_rate);
-
- /* Punt to workqueue only so we don't recurse and blow the stack */
- continue_at(cl, read_dirty, dirty_wq);
-}
-
-void bch_writeback_queue(struct cached_dev *dc)
-{
- if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
- if (!atomic_read(&dc->disk.detaching))
- closure_delay(&dc->writeback, dc->writeback_delay * HZ);
-
- continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
- }
-}
-
-void bch_writeback_add(struct cached_dev *dc)
-{
- if (!atomic_read(&dc->has_dirty) &&
- !atomic_xchg(&dc->has_dirty, 1)) {
- atomic_inc(&dc->count);
-
- if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
- SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
- /* XXX: should do this synchronously */
- bch_write_bdev_super(dc, NULL);
- }
-
- bch_writeback_queue(dc);
-
- if (dc->writeback_percent)
- schedule_delayed_work(&dc->writeback_rate_update,
- dc->writeback_rate_update_seconds * HZ);
- }
-}
-
-void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
- uint64_t offset, int nr_sectors)
-{
- struct bcache_device *d = c->devices[inode];
- unsigned stripe_size, stripe_offset;
- uint64_t stripe;
-
- if (!d)
- return;
-
- stripe_size = 1 << d->stripe_size_bits;
- stripe = offset >> d->stripe_size_bits;
- stripe_offset = offset & (stripe_size - 1);
-
- while (nr_sectors) {
- int s = min_t(unsigned, abs(nr_sectors),
- stripe_size - stripe_offset);
-
- if (nr_sectors < 0)
- s = -s;
-
- atomic_add(s, d->stripe_sectors_dirty + stripe);
- nr_sectors -= s;
- stripe_offset = 0;
- stripe++;
- }
-}
-
-/* Background writeback - IO loop */
-
static void dirty_io_destructor(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
@@ -297,26 +137,25 @@ static void write_dirty_finish(struct closure *cl)
/* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) {
+ int ret;
unsigned i;
- struct btree_op op;
- bch_btree_op_init_stack(&op);
+ struct keylist keys;
- op.type = BTREE_REPLACE;
- bkey_copy(&op.replace, &w->key);
+ bch_keylist_init(&keys);
- SET_KEY_DIRTY(&w->key, false);
- bch_keylist_add(&op.keys, &w->key);
+ bkey_copy(keys.top, &w->key);
+ SET_KEY_DIRTY(keys.top, false);
+ bch_keylist_push(&keys);
for (i = 0; i < KEY_PTRS(&w->key); i++)
atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
- bch_btree_insert(&op, dc->disk.c);
- closure_sync(&op.cl);
+ ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
- if (op.insert_collision)
+ if (ret)
trace_bcache_writeback_collision(&w->key);
- atomic_long_inc(op.insert_collision
+ atomic_long_inc(ret
? &dc->disk.c->writeback_keys_failed
: &dc->disk.c->writeback_keys_done);
}
@@ -374,30 +213,33 @@ static void read_dirty_submit(struct closure *cl)
continue_at(cl, write_dirty, system_wq);
}
-static void read_dirty(struct closure *cl)
+static void read_dirty(struct cached_dev *dc)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev,
- writeback.cl);
- unsigned delay = writeback_delay(dc, 0);
+ unsigned delay = 0;
struct keybuf_key *w;
struct dirty_io *io;
+ struct closure cl;
+
+ closure_init_stack(&cl);
/*
* XXX: if we error, background writeback just spins. Should use some
* mempools.
*/
- while (1) {
+ while (!kthread_should_stop()) {
+ try_to_freeze();
+
w = bch_keybuf_next(&dc->writeback_keys);
if (!w)
break;
BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
- if (delay > 0 &&
- (KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50))
- delay = schedule_timeout_uninterruptible(delay);
+ if (KEY_START(&w->key) != dc->last_read ||
+ jiffies_to_msecs(delay) > 50)
+ while (!kthread_should_stop() && delay)
+ delay = schedule_timeout_uninterruptible(delay);
dc->last_read = KEY_OFFSET(&w->key);
@@ -423,7 +265,7 @@ static void read_dirty(struct closure *cl)
trace_bcache_writeback(&w->key);
down(&dc->in_flight);
- closure_call(&io->cl, read_dirty_submit, NULL, cl);
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
delay = writeback_delay(dc, KEY_SIZE(&w->key));
}
@@ -439,52 +281,207 @@ err:
* Wait for outstanding writeback IOs to finish (and keybuf slots to be
* freed) before refilling again
*/
- continue_at(cl, refill_dirty, dirty_wq);
+ closure_sync(&cl);
}
-/* Init */
+/* Scan for dirty data */
+
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+ uint64_t offset, int nr_sectors)
+{
+ struct bcache_device *d = c->devices[inode];
+ unsigned stripe_offset, stripe, sectors_dirty;
+
+ if (!d)
+ return;
+
+ stripe = offset_to_stripe(d, offset);
+ stripe_offset = offset & (d->stripe_size - 1);
+
+ while (nr_sectors) {
+ int s = min_t(unsigned, abs(nr_sectors),
+ d->stripe_size - stripe_offset);
+
+ if (nr_sectors < 0)
+ s = -s;
+
+ if (stripe >= d->nr_stripes)
+ return;
+
+ sectors_dirty = atomic_add_return(s,
+ d->stripe_sectors_dirty + stripe);
+ if (sectors_dirty == d->stripe_size)
+ set_bit(stripe, d->full_dirty_stripes);
+ else
+ clear_bit(stripe, d->full_dirty_stripes);
-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
- struct cached_dev *dc)
+ nr_sectors -= s;
+ stripe_offset = 0;
+ stripe++;
+ }
+}
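The accounting now maintains two views per stripe: an atomic count of dirty sectors, and a summary bit in full_dirty_stripes that is set exactly when the count reaches stripe_size, letting the writeback scanner find completely dirty stripes with a bitmap walk instead of reading every counter. A standalone model of that invariant using C11 atomics and a hypothetical fixed geometry:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_STRIPES   8
#define STRIPE_SIZE  16   /* sectors per stripe (hypothetical) */

static atomic_int stripe_sectors_dirty[NR_STRIPES];
static bool full_dirty_stripes[NR_STRIPES];

/* Add (or, with negative nr_sectors, subtract) dirty sectors starting at
 * offset, splitting the run across stripe boundaries and keeping the
 * "full" summary bit in sync with the counter. */
static void sectors_dirty_add(unsigned offset, int nr_sectors)
{
	unsigned stripe = offset / STRIPE_SIZE;
	unsigned stripe_offset = offset % STRIPE_SIZE;

	while (nr_sectors) {
		int s = abs(nr_sectors);
		if (s > (int)(STRIPE_SIZE - stripe_offset))
			s = STRIPE_SIZE - stripe_offset;
		if (nr_sectors < 0)
			s = -s;

		if (stripe >= NR_STRIPES)
			return;

		int dirty = atomic_fetch_add(&stripe_sectors_dirty[stripe], s) + s;
		full_dirty_stripes[stripe] = (dirty == STRIPE_SIZE);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

int main(void)
{
	sectors_dirty_add(0, 40);   /* fills stripes 0 and 1, half of stripe 2 */
	for (unsigned i = 0; i < 4; i++)
		printf("stripe %u: %d%s\n", i,
		       atomic_load(&stripe_sectors_dirty[i]),
		       full_dirty_stripes[i] ? " (full)" : "");
	return 0;
}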
+
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
- struct bkey *k;
- struct btree_iter iter;
-
- bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
- while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
- if (!b->level) {
- if (KEY_INODE(k) > dc->disk.id)
- break;
-
- if (KEY_DIRTY(k))
- bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
- KEY_START(k),
- KEY_SIZE(k));
- } else {
- btree(sectors_dirty_init, k, b, op, dc);
- if (KEY_INODE(k) > dc->disk.id)
- break;
-
- cond_resched();
+ return KEY_DIRTY(k);
+}
+
+static void refill_full_stripes(struct cached_dev *dc)
+{
+ struct keybuf *buf = &dc->writeback_keys;
+ unsigned start_stripe, stripe, next_stripe;
+ bool wrapped = false;
+
+ stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
+
+ if (stripe >= dc->disk.nr_stripes)
+ stripe = 0;
+
+ start_stripe = stripe;
+
+ while (1) {
+ stripe = find_next_bit(dc->disk.full_dirty_stripes,
+ dc->disk.nr_stripes, stripe);
+
+ if (stripe == dc->disk.nr_stripes)
+ goto next;
+
+ next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
+ dc->disk.nr_stripes, stripe);
+
+ buf->last_scanned = KEY(dc->disk.id,
+ stripe * dc->disk.stripe_size, 0);
+
+ bch_refill_keybuf(dc->disk.c, buf,
+ &KEY(dc->disk.id,
+ next_stripe * dc->disk.stripe_size, 0),
+ dirty_pred);
+
+ if (array_freelist_empty(&buf->freelist))
+ return;
+
+ stripe = next_stripe;
+next:
+ if (wrapped && stripe > start_stripe)
+ return;
+
+ if (stripe == dc->disk.nr_stripes) {
+ stripe = 0;
+ wrapped = true;
}
+ }
+}
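refill_full_stripes() walks the bitmap as runs: find_next_bit() locates the start of a run of full stripes, find_next_zero_bit() its end, and the scan resumes from last_scanned, wrapping around once so the whole device is considered per pass. The sketch below keeps that shape but tightens the seam so each run is visited exactly once (the kernel version may refill the run straddling the starting point twice, which is harmless since refilling is repeatable):

#include <stdbool.h>
#include <stdio.h>

#define NR 16

/* Naive stand-in for find_next_bit()/find_next_zero_bit(). */
static unsigned next_bit(const bool *map, unsigned n, unsigned from, bool val)
{
	while (from < n && map[from] != val)
		from++;
	return from;   /* == n if no such bit */
}

/* One wrapped pass, visiting each [start, end) run of set bits once,
 * beginning wherever the previous pass left off. */
static void for_each_run(const bool *map, unsigned n, unsigned start)
{
	unsigned stripe = start < n ? start : 0;
	bool wrapped = false;

	start = stripe;
	while (1) {
		stripe = next_bit(map, n, stripe, true);

		if (wrapped && stripe >= start)
			return;           /* back at (or past) where we began */

		if (stripe == n) {
			stripe = 0;       /* wrap around, once */
			wrapped = true;
			continue;
		}

		unsigned end = next_bit(map, n, stripe, false);
		printf("run [%u, %u)\n", stripe, end);
		stripe = end;
	}
}

int main(void)
{
	bool map[NR] = { [2] = 1, [3] = 1, [9] = 1 };
	for_each_run(map, NR, 8);   /* visits [9,10), wraps, then [2,4) */
	return 0;
}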
+
+static bool refill_dirty(struct cached_dev *dc)
+{
+ struct keybuf *buf = &dc->writeback_keys;
+ struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+ bool searched_from_start = false;
+
+ if (dc->partial_stripes_expensive) {
+ refill_full_stripes(dc);
+ if (array_freelist_empty(&buf->freelist))
+ return false;
+ }
+
+ if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+ buf->last_scanned = KEY(dc->disk.id, 0, 0);
+ searched_from_start = true;
+ }
+
+ bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+
+ return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
+}
+
+static int bch_writeback_thread(void *arg)
+{
+ struct cached_dev *dc = arg;
+ bool searched_full_index;
+
+ while (!kthread_should_stop()) {
+ down_write(&dc->writeback_lock);
+ if (!atomic_read(&dc->has_dirty) ||
+ (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+ !dc->writeback_running)) {
+ up_write(&dc->writeback_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop())
+ return 0;
+
+ try_to_freeze();
+ schedule();
+ continue;
+ }
+
+ searched_full_index = refill_dirty(dc);
+
+ if (searched_full_index &&
+ RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+ atomic_set(&dc->has_dirty, 0);
+ cached_dev_put(dc);
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+ bch_write_bdev_super(dc, NULL);
+ }
+
+ up_write(&dc->writeback_lock);
+
+ bch_ratelimit_reset(&dc->writeback_rate);
+ read_dirty(dc);
+
+ if (searched_full_index) {
+ unsigned delay = dc->writeback_delay * HZ;
+
+ while (delay &&
+ !kthread_should_stop() &&
+ !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+ delay = schedule_timeout_uninterruptible(delay);
+ }
+ }
return 0;
}
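This is the standard kernel-thread worker idiom: publish TASK_INTERRUPTIBLE before re-checking the sleep condition and kthread_should_stop(), so a wake_up_process() or kthread_stop() racing with the check leaves the task runnable and schedule() returns immediately instead of losing the wakeup. Condensed to its essentials (a sketch, not a buildable module; struct my_dev, work_available() and do_one_batch() are hypothetical):

static int worker_thread(void *arg)
{
	struct my_dev *d = arg;

	while (!kthread_should_stop()) {
		if (!work_available(d)) {
			/* Publish "about to sleep" first: any waker after
			 * this point sets us back to TASK_RUNNING, so the
			 * schedule() below returns straight away. */
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop()) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}

			try_to_freeze();
			schedule();
			continue;
		}

		do_one_batch(d);
	}

	return 0;
}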
+/* Init */
+
+struct sectors_dirty_init {
+ struct btree_op op;
+ unsigned inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+ struct bkey *k)
+{
+ struct sectors_dirty_init *op = container_of(_op,
+ struct sectors_dirty_init, op);
+ if (KEY_INODE(k) > op->inode)
+ return MAP_DONE;
+
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+ KEY_START(k), KEY_SIZE(k));
+
+ return MAP_CONTINUE;
+}
+
void bch_sectors_dirty_init(struct cached_dev *dc)
{
- struct btree_op op;
+ struct sectors_dirty_init op;
+
+ bch_btree_op_init(&op.op, -1);
+ op.inode = dc->disk.id;
- bch_btree_op_init_stack(&op);
- btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+ bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn, 0);
+
+ dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}
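bch_sectors_dirty_init() now rides the generic bch_btree_map_keys() walk: the btree code visits keys in order from the given start point and interprets the callback's return value, continuing on MAP_CONTINUE and stopping early on MAP_DONE (here, once keys belong to a later inode). The same visitor pattern in miniature, with a sorted array standing in for the btree and assumed values for the two constants:

#include <stdio.h>

enum { MAP_DONE = 0, MAP_CONTINUE = 1 };   /* assumed values */

struct key { unsigned inode, size; };

typedef int (*map_fn)(void *ctx, const struct key *k);

/* Walk keys in order from the first with key->inode >= start_inode,
 * stopping as soon as the callback returns MAP_DONE. */
static void map_keys(const struct key *keys, unsigned n,
		     unsigned start_inode, map_fn fn, void *ctx)
{
	for (unsigned i = 0; i < n; i++) {
		if (keys[i].inode < start_inode)
			continue;
		if (fn(ctx, &keys[i]) == MAP_DONE)
			break;
	}
}

static int count_inode_sectors(void *ctx, const struct key *k)
{
	unsigned *state = ctx;   /* state[0] = inode, state[1] = total */

	if (k->inode > state[0])
		return MAP_DONE;   /* walked past our inode: stop early */

	state[1] += k->size;
	return MAP_CONTINUE;
}

int main(void)
{
	struct key keys[] = { {1, 8}, {2, 16}, {2, 4}, {3, 32} };
	unsigned state[2] = { 2, 0 };

	map_keys(keys, 4, 2, count_inode_sectors, state);
	printf("inode 2: %u sectors\n", state[1]);   /* 20 */
	return 0;
}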
-void bch_cached_dev_writeback_init(struct cached_dev *dc)
+int bch_cached_dev_writeback_init(struct cached_dev *dc)
{
sema_init(&dc->in_flight, 64);
- closure_init_unlocked(&dc->writeback);
init_rwsem(&dc->writeback_lock);
-
bch_keybuf_init(&dc->writeback_keys);
dc->writeback_metadata = true;
@@ -493,27 +490,18 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_delay = 30;
dc->writeback_rate.rate = 1024;
- dc->writeback_rate_update_seconds = 30;
- dc->writeback_rate_d_term = 16;
- dc->writeback_rate_p_term_inverse = 64;
- dc->writeback_rate_d_smooth = 8;
+ dc->writeback_rate_update_seconds = 5;
+ dc->writeback_rate_d_term = 30;
+ dc->writeback_rate_p_term_inverse = 6000;
+
+ dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+ "bcache_writeback");
+ if (IS_ERR(dc->writeback_thread))
+ return PTR_ERR(dc->writeback_thread);
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
-}
-
-void bch_writeback_exit(void)
-{
- if (dirty_wq)
- destroy_workqueue(dirty_wq);
-}
-
-int __init bch_writeback_init(void)
-{
- dirty_wq = create_workqueue("bcache_writeback");
- if (!dirty_wq)
- return -ENOMEM;
return 0;
}
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c91f61bb95b6..c9ddcf4614b9 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -14,20 +14,27 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
-static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
+static inline unsigned offset_to_stripe(struct bcache_device *d,
+ uint64_t offset)
+{
+ do_div(offset, d->stripe_size);
+ return offset;
+}
+
+static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset,
unsigned nr_sectors)
{
- uint64_t stripe = offset >> d->stripe_size_bits;
+ unsigned stripe = offset_to_stripe(&dc->disk, offset);
while (1) {
- if (atomic_read(d->stripe_sectors_dirty + stripe))
+ if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
return true;
- if (nr_sectors <= 1 << d->stripe_size_bits)
+ if (nr_sectors <= dc->disk.stripe_size)
return false;
- nr_sectors -= 1 << d->stripe_size_bits;
+ nr_sectors -= dc->disk.stripe_size;
stripe++;
}
}
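offset_to_stripe() replaces the old shift because stripe_size need no longer be a power of two. Note the do_div() idiom: it divides its first argument in place (so 32-bit kernels get a proper 64-bit division) and returns the remainder, meaning the function above returns the quotient. The plain-C equivalent:

#include <stdint.h>
#include <stdio.h>

/* do_div(offset, stripe_size) turns offset into the quotient and hands
 * back the remainder, which offset_to_stripe() discards. */
static unsigned offset_to_stripe(uint64_t offset, unsigned stripe_size)
{
	return (unsigned)(offset / stripe_size);
}

int main(void)
{
	printf("%u\n", offset_to_stripe(70000, 16384));   /* 4 */
	return 0;
}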
@@ -38,12 +45,12 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
unsigned in_use = dc->disk.c->gc_stats.in_use;
if (cache_mode != CACHE_MODE_WRITEBACK ||
- atomic_read(&dc->disk.detaching) ||
+ test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
in_use > CUTOFF_WRITEBACK_SYNC)
return false;
if (dc->partial_stripes_expensive &&
- bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
+ bcache_dev_stripe_dirty(dc, bio->bi_sector,
bio_sectors(bio)))
return true;
@@ -54,11 +61,30 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
in_use <= CUTOFF_WRITEBACK;
}
+static inline void bch_writeback_queue(struct cached_dev *dc)
+{
+ wake_up_process(dc->writeback_thread);
+}
+
+static inline void bch_writeback_add(struct cached_dev *dc)
+{
+ if (!atomic_read(&dc->has_dirty) &&
+ !atomic_xchg(&dc->has_dirty, 1)) {
+ atomic_inc(&dc->count);
+
+ if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+ SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+ /* XXX: should do this synchronously */
+ bch_write_bdev_super(dc, NULL);
+ }
+
+ bch_writeback_queue(dc);
+ }
+}
+
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *);
void bch_sectors_dirty_init(struct cached_dev *dc);
-void bch_cached_dev_writeback_init(struct cached_dev *);
+int bch_cached_dev_writeback_init(struct cached_dev *);
#endif
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index a7fd82133b12..12dc29ba7399 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1654,9 +1654,9 @@ int bitmap_create(struct mddev *mddev)
bitmap->mddev = mddev;
if (mddev->kobj.sd)
- bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
+ bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
if (bm) {
- bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
+ bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
sysfs_put(bm);
} else
bitmap->sysfs_can_clear = NULL;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 173cbb20d104..54bdd923316f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void)
{
__u64 mem;
+ dm_bufio_allocated_kmem_cache = 0;
+ dm_bufio_allocated_get_free_pages = 0;
+ dm_bufio_allocated_vmalloc = 0;
+ dm_bufio_current_allocated = 0;
+
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 1af7255bbffb..9ef0752e8a08 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -20,7 +20,13 @@
#define CACHE_SUPERBLOCK_MAGIC 06142003
#define CACHE_SUPERBLOCK_LOCATION 0
-#define CACHE_VERSION 1
+
+/*
+ * Defines a range of metadata versions that this module can handle.
+ */
+#define MIN_CACHE_VERSION 1
+#define MAX_CACHE_VERSION 1
+
#define CACHE_METADATA_CACHE_SIZE 64
/*
@@ -134,6 +140,18 @@ static void sb_prepare_for_write(struct dm_block_validator *v,
SUPERBLOCK_CSUM_XOR));
}
+static int check_metadata_version(struct cache_disk_superblock *disk_super)
+{
+ uint32_t metadata_version = le32_to_cpu(disk_super->version);
+ if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
+ DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
+ metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int sb_check(struct dm_block_validator *v,
struct dm_block *b,
size_t sb_block_size)
@@ -164,7 +182,7 @@ static int sb_check(struct dm_block_validator *v,
return -EILSEQ;
}
- return 0;
+ return check_metadata_version(disk_super);
}
static struct dm_block_validator sb_validator = {
@@ -198,7 +216,7 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
/*----------------------------------------------------------------*/
-static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
unsigned i;
@@ -214,10 +232,10 @@ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
return r;
data_le = dm_block_data(b);
- *result = 1;
+ *result = true;
for (i = 0; i < sb_block_size; i++) {
if (data_le[i] != zero) {
- *result = 0;
+ *result = false;
break;
}
}
@@ -270,7 +288,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->flags = 0;
memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
- disk_super->version = cpu_to_le32(CACHE_VERSION);
+ disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
disk_super->policy_hint_size = 0;
@@ -411,7 +429,8 @@ bad:
static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
bool format_device)
{
- int r, unformatted;
+ int r;
+ bool unformatted = false;
r = __superblock_all_zeroes(cmd->bm, &unformatted);
if (r)
@@ -666,19 +685,85 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
kfree(cmd);
}
+/*
+ * Checks that the given cache block is either unmapped or clean.
+ */
+static int block_unmapped_or_clean(struct dm_cache_metadata *cmd, dm_cblock_t b,
+ bool *result)
+{
+ int r;
+ __le64 value;
+ dm_oblock_t ob;
+ unsigned flags;
+
+ r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
+ if (r) {
+ DMERR("block_unmapped_or_clean failed");
+ return r;
+ }
+
+ unpack_value(value, &ob, &flags);
+ *result = !((flags & M_VALID) && (flags & M_DIRTY));
+
+ return 0;
+}
+
+static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+ dm_cblock_t begin, dm_cblock_t end,
+ bool *result)
+{
+ int r;
+ *result = true;
+
+ while (begin != end) {
+ r = block_unmapped_or_clean(cmd, begin, result);
+ if (r)
+ return r;
+
+ if (!*result) {
+ DMERR("cache block %llu is dirty",
+ (unsigned long long) from_cblock(begin));
+ return 0;
+ }
+
+ begin = to_cblock(from_cblock(begin) + 1);
+ }
+
+ return 0;
+}
+
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
int r;
+ bool clean;
__le64 null_mapping = pack_value(0, 0);
down_write(&cmd->root_lock);
__dm_bless_for_disk(&null_mapping);
+
+ if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
+ r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
+ if (r) {
+ __dm_unbless_for_disk(&null_mapping);
+ goto out;
+ }
+
+ if (!clean) {
+ DMERR("unable to shrink cache due to dirty blocks");
+ r = -EINVAL;
+ __dm_unbless_for_disk(&null_mapping);
+ goto out;
+ }
+ }
+
r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
from_cblock(new_cache_size),
&null_mapping, &cmd->root);
if (!r)
cmd->cache_blocks = new_cache_size;
cmd->changed = true;
+
+out:
up_write(&cmd->root_lock);
return r;
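The resize path now refuses to shrink while any cblock in the doomed tail [new_cache_size, cache_blocks) is both valid and dirty, since truncating the mapping array would drop the only up-to-date copy of that data. The shape of the guard, sketched over an in-memory array (error codes simplified; the kernel returns -EINVAL and logs via DMERR):

#include <stdbool.h>
#include <stdio.h>

struct block { bool valid, dirty; };

/* True iff every block in [begin, end) is unmapped or clean, i.e. safe
 * to discard when the array is truncated at begin. */
static bool range_unmapped_or_clean(const struct block *b,
				    unsigned begin, unsigned end)
{
	for (unsigned i = begin; i < end; i++)
		if (b[i].valid && b[i].dirty)
			return false;
	return true;
}

static int resize(struct block *b, unsigned *nr, unsigned new_nr)
{
	if (new_nr < *nr && !range_unmapped_or_clean(b, new_nr, *nr))
		return -1;   /* refuse: shrinking would lose dirty data */

	*nr = new_nr;        /* dm_array_resize() does the real work */
	return 0;
}

int main(void)
{
	struct block b[4] = { {1, 0}, {1, 1}, {0, 0}, {1, 0} };
	unsigned nr = 4;
	printf("%d\n", resize(b, &nr, 3));   /* 0: block 3 is clean */
	printf("%d\n", resize(b, &nr, 1));   /* -1: block 1 is dirty */
	return 0;
}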
@@ -1182,3 +1267,8 @@ int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
return r;
}
+
+int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
+{
+ return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+}
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index f45cef21f3d0..cd906f14f98d 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -137,6 +137,11 @@ int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
int dm_cache_save_hint(struct dm_cache_metadata *cmd,
dm_cblock_t cblock, uint32_t hint);
+/*
+ * Query method. Are all the blocks in the cache clean?
+ */
+int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
+
/*----------------------------------------------------------------*/
#endif /* DM_CACHE_METADATA_H */
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 0928abdc49f0..2256a1f24f73 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -61,7 +61,12 @@ static inline int policy_writeback_work(struct dm_cache_policy *p,
static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- return p->remove_mapping(p, oblock);
+ p->remove_mapping(p, oblock);
+}
+
+static inline int policy_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
+{
+ return p->remove_cblock(p, cblock);
}
static inline void policy_force_mapping(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 4296155090b2..64780ad73bb0 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -26,19 +26,6 @@ static unsigned next_power(unsigned n, unsigned min)
/*----------------------------------------------------------------*/
-static unsigned long *alloc_bitset(unsigned nr_entries)
-{
- size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
- return vzalloc(s);
-}
-
-static void free_bitset(unsigned long *bits)
-{
- vfree(bits);
-}
-
-/*----------------------------------------------------------------*/
-
/*
* Large, sequential ios are probably better left on the origin device since
* spindles tend to have good bandwidth.
@@ -151,6 +138,21 @@ static void queue_init(struct queue *q)
}
/*
+ * Checks to see if the queue is empty.
+ * FIXME: reduce cpu usage.
+ */
+static bool queue_empty(struct queue *q)
+{
+ unsigned i;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++)
+ if (!list_empty(q->qs + i))
+ return false;
+
+ return true;
+}
+
+/*
* Insert an entry to the back of the given level.
*/
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
@@ -218,17 +220,116 @@ struct entry {
struct hlist_node hlist;
struct list_head list;
dm_oblock_t oblock;
- dm_cblock_t cblock; /* valid iff in_cache */
/*
* FIXME: pack these better
*/
- bool in_cache:1;
+ bool dirty:1;
unsigned hit_count;
unsigned generation;
unsigned tick;
};
+/*
+ * Rather than storing the cblock in an entry, we allocate all entries in
+ * an array, and infer the cblock from the entry position.
+ *
+ * Free entries are linked together into a list.
+ */
+struct entry_pool {
+ struct entry *entries, *entries_end;
+ struct list_head free;
+ unsigned nr_allocated;
+};
+
+static int epool_init(struct entry_pool *ep, unsigned nr_entries)
+{
+ unsigned i;
+
+ ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
+ if (!ep->entries)
+ return -ENOMEM;
+
+ ep->entries_end = ep->entries + nr_entries;
+
+ INIT_LIST_HEAD(&ep->free);
+ for (i = 0; i < nr_entries; i++)
+ list_add(&ep->entries[i].list, &ep->free);
+
+ ep->nr_allocated = 0;
+
+ return 0;
+}
+
+static void epool_exit(struct entry_pool *ep)
+{
+ vfree(ep->entries);
+}
+
+static struct entry *alloc_entry(struct entry_pool *ep)
+{
+ struct entry *e;
+
+ if (list_empty(&ep->free))
+ return NULL;
+
+ e = list_entry(list_pop(&ep->free), struct entry, list);
+ INIT_LIST_HEAD(&e->list);
+ INIT_HLIST_NODE(&e->hlist);
+ ep->nr_allocated++;
+
+ return e;
+}
+
+/*
+ * This assumes the cblock hasn't already been allocated.
+ */
+static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
+{
+ struct entry *e = ep->entries + from_cblock(cblock);
+ list_del(&e->list);
+
+ INIT_LIST_HEAD(&e->list);
+ INIT_HLIST_NODE(&e->hlist);
+ ep->nr_allocated++;
+
+ return e;
+}
+
+static void free_entry(struct entry_pool *ep, struct entry *e)
+{
+ BUG_ON(!ep->nr_allocated);
+ ep->nr_allocated--;
+ INIT_HLIST_NODE(&e->hlist);
+ list_add(&e->list, &ep->free);
+}
+
+/*
+ * Returns NULL if the entry is free.
+ */
+static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
+{
+ struct entry *e = ep->entries + from_cblock(cblock);
+ return !hlist_unhashed(&e->hlist) ? e : NULL;
+}
+
+static bool epool_empty(struct entry_pool *ep)
+{
+ return list_empty(&ep->free);
+}
+
+static bool in_pool(struct entry_pool *ep, struct entry *e)
+{
+ return e >= ep->entries && e < ep->entries_end;
+}
+
+static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
+{
+ return to_cblock(e - ep->entries);
+}
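The pool eliminates the per-entry cblock field: entries live in one array indexed by cblock, free slots are threaded onto an intrusive list, and infer_cblock() is pure pointer arithmetic. epool_find() relies on allocation re-initialising the hlist node while freeing leaves it unhashed. A compact standalone model of the same layout (an explicit allocated flag stands in for the hlist_unhashed() test):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next_free;   /* stand-in for the intrusive free list */
	int allocated;             /* stand-in for the hlist_unhashed() test */
	unsigned hit_count;
};

struct entry_pool {
	struct entry *entries;
	struct entry *free;
	unsigned nr;
};

static int epool_init(struct entry_pool *ep, unsigned nr)
{
	ep->entries = calloc(nr, sizeof(*ep->entries));
	if (!ep->entries)
		return -1;
	ep->nr = nr;
	ep->free = NULL;
	for (unsigned i = 0; i < nr; i++) {
		ep->entries[i].next_free = ep->free;
		ep->free = &ep->entries[i];
	}
	return 0;
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e = ep->free;
	if (!e)
		return NULL;
	ep->free = e->next_free;
	e->allocated = 1;
	return e;
}

/* The cblock is never stored: it is the entry's index in the array. */
static unsigned infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return (unsigned)(e - ep->entries);
}

int main(void)
{
	struct entry_pool ep;
	if (epool_init(&ep, 4))
		return 1;
	printf("cblock %u\n", infer_cblock(&ep, alloc_entry(&ep)));
	free(ep.entries);
	return 0;
}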
+
+/*----------------------------------------------------------------*/
+
struct mq_policy {
struct dm_cache_policy policy;
@@ -238,13 +339,22 @@ struct mq_policy {
struct io_tracker tracker;
/*
- * We maintain two queues of entries. The cache proper contains
- * the currently active mappings. Whereas the pre_cache tracks
- * blocks that are being hit frequently and potential candidates
- * for promotion to the cache.
+ * Entries come from two pools, one of pre-cache entries, and one
+ * for the cache proper.
+ */
+ struct entry_pool pre_cache_pool;
+ struct entry_pool cache_pool;
+
+ /*
+ * We maintain three queues of entries. The cache proper,
+ * consisting of a clean and dirty queue, contains the currently
+ * active mappings. Whereas the pre_cache tracks blocks that
+ * are being hit frequently and potential candidates for promotion
+ * to the cache.
*/
struct queue pre_cache;
- struct queue cache;
+ struct queue cache_clean;
+ struct queue cache_dirty;
/*
* Keeps track of time, incremented by the core. We use this to
@@ -282,25 +392,6 @@ struct mq_policy {
unsigned promote_threshold;
/*
- * We need cache_size entries for the cache, and choose to have
- * cache_size entries for the pre_cache too. One motivation for
- * using the same size is to make the hit counts directly
- * comparable between pre_cache and cache.
- */
- unsigned nr_entries;
- unsigned nr_entries_allocated;
- struct list_head free;
-
- /*
- * Cache blocks may be unallocated. We store this info in a
- * bitset.
- */
- unsigned long *allocation_bitset;
- unsigned nr_cblocks_allocated;
- unsigned find_free_nr_words;
- unsigned find_free_last_word;
-
- /*
* The hash table allows us to quickly find an entry by origin
* block. Both pre_cache and cache entries are in here.
*/
@@ -310,49 +401,6 @@ struct mq_policy {
};
/*----------------------------------------------------------------*/
-/* Free/alloc mq cache entry structures. */
-static void takeout_queue(struct list_head *lh, struct queue *q)
-{
- unsigned level;
-
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_splice(q->qs + level, lh);
-}
-
-static void free_entries(struct mq_policy *mq)
-{
- struct entry *e, *tmp;
-
- takeout_queue(&mq->free, &mq->pre_cache);
- takeout_queue(&mq->free, &mq->cache);
-
- list_for_each_entry_safe(e, tmp, &mq->free, list)
- kmem_cache_free(mq_entry_cache, e);
-}
-
-static int alloc_entries(struct mq_policy *mq, unsigned elts)
-{
- unsigned u = mq->nr_entries;
-
- INIT_LIST_HEAD(&mq->free);
- mq->nr_entries_allocated = 0;
-
- while (u--) {
- struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);
-
- if (!e) {
- free_entries(mq);
- return -ENOMEM;
- }
-
-
- list_add(&e->list, &mq->free);
- }
-
- return 0;
-}
-
-/*----------------------------------------------------------------*/
/*
* Simple hash table implementation. Should replace with the standard hash
@@ -388,96 +436,14 @@ static void hash_remove(struct entry *e)
/*----------------------------------------------------------------*/
-/*
- * Allocates a new entry structure. The memory is allocated in one lump,
- * so we just handing it out here. Returns NULL if all entries have
- * already been allocated. Cannot fail otherwise.
- */
-static struct entry *alloc_entry(struct mq_policy *mq)
-{
- struct entry *e;
-
- if (mq->nr_entries_allocated >= mq->nr_entries) {
- BUG_ON(!list_empty(&mq->free));
- return NULL;
- }
-
- e = list_entry(list_pop(&mq->free), struct entry, list);
- INIT_LIST_HEAD(&e->list);
- INIT_HLIST_NODE(&e->hlist);
-
- mq->nr_entries_allocated++;
- return e;
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Mark cache blocks allocated or not in the bitset.
- */
-static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
-{
- BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
- BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));
-
- set_bit(from_cblock(cblock), mq->allocation_bitset);
- mq->nr_cblocks_allocated++;
-}
-
-static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
-{
- BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
- BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));
-
- clear_bit(from_cblock(cblock), mq->allocation_bitset);
- mq->nr_cblocks_allocated--;
-}
-
static bool any_free_cblocks(struct mq_policy *mq)
{
- return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
-}
-
-/*
- * Fills result out with a cache block that isn't in use, or return
- * -ENOSPC. This does _not_ mark the cblock as allocated, the caller is
- * reponsible for that.
- */
-static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
- dm_cblock_t *result, unsigned *last_word)
-{
- int r = -ENOSPC;
- unsigned w;
-
- for (w = begin; w < end; w++) {
- /*
- * ffz is undefined if no zero exists
- */
- if (mq->allocation_bitset[w] != ~0UL) {
- *last_word = w;
- *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
- if (from_cblock(*result) < from_cblock(mq->cache_size))
- r = 0;
-
- break;
- }
- }
-
- return r;
+ return !epool_empty(&mq->cache_pool);
}
-static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
+static bool any_clean_cblocks(struct mq_policy *mq)
{
- int r;
-
- if (!any_free_cblocks(mq))
- return -ENOSPC;
-
- r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word);
- if (r == -ENOSPC && mq->find_free_last_word)
- r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word);
-
- return r;
+ return !queue_empty(&mq->cache_clean);
}
/*----------------------------------------------------------------*/
@@ -496,33 +462,35 @@ static unsigned queue_level(struct entry *e)
return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
+static bool in_cache(struct mq_policy *mq, struct entry *e)
+{
+ return in_pool(&mq->cache_pool, e);
+}
+
/*
* Inserts the entry into the pre_cache or the cache. Ensures the cache
- * block is marked as allocated if necc. Inserts into the hash table. Sets the
- * tick which records when the entry was last moved about.
+ * block is marked as allocated if necessary. Inserts into the hash table.
+ * Sets the tick which records when the entry was last moved about.
*/
static void push(struct mq_policy *mq, struct entry *e)
{
e->tick = mq->tick;
hash_insert(mq, e);
- if (e->in_cache) {
- alloc_cblock(mq, e->cblock);
- queue_push(&mq->cache, queue_level(e), &e->list);
- } else
+ if (in_cache(mq, e))
+ queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
+ queue_level(e), &e->list);
+ else
queue_push(&mq->pre_cache, queue_level(e), &e->list);
}
/*
* Removes an entry from pre_cache or cache. Removes from the hash table.
- * Frees off the cache block if necc.
*/
static void del(struct mq_policy *mq, struct entry *e)
{
queue_remove(&e->list);
hash_remove(e);
- if (e->in_cache)
- free_cblock(mq, e->cblock);
}
/*
@@ -531,14 +499,14 @@ static void del(struct mq_policy *mq, struct entry *e)
*/
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
- struct entry *e = container_of(queue_pop(q), struct entry, list);
+ struct entry *e;
+ struct list_head *h = queue_pop(q);
- if (e) {
- hash_remove(e);
+ if (!h)
+ return NULL;
- if (e->in_cache)
- free_cblock(mq, e->cblock);
- }
+ e = container_of(h, struct entry, list);
+ hash_remove(e);
return e;
}
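The reworked pop() also fixes a latent bug: container_of() applied to a NULL list pointer does not yield NULL, it yields NULL minus the member offset, so the old `if (e)` test could never catch an empty queue. Demonstrated in a few lines (the pointer arithmetic on NULL is formally undefined, but it is exactly what the macro computes):

#include <stddef.h>
#include <stdio.h>

struct entry { long payload; int list; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	int *head = NULL;   /* what queue_pop() returned for an empty queue */
	struct entry *e = container_of(head, struct entry, list);

	/* e is (struct entry *)-offsetof(...), not NULL: the test is dead. */
	printf("e = %p, e == NULL: %d\n", (void *)e, e == NULL);
	return 0;
}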
@@ -556,7 +524,8 @@ static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
* of the entries.
*
* At the moment the threshold is taken by averaging the hit counts of some
- * of the entries in the cache (the first 20 entries of the first level).
+ * of the entries in the cache (the first 20 entries across all levels in
+ * ascending order, giving preference to the clean entries at each level).
*
* We can be much cleverer than this though. For example, each promotion
* could bump up the threshold helping to prevent churn. Much more to do
@@ -571,14 +540,21 @@ static void check_generation(struct mq_policy *mq)
struct list_head *head;
struct entry *e;
- if ((mq->hit_count >= mq->generation_period) &&
- (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {
-
+ if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
mq->hit_count = 0;
mq->generation++;
for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
- head = mq->cache.qs + level;
+ head = mq->cache_clean.qs + level;
+ list_for_each_entry(e, head, list) {
+ nr++;
+ total += e->hit_count;
+
+ if (++count >= MAX_TO_AVERAGE)
+ break;
+ }
+
+ head = mq->cache_dirty.qs + level;
list_for_each_entry(e, head, list) {
nr++;
total += e->hit_count;
@@ -631,19 +607,30 @@ static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
* - set the hit count to a hard coded value other than 1, eg, is it better
* if it goes in at level 2?
*/
-static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
- dm_cblock_t result;
- struct entry *demoted = pop(mq, &mq->cache);
+ struct entry *demoted = pop(mq, &mq->cache_clean);
+
+ if (!demoted)
+ /*
+ * We could get a block from mq->cache_dirty, but that
+ * would add extra latency to the triggering bio as it
+ * waits for the writeback. Better to not promote this
+ * time and hope there's a clean block next time this block
+ * is hit.
+ */
+ return -ENOSPC;
- BUG_ON(!demoted);
- result = demoted->cblock;
*oblock = demoted->oblock;
- demoted->in_cache = false;
- demoted->hit_count = 1;
- push(mq, demoted);
+ free_entry(&mq->cache_pool, demoted);
+
+ /*
+ * We used to put the demoted block into the pre-cache, but I think
+ * it's simpler to just let it work its way up from zero again.
+ * Stops blocks flickering in and out of the cache.
+ */
- return result;
+ return 0;
}
/*
@@ -662,17 +649,18 @@ static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
bool discarded_oblock, int data_dir)
{
- if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+ if (data_dir == READ)
+ return mq->promote_threshold + READ_PROMOTE_THRESHOLD;
+
+ if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
/*
* We don't need to do any copying at all, so give this a
- * very low threshold. In practice this only triggers
- * during initial population after a format.
+ * very low threshold.
*/
return DISCARDED_PROMOTE_THRESHOLD;
+ }
- return data_dir == READ ?
- (mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
- (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+ return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
}
static bool should_promote(struct mq_policy *mq, struct entry *e,
@@ -688,34 +676,49 @@ static int cache_entry_found(struct mq_policy *mq,
{
requeue_and_update_tick(mq, e);
- if (e->in_cache) {
+ if (in_cache(mq, e)) {
result->op = POLICY_HIT;
- result->cblock = e->cblock;
+ result->cblock = infer_cblock(&mq->cache_pool, e);
}
return 0;
}
/*
- * Moves and entry from the pre_cache to the cache. The main work is
+ * Moves an entry from the pre_cache to the cache. The main work is
* finding which cache block to use.
*/
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
struct policy_result *result)
{
- dm_cblock_t cblock;
+ int r;
+ struct entry *new_e;
- if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+ /* Ensure there's a free cblock in the cache */
+ if (epool_empty(&mq->cache_pool)) {
result->op = POLICY_REPLACE;
- cblock = demote_cblock(mq, &result->old_oblock);
+ r = demote_cblock(mq, &result->old_oblock);
+ if (r) {
+ result->op = POLICY_MISS;
+ return 0;
+ }
} else
result->op = POLICY_NEW;
- result->cblock = e->cblock = cblock;
+ new_e = alloc_entry(&mq->cache_pool);
+ BUG_ON(!new_e);
+
+ new_e->oblock = e->oblock;
+ new_e->dirty = false;
+ new_e->hit_count = e->hit_count;
+ new_e->generation = e->generation;
+ new_e->tick = e->tick;
del(mq, e);
- e->in_cache = true;
- push(mq, e);
+ free_entry(&mq->pre_cache_pool, e);
+ push(mq, new_e);
+
+ result->cblock = infer_cblock(&mq->cache_pool, new_e);
return 0;
}
@@ -727,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
int r = 0;
bool updated = updated_this_tick(mq, e);
- requeue_and_update_tick(mq, e);
-
if ((!discarded_oblock && updated) ||
- !should_promote(mq, e, discarded_oblock, data_dir))
+ !should_promote(mq, e, discarded_oblock, data_dir)) {
+ requeue_and_update_tick(mq, e);
result->op = POLICY_MISS;
- else if (!can_migrate)
+
+ } else if (!can_migrate)
r = -EWOULDBLOCK;
- else
+
+ else {
+ requeue_and_update_tick(mq, e);
r = pre_cache_to_cache(mq, e, result);
+ }
return r;
}
@@ -743,7 +749,7 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
static void insert_in_pre_cache(struct mq_policy *mq,
dm_oblock_t oblock)
{
- struct entry *e = alloc_entry(mq);
+ struct entry *e = alloc_entry(&mq->pre_cache_pool);
if (!e)
/*
@@ -757,7 +763,7 @@ static void insert_in_pre_cache(struct mq_policy *mq,
return;
}
- e->in_cache = false;
+ e->dirty = false;
e->oblock = oblock;
e->hit_count = 1;
e->generation = mq->generation;
@@ -767,30 +773,36 @@ static void insert_in_pre_cache(struct mq_policy *mq,
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
struct policy_result *result)
{
+ int r;
struct entry *e;
- dm_cblock_t cblock;
- if (find_free_cblock(mq, &cblock) == -ENOSPC) {
- result->op = POLICY_MISS;
- insert_in_pre_cache(mq, oblock);
- return;
- }
+ if (epool_empty(&mq->cache_pool)) {
+ result->op = POLICY_REPLACE;
+ r = demote_cblock(mq, &result->old_oblock);
+ if (unlikely(r)) {
+ result->op = POLICY_MISS;
+ insert_in_pre_cache(mq, oblock);
+ return;
+ }
- e = alloc_entry(mq);
- if (unlikely(!e)) {
- result->op = POLICY_MISS;
- return;
+ /*
+ * This will always succeed, since we've just demoted.
+ */
+ e = alloc_entry(&mq->cache_pool);
+ BUG_ON(!e);
+
+ } else {
+ e = alloc_entry(&mq->cache_pool);
+ result->op = POLICY_NEW;
}
e->oblock = oblock;
- e->cblock = cblock;
- e->in_cache = true;
+ e->dirty = false;
e->hit_count = 1;
e->generation = mq->generation;
push(mq, e);
- result->op = POLICY_NEW;
- result->cblock = e->cblock;
+ result->cblock = infer_cblock(&mq->cache_pool, e);
}
static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
@@ -821,13 +833,16 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,
int r = 0;
struct entry *e = hash_lookup(mq, oblock);
- if (e && e->in_cache)
+ if (e && in_cache(mq, e))
r = cache_entry_found(mq, e, result);
+
else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
result->op = POLICY_MISS;
+
else if (e)
r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
data_dir, result);
+
else
r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
data_dir, result);
@@ -854,9 +869,9 @@ static void mq_destroy(struct dm_cache_policy *p)
{
struct mq_policy *mq = to_mq_policy(p);
- free_bitset(mq->allocation_bitset);
kfree(mq->table);
- free_entries(mq);
+ epool_exit(&mq->cache_pool);
+ epool_exit(&mq->pre_cache_pool);
kfree(mq);
}
@@ -904,8 +919,8 @@ static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t
return -EWOULDBLOCK;
e = hash_lookup(mq, oblock);
- if (e && e->in_cache) {
- *cblock = e->cblock;
+ if (e && in_cache(mq, e)) {
+ *cblock = infer_cblock(&mq->cache_pool, e);
r = 0;
} else
r = -ENOENT;
@@ -915,6 +930,36 @@ static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t
return r;
}
+static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
+{
+ struct entry *e;
+
+ e = hash_lookup(mq, oblock);
+ BUG_ON(!e || !in_cache(mq, e));
+
+ del(mq, e);
+ e->dirty = set;
+ push(mq, e);
+}
+
+static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ __mq_set_clear_dirty(mq, oblock, true);
+ mutex_unlock(&mq->lock);
+}
+
+static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ __mq_set_clear_dirty(mq, oblock, false);
+ mutex_unlock(&mq->lock);
+}
+
static int mq_load_mapping(struct dm_cache_policy *p,
dm_oblock_t oblock, dm_cblock_t cblock,
uint32_t hint, bool hint_valid)
@@ -922,13 +967,9 @@ static int mq_load_mapping(struct dm_cache_policy *p,
struct mq_policy *mq = to_mq_policy(p);
struct entry *e;
- e = alloc_entry(mq);
- if (!e)
- return -ENOMEM;
-
- e->cblock = cblock;
+ e = alloc_particular_entry(&mq->cache_pool, cblock);
e->oblock = oblock;
- e->in_cache = true;
+ e->dirty = false; /* this gets corrected in a minute */
e->hit_count = hint_valid ? hint : 1;
e->generation = mq->generation;
push(mq, e);
@@ -936,57 +977,126 @@ static int mq_load_mapping(struct dm_cache_policy *p,
return 0;
}
+static int mq_save_hints(struct mq_policy *mq, struct queue *q,
+ policy_walk_fn fn, void *context)
+{
+ int r;
+ unsigned level;
+ struct entry *e;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_for_each_entry(e, q->qs + level, list) {
+ r = fn(context, infer_cblock(&mq->cache_pool, e),
+ e->oblock, e->hit_count);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
void *context)
{
struct mq_policy *mq = to_mq_policy(p);
int r = 0;
- struct entry *e;
- unsigned level;
mutex_lock(&mq->lock);
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each_entry(e, &mq->cache.qs[level], list) {
- r = fn(context, e->cblock, e->oblock, e->hit_count);
- if (r)
- goto out;
- }
+ r = mq_save_hints(mq, &mq->cache_clean, fn, context);
+ if (!r)
+ r = mq_save_hints(mq, &mq->cache_dirty, fn, context);
-out:
mutex_unlock(&mq->lock);
return r;
}
+static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+{
+ struct entry *e;
+
+ e = hash_lookup(mq, oblock);
+ BUG_ON(!e || !in_cache(mq, e));
+
+ del(mq, e);
+ free_entry(&mq->cache_pool, e);
+}
+
static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
struct mq_policy *mq = to_mq_policy(p);
- struct entry *e;
mutex_lock(&mq->lock);
+ __remove_mapping(mq, oblock);
+ mutex_unlock(&mq->lock);
+}
- e = hash_lookup(mq, oblock);
+static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+ struct entry *e = epool_find(&mq->cache_pool, cblock);
- BUG_ON(!e || !e->in_cache);
+ if (!e)
+ return -ENODATA;
del(mq, e);
- e->in_cache = false;
- push(mq, e);
+ free_entry(&mq->cache_pool, e);
+
+ return 0;
+}
+
+static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+ mutex_lock(&mq->lock);
+ r = __remove_cblock(mq, cblock);
mutex_unlock(&mq->lock);
+
+ return r;
}
-static void force_mapping(struct mq_policy *mq,
- dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
{
- struct entry *e = hash_lookup(mq, current_oblock);
+ struct entry *e = pop(mq, &mq->cache_dirty);
- BUG_ON(!e || !e->in_cache);
+ if (!e)
+ return -ENODATA;
- del(mq, e);
- e->oblock = new_oblock;
+ *oblock = e->oblock;
+ *cblock = infer_cblock(&mq->cache_pool, e);
+ e->dirty = false;
push(mq, e);
+
+ return 0;
+}
+
+static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ r = __mq_writeback_work(mq, oblock, cblock);
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static void __force_mapping(struct mq_policy *mq,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct entry *e = hash_lookup(mq, current_oblock);
+
+ if (e && in_cache(mq, e)) {
+ del(mq, e);
+ e->oblock = new_oblock;
+ e->dirty = true;
+ push(mq, e);
+ }
}
static void mq_force_mapping(struct dm_cache_policy *p,
@@ -995,16 +1105,20 @@ static void mq_force_mapping(struct dm_cache_policy *p,
struct mq_policy *mq = to_mq_policy(p);
mutex_lock(&mq->lock);
- force_mapping(mq, current_oblock, new_oblock);
+ __force_mapping(mq, current_oblock, new_oblock);
mutex_unlock(&mq->lock);
}
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
+ dm_cblock_t r;
struct mq_policy *mq = to_mq_policy(p);
- /* FIXME: lock mutex, not sure we can block here */
- return to_cblock(mq->nr_cblocks_allocated);
+ mutex_lock(&mq->lock);
+ r = to_cblock(mq->cache_pool.nr_allocated);
+ mutex_unlock(&mq->lock);
+
+ return r;
}
static void mq_tick(struct dm_cache_policy *p)
@@ -1057,10 +1171,13 @@ static void init_policy_functions(struct mq_policy *mq)
mq->policy.destroy = mq_destroy;
mq->policy.map = mq_map;
mq->policy.lookup = mq_lookup;
+ mq->policy.set_dirty = mq_set_dirty;
+ mq->policy.clear_dirty = mq_clear_dirty;
mq->policy.load_mapping = mq_load_mapping;
mq->policy.walk_mappings = mq_walk_mappings;
mq->policy.remove_mapping = mq_remove_mapping;
- mq->policy.writeback_work = NULL;
+ mq->policy.remove_cblock = mq_remove_cblock;
+ mq->policy.writeback_work = mq_writeback_work;
mq->policy.force_mapping = mq_force_mapping;
mq->policy.residency = mq_residency;
mq->policy.tick = mq_tick;
@@ -1072,7 +1189,6 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
sector_t origin_size,
sector_t cache_block_size)
{
- int r;
struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
@@ -1080,8 +1196,18 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
init_policy_functions(mq);
iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
-
mq->cache_size = cache_size;
+
+ if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
+ DMERR("couldn't initialize pool of pre-cache entries");
+ goto bad_pre_cache_init;
+ }
+
+ if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
+ DMERR("couldn't initialize pool of cache entries");
+ goto bad_cache_init;
+ }
+
mq->tick_protected = 0;
mq->tick = 0;
mq->hit_count = 0;
@@ -1089,20 +1215,12 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
mq->promote_threshold = 0;
mutex_init(&mq->lock);
spin_lock_init(&mq->tick_lock);
- mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
- mq->find_free_last_word = 0;
queue_init(&mq->pre_cache);
- queue_init(&mq->cache);
- mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
-
- mq->nr_entries = 2 * from_cblock(cache_size);
- r = alloc_entries(mq, mq->nr_entries);
- if (r)
- goto bad_cache_alloc;
+ queue_init(&mq->cache_clean);
+ queue_init(&mq->cache_dirty);
- mq->nr_entries_allocated = 0;
- mq->nr_cblocks_allocated = 0;
+ mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
mq->hash_bits = ffs(mq->nr_buckets) - 1;
@@ -1110,17 +1228,13 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
if (!mq->table)
goto bad_alloc_table;
- mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
- if (!mq->allocation_bitset)
- goto bad_alloc_bitset;
-
return &mq->policy;
-bad_alloc_bitset:
- kfree(mq->table);
bad_alloc_table:
- free_entries(mq);
-bad_cache_alloc:
+ epool_exit(&mq->cache_pool);
+bad_cache_init:
+ epool_exit(&mq->pre_cache_pool);
+bad_pre_cache_init:
kfree(mq);
return NULL;
@@ -1130,7 +1244,7 @@ bad_cache_alloc:
static struct dm_cache_policy_type mq_policy_type = {
.name = "mq",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = mq_create
@@ -1138,7 +1252,7 @@ static struct dm_cache_policy_type mq_policy_type = {
static struct dm_cache_policy_type default_policy_type = {
.name = "default",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = mq_create
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index 21c03c570c06..d80057968407 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -119,13 +119,13 @@ struct dm_cache_policy *dm_cache_policy_create(const char *name,
type = get_policy(name);
if (!type) {
DMWARN("unknown policy type");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
p = type->create(cache_size, origin_size, cache_block_size);
if (!p) {
put_policy(type);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
p->private = type;
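Returning ERR_PTR() instead of a bare NULL lets callers distinguish an unknown policy name (-EINVAL) from allocation failure (-ENOMEM) through the single pointer return; they test with IS_ERR() and recover the errno with PTR_ERR(). These <linux/err.h> helpers work by encoding small negative errnos in the top page of the address space, roughly:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* Negative errnos cast to pointers land in the top 4095 bytes
	 * of the address space, where no valid object can live. */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *create(int fail)
{
	return fail ? ERR_PTR(-22 /* EINVAL */) : (void *)"policy";
}

int main(void)
{
	void *p = create(1);
	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));   /* error -22 */
	return 0;
}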
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 33369ca9614f..052c00a84a5c 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -135,9 +135,6 @@ struct dm_cache_policy {
*/
int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
- /*
- * oblock must be a mapped block. Must not block.
- */
void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
@@ -159,8 +156,24 @@ struct dm_cache_policy {
void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
dm_oblock_t new_oblock);
- int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+ /*
+ * This is called via the invalidate_cblocks message. It is
+ * possible the particular cblock has already been removed due to a
+ * write io in passthrough mode. In which case this should return
+ * -ENODATA.
+ */
+ int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);
+ /*
+ * Provide a dirty block to be written back by the core target.
+ *
+ * Returns:
+ *
+ * 0 and @cblock,@oblock: block to write back provided
+ *
+ * -ENODATA: no dirty blocks available
+ */
+ int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
/*
* How full is the cache?
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 29569768ffbf..1b1469ebe5cb 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -61,6 +61,34 @@ static void free_bitset(unsigned long *bits)
/*----------------------------------------------------------------*/
+/*
+ * There are a couple of places where we let a bio run, but want to do some
+ * work before calling its endio function. We do this by temporarily
+ * changing the endio fn.
+ */
+struct dm_hook_info {
+ bio_end_io_t *bi_end_io;
+ void *bi_private;
+};
+
+static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
+ bio_end_io_t *bi_end_io, void *bi_private)
+{
+ h->bi_end_io = bio->bi_end_io;
+ h->bi_private = bio->bi_private;
+
+ bio->bi_end_io = bi_end_io;
+ bio->bi_private = bi_private;
+}
+
+static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
+{
+ bio->bi_end_io = h->bi_end_io;
+ bio->bi_private = h->bi_private;
+}
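The hook pair generalises the old saved_bi_end_io field: the completion callback and its private data are stashed together, swapped out while the extra work is pending, and restored before the original endio runs. The same save-swap-restore pattern on a generic callback slot:

#include <stdio.h>

typedef void (*end_io_t)(void *priv);

struct bio { end_io_t bi_end_io; void *bi_private; };
struct hook_info { end_io_t bi_end_io; void *bi_private; };

static void hook_bio(struct hook_info *h, struct bio *bio,
		     end_io_t fn, void *priv)
{
	h->bi_end_io = bio->bi_end_io;    /* save the original pair */
	h->bi_private = bio->bi_private;
	bio->bi_end_io = fn;              /* ...and swap in ours */
	bio->bi_private = priv;
}

static void unhook_bio(struct hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;    /* restore before completing */
	bio->bi_private = h->bi_private;
}

static void orig_endio(void *p)   { printf("original endio: %s\n", (char *)p); }
static void hooked_endio(void *p) { printf("extra work: %s\n", (char *)p); }

int main(void)
{
	struct bio bio = { orig_endio, "done" };
	struct hook_info h;

	hook_bio(&h, &bio, hooked_endio, "writeback bookkeeping");
	bio.bi_end_io(bio.bi_private);    /* the hook fires first */
	unhook_bio(&h, &bio);
	bio.bi_end_io(bio.bi_private);    /* then the original */
	return 0;
}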
+
+/*----------------------------------------------------------------*/
+
#define PRISON_CELLS 1024
#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
@@ -76,14 +104,37 @@ static void free_bitset(unsigned long *bits)
/*
* FIXME: the cache is read/write for the time being.
*/
-enum cache_mode {
+enum cache_metadata_mode {
CM_WRITE, /* metadata may be changed */
CM_READ_ONLY, /* metadata may not be changed */
};
+enum cache_io_mode {
+ /*
+ * Data is written to cached blocks only. These blocks are marked
+ * dirty. If you lose the cache device you will lose data.
+ * Potential performance increase for both reads and writes.
+ */
+ CM_IO_WRITEBACK,
+
+ /*
+ * Data is written to both cache and origin. Blocks are never
+ * dirty. Potential performance benefit for reads only.
+ */
+ CM_IO_WRITETHROUGH,
+
+ /*
+ * A degraded mode useful for various cache coherency situations
+ * (eg, rolling back snapshots). Reads and writes always go to the
+ * origin. If a write goes to a cached oblock, then the cache
+ * block is invalidated.
+ */
+ CM_IO_PASSTHROUGH
+};
+
struct cache_features {
- enum cache_mode mode;
- bool write_through:1;
+ enum cache_metadata_mode mode;
+ enum cache_io_mode io_mode;
};
struct cache_stats {
@@ -99,6 +150,25 @@ struct cache_stats {
atomic_t discard_count;
};
+/*
+ * Defines a half-open range of cblocks: begin to (end - 1) are in the
+ * range; end is the one-past-the-end value.
+ */
+struct cblock_range {
+ dm_cblock_t begin;
+ dm_cblock_t end;
+};
+
+struct invalidation_request {
+ struct list_head list;
+ struct cblock_range *cblocks;
+
+ atomic_t complete;
+ int err;
+
+ wait_queue_head_t result_wait;
+};
+
struct cache {
struct dm_target *ti;
struct dm_target_callbacks callbacks;
@@ -148,6 +218,10 @@ struct cache {
wait_queue_head_t migration_wait;
atomic_t nr_migrations;
+ wait_queue_head_t quiescing_wait;
+ atomic_t quiescing;
+ atomic_t quiescing_ack;
+
/*
* cache_size entries, dirty if set
*/
@@ -186,7 +260,7 @@ struct cache {
bool need_tick_bio:1;
bool sized:1;
- bool quiescing:1;
+ bool invalidate:1;
bool commit_requested:1;
bool loaded_mappings:1;
bool loaded_discards:1;
@@ -197,6 +271,12 @@ struct cache {
struct cache_features features;
struct cache_stats stats;
+
+ /*
+ * Invalidation fields.
+ */
+ spinlock_t invalidation_lock;
+ struct list_head invalidation_requests;
};
struct per_bio_data {
@@ -211,7 +291,7 @@ struct per_bio_data {
*/
struct cache *cache;
dm_cblock_t cblock;
- bio_end_io_t *saved_bi_end_io;
+ struct dm_hook_info hook_info;
struct dm_bio_details bio_details;
};
@@ -228,6 +308,8 @@ struct dm_cache_migration {
bool writeback:1;
bool demote:1;
bool promote:1;
+ bool requeue_holder:1;
+ bool invalidate:1;
struct dm_bio_prison_cell *old_ocell;
struct dm_bio_prison_cell *new_ocell;
@@ -533,9 +615,24 @@ static void save_stats(struct cache *cache)
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+static bool writethrough_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_WRITETHROUGH;
+}
+
+static bool writeback_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_WRITEBACK;
+}
+
+static bool passthrough_mode(struct cache_features *f)
+{
+ return f->io_mode == CM_IO_PASSTHROUGH;
+}
+
static size_t get_per_bio_data_size(struct cache *cache)
{
- return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+ return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}
static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
@@ -605,6 +702,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
+ check_if_tick_bio_needed(cache, bio);
remap_to_cache(cache, bio, cblock);
if (bio_data_dir(bio) == WRITE) {
set_dirty(cache, oblock, cblock);
@@ -662,7 +760,8 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
static void writethrough_endio(struct bio *bio, int err)
{
struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
- bio->bi_end_io = pb->saved_bi_end_io;
+
+ dm_unhook_bio(&pb->hook_info, bio);
if (err) {
bio_endio(bio, err);
@@ -693,9 +792,8 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
pb->cache = cache;
pb->cblock = cblock;
- pb->saved_bi_end_io = bio->bi_end_io;
+ dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
dm_bio_record(&pb->bio_details, bio);
- bio->bi_end_io = writethrough_endio;
remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
@@ -748,8 +846,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
static void cleanup_migration(struct dm_cache_migration *mg)
{
- dec_nr_migrations(mg->cache);
+ struct cache *cache = mg->cache;
free_migration(mg);
+ dec_nr_migrations(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
@@ -765,13 +864,13 @@ static void migration_failure(struct dm_cache_migration *mg)
DMWARN_LIMIT("demotion failed; couldn't copy block");
policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
- cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
if (mg->promote)
- cell_defer(cache, mg->new_ocell, 1);
+ cell_defer(cache, mg->new_ocell, true);
} else {
DMWARN_LIMIT("promotion failed; couldn't copy block");
policy_remove_mapping(cache->policy, mg->new_oblock);
- cell_defer(cache, mg->new_ocell, 1);
+ cell_defer(cache, mg->new_ocell, true);
}
cleanup_migration(mg);
@@ -823,7 +922,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
return;
} else if (mg->demote) {
- cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
if (mg->promote) {
mg->demote = false;
@@ -832,11 +931,19 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
list_add_tail(&mg->list, &cache->quiesced_migrations);
spin_unlock_irqrestore(&cache->lock, flags);
- } else
+ } else {
+ if (mg->invalidate)
+ policy_remove_mapping(cache->policy, mg->old_oblock);
cleanup_migration(mg);
+ }
} else {
- cell_defer(cache, mg->new_ocell, true);
+ if (mg->requeue_holder)
+ cell_defer(cache, mg->new_ocell, true);
+ else {
+ bio_endio(mg->new_ocell->holder, 0);
+ cell_defer(cache, mg->new_ocell, false);
+ }
clear_dirty(cache, mg->new_oblock, mg->cblock);
cleanup_migration(mg);
}
@@ -881,8 +988,46 @@ static void issue_copy_real(struct dm_cache_migration *mg)
r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
}
- if (r < 0)
+ if (r < 0) {
+ DMERR_LIMIT("issuing migration failed");
migration_failure(mg);
+ }
+}
+
+static void overwrite_endio(struct bio *bio, int err)
+{
+ struct dm_cache_migration *mg = bio->bi_private;
+ struct cache *cache = mg->cache;
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ unsigned long flags;
+
+ if (err)
+ mg->err = true;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->completed_migrations);
+ dm_unhook_bio(&pb->hook_info, bio);
+ mg->requeue_holder = false;
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
+{
+ size_t pb_data_size = get_per_bio_data_size(mg->cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+
+ dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
+ remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
+ generic_make_request(bio);
+}
+
+static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+{
+ return (bio_data_dir(bio) == WRITE) &&
+ (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
static void avoid_copy(struct dm_cache_migration *mg)
@@ -899,9 +1044,17 @@ static void issue_copy(struct dm_cache_migration *mg)
if (mg->writeback || mg->demote)
avoid = !is_dirty(cache, mg->cblock) ||
is_discarded_oblock(cache, mg->old_oblock);
- else
+ else {
+ struct bio *bio = mg->new_ocell->holder;
+
avoid = is_discarded_oblock(cache, mg->new_oblock);
+ if (!avoid && bio_writes_complete_block(cache, bio)) {
+ issue_overwrite(mg, bio);
+ return;
+ }
+ }
+
avoid ? avoid_copy(mg) : issue_copy_real(mg);
}
@@ -991,6 +1144,8 @@ static void promote(struct cache *cache, struct prealloc *structs,
mg->writeback = false;
mg->demote = false;
mg->promote = true;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->new_oblock = oblock;
mg->cblock = cblock;
@@ -1012,6 +1167,8 @@ static void writeback(struct cache *cache, struct prealloc *structs,
mg->writeback = true;
mg->demote = false;
mg->promote = false;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->old_oblock = oblock;
mg->cblock = cblock;
@@ -1035,6 +1192,8 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
mg->writeback = false;
mg->demote = true;
mg->promote = true;
+ mg->requeue_holder = true;
+ mg->invalidate = false;
mg->cache = cache;
mg->old_oblock = old_oblock;
mg->new_oblock = new_oblock;
@@ -1047,6 +1206,33 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
quiesce_migration(mg);
}
+/*
+ * Invalidate a cache entry. No writeback occurs; any changes in the cache
+ * block are thrown away.
+ */
+static void invalidate(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = true;
+ mg->promote = false;
+ mg->requeue_holder = true;
+ mg->invalidate = true;
+ mg->cache = cache;
+ mg->old_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = cell;
+ mg->new_ocell = NULL;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
/*----------------------------------------------------------------
* bio processing
*--------------------------------------------------------------*/
@@ -1109,13 +1295,6 @@ static bool spare_migration_bandwidth(struct cache *cache)
return current_volume < cache->migration_threshold;
}
-static bool is_writethrough_io(struct cache *cache, struct bio *bio,
- dm_cblock_t cblock)
-{
- return bio_data_dir(bio) == WRITE &&
- cache->features.write_through && !is_dirty(cache, cblock);
-}
-
static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
atomic_inc(bio_data_dir(bio) == READ ?
@@ -1128,6 +1307,15 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
&cache->stats.read_miss : &cache->stats.write_miss);
}
+static void issue_cache_bio(struct cache *cache, struct bio *bio,
+ struct per_bio_data *pb,
+ dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_cache_dirty(cache, bio, oblock, cblock);
+ issue(cache, bio);
+}
+
static void process_bio(struct cache *cache, struct prealloc *structs,
struct bio *bio)
{
@@ -1139,7 +1327,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
bool discarded_block = is_discarded_oblock(cache, block);
- bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
+ bool passthrough = passthrough_mode(&cache->features);
+ bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
/*
* Check to see if that block is currently migrating.
@@ -1160,15 +1349,39 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
switch (lookup_result.op) {
case POLICY_HIT:
- inc_hit_counter(cache, bio);
- pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ if (passthrough) {
+ inc_miss_counter(cache, bio);
- if (is_writethrough_io(cache, bio, lookup_result.cblock))
- remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
- else
- remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ /*
+ * Passthrough always maps to the origin,
+ * invalidating any cache blocks that are written
+ * to.
+ */
+
+ if (bio_data_dir(bio) == WRITE) {
+ atomic_inc(&cache->stats.demotion);
+ invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
+ release_cell = false;
+
+ } else {
+ /* FIXME: factor out issue_origin() */
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_origin_clear_discard(cache, bio, block);
+ issue(cache, bio);
+ }
+ } else {
+ inc_hit_counter(cache, bio);
+
+ if (bio_data_dir(bio) == WRITE &&
+ writethrough_mode(&cache->features) &&
+ !is_dirty(cache, lookup_result.cblock)) {
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ issue(cache, bio);
+ } else
+ issue_cache_bio(cache, bio, pb, block, lookup_result.cblock);
+ }
- issue(cache, bio);
break;
case POLICY_MISS:
@@ -1227,15 +1440,17 @@ static int need_commit_due_to_time(struct cache *cache)
static int commit_if_needed(struct cache *cache)
{
- if (dm_cache_changed_this_transaction(cache->cmd) &&
- (cache->commit_requested || need_commit_due_to_time(cache))) {
+ int r = 0;
+
+ if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
+ dm_cache_changed_this_transaction(cache->cmd)) {
atomic_inc(&cache->stats.commit_count);
- cache->last_commit_jiffies = jiffies;
cache->commit_requested = false;
- return dm_cache_commit(cache->cmd, false);
+ r = dm_cache_commit(cache->cmd, false);
+ cache->last_commit_jiffies = jiffies;
}
- return 0;
+ return r;
}
static void process_deferred_bios(struct cache *cache)
@@ -1344,36 +1559,88 @@ static void writeback_some_dirty_blocks(struct cache *cache)
}
/*----------------------------------------------------------------
- * Main worker loop
+ * Invalidations.
+ * Dropping something from the cache *without* writing back.
*--------------------------------------------------------------*/
-static void start_quiescing(struct cache *cache)
+
+static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
{
- unsigned long flags;
+ int r = 0;
+ uint64_t begin = from_cblock(req->cblocks->begin);
+ uint64_t end = from_cblock(req->cblocks->end);
- spin_lock_irqsave(&cache->lock, flags);
- cache->quiescing = 1;
- spin_unlock_irqrestore(&cache->lock, flags);
+ while (begin != end) {
+ r = policy_remove_cblock(cache->policy, to_cblock(begin));
+ if (!r) {
+ r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
+ if (r)
+ break;
+
+ } else if (r == -ENODATA) {
+ /* harmless, already unmapped */
+ r = 0;
+
+ } else {
+ DMERR("policy_remove_cblock failed");
+ break;
+ }
+
+ begin++;
+ }
+
+ cache->commit_requested = true;
+
+ req->err = r;
+ atomic_set(&req->complete, 1);
+
+ wake_up(&req->result_wait);
}
-static void stop_quiescing(struct cache *cache)
+static void process_invalidation_requests(struct cache *cache)
{
- unsigned long flags;
+ struct list_head list;
+ struct invalidation_request *req, *tmp;
- spin_lock_irqsave(&cache->lock, flags);
- cache->quiescing = 0;
- spin_unlock_irqrestore(&cache->lock, flags);
+ INIT_LIST_HEAD(&list);
+ spin_lock(&cache->invalidation_lock);
+ list_splice_init(&cache->invalidation_requests, &list);
+ spin_unlock(&cache->invalidation_lock);
+
+ list_for_each_entry_safe (req, tmp, &list, list)
+ process_invalidation_request(cache, req);
}
+/*----------------------------------------------------------------
+ * Main worker loop
+ *--------------------------------------------------------------*/
static bool is_quiescing(struct cache *cache)
{
- int r;
- unsigned long flags;
+ return atomic_read(&cache->quiescing);
+}
- spin_lock_irqsave(&cache->lock, flags);
- r = cache->quiescing;
- spin_unlock_irqrestore(&cache->lock, flags);
+static void ack_quiescing(struct cache *cache)
+{
+ if (is_quiescing(cache)) {
+ atomic_inc(&cache->quiescing_ack);
+ wake_up(&cache->quiescing_wait);
+ }
+}
- return r;
+static void wait_for_quiescing_ack(struct cache *cache)
+{
+ wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
+}
+
+static void start_quiescing(struct cache *cache)
+{
+ atomic_inc(&cache->quiescing);
+ wait_for_quiescing_ack(cache);
+}
+
+static void stop_quiescing(struct cache *cache)
+{
+ atomic_set(&cache->quiescing, 0);
+ atomic_set(&cache->quiescing_ack, 0);
}
static void wait_for_migrations(struct cache *cache)
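The quiescing handshake now works lock-free between the suspending thread and the worker; a sketch of the ordering it guarantees:

	/*
	 *   suspend path                     worker (do_worker)
	 *   ------------                     ------------------
	 *   start_quiescing()
	 *     atomic_inc(&quiescing)
	 *     wait_for_quiescing_ack() ...    is_quiescing() true, so bio
	 *                                     processing is skipped, then:
	 *                                     ack_quiescing()
	 *                                       atomic_inc(&quiescing_ack)
	 *                                       wake_up(&quiescing_wait)
	 *     ... woken, device quiesced
	 *   stop_quiescing()  - resets both counters on resume
	 */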
@@ -1412,7 +1679,8 @@ static int more_work(struct cache *cache)
!bio_list_empty(&cache->deferred_writethrough_bios) ||
!list_empty(&cache->quiesced_migrations) ||
!list_empty(&cache->completed_migrations) ||
- !list_empty(&cache->need_commit_migrations);
+ !list_empty(&cache->need_commit_migrations) ||
+ cache->invalidate;
}
static void do_worker(struct work_struct *ws)
@@ -1420,16 +1688,16 @@ static void do_worker(struct work_struct *ws)
struct cache *cache = container_of(ws, struct cache, worker);
do {
- if (!is_quiescing(cache))
+ if (!is_quiescing(cache)) {
+ writeback_some_dirty_blocks(cache);
+ process_deferred_writethrough_bios(cache);
process_deferred_bios(cache);
+ process_invalidation_requests(cache);
+ }
process_migrations(cache, &cache->quiesced_migrations, issue_copy);
process_migrations(cache, &cache->completed_migrations, complete_migration);
- writeback_some_dirty_blocks(cache);
-
- process_deferred_writethrough_bios(cache);
-
if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false);
@@ -1442,6 +1710,9 @@ static void do_worker(struct work_struct *ws)
process_migrations(cache, &cache->need_commit_migrations,
migration_success_post_commit);
}
+
+ ack_quiescing(cache);
+
} while (more_work(cache));
}
@@ -1715,7 +1986,7 @@ static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
static void init_features(struct cache_features *cf)
{
cf->mode = CM_WRITE;
- cf->write_through = false;
+ cf->io_mode = CM_IO_WRITEBACK;
}
static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
@@ -1740,10 +2011,13 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
arg = dm_shift_arg(as);
if (!strcasecmp(arg, "writeback"))
- cf->write_through = false;
+ cf->io_mode = CM_IO_WRITEBACK;
else if (!strcasecmp(arg, "writethrough"))
- cf->write_through = true;
+ cf->io_mode = CM_IO_WRITETHROUGH;
+
+ else if (!strcasecmp(arg, "passthrough"))
+ cf->io_mode = CM_IO_PASSTHROUGH;
else {
*error = "Unrecognised cache feature requested";
@@ -1872,14 +2146,15 @@ static int set_config_values(struct cache *cache, int argc, const char **argv)
static int create_cache_policy(struct cache *cache, struct cache_args *ca,
char **error)
{
- cache->policy = dm_cache_policy_create(ca->policy_name,
- cache->cache_size,
- cache->origin_sectors,
- cache->sectors_per_block);
- if (!cache->policy) {
+ struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
+ cache->cache_size,
+ cache->origin_sectors,
+ cache->sectors_per_block);
+ if (IS_ERR(p)) {
*error = "Error creating cache's policy";
- return -ENOMEM;
+ return PTR_ERR(p);
}
+ cache->policy = p;
return 0;
}
@@ -1995,6 +2270,22 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
cache->cmd = cmd;
+ if (passthrough_mode(&cache->features)) {
+ bool all_clean;
+
+ r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
+ if (r) {
+ *error = "dm_cache_metadata_all_clean() failed";
+ goto bad;
+ }
+
+ if (!all_clean) {
+ *error = "Cannot enter passthrough mode unless all blocks are clean";
+ r = -EINVAL;
+ goto bad;
+ }
+ }
+
spin_lock_init(&cache->lock);
bio_list_init(&cache->deferred_bios);
bio_list_init(&cache->deferred_flush_bios);
@@ -2005,6 +2296,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
+ init_waitqueue_head(&cache->quiescing_wait);
+ atomic_set(&cache->quiescing, 0);
+ atomic_set(&cache->quiescing_ack, 0);
+
r = -ENOMEM;
cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
@@ -2064,7 +2359,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->need_tick_bio = true;
cache->sized = false;
- cache->quiescing = false;
+ cache->invalidate = false;
cache->commit_requested = false;
cache->loaded_mappings = false;
cache->loaded_discards = false;
@@ -2078,6 +2373,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->stats.commit_count, 0);
atomic_set(&cache->stats.discard_count, 0);
+ spin_lock_init(&cache->invalidation_lock);
+ INIT_LIST_HEAD(&cache->invalidation_requests);
+
*result = cache;
return 0;
@@ -2207,17 +2505,37 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+ r = DM_MAPIO_REMAPPED;
switch (lookup_result.op) {
case POLICY_HIT:
- inc_hit_counter(cache, bio);
- pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ if (passthrough_mode(&cache->features)) {
+ if (bio_data_dir(bio) == WRITE) {
+ /*
+ * We need to invalidate this block, so
+ * defer it to the worker thread.
+ */
+ cell_defer(cache, cell, true);
+ r = DM_MAPIO_SUBMITTED;
+
+ } else {
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+ inc_miss_counter(cache, bio);
+ remap_to_origin_clear_discard(cache, bio, block);
+
+ cell_defer(cache, cell, false);
+ }
- if (is_writethrough_io(cache, bio, lookup_result.cblock))
- remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
- else
- remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ } else {
+ inc_hit_counter(cache, bio);
- cell_defer(cache, cell, false);
+ if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+ !is_dirty(cache, lookup_result.cblock))
+ remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+ else
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+
+ cell_defer(cache, cell, false);
+ }
break;
case POLICY_MISS:
@@ -2242,10 +2560,10 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
(unsigned) lookup_result.op);
bio_io_error(bio);
- return DM_MAPIO_SUBMITTED;
+ r = DM_MAPIO_SUBMITTED;
}
- return DM_MAPIO_REMAPPED;
+ return r;
}
static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
@@ -2406,26 +2724,71 @@ static int load_discard(void *context, sector_t discard_block_size,
return 0;
}
+static dm_cblock_t get_cache_dev_size(struct cache *cache)
+{
+ sector_t size = get_dev_size(cache->cache_dev);
+ (void) sector_div(size, cache->sectors_per_block);
+ return to_cblock(size);
+}
+
+static bool can_resize(struct cache *cache, dm_cblock_t new_size)
+{
+ if (from_cblock(new_size) > from_cblock(cache->cache_size))
+ return true;
+
+ /*
+ * We can't drop a dirty block when shrinking the cache.
+ */
+ while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
+ if (is_dirty(cache, new_size)) {
+ DMERR("unable to shrink cache; cache block %llu is dirty",
+ (unsigned long long) from_cblock(new_size));
+ return false;
+ }
+ new_size = to_cblock(from_cblock(new_size) + 1);
+ }
+
+ return true;
+}
+
+static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
+{
+ int r;
+
+ r = dm_cache_resize(cache->cmd, new_size);
+ if (r) {
+ DMERR("could not resize cache metadata");
+ return r;
+ }
+
+ cache->cache_size = new_size;
+
+ return 0;
+}
+
static int cache_preresume(struct dm_target *ti)
{
int r = 0;
struct cache *cache = ti->private;
- sector_t actual_cache_size = get_dev_size(cache->cache_dev);
- (void) sector_div(actual_cache_size, cache->sectors_per_block);
+ dm_cblock_t csize = get_cache_dev_size(cache);
/*
* Check to see if the cache has resized.
*/
- if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
- cache->cache_size = to_cblock(actual_cache_size);
-
- r = dm_cache_resize(cache->cmd, cache->cache_size);
- if (r) {
- DMERR("could not resize cache metadata");
+ if (!cache->sized) {
+ r = resize_cache_dev(cache, csize);
+ if (r)
return r;
- }
cache->sized = true;
+
+ } else if (csize != cache->cache_size) {
+ if (!can_resize(cache, csize))
+ return -EINVAL;
+
+ r = resize_cache_dev(cache, csize);
+ if (r)
+ return r;
}
if (!cache->loaded_mappings) {
@@ -2518,10 +2881,19 @@ static void cache_status(struct dm_target *ti, status_type_t type,
(unsigned long long) from_cblock(residency),
cache->nr_dirty);
- if (cache->features.write_through)
+ if (writethrough_mode(&cache->features))
DMEMIT("1 writethrough ");
- else
- DMEMIT("0 ");
+
+ else if (passthrough_mode(&cache->features))
+ DMEMIT("1 passthrough ");
+
+ else if (writeback_mode(&cache->features))
+ DMEMIT("1 writeback ");
+
+ else {
+ DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
+ goto err;
+ }
DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
if (sz < maxlen) {
@@ -2553,7 +2925,128 @@ err:
}
/*
- * Supports <key> <value>.
+ * A cache block range can take two forms:
+ *
+ * i) A single cblock, eg. '3456'
+ * ii) A begin and end cblock with a dash between, eg. 123-234
+ */
+static int parse_cblock_range(struct cache *cache, const char *str,
+ struct cblock_range *result)
+{
+ char dummy;
+ uint64_t b, e;
+ int r;
+
+ /*
+ * Try and parse form (ii) first.
+ */
+ r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
+ if (r < 0)
+ return r;
+
+ if (r == 2) {
+ result->begin = to_cblock(b);
+ result->end = to_cblock(e);
+ return 0;
+ }
+
+ /*
+ * That didn't work, try form (i).
+ */
+ r = sscanf(str, "%llu%c", &b, &dummy);
+ if (r < 0)
+ return r;
+
+ if (r == 1) {
+ result->begin = to_cblock(b);
+ result->end = to_cblock(from_cblock(result->begin) + 1u);
+ return 0;
+ }
+
+ DMERR("invalid cblock range '%s'", str);
+ return -EINVAL;
+}
+
+static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
+{
+ uint64_t b = from_cblock(range->begin);
+ uint64_t e = from_cblock(range->end);
+ uint64_t n = from_cblock(cache->cache_size);
+
+ if (b >= n) {
+ DMERR("begin cblock out of range: %llu >= %llu", b, n);
+ return -EINVAL;
+ }
+
+ if (e > n) {
+ DMERR("end cblock out of range: %llu > %llu", e, n);
+ return -EINVAL;
+ }
+
+ if (b >= e) {
+ DMERR("invalid cblock range: %llu >= %llu", b, e);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int request_invalidation(struct cache *cache, struct cblock_range *range)
+{
+ struct invalidation_request req;
+
+ INIT_LIST_HEAD(&req.list);
+ req.cblocks = range;
+ atomic_set(&req.complete, 0);
+ req.err = 0;
+ init_waitqueue_head(&req.result_wait);
+
+ spin_lock(&cache->invalidation_lock);
+ list_add(&req.list, &cache->invalidation_requests);
+ spin_unlock(&cache->invalidation_lock);
+ wake_worker(cache);
+
+ wait_event(req.result_wait, atomic_read(&req.complete));
+ return req.err;
+}
+
+static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
+ const char **cblock_ranges)
+{
+ int r = 0;
+ unsigned i;
+ struct cblock_range range;
+
+ if (!passthrough_mode(&cache->features)) {
+ DMERR("cache has to be in passthrough mode for invalidation");
+ return -EPERM;
+ }
+
+ for (i = 0; i < count; i++) {
+ r = parse_cblock_range(cache, cblock_ranges[i], &range);
+ if (r)
+ break;
+
+ r = validate_cblock_range(cache, &range);
+ if (r)
+ break;
+
+ /*
+ * Pass the begin and end cblocks to the worker and wake it.
+ */
+ r = request_invalidation(cache, &range);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+/*
+ * Supports
+ * "<key> <value>"
+ * and
+ * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*
*
* The key migration_threshold is supported by the cache target core.
*/
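A hedged usage example for the new message, as sent from userspace (the device name is an assumption; the leading 0 is the usual sector argument of dmsetup message):

	dmsetup message my-cache 0 invalidate_cblocks 2345 3300-3400

Each argument goes through parse_cblock_range() and validate_cblock_range() above, so single cblocks and begin-end ranges may be mixed freely, and the whole call fails with -EPERM unless the cache is in passthrough mode.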
@@ -2561,6 +3054,12 @@ static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct cache *cache = ti->private;
+ if (!argc)
+ return -EINVAL;
+
+ if (!strcasecmp(argv[0], "invalidate_cblocks"))
+ return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
+
if (argc != 2)
return -EINVAL;
@@ -2630,7 +3129,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 1, 1},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0fce0bc1a957..81b0fa660452 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2,6 +2,7 @@
* Copyright (C) 2003 Christophe Saout <christophe@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
* Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
*
* This file is released under the GPL.
*/
@@ -98,6 +99,13 @@ struct iv_lmk_private {
u8 *seed;
};
+#define TCW_WHITENING_SIZE 16
+struct iv_tcw_private {
+ struct crypto_shash *crc32_tfm;
+ u8 *iv_seed;
+ u8 *whitening;
+};
+
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
@@ -139,6 +147,7 @@ struct crypt_config {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
struct iv_lmk_private lmk;
+ struct iv_tcw_private tcw;
} iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
@@ -171,7 +180,8 @@ struct crypt_config {
unsigned long flags;
unsigned int key_size;
- unsigned int key_parts;
+ unsigned int key_parts; /* independent parts in key buffer */
+ unsigned int key_extra_size; /* additional keys length */
u8 key[0];
};
@@ -230,6 +240,16 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
* version 3: the same as version 2 with additional IV seed
* (it uses 65 keys, last key is used as IV seed)
*
+ * tcw: Compatible implementation of the block chaining mode used
+ * by the TrueCrypt device encryption system (prior to version 4.1).
+ * For more info see: http://www.truecrypt.org
+ * It operates on full 512 byte sectors and uses CBC
+ * with an IV derived from the initial key and the sector number.
+ * In addition, a whitening value is applied to every sector; it is
+ * calculated from the initial key and sector number and mixed using CRC32.
+ * Note that this encryption scheme is vulnerable to watermarking attacks
+ * and should only be used to access existing, old compatible containers.
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
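For reference, a hedged example of a table line that would exercise this IV generator (start/length, key and device are assumptions). With aes-cbc, the 64-byte key decomposes into 32 bytes of AES key, a 16-byte IV seed and 16 bytes of whitening, matching the key_extra_size accounting further down:

	0 417792 crypt aes-cbc-tcw <64-byte-key-in-hex> 0 /dev/sdb 0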
@@ -530,7 +550,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
char ctx[crypto_shash_descsize(lmk->hash_tfm)];
} sdesc;
struct md5_state md5state;
- u32 buf[4];
+ __le32 buf[4];
int i, r;
sdesc.desc.tfm = lmk->hash_tfm;
@@ -608,6 +628,153 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
return r;
}
+static void crypt_iv_tcw_dtr(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ kzfree(tcw->iv_seed);
+ tcw->iv_seed = NULL;
+ kzfree(tcw->whitening);
+ tcw->whitening = NULL;
+
+ if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
+ crypto_free_shash(tcw->crc32_tfm);
+ tcw->crc32_tfm = NULL;
+}
+
+static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
+ ti->error = "Wrong key size for TCW";
+ return -EINVAL;
+ }
+
+ tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(tcw->crc32_tfm)) {
+ ti->error = "Error initializing CRC32 in TCW";
+ return PTR_ERR(tcw->crc32_tfm);
+ }
+
+ tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
+ tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
+ if (!tcw->iv_seed || !tcw->whitening) {
+ crypt_iv_tcw_dtr(cc);
+ ti->error = "Error allocating seed storage in TCW";
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int crypt_iv_tcw_init(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
+
+ memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
+ memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
+ TCW_WHITENING_SIZE);
+
+ return 0;
+}
+
+static int crypt_iv_tcw_wipe(struct crypt_config *cc)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+
+ memset(tcw->iv_seed, 0, cc->iv_size);
+ memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
+
+ return 0;
+}
+
+static int crypt_iv_tcw_whitening(struct crypt_config *cc,
+ struct dm_crypt_request *dmreq,
+ u8 *data)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ u8 buf[TCW_WHITENING_SIZE];
+ struct {
+ struct shash_desc desc;
+ char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
+ } sdesc;
+ int i, r;
+
+ /* xor whitening with sector number */
+ memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
+ crypto_xor(buf, (u8 *)&sector, 8);
+ crypto_xor(&buf[8], (u8 *)&sector, 8);
+
+ /* calculate crc32 for every 32bit part and xor it */
+ sdesc.desc.tfm = tcw->crc32_tfm;
+ sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ for (i = 0; i < 4; i++) {
+ r = crypto_shash_init(&sdesc.desc);
+ if (r)
+ goto out;
+ r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
+ if (r)
+ goto out;
+ r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
+ if (r)
+ goto out;
+ }
+ crypto_xor(&buf[0], &buf[12], 4);
+ crypto_xor(&buf[4], &buf[8], 4);
+
+ /* apply whitening (8 bytes) to whole sector */
+ for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
+ crypto_xor(data + i * 8, buf, 8);
+out:
+ memset(buf, 0, sizeof(buf));
+ return r;
+}
+
+static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
+ u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ u8 *src;
+ int r = 0;
+
+ /* Remove whitening from ciphertext */
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
+ src = kmap_atomic(sg_page(&dmreq->sg_in));
+ r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
+ kunmap_atomic(src);
+ }
+
+ /* Calculate IV */
+ memcpy(iv, tcw->iv_seed, cc->iv_size);
+ crypto_xor(iv, (u8 *)&sector, 8);
+ if (cc->iv_size > 8)
+ crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
+
+ return r;
+}
+
+static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ u8 *dst;
+ int r;
+
+ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
+ return 0;
+
+ /* Apply whitening on ciphertext */
+ dst = kmap_atomic(sg_page(&dmreq->sg_out));
+ r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
+ kunmap_atomic(dst);
+
+ return r;
+}
+
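A short summary of how the hooks above cooperate, per I/O direction (restated from the code for orientation):

	/*
	 *   READ:  crypt_iv_tcw_gen()  - strips the whitening from the
	 *          ciphertext in place, then derives the IV from
	 *          iv_seed ^ sector
	 *   WRITE: crypt_iv_tcw_gen()  - derives the IV only
	 *          crypt_iv_tcw_post() - applies the whitening to the
	 *          freshly produced ciphertext
	 */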
static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -643,6 +810,15 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {
.post = crypt_iv_lmk_post
};
+static struct crypt_iv_operations crypt_iv_tcw_ops = {
+ .ctr = crypt_iv_tcw_ctr,
+ .dtr = crypt_iv_tcw_dtr,
+ .init = crypt_iv_tcw_init,
+ .wipe = crypt_iv_tcw_wipe,
+ .generator = crypt_iv_tcw_gen,
+ .post = crypt_iv_tcw_post
+};
+
static void crypt_convert_init(struct crypt_config *cc,
struct convert_context *ctx,
struct bio *bio_out, struct bio *bio_in,
@@ -774,7 +950,7 @@ static int crypt_convert(struct crypt_config *cc,
/* async */
case -EBUSY:
wait_for_completion(&ctx->restart);
- INIT_COMPLETION(ctx->restart);
+ reinit_completion(&ctx->restart);
/* fall through*/
case -EINPROGRESS:
this_cc->req = NULL;
@@ -1274,9 +1450,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
- unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+ unsigned subkey_size;
int err = 0, i, r;
+ /* Ignore extra keys (which are used for IV etc) */
+ subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
+
for (i = 0; i < cc->tfms_count; i++) {
r = crypto_ablkcipher_setkey(cc->tfms[i],
cc->key + (i * subkey_size),
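A worked example of the subkey arithmetic (hedged; assumes the tcw mode added above with an aes-cbc-tcw mapping and a 64-byte key):

	/*
	 *   key_size       = 64
	 *   key_extra_size = 16 (IV seed) + 16 (whitening) = 32
	 *   tfms_count     = 1
	 *   subkey_size    = (64 - 32) >> ilog2(1) = 32 bytes
	 *
	 * so only the leading 32 bytes ever reach crypto_ablkcipher_setkey().
	 */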
@@ -1409,6 +1588,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
return -EINVAL;
}
cc->key_parts = cc->tfms_count;
+ cc->key_extra_size = 0;
cc->cipher = kstrdup(cipher, GFP_KERNEL);
if (!cc->cipher)
@@ -1460,13 +1640,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
goto bad;
}
- /* Initialize and set key */
- ret = crypt_set_key(cc, key);
- if (ret < 0) {
- ti->error = "Error decoding and setting key";
- goto bad;
- }
-
/* Initialize IV */
cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
if (cc->iv_size)
@@ -1493,18 +1666,33 @@ static int crypt_ctr_cipher(struct dm_target *ti,
cc->iv_gen_ops = &crypt_iv_null_ops;
else if (strcmp(ivmode, "lmk") == 0) {
cc->iv_gen_ops = &crypt_iv_lmk_ops;
- /* Version 2 and 3 is recognised according
+ /*
+ * Version 2 and 3 is recognised according
* to length of provided multi-key string.
* If present (version 3), last key is used as IV seed.
+ * All keys (including IV seed) are always the same size.
*/
- if (cc->key_size % cc->key_parts)
+ if (cc->key_size % cc->key_parts) {
cc->key_parts++;
+ cc->key_extra_size = cc->key_size / cc->key_parts;
+ }
+ } else if (strcmp(ivmode, "tcw") == 0) {
+ cc->iv_gen_ops = &crypt_iv_tcw_ops;
+ cc->key_parts += 2; /* IV + whitening */
+ cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
} else {
ret = -EINVAL;
ti->error = "Invalid IV mode";
goto bad;
}
+ /* Initialize and set key */
+ ret = crypt_set_key(cc, key);
+ if (ret < 0) {
+ ti->error = "Error decoding and setting key";
+ goto bad;
+ }
+
/* Allocate IV */
if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
@@ -1817,7 +2005,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 12, 1},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 496d5f3646a5..2f91d6d4a2cc 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -20,6 +20,7 @@
struct delay_c {
struct timer_list delay_timer;
struct mutex timer_lock;
+ struct workqueue_struct *kdelayd_wq;
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
atomic_t may_delay;
@@ -45,14 +46,13 @@ struct dm_delay_info {
static DEFINE_MUTEX(delayed_bios_lock);
-static struct workqueue_struct *kdelayd_wq;
static struct kmem_cache *delayed_cache;
static void handle_delayed_timer(unsigned long data)
{
struct delay_c *dc = (struct delay_c *)data;
- queue_work(kdelayd_wq, &dc->flush_expired_bios);
+ queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}
static void queue_timeout(struct delay_c *dc, unsigned long expires)
@@ -191,6 +191,12 @@ out:
goto bad_dev_write;
}
+ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+ if (!dc->kdelayd_wq) {
+ DMERR("Couldn't start kdelayd");
+ goto bad_queue;
+ }
+
setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
@@ -203,6 +209,8 @@ out:
ti->private = dc;
return 0;
+bad_queue:
+ mempool_destroy(dc->delayed_pool);
bad_dev_write:
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
@@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- flush_workqueue(kdelayd_wq);
+ destroy_workqueue(dc->kdelayd_wq);
dm_put_device(ti, dc->dev_read);
@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
{
int r = -ENOMEM;
- kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
- if (!kdelayd_wq) {
- DMERR("Couldn't start kdelayd");
- goto bad_queue;
- }
-
delayed_cache = KMEM_CACHE(dm_delay_info, 0);
if (!delayed_cache) {
DMERR("Couldn't create delayed bio cache.");
@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
bad_register:
kmem_cache_destroy(delayed_cache);
bad_memcache:
- destroy_workqueue(kdelayd_wq);
-bad_queue:
return r;
}
@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
{
dm_unregister_target(&delay_target);
kmem_cache_destroy(delayed_cache);
- destroy_workqueue(kdelayd_wq);
}
/* Module hooks */
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index afe08146f73e..51521429fb59 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -57,7 +57,7 @@ struct vers_iter {
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];
-static void dm_hash_remove_all(int keep_open_devices);
+static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
/*
* Guards access to both hash tables.
@@ -86,7 +86,7 @@ static int dm_hash_init(void)
static void dm_hash_exit(void)
{
- dm_hash_remove_all(0);
+ dm_hash_remove_all(false, false, false);
}
/*-----------------------------------------------------------------
@@ -276,7 +276,7 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
return table;
}
-static void dm_hash_remove_all(int keep_open_devices)
+static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
{
int i, dev_skipped;
struct hash_cell *hc;
@@ -293,7 +293,8 @@ retry:
md = hc->md;
dm_get(md);
- if (keep_open_devices && dm_lock_for_deletion(md)) {
+ if (keep_open_devices &&
+ dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
dm_put(md);
dev_skipped++;
continue;
@@ -450,6 +451,11 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
return md;
}
+void dm_deferred_remove(void)
+{
+ dm_hash_remove_all(true, false, true);
+}
+
/*-----------------------------------------------------------------
* Implementation of the ioctl commands
*---------------------------------------------------------------*/
@@ -461,7 +467,7 @@ typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
static int remove_all(struct dm_ioctl *param, size_t param_size)
{
- dm_hash_remove_all(1);
+ dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
param->data_size = 0;
return 0;
}
@@ -683,6 +689,9 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (dm_suspended_md(md))
param->flags |= DM_SUSPEND_FLAG;
+ if (dm_test_deferred_remove_flag(md))
+ param->flags |= DM_DEFERRED_REMOVE;
+
param->dev = huge_encode_dev(disk_devt(disk));
/*
@@ -832,8 +841,13 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
/*
* Ensure the device is not open and nothing further can open it.
*/
- r = dm_lock_for_deletion(md);
+ r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
if (r) {
+ if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
+ up_write(&_hash_lock);
+ dm_put(md);
+ return 0;
+ }
DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
up_write(&_hash_lock);
dm_put(md);
@@ -848,6 +862,8 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
dm_table_destroy(t);
}
+ param->flags &= ~DM_DEFERRED_REMOVE;
+
if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
param->flags |= DM_UEVENT_GENERATED_FLAG;
@@ -1469,6 +1485,14 @@ static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
if (**argv != '@')
return 2; /* no '@' prefix, deliver to target */
+ if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
+ if (argc != 1) {
+ DMERR("Invalid arguments for @cancel_deferred_remove");
+ return -EINVAL;
+ }
+ return dm_cancel_deferred_remove(md);
+ }
+
r = dm_stats_message(md, argc, argv, result, maxlen);
if (r < 2)
return r;
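Combined with the dm.c changes below, the deferred-remove flow is driven from userspace roughly like this (hedged; --deferred needs a dmsetup recent enough to set DM_DEFERRED_REMOVE):

	dmsetup remove --deferred my-dev                    # -EBUSY turns into a deferred remove
	dmsetup message my-dev 0 @cancel_deferred_remove    # undo it while still open

The removal itself then runs from deferred_remove_work once the last opener closes the device.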
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de570a558764..6eb9dc9ef8f3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -87,6 +87,7 @@ struct multipath {
unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
+ unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
@@ -390,13 +391,16 @@ static int map_io(struct multipath *m, struct request *clone,
if (was_queued)
m->queue_size--;
- if ((pgpath && m->queue_io) ||
- (!pgpath && m->queue_if_no_path)) {
+ if (m->pg_init_required) {
+ if (!m->pg_init_in_progress)
+ queue_work(kmultipathd, &m->process_queued_ios);
+ r = DM_MAPIO_REQUEUE;
+ } else if ((pgpath && m->queue_io) ||
+ (!pgpath && m->queue_if_no_path)) {
/* Queue for the daemon to resubmit */
list_add_tail(&clone->queuelist, &m->queued_ios);
m->queue_size++;
- if ((m->pg_init_required && !m->pg_init_in_progress) ||
- !m->queue_io)
+ if (!m->queue_io)
queue_work(kmultipathd, &m->process_queued_ios);
pgpath = NULL;
r = DM_MAPIO_SUBMITTED;
@@ -497,7 +501,8 @@ static void process_queued_ios(struct work_struct *work)
(!pgpath && !m->queue_if_no_path))
must_queue = 0;
- if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+ if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
+ !m->pg_init_disabled)
__pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
@@ -942,10 +947,20 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ m->pg_init_disabled = 1;
+ spin_unlock_irqrestore(&m->lock, flags);
+
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
+
+ spin_lock_irqsave(&m->lock, flags);
+ m->pg_init_disabled = 0;
+ spin_unlock_irqrestore(&m->lock, flags);
}
static void multipath_dtr(struct dm_target *ti)
@@ -1164,7 +1179,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (m->pg_init_count <= m->pg_init_retries)
+ if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
m->pg_init_required = 1;
else
limit_reached = 1;
@@ -1665,6 +1680,11 @@ static int multipath_busy(struct dm_target *ti)
spin_lock_irqsave(&m->lock, flags);
+ /* pg_init in progress, requeue until done */
+ if (m->pg_init_in_progress) {
+ busy = 1;
+ goto out;
+ }
/* Guess which priority_group will be used at next mapping time */
if (unlikely(!m->current_pgpath && m->next_pg))
pg = m->next_pg;
@@ -1714,7 +1734,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 5, 1},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index aec57d76db5d..944690bafd93 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -66,6 +66,18 @@ struct dm_snapshot {
atomic_t pending_exceptions_count;
+ /* Protected by "lock" */
+ sector_t exception_start_sequence;
+
+ /* Protected by kcopyd single-threaded callback */
+ sector_t exception_complete_sequence;
+
+ /*
+ * A list of pending exceptions that completed out of order.
+ * Protected by kcopyd single-threaded callback.
+ */
+ struct list_head out_of_order_list;
+
mempool_t *pending_pool;
struct dm_exception_table pending;
@@ -173,6 +185,14 @@ struct dm_snap_pending_exception {
*/
int started;
+ /* There was a copying error. */
+ int copy_error;
+
+ /* A sequence number, used for in-order completion. */
+ sector_t exception_sequence;
+
+ struct list_head out_of_order_entry;
+
/*
* For writing a complete chunk, bypassing the copy.
*/
@@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->valid = 1;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
+ s->exception_start_sequence = 0;
+ s->exception_complete_sequence = 0;
+ INIT_LIST_HEAD(&s->out_of_order_list);
init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
@@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success)
pending_complete(pe, success);
}
+static void complete_exception(struct dm_snap_pending_exception *pe)
+{
+ struct dm_snapshot *s = pe->snap;
+
+ if (unlikely(pe->copy_error))
+ pending_complete(pe, 0);
+
+ else
+ /* Update the metadata if we are persistent */
+ s->store->type->commit_exception(s->store, &pe->e,
+ commit_callback, pe);
+}
+
/*
* Called when the copy I/O has finished. kcopyd actually runs
* this code so don't block.
@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
struct dm_snap_pending_exception *pe = context;
struct dm_snapshot *s = pe->snap;
- if (read_err || write_err)
- pending_complete(pe, 0);
+ pe->copy_error = read_err || write_err;
- else
- /* Update the metadata if we are persistent */
- s->store->type->commit_exception(s->store, &pe->e,
- commit_callback, pe);
+ if (pe->exception_sequence == s->exception_complete_sequence) {
+ s->exception_complete_sequence++;
+ complete_exception(pe);
+
+ while (!list_empty(&s->out_of_order_list)) {
+ pe = list_entry(s->out_of_order_list.next,
+ struct dm_snap_pending_exception, out_of_order_entry);
+ if (pe->exception_sequence != s->exception_complete_sequence)
+ break;
+ s->exception_complete_sequence++;
+ list_del(&pe->out_of_order_entry);
+ complete_exception(pe);
+ }
+ } else {
+ struct list_head *lh;
+ struct dm_snap_pending_exception *pe2;
+
+ list_for_each_prev(lh, &s->out_of_order_list) {
+ pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
+ if (pe2->exception_sequence < pe->exception_sequence)
+ break;
+ }
+ list_add(&pe->out_of_order_entry, lh);
+ }
}
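A hedged walk-through of the machinery above, with three pending exceptions whose copies finish out of order:

	/*
	 *   start: sequences 0,1,2 handed out; complete_sequence = 0
	 *   copy 2 finishes: 2 != 0 -> parked on out_of_order_list
	 *   copy 0 finishes: match  -> complete_exception(0), sequence = 1;
	 *                    list head is 2, not 1 -> stop
	 *   copy 1 finishes: match  -> complete_exception(1), sequence = 2;
	 *                    list head 2 now matches -> complete_exception(2),
	 *                    sequence = 3, list empty
	 */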
/*
@@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s,
return NULL;
}
+ pe->exception_sequence = s->exception_start_sequence++;
+
dm_insert_exception(&s->pending, &pe->e);
return pe;
@@ -2192,7 +2249,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 11, 1},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 3d404c1371ed..28a90122a5a8 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
int __init dm_statistics_init(void)
{
+ shared_memory_amount = 0;
dm_stat_need_rcu_barrier = 0;
return 0;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8f8783533ac7..3ba6a3859ce3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
+ if (!num_targets) {
+ kfree(t);
+ return -ENOMEM;
+ }
+
if (alloc_targets(t, num_targets)) {
kfree(t);
return -ENOMEM;
@@ -545,14 +550,28 @@ static int adjoin(struct dm_table *table, struct dm_target *ti)
/*
* Used to dynamically allocate the arg array.
+ *
+ * We do the first allocation with GFP_NOIO because dm-mpath and dm-thin must
+ * process messages even if some device is suspended. These messages have a
+ * small fixed number of arguments.
+ *
+ * On the other hand, dm-switch needs to process bulk data using messages and
+ * excessive use of GFP_NOIO could cause trouble.
*/
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
char **argv;
unsigned new_size;
+ gfp_t gfp;
- new_size = *array_size ? *array_size * 2 : 64;
- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
+ if (*array_size) {
+ new_size = *array_size * 2;
+ gfp = GFP_KERNEL;
+ } else {
+ new_size = 8;
+ gfp = GFP_NOIO;
+ }
+ argv = kmalloc(new_size * sizeof(*argv), gfp);
if (argv) {
memcpy(argv, old_argv, *array_size * sizeof(*argv));
*array_size = new_size;
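A hedged trace of the allocation ladder this creates:

	/*
	 *   call 1: *array_size = 0  -> 8 slots,  GFP_NOIO (message paths can
	 *                               run while devices are suspended)
	 *   call 2: *array_size = 8  -> 16 slots, GFP_KERNEL
	 *   call 3: *array_size = 16 -> 32 slots, GFP_KERNEL, and so on
	 */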
@@ -1548,8 +1567,11 @@ int dm_table_resume_targets(struct dm_table *t)
continue;
r = ti->type->preresume(ti);
- if (r)
+ if (r) {
+ DMERR("%s: %s: preresume failed, error = %d",
+ dm_device_name(t->md), ti->type->name, r);
return r;
+ }
}
for (i = 0; i < t->num_targets; i++) {
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 60bce435f4fa..8a30ad54bd46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
up_write(&pmd->root_lock);
}
+void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
+{
+ down_write(&pmd->root_lock);
+ pmd->read_only = false;
+ dm_bm_set_read_write(pmd->bm);
+ up_write(&pmd->root_lock);
+}
+
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
dm_sm_threshold_fn fn,
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 845ebbe589a9..7bcc0e1d6238 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz
* that nothing is changing.
*/
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
+void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2c0cf511ec23..ee29037ffc2e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
- DMERR_LIMIT("dm_thin_insert_block() failed");
+ DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
+ dm_device_name(pool->pool_md), r);
+ set_pool_mode(pool, PM_READ_ONLY);
cell_error(pool, m->cell);
goto out;
}
@@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
}
}
-static int commit(struct pool *pool)
-{
- int r;
-
- r = dm_pool_commit_metadata(pool->pmd);
- if (r)
- DMERR_LIMIT("%s: commit failed: error = %d",
- dm_device_name(pool->pool_md), r);
-
- return r;
-}
-
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
*/
-static int commit_or_fallback(struct pool *pool)
+static int commit(struct pool *pool)
{
int r;
if (get_pool_mode(pool) != PM_WRITE)
return -EINVAL;
- r = commit(pool);
- if (r)
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
+ dm_device_name(pool->pool_md), r);
set_pool_mode(pool, PM_READ_ONLY);
+ }
return r;
}
@@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
* Try to commit to see if that will free up some
* more space.
*/
- (void) commit_or_fallback(pool);
+ r = commit(pool);
+ if (r)
+ return r;
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
if (r)
@@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
* table reload).
*/
if (!free_blocks) {
- DMWARN("%s: no free space available.",
+ DMWARN("%s: no free data space available.",
dm_device_name(pool->pool_md));
spin_lock_irqsave(&pool->lock, flags);
pool->no_free_space = 1;
@@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
}
r = dm_pool_alloc_data_block(pool->pmd, result);
- if (r)
+ if (r) {
+ if (r == -ENOSPC &&
+ !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
+ !free_blocks) {
+ DMWARN("%s: no free metadata space available.",
+ dm_device_name(pool->pool_md));
+ set_pool_mode(pool, PM_READ_ONLY);
+ }
return r;
+ }
return 0;
}
@@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
return;
- if (commit_or_fallback(pool)) {
+ if (commit(pool)) {
while ((bio = bio_list_pop(&bios)))
bio_io_error(bio);
return;
@@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
case PM_FAIL:
DMERR("%s: switching pool to failure mode",
dm_device_name(pool->pool_md));
+ dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_WRITE:
+ dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
pool->process_prepared_mapping = process_prepared_mapping;
@@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
struct pool_c *pt = ti->private;
/*
- * We want to make sure that degraded pools are never upgraded.
+ * We want to make sure that a pool in PM_FAIL mode is never upgraded.
*/
enum pool_mode old_mode = pool->pf.mode;
enum pool_mode new_mode = pt->adjusted_pf.mode;
- if (old_mode > new_mode)
+ /*
+ * If we were in PM_FAIL mode, rollback of metadata failed. We're
+ * not going to recover without a thin_repair. So we never let the
+ * pool move out of the old mode. On the other hand a PM_READ_ONLY
+ * may have been due to a lack of metadata or data space, and may
+ * now work (ie. if the underlying devices have been resized).
+ */
+ if (old_mode == PM_FAIL)
new_mode = old_mode;
pool->ti = ti;
@@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti)
return r;
if (need_commit1 || need_commit2)
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
return 0;
}
@@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti)
cancel_delayed_work(&pool->waker);
flush_workqueue(pool->wq);
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
}
static int check_arg_count(unsigned argc, unsigned args_required)
@@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
if (r)
return r;
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
r = dm_pool_reserve_metadata_snap(pool->pmd);
if (r)
@@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
if (!r)
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
return r;
}
@@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
/* Commit to ensure statistics aren't out-of-date */
if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
if (r) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b3e26c7d1417..0704c523a76b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -49,6 +49,11 @@ static unsigned int _major = 0;
static DEFINE_IDR(_minor_idr);
static DEFINE_SPINLOCK(_minor_lock);
+
+static void do_deferred_remove(struct work_struct *w);
+
+static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
+
/*
* For bio-based dm.
* One of these is allocated per bio.
@@ -116,6 +121,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
+#define DMF_DEFERRED_REMOVE 7
/*
* A dummy definition to make RCU happy.
@@ -299,6 +305,8 @@ out_free_io_cache:
static void local_exit(void)
{
+ flush_scheduled_work();
+
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
@@ -404,7 +412,10 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
spin_lock(&_minor_lock);
- atomic_dec(&md->open_count);
+ if (atomic_dec_and_test(&md->open_count) &&
+ (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+ schedule_work(&deferred_remove_work);
+
dm_put(md);
spin_unlock(&_minor_lock);
@@ -418,14 +429,18 @@ int dm_open_count(struct mapped_device *md)
/*
* Guarantees nothing is using the device before it's deleted.
*/
-int dm_lock_for_deletion(struct mapped_device *md)
+int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
int r = 0;
spin_lock(&_minor_lock);
- if (dm_open_count(md))
+ if (dm_open_count(md)) {
r = -EBUSY;
+ if (mark_deferred)
+ set_bit(DMF_DEFERRED_REMOVE, &md->flags);
+ } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
+ r = -EEXIST;
else
set_bit(DMF_DELETING, &md->flags);
@@ -434,6 +449,27 @@ int dm_lock_for_deletion(struct mapped_device *md)
return r;
}
+int dm_cancel_deferred_remove(struct mapped_device *md)
+{
+ int r = 0;
+
+ spin_lock(&_minor_lock);
+
+ if (test_bit(DMF_DELETING, &md->flags))
+ r = -EBUSY;
+ else
+ clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
+
+ spin_unlock(&_minor_lock);
+
+ return r;
+}
+
+static void do_deferred_remove(struct work_struct *w)
+{
+ dm_deferred_remove();
+}
+
sector_t dm_get_size(struct mapped_device *md)
{
return get_capacity(md->disk);
@@ -2894,6 +2930,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
+int dm_test_deferred_remove_flag(struct mapped_device *md)
+{
+ return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
+}
+
int dm_suspended(struct dm_target *ti)
{
return dm_suspended_md(dm_table_get_md(ti->table));
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1d1ad7b7e527..c57ba550f69e 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -129,6 +129,16 @@ int dm_deleting_md(struct mapped_device *md);
int dm_suspended_md(struct mapped_device *md);
/*
+ * Test if the device is scheduled for deferred remove.
+ */
+int dm_test_deferred_remove_flag(struct mapped_device *md);
+
+/*
+ * Try to remove devices marked for deferred removal.
+ */
+void dm_deferred_remove(void);
+
+/*
* The device-mapper can be driven through one of two interfaces;
* ioctl or filesystem, depending which patch you have applied.
*/
@@ -158,7 +168,8 @@ void dm_stripe_exit(void);
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
-int dm_lock_for_deletion(struct mapped_device *md);
+int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
+int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct dm_stats *dm_get_stats(struct mapped_device *md);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 561a65f82e26..369d919bdafe 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -112,7 +112,7 @@ static inline int speed_max(struct mddev *mddev)
static struct ctl_table_header *raid_table_header;
-static ctl_table raid_table[] = {
+static struct ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
@@ -130,7 +130,7 @@ static ctl_table raid_table[] = {
{ }
};
-static ctl_table raid_dir_table[] = {
+static struct ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
@@ -140,7 +140,7 @@ static ctl_table raid_dir_table[] = {
{ }
};
-static ctl_table raid_root_table[] = {
+static struct ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
@@ -183,46 +183,6 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
-void md_trim_bio(struct bio *bio, int offset, int size)
-{
- /* 'bio' is a cloned bio which we need to trim to match
- * the given offset and size.
- * This requires adjusting bi_sector, bi_size, and bi_io_vec
- */
- int i;
- struct bio_vec *bvec;
- int sofar = 0;
-
- size <<= 9;
- if (offset == 0 && size == bio->bi_size)
- return;
-
- clear_bit(BIO_SEG_VALID, &bio->bi_flags);
-
- bio_advance(bio, offset << 9);
-
- bio->bi_size = size;
-
- /* avoid any complications with bi_idx being non-zero*/
- if (bio->bi_idx) {
- memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
- (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
- bio->bi_vcnt -= bio->bi_idx;
- bio->bi_idx = 0;
- }
- /* Make sure vcnt and last bv are not too big */
- bio_for_each_segment(bvec, bio, i) {
- if (sofar + bvec->bv_len > size)
- bvec->bv_len = size - sofar;
- if (bvec->bv_len == 0) {
- bio->bi_vcnt = i;
- break;
- }
- sofar += bvec->bv_len;
- }
-}
-EXPORT_SYMBOL_GPL(md_trim_bio);
-
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -602,11 +562,19 @@ static struct mddev * mddev_find(dev_t unit)
goto retry;
}
-static inline int mddev_lock(struct mddev * mddev)
+static inline int __must_check mddev_lock(struct mddev * mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
+/* Sometimes we need to take the lock in a situation where
+ * failure due to interrupts is not acceptable.
+ */
+static inline void mddev_lock_nointr(struct mddev * mddev)
+{
+ mutex_lock(&mddev->reconfig_mutex);
+}
+
static inline int mddev_is_locked(struct mddev *mddev)
{
return mutex_is_locked(&mddev->reconfig_mutex);
@@ -808,16 +776,10 @@ void md_super_wait(struct mddev *mddev)
finish_wait(&mddev->sb_wait, &wq);
}
-static void bi_complete(struct bio *bio, int error)
-{
- complete((struct completion*)bio->bi_private);
-}
-
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
- struct completion event;
int ret;
rw |= REQ_SYNC;
@@ -833,11 +795,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
else
bio->bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
- init_completion(&event);
- bio->bi_private = &event;
- bio->bi_end_io = bi_complete;
- submit_bio(rw, bio);
- wait_for_completion(&event);
+ submit_bio_wait(rw, bio);
ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio);
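
The sync_page_io() hunk above drops an open-coded completion in favour of submit_bio_wait(). For reference, the pattern being removed is the classic "end-io callback signals a completion the submitter blocks on"; a user-space sketch of that idea using pthreads (a model of the concept, not the kernel API):

#include <pthread.h>
#include <stdio.h>

struct toy_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static struct toy_completion comp = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void toy_complete(struct toy_completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void toy_wait(struct toy_completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void *toy_endio(void *arg)	/* stands in for bi_end_io */
{
	toy_complete(&comp);
	return NULL;
}

int main(void)
{
	pthread_t io;

	pthread_create(&io, NULL, toy_endio, NULL);	/* "submit" the I/O */
	toy_wait(&comp);				/* wait_for_completion */
	pthread_join(io, NULL);
	puts("io done");
	return 0;
}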
@@ -1119,6 +1077,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
@@ -1197,6 +1156,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
+ if (ev1 < mddev->events)
+ set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -1605,6 +1566,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
@@ -1687,6 +1649,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
+ if (ev1 < mddev->events)
+ set_bit(Bitmap_sync, &rdev->flags);
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
@@ -2830,6 +2794,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
@@ -3018,7 +2983,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
for_each_mddev(mddev, tmp) {
struct md_rdev *rdev2;
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
rdev_for_each(rdev2, mddev)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
@@ -3034,7 +2999,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
break;
}
}
- mddev_lock(my_mddev);
+ mddev_lock_nointr(my_mddev);
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
@@ -3555,7 +3520,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
- mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
+ mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (mddev->pers->sync_request != NULL &&
pers->sync_request == NULL) {
@@ -3620,6 +3585,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
+ blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev_resume(mddev);
@@ -5298,7 +5264,7 @@ static void __md_stop_writes(struct mddev *mddev)
void md_stop_writes(struct mddev *mddev)
{
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
@@ -5331,20 +5297,35 @@ EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
+ int did_freeze = 0;
+
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ /* Thread might be blocked waiting for metadata update
+ * which will now never happen */
+ wake_up_process(mddev->sync_thread->tsk);
+ }
+ mddev_unlock(mddev);
+ wait_event(resync_wait, mddev->sync_thread == NULL);
+ mddev_lock_nointr(mddev);
+
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > !!bdev) {
+ if (atomic_read(&mddev->openers) > !!bdev ||
+ mddev->sync_thread ||
+ (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
+ if (did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
err = -EBUSY;
goto out;
}
- if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
- /* Someone opened the device since we flushed it
- * so page cache could be dirty and it is too late
- * to flush. So abort
- */
- mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5355,7 +5336,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_state);
- err = 0;
+ err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
@@ -5371,20 +5352,34 @@ static int do_md_stop(struct mddev * mddev, int mode,
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
+ int did_freeze = 0;
+
+ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
+ did_freeze = 1;
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ /* Thread might be blocked waiting for metadata update
+ * which will now never happen */
+ wake_up_process(mddev->sync_thread->tsk);
+ }
+ mddev_unlock(mddev);
+ wait_event(resync_wait, mddev->sync_thread == NULL);
+ mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > !!bdev ||
- mddev->sysfs_active) {
+ mddev->sysfs_active ||
+ mddev->sync_thread ||
+ (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
- return -EBUSY;
- }
- if (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags)) {
- /* Someone opened the device since we flushed it
- * so page cache could be dirty and it is too late
- * to flush. So abort
- */
- mutex_unlock(&mddev->open_mutex);
+ if (did_freeze) {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
return -EBUSY;
}
if (mddev->pers) {
@@ -5772,6 +5767,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
+ clear_bit(Bitmap_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
} else
@@ -6591,7 +6587,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
- mddev_lock(mddev);
+ mddev_lock_nointr(mddev);
}
} else {
err = -EROFS;
@@ -7401,9 +7397,6 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = 2;
try_again:
- if (kthread_should_stop())
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
for_each_mddev(mddev2, tmp) {
@@ -7428,7 +7421,7 @@ void md_do_sync(struct md_thread *thread)
* be caught by 'softlockup'
*/
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
- if (!kthread_should_stop() &&
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" until %s has finished (they"
@@ -7504,7 +7497,7 @@ void md_do_sync(struct md_thread *thread)
last_check = 0;
if (j>2) {
- printk(KERN_INFO
+ printk(KERN_INFO
"md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
@@ -7541,7 +7534,8 @@ void md_do_sync(struct md_thread *thread)
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- while (j >= mddev->resync_max && !kthread_should_stop()) {
+ while (j >= mddev->resync_max &&
+ !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* As this condition is controlled by user-space,
* we can block indefinitely, so use '_interruptible'
* to avoid triggering warnings.
@@ -7549,17 +7543,18 @@ void md_do_sync(struct md_thread *thread)
flush_signals(current); /* just in case */
wait_event_interruptible(mddev->recovery_wait,
mddev->resync_max > j
- || kthread_should_stop());
+ || test_bit(MD_RECOVERY_INTR,
+ &mddev->recovery));
}
- if (kthread_should_stop())
- goto interrupted;
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ break;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- goto out;
+ break;
}
if (!skipped) { /* actual IO requested */
@@ -7596,10 +7591,8 @@ void md_do_sync(struct md_thread *thread)
last_mark = next;
}
-
- if (kthread_should_stop())
- goto interrupted;
-
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ break;
/*
* this loop exits only if either when we are slower than
@@ -7622,11 +7615,12 @@ void md_do_sync(struct md_thread *thread)
}
}
}
- printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
+ printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+ ? "interrupted" : "done");
/*
* this also signals 'finished resyncing' to md_stop
*/
- out:
blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
@@ -7680,16 +7674,6 @@ void md_do_sync(struct md_thread *thread)
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
-
- interrupted:
- /*
- * got a signal, exit.
- */
- printk(KERN_INFO
- "md: md_do_sync() got signal ... exiting\n");
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- goto out;
-
}
EXPORT_SYMBOL_GPL(md_do_sync);
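
The md_do_sync() changes above replace kthread_should_stop() with checks of MD_RECOVERY_INTR, so the resync loop can be interrupted by setting a flag rather than by tearing the thread down. A small user-space model of flag-based cooperative cancellation (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int recovery_intr;

static long toy_sync(long max_sectors)
{
	long j;

	for (j = 0; j < max_sectors; j++) {
		if (atomic_load(&recovery_intr))
			break;		/* interrupted: stop cleanly */
		if (j == 100)		/* pretend someone interrupts us */
			atomic_store(&recovery_intr, 1);
	}
	return j;
}

int main(void)
{
	printf("synced %ld sectors, %s\n", toy_sync(1000),
	       atomic_load(&recovery_intr) ? "interrupted" : "done");
	return 0;
}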
@@ -7730,7 +7714,8 @@ static int remove_and_add_spares(struct mddev *mddev,
if (test_bit(Faulty, &rdev->flags))
continue;
if (mddev->ro &&
- rdev->saved_raid_disk < 0)
+ ! (rdev->saved_raid_disk >= 0 &&
+ !test_bit(Bitmap_sync, &rdev->flags)))
continue;
rdev->recovery_offset = 0;
@@ -7791,7 +7776,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
- (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
+ (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
@@ -7811,9 +7796,12 @@ void md_check_recovery(struct mddev *mddev)
* As we only add devices that are already in-sync,
* we can activate the spares immediately.
*/
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
remove_and_add_spares(mddev, NULL);
- mddev->pers->spare_active(mddev);
+ /* There is no thread, but we need to call
+ * ->spare_active and clear saved_raid_disk
+ */
+ md_reap_sync_thread(mddev);
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
@@ -7934,6 +7922,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
+ wake_up(&resync_wait);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 608050c43f17..0095ec84ffc7 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -129,6 +129,9 @@ struct md_rdev {
enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
+ Bitmap_sync, /* ...actually, not quite In_sync. Needs a
+ * bitmap-based recovery to get fully in sync
+ */
Unmerged, /* device is being added to array and should
* be considered for bvec_merge_fn but not
* yet for actual IO
@@ -501,7 +504,7 @@ extern struct attribute_group md_bitmap_group;
static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
if (sd)
- return sysfs_get_dirent(sd, NULL, name);
+ return sysfs_get_dirent(sd, name);
return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
@@ -617,7 +620,6 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
-extern void md_trim_bio(struct bio *bio, int offset, int size);
extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
static inline int mddev_check_plugged(struct mddev *mddev)
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 172147eb1d40..1d75b1dc1e2e 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
* The shadow op will often be a noop. Only insert if it really
* copied data.
*/
- if (dm_block_location(*block) != b)
+ if (dm_block_location(*block) != b) {
+ /*
+ * dm_tm_shadow_block will have already decremented the old
+ * block, but it is still referenced by the btree. We
+ * increment to stop the insert decrementing it below zero
+ * when overwriting the old value.
+ */
+ dm_tm_inc(info->btree_info.tm, b);
r = insert_ablock(info, index, *block, root);
+ }
return r;
}
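
A tiny model of the reference-count fix in shadow_ablock() above: shadowing drops one reference to the old block while the btree still holds one, so an extra increment is taken before the insert's overwrite drops it again. Counts here are plain integers, purely for illustration:

#include <stdio.h>

static int refcount = 1;	/* the btree's reference to the old block */

static void shadow_block(void)
{
	refcount--;	/* dm_tm_shadow_block already dropped one ref */
}

static void take_extra_ref(void)
{
	refcount++;	/* the fix: dm_tm_inc before the insert */
}

static void insert_overwrite(void)
{
	refcount--;	/* overwriting the old value drops its ref */
}

int main(void)
{
	shadow_block();
	take_extra_ref();
	insert_overwrite();
	printf("refcount: %d\n", refcount);	/* 0, not -1 */
	return 0;
}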
@@ -509,15 +517,18 @@ static int grow_add_tail_block(struct resize *resize)
static int grow_needs_more_blocks(struct resize *resize)
{
int r;
+ unsigned old_nr_blocks = resize->old_nr_full_blocks;
if (resize->old_nr_entries_in_last_block > 0) {
+ old_nr_blocks++;
+
r = grow_extend_tail_block(resize, resize->max_entries);
if (r)
return r;
}
r = insert_full_ablocks(resize->info, resize->size_of_block,
- resize->old_nr_full_blocks,
+ old_nr_blocks,
resize->new_nr_full_blocks,
resize->max_entries, resize->value,
&resize->root);
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index a7e8bf296388..064a3c271baa 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm)
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
+void dm_bm_set_read_write(struct dm_block_manager *bm)
+{
+ bm->read_only = false;
+}
+EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
+
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
return crc32c(~(u32) 0, data, len) ^ init_xor;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 9a82083a66b6..13cd58e1fe69 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b);
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock);
- /*
- * Request data be prefetched into the cache.
- */
+/*
+ * Request that data be prefetched into the cache.
+ */
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
/*
@@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
* be returned if you do.
*/
void dm_bm_set_read_only(struct dm_block_manager *bm);
+void dm_bm_set_read_write(struct dm_block_manager *bm);
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 6058569fe86c..466a60bbd716 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
}
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
- uint32_t (*mutator)(void *context, uint32_t old),
+ int (*mutator)(void *context, uint32_t old, uint32_t *new),
void *context, enum allocation_event *ev)
{
int r;
@@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
if (old > 2) {
r = sm_ll_lookup_big_ref_count(ll, b, &old);
- if (r < 0)
+ if (r < 0) {
+ dm_tm_unlock(ll->tm, nb);
return r;
+ }
}
- ref_count = mutator(context, old);
+ r = mutator(context, old, &ref_count);
+ if (r) {
+ dm_tm_unlock(ll->tm, nb);
+ return r;
+ }
if (ref_count <= 2) {
sm_set_bitmap(bm_le, bit, ref_count);
@@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
return ll->save_ie(ll, index, &ie_disk);
}
-static uint32_t set_ref_count(void *context, uint32_t old)
+static int set_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return *((uint32_t *) context);
+ *new = *((uint32_t *) context);
+ return 0;
}
int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
@@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}
-static uint32_t inc_ref_count(void *context, uint32_t old)
+static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return old + 1;
+ *new = old + 1;
+ return 0;
}
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
@@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
}
-static uint32_t dec_ref_count(void *context, uint32_t old)
+static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return old - 1;
+ if (!old) {
+ DMERR_LIMIT("unable to decrement a reference count below 0");
+ return -EINVAL;
+ }
+
+ *new = old - 1;
+ return 0;
}
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
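
The sm_ll_mutate() rework above changes the mutator callback from returning the new count to returning an error with the count in an out-parameter, which is what lets dec_ref_count() refuse to underflow. A user-space sketch of that callback shape (names are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef int (*mutator_fn)(void *context, uint32_t old, uint32_t *new);

static int dec_ref(void *context, uint32_t old, uint32_t *new)
{
	if (!old)
		return -EINVAL;	/* refuse to decrement below zero */
	*new = old - 1;
	return 0;
}

static int mutate(uint32_t *count, mutator_fn fn, void *context)
{
	uint32_t new;
	int r = fn(context, *count, &new);

	if (r)
		return r;	/* leave *count untouched on failure */
	*count = new;
	return 0;
}

int main(void)
{
	uint32_t count = 1;

	printf("dec: %d count: %u\n", mutate(&count, dec_ref, NULL), count);
	printf("dec: %d count: %u\n", mutate(&count, dec_ref, NULL), count);
	return 0;
}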
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index e735a6d5a793..cfbf9617e465 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -140,26 +140,10 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
{
- int r;
- uint32_t old_count;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- r = sm_ll_dec(&smd->ll, b, &ev);
- if (!r && (ev == SM_FREE)) {
- /*
- * It's only free if it's also free in the last
- * transaction.
- */
- r = sm_ll_lookup(&smd->old_ll, b, &old_count);
- if (r)
- return r;
-
- if (!old_count)
- smd->nr_allocated_this_transaction--;
- }
-
- return r;
+ return sm_ll_dec(&smd->ll, b, &ev);
}
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 1c959684caef..58fc1eef7499 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
int r = sm_metadata_new_block_(sm, b);
- if (r)
+ if (r) {
DMERR("unable to allocate new metadata block");
+ return r;
+ }
r = sm_metadata_get_nr_free(sm, &count);
- if (r)
+ if (r) {
DMERR("couldn't get free block count");
+ return r;
+ }
check_threshold(&smm->threshold, count);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index aacf6bf352d8..a49cfcc7a343 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -66,7 +66,8 @@
*/
static int max_queued_requests = 1024;
-static void allow_barrier(struct r1conf *conf);
+static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
+ sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
@@ -84,10 +85,12 @@ static void r1bio_pool_free(void *r1_bio, void *data)
}
#define RESYNC_BLOCK_SIZE (64*1024)
-//#define RESYNC_BLOCK_SIZE PAGE_SIZE
+#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-#define RESYNC_WINDOW (2048*1024)
+#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
+#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
+#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
@@ -225,6 +228,8 @@ static void call_bio_endio(struct r1bio *r1_bio)
struct bio *bio = r1_bio->master_bio;
int done;
struct r1conf *conf = r1_bio->mddev->private;
+ sector_t start_next_window = r1_bio->start_next_window;
+ sector_t bi_sector = bio->bi_sector;
if (bio->bi_phys_segments) {
unsigned long flags;
@@ -232,6 +237,11 @@ static void call_bio_endio(struct r1bio *r1_bio)
bio->bi_phys_segments--;
done = (bio->bi_phys_segments == 0);
spin_unlock_irqrestore(&conf->device_lock, flags);
+ /*
+ * make_request() might be waiting for
+ * bi_phys_segments to decrease
+ */
+ wake_up(&conf->wait_barrier);
} else
done = 1;
@@ -243,7 +253,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
* Wake up any possible resync thread that waits for the device
* to go idle.
*/
- allow_barrier(conf);
+ allow_barrier(conf, start_next_window, bi_sector);
}
}
@@ -814,8 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
* there is no normal IO happening. It must arrange to call
* lower_barrier when the particular background IO completes.
*/
-#define RESYNC_DEPTH 32
-
static void raise_barrier(struct r1conf *conf)
{
spin_lock_irq(&conf->resync_lock);
@@ -827,9 +835,19 @@ static void raise_barrier(struct r1conf *conf)
/* block any new IO from starting */
conf->barrier++;
- /* Now wait for all pending IO to complete */
+ /* For these conditions we must wait:
+ * A: while the array is in frozen state
+ * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
+ * the maximum allowed count.
+ * C: while next_resync + RESYNC_SECTORS > start_next_window, meaning
+ * the next resync will reach into the window which normal bios
+ * are handling.
+ */
wait_event_lock_irq(conf->wait_barrier,
- !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
+ !conf->array_frozen &&
+ conf->barrier < RESYNC_DEPTH &&
+ (conf->start_next_window >=
+ conf->next_resync + RESYNC_SECTORS),
conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
@@ -845,10 +863,33 @@ static void lower_barrier(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static void wait_barrier(struct r1conf *conf)
+static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
{
+ bool wait = false;
+
+ if (conf->array_frozen || !bio)
+ wait = true;
+ else if (conf->barrier && bio_data_dir(bio) == WRITE) {
+ if (conf->next_resync < RESYNC_WINDOW_SECTORS)
+ wait = true;
+ else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
+ >= bio_end_sector(bio)) ||
+ (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ <= bio->bi_sector))
+ wait = false;
+ else
+ wait = true;
+ }
+
+ return wait;
+}
+
+static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
+{
+ sector_t sector = 0;
+
spin_lock_irq(&conf->resync_lock);
- if (conf->barrier) {
+ if (need_to_wait_for_sync(conf, bio)) {
conf->nr_waiting++;
/* Wait for the barrier to drop.
* However if there are already pending
@@ -860,22 +901,66 @@ static void wait_barrier(struct r1conf *conf)
* count down.
*/
wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
- (conf->nr_pending &&
+ !conf->array_frozen &&
+ (!conf->barrier ||
+ ((conf->start_next_window <
+ conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ !bio_list_empty(current->bio_list))),
conf->resync_lock);
conf->nr_waiting--;
}
+
+ if (bio && bio_data_dir(bio) == WRITE) {
+ if (conf->next_resync + NEXT_NORMALIO_DISTANCE
+ <= bio->bi_sector) {
+ if (conf->start_next_window == MaxSector)
+ conf->start_next_window =
+ conf->next_resync +
+ NEXT_NORMALIO_DISTANCE;
+
+ if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
+ <= bio->bi_sector)
+ conf->next_window_requests++;
+ else
+ conf->current_window_requests++;
+ sector = conf->start_next_window;
+ }
+ }
+
conf->nr_pending++;
spin_unlock_irq(&conf->resync_lock);
+ return sector;
}
-static void allow_barrier(struct r1conf *conf)
+static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
+ sector_t bi_sector)
{
unsigned long flags;
+
spin_lock_irqsave(&conf->resync_lock, flags);
conf->nr_pending--;
+ if (start_next_window) {
+ if (start_next_window == conf->start_next_window) {
+ if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
+ <= bi_sector)
+ conf->next_window_requests--;
+ else
+ conf->current_window_requests--;
+ } else
+ conf->current_window_requests--;
+
+ if (!conf->current_window_requests) {
+ if (conf->next_window_requests) {
+ conf->current_window_requests =
+ conf->next_window_requests;
+ conf->next_window_requests = 0;
+ conf->start_next_window +=
+ NEXT_NORMALIO_DISTANCE;
+ } else
+ conf->start_next_window = MaxSector;
+ }
+ }
spin_unlock_irqrestore(&conf->resync_lock, flags);
wake_up(&conf->wait_barrier);
}
@@ -884,8 +969,7 @@ static void freeze_array(struct r1conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
- * We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+extra
+ * We wait until nr_pending matches nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
@@ -895,8 +979,7 @@ static void freeze_array(struct r1conf *conf, int extra)
* we continue.
*/
spin_lock_irq(&conf->resync_lock);
- conf->barrier++;
- conf->nr_waiting++;
+ conf->array_frozen = 1;
wait_event_lock_irq_cmd(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
@@ -907,8 +990,7 @@ static void unfreeze_array(struct r1conf *conf)
{
/* reverse the effect of the freeze */
spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
- conf->nr_waiting--;
+ conf->array_frozen = 0;
wake_up(&conf->wait_barrier);
spin_unlock_irq(&conf->resync_lock);
}
@@ -1013,6 +1095,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
int first_clone;
int sectors_handled;
int max_sectors;
+ sector_t start_next_window;
/*
* Register the new request and wait if the reconstruction
@@ -1042,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
finish_wait(&conf->wait_barrier, &w);
}
- wait_barrier(conf);
+ start_next_window = wait_barrier(conf, bio);
bitmap = mddev->bitmap;
@@ -1097,8 +1180,8 @@ read_again:
r1_bio->read_disk = rdisk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+ max_sectors);
r1_bio->bios[rdisk] = read_bio;
@@ -1163,6 +1246,7 @@ read_again:
disks = conf->raid_disks * 2;
retry_write:
+ r1_bio->start_next_window = start_next_window;
blocked_rdev = NULL;
rcu_read_lock();
max_sectors = r1_bio->sectors;
@@ -1231,14 +1315,24 @@ read_again:
if (unlikely(blocked_rdev)) {
/* Wait for this device to become unblocked */
int j;
+ sector_t old = start_next_window;
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
- allow_barrier(conf);
+ allow_barrier(conf, start_next_window, bio->bi_sector);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf);
+ start_next_window = wait_barrier(conf, bio);
+ /*
+ * We must make sure that the multiple r1bios derived from bio
+ * all see the same value of bi_phys_segments
+ */
+ if (bio->bi_phys_segments && old &&
+ old != start_next_window)
+ /* Wait for the former r1bio(s) to complete */
+ wait_event(conf->wait_barrier,
+ bio->bi_phys_segments == 1);
goto retry_write;
}
@@ -1266,7 +1360,7 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1438,11 +1532,14 @@ static void print_conf(struct r1conf *conf)
static void close_sync(struct r1conf *conf)
{
- wait_barrier(conf);
- allow_barrier(conf);
+ wait_barrier(conf, NULL);
+ allow_barrier(conf, 0, 0);
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
+
+ conf->next_resync = 0;
+ conf->start_next_window = MaxSector;
}
static int raid1_spare_active(struct mddev *mddev)
@@ -2126,7 +2223,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
wbio->bi_sector = r1_bio->sector;
wbio->bi_size = r1_bio->sectors << 9;
- md_trim_bio(wbio, sector - r1_bio->sector, sectors);
+ bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
if (submit_bio_wait(WRITE, wbio) == 0)
@@ -2241,7 +2338,7 @@ read_more:
}
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
- md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+ bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
r1_bio->bios[r1_bio->read_disk] = bio;
rdev = conf->mirrors[disk].rdev;
printk_ratelimited(KERN_ERR
@@ -2714,6 +2811,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
+ conf->start_next_window = MaxSector;
+ conf->current_window_requests = conf->next_window_requests = 0;
+
err = -EIO;
for (i = 0; i < conf->raid_disks * 2; i++) {
@@ -2871,8 +2971,8 @@ static int stop(struct mddev *mddev)
atomic_read(&bitmap->behind_writes) == 0);
}
- raise_barrier(conf);
- lower_barrier(conf);
+ freeze_array(conf, 0);
+ unfreeze_array(conf);
md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
@@ -3031,10 +3131,10 @@ static void raid1_quiesce(struct mddev *mddev, int state)
wake_up(&conf->wait_barrier);
break;
case 1:
- raise_barrier(conf);
+ freeze_array(conf, 0);
break;
case 0:
- lower_barrier(conf);
+ unfreeze_array(conf);
break;
}
}
@@ -3051,7 +3151,8 @@ static void *raid1_takeover(struct mddev *mddev)
mddev->new_chunk_sectors = 0;
conf = setup_conf(mddev);
if (!IS_ERR(conf))
- conf->barrier = 1;
+ /* Array must appear to be quiesced */
+ conf->array_frozen = 1;
return conf;
}
return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 0ff3715fb7eb..9bebca7bff2f 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -41,6 +41,19 @@ struct r1conf {
*/
sector_t next_resync;
+ /* When raid1 starts resync, we divide the array into four partitions:
+ * |---------|--------------|---------------------|-------------|
+ * next_resync start_next_window end_window
+ * start_next_window = next_resync + NEXT_NORMALIO_DISTANCE
+ * end_window = start_next_window + NEXT_NORMALIO_DISTANCE
+ * current_window_requests is the count of normal IO between
+ * start_next_window and end_window.
+ * next_window_requests is the count of normal IO after end_window.
+ */
+ sector_t start_next_window;
+ int current_window_requests;
+ int next_window_requests;
+
spinlock_t device_lock;
/* list of 'struct r1bio' that need to be processed by raid1d,
@@ -65,6 +78,7 @@ struct r1conf {
int nr_waiting;
int nr_queued;
int barrier;
+ int array_frozen;
/* Set to 1 if a full sync is needed, (fresh device added).
* Cleared when a sync completes.
@@ -111,6 +125,7 @@ struct r1bio {
* in this BehindIO request
*/
sector_t sector;
+ sector_t start_next_window;
int sectors;
unsigned long state;
struct mddev *mddev;
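
A compilable sketch of the window arithmetic documented in the raid1.h comment above, simplified to single-sector writes: writes already behind the resync point proceed, writes near it wait, and writes far enough ahead are charged to the current or next window. The constants mirror the defines in raid1.c; everything else is illustrative:

#include <stdio.h>

#define RESYNC_BLOCK_SIZE	(64 * 1024)
#define RESYNC_DEPTH		32
#define RESYNC_WINDOW_SECTORS	((RESYNC_BLOCK_SIZE * RESYNC_DEPTH) >> 9)
#define NEXT_NORMALIO_DISTANCE	(3 * RESYNC_WINDOW_SECTORS)

static const char *classify(long next_resync, long start_next_window,
			    long sector)
{
	if (sector <= next_resync - RESYNC_WINDOW_SECTORS)
		return "proceed (already resynced)";
	if (sector < next_resync + NEXT_NORMALIO_DISTANCE)
		return "wait";
	if (sector >= start_next_window + NEXT_NORMALIO_DISTANCE)
		return "next window";
	return "current window";
}

int main(void)
{
	long next_resync = 100000;
	long start_next_window = next_resync + NEXT_NORMALIO_DISTANCE;

	printf("%s\n", classify(next_resync, start_next_window, 1000));
	printf("%s\n", classify(next_resync, start_next_window, 101000));
	printf("%s\n", classify(next_resync, start_next_window, 115000));
	printf("%s\n", classify(next_resync, start_next_window, 140000));
	return 0;
}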
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 73dc8a377522..06eeb99ea6fc 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1302,8 +1302,8 @@ read_again:
slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[slot].bio = read_bio;
r10_bio->devs[slot].rdev = rdev;
@@ -1319,7 +1319,7 @@ read_again:
/* Could not read all from this device, so we will
* need another r10_bio.
*/
- sectors_handled = (r10_bio->sectors + max_sectors
+ sectors_handled = (r10_bio->sector + max_sectors
- bio->bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
@@ -1327,7 +1327,7 @@ read_again:
bio->bi_phys_segments = 2;
else
bio->bi_phys_segments++;
- spin_unlock(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
/* Cannot call generic_make_request directly
* as that will be queued in __generic_make_request
* and subsequent mempool_alloc might block
@@ -1510,8 +1510,8 @@ retry_write:
if (r10_bio->devs[i].bio) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[i].bio = mbio;
mbio->bi_sector = (r10_bio->devs[i].addr+
@@ -1553,8 +1553,8 @@ retry_write:
rdev = conf->mirrors[d].rdev;
}
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[i].repl_bio = mbio;
mbio->bi_sector = (r10_bio->devs[i].addr +
@@ -2614,7 +2614,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(wbio, sector - bio->bi_sector, sectors);
+ bio_trim(wbio, sector - bio->bi_sector, sectors);
wbio->bi_sector = (r10_bio->devs[i].addr+
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
@@ -2687,9 +2687,7 @@ read_more:
(unsigned long long)r10_bio->sector);
bio = bio_clone_mddev(r10_bio->master_bio,
GFP_NOIO, mddev);
- md_trim_bio(bio,
- r10_bio->sector - bio->bi_sector,
- max_sectors);
+ bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
r10_bio->devs[slot].bio = bio;
r10_bio->devs[slot].rdev = rdev;
bio->bi_sector = r10_bio->devs[slot].addr
@@ -3220,10 +3218,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
- put_buf(r10_bio);
- if (rb2)
- atomic_dec(&rb2->remaining);
- r10_bio = rb2;
if (any_working) {
/* problem is that there are bad blocks
* on other device(s)
@@ -3255,6 +3249,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
mirror->recovery_disabled
= mddev->recovery_disabled;
}
+ put_buf(r10_bio);
+ if (rb2)
+ atomic_dec(&rb2->remaining);
+ r10_bio = rb2;
break;
}
}
@@ -4386,7 +4384,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait, mddev->flags == 0 ||
- kthread_should_stop());
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ allow_barrier(conf);
+ return sectors_done;
+ }
conf->reshape_safe = mddev->reshape_position;
allow_barrier(conf);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f8b906843926..cbb15716a5db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -85,6 +85,42 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
return &conf->stripe_hashtbl[hash];
}
+static inline int stripe_hash_locks_hash(sector_t sect)
+{
+ return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
+}
+
+static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
+{
+ spin_lock_irq(conf->hash_locks + hash);
+ spin_lock(&conf->device_lock);
+}
+
+static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
+{
+ spin_unlock(&conf->device_lock);
+ spin_unlock_irq(conf->hash_locks + hash);
+}
+
+static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
+{
+ int i;
+ local_irq_disable();
+ spin_lock(conf->hash_locks);
+ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
+ spin_lock(&conf->device_lock);
+}
+
+static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
+{
+ int i;
+ spin_unlock(&conf->device_lock);
+ for (i = NR_STRIPE_HASH_LOCKS; i; i--)
+ spin_unlock(conf->hash_locks + i - 1);
+ local_irq_enable();
+}
+
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
* order without overlap. There may be several bio's per stripe+device, and
* a bio could span several devices.
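
The helpers added above shard the stripe bookkeeping across NR_STRIPE_HASH_LOCKS spinlocks keyed by sector, and take all of them in a fixed order when a global view is needed, so two callers can never deadlock. A user-space model of the scheme using pthread mutexes (sizes and names are illustrative):

#include <pthread.h>
#include <stdio.h>

#define NR_HASH_LOCKS	8
#define HASH_MASK	(NR_HASH_LOCKS - 1)

static pthread_mutex_t hash_locks[NR_HASH_LOCKS];

static int hash_of(long sector)
{
	return (sector >> 4) & HASH_MASK;	/* stand-in for STRIPE_SHIFT */
}

static void lock_one(long sector)
{
	pthread_mutex_lock(&hash_locks[hash_of(sector)]);
}

static void unlock_one(long sector)
{
	pthread_mutex_unlock(&hash_locks[hash_of(sector)]);
}

static void lock_all(void)
{
	int i;

	for (i = 0; i < NR_HASH_LOCKS; i++)	/* fixed order: no deadlock */
		pthread_mutex_lock(&hash_locks[i]);
}

static void unlock_all(void)
{
	int i;

	for (i = NR_HASH_LOCKS; i; i--)		/* reverse order */
		pthread_mutex_unlock(&hash_locks[i - 1]);
}

int main(void)
{
	int i;

	for (i = 0; i < NR_HASH_LOCKS; i++)
		pthread_mutex_init(&hash_locks[i], NULL);

	lock_one(4096);
	unlock_one(4096);
	lock_all();
	unlock_all();
	puts("ok");
	return 0;
}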
@@ -249,7 +285,8 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
}
}
-static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
+ struct list_head *temp_inactive_list)
{
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
@@ -278,37 +315,68 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
< IO_THRESHOLD)
md_wakeup_thread(conf->mddev->thread);
atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
- list_add_tail(&sh->lru, &conf->inactive_list);
- wake_up(&conf->wait_for_stripe);
- if (conf->retry_read_aligned)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (!test_bit(STRIPE_EXPANDING, &sh->state))
+ list_add_tail(&sh->lru, temp_inactive_list);
}
}
-static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
+ struct list_head *temp_inactive_list)
{
if (atomic_dec_and_test(&sh->count))
- do_release_stripe(conf, sh);
+ do_release_stripe(conf, sh, temp_inactive_list);
}
-static struct llist_node *llist_reverse_order(struct llist_node *head)
+/*
+ * If @hash is NR_STRIPE_HASH_LOCKS, temp_inactive_list holds one list
+ * per hash lock; otherwise it is a single list.
+ *
+ * Be careful: only one task can add/delete stripes from
+ * temp_inactive_list at a given time. Adding stripes only takes the
+ * device lock, while deleting stripes only takes the hash lock.
+ */
+static void release_inactive_stripe_list(struct r5conf *conf,
+ struct list_head *temp_inactive_list,
+ int hash)
{
- struct llist_node *new_head = NULL;
+ int size;
+ bool do_wakeup = false;
+ unsigned long flags;
- while (head) {
- struct llist_node *tmp = head;
- head = head->next;
- tmp->next = new_head;
- new_head = tmp;
+ if (hash == NR_STRIPE_HASH_LOCKS) {
+ size = NR_STRIPE_HASH_LOCKS;
+ hash = NR_STRIPE_HASH_LOCKS - 1;
+ } else
+ size = 1;
+ while (size) {
+ struct list_head *list = &temp_inactive_list[size - 1];
+
+ /*
+ * We don't hold any lock here yet, so get_active_stripe() might
+ * remove stripes from the list
+ */
+ if (!list_empty_careful(list)) {
+ spin_lock_irqsave(conf->hash_locks + hash, flags);
+ if (list_empty(conf->inactive_list + hash) &&
+ !list_empty(list))
+ atomic_dec(&conf->empty_inactive_list_nr);
+ list_splice_tail_init(list, conf->inactive_list + hash);
+ do_wakeup = true;
+ spin_unlock_irqrestore(conf->hash_locks + hash, flags);
+ }
+ size--;
+ hash--;
}
- return new_head;
+ if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
+ if (conf->retry_read_aligned)
+ md_wakeup_thread(conf->mddev->thread);
+ }
}
/* should hold conf->device_lock already */
-static int release_stripe_list(struct r5conf *conf)
+static int release_stripe_list(struct r5conf *conf,
+ struct list_head *temp_inactive_list)
{
struct stripe_head *sh;
int count = 0;
@@ -317,6 +385,8 @@ static int release_stripe_list(struct r5conf *conf)
head = llist_del_all(&conf->released_stripes);
head = llist_reverse_order(head);
while (head) {
+ int hash;
+
sh = llist_entry(head, struct stripe_head, release_list);
head = llist_next(head);
/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
@@ -327,7 +397,8 @@ static int release_stripe_list(struct r5conf *conf)
* again, the count is always > 1. This is true for
* STRIPE_ON_UNPLUG_LIST bit too.
*/
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &temp_inactive_list[hash]);
count++;
}
@@ -338,9 +409,12 @@ static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
+ struct list_head list;
+ int hash;
bool wakeup;
- if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ if (unlikely(!conf->mddev->thread) ||
+ test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
goto slow_path;
wakeup = llist_add(&sh->release_list, &conf->released_stripes);
if (wakeup)
@@ -350,8 +424,11 @@ slow_path:
local_irq_save(flags);
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
- do_release_stripe(conf, sh);
+ INIT_LIST_HEAD(&list);
+ hash = sh->hash_lock_index;
+ do_release_stripe(conf, sh, &list);
spin_unlock(&conf->device_lock);
+ release_inactive_stripe_list(conf, &list, hash);
}
local_irq_restore(flags);
}
@@ -376,18 +453,21 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
/* find an idle stripe, make sure it is unhashed, and return it. */
-static struct stripe_head *get_free_stripe(struct r5conf *conf)
+static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh = NULL;
struct list_head *first;
- if (list_empty(&conf->inactive_list))
+ if (list_empty(conf->inactive_list + hash))
goto out;
- first = conf->inactive_list.next;
+ first = (conf->inactive_list + hash)->next;
sh = list_entry(first, struct stripe_head, lru);
list_del_init(first);
remove_hash(sh);
atomic_inc(&conf->active_stripes);
+ BUG_ON(hash != sh->hash_lock_index);
+ if (list_empty(conf->inactive_list + hash))
+ atomic_inc(&conf->empty_inactive_list_nr);
out:
return sh;
}
@@ -430,7 +510,7 @@ static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
struct r5conf *conf = sh->raid_conf;
- int i;
+ int i, seq;
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
@@ -440,7 +520,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
(unsigned long long)sh->sector);
remove_hash(sh);
-
+retry:
+ seq = read_seqcount_begin(&conf->gen_lock);
sh->generation = conf->generation - previous;
sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
sh->sector = sector;
@@ -462,6 +543,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
dev->flags = 0;
raid5_build_block(sh, i, previous);
}
+ if (read_seqcount_retry(&conf->gen_lock, seq))
+ goto retry;
insert_hash(conf, sh);
sh->cpu = smp_processor_id();
}
@@ -566,57 +649,60 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
int previous, int noblock, int noquiesce)
{
struct stripe_head *sh;
+ int hash = stripe_hash_locks_hash(sector);
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
- spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(conf->hash_locks + hash);
do {
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0 || noquiesce,
- conf->device_lock);
+ *(conf->hash_locks + hash));
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
if (!conf->inactive_blocked)
- sh = get_free_stripe(conf);
+ sh = get_free_stripe(conf, hash);
if (noblock && sh == NULL)
break;
if (!sh) {
conf->inactive_blocked = 1;
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list) &&
- (atomic_read(&conf->active_stripes)
- < (conf->max_nr_stripes *3/4)
- || !conf->inactive_blocked),
- conf->device_lock);
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash) &&
+ (atomic_read(&conf->active_stripes)
+ < (conf->max_nr_stripes * 3 / 4)
+ || !conf->inactive_blocked),
+ *(conf->hash_locks + hash));
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
} else {
+ spin_lock(&conf->device_lock);
if (atomic_read(&sh->count)) {
BUG_ON(!list_empty(&sh->lru)
&& !test_bit(STRIPE_EXPANDING, &sh->state)
&& !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
- && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
+ );
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
- if (list_empty(&sh->lru) &&
- !test_bit(STRIPE_EXPANDING, &sh->state))
- BUG();
+ BUG_ON(list_empty(&sh->lru) &&
+ !test_bit(STRIPE_EXPANDING, &sh->state));
list_del_init(&sh->lru);
if (sh->group) {
sh->group->stripes_cnt--;
sh->group = NULL;
}
}
+ spin_unlock(&conf->device_lock);
}
} while (sh == NULL);
if (sh)
atomic_inc(&sh->count);
- spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -772,7 +858,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
- bi->bi_rw |= REQ_FLUSH;
+ bi->bi_rw |= REQ_NOMERGE;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@ -1596,7 +1682,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static int grow_one_stripe(struct r5conf *conf)
+static int grow_one_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh;
sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
@@ -1612,6 +1698,7 @@ static int grow_one_stripe(struct r5conf *conf)
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
+ sh->hash_lock_index = hash;
/* we just created an active stripe so... */
atomic_set(&sh->count, 1);
atomic_inc(&conf->active_stripes);
@@ -1624,6 +1711,7 @@ static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
+ int hash;
if (conf->mddev->gendisk)
sprintf(conf->cache_name[0],
@@ -1641,9 +1729,13 @@ static int grow_stripes(struct r5conf *conf, int num)
return 1;
conf->slab_cache = sc;
conf->pool_size = devs;
- while (num--)
- if (!grow_one_stripe(conf))
+ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
+ while (num--) {
+ if (!grow_one_stripe(conf, hash))
return 1;
+ conf->max_nr_stripes++;
+ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
+ }
return 0;
}
@@ -1701,6 +1793,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
int err;
struct kmem_cache *sc;
int i;
+ int hash, cnt;
if (newsize <= conf->pool_size)
return 0; /* never bother to shrink */
@@ -1740,19 +1833,29 @@ static int resize_stripes(struct r5conf *conf, int newsize)
* OK, we have enough stripes, start collecting inactive
* stripes and copying them over
*/
+ hash = 0;
+ cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
- spin_lock_irq(&conf->device_lock);
- wait_event_lock_irq(conf->wait_for_stripe,
- !list_empty(&conf->inactive_list),
- conf->device_lock);
- osh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
+ lock_device_hash_lock(conf, hash);
+ wait_event_cmd(conf->wait_for_stripe,
+ !list_empty(conf->inactive_list + hash),
+ unlock_device_hash_lock(conf, hash),
+ lock_device_hash_lock(conf, hash));
+ osh = get_free_stripe(conf, hash);
+ unlock_device_hash_lock(conf, hash);
atomic_set(&nsh->count, 1);
for(i=0; i<conf->pool_size; i++)
nsh->dev[i].page = osh->dev[i].page;
for( ; i<newsize; i++)
nsh->dev[i].page = NULL;
+ nsh->hash_lock_index = hash;
kmem_cache_free(conf->slab_cache, osh);
+ cnt++;
+ if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
+ !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
+ hash++;
+ cnt = 0;
+ }
}
kmem_cache_destroy(conf->slab_cache);
@@ -1811,13 +1914,13 @@ static int resize_stripes(struct r5conf *conf, int newsize)
return err;
}
-static int drop_one_stripe(struct r5conf *conf)
+static int drop_one_stripe(struct r5conf *conf, int hash)
{
struct stripe_head *sh;
- spin_lock_irq(&conf->device_lock);
- sh = get_free_stripe(conf);
- spin_unlock_irq(&conf->device_lock);
+ spin_lock_irq(conf->hash_locks + hash);
+ sh = get_free_stripe(conf, hash);
+ spin_unlock_irq(conf->hash_locks + hash);
if (!sh)
return 0;
BUG_ON(atomic_read(&sh->count));
@@ -1829,8 +1932,10 @@ static int drop_one_stripe(struct r5conf *conf)
static void shrink_stripes(struct r5conf *conf)
{
- while (drop_one_stripe(conf))
- ;
+ int hash;
+ for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
+ while (drop_one_stripe(conf, hash))
+ ;
if (conf->slab_cache)
kmem_cache_destroy(conf->slab_cache);
@@ -1935,6 +2040,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev), bdn);
else
retry = 1;
+ if (set_bad && test_bit(In_sync, &rdev->flags)
+ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ retry = 1;
if (retry)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
set_bit(R5_ReadError, &sh->dev[i].flags);
@@ -3501,7 +3609,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
*/
set_bit(R5_Insync, &dev->flags);
- if (rdev && test_bit(R5_WriteError, &dev->flags)) {
+ if (test_bit(R5_WriteError, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
struct md_rdev *rdev2 = rcu_dereference(
@@ -3514,7 +3622,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
+ if (test_bit(R5_MadeGood, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that */
struct md_rdev *rdev2 = rcu_dereference(
@@ -3914,7 +4022,8 @@ static void raid5_activate_delayed(struct r5conf *conf)
}
}
-static void activate_bit_delay(struct r5conf *conf)
+static void activate_bit_delay(struct r5conf *conf,
+ struct list_head *temp_inactive_list)
{
/* device_lock is held */
struct list_head head;
@@ -3922,9 +4031,11 @@ static void activate_bit_delay(struct r5conf *conf)
list_del_init(&conf->bitmap_list);
while (!list_empty(&head)) {
struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
+ int hash;
list_del_init(&sh->lru);
atomic_inc(&sh->count);
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &temp_inactive_list[hash]);
}
}
@@ -3940,7 +4051,7 @@ int md_raid5_congested(struct mddev *mddev, int bits)
return 1;
if (conf->quiesce)
return 1;
- if (list_empty_careful(&conf->inactive_list))
+ if (atomic_read(&conf->empty_inactive_list_nr))
return 1;
return 0;
@@ -4270,6 +4381,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
struct raid5_plug_cb {
struct blk_plug_cb cb;
struct list_head list;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
};
static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
@@ -4280,6 +4392,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
struct mddev *mddev = cb->cb.data;
struct r5conf *conf = mddev->private;
int cnt = 0;
+ int hash;
if (cb->list.next && !list_empty(&cb->list)) {
spin_lock_irq(&conf->device_lock);
@@ -4297,11 +4410,14 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
* STRIPE_ON_RELEASE_LIST could be set here. In that
* case, the count is always > 1 here
*/
- __release_stripe(conf, sh);
+ hash = sh->hash_lock_index;
+ __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
cnt++;
}
spin_unlock_irq(&conf->device_lock);
}
+ release_inactive_stripe_list(conf, cb->temp_inactive_list,
+ NR_STRIPE_HASH_LOCKS);
if (mddev->queue)
trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
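
release_inactive_stripe_list() is introduced by an earlier hunk of this patch that is not shown here. From the call sites (here and in handle_active_stripes() below) it has to splice every non-empty temp list back onto the matching per-hash inactive list under that hash's lock, maintain empty_inactive_list_nr, and wake wait_for_stripe sleepers. A sketch of that assumed behaviour (the real signature and naming may differ):

static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash_cnt)
{
	int hash;
	bool do_wakeup = false;

	for (hash = 0; hash < hash_cnt; hash++) {
		struct list_head *list = temp_inactive_list + hash;

		if (list_empty(list))
			continue;
		spin_lock_irq(conf->hash_locks + hash);
		if (list_empty(conf->inactive_list + hash))
			atomic_dec(&conf->empty_inactive_list_nr);
		list_splice_tail_init(list, conf->inactive_list + hash);
		spin_unlock_irq(conf->hash_locks + hash);
		do_wakeup = true;
	}
	if (do_wakeup)
		wake_up(&conf->wait_for_stripe);
}
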
@@ -4322,8 +4438,12 @@ static void release_stripe_plug(struct mddev *mddev,
cb = container_of(blk_cb, struct raid5_plug_cb, cb);
- if (cb->list.next == NULL)
+ if (cb->list.next == NULL) {
+ int i;
INIT_LIST_HEAD(&cb->list);
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(cb->temp_inactive_list + i);
+ }
if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
list_add_tail(&sh->lru, &cb->list);
@@ -4706,14 +4826,19 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes)==0);
+ atomic_read(&conf->reshape_stripes)==0
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (atomic_read(&conf->reshape_stripes) != 0)
+ return 0;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait, mddev->flags == 0 ||
- kthread_should_stop());
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ return 0;
spin_lock_irq(&conf->device_lock);
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
@@ -4796,7 +4921,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
>= mddev->resync_max - mddev->curr_resync_completed) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
- atomic_read(&conf->reshape_stripes) == 0);
+ atomic_read(&conf->reshape_stripes) == 0
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (atomic_read(&conf->reshape_stripes) != 0)
+ goto ret;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
conf->reshape_checkpoint = jiffies;
@@ -4804,13 +4932,16 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_DEVS, &mddev->flags)
- || kthread_should_stop());
+ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ goto ret;
spin_lock_irq(&conf->device_lock);
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
+ret:
return reshape_sectors;
}
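
Both checkpoint waits above drop the old kthread_should_stop() test: the reshape code no longer runs in a kthread that is stopped that way, so an interrupted wait is signalled through MD_RECOVERY_INTR instead, and the function returns early rather than sleeping forever. Schematically, the stop side (in md core, outside this diff and assumed here) only has to set the flag before issuing any wakeup:

/* Assumed stop-side sequence: once the flag is set, any wakeup
 * (including the wake_up_process() done by kthread_stop()) lets
 * the wait_event() calls above observe MD_RECOVERY_INTR and bail. */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
wake_up(&conf->wait_for_overlap);
wake_up(&mddev->sb_wait);
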
@@ -4968,27 +5099,45 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
}
static int handle_active_stripes(struct r5conf *conf, int group,
- struct r5worker *worker)
+ struct r5worker *worker,
+ struct list_head *temp_inactive_list)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
- int i, batch_size = 0;
+ int i, batch_size = 0, hash;
+ bool release_inactive = false;
while (batch_size < MAX_STRIPE_BATCH &&
(sh = __get_priority_stripe(conf, group)) != NULL)
batch[batch_size++] = sh;
- if (batch_size == 0)
- return batch_size;
+ if (batch_size == 0) {
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ if (!list_empty(temp_inactive_list + i))
+ break;
+ if (i == NR_STRIPE_HASH_LOCKS)
+ return batch_size;
+ release_inactive = true;
+ }
spin_unlock_irq(&conf->device_lock);
+ release_inactive_stripe_list(conf, temp_inactive_list,
+ NR_STRIPE_HASH_LOCKS);
+
+ if (release_inactive) {
+ spin_lock_irq(&conf->device_lock);
+ return 0;
+ }
+
for (i = 0; i < batch_size; i++)
handle_stripe(batch[i]);
cond_resched();
spin_lock_irq(&conf->device_lock);
- for (i = 0; i < batch_size; i++)
- __release_stripe(conf, batch[i]);
+ for (i = 0; i < batch_size; i++) {
+ hash = batch[i]->hash_lock_index;
+ __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
+ }
return batch_size;
}
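
Note the locking contract of handle_active_stripes(): it is entered and left with device_lock held, but releases the lock in the middle to run handle_stripe() and to flush temp_inactive_list; the release_inactive special case exists so the temp lists are flushed even when no batch could be collected. Condensed from raid5_do_work() below, the caller loop looks like:

spin_lock_irq(&conf->device_lock);
while (1) {
	int released, batch_size;

	released = release_stripe_list(conf, worker->temp_inactive_list);
	batch_size = handle_active_stripes(conf, group_id, worker,
					   worker->temp_inactive_list);
	if (!batch_size && !released)
		break;			/* nothing left to do */
}
spin_unlock_irq(&conf->device_lock);
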
@@ -5009,9 +5158,10 @@ static void raid5_do_work(struct work_struct *work)
while (1) {
int batch_size, released;
- released = release_stripe_list(conf);
+ released = release_stripe_list(conf, worker->temp_inactive_list);
- batch_size = handle_active_stripes(conf, group_id, worker);
+ batch_size = handle_active_stripes(conf, group_id, worker,
+ worker->temp_inactive_list);
worker->working = false;
if (!batch_size && !released)
break;
@@ -5050,7 +5200,7 @@ static void raid5d(struct md_thread *thread)
struct bio *bio;
int batch_size, released;
- released = release_stripe_list(conf);
+ released = release_stripe_list(conf, conf->temp_inactive_list);
if (
!list_empty(&conf->bitmap_list)) {
@@ -5060,7 +5210,7 @@ static void raid5d(struct md_thread *thread)
bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
- activate_bit_delay(conf);
+ activate_bit_delay(conf, conf->temp_inactive_list);
}
raid5_activate_delayed(conf);
@@ -5074,7 +5224,8 @@ static void raid5d(struct md_thread *thread)
handled++;
}
- batch_size = handle_active_stripes(conf, ANY_GROUP, NULL);
+ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
+ conf->temp_inactive_list);
if (!batch_size && !released)
break;
handled += batch_size;
@@ -5110,22 +5261,29 @@ raid5_set_cache_size(struct mddev *mddev, int size)
{
struct r5conf *conf = mddev->private;
int err;
+ int hash;
if (size <= 16 || size > 32768)
return -EINVAL;
+ hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
while (size < conf->max_nr_stripes) {
- if (drop_one_stripe(conf))
+ if (drop_one_stripe(conf, hash))
conf->max_nr_stripes--;
else
break;
+ hash--;
+ if (hash < 0)
+ hash = NR_STRIPE_HASH_LOCKS - 1;
}
err = md_allow_write(mddev);
if (err)
return err;
+ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
while (size > conf->max_nr_stripes) {
- if (grow_one_stripe(conf))
+ if (grow_one_stripe(conf, hash))
conf->max_nr_stripes++;
else break;
+ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
}
return 0;
}
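
Shrinking walks the hashes backwards from (max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS while growing walks forwards from max_nr_stripes % NR_STRIPE_HASH_LOCKS, i.e. stripes are always removed from the most recently grown hashes first, so the per-hash counts never differ by more than one. Worked example:

/*
 * NR_STRIPE_HASH_LOCKS = 8, max_nr_stripes = 10
 *   (grown from 0, hashes 0 and 1 hold 2 stripes, 2..7 hold 1)
 * grow by 3:  start = 10 % 8 = 2       -> add at hashes 2, 3, 4 (13 total)
 * drop by 3:  start = (13 - 1) % 8 = 4 -> drop at hashes 4, 3, 2 (10 again)
 */
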
@@ -5213,15 +5371,18 @@ raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
return 0;
}
-static int alloc_thread_groups(struct r5conf *conf, int cnt);
+static int alloc_thread_groups(struct r5conf *conf, int cnt,
+ int *group_cnt,
+ int *worker_cnt_per_group,
+ struct r5worker_group **worker_groups);
static ssize_t
raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
{
struct r5conf *conf = mddev->private;
unsigned long new;
int err;
- struct r5worker_group *old_groups;
- int old_group_cnt;
+ struct r5worker_group *new_groups, *old_groups;
+ int group_cnt, worker_cnt_per_group;
if (len >= PAGE_SIZE)
return -EINVAL;
@@ -5237,14 +5398,19 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
mddev_suspend(mddev);
old_groups = conf->worker_groups;
- old_group_cnt = conf->worker_cnt_per_group;
+ if (old_groups)
+ flush_workqueue(raid5_wq);
+
+ err = alloc_thread_groups(conf, new,
+ &group_cnt, &worker_cnt_per_group,
+ &new_groups);
+ if (!err) {
+ spin_lock_irq(&conf->device_lock);
+ conf->group_cnt = group_cnt;
+ conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_groups = new_groups;
+ spin_unlock_irq(&conf->device_lock);
- conf->worker_groups = NULL;
- err = alloc_thread_groups(conf, new);
- if (err) {
- conf->worker_groups = old_groups;
- conf->worker_cnt_per_group = old_group_cnt;
- } else {
if (old_groups)
kfree(old_groups[0].workers);
kfree(old_groups);
@@ -5274,40 +5440,47 @@ static struct attribute_group raid5_attrs_group = {
.attrs = raid5_attrs,
};
-static int alloc_thread_groups(struct r5conf *conf, int cnt)
+static int alloc_thread_groups(struct r5conf *conf, int cnt,
+ int *group_cnt,
+ int *worker_cnt_per_group,
+ struct r5worker_group **worker_groups)
{
- int i, j;
+ int i, j, k;
ssize_t size;
struct r5worker *workers;
- conf->worker_cnt_per_group = cnt;
+ *worker_cnt_per_group = cnt;
if (cnt == 0) {
- conf->worker_groups = NULL;
+ *group_cnt = 0;
+ *worker_groups = NULL;
return 0;
}
- conf->group_cnt = num_possible_nodes();
+ *group_cnt = num_possible_nodes();
size = sizeof(struct r5worker) * cnt;
- workers = kzalloc(size * conf->group_cnt, GFP_NOIO);
- conf->worker_groups = kzalloc(sizeof(struct r5worker_group) *
- conf->group_cnt, GFP_NOIO);
- if (!conf->worker_groups || !workers) {
+ workers = kzalloc(size * *group_cnt, GFP_NOIO);
+ *worker_groups = kzalloc(sizeof(struct r5worker_group) *
+ *group_cnt, GFP_NOIO);
+ if (!*worker_groups || !workers) {
kfree(workers);
- kfree(conf->worker_groups);
- conf->worker_groups = NULL;
+ kfree(*worker_groups);
return -ENOMEM;
}
- for (i = 0; i < conf->group_cnt; i++) {
+ for (i = 0; i < *group_cnt; i++) {
struct r5worker_group *group;
- group = &conf->worker_groups[i];
+ group = &(*worker_groups)[i];
INIT_LIST_HEAD(&group->handle_list);
group->conf = conf;
group->workers = workers + i * cnt;
for (j = 0; j < cnt; j++) {
- group->workers[j].group = group;
- INIT_WORK(&group->workers[j].work, raid5_do_work);
+ struct r5worker *worker = group->workers + j;
+ worker->group = group;
+ INIT_WORK(&worker->work, raid5_do_work);
+
+ for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
+ INIT_LIST_HEAD(worker->temp_inactive_list + k);
}
}
@@ -5458,6 +5631,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
struct md_rdev *rdev;
struct disk_info *disk;
char pers_name[6];
+ int i;
+ int group_cnt, worker_cnt_per_group;
+ struct r5worker_group *new_group;
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -5492,7 +5668,12 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if (conf == NULL)
goto abort;
/* Don't enable multi-threading by default */
- if (alloc_thread_groups(conf, 0))
+ if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
+ &new_group)) {
+ conf->group_cnt = group_cnt;
+ conf->worker_cnt_per_group = worker_cnt_per_group;
+ conf->worker_groups = new_group;
+ } else
goto abort;
spin_lock_init(&conf->device_lock);
seqcount_init(&conf->gen_lock);
@@ -5502,7 +5683,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->hold_list);
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
- INIT_LIST_HEAD(&conf->inactive_list);
init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
@@ -5528,6 +5708,21 @@ static struct r5conf *setup_conf(struct mddev *mddev)
if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
goto abort;
+ /* We init hash_locks[0] separately so that it can be used
+ * as the reference lock in the spin_lock_nest_lock() call
+ * in lock_all_device_hash_locks_irq in order to convince
+ * lockdep that we know what we are doing.
+ */
+ spin_lock_init(conf->hash_locks);
+ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ spin_lock_init(conf->hash_locks + i);
+
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(conf->inactive_list + i);
+
+ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
+ INIT_LIST_HEAD(conf->temp_inactive_list + i);
+
conf->level = mddev->new_level;
if (raid5_alloc_percpu(conf) != 0)
goto abort;
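
lock_all_device_hash_locks_irq() itself appears in an earlier hunk; given the comment above, its assumed shape takes hash_locks[0] first and acquires the remaining locks with spin_lock_nest_lock() against it, so lockdep treats the whole set as nested under one designated outer lock:

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	local_irq_disable();
	spin_lock(conf->hash_locks);		/* the reference lock */
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS - 1; i > 0; i--)
		spin_unlock(conf->hash_locks + i);
	spin_unlock(conf->hash_locks);
	local_irq_enable();
}
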
@@ -5568,7 +5763,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
else
conf->max_degraded = 1;
conf->algorithm = mddev->new_layout;
- conf->max_nr_stripes = NR_STRIPES;
conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) {
conf->prev_chunk_sectors = mddev->chunk_sectors;
@@ -5577,7 +5771,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
- if (grow_stripes(conf, conf->max_nr_stripes)) {
+ atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
+ if (grow_stripes(conf, NR_STRIPES)) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
@@ -6383,12 +6578,18 @@ static int raid5_start_reshape(struct mddev *mddev)
if (!mddev->sync_thread) {
mddev->recovery = 0;
spin_lock_irq(&conf->device_lock);
+ write_seqcount_begin(&conf->gen_lock);
mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
+ mddev->new_chunk_sectors =
+ conf->chunk_sectors = conf->prev_chunk_sectors;
+ mddev->new_layout = conf->algorithm = conf->prev_algo;
rdev_for_each(rdev, mddev)
rdev->new_data_offset = rdev->data_offset;
smp_wmb();
+ conf->generation--;
conf->reshape_progress = MaxSector;
mddev->reshape_position = MaxSector;
+ write_seqcount_end(&conf->gen_lock);
spin_unlock_irq(&conf->device_lock);
return -EAGAIN;
}
@@ -6476,27 +6677,28 @@ static void raid5_quiesce(struct mddev *mddev, int state)
break;
case 1: /* stop all writes */
- spin_lock_irq(&conf->device_lock);
+ lock_all_device_hash_locks_irq(conf);
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
*/
conf->quiesce = 2;
- wait_event_lock_irq(conf->wait_for_stripe,
+ wait_event_cmd(conf->wait_for_stripe,
atomic_read(&conf->active_stripes) == 0 &&
atomic_read(&conf->active_aligned_reads) == 0,
- conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf),
+ lock_all_device_hash_locks_irq(conf));
conf->quiesce = 1;
- spin_unlock_irq(&conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf);
/* allow reshape to continue */
wake_up(&conf->wait_for_overlap);
break;
case 0: /* re-enable writes */
- spin_lock_irq(&conf->device_lock);
+ lock_all_device_hash_locks_irq(conf);
conf->quiesce = 0;
wake_up(&conf->wait_for_stripe);
wake_up(&conf->wait_for_overlap);
- spin_unlock_irq(&conf->device_lock);
+ unlock_all_device_hash_locks_irq(conf);
break;
}
}
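
wait_event_cmd() is a generic helper added alongside this patch set; unlike wait_event_lock_irq() it takes two arbitrary statements, one run before sleeping and one after waking, which is what allows all the hash locks to be dropped and retaken around the sleep. A simplified sketch of its semantics (the real macro in include/linux/wait.h is built from the common __wait_event machinery):

/* Simplified sketch of wait_event_cmd() semantics. */
#define wait_event_cmd_sketch(wq, condition, cmd1, cmd2)		\
do {									\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait(&(wq), &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		cmd1;	/* e.g. unlock_all_device_hash_locks_irq() */	\
		schedule();						\
		cmd2;	/* retake the locks before re-checking */	\
	}								\
	finish_wait(&(wq), &__wait);					\
} while (0)
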
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2113ffa82c7a..01ad8ae8f578 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -49,7 +49,7 @@
* can't distinguish between a clean block that has been generated
* from parity calculations, and a clean block that has been
* successfully written to the spare ( or to parity when resyncing).
- * To distingush these states we have a stripe bit STRIPE_INSYNC that
+ * To distinguish these states we have a stripe bit STRIPE_INSYNC that
* is set whenever a write is scheduled to the spare, or to the parity
* disc if there is no spare. A sync request clears this bit, and
* when we find it set with no buffers locked, we know the sync is
@@ -205,6 +205,7 @@ struct stripe_head {
short pd_idx; /* parity disk index */
short qd_idx; /* 'Q' disk index for raid6 */
short ddf_layout;/* use DDF ordering to calculate Q */
+ short hash_lock_index;
unsigned long state; /* state flags */
atomic_t count; /* nr of active thread/requests */
int bm_seq; /* sequence number for bitmap flushes */
@@ -367,9 +368,18 @@ struct disk_info {
struct md_rdev *rdev, *replacement;
};
+/* NOTE: NR_STRIPE_HASH_LOCKS must remain below 64.
+ * This is because we sometimes take all the spinlocks
+ * at once, and nesting that many locks creates a lock
+ * depth that can cause problems (e.g. for lockdep).
+ */
+#define NR_STRIPE_HASH_LOCKS 8
+#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
+
struct r5worker {
struct work_struct work;
struct r5worker_group *group;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
bool working;
};
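
The companion helper that maps a stripe's sector to its lock index sits in a hunk not shown here; following the existing stripe_hash() convention, a plausible reconstruction (details may differ) is:

/* Hypothetical reconstruction of the sector -> hash-lock mapping. */
static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}
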
@@ -382,6 +392,8 @@ struct r5worker_group {
struct r5conf {
struct hlist_head *stripe_hashtbl;
+ /* each lock protects only its corresponding hash list and inactive_list */
+ spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
struct mddev *mddev;
int chunk_sectors;
int level, algorithm;
@@ -462,7 +474,8 @@ struct r5conf {
* Free stripes pool
*/
atomic_t active_stripes;
- struct list_head inactive_list;
+ struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
+ atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
@@ -477,6 +490,7 @@ struct r5conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
struct r5worker_group *worker_groups;
int group_cnt;
int worker_cnt_per_group;