author		Kent Overstreet <kmo@daterainc.com>	2013-12-17 10:29:34 +0100
committer	Kent Overstreet <kmo@daterainc.com>	2014-01-08 22:05:09 +0100
commit		78365411b344df35a198b119133e6515c2dcfb9f (patch)
tree		e94c2e1bd0d5dc53e6a938b012e9b20d3a511eca /drivers/md/bcache/btree.c
parent		bcache: kill closure locking code (diff)
bcache: Rework allocator reserves
We need a reserve for allocating buckets for new btree nodes - and now that we've got multiple btrees, it really needs to be per btree.

This reworks the reserves so we've got separate freelists for each reserve instead of watermarks, which seems to make things a bit cleaner, and it adds some code so that btree_split() can make sure the reserve is available before it starts.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
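For orientation, a minimal sketch of what "separate freelists for each reserve" means on the data-structure side. Only RESERVE_BTREE, ca->free[] and fifo_used() actually appear in the btree.c hunks below; the enum layout and the other reserve names are assumptions, since those definitions live outside this btree.c-only diff.

/*
 * Assumed shape of the per-reserve freelists (illustrative; only
 * RESERVE_BTREE, ca->free[] and fifo_used() are taken from the hunks
 * shown below).
 */
enum alloc_reserve {
	RESERVE_BTREE,		/* buckets held back for new btree nodes */
	RESERVE_PRIO,		/* prio/gen writes */
	RESERVE_MOVINGGC,	/* moving garbage collection */
	RESERVE_NONE,		/* ordinary data allocations */
	RESERVE_NR,
};

/*
 * Each struct cache then keeps one bucket FIFO per reserve instead of a
 * single watermarked freelist, so a caller can ask how much headroom a
 * specific reserve has, e.g.:
 *
 *	fifo_used(&ca->free[RESERVE_BTREE])
 *
 * which is what btree_check_reserve() below does before letting
 * btree_split() proceed.
 */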
Diffstat (limited to 'drivers/md/bcache/btree.c')
-rw-r--r--	drivers/md/bcache/btree.c	34
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 101231f0f399..6a0f5faf0bed 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -167,6 +167,8 @@ static inline bool should_split(struct btree *b)
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
+		if (_r == -EINTR)					\
+			schedule();					\
		bch_cannibalize_unlock(c);				\
		if (_r == -ENOSPC) {					\
			wait_event((c)->try_wait,			\
@@ -175,6 +177,7 @@ static inline bool should_split(struct btree *b)
		}							\
	} while (_r == -EINTR);						\
									\
+	finish_wait(&(c)->bucket_wait, &(op)->wait);			\
	_r;								\
})
@@ -1075,7 +1078,7 @@ struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
	mutex_lock(&c->bucket_lock);
retry:
-	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
+	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
		goto err;

	bkey_put(c, &k.key);
@@ -1132,6 +1135,28 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
	atomic_inc(&b->c->prio_blocked);
}

+static int btree_check_reserve(struct btree *b, struct btree_op *op)
+{
+	struct cache_set *c = b->c;
+	struct cache *ca;
+	unsigned i, reserve = c->root->level * 2 + 1;
+	int ret = 0;
+
+	mutex_lock(&c->bucket_lock);
+
+	for_each_cache(ca, c, i)
+		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+			if (op)
+				prepare_to_wait(&c->bucket_wait, &op->wait,
+						TASK_UNINTERRUPTIBLE);
+			ret = -EINTR;
+			break;
+		}
+
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
+
/* Garbage collection */

uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1428,7 +1453,8 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
-			if (should_rewrite) {
+			if (should_rewrite &&
+			    !btree_check_reserve(b, NULL)) {
				n = btree_node_alloc_replacement(last->b,
								 false);
@@ -2071,6 +2097,10 @@ static int btree_split(struct btree *b, struct btree_op *op,
	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

+	if (!b->level &&
+	    btree_check_reserve(b, op))
+		return -EINTR;
+
	n1 = btree_node_alloc_replacement(b, true);
	if (IS_ERR(n1))
		goto err;
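The btree.c side above only consumes the reserve: btree_check_reserve() parks the caller on c->bucket_wait and returns -EINTR when ca->free[RESERVE_BTREE] runs short, btree_split() passes that up, and the retry loop in the first hunk calls schedule(), retries the operation, and finish_wait()s the c->bucket_wait entry once it completes. The refill/wakeup half lives in the allocator and is not part of this diff; a rough sketch of what it is assumed to look like:

/*
 * Assumed allocator side (not shown in this btree.c-only diff): freshly
 * invalidated buckets are pushed into the per-reserve FIFOs, and anyone
 * parked on c->bucket_wait by btree_check_reserve() is woken.
 */
static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

	/* Prios/gens are the most important reserve, so try it first */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

/* ...and in the allocator thread, after a successful push: */
wake_up(&ca->set->bucket_wait);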