author     Kent Overstreet <kent.overstreet@linux.dev>  2022-11-23 02:15:33 +0100
committer  Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 23:09:47 +0200
commit     1617d56dc9bc3d9fd56824e8e488e88acbba152f (patch)
tree       842d081d2ef876baca5cde97d7f12bd8ea77cc99 /fs/bcachefs/btree_key_cache.c
parent     bcachefs: Bring back BTREE_ITER_CACHED_NOFILL (diff)
download   linux-1617d56dc9bc3d9fd56824e8e488e88acbba152f.tar.xz
           linux-1617d56dc9bc3d9fd56824e8e488e88acbba152f.zip
bcachefs: Key cache now works for snapshots btrees
This switches btree_key_cache_fill() to use a btree iterator, not a btree path, so that it can search for keys in previous snapshots.

We also add another iterator flag, BTREE_ITER_KEY_CACHE_FILL, to avoid recursion back into the key cache.

This will allow us to re-enable the key cache for inodes in the next patch.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
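For orientation, the new fill path reduces to the following iterator pattern. This is a condensed sketch pieced together from the hunks below; the relocking, allocation and key-copy logic in between is elided:

	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	/* An iterator (not a raw btree path) so the lookup can see keys in earlier snapshots: */
	bch2_trans_iter_init(trans, &iter, ck->key.btree_id, ck->key.pos,
			     BTREE_ITER_KEY_CACHE_FILL|	/* don't recurse back into the key cache */
			     BTREE_ITER_CACHED_NOFILL);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	/* ... relock ck_path and copy k into the bkey_cached entry ... */

	/* We're not likely to need this iterator again: */
	set_btree_iter_dontneed(&iter);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;

Using a full iterator is what allows the lookup to walk ancestor snapshots, while BTREE_ITER_KEY_CACHE_FILL prevents the fill itself from recursing back into the key cache.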
Diffstat (limited to 'fs/bcachefs/btree_key_cache.c')
-rw-r--r--  fs/bcachefs/btree_key_cache.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index fc924fd24274..c118d1b8241f 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -370,20 +370,20 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 				 struct btree_path *ck_path,
 				 struct bkey_cached *ck)
 {
-	struct btree_path *path;
+	struct btree_iter iter;
 	struct bkey_s_c k;
 	unsigned new_u64s = 0;
 	struct bkey_i *new_k = NULL;
-	struct bkey u;
 	int ret;
 
-	path = bch2_path_get(trans, ck->key.btree_id, ck->key.pos, 0, 0, 0);
-	ret = bch2_btree_path_traverse(trans, path, 0);
+	bch2_trans_iter_init(trans, &iter, ck->key.btree_id, ck->key.pos,
+			     BTREE_ITER_KEY_CACHE_FILL|
+			     BTREE_ITER_CACHED_NOFILL);
+	k = bch2_btree_iter_peek_slot(&iter);
+	ret = bkey_err(k);
 	if (ret)
 		goto err;
 
-	k = bch2_btree_path_peek_slot(path, &u);
-
 	if (!bch2_btree_node_relock(trans, ck_path, 0)) {
 		trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
@@ -431,9 +431,9 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 	bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);
 
 	/* We're not likely to need this iterator again: */
-	path->preserve = false;
+	set_btree_iter_dontneed(&iter);
 err:
-	bch2_path_put(trans, path, 0);
+	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }