author    | Kent Overstreet <kent.overstreet@gmail.com> | 2021-03-31 22:16:39 +0200
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 23:08:58 +0200
commit    | ecab6be7e5c3e19d25a4ad9d5d97c83e3ac67507 (patch)
tree      | 6658063cd9d687ef8d283ca1e31e4d1279b8bfae /fs/bcachefs/btree_update_interior.h
parent    | bcachefs: Kill bch2_btree_node_get_sibling() (diff)
bcachefs: bch2_foreground_maybe_merge() now correctly reports lock restarts
This means that btree node splits don't have to automatically trigger a
transaction restart.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/btree_update_interior.h')
-rw-r--r-- | fs/bcachefs/btree_update_interior.h | 24
1 file changed, 12 insertions, 12 deletions
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 2a6b51ece0f8..f2925b0d7f17 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -132,10 +132,10 @@ void bch2_btree_insert_node(struct btree_update *, struct btree *,
 				unsigned);
 int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);
 
-void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
-				   unsigned, unsigned, enum btree_node_sibling);
+int __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
+				  unsigned, unsigned, enum btree_node_sibling);
 
-static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
+static inline int bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
 					struct btree_iter *iter,
 					unsigned level, unsigned flags,
 					enum btree_node_sibling sib)
@@ -143,27 +143,27 @@ static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
 	struct btree *b;
 
 	if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
-		return;
+		return 0;
 
 	if (!bch2_btree_node_relock(iter, level))
-		return;
+		return 0;
 
 	b = iter->l[level].b;
 	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
-		return;
+		return 0;
 
-	__bch2_foreground_maybe_merge(c, iter, level, flags, sib);
+	return __bch2_foreground_maybe_merge(c, iter, level, flags, sib);
 }
 
-static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
+static inline int bch2_foreground_maybe_merge(struct bch_fs *c,
 					       struct btree_iter *iter,
 					       unsigned level, unsigned flags)
 {
-	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
-					    btree_prev_sib);
-	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
-					    btree_next_sib);
+	return bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
+						   btree_prev_sib) ?:
+	       bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
+						   btree_next_sib);
 }
 
 void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
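
A note on the calling convention this enables: with __bch2_foreground_maybe_merge() and its inline wrappers returning int rather than void, a caller can observe that locks were dropped and decide for itself when to retry, instead of having a transaction restart forced on it; the new bch2_foreground_maybe_merge() also chains the two sibling merges with GCC's "a ?: b" extension, so the first non-zero error (or the second call's result) is what gets returned. The sketch below is illustration only, not bcachefs code: the names (iter_stub, maybe_merge_sibling_stub, do_update) are stand-ins, and the -EINTR "lock restart" convention is assumed here purely to show the caller-side pattern.

/*
 * Minimal userspace sketch of the calling convention above.
 * Stand-in types and helpers only; not the actual bcachefs API.
 */
#include <errno.h>
#include <stdio.h>

enum sibling_stub { prev_sib, next_sib };

struct iter_stub { int relock_failures; };

/* Stand-in merge helper: 0 on success, -EINTR when locks were dropped
 * and the caller must restart (assumed convention for this sketch). */
static int maybe_merge_sibling_stub(struct iter_stub *iter, enum sibling_stub sib)
{
	(void) sib;
	return iter->relock_failures-- > 0 ? -EINTR : 0;
}

/* Mirrors the shape of the new wrapper: try both siblings, returning
 * the first non-zero error via GCC's "a ?: b" extension. */
static int maybe_merge_stub(struct iter_stub *iter)
{
	return maybe_merge_sibling_stub(iter, prev_sib) ?:
	       maybe_merge_sibling_stub(iter, next_sib);
}

static int do_update(struct iter_stub *iter)
{
	int ret;

	do {
		ret = maybe_merge_stub(iter);
		/* On -EINTR a real caller would re-traverse/re-lock, then retry. */
	} while (ret == -EINTR);

	return ret;
}

int main(void)
{
	struct iter_stub iter = { .relock_failures = 1 };

	printf("update returned %d\n", do_update(&iter));
	return 0;
}

Built as a plain userspace program (the "?:" form needs GNU C, which gcc and clang accept by default), the sketch hits one simulated lock restart, retries, and then reports 0.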