author		Liu Bo <bo.liu@linux.alibaba.com>	2018-08-22 21:51:49 +0200
committer	David Sterba <dsterba@suse.com>	2018-10-15 17:23:33 +0200
commit		5c9d028b3b174e5cf3678a7b0c14e21e51665793 (patch)
tree		7871db45680d3cb98c67dc8ee31a8e5a8275abcf	/fs/btrfs/delayed-ref.c
parent		btrfs: wait on caching when putting the bg cache (diff)
Btrfs: delayed-refs: use rb_first_cached for href_root
rb_first_cached() trades an extra pointer "leftmost" for doing the same
job as rb_first() but in O(1).
Functions manipulating href_root need to get the first entry, so this
converts href_root to use rb_first_cached().
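For illustration only (the struct item, its bytenr key and the helpers
below are made up, not btrfs code), a minimal sketch of how an
rb_root_cached is declared, filled with the leftmost hint, and queried
in O(1):

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {				/* illustrative structure */
	u64 bytenr;
	struct rb_node node;
};

static struct rb_root_cached my_root = RB_ROOT_CACHED;

static void item_insert(struct item *ins)
{
	struct rb_node **p = &my_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, node);

		parent = *p;
		if (ins->bytenr < cur->bytenr) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}
	rb_link_node(&ins->node, parent, p);
	/* the leftmost hint lets the helper maintain the cached pointer */
	rb_insert_color_cached(&ins->node, &my_root, leftmost);
}

/* O(1): returns the cached leftmost node instead of walking the tree */
static struct item *item_first(void)
{
	struct rb_node *n = rb_first_cached(&my_root);

	return n ? rb_entry(n, struct item, node) : NULL;
}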
This patch is the first in a sequence of similar updates to other rbtrees,
and what follows is an analysis of the expected behaviour and improvements.
There's a common pattern:
while (node = rb_first) {
entry = rb_entry(node)
next = rb_next(node)
rb_erase(node)
cleanup(entry)
}
rb_first needs to traverse the tree up to logN depth, rb_erase can
completely reshuffle the tree. With the caching we'll skip the traversal
in rb_first. That's a cached memory access vs looped pointer
dereference trade-off that IMHO has a clear winner.
Measurements show there's not much difference in a sample tree with
10000 nodes: 4.5s / rb_first and 4.8s / rb_first_cached. Real effects of
caching and pointer chasing are unpredictable though.
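With the cached root, the drain loop above becomes (a sketch, reusing the
illustrative struct item from the earlier example; rb_erase_cached() keeps
the leftmost pointer up to date):

#include <linux/rbtree.h>
#include <linux/slab.h>

static void drain_all(struct rb_root_cached *root)
{
	struct rb_node *node;

	/* rb_first_cached() is a pointer read, not a logN descent */
	while ((node = rb_first_cached(root)) != NULL) {
		struct item *entry = rb_entry(node, struct item, node);

		rb_erase_cached(node, root);	/* may rebalance; updates leftmost */
		kfree(entry);
	}
}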
Further optimizations can be done to avoid the expensive rb_erase step.
In some cases it's ok to process the nodes in any order, so the tree can
be traversed in post-order, without rebalancing the child nodes, just
calling free. Care must be taken regarding the next node.
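The kernel already provides such a post-order walk; a sketch of tearing
down the whole illustrative tree without any rb_erase() calls (the
iterator works on the plain rb_root embedded in the cached root and must
not be mixed with erase):

#include <linux/rbtree.h>
#include <linux/slab.h>

static void free_all_postorder(struct rb_root_cached *root)
{
	struct item *entry, *next;

	/* children are visited before their parent, so freeing entry is safe */
	rbtree_postorder_for_each_entry_safe(entry, next, &root->rb_root, node)
		kfree(entry);

	*root = RB_ROOT_CACHED;	/* reset the root and the cached leftmost */
}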
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ update changelog from mail discussions ]
Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--	fs/btrfs/delayed-ref.c	30
1 file changed, 17 insertions(+), 13 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 62ff545ba1f7..f07952e16a3b 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -101,14 +101,15 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
 }
 
 /* insert a new ref to head ref rbtree */
-static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
 						   struct rb_node *node)
 {
-	struct rb_node **p = &root->rb_node;
+	struct rb_node **p = &root->rb_root.rb_node;
 	struct rb_node *parent_node = NULL;
 	struct btrfs_delayed_ref_head *entry;
 	struct btrfs_delayed_ref_head *ins;
 	u64 bytenr;
+	bool leftmost = true;
 
 	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
 	bytenr = ins->bytenr;
@@ -117,16 +118,18 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
 		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
 				 href_node);
 
-		if (bytenr < entry->bytenr)
+		if (bytenr < entry->bytenr) {
 			p = &(*p)->rb_left;
-		else if (bytenr > entry->bytenr)
+		} else if (bytenr > entry->bytenr) {
 			p = &(*p)->rb_right;
-		else
+			leftmost = false;
+		} else {
 			return entry;
+		}
 	}
 
 	rb_link_node(node, parent_node, p);
-	rb_insert_color(node, root);
+	rb_insert_color_cached(node, root, leftmost);
 	return NULL;
 }
 
@@ -164,10 +167,11 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
  * If return_bigger is given, the next bigger entry is returned if no exact
  * match is found.
  */
-static struct btrfs_delayed_ref_head *
-find_ref_head(struct rb_root *root, u64 bytenr,
-	      int return_bigger)
+static struct btrfs_delayed_ref_head* find_ref_head(
+		struct btrfs_delayed_ref_root *dr, u64 bytenr,
+		int return_bigger)
 {
+	struct rb_root *root = &dr->href_root.rb_root;
 	struct rb_node *n;
 	struct btrfs_delayed_ref_head *entry;
 
@@ -187,7 +191,7 @@ find_ref_head(struct rb_root *root, u64 bytenr,
 		if (bytenr > entry->bytenr) {
 			n = rb_next(&entry->href_node);
 			if (!n)
-				n = rb_first(root);
+				n = rb_first_cached(&dr->href_root);
 			entry = rb_entry(n, struct btrfs_delayed_ref_head,
 					 href_node);
 			return entry;
@@ -357,12 +361,12 @@ btrfs_select_ref_head(struct btrfs_trans_handle *trans)
 again:
 	start = delayed_refs->run_delayed_start;
-	head = find_ref_head(&delayed_refs->href_root, start, 1);
+	head = find_ref_head(delayed_refs, start, 1);
 	if (!head && !loop) {
 		delayed_refs->run_delayed_start = 0;
 
 		start = 0;
 		loop = true;
-		head = find_ref_head(&delayed_refs->href_root, start, 1);
+		head = find_ref_head(delayed_refs, start, 1);
 		if (!head)
 			return NULL;
 	} else if (!head && loop) {
@@ -903,7 +907,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
 {
-	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
+	return find_ref_head(delayed_refs, bytenr, 0);
 }
 
 void __cold btrfs_delayed_ref_exit(void)