author		Chris Mason <chris.mason@oracle.com>	2008-06-25 22:14:04 +0200
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 17:04:04 +0200
commit		f9efa9c784aa3b801feb367f72c6867d26fb348e (patch)
tree		d9f678310c6c8a4daf2003fa4f6b4164111dbbd7 /fs/btrfs/ctree.c
parent		Btrfs: Online btree defragmentation fixes (diff)
Btrfs: Reduce contention on the root node
This calls unlock_up sooner in btrfs_search_slot in order to decrease the amount of work done with the higher level tree locks held.

Also, it changes btrfs_tree_lock to spin for a bit against the page lock before scheduling. This makes a big difference in context switch rate under highly contended workloads.

Longer term, a better locking structure is needed than the page lock.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
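The spinning half of the change lives in btrfs_tree_lock() in fs/btrfs/locking.c, so it does not appear in the ctree.c diff below. As a rough userspace-only sketch of the "spin for a bit before scheduling" idea, with pthread mutexes standing in for the page lock (the names spin_then_lock and SPIN_LIMIT are invented for this illustration):

/*
 * Sketch of "spin briefly before blocking" lock acquisition; this is an
 * illustration of the idea, not the kernel implementation.
 */
#include <pthread.h>

#define SPIN_LIMIT 512			/* arbitrary bound on the busy-wait */

void spin_then_lock(pthread_mutex_t *lock)
{
	int i;

	for (i = 0; i < SPIN_LIMIT; i++) {
		/* Holders are usually quick, so try a while without sleeping. */
		if (pthread_mutex_trylock(lock) == 0)
			return;
	}
	/* Still contended after the spin: block until the lock is free. */
	pthread_mutex_lock(lock);
}

Because tree locks are normally held only briefly, the trylock loop succeeds most of the time, and skipping the sleep in that common case is what cuts the context switch rate under contention.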
Diffstat (limited to 'fs/btrfs/ctree.c')
-rw-r--r--	fs/btrfs/ctree.c	17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0cb80f32a9c7..c6759fc1004a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1313,16 +1313,13 @@ again:
 				slot = p->slots[level];
 				BUG_ON(btrfs_header_nritems(b) == 1);
 			}
+			unlock_up(p, level, lowest_unlock);
+
 			/* this is only true while dropping a snapshot */
 			if (level == lowest_level) {
-				unlock_up(p, level, lowest_unlock);
 				break;
 			}
 
-			if (should_reada)
-				reada_for_search(root, p, level, slot,
-						 key->objectid);
-
 			blocknr = btrfs_node_blockptr(b, slot);
 			gen = btrfs_node_ptr_generation(b, slot);
 			blocksize = btrfs_level_size(root, level - 1);
@@ -1340,6 +1337,11 @@ again:
 					btrfs_release_path(NULL, p);
 					if (tmp)
 						free_extent_buffer(tmp);
+					if (should_reada)
+						reada_for_search(root, p,
+								 level, slot,
+								 key->objectid);
+
 					tmp = read_tree_block(root, blocknr,
 							      blocksize, gen);
 					if (tmp)
@@ -1348,12 +1350,15 @@ again:
 				} else {
 					if (tmp)
 						free_extent_buffer(tmp);
+					if (should_reada)
+						reada_for_search(root, p,
+								 level, slot,
+								 key->objectid);
 					b = read_node_slot(root, b, slot);
 				}
 			}
 			if (!p->skip_locking)
 				btrfs_tree_lock(b);
-			unlock_up(p, level, lowest_unlock);
 		} else {
 			p->slots[level] = slot;
 			if (ins_len > 0 && btrfs_leaf_free_space(root, b) <