author     Chris Mason <chris.mason@oracle.com>  2009-03-13 15:10:06 +0100
committer  Chris Mason <chris.mason@oracle.com>  2009-03-24 21:14:25 +0100
commit     56bec294dea971335d4466b30f2d959f28f6e36d
tree       fc0b5bbf4bb6ab35582a4c7f58f5ac88f71c38bf
parent     Btrfs: don't preallocate metadata blocks during btrfs_search_slot
Btrfs: do extent allocation and reference count updates in the background
The extent allocation tree maintains a reference count and full back
reference information for every extent allocated in the filesystem.
For subvolume and snapshot trees, every time a block goes through COW,
the new copy of the block adds a reference on every block it points to.

If a btree node points to 150 leaves, then the COW code needs to go and
add backrefs on 150 different extents, which might be spread all over
the extent allocation tree.

These updates currently happen during btrfs_cow_block, and most COWs
happen during btrfs_search_slot.  btrfs_search_slot has locks held on
both the parent and the node we are COWing, and so we really want to
avoid IO during the COW if we can.

This commit adds an rbtree of pending reference count updates and
extent allocations.  The tree is ordered by byte number of the extent
and byte number of the parent for the back reference.  The tree allows
us to:

1) Modify back references in something close to disk order, reducing
   seeks

2) Significantly reduce the number of modifications made as block
   pointers are balanced around

3) Do all of the extent insertion and back reference modifications
   outside of the performance critical btrfs_search_slot code

#3 has the added benefit of greatly reducing the btrfs stack footprint.
The extent allocation tree modifications are done without the deep
(and somewhat recursive) call chains used in the past.

These delayed back reference updates must be done before the
transaction commits, and so the rbtree is tied to the transaction.
Throttling is implemented to help keep the queue of backrefs at a
reasonable size.

Since there was a similar mechanism in place for the extent tree
extents, that is removed and replaced by the delayed reference tree.

Yan Zheng <yan.zheng@oracle.com> helped review and fixup this code.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
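For orientation, here is a minimal sketch of the per-transaction
bookkeeping the patch introduces.  The field names are the ones the
diff below initializes in join_transaction(); the struct layout itself
is illustrative only and not the exact definition from the btrfs
headers:

/*
 * Illustrative only -- the real struct lives in the btrfs delayed ref
 * code, not in this file.  These are the fields join_transaction()
 * sets up for each new transaction in the first hunk below.
 */
struct btrfs_delayed_ref_root {
	struct rb_root root;        /* pending updates, ordered by extent
				     * bytenr (and parent bytenr for back
				     * refs) so they run in near-disk order */
	spinlock_t lock;            /* protects the tree and counters */
	unsigned long num_entries;  /* queue depth, used for throttling */
	int flushing;               /* set at commit time: writers must
				     * start flushing their own refs */
};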
Diffstat (limited to 'fs/btrfs/transaction.c')
-rw-r--r--  fs/btrfs/transaction.c | 54 ++++++++++++++++++++++++++++++++-------
1 file changed, 47 insertions(+), 7 deletions(-)
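Before reading the hunks, it may help to see the throttling policy from
__btrfs_end_transaction() in one place.  The sketch below is condensed
from the diff that follows; the thresholds (16384 and 64) are the
patch's own, but the helper function wrapping them is hypothetical,
added here only for readability:

/* Condensed from the __btrfs_end_transaction() hunk below. */
static void throttle_delayed_refs(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_delayed_ref_root *refs =
		&trans->transaction->delayed_refs;

	if (trans->delayed_ref_updates &&
	    (refs->flushing || refs->num_entries > 16384)) {
		/* large backlog, or a commit is already draining refs:
		 * this writer pays the cost and runs its own updates */
		btrfs_run_delayed_refs(trans, root,
				       trans->delayed_ref_updates);
	} else if (refs->num_entries > 64) {
		/* modest backlog: nudge the transaction kthread to
		 * drain the queue in the background */
		wake_up_process(root->fs_info->transaction_kthread);
	}
}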
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index d638c54d39e9..f94c2ad8996c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -65,6 +65,12 @@ static noinline int join_transaction(struct btrfs_root *root)
cur_trans->use_count = 1;
cur_trans->commit_done = 0;
cur_trans->start_time = get_seconds();
+
+ cur_trans->delayed_refs.root.rb_node = NULL;
+ cur_trans->delayed_refs.num_entries = 0;
+ cur_trans->delayed_refs.flushing = 0;
+ spin_lock_init(&cur_trans->delayed_refs.lock);
+
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
@@ -182,6 +188,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
h->block_group = 0;
h->alloc_exclude_nr = 0;
h->alloc_exclude_start = 0;
+ h->delayed_ref_updates = 0;
root->fs_info->running_transaction->use_count++;
mutex_unlock(&root->fs_info->trans_mutex);
return h;
@@ -281,6 +288,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_transaction *cur_trans;
struct btrfs_fs_info *info = root->fs_info;
+ if (trans->delayed_ref_updates &&
+ (trans->transaction->delayed_refs.flushing ||
+ trans->transaction->delayed_refs.num_entries > 16384)) {
+ btrfs_run_delayed_refs(trans, root, trans->delayed_ref_updates);
+ } else if (trans->transaction->delayed_refs.num_entries > 64) {
+ wake_up_process(root->fs_info->transaction_kthread);
+ }
+
mutex_lock(&info->trans_mutex);
cur_trans = info->running_transaction;
WARN_ON(cur_trans != trans->transaction);
@@ -424,9 +439,10 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
u64 old_root_bytenr;
struct btrfs_root *tree_root = root->fs_info->tree_root;
- btrfs_extent_post_op(trans, root);
btrfs_write_dirty_block_groups(trans, root);
- btrfs_extent_post_op(trans, root);
+
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ BUG_ON(ret);
while (1) {
old_root_bytenr = btrfs_root_bytenr(&root->root_item);
@@ -438,14 +454,14 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
btrfs_header_level(root->node));
btrfs_set_root_generation(&root->root_item, trans->transid);
- btrfs_extent_post_op(trans, root);
-
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
&root->root_item);
BUG_ON(ret);
btrfs_write_dirty_block_groups(trans, root);
- btrfs_extent_post_op(trans, root);
+
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ BUG_ON(ret);
}
return 0;
}
@@ -459,15 +475,18 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *next;
struct extent_buffer *eb;
+ int ret;
- btrfs_extent_post_op(trans, fs_info->tree_root);
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ BUG_ON(ret);
eb = btrfs_lock_root_node(fs_info->tree_root);
btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
btrfs_tree_unlock(eb);
free_extent_buffer(eb);
- btrfs_extent_post_op(trans, fs_info->tree_root);
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ BUG_ON(ret);
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
next = fs_info->dirty_cowonly_roots.next;
@@ -475,6 +494,9 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
root = list_entry(next, struct btrfs_root, dirty_list);
update_cowonly_root(trans, root);
+
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ BUG_ON(ret);
}
return 0;
}
@@ -895,6 +917,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
DEFINE_WAIT(wait);
int ret;
+ /* make a pass through all the delayed refs we have so far
+ * any running procs may add more while we are here
+ */
+ ret = btrfs_run_delayed_refs(trans, root, 0);
+ BUG_ON(ret);
+
+ /*
+ * set the flushing flag so procs in this transaction have to
+ * start sending their work down.
+ */
+ trans->transaction->delayed_refs.flushing = 1;
+
+ ret = btrfs_run_delayed_refs(trans, root, (u64)-1);
+ BUG_ON(ret);
+
INIT_LIST_HEAD(&dirty_fs_roots);
mutex_lock(&root->fs_info->trans_mutex);
if (trans->transaction->in_commit) {
@@ -969,6 +1006,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
ret = create_pending_snapshots(trans, root->fs_info);
BUG_ON(ret);
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ BUG_ON(ret);
+
WARN_ON(cur_trans != trans->transaction);
/* btrfs_commit_tree_roots is responsible for getting the