Diffstat (limited to 'fs/btrfs/qgroup.c')
-rw-r--r-- | fs/btrfs/qgroup.c | 95
1 file changed, 39 insertions, 56 deletions
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 87bd37b70738..808370ada888 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -894,8 +894,6 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
-	path->leave_spinning = 1;
-
 	key.objectid = 0;
 	key.offset = 0;
 	key.type = 0;
@@ -1944,34 +1942,22 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
 		struct btrfs_key dst_key;
 
 		if (src_path->nodes[cur_level] == NULL) {
-			struct btrfs_key first_key;
 			struct extent_buffer *eb;
 			int parent_slot;
-			u64 child_gen;
-			u64 child_bytenr;
 
 			eb = src_path->nodes[cur_level + 1];
 			parent_slot = src_path->slots[cur_level + 1];
-			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
-			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
-			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
 
-			eb = read_tree_block(fs_info, child_bytenr, child_gen,
-					     cur_level, &first_key);
+			eb = btrfs_read_node_slot(eb, parent_slot);
 			if (IS_ERR(eb)) {
 				ret = PTR_ERR(eb);
 				goto out;
-			} else if (!extent_buffer_uptodate(eb)) {
-				free_extent_buffer(eb);
-				ret = -EIO;
-				goto out;
 			}
 
 			src_path->nodes[cur_level] = eb;
 
 			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_read(eb);
-			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+			src_path->locks[cur_level] = BTRFS_READ_LOCK;
 		}
 
 		src_path->slots[cur_level] = dst_path->slots[cur_level];
@@ -2066,10 +2052,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
 
 	/* Read the tree block if needed */
 	if (dst_path->nodes[cur_level] == NULL) {
-		struct btrfs_key first_key;
 		int parent_slot;
 		u64 child_gen;
-		u64 child_bytenr;
 
 		/*
 		 * dst_path->nodes[root_level] must be initialized before
@@ -2088,31 +2072,23 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
 		 */
 		eb = dst_path->nodes[cur_level + 1];
 		parent_slot = dst_path->slots[cur_level + 1];
-		child_bytenr = btrfs_node_blockptr(eb, parent_slot);
 		child_gen = btrfs_node_ptr_generation(eb, parent_slot);
-		btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
 
 		/* This node is old, no need to trace */
 		if (child_gen < last_snapshot)
 			goto out;
 
-		eb = read_tree_block(fs_info, child_bytenr, child_gen,
-				     cur_level, &first_key);
+		eb = btrfs_read_node_slot(eb, parent_slot);
 		if (IS_ERR(eb)) {
 			ret = PTR_ERR(eb);
 			goto out;
-		} else if (!extent_buffer_uptodate(eb)) {
-			free_extent_buffer(eb);
-			ret = -EIO;
-			goto out;
 		}
 
 		dst_path->nodes[cur_level] = eb;
 		dst_path->slots[cur_level] = 0;
 
 		btrfs_tree_read_lock(eb);
-		btrfs_set_lock_blocking_read(eb);
-		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
 		need_cleanup = true;
 	}
 
@@ -2256,38 +2232,28 @@ walk_down:
 	level = root_level;
 	while (level >= 0) {
 		if (path->nodes[level] == NULL) {
-			struct btrfs_key first_key;
 			int parent_slot;
-			u64 child_gen;
 			u64 child_bytenr;
 
 			/*
-			 * We need to get child blockptr/gen from parent before
-			 * we can read it.
+			 * We need to get child blockptr from parent before we
+			 * can read it.
 			 */
 			eb = path->nodes[level + 1];
 			parent_slot = path->slots[level + 1];
 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
-			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
-			btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
 
-			eb = read_tree_block(fs_info, child_bytenr, child_gen,
-					     level, &first_key);
+			eb = btrfs_read_node_slot(eb, parent_slot);
			if (IS_ERR(eb)) {
 				ret = PTR_ERR(eb);
 				goto out;
-			} else if (!extent_buffer_uptodate(eb)) {
-				free_extent_buffer(eb);
-				ret = -EIO;
-				goto out;
 			}
 
 			path->nodes[level] = eb;
 			path->slots[level] = 0;
 
 			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_read(eb);
-			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_READ_LOCK;
 
 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
 							fs_info->nodesize,
@@ -3224,6 +3190,12 @@ out:
 	return ret;
 }
 
+static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
+{
+	return btrfs_fs_closing(fs_info) ||
+		test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 {
 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
@@ -3232,6 +3204,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	struct btrfs_trans_handle *trans = NULL;
 	int err = -ENOMEM;
 	int ret = 0;
+	bool stopped = false;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -3244,7 +3217,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	path->skip_locking = 1;
 
 	err = 0;
-	while (!err && !btrfs_fs_closing(fs_info)) {
+	while (!err && !(stopped = rescan_should_stop(fs_info))) {
 		trans = btrfs_start_transaction(fs_info->fs_root, 0);
 		if (IS_ERR(trans)) {
 			err = PTR_ERR(trans);
@@ -3287,7 +3260,7 @@ out:
 	}
 
 	mutex_lock(&fs_info->qgroup_rescan_lock);
-	if (!btrfs_fs_closing(fs_info))
+	if (!stopped)
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 	if (trans) {
 		ret = update_qgroup_status_item(trans);
@@ -3306,7 +3279,7 @@ out:
 
 	btrfs_end_transaction(trans);
 
-	if (btrfs_fs_closing(fs_info)) {
+	if (stopped) {
 		btrfs_info(fs_info, "qgroup scan paused");
 	} else if (err >= 0) {
 		btrfs_info(fs_info, "qgroup scan completed%s",
@@ -3565,16 +3538,6 @@ static int try_flush_qgroup(struct btrfs_root *root)
 	bool can_commit = true;
 
 	/*
-	 * We don't want to run flush again and again, so if there is a running
-	 * one, we won't try to start a new flush, but exit directly.
-	 */
-	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
-		wait_event(root->qgroup_flush_wait,
-			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
-		return 0;
-	}
-
-	/*
 	 * If current process holds a transaction, we shouldn't flush, as we
 	 * assume all space reservation happens before a transaction handle is
 	 * held.
@@ -3588,6 +3551,26 @@ static int try_flush_qgroup(struct btrfs_root *root)
 	    current->journal_info != BTRFS_SEND_TRANS_STUB)
 		can_commit = false;
 
+	/*
+	 * We don't want to run flush again and again, so if there is a running
+	 * one, we won't try to start a new flush, but exit directly.
+	 */
+	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
+		/*
+		 * We are already holding a transaction, thus we can block other
+		 * threads from flushing. So exit right now. This increases
+		 * the chance of EDQUOT for heavy load and near limit cases.
+		 * But we can argue that if we're already near limit, EDQUOT is
+		 * unavoidable anyway.
+		 */
+		if (!can_commit)
+			return 0;
+
+		wait_event(root->qgroup_flush_wait,
+			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
+		return 0;
+	}
+
 	ret = btrfs_start_delalloc_snapshot(root);
 	if (ret < 0)
 		goto out;
@@ -4242,7 +4225,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
 		spin_unlock(&blocks->lock);
 
 		/* Read out reloc subtree root */
-		reloc_eb = read_tree_block(fs_info, block->reloc_bytenr,
+		reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 0,
 					   block->reloc_generation, block->level,
 					   &block->first_key);
 		if (IS_ERR(reloc_eb)) {
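Note on the recurring hunk pattern: most of the deletions above replace an open-coded child-node read (fetch blockptr, generation and first key from the parent slot, call read_tree_block(), then check extent_buffer_uptodate()) with a single btrfs_read_node_slot() call. The sketch below is assembled from the removed lines to illustrate roughly what that helper consolidates; the function name read_child_node_sketch is made up for illustration, it uses the pre-change read_tree_block() signature shown on the "-" side, and the real btrfs_read_node_slot() in fs/btrfs/ctree.c differs in detail.

/*
 * Illustrative sketch only: approximately the pattern that the hunks above
 * drop in favour of btrfs_read_node_slot(). Not the in-tree implementation.
 */
static struct extent_buffer *read_child_node_sketch(struct extent_buffer *parent,
						    int slot)
{
	struct btrfs_fs_info *fs_info = parent->fs_info;
	struct btrfs_key first_key;
	struct extent_buffer *eb;
	u64 child_gen;
	u64 child_bytenr;

	/* Pull blockptr, generation and first key from the parent slot. */
	child_bytenr = btrfs_node_blockptr(parent, slot);
	child_gen = btrfs_node_ptr_generation(parent, slot);
	btrfs_node_key_to_cpu(parent, &first_key, slot);

	/* Old-style read_tree_block() call, as on the "-" side of the diff. */
	eb = read_tree_block(fs_info, child_bytenr, child_gen,
			     btrfs_header_level(parent) - 1, &first_key);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}
	return eb;
}

With the helper doing this work, each caller only needs the IS_ERR() check, which is why the "} else if (!extent_buffer_uptodate(eb))" branches disappear from every call site in the diff.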