author     Qu Wenruo <quwenruo@cn.fujitsu.com>   2015-09-08 11:22:42 +0200
committer  Chris Mason <clm@fb.com>              2015-10-22 03:41:03 +0200
commit     4ceff0792d36256a5f879cec51c56e44db90b8ec (patch)
tree       d894475934305dbbed66bc7f3f9a119c026db944 /fs/btrfs
parent     btrfs: qgroup: Use new metadata reservation. (diff)
download   linux-4ceff0792d36256a5f879cec51c56e44db90b8ec.tar.xz
           linux-4ceff0792d36256a5f879cec51c56e44db90b8ec.zip
btrfs: extent-tree: Add new version of btrfs_check_data_free_space and btrfs_free_reserved_data_space.
Add new functions __btrfs_check_data_free_space() and __btrfs_free_reserved_data_space() to work with the new accurate qgroup reserved space framework.

The new functions will replace the old btrfs_check_data_free_space() and btrfs_free_reserved_data_space() respectively, but until all the changes are done, let's just use the new names.

Also, export the internal-use function btrfs_alloc_data_chunk_ondemand(): now that qgroup reservation works on precise byte ranges, some operations can't get the accurate number in advance (like fallocate). But the data space info check and data chunk allocation don't need to be that accurate, and can be called at the beginning. So export it for later operations.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
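For orientation, here is a minimal sketch of how a hypothetical later caller might pair the two new range-based helpers in the reserve-on-entry / free-on-error pattern described above. The caller name example_prepare_write() and the do_write() placeholder are made up for illustration and are not part of this patch:

/*
 * Illustrative sketch only, not part of this patch: reserve data space for
 * an exact byte range, then drop the reservation again if the write fails.
 */
static int example_prepare_write(struct inode *inode, u64 start, u64 len)
{
	int ret;

	/* Reserves space_info bytes and the precise qgroup range. */
	ret = __btrfs_check_data_free_space(inode, start, len);
	if (ret < 0)
		return ret;

	ret = do_write(inode, start, len);	/* hypothetical write helper */
	if (ret < 0)
		/* Error path: release both reservations for the same range. */
		__btrfs_free_reserved_data_space(inode, start, len);
	return ret;
}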
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/ctree.h        3
-rw-r--r--  fs/btrfs/extent-tree.c  85
2 files changed, 79 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9df8f01de4df..a55d19986311 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3453,7 +3453,10 @@ enum btrfs_reserve_flush_enum {
};
int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes);
+int __btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
+int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
+void __btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 130df70f3348..71c172e12b19 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3904,11 +3904,7 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
return ret;
}
-/*
- * This will check the space that the inode allocates from to make sure we have
- * enough space for bytes.
- */
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
+int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
{
struct btrfs_space_info *data_sinfo;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -4029,19 +4025,55 @@ commit_trans:
data_sinfo->flags, bytes, 1);
return -ENOSPC;
}
- ret = btrfs_qgroup_reserve(root, write_bytes);
- if (ret)
- goto out;
data_sinfo->bytes_may_use += bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
data_sinfo->flags, bytes, 1);
-out:
spin_unlock(&data_sinfo->lock);
return ret;
}
/*
+ * This will check the space that the inode allocates from to make sure we have
+ * enough space for bytes.
+ */
+int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+
+ ret = btrfs_alloc_data_chunk_ondemand(inode, bytes);
+ if (ret < 0)
+ return ret;
+ ret = btrfs_qgroup_reserve(root, write_bytes);
+ return ret;
+}
+
+/*
+ * New check_data_free_space() with ability for precise data reservation.
+ * Will replace the old btrfs_check_data_free_space(), but for easier patch
+ * splitting, add the new function first and replace the old one later.
+ */
+int __btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+
+ /* align the range */
+ len = round_up(start + len, root->sectorsize) -
+ round_down(start, root->sectorsize);
+ start = round_down(start, root->sectorsize);
+
+ ret = btrfs_alloc_data_chunk_ondemand(inode, len);
+ if (ret < 0)
+ return ret;
+
+ /* Use new btrfs_qgroup_reserve_data() to reserve precise data space */
+ ret = btrfs_qgroup_reserve_data(inode, start, len);
+ return ret;
+}
+
+/*
* Called if we need to clear a data reservation for this inode.
*/
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
@@ -4061,6 +4093,41 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
spin_unlock(&data_sinfo->lock);
}
+/*
+ * Called if we need to clear a data reservation for this inode,
+ * normally in an error case.
+ *
+ * This one will handle the per-inode data rsv map for the accurate
+ * reserved space framework.
+ */
+void __btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_space_info *data_sinfo;
+
+ /* Make sure the range is aligned to sectorsize */
+ len = round_up(start + len, root->sectorsize) -
+ round_down(start, root->sectorsize);
+ start = round_down(start, root->sectorsize);
+
+ /*
+ * Free any reserved qgroup data space first.
+ * As it may allocate memory, we can't do it with the data sinfo
+ * spinlock held.
+ */
+ btrfs_qgroup_free_data(inode, start, len);
+
+ data_sinfo = root->fs_info->data_sinfo;
+ spin_lock(&data_sinfo->lock);
+ if (WARN_ON(data_sinfo->bytes_may_use < len))
+ data_sinfo->bytes_may_use = 0;
+ else
+ data_sinfo->bytes_may_use -= len;
+ trace_btrfs_space_reservation(root->fs_info, "space_info",
+ data_sinfo->flags, len, 0);
+ spin_unlock(&data_sinfo->lock);
+}
+
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
struct list_head *head = &info->space_info;
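
A side note on the alignment both new helpers perform: the (start, len) range is widened outward to sector boundaries before accounting, so even a partial-sector request reserves the whole covering sectors. Below is a small standalone C illustration of that rounding, assuming a 4096-byte sectorsize and made-up byte values; it only models the arithmetic, not the kernel helpers themselves:

#include <stdio.h>
#include <stdint.h>

/* Same power-of-two rounding the kernel helpers rely on. */
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))
#define round_up(x, y)   (((x) + (y) - 1) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t sectorsize = 4096;       /* assumed for the example */
	uint64_t start = 5000, len = 100; /* unaligned 100-byte request */

	/* Mirror the order used in __btrfs_check_data_free_space(). */
	uint64_t aligned_len = round_up(start + len, sectorsize) -
			       round_down(start, sectorsize);
	uint64_t aligned_start = round_down(start, sectorsize);

	/* Prints "start 4096 len 4096": one full sector gets reserved. */
	printf("start %llu len %llu\n",
	       (unsigned long long)aligned_start,
	       (unsigned long long)aligned_len);
	return 0;
}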