author    Chao Yu <chao@kernel.org>    2024-11-21 02:57:50 +0100
committer Jaegeuk Kim <jaegeuk@kernel.org>    2024-11-21 17:16:54 +0100
commit    3fc5d5a182f6a1f8bd4dc775feb54c369dd2c343 (patch)
tree      018a2e518a0c38b1d9aa90260cc894eca42e3222 /fs/f2fs/extent_cache.c
parent    f2fs: print message if fscorrupted was found in f2fs_new_node_page() (diff)
f2fs: fix to shrink read extent node in batches
We use an rwlock to protect the core structure data of the extent tree during its shrink. However, if the extent tree contains a huge number of extent nodes, shrinking it may hold the rwlock for a very long time, which may trigger a kernel hang.

This patch changes the shrink to release read extent nodes in batches, so that the critical region of the rwlock is kept short and extremely long hold times are avoided.

Reported-by: Xiuhong Wang <xiuhong.wang@unisoc.com>
Closes: https://lore.kernel.org/linux-f2fs-devel/20241112110627.1314632-1-xiuhong.wang@unisoc.com/
Signed-off-by: Xiuhong Wang <xiuhong.wang@unisoc.com>
Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
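The general pattern applied by the patch is to bound how much work is done per lock hold: free at most one batch of nodes under the write lock, drop the lock, and re-acquire it for the next batch. Below is a minimal userspace sketch of that pattern, assuming a pthread rwlock and a plain linked list in place of the f2fs extent tree and its rb-tree; BATCH is a stand-in for READ_EXTENT_CACHE_SHRINK_NUMBER. It is illustrative only, not the kernel implementation.

/*
 * Hypothetical, simplified analogue of the batched shrink in this patch:
 * never hold the writer lock for more than one batch of frees, so other
 * lock users are not starved for the whole duration of the shrink.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH 128	/* stands in for READ_EXTENT_CACHE_SHRINK_NUMBER */

struct node {
	struct node *next;
};

struct tree {
	pthread_rwlock_t lock;
	struct node *head;
	unsigned int node_cnt;	/* the kernel code uses an atomic counter */
};

/* Free at most nr nodes; caller must hold tree->lock for writing. */
static unsigned int free_batch(struct tree *t, unsigned int nr)
{
	unsigned int freed = 0;

	while (t->head && freed < nr) {
		struct node *n = t->head;

		t->head = n->next;
		free(n);
		t->node_cnt--;
		freed++;
	}
	return freed;
}

/* Shrink the whole tree, re-acquiring the write lock between batches. */
static unsigned int destroy_all(struct tree *t)
{
	unsigned int total = 0;

	while (t->node_cnt) {
		pthread_rwlock_wrlock(&t->lock);
		total += free_batch(t, BATCH);
		pthread_rwlock_unlock(&t->lock);
		/* other readers/writers can make progress here */
	}
	return total;
}

int main(void)
{
	struct tree t = { .lock = PTHREAD_RWLOCK_INITIALIZER };

	for (int i = 0; i < 1000; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = t.head;
		t.head = n;
		t.node_cnt++;
	}
	printf("freed %u nodes in batches of %d\n", destroy_all(&t), BATCH);
	return 0;
}

In the actual diff below, __free_extent_tree() takes the batch size as nr_shrink, and the relocated __destroy_extent_node() loops over it while dropping et->lock between iterations, which is the same structure as destroy_all() above.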
Diffstat
-rw-r--r--  fs/f2fs/extent_cache.c | 69
1 file changed, 41 insertions(+), 28 deletions(-)
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 019c1f7b7fa5..b7a6817b44b0 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -379,21 +379,22 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode,
}
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
- struct extent_tree *et)
+ struct extent_tree *et, unsigned int nr_shrink)
{
struct rb_node *node, *next;
struct extent_node *en;
- unsigned int count = atomic_read(&et->node_cnt);
+ unsigned int count;
node = rb_first_cached(&et->root);
- while (node) {
+
+ for (count = 0; node && count < nr_shrink; count++) {
next = rb_next(node);
en = rb_entry(node, struct extent_node, rb_node);
__release_extent_node(sbi, et, en);
node = next;
}
- return count - atomic_read(&et->node_cnt);
+ return count;
}
static void __drop_largest_extent(struct extent_tree *et,
@@ -622,6 +623,30 @@ do_insert:
return en;
}
+static unsigned int __destroy_extent_node(struct inode *inode,
+ enum extent_type type)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
+ unsigned int nr_shrink = type == EX_READ ?
+ READ_EXTENT_CACHE_SHRINK_NUMBER :
+ AGE_EXTENT_CACHE_SHRINK_NUMBER;
+ unsigned int node_cnt = 0;
+
+ if (!et || !atomic_read(&et->node_cnt))
+ return 0;
+
+ while (atomic_read(&et->node_cnt)) {
+ write_lock(&et->lock);
+ node_cnt += __free_extent_tree(sbi, et, nr_shrink);
+ write_unlock(&et->lock);
+ }
+
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+
+ return node_cnt;
+}
+
static void __update_extent_tree_range(struct inode *inode,
struct extent_info *tei, enum extent_type type)
{
@@ -760,9 +785,6 @@ static void __update_extent_tree_range(struct inode *inode,
}
}
- if (is_inode_flag_set(inode, FI_NO_EXTENT))
- __free_extent_tree(sbi, et);
-
if (et->largest_updated) {
et->largest_updated = false;
updated = true;
@@ -780,6 +802,9 @@ update_age_extent_cache:
out_read_extent_cache:
write_unlock(&et->lock);
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ __destroy_extent_node(inode, EX_READ);
+
if (updated)
f2fs_mark_inode_dirty_sync(inode, true);
}
@@ -942,10 +967,14 @@ static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink
list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
if (atomic_read(&et->node_cnt)) {
write_lock(&et->lock);
- node_cnt += __free_extent_tree(sbi, et);
+ node_cnt += __free_extent_tree(sbi, et,
+ nr_shrink - node_cnt - tree_cnt);
write_unlock(&et->lock);
}
- f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+
+ if (atomic_read(&et->node_cnt))
+ goto unlock_out;
+
list_del_init(&et->list);
radix_tree_delete(&eti->extent_tree_root, et->ino);
kmem_cache_free(extent_tree_slab, et);
@@ -1084,23 +1113,6 @@ unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink
return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
}
-static unsigned int __destroy_extent_node(struct inode *inode,
- enum extent_type type)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
- unsigned int node_cnt = 0;
-
- if (!et || !atomic_read(&et->node_cnt))
- return 0;
-
- write_lock(&et->lock);
- node_cnt = __free_extent_tree(sbi, et);
- write_unlock(&et->lock);
-
- return node_cnt;
-}
-
void f2fs_destroy_extent_node(struct inode *inode)
{
__destroy_extent_node(inode, EX_READ);
@@ -1109,7 +1121,6 @@ void f2fs_destroy_extent_node(struct inode *inode)
static void __drop_extent_tree(struct inode *inode, enum extent_type type)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
bool updated = false;
@@ -1117,7 +1128,6 @@ static void __drop_extent_tree(struct inode *inode, enum extent_type type)
return;
write_lock(&et->lock);
- __free_extent_tree(sbi, et);
if (type == EX_READ) {
set_inode_flag(inode, FI_NO_EXTENT);
if (et->largest.len) {
@@ -1126,6 +1136,9 @@ static void __drop_extent_tree(struct inode *inode, enum extent_type type)
}
}
write_unlock(&et->lock);
+
+ __destroy_extent_node(inode, type);
+
if (updated)
f2fs_mark_inode_dirty_sync(inode, true);
}