author | Daeho Jeong &lt;daehojeong@google.com&gt; | 2024-09-10 00:19:44 +0200
---|---|---
committer | Jaegeuk Kim &lt;jaegeuk@kernel.org&gt; | 2024-09-11 05:33:02 +0200
commit | 9748c2ddea4a3f46a498bff4cf2bf9a5629e3f8b (patch) |
tree | 78944daac25bacfe25a676cd577e3c4ab03546cc |
parent | f2fs: increase BG GC migration window granularity when boosted for zoned devices (diff) |
f2fs: do FG_GC when GC boosting is required for zoned devices
When the free section count is low, we need to use FG_GC instead of BG_GC to
recover free sections.
Signed-off-by: Daeho Jeong <daehojeong@google.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
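
For orientation, the sketch below models the decision this patch adds to gc_thread_func() as a standalone userspace program. It is a rough illustration only: the free-section threshold, the `zoned` flag, and the helper name `need_boost` are assumptions made for the example, standing in for the kernel's need_to_boost_gc() and f2fs_sb_has_blkzoned() checks.

```c
#include <stdbool.h>
#include <stdio.h>

/* Minimal userspace model of the boosted-GC decision added to gc_thread_func().
 * The threshold and the zoned flag below are invented for the example; the
 * kernel decides via need_to_boost_gc() and f2fs_sb_has_blkzoned(). */
struct gc_model {
	unsigned int free_secs;    /* current number of free sections */
	unsigned int boost_thresh; /* hypothetical boost threshold */
	bool zoned;                /* device is zoned */
	bool bggc_sync;            /* background GC mode is BGGC_MODE_SYNC */
};

/* Stand-in for need_to_boost_gc(): boost when free sections run low. */
static bool need_boost(const struct gc_model *m)
{
	return m->free_secs < m->boost_thresh;
}

int main(void)
{
	struct gc_model m = {
		.free_secs = 3, .boost_thresh = 8,
		.zoned = true, .bggc_sync = false,
	};
	bool one_time = false;

	if (need_boost(&m)) {
		/* kernel: decrease_sleep_time(gc_th, &wait_ms); */
		if (m.zoned)
			one_time = true; /* one GC pass per migration unit */
	}

	/* With one_time set, the pass runs synchronously; in gc_thread_func()
	 * that sync mode is what selects FG_GC instead of BG_GC. */
	bool sync_mode = m.bggc_sync || one_time;

	printf("one_time=%d sync_mode=%d\n", one_time, sync_mode);
	return 0;
}
```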
-rw-r--r-- | fs/f2fs/f2fs.h | 1 |
-rw-r--r-- | fs/f2fs/gc.c | 24 |
2 files changed, 18 insertions, 7 deletions
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 635318af9c43..3e4371d51042 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1300,6 +1300,7 @@ struct f2fs_gc_control {
 	bool no_bg_gc;			/* check the space and stop bg_gc */
 	bool should_migrate_blocks;	/* should migrate blocks */
 	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
+	bool one_time;			/* require one time GC in one migration unit */
 	unsigned int nr_free_secs;	/* # of free sections to do GC */
 };
 
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 9a3d3994cf2b..a59fec64eccf 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -81,6 +81,8 @@ static int gc_thread_func(void *data)
 			continue;
 		}
 
+		gc_control.one_time = false;
+
 		/*
 		 * [GC triggering condition]
 		 * 0. GC is not conducted currently.
@@ -126,15 +128,19 @@ static int gc_thread_func(void *data)
 			wait_ms = gc_th->max_sleep_time;
 		}
 
-		if (need_to_boost_gc(sbi))
+		if (need_to_boost_gc(sbi)) {
 			decrease_sleep_time(gc_th, &wait_ms);
-		else
+			if (f2fs_sb_has_blkzoned(sbi))
+				gc_control.one_time = true;
+		} else {
 			increase_sleep_time(gc_th, &wait_ms);
+		}
 do_gc:
 		stat_inc_gc_call_count(sbi, foreground ?
 					FOREGROUND : BACKGROUND);
 
-		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
+				gc_control.one_time;
 
 		/* foreground GC was been triggered via f2fs_balance_fs() */
 		if (foreground)
@@ -1701,7 +1707,7 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
 static int do_garbage_collect(struct f2fs_sb_info *sbi,
 				unsigned int start_segno,
 				struct gc_inode_list *gc_list, int gc_type,
-				bool force_migrate)
+				bool force_migrate, bool one_time)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
@@ -1728,7 +1734,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 			sec_end_segno -= SEGS_PER_SEC(sbi) -
 					f2fs_usable_segs_in_sec(sbi, segno);
 
-		if (gc_type == BG_GC) {
+		if (gc_type == BG_GC || one_time) {
 			unsigned int window_granularity =
 				sbi->migration_window_granularity;
 
@@ -1911,7 +1917,8 @@ retry:
 	}
 
 	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
-				gc_control->should_migrate_blocks);
+				gc_control->should_migrate_blocks,
+				gc_control->one_time);
 	if (seg_freed < 0)
 		goto stop;
 
@@ -1922,6 +1929,9 @@ retry:
 			total_sec_freed++;
 	}
 
+	if (gc_control->one_time)
+		goto stop;
+
 	if (gc_type == FG_GC) {
 		sbi->cur_victim_sec = NULL_SEGNO;
 
@@ -2047,7 +2057,7 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
 		};
 
 		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
-						dry_run_sections == 0);
+						dry_run_sections == 0, false);
 		put_gc_inode(&gc_list);
 
 		if (!dry_run && get_valid_blocks(sbi, segno, true))
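
The GC-path half of the change can be pictured the same way: when one_time is set, do_garbage_collect() clamps the pass to one migration window (the `gc_type == BG_GC || one_time` hunk) and f2fs_gc() returns after a single unit instead of looping until nr_free_secs sections are free (the new `goto stop`). The model below only mirrors that control flow; SEGS_PER_SEC, the window size, and the free-section target are invented for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

/* Rough model of the f2fs_gc() / do_garbage_collect() flow after this patch.
 * Segment counts and the window size are made up; only the control flow
 * mirrors the diff above. */
#define SEGS_PER_SEC 16U

/* One collection pass over a section; BG_GC, or a one_time request, only
 * touches a migration window instead of the whole section. */
static unsigned int collect_one_section(unsigned int start_segno, bool fg_gc,
					bool one_time, unsigned int window)
{
	unsigned int end_segno = start_segno + SEGS_PER_SEC;

	if (!fg_gc || one_time)	/* mirrors: gc_type == BG_GC || one_time */
		end_segno = start_segno + window;

	printf("  migrating segments [%u, %u)\n", start_segno, end_segno);
	return end_segno - start_segno;
}

/* Outer loop modeled on f2fs_gc(): normally it keeps collecting until enough
 * sections are free, but a one_time request stops after a single pass. */
static void run_gc(bool fg_gc, bool one_time, unsigned int nr_free_secs)
{
	unsigned int freed_secs = 0, segno = 0;

	printf("fg_gc=%d one_time=%d target=%u sections\n",
	       fg_gc, one_time, nr_free_secs);

	while (freed_secs < nr_free_secs) {
		collect_one_section(segno, fg_gc, one_time, 4);
		freed_secs++;	/* pretend the victim section was freed */
		segno += SEGS_PER_SEC;

		if (one_time)	/* mirrors the new "goto stop" */
			break;
	}
	printf("  stopped after %u pass(es)\n", freed_secs);
}

int main(void)
{
	run_gc(true, true, 8);	/* boosted on a zoned device: one clamped pass */
	run_gc(true, false, 2);	/* ordinary FG_GC: loops until the target */
	return 0;
}
```

Read together with the parent commit, the apparent intent is that a boosted pass on a zoned device gets FG_GC's synchronous behaviour while still migrating only a bounded amount of data per GC-thread wake-up.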