author    Alex Zhuravlev <azhuravlev@whamcloud.com>  2020-06-20 04:08:56 +0200
committer Theodore Ts'o <tytso@mit.edu>  2020-08-06 07:44:55 +0200
commit    c1d2c7d47e15482bb23cda83a5021e60f624a09c
tree      9d1bd02a84b1a0e707537842a59a4b64f68e0a07
parent    ext4: add prefetching for block allocation bitmaps
ext4: skip non-loaded groups at cr=0/1 when scanning for good groups
cr=0 is supposed to be an optimization to save CPU cycles, but if the buddy data (in memory) is not initialized then the optimization makes no sense, since we have to do sync IO that costs a lot of cycles. Moreover, at cr=0 mballoc does not accept just any available chunk, and cr=1 likewise skips groups using a heuristic based on the average fragment size. It is more useful to skip such uninitialized groups and switch to cr=2, where groups are scanned for available chunks. However, we always read the first block group in a flex_bg, so that metadata blocks can be placed in the first block group of the flex_bg whenever possible.

To measure the effect, a 120TB filesystem was simulated using a sparse image on a dm-slow virtual device; the image was formatted and then filled using debugfs to mark ~85% of the available space as busy. Without the patch the mount process could not complete within half an hour (according to vmstat it would have taken ~10-11 hours). With the patch applied, the mount took ~20 seconds.

Lustre-bug-id: https://jira.whamcloud.com/browse/LU-12988
Signed-off-by: Alex Zhuravlev <azhuravlev@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Artem Blagodarenko <artem.blagodarenko@gmail.com>
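For illustration, a minimal user-space C sketch (not kernel code) of the skip decision that the hunk below adds to ext4_mb_good_group_nolock(). The helpers first_in_flex_bg() and may_skip_uninit_group() are invented for this sketch, and the ext4_has_group_desc_csum() check is folded into a single block_uninit flag; only the bit arithmetic on s_log_groups_per_flex mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* A group is the first of its flex_bg when its low log_groups_per_flex
 * bits are all zero; with no flex_bg feature there is no special first
 * group to protect. */
static bool first_in_flex_bg(unsigned int group, unsigned int log_groups_per_flex)
{
	if (!log_groups_per_flex)
		return false;
	return (group & ((1U << log_groups_per_flex) - 1)) == 0;
}

/* Model of the new early return: at cr=0/1 an uninitialized group is
 * skipped unless it is the first group of a flex_bg or is marked
 * BLOCK_UNINIT (whose buddy can be built without reading the bitmap). */
static bool may_skip_uninit_group(int cr, unsigned int group,
				  unsigned int log_groups_per_flex,
				  bool block_uninit)
{
	if (cr >= 2)
		return false;	/* cr=2/3 must scan every group */
	if (first_in_flex_bg(group, log_groups_per_flex))
		return false;	/* metadata lands here, always load it */
	if (block_uninit)
		return false;	/* buddy init is cheap, no bitmap IO needed */
	return true;
}

int main(void)
{
	/* s_log_groups_per_flex = 4, i.e. 16 groups per flex_bg */
	printf("group 17, cr=0: skip=%d\n", may_skip_uninit_group(0, 17, 4, false));
	printf("group 16, cr=0: skip=%d\n", may_skip_uninit_group(0, 16, 4, false));
	printf("group 17, cr=2: skip=%d\n", may_skip_uninit_group(2, 17, 4, false));
	return 0;
}

In this model, group 16 (the first group of its flex_bg) is never skipped, while group 17 can be skipped at cr=0/1 unless it is BLOCK_UNINIT, in which case initializing its buddy requires no IO anyway.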
Diffstat (limited to 'fs/ext4')
-rw-r--r--   fs/ext4/mballoc.c   21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c350ae9cb0d9..9a07da53ab7b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2177,6 +2177,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
 {
 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
 	struct super_block *sb = ac->ac_sb;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
 	ext4_grpblk_t free;
 	int ret = 0;
@@ -2195,7 +2196,25 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
 
 	/* We only do this if the grp has never been initialized */
 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
-		ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
+		struct ext4_group_desc *gdp =
+			ext4_get_group_desc(sb, group, NULL);
+		int ret;
+
+		/* cr=0/1 is a very optimistic search to find large
+		 * good chunks almost for free. If buddy data is not
+		 * ready, then this optimization makes no sense. But
+		 * we never skip the first block group in a flex_bg,
+		 * since this gets used for metadata block allocation,
+		 * and we want to make sure we locate metadata blocks
+		 * in the first block group in the flex_bg if possible.
+		 */
+		if (cr < 2 &&
+		    (!sbi->s_log_groups_per_flex ||
+		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
+		    !(ext4_has_group_desc_csum(sb) &&
+		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
+			return 0;
+		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
 		if (ret)
 			return ret;
 	}