author | Theodore Ts'o <tytso@mit.edu> | 2012-09-05 07:29:50 +0200
committer | Theodore Ts'o <tytso@mit.edu> | 2012-09-05 07:29:50 +0200
commit | 117fff10d7f140e12dd43df20d3f9fda80577460 (patch)
tree | 0236d1dfb77e794b1628b723f21b3c83383e0475 /fs/ext4/super.c
parent | ext4: avoid duplicate writes of the backup bg descriptor blocks (diff)
download | linux-117fff10d7f140e12dd43df20d3f9fda80577460.tar.xz linux-117fff10d7f140e12dd43df20d3f9fda80577460.zip
ext4: grow the s_flex_groups array as needed when resizing
Previously, we allocated the s_flex_groups array to the maximum size
to which the file system could be resized. There were two problems
with this approach. First, it wasted memory in the common case where
the file system was not resized. Second, once we start allowing
online resizing using the meta_bg scheme, there is no longer a
maximum size to which the file system can be resized. So instead, we
need to grow the s_flex_groups array as needed at online resize time.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
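At its core, the patch replaces a worst-case up-front allocation with a classic grow-as-needed dynamic array. Below is a minimal userspace sketch of that pattern, not the kernel code itself: the names (flex_stats, flex_array, flex_array_grow, round_up_pow2) are hypothetical, and calloc/free stand in for the kernel's ext4_kvzalloc()/ext4_kvfree() and roundup_pow_of_two().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical per-flex-group counters, standing in for struct flex_groups. */
struct flex_stats {
	long free_blocks;
	long free_inodes;
	long used_dirs;
};

struct flex_array {
	struct flex_stats *groups;
	size_t allocated;		/* elements currently allocated */
};

/* Round n up to the next power of two (stand-in for roundup_pow_of_two). */
static size_t round_up_pow2(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Grow fa so it can hold at least `needed` elements; returns 0 on success. */
static int flex_array_grow(struct flex_array *fa, size_t needed)
{
	struct flex_stats *new_groups;
	size_t bytes;

	if (needed <= fa->allocated)
		return 0;	/* common case: fits already, no allocation */

	/* Round the byte size up to a power of two so that a series of
	 * small grow requests costs only O(log n) reallocations. */
	bytes = round_up_pow2(needed * sizeof(struct flex_stats));
	new_groups = calloc(1, bytes);
	if (!new_groups)
		return -1;

	/* Preserve the old counters, then release the old array. */
	if (fa->groups) {
		memcpy(new_groups, fa->groups,
		       fa->allocated * sizeof(struct flex_stats));
		free(fa->groups);
	}
	fa->groups = new_groups;
	fa->allocated = bytes / sizeof(struct flex_stats);
	return 0;
}

int main(void)
{
	struct flex_array fa = { 0 };

	flex_array_grow(&fa, 100);	/* initial mount-time sizing */
	printf("allocated %zu slots\n", fa.allocated);
	flex_array_grow(&fa, 120);	/* fits in the slack: no realloc */
	printf("allocated %zu slots\n", fa.allocated);
	free(fa.groups);
	return 0;
}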
Diffstat (limited to 'fs/ext4/super.c')
-rw-r--r-- | fs/ext4/super.c | 48
1 file changed, 35 insertions(+), 13 deletions(-)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b875ff555865..b8de488889d6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1925,15 +1925,45 @@ done:
 	return res;
 }
 
+int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct flex_groups *new_groups;
+	int size;
+
+	if (!sbi->s_log_groups_per_flex)
+		return 0;
+
+	size = ext4_flex_group(sbi, ngroup - 1) + 1;
+	if (size <= sbi->s_flex_groups_allocated)
+		return 0;
+
+	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
+	new_groups = ext4_kvzalloc(size, GFP_KERNEL);
+	if (!new_groups) {
+		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
+			 size / (int) sizeof(struct flex_groups));
+		return -ENOMEM;
+	}
+
+	if (sbi->s_flex_groups) {
+		memcpy(new_groups, sbi->s_flex_groups,
+		       (sbi->s_flex_groups_allocated *
+			sizeof(struct flex_groups)));
+		ext4_kvfree(sbi->s_flex_groups);
+	}
+	sbi->s_flex_groups = new_groups;
+	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+	return 0;
+}
+
 static int ext4_fill_flex_info(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_desc *gdp = NULL;
-	ext4_group_t flex_group_count;
 	ext4_group_t flex_group;
 	unsigned int groups_per_flex = 0;
-	size_t size;
-	int i;
+	int i, err;
 
 	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
 	if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
@@ -1942,17 +1972,9 @@ static int ext4_fill_flex_info(struct super_block *sb)
 	}
 	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
 
-	/* We allocate both existing and potentially added groups */
-	flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
-			((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
-			  EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
-	size = flex_group_count * sizeof(struct flex_groups);
-	sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
-	if (sbi->s_flex_groups == NULL) {
-		ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
-			 flex_group_count);
+	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
+	if (err)
 		goto failed;
-	}
 
 	for (i = 0; i < sbi->s_groups_count; i++) {
 		gdp = ext4_get_group_desc(sb, i, NULL);
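A note on the sizing logic in ext4_alloc_flex_bg_array(): the required element count is converted to bytes and rounded up with roundup_pow_of_two(), and s_flex_groups_allocated is then recomputed from the rounded byte count. The array therefore carries some slack, so a later grow request that still fits returns immediately at the size <= sbi->s_flex_groups_allocated check, and the number of allocate-copy-free cycles grows only logarithmically with the final number of flex groups. As a worked example (assuming a 12-byte struct flex_groups, i.e. three atomic_t counters, as in this era of the code), a request for 100 flex groups becomes 1200 bytes, is rounded up to 2048, and leaves room for 170 flex groups before the next reallocation.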