| author | Dennis Zhou <dennis@kernel.org> | 2020-10-30 21:40:21 +0100 |
|---|---|---|
| committer | Dennis Zhou <dennis@kernel.org> | 2020-10-31 00:02:28 +0100 |
| commit | 61cf93d3e14a29288e4d5522aecb6e58268eec62 | |
| tree | 854c2d392a1f1d066ba4dac3e33e7b3b3cc0c4e8 /mm/percpu.c | |
| parent | asm-generic: percpu: avoid Wshadow warning | |
percpu: convert flexible array initializers to use struct_size()
Use the safer struct_size() macro, as prompted by the long discussion in [1].
[1] https://lore.kernel.org/lkml/20200917204514.GA2880159@google.com/
Reviewed-by: Gustavo A. R. Silva <gustavoars@kernel.org>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
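For context, struct_size(p, member, n) from <linux/overflow.h> computes the size of a structure that ends in a flexible array member: sizeof(*p) plus n trailing elements, saturating to SIZE_MAX instead of silently wrapping if the multiplication or addition would overflow. The user-space sketch below shows the equivalence between the open-coded arithmetic being removed and the member-based form being added; struct chunk and nr_longs are simplified stand-ins chosen here for illustration, not the real layout of struct pcpu_chunk.

```c
#include <stddef.h>
#include <stdio.h>

/*
 * Simplified stand-in for struct pcpu_chunk: a fixed header followed by
 * a flexible array member, like the 'populated' page bitmap in mm/percpu.c.
 * (Illustrative only; the real struct has many more fields.)
 */
struct chunk {
	int nr_pages;
	unsigned long populated[];	/* flexible array member */
};

int main(void)
{
	/* Stand-in for BITS_TO_LONGS(region_size >> PAGE_SHIFT). */
	size_t nr_longs = 4;

	/* Open-coded size, as the old percpu code computed it. */
	size_t open_coded = sizeof(struct chunk) +
			    nr_longs * sizeof(unsigned long);

	/*
	 * What struct_size(chunk_ptr, populated, nr_longs) evaluates to:
	 * the struct itself plus nr_longs trailing array elements.  The
	 * real kernel macro additionally saturates to SIZE_MAX if the
	 * multiplication or addition would overflow.
	 */
	size_t member_based = sizeof(struct chunk) +
			      nr_longs * sizeof(((struct chunk *)0)->populated[0]);

	printf("open-coded: %zu, member-based: %zu\n",
	       open_coded, member_based);
	return 0;
}
```

The member-based form derives the element size from the struct definition itself, so the size expression cannot drift out of sync with the type of the populated[] member, and the overflow checking comes for free.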
Diffstat (limited to 'mm/percpu.c')
-rw-r--r-- | mm/percpu.c | 8 ++++---- |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 66a93f096394..ad7a37ee74ef 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1315,8 +1315,8 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
 	region_size = ALIGN(start_offset + map_size, lcm_align);
 
 	/* allocate chunk */
-	alloc_size = sizeof(struct pcpu_chunk) +
-		BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long);
+	alloc_size = struct_size(chunk, populated,
+				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
 	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
 	if (!chunk)
 		panic("%s: Failed to allocate %zu bytes\n", __func__,
@@ -2521,8 +2521,8 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
 	pcpu_atom_size = ai->atom_size;
-	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
-		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
+	pcpu_chunk_struct_size = struct_size(chunk, populated,
+					     BITS_TO_LONGS(pcpu_unit_pages));
 
 	pcpu_stats_save_ai(ai);