author      Linus Torvalds <torvalds@linux-foundation.org>  2021-07-04 21:23:05 +0200
committer   Linus Torvalds <torvalds@linux-foundation.org>  2021-07-04 21:23:05 +0200
commit      a412897fb546fbb291095be576165ce757eff70b (patch)
tree        ca1e00e721e255d7d6a62862a629697adb22a880 /mm
parent      Merge tag 's390-5.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390... (diff)
parent      arm: extend pfn_valid to take into account freed memory map alignment (diff)
download    linux-a412897fb546fbb291095be576165ce757eff70b.tar.xz
            linux-a412897fb546fbb291095be576165ce757eff70b.zip
Merge tag 'memblock-v5.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock
Pull memblock updates from Mike Rapoport:
 "Fix arm crashes caused by holes in the memory map.

  The coordination between freeing of unused memory map, pfn_valid() and
  core mm assumptions about validity of the memory map in various ranges
  was not designed for complex layouts of the physical memory with a lot
  of holes all over the place.

  Kefeng Wang reported crashes in move_freepages() on a system with the
  following memory layout [1]:

    node 0: [mem 0x0000000080a00000-0x00000000855fffff]
    node 0: [mem 0x0000000086a00000-0x0000000087dfffff]
    node 0: [mem 0x000000008bd00000-0x000000008c4fffff]
    node 0: [mem 0x000000008e300000-0x000000008ecfffff]
    node 0: [mem 0x0000000090d00000-0x00000000bfffffff]
    node 0: [mem 0x00000000cc000000-0x00000000dc9fffff]
    node 0: [mem 0x00000000de700000-0x00000000de9fffff]
    node 0: [mem 0x00000000e0800000-0x00000000e0bfffff]
    node 0: [mem 0x00000000f4b00000-0x00000000f6ffffff]
    node 0: [mem 0x00000000fda00000-0x00000000ffffefff]

  These crashes can be mitigated by enabling CONFIG_HOLES_IN_ZONE on ARM,
  essentially turning pfn_valid_within() into pfn_valid() instead of
  having it hardwired to 1 on that architecture, but that would mean
  keeping CONFIG_HOLES_IN_ZONE solely for this purpose.

  A cleaner approach is to update ARM's implementation of pfn_valid() to
  take into account the rounding of the freed memory map to pageblock
  boundaries, and to make sure it returns true for PFNs that have memory
  map entries even if there is no physical memory backing those PFNs"

Link: https://lore.kernel.org/lkml/2a1592ad-bc9d-4664-fd19-f7448a37edc0@huawei.com [1]

* tag 'memblock-v5.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  arm: extend pfn_valid to take into account freed memory map alignment
  memblock: ensure there is no overflow in memblock_overlaps_region()
  memblock: align freed memory map on pageblock boundaries with SPARSEMEM
  memblock: free_unused_memmap: use pageblock units instead of MAX_ORDER
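The arm side of this series is not visible in the mm-only diffstat below, but it is what the reworked memblock_overlaps_region() ultimately serves. As a rough sketch only (based on the patch subject "arm: extend pfn_valid to take into account freed memory map alignment" and the description above, not on the exact code in this tree), arm's pfn_valid() can treat a PFN as valid whenever any part of its surrounding pageblock is present in memblock.memory, because the freed memory map is now rounded to pageblock boundaries:

/*
 * Illustrative sketch, not the literal arm patch: a pfn is considered
 * valid if its pageblock overlaps registered memory, since memmap
 * entries are kept for whole pageblocks.
 */
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);
	unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;

	/* Reject pfns whose physical address does not round-trip. */
	if (__phys_to_pfn(addr) != pfn)
		return 0;

	/*
	 * The memory map freed by free_unused_memmap() is rounded to
	 * pageblock boundaries, so a memmap entry exists as long as the
	 * pageblock containing this address overlaps a memory region,
	 * even if the pfn itself has no physical backing.
	 */
	return memblock_overlaps_region(&memblock.memory,
					ALIGN_DOWN(addr, pageblock_size),
					pageblock_size);
}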
Diffstat (limited to 'mm')
-rw-r--r--   mm/memblock.c   26
1 file changed, 14 insertions, 12 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 3e4acbf03ab7..0041ff62c584 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -182,6 +182,8 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
 {
 	unsigned long i;
 
+	memblock_cap_size(base, &size);
+
 	for (i = 0; i < type->cnt; i++)
 		if (memblock_addrs_overlap(base, size, type->regions[i].base,
 					   type->regions[i].size))
@@ -1799,7 +1801,6 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
  */
 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
-	memblock_cap_size(base, &size);
 	return memblock_overlaps_region(&memblock.reserved, base, size);
 }
 
@@ -1946,14 +1947,13 @@ static void __init free_unused_memmap(void)
 		 * due to SPARSEMEM sections which aren't present.
 		 */
 		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#else
+#endif
 		/*
-		 * Align down here since the VM subsystem insists that the
-		 * memmap entries are valid from the bank start aligned to
-		 * MAX_ORDER_NR_PAGES.
+		 * Align down here since many operations in VM subsystem
+		 * presume that there are no holes in the memory map inside
+		 * a pageblock
 		 */
-		start = round_down(start, MAX_ORDER_NR_PAGES);
-#endif
+		start = round_down(start, pageblock_nr_pages);
 
 		/*
 		 * If we had a previous bank, and there is a space
@@ -1963,16 +1963,18 @@ static void __init free_unused_memmap(void)
 			free_memmap(prev_end, start);
 
 		/*
-		 * Align up here since the VM subsystem insists that the
-		 * memmap entries are valid from the bank end aligned to
-		 * MAX_ORDER_NR_PAGES.
+		 * Align up here since many operations in VM subsystem
+		 * presume that there are no holes in the memory map inside
+		 * a pageblock
 		 */
-		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
+		prev_end = ALIGN(end, pageblock_nr_pages);
 	}
 
 #ifdef CONFIG_SPARSEMEM
-	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
+		prev_end = ALIGN(end, pageblock_nr_pages);
 		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
+	}
 #endif
 }
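The first two hunks above exist because the arm pfn_valid() sketched earlier now calls memblock_overlaps_region() directly: moving the memblock_cap_size() call out of memblock_is_region_reserved() and into memblock_overlaps_region() guarantees that base + size cannot wrap past the end of the physical address space for any caller. For reference, a rough reconstruction (from memory, not from this diff) of the existing memblock_cap_size() helper in mm/memblock.c:

/*
 * Illustrative reconstruction of the existing helper: clamp *size so
 * that base + *size cannot overflow phys_addr_t, and return the new size.
 */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

Without this cap, a size close to PHYS_ADDR_MAX could make the computed end address wrap around, and memblock_addrs_overlap() could then report a bogus answer for regions near the top of the address space.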