path: root/mm/sparse.c
author     Mike Rapoport <rppt@linux.ibm.com>          2021-09-02 23:58:02 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-09-03 18:58:15 +0200
commit     c803b3c8b3b70f306ee6300bf8acdd70ffd1441a (patch)
tree       7ffa7206a7d32de16edc7741d482c9c3d2c8f219 /mm/sparse.c
parent     microblaze: simplify pte_alloc_one_kernel() (diff)
download   linux-c803b3c8b3b70f306ee6300bf8acdd70ffd1441a.tar.xz
           linux-c803b3c8b3b70f306ee6300bf8acdd70ffd1441a.zip
mm: introduce memmap_alloc() to unify memory map allocation
There are several places that allocate memory for the memory map: alloc_node_mem_map() for FLATMEM, sparse_buffer_init() and __populate_section_memmap() for SPARSEMEM.

The memory allocated in the FLATMEM case is zeroed and it is never poisoned, regardless of the CONFIG_PAGE_POISON setting. The memory allocated in the SPARSEMEM cases is not zeroed and it is implicitly poisoned inside memblock if CONFIG_PAGE_POISON is set.

Introduce a memmap_alloc() wrapper for the memblock allocators that will be used for both the FLATMEM and SPARSEMEM cases and will make memory map zeroing and poisoning consistent across the different memory models.

Link: https://lkml.kernel.org/r/20210714123739.16493-4-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Michal Simek <monstr@monstr.eu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
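The memmap_alloc() helper itself is introduced outside mm/sparse.c (this page is limited to the mm/sparse.c part of the patch), so its definition does not appear in the hunks below. Purely as an illustration inferred from the description above and the two call sites in the diff, a minimal sketch of such a wrapper over the memblock allocators could look as follows; the body is an assumption for readability, not the verbatim upstream code:

#include <linux/memblock.h>

/*
 * Sketch of a memmap_alloc() wrapper over the raw memblock allocators.
 * The exact_nid flag picks between the "exact node" and "try node"
 * variants that the two mm/sparse.c call sites previously used directly;
 * the zeroing/poisoning policy handled by the real helper is omitted here.
 */
void * __init memmap_alloc(phys_addr_t size, phys_addr_t align,
                           phys_addr_t min_addr, int nid, bool exact_nid)
{
        void *ptr;

        if (exact_nid)
                ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
                                                   MEMBLOCK_ALLOC_ACCESSIBLE,
                                                   nid);
        else
                ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
                                                 MEMBLOCK_ALLOC_ACCESSIBLE,
                                                 nid);

        return ptr;
}

In the hunks below, __populate_section_memmap() passes exact_nid=false and sparse_buffer_init() passes exact_nid=true, matching the try-nid and exact-nid allocators they called before.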
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--   mm/sparse.c   6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index fd29b3abffa0..120bc8ea5293 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -436,8 +436,7 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
 	if (map)
 		return map;
 
-	map = memblock_alloc_try_nid_raw(size, size, addr,
-					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	map = memmap_alloc(size, size, addr, nid, false);
 	if (!map)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
 		      __func__, size, PAGE_SIZE, nid, &addr);
@@ -464,8 +463,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
 	 * and we want it to be properly aligned to the section size - this is
 	 * especially the case for VMEMMAP which maps memmap to PMDs
 	 */
-	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
-					addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
 	sparsemap_buf_end = sparsemap_buf + size;
 }