From 739e49e0fc80990a351961c99a3142094822f040 Mon Sep 17 00:00:00 2001
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Date: Tue, 20 Sep 2022 09:49:51 +0800
Subject: arm64: mm: handle ARM64_KERNEL_USES_PMD_MAPS in vmemmap_populate()

Directly check ARM64_KERNEL_USES_PMD_MAPS to choose base page or PMD
level huge page mapping in vmemmap_populate() to simplify the code a bit.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/r/20220920014951.196191-1-wangkefeng.wang@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/mm/mmu.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e7ad44585f40..69deed27dec8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1180,14 +1180,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 }
 #endif
 
-#if !ARM64_KERNEL_USES_PMD_MAPS
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
-		struct vmem_altmap *altmap)
-{
-	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
-	return vmemmap_populate_basepages(start, end, node, altmap);
-}
-#else	/* !ARM64_KERNEL_USES_PMD_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
@@ -1199,6 +1191,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	pmd_t *pmdp;
 
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
+	if (!ARM64_KERNEL_USES_PMD_MAPS)
+		return vmemmap_populate_basepages(start, end, node, altmap);
+
 	do {
 		next = pmd_addr_end(addr, end);
 
@@ -1232,7 +1228,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 
 	return 0;
 }
-#endif	/* !ARM64_KERNEL_USES_PMD_MAPS */
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 void vmemmap_free(unsigned long start, unsigned long end,
--
cgit v1.2.3
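
For readers without the full file at hand: the net effect of the patch is that the
base-page fallback becomes an early return inside the single remaining
vmemmap_populate() definition. ARM64_KERNEL_USES_PMD_MAPS is a compile-time 0/1
constant, so the plain C "if" is resolved at build time and the compiler discards
the dead branch, just as the removed #if/#else did. Below is a condensed sketch of
the post-patch shape; the macro definition shown is an assumption based on
asm/kernel-pgtable.h of this kernel era, and the PMD-mapping loop body (unchanged
by this patch) is elided.

    /* Sketch only, not a drop-in copy of arch/arm64/mm/mmu.c. */

    /*
     * Assumed definition from arch/arm64/include/asm/kernel-pgtable.h:
     * PMD (section) mappings for the kernel are only usable with 4K pages,
     * so the macro is a build-time 0/1 constant.
     */
    #ifdef CONFIG_ARM64_4K_PAGES
    #define ARM64_KERNEL_USES_PMD_MAPS	1
    #else
    #define ARM64_KERNEL_USES_PMD_MAPS	0
    #endif

    int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
    		struct vmem_altmap *altmap)
    {
    	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

    	/*
    	 * 16K/64K kernels: same base-page path the removed
    	 * "#if !ARM64_KERNEL_USES_PMD_MAPS" variant took; the condition is
    	 * constant, so the rest of the function is compiled out.
    	 */
    	if (!ARM64_KERNEL_USES_PMD_MAPS)
    		return vmemmap_populate_basepages(start, end, node, altmap);

    	/*
    	 * 4K kernels: walk [start, end) at PMD granularity and install
    	 * PMD-level huge mappings, falling back to base pages when a
    	 * PMD_SIZE block cannot be allocated. That loop is untouched by
    	 * this patch and elided from the sketch.
    	 */
    	return 0;
    }

A side benefit of using a C "if" on a constant rather than preprocessor guards is
that both paths are always seen by the compiler, so the base-page branch keeps
getting type-checked even in configurations that never take it.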