author    Ard Biesheuvel <ardb@kernel.org>  2023-12-13 09:40:31 +0100
committer Catalin Marinas <catalin.marinas@arm.com>  2024-02-09 11:56:12 +0100
commit    d432b8d57c0c41873f1b8743203776baeb5778b6 (patch)
tree      ae0d590e05a73ee43a81d3683144de6fe807a685
parent    arm64: vmemmap: Avoid base2 order of struct page size to dimension region (diff)
arm64: mm: Reclaim unused vmemmap region for vmalloc use
The vmemmap array is statically sized based on the maximum supported size
of the virtual address space, but it is located inside the upper VA
region, which is statically sized based on the *minimum* supported size
of the VA space. This doesn't matter much when using 64k pages, which is
the only configuration that currently supports 52-bit virtual addressing.

However, upcoming LPA2 support will change this picture somewhat, as in
that case, the vmemmap array will take up more than 25% of the upper VA
region when using 4k pages. Given that most of this space is never used
when running on a system that does not support 52-bit virtual addressing,
let's reclaim the unused vmemmap area in that case.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20231213084024.2367360-15-ardb@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
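A rough userspace sketch (not kernel code) of the arithmetic behind this
patch: the program below evaluates the VMEMMAP_UNUSED_NPAGES expression
introduced in the diff under assumed, illustrative parameters, namely 4k
pages, VA_BITS = 52, vabits_actual = 48, and a hypothetical 64-byte
struct page (the real size depends on the kernel configuration).

  #include <stdio.h>
  #include <stdint.h>

  #define PAGE_SHIFT      12   /* 4k pages (assumed) */
  #define VA_BITS         52   /* compile-time maximum VA width (assumed) */
  #define VABITS_ACTUAL   48   /* runtime VA width on non-52-bit hardware (assumed) */
  #define STRUCT_PAGE_SZ  64   /* hypothetical sizeof(struct page) */

  /* _PAGE_OFFSET(va): start of the linear map for a given VA width,
   * modelled here as -(1UL << va) in modular 64-bit arithmetic. */
  static uint64_t page_offset(unsigned int va_bits)
  {
          return -(UINT64_C(1) << va_bits);
  }

  int main(void)
  {
          /* Linear-map pages a VA_BITS-sized vmemmap can describe but a
           * vabits_actual-sized linear map will never contain ... */
          uint64_t unused_npages =
                  (page_offset(VABITS_ACTUAL) - page_offset(VA_BITS)) >> PAGE_SHIFT;
          /* ... and the slice of the vmemmap array they would occupy. */
          uint64_t reclaimed = unused_npages * STRUCT_PAGE_SZ;

          printf("unused vmemmap entries: %llu\n",
                 (unsigned long long)unused_npages);
          printf("reclaimable for vmalloc: ~%llu TiB\n",
                 (unsigned long long)(reclaimed >> 40));
          return 0;
  }

Under these assumptions, roughly 60 TiB of the statically sized vmemmap
region would never be touched on 48-bit hardware; that is the space the
new VMALLOC_END definition hands back to vmalloc, minus the 8M guard.
The exact figure scales with sizeof(struct page).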
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 2aa2e3c961d7..522c21348ae8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -18,11 +18,15 @@
  * VMALLOC range.
  *
  * VMALLOC_START: beginning of the kernel vmalloc space
- * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
- *	and fixed mappings
+ * VMALLOC_END: extends to the available space below vmemmap
  */
 #define VMALLOC_START		(MODULES_END)
+#if VA_BITS == VA_BITS_MIN
 #define VMALLOC_END		(VMEMMAP_START - SZ_8M)
+#else
+#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
+#endif
 
 #define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
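Note the boundary behaviour of the new #else branch: when running with
vabits_actual == VA_BITS, _PAGE_OFFSET(vabits_actual) equals PAGE_OFFSET,
so VMEMMAP_UNUSED_NPAGES evaluates to zero and VMALLOC_END reduces to
VMEMMAP_START - SZ_8M, identical to the VA_BITS == VA_BITS_MIN branch
above. Nothing changes on systems that actually use the full VA space.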