author		Ard Biesheuvel <ardb@kernel.org>	2020-11-10 19:05:11 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2020-11-12 09:32:25 +0100
commit		c1090bb10d5e15906d296936e64317e35c43f21d (patch)
tree		caf928e4d9bd18669e753444458088a7f6654bb9 /arch/arm64
parent		Documentation/arm64: fix RST layout of memory.rst (diff)
download	linux-c1090bb10d5e15906d296936e64317e35c43f21d.tar.xz
		linux-c1090bb10d5e15906d296936e64317e35c43f21d.zip
arm64: mm: don't assume struct page is always 64 bytes
Commit 8c96400d6a39be7 simplified the page-to-virt and virt-to-page
conversions, based on the assumption that struct page is always 64 bytes in
size, in which case we can use a single signed shift to perform the
conversion (provided that the vmemmap array is placed appropriately in the
kernel VA space).

Unfortunately, this assumption turns out not to hold, and so we need to
revert part of this commit, and go back to an affine transformation. Given
that all the quantities involved are compile time constants, this should not
make any practical difference.

Fixes: 8c96400d6a39 ("arm64: mm: make vmemmap region a projection of the linear region")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20201110180511.29083-1-ardb@kernel.org
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
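The restored conversion is a plain affine map between the linear region and
the vmemmap array: subtract one region's base, divide by that region's
element size, then multiply by the other region's element size and add its
base. Below is a minimal userspace sketch of that arithmetic; the constants
and the struct page layout are placeholders chosen for illustration, not the
kernel's actual values.

/*
 * Minimal userspace sketch of the affine conversion restored by this patch.
 * PAGE_SIZE, PAGE_OFFSET, VMEMMAP_START and the struct page layout below
 * are placeholder values picked for illustration only, not the kernel's.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_OFFSET	0xffff000000000000ULL	/* base of linear map (assumed) */
#define VMEMMAP_START	0xfffffe0000000000ULL	/* base of vmemmap array (assumed) */

struct page { uint64_t flags; uint64_t other[6]; };	/* 56 bytes: not a power of two */

static uint64_t page_to_virt(const struct page *page)
{
	uint64_t idx = ((uint64_t)page - VMEMMAP_START) / sizeof(struct page);

	return PAGE_OFFSET + idx * PAGE_SIZE;
}

static struct page *virt_to_page(uint64_t addr)
{
	uint64_t idx = (addr - PAGE_OFFSET) / PAGE_SIZE;

	return (struct page *)(VMEMMAP_START + idx * sizeof(struct page));
}

int main(void)
{
	/* Round-trip the 5th page of the linear map through both helpers. */
	uint64_t va = PAGE_OFFSET + 5 * PAGE_SIZE;

	assert(page_to_virt(virt_to_page(va)) == va);
	printf("va 0x%llx -> vmemmap entry %p -> va 0x%llx\n",
	       (unsigned long long)va, (void *)virt_to_page(va),
	       (unsigned long long)page_to_virt(virt_to_page(va)));
	return 0;
}

Note that the index computation and the multiplication use only compile-time
constants for the sizes, which is why the change is not expected to cost
anything in practice.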
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/memory.h	8
-rw-r--r--	arch/arm64/mm/init.c			2
2 files changed, 5 insertions, 5 deletions
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 03e9b112bd94..556cb2d62b5b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -308,13 +308,15 @@ static inline void *phys_to_virt(phys_addr_t x)
#else
#define page_to_virt(x) ({ \
__typeof__(x) __page = x; \
- u64 __addr = (u64)__page << VMEMMAP_SHIFT; \
+ u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
+ u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \
(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
#define virt_to_page(x) ({ \
- u64 __addr = __tag_reset((u64)(x)) & PAGE_MASK; \
- (struct page *)((s64)__addr >> VMEMMAP_SHIFT); \
+ u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \
+ u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \
+ (struct page *)__addr; \
})
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
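For context on why the removed shift-based form required a 64-byte struct
page: a single shift can only relate the two regions when PAGE_SIZE divided
by sizeof(struct page) is an exact power of two. A small illustrative check
follows, assuming 4 KiB pages and hypothetical struct page sizes; it is not
kernel code.

/*
 * Illustrative check (not kernel code): a pure shift between a vmemmap
 * offset and a linear-map offset only exists when PAGE_SIZE divided by
 * sizeof(struct page) is an exact power of two.  4 KiB pages assumed;
 * the struct page sizes are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;
	const unsigned long sizes[] = { 64, 56, 72 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long ratio = page_size / sizes[i];
		int exact = (page_size % sizes[i]) == 0;
		int pow2 = exact && ratio && !(ratio & (ratio - 1));

		printf("sizeof(struct page) = %2lu: %s\n", sizes[i],
		       pow2 ? "single-shift conversion works"
			    : "needs the affine divide/multiply");
	}
	return 0;
}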
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3a5e9f9298e9..7e15d92836d8 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -502,8 +502,6 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
- BUILD_BUG_ON(!is_power_of_2(sizeof(struct page)));
-
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
swiotlb_init(1);
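With the affine form in place, struct page no longer has to be a power of
two in size, so the compile-time check above can be dropped. For reference,
an assertion of this kind can be sketched with a C11 _Static_assert; this is
only an illustration with a hypothetical layout, not the kernel's
BUILD_BUG_ON()/is_power_of_2() machinery.

/*
 * Illustrative C11 equivalent of the dropped check, using a hypothetical
 * layout; not the kernel's BUILD_BUG_ON()/is_power_of_2() implementation.
 */
#include <stddef.h>

struct page_example { unsigned long flags; void *links[7]; };	/* 64 bytes on LP64 */

/* Refuses to compile if the size is not a power of two. */
_Static_assert((sizeof(struct page_example) &
		(sizeof(struct page_example) - 1)) == 0,
	       "struct page_example size must be a power of two");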