| author | Will Deacon <will@kernel.org> | 2019-08-13 17:46:11 +0200 |
|---|---|---|
| committer | Will Deacon <will@kernel.org> | 2019-08-14 14:05:35 +0200 |
| commit | 96628f0fb18080a4166fc9eab8f7fd062d860667 | |
| tree | daa0bed086bd029c26e7e1b0827f11293b74119d /arch/arm64/include | |
| parent | arm64: memory: Ensure address tag is masked in conversion macros | |
arm64: memory: Rewrite default page_to_virt()/virt_to_page()
The default implementations of page_to_virt() and virt_to_page() are
fairly confusing to read, and the former evaluates its 'page' parameter
twice in the macro.
Rewrite them so that the computation is expressed as 'base + index' in
both cases and the parameter is always evaluated exactly once.
Tested-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
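
The 'base + index' computation described above amounts to two mirror-image linear mappings: a page's offset within the vmemmap array gives its index, which scaled by PAGE_SIZE gives its offset within the linear map, and vice versa. Below is a minimal user-space sketch of that round trip; the constants, the struct page stand-in and the plain u64 "addresses" are made up for illustration, and the KASAN tag handling in the real macros is left out.

```c
/*
 * User-space sketch of the 'base + index' round trip. The constants and
 * struct page below are made up for illustration; addresses are modelled
 * as plain u64 values rather than real pointers, and KASAN tags are ignored.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE      4096ULL
#define PAGE_OFFSET    0xffff800000000000ULL  /* hypothetical linear-map base */
#define VMEMMAP_START  0xfffffc0000000000ULL  /* hypothetical vmemmap base */

struct page { uint64_t flags; uint64_t _pad[7]; };  /* stand-in, 64 bytes */

/* page_to_virt: index of the struct page in vmemmap, scaled by PAGE_SIZE. */
static uint64_t page_to_virt(uint64_t page)
{
	uint64_t idx = (page - VMEMMAP_START) / sizeof(struct page);
	return PAGE_OFFSET + idx * PAGE_SIZE;
}

/* virt_to_page: index of the page in the linear map, scaled by sizeof(struct page). */
static uint64_t virt_to_page(uint64_t vaddr)
{
	uint64_t idx = (vaddr - PAGE_OFFSET) / PAGE_SIZE;
	return VMEMMAP_START + idx * sizeof(struct page);
}

int main(void)
{
	uint64_t page = VMEMMAP_START + 42 * sizeof(struct page);  /* 43rd struct page */
	uint64_t vaddr = page_to_virt(page);

	assert(vaddr == PAGE_OFFSET + 42 * PAGE_SIZE);  /* 43rd page of the linear map */
	assert(virt_to_page(vaddr) == page);            /* the conversions invert each other */
	printf("page 0x%llx <-> vaddr 0x%llx\n",
	       (unsigned long long)page, (unsigned long long)vaddr);
	return 0;
}
```

Both conversions are pure pointer arithmetic, which is why the real macros only need the extra __tag_set()/__tag_reset() steps to preserve KASAN tags.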
Diffstat (limited to 'arch/arm64/include')
-rw-r--r-- | arch/arm64/include/asm/memory.h | 21 |
1 file changed, 10 insertions, 11 deletions
```diff
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 243e05ad4a67..636d414608cb 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -313,19 +313,18 @@ static inline void *phys_to_virt(phys_addr_t x)
 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #else
-#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) - PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr)	(((u64)(kaddr) - VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
-
-#define page_to_virt(page)	({					\
-	unsigned long __addr =						\
-		((__page_to_voff(page)) + PAGE_OFFSET);			\
-	const void *__addr_tag =					\
-		__tag_set((void *)__addr, page_kasan_tag(page));	\
-	((void *)__addr_tag);						\
+#define page_to_virt(x)	({						\
+	__typeof__(x) __page = x;					\
+	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
+	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
+	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
 })
 
-#define virt_to_page(vaddr)	\
-	((struct page *)((__virt_to_pgoff(__tag_reset(vaddr))) + VMEMMAP_START))
+#define virt_to_page(x)	({						\
+	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
+	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
+	(struct page *)__addr;						\
+})
 #endif
 
 #define virt_addr_valid(addr)	({					\
```
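
The `__typeof__(x) __page = x;` line in the new page_to_virt() is what provides the single evaluation the commit message mentions: the argument is copied into a local inside a GNU statement expression, and only the copy is used afterwards. Here is a minimal sketch of that idiom in isolation (GCC/Clang only, since statement expressions and __typeof__ are GNU extensions; the SQUARE_* macros and the next() helper are made up for illustration).

```c
/*
 * Minimal sketch of the single-evaluation idiom: copying the macro
 * argument into a __typeof__ local means any side effects in the
 * argument expression happen exactly once.
 */
#include <assert.h>

static int calls;
static int next(void) { return ++calls; }  /* argument with a visible side effect */

#define SQUARE_TWICE(x) ((x) * (x))                               /* expands x twice */
#define SQUARE_ONCE(x)  ({ __typeof__(x) __v = (x); __v * __v; }) /* expands x once  */

int main(void)
{
	calls = 0;
	(void)SQUARE_TWICE(next());  /* next() is called twice */
	assert(calls == 2);

	calls = 0;
	(void)SQUARE_ONCE(next());   /* next() is called exactly once */
	assert(calls == 1);
	return 0;
}
```

The rewritten virt_to_page() does not need such a copy because its body mentions its parameter only once.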