author     Mark Rutland <mark.rutland@arm.com>    2019-08-14 15:28:48 +0200
committer  Will Deacon <will@kernel.org>          2019-08-14 18:06:58 +0200
commit     77ad4ce69321abbe26ec92b2a2691a66531eb688 (patch)
tree       22698a3b56a62124fc144dc7424d3050f47b0bc9 /arch/arm64/include
parent     arm64: memory: fix flipped VA space fallout (diff)
arm64: memory: rename VA_START to PAGE_END
Prior to commit:

  14c127c957c1c607 ("arm64: mm: Flip kernel VA space")

... VA_START described the start of the TTBR1 address space for a given
VA size described by VA_BITS, where all kernel mappings began.

Since that commit, VA_START described a portion midway through the
address space, where the linear map ends and other kernel mappings
begin.

To avoid confusion, let's rename VA_START to PAGE_END, making it clear
that it's not the start of the TTBR1 address space and implying that
it's related to PAGE_OFFSET. Comments and other mnemonics are updated
accordingly, along with a typo fix in the description of VMEMMAP_SIZE.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
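To make the renamed boundaries concrete, here is a minimal userspace
sketch (not part of the patch): the macro bodies are copied from the
diff below, and VA_BITS = 48 is an assumed example value, not something
this patch fixes.

  /* sketch: evaluate the arm64 VA boundary macros for VA_BITS = 48 */
  #include <stdio.h>

  #define UL(x)            x##UL
  #define _PAGE_OFFSET(va) (-(UL(1) << (va)))       /* start of the linear map */
  #define _PAGE_END(va)    (-(UL(1) << ((va) - 1))) /* end of the linear map */

  int main(void)
  {
          /* With 48-bit VAs the linear map fills the lower half of the
           * TTBR1 address space; all other kernel mappings sit above
           * PAGE_END. */
          printf("PAGE_OFFSET = 0x%016lx\n", _PAGE_OFFSET(48)); /* 0xffff000000000000 */
          printf("PAGE_END    = 0x%016lx\n", _PAGE_END(48));    /* 0xffff800000000000 */
          return 0;
  }

With 48-bit VAs the linear map thus spans exactly the lower half of the
TTBR1 space, from PAGE_OFFSET up to PAGE_END, which is why the old
description of VA_START as "the first kernel virtual address" had
become misleading.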
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/memory.h   20
-rw-r--r--  arch/arm64/include/asm/pgtable.h   4
2 files changed, 12 insertions, 12 deletions
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d69c2865ae40..a713bad71db5 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -28,20 +28,20 @@
* a struct page array
*
* If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
- * neads to cover the memory region from the beginning of the 52-bit
- * PAGE_OFFSET all the way to VA_START for 48-bit. This allows us to
+ * needs to cover the memory region from the beginning of the 52-bit
+ * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
* keep a constant PAGE_OFFSET and "fallback" to using the higher end
* of the VMEMMAP where 52-bit support is not available in hardware.
*/
-#define VMEMMAP_SIZE ((_VA_START(VA_BITS_MIN) - PAGE_OFFSET) \
+#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
>> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
/*
- * PAGE_OFFSET - the virtual address of the start of the linear map (top
- * (VA_BITS - 1))
- * KIMAGE_VADDR - the virtual address of the start of the kernel image
+ * PAGE_OFFSET - the virtual address of the start of the linear map, at the
+ * start of the TTBR1 address space.
+ * PAGE_END - the end of the linear map, where all other kernel mappings begin.
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image.
* VA_BITS - the maximum number of bits for virtual addresses.
- * VA_START - the first kernel virtual address.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
@@ -64,7 +64,7 @@
#define VA_BITS_MIN (VA_BITS)
#endif
-#define _VA_START(va) (-(UL(1) << ((va) - 1)))
+#define _PAGE_END(va) (-(UL(1) << ((va) - 1)))
#define KERNEL_START _text
#define KERNEL_END _end
@@ -87,7 +87,7 @@
#define KASAN_THREAD_SHIFT 1
#else
#define KASAN_THREAD_SHIFT 0
-#define KASAN_SHADOW_END (_VA_START(VA_BITS_MIN))
+#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
@@ -173,7 +173,7 @@
#ifndef __ASSEMBLY__
extern u64 vabits_actual;
-#define VA_START (_VA_START(vabits_actual))
+#define PAGE_END (_PAGE_END(vabits_actual))
#include <linux/bitops.h>
#include <linux/mmdebug.h>
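As a worked example of the VMEMMAP_SIZE definition in the memory.h hunk
above, the sketch below plugs in one plausible 52-bit configuration.
The values PAGE_SHIFT = 16 (64K pages), STRUCT_PAGE_MAX_SHIFT = 6,
VA_BITS = 52 and VA_BITS_MIN = 48 are assumptions for illustration, not
taken from this diff.

  /* sketch: plug one assumed 52-bit configuration into VMEMMAP_SIZE */
  #include <stdio.h>

  #define UL(x)                 x##UL
  #define _PAGE_OFFSET(va)      (-(UL(1) << (va)))
  #define _PAGE_END(va)         (-(UL(1) << ((va) - 1)))
  #define VA_BITS               52  /* assumed: 52-bit kernel VAs */
  #define VA_BITS_MIN           48
  #define PAGE_SHIFT            16  /* assumed: 64K pages */
  #define STRUCT_PAGE_MAX_SHIFT 6   /* assumed: struct page <= 64 bytes */
  #define PAGE_OFFSET           _PAGE_OFFSET(VA_BITS)
  #define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
                        >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))

  int main(void)
  {
          /* The span runs from the 52-bit PAGE_OFFSET up to the 48-bit
           * PAGE_END; dividing by pages-per-struct-page gives the array
           * size. */
          printf("span         = 0x%016lx\n", _PAGE_END(VA_BITS_MIN) - PAGE_OFFSET);
          printf("VMEMMAP_SIZE = 0x%016lx\n", VMEMMAP_SIZE); /* ~3.9 TiB */
          return 0;
  }

This prints span = 0x000f800000000000 and VMEMMAP_SIZE =
0x000003e000000000, matching the comment's point that the array must
cover from the 52-bit PAGE_OFFSET all the way to the 48-bit PAGE_END.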
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4a695b9ee0f0..979e24fadf35 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -856,8 +856,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-#define kc_vaddr_to_offset(v) ((v) & ~VA_START)
-#define kc_offset_to_vaddr(o) ((o) | VA_START)
+#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END)
+#define kc_offset_to_vaddr(o) ((o) | PAGE_END)
#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
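The kc_vaddr_to_offset()/kc_offset_to_vaddr() helpers in the pgtable.h
hunk convert between kernel virtual addresses and /proc/kcore file
offsets; they work because PAGE_END, as an address, is also a mask with
only the high bits set. A round-trip sketch, assuming 48-bit VAs (so
PAGE_END = 0xffff800000000000; the test address is an arbitrary
example):

  /* sketch: round-trip the /proc/kcore offset helpers for 48-bit VAs */
  #include <assert.h>
  #include <stdio.h>

  #define PAGE_END               0xffff800000000000UL /* _PAGE_END(48) */
  #define kc_vaddr_to_offset(v)  ((v) & ~PAGE_END)
  #define kc_offset_to_vaddr(o)  ((o) | PAGE_END)

  int main(void)
  {
          unsigned long vaddr = 0xffff800000001234UL; /* a VA above PAGE_END */
          unsigned long off = kc_vaddr_to_offset(vaddr);

          /* Masking with ~PAGE_END keeps the low 47 bits as the file
           * offset; OR-ing PAGE_END back in restores the address. */
          printf("offset = 0x%lx\n", off); /* 0x1234 */
          assert(kc_offset_to_vaddr(off) == vaddr);
          return 0;
  }

The round trip only has to hold for addresses at or above PAGE_END,
which is exactly the region /proc/kcore exposes here, so the simple
mask-and-OR pair suffices.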