author		Nick Hu <nickhu@andestech.com>	2020-01-06 19:38:32 +0100
committer	Palmer Dabbelt <palmerdabbelt@google.com>	2020-01-22 22:09:58 +0100
commit		8ad8b72721d0f07fa02dbe71f901743f9c71c8e6 (patch)
tree		f7e4dfb5c72ced986c1183e9d0c830de608ea2fc /arch/riscv/include/asm/pgtable-64.h
parent		kasan: No KASAN's memmove check if archs don't have it. (diff)
riscv: Add KASAN support
This patch ports the Kernel Address SANitizer (KASAN) feature to RISC-V.

Note: the start address of shadow memory is at the beginning of kernel space, which is 2^64 - (2^39 / 2) in SV39. The size of the kernel space is 2^38 bytes, so the size of shadow memory should be 2^38 / 8. Thus, the shadow memory does not overlap with the fixmap area.

There are currently two limitations in this port:

1. RV64 only: KASAN needs a large address space for the extra shadow memory region.

2. KASAN can't debug modules, since modules are allocated in the VMALLOC area. We map the shadow memory corresponding to the VMALLOC area to the kasan_early_shadow_page, because we don't have enough physical space for all of the shadow memory corresponding to the VMALLOC area.

Signed-off-by: Nick Hu <nickhu@andestech.com>
Reported-by: Greentime Hu <green.hu@gmail.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
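A minimal sketch of the SV39 shadow-layout arithmetic described above, assuming the usual KASAN scale of one shadow byte per 8 bytes of address space. The macro and function names (KERN_VIRT_SIZE, KASAN_SHADOW_START, KASAN_SHADOW_SIZE, KASAN_SHADOW_OFFSET, mem_to_shadow) mirror common KASAN conventions but are illustrative here; they are not necessarily the exact definitions added by this patch.

/* Illustrative only: not the patch's code. */
#include <stdio.h>

#define KERN_VIRT_SIZE		(1UL << 38)	/* kernel half of SV39: 2^39 / 2 */
#define KASAN_SHADOW_SCALE_SHIFT 3		/* 1 shadow byte covers 8 bytes */

/* Shadow starts at the beginning of kernel space: 2^64 - 2^38. */
#define KASAN_SHADOW_START	(~0UL - KERN_VIRT_SIZE + 1)
/* Shadow covers kernel space at 1/8 scale: 2^38 / 8 = 2^35 bytes. */
#define KASAN_SHADOW_SIZE	(KERN_VIRT_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
/* Offset chosen so the start of kernel space maps to the shadow start
 * (an assumption for this sketch, not necessarily the patch's value). */
#define KASAN_SHADOW_OFFSET	\
	(KASAN_SHADOW_START - (KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT))

/* Generic KASAN translation shape: shadow(addr) = (addr >> 3) + offset. */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	printf("shadow start: 0x%lx\n", KASAN_SHADOW_START);	/* 0xffffffc000000000 */
	printf("shadow size:  0x%lx\n", KASAN_SHADOW_SIZE);	/* 0x800000000 = 2^35 */
	printf("shadow of kernel-space start: 0x%lx\n",
	       mem_to_shadow(KASAN_SHADOW_START));
	return 0;
}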
Diffstat (limited to 'arch/riscv/include/asm/pgtable-64.h')
-rw-r--r--	arch/riscv/include/asm/pgtable-64.h | 5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 74630989006d..36e638d1dfe4 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -58,6 +58,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
 	return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
 }
 
+static inline struct page *pud_page(pud_t pud)
+{
+	return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
+}
+
 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
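For context, a hedged sketch of why a helper like pud_page() matters to KASAN setup: code that walks page tables sometimes needs the struct page behind a populated PUD entry, for example to test whether a range is still backed by the early shadow tables. The function name and surrounding logic below are illustrative assumptions, not code from this patch, though they resemble the kind of check the generic KASAN init code performs.

/* Illustrative only: hypothetical helper built on the new pud_page(). */
static bool pud_points_at_early_shadow(pud_t pud)
{
	if (pud_none(pud))
		return false;

	/* pud_page(), added above, turns the PUD's PFN into its struct page,
	 * which can then be compared against the early shadow PMD table. */
	return pud_page(pud) == virt_to_page(kasan_early_shadow_pmd);
}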