author	Qing Zhang <zhangqing@loongson.cn>	2023-09-06 16:54:16 +0200
committer	Huacai Chen <chenhuacai@loongson.cn>	2023-09-06 16:54:16 +0200
commit	5aa4ac64e6add3e40d5049e31275b2822daf885d
tree	84a7ec4d51784b69c8af20f376eb16ebd72eeac5 /arch/loongarch/include
parent	LoongArch: Simplify the processing of jumping new kernel for KASLR
LoongArch: Add KASAN (Kernel Address Sanitizer) support
1/8 of kernel addresses is reserved for shadow memory. But for LoongArch there are a lot of holes between different segments, and the valid address space (256T available) is insufficient to map all these segments to KASAN shadow memory with the common formula provided by the KASAN core, namely:

	(addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET

So LoongArch uses an arch-specific mapping formula: different segments are mapped individually, and only limited lengths of these specific segments are mapped to shadow.

At the early boot stage the whole shadow region is populated with just one physical page (kasan_early_shadow_page). Later, this page is reused as a read-only zero shadow for memory that KASAN does not currently track. After the physical memory has been mapped, pages for shadow memory are allocated and mapped.

Functions like memset()/memcpy()/memmove() do a lot of memory accesses. If a bad pointer is passed to one of these functions, it is important that it be caught. The compiler's instrumentation cannot do this since these functions are written in assembly, so KASAN replaces these memory functions with manually instrumented variants. The original functions are declared as weak symbols so that the strong definitions in mm/kasan/kasan.c can replace them. The original functions also have aliases with a '__' prefix in their names, so the non-instrumented variants can be called when needed.

Signed-off-by: Qing Zhang <zhangqing@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
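To make the scheme concrete before the patch itself, here is a minimal C sketch contrasting the common formula with the per-segment translation; shadow_offset_for() is a hypothetical helper standing in for the per-segment XK*RANGE_*_SHADOW_OFFSET constants that asm/kasan.h defines below:

	/* Generic KASAN: one linear formula covers the whole address space. */
	static inline void *generic_mem_to_shadow(unsigned long addr)
	{
		return (void *)((addr >> KASAN_SHADOW_SCALE_SHIFT) +
				KASAN_SHADOW_OFFSET);
	}

	/*
	 * Per-segment sketch: the top 16 bits select the segment
	 * (0x8000/0x9000/0xffff), the low bits are the in-segment offset,
	 * and each segment gets its own shadow base, so the huge holes
	 * between segments consume no shadow memory at all.
	 */
	static inline void *segmented_mem_to_shadow(unsigned long addr)
	{
		unsigned long seg = addr >> 48;
		unsigned long off = addr & ((1UL << 48) - 1);

		/* shadow_offset_for() is illustrative, not a kernel API */
		return (void *)((off >> KASAN_SHADOW_SCALE_SHIFT) +
				shadow_offset_for(seg));
	}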
Diffstat (limited to 'arch/loongarch/include')
-rw-r--r--	arch/loongarch/include/asm/kasan.h	| 126
-rw-r--r--	arch/loongarch/include/asm/pgtable.h	|   7
-rw-r--r--	arch/loongarch/include/asm/string.h	|  20
3 files changed, 153 insertions(+), 0 deletions(-)
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
new file mode 100644
index 000000000000..deeff8158f45
--- /dev/null
+++ b/arch/loongarch/include/asm/kasan.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/mmzone.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#define __HAVE_ARCH_SHADOW_MAP
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+#define XRANGE_SHIFT (48)
+
+/* Valid address length */
+#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+/* Mask used to extract the valid address */
+#define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
+/* One segment whole address space size */
+#define XRANGE_SIZE (XRANGE_SHADOW_MASK + 1)
+
+/* 64-bit segment value. */
+#define XKPRANGE_UC_SEG (0x8000)
+#define XKPRANGE_CC_SEG (0x9000)
+#define XKVRANGE_VC_SEG (0xffff)
+
+/* Cached */
+#define XKPRANGE_CC_START CACHE_BASE
+#define XKPRANGE_CC_SIZE XRANGE_SIZE
+#define XKPRANGE_CC_KASAN_OFFSET (0)
+#define XKPRANGE_CC_SHADOW_SIZE (XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_CC_SHADOW_END (XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE)
+
+/* UnCached */
+#define XKPRANGE_UC_START UNCACHE_BASE
+#define XKPRANGE_UC_SIZE XRANGE_SIZE
+#define XKPRANGE_UC_KASAN_OFFSET XKPRANGE_CC_SHADOW_END
+#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
+
+/* VMALLOC (Cached or UnCached) */
+#define XKVRANGE_VC_START MODULES_VADDR
+#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
+#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
+#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
+
+/* KASAN shadow memory starts right after vmalloc. */
+#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
+#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
+#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+
+#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
+#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
+#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
+
+extern bool kasan_early_stage;
+extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+
+#define kasan_arch_is_ready kasan_arch_is_ready
+static __always_inline bool kasan_arch_is_ready(void)
+{
+ return !kasan_early_stage;
+}
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ if (!kasan_arch_is_ready()) {
+ return (void *)(kasan_early_shadow_page);
+ } else {
+ unsigned long maddr = (unsigned long)addr;
+ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+ unsigned long offset = 0;
+
+ maddr &= XRANGE_SHADOW_MASK;
+ switch (xrange) {
+ case XKPRANGE_CC_SEG:
+ offset = XKPRANGE_CC_SHADOW_OFFSET;
+ break;
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+ }
+}
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long addr = (unsigned long)shadow_addr;
+
+ if (unlikely(addr > KASAN_SHADOW_END) ||
+ unlikely(addr < KASAN_SHADOW_START)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_KASAN_H */
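To trace the translation end to end, a hand-worked sketch for a cached (XKPRANGE_CC) address; the input address is arbitrary, and the arithmetic assumes the 0x9000 segment is CACHE_BASE, as XKPRANGE_CC_SEG above indicates:

	addr   = 0x9000000012345678
	xrange = (addr >> XRANGE_SHIFT) & 0xffff  ->  0x9000 (XKPRANGE_CC_SEG)
	maddr  = addr & XRANGE_SHADOW_MASK        ->  0x12345678
	shadow = (maddr >> KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_SHADOW_OFFSET
	       = 0x2468acf + KASAN_SHADOW_START   (XKPRANGE_CC_KASAN_OFFSET is 0)

kasan_shadow_to_mem() inverts this by testing the shadow address against the three *_SHADOW_OFFSET bases in descending order and shifting back up by KASAN_SHADOW_SCALE_SHIFT.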
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 7e3708883994..e5675ce0907a 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -89,9 +89,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#endif
#define VMALLOC_START MODULES_END
+
+#ifndef CONFIG_KASAN
#define VMALLOC_END \
(vm_map_base + \
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
+#else
+#define VMALLOC_END \
+ (vm_map_base + \
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
+#endif

#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
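The only functional change in this hunk is the '/ 2': with KASAN enabled, vmalloc is confined to the lower half of the virtual range so that the shadow region laid out in asm/kasan.h fits above it. Rough illustrative arithmetic, assuming cpu_vabits = 48 and ignoring the PMD_SIZE/VMEMMAP_SIZE/KFENCE_AREA_SIZE subtractions:

	span = 1UL << 48                /* 256 TiB of virtual space     */
	without KASAN: VMALLOC_END ~ vm_map_base + span
	with KASAN:    VMALLOC_END ~ vm_map_base + span / 2  /* 128 TiB */
	                                /* upper half left for shadow   */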
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
index 7b29cc9c70aa..5bb5a90d2681 100644
--- a/arch/loongarch/include/asm/string.h
+++ b/arch/loongarch/include/asm/string.h
@@ -7,11 +7,31 @@
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);

#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);

#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memset(s, c, n) __memset(s, c, n)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif

#endif /* _ASM_STRING_H */
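Two mechanisms cooperate here, per the commit message: instrumented code links against KASAN's strong definitions, which override the weak assembly symbols and check the range before delegating to the '__'-prefixed variants, while the macros above route non-instrumented files straight to those variants. A minimal sketch of both sides; kasan_check_range_ok() is a hypothetical stand-in for KASAN's real range check, and the code paraphrases rather than copies the kernel sources:

	/* Strong definition on the KASAN side (paraphrase). */
	void *memset(void *s, int c, size_t n)
	{
		if (!kasan_check_range_ok(s, n))  /* hypothetical helper */
			return NULL;
		return __memset(s, c, n);         /* uninstrumented assembly */
	}

	/* In a file built without __SANITIZE_ADDRESS__, the macro above
	 * makes this call compile straight to __memset(), with no check: */
	void wipe(void *buf, size_t len)
	{
		memset(buf, 0, len);      /* becomes __memset(buf, 0, len) */
	}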