Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/memory.h          |  8
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h       | 81
-rw-r--r--  arch/arm64/include/asm/mte.h             | 12
-rw-r--r--  arch/arm64/include/asm/pgtable.h         |  2
-rw-r--r--  arch/arm64/include/asm/sparsemem.h       |  2
-rw-r--r--  arch/arm64/include/asm/uaccess.h         | 66
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h  |  4
7 files changed, 100 insertions, 75 deletions
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index efcd68154a3a..c735afdf639b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -261,9 +261,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 }
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define arch_enable_tagging_sync()          mte_enable_kernel_sync()
-#define arch_enable_tagging_async()         mte_enable_kernel_async()
-#define arch_enable_tagging_asymm()         mte_enable_kernel_asymm()
+#define arch_enable_tag_checks_sync()       mte_enable_kernel_sync()
+#define arch_enable_tag_checks_async()      mte_enable_kernel_async()
+#define arch_enable_tag_checks_asymm()      mte_enable_kernel_asymm()
+#define arch_suppress_tag_checks_start()    mte_enable_tco()
+#define arch_suppress_tag_checks_stop()     mte_disable_tco()
 #define arch_force_async_tag_fault()        mte_check_tfsr_exit()
 #define arch_get_random_tag()               mte_get_random_tag()
 #define arch_get_mem_tag(addr)              mte_get_mem_tag(addr)
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 9f79425fc65a..2e98028c1965 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -13,9 +13,74 @@
 #include <linux/types.h>
 
+#ifdef CONFIG_KASAN_HW_TAGS
+
+/* Whether the MTE asynchronous mode is enabled. */
+DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
+
+static inline bool system_uses_mte_async_or_asymm_mode(void)
+{
+        return static_branch_unlikely(&mte_async_or_asymm_mode);
+}
+
+#else /* CONFIG_KASAN_HW_TAGS */
+
+static inline bool system_uses_mte_async_or_asymm_mode(void)
+{
+        return false;
+}
+
+#endif /* CONFIG_KASAN_HW_TAGS */
+
 #ifdef CONFIG_ARM64_MTE
 
 /*
+ * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
+ * affects EL0 and TCF affects EL1 irrespective of which TTBR is
+ * used.
+ * The kernel accesses TTBR0 usually with LDTR/STTR instructions
+ * when UAO is available, so these would act as EL0 accesses using
+ * TCF0.
+ * However futex.h code uses exclusives which would be executed as
+ * EL1, this can potentially cause a tag check fault even if the
+ * user disables TCF0.
+ *
+ * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
+ * and reset it in uaccess_disable().
+ *
+ * The Tag check override (TCO) bit disables temporarily the tag checking
+ * preventing the issue.
+ */
+static inline void mte_disable_tco(void)
+{
+        asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
+                                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void mte_enable_tco(void)
+{
+        asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+                                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+/*
+ * These functions disable tag checking only if in MTE async mode
+ * since the sync mode generates exceptions synchronously and the
+ * nofault or load_unaligned_zeropad can handle them.
+ */
+static inline void __mte_disable_tco_async(void)
+{
+        if (system_uses_mte_async_or_asymm_mode())
+                mte_disable_tco();
+}
+
+static inline void __mte_enable_tco_async(void)
+{
+        if (system_uses_mte_async_or_asymm_mode())
+                mte_enable_tco();
+}
+
+/*
 * These functions are meant to be only used from KASAN runtime through
 * the arch_*() interface defined in asm/memory.h.
 * These functions don't include system_supports_mte() checks,
@@ -138,6 +203,22 @@ void mte_enable_kernel_asymm(void);
 
 #else /* CONFIG_ARM64_MTE */
 
+static inline void mte_disable_tco(void)
+{
+}
+
+static inline void mte_enable_tco(void)
+{
+}
+
+static inline void __mte_disable_tco_async(void)
+{
+}
+
+static inline void __mte_enable_tco_async(void)
+{
+}
+
 static inline u8 mte_get_ptr_tag(void *ptr)
 {
         return 0xFF;
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 20dd06d70af5..c028afb1cd0b 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -178,14 +178,6 @@ static inline void mte_disable_tco_entry(struct task_struct *task)
 }
 
 #ifdef CONFIG_KASAN_HW_TAGS
-/* Whether the MTE asynchronous mode is enabled. */
-DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
-
-static inline bool system_uses_mte_async_or_asymm_mode(void)
-{
-        return static_branch_unlikely(&mte_async_or_asymm_mode);
-}
-
 void mte_check_tfsr_el1(void);
 
 static inline void mte_check_tfsr_entry(void)
@@ -212,10 +204,6 @@ static inline void mte_check_tfsr_exit(void)
         mte_check_tfsr_el1();
 }
 #else
-static inline bool system_uses_mte_async_or_asymm_mode(void)
-{
-        return false;
-}
 static inline void mte_check_tfsr_el1(void)
 {
 }
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b6ba466e2e8a..0bd18de9fd97 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -57,7 +57,7 @@ static inline bool arch_thp_swp_supported(void)
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
-#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
 
 /*
 * ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 4b73463423c3..5f5437621029 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -10,7 +10,7 @@
 /*
 * Section size must be at least 512MB for 64K base
 * page size config. Otherwise it will be less than
- * (MAX_ORDER - 1) and the build process will fail.
+ * MAX_ORDER and the build process will fail.
 */
 #ifdef CONFIG_ARM64_64K_PAGES
 #define SECTION_SIZE_BITS 29
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8209e6a86989..05f4fc265428 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -136,55 +136,9 @@ static inline void __uaccess_enable_hw_pan(void)
                 CONFIG_ARM64_PAN));
 }
 
-/*
- * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
- * affects EL0 and TCF affects EL1 irrespective of which TTBR is
- * used.
- * The kernel accesses TTBR0 usually with LDTR/STTR instructions
- * when UAO is available, so these would act as EL0 accesses using
- * TCF0.
- * However futex.h code uses exclusives which would be executed as
- * EL1, this can potentially cause a tag check fault even if the
- * user disables TCF0.
- *
- * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
- * and reset it in uaccess_disable().
- *
- * The Tag check override (TCO) bit disables temporarily the tag checking
- * preventing the issue.
- */
-static inline void __uaccess_disable_tco(void)
-{
-        asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
-                                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
-}
-
-static inline void __uaccess_enable_tco(void)
-{
-        asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
-                                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
-}
-
-/*
- * These functions disable tag checking only if in MTE async mode
- * since the sync mode generates exceptions synchronously and the
- * nofault or load_unaligned_zeropad can handle them.
- */
-static inline void __uaccess_disable_tco_async(void)
-{
-        if (system_uses_mte_async_or_asymm_mode())
-                __uaccess_disable_tco();
-}
-
-static inline void __uaccess_enable_tco_async(void)
-{
-        if (system_uses_mte_async_or_asymm_mode())
-                __uaccess_enable_tco();
-}
-
 static inline void uaccess_disable_privileged(void)
 {
-        __uaccess_disable_tco();
+        mte_disable_tco();
 
         if (uaccess_ttbr0_disable())
                 return;
@@ -194,7 +148,7 @@ static inline void uaccess_disable_privileged(void)
 
 static inline void uaccess_enable_privileged(void)
 {
-        __uaccess_enable_tco();
+        mte_enable_tco();
 
         if (uaccess_ttbr0_enable())
                 return;
@@ -302,8 +256,8 @@ do { \
 #define get_user __get_user
 
 /*
- * We must not call into the scheduler between __uaccess_enable_tco_async() and
- * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * We must not call into the scheduler between __mte_enable_tco_async() and
+ * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
 #define __get_kernel_nofault(dst, src, type, err_label) \
@@ -312,10 +266,10 @@ do { \
         __typeof__(src) __gkn_src = (src); \
         int __gkn_err = 0; \
 \
-        __uaccess_enable_tco_async(); \
+        __mte_enable_tco_async(); \
         __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
                       (__force type *)(__gkn_src), __gkn_err, K); \
-        __uaccess_disable_tco_async(); \
+        __mte_disable_tco_async(); \
 \
         if (unlikely(__gkn_err)) \
                 goto err_label; \
@@ -388,8 +342,8 @@ do { \
 #define put_user __put_user
 
 /*
- * We must not call into the scheduler between __uaccess_enable_tco_async() and
- * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * We must not call into the scheduler between __mte_enable_tco_async() and
+ * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
 #define __put_kernel_nofault(dst, src, type, err_label) \
@@ -398,10 +352,10 @@ do { \
         __typeof__(src) __pkn_src = (src); \
         int __pkn_err = 0; \
 \
-        __uaccess_enable_tco_async(); \
+        __mte_enable_tco_async(); \
         __raw_put_mem("str", *((type *)(__pkn_src)), \
                       (__force type *)(__pkn_dst), __pkn_err, K); \
-        __uaccess_disable_tco_async(); \
+        __mte_disable_tco_async(); \
 \
         if (unlikely(__pkn_err)) \
                 goto err_label; \
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 1c8e4f2490bf..f3b151ed0d7a 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -55,7 +55,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
         unsigned long ret;
 
-        __uaccess_enable_tco_async();
+        __mte_enable_tco_async();
 
         /* Load word from unaligned pointer addr */
         asm(
@@ -65,7 +65,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
         : "=&r" (ret)
         : "r" (addr), "Q" (*(unsigned long *)addr));
 
-        __uaccess_disable_tco_async();
+        __mte_disable_tco_async();
 
         return ret;
 }
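
The memory.h and uaccess.h hunks show the shape of the change: the TCO (Tag Check Override) helpers move into mte-kasan.h as mte_enable_tco()/mte_disable_tco(), the KASAN core can reach them through the new arch_suppress_tag_checks_start()/arch_suppress_tag_checks_stop() macros, and privileged uaccess simply brackets the window in which EL1 may touch user memory with tag checks suppressed. The following is a minimal, illustrative sketch of that bracketing pattern only, not kernel code: the stub bodies, the tag_checks_suppressed flag and the privileged_copy() caller are invented stand-ins for the SET_PSTATE_TCO() alternatives and the real uaccess_enable_privileged()/uaccess_disable_privileged() paths shown in the diff.

/*
 * Illustrative sketch: stand-ins for the helpers added to mte-kasan.h.
 * The real versions toggle PSTATE.TCO via ALTERNATIVE()/SET_PSTATE_TCO();
 * here they are plain stubs so the pattern compiles anywhere.
 */
#include <stdio.h>

static int tag_checks_suppressed;       /* models PSTATE.TCO == 1 */

static inline void mte_enable_tco(void)  { tag_checks_suppressed = 1; }
static inline void mte_disable_tco(void) { tag_checks_suppressed = 0; }

/* Hypothetical caller mirroring uaccess_enable/disable_privileged(). */
static void privileged_copy(char *dst, const char *src, unsigned long n)
{
        mte_enable_tco();               /* suppress tag checks before EL1 touches user memory */
        while (n--)
                *dst++ = *src++;        /* stands in for exclusives/LDR executed at EL1 */
        mte_disable_tco();              /* re-enable tag checking on the way out */
}

int main(void)
{
        char src[8] = "abcdefg", dst[8];

        privileged_copy(dst, src, sizeof(src));
        printf("%s (checks suppressed now: %d)\n", dst, tag_checks_suppressed);
        return 0;
}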
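
The renamed __mte_enable_tco_async()/__mte_disable_tco_async() helpers only suppress tag checks when the asynchronous or asymmetric MTE mode is active: a synchronous tag check fault on a nofault access or on load_unaligned_zeropad() is taken immediately and handled by the fixup, whereas an asynchronous fault would otherwise be reported later via TFSR. Below is a small sketch of that rule under the same caveats as above; the boolean flags and read_nofault() are hypothetical stand-ins for the static key and the __get_kernel_nofault() macro, not the kernel implementation.

/*
 * Illustrative sketch of the "suppress only in async/asymm mode" rule
 * behind __mte_enable_tco_async()/__mte_disable_tco_async().
 */
#include <stdbool.h>
#include <stdio.h>

static bool async_or_asymm_mode;        /* stands in for the static key */
static bool tco_set;                    /* stands in for PSTATE.TCO */

static inline void mte_enable_tco(void)  { tco_set = true; }
static inline void mte_disable_tco(void) { tco_set = false; }

static inline void __mte_enable_tco_async(void)
{
        /*
         * Sync mode raises the fault synchronously and the fixup handles
         * it, so TCO is only needed when faults are reported asynchronously.
         */
        if (async_or_asymm_mode)
                mte_enable_tco();
}

static inline void __mte_disable_tco_async(void)
{
        if (async_or_asymm_mode)
                mte_disable_tco();
}

/* Hypothetical nofault-style read mirroring the __get_kernel_nofault() pattern. */
static long read_nofault(long *dst, const long *src)
{
        __mte_enable_tco_async();       /* no call into the scheduler until the matching disable */
        *dst = *src;                    /* the potentially faulting access */
        __mte_disable_tco_async();
        return 0;
}

int main(void)
{
        long v = 42, out = 0;

        async_or_asymm_mode = true;
        read_nofault(&out, &v);
        printf("read %ld, TCO now %d\n", out, tco_set);
        return 0;
}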