Diffstat (limited to 'arch/arm64/include/asm')
39 files changed, 527 insertions, 257 deletions
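A recurring theme in the hunks below is the switch from the old (addr, size) cache maintenance helpers such as __flush_dcache_area() to range-based (start, end) functions such as dcache_clean_inval_poc() (see the cacheflush.h hunk). As a rough sketch of what the conversion looks like for a caller, assuming only the new prototype from asm/cacheflush.h; the wrapper name flush_buf_to_poc() is invented for illustration and is not part of the patch:

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Hypothetical caller converted to the range-based interface. */
static void flush_buf_to_poc(void *buf, size_t len)
{
	/* Old style: __flush_dcache_area(buf, len); */
	/* New style: pass an exclusive [start, end) virtual address range. */
	dcache_clean_inval_poc((unsigned long)buf, (unsigned long)buf + len);
}

The updated gic_flush_dcache_to_poc() and kvm_flush_dcache_to_poc() macros below perform exactly this translation for their callers.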
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h index 8a078fc662ac..7e157ab6cd50 100644 --- a/arch/arm64/include/asm/alternative-macros.h +++ b/arch/arm64/include/asm/alternative-macros.h @@ -3,12 +3,10 @@ #define __ASM_ALTERNATIVE_MACROS_H #include <asm/cpucaps.h> +#include <asm/insn-def.h> #define ARM64_CB_PATCH ARM64_NCAPS -/* A64 instructions are always 32 bits. */ -#define AARCH64_INSN_SIZE 4 - #ifndef __ASSEMBLY__ #include <linux/stringify.h> @@ -197,11 +195,6 @@ alternative_endif #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) -.macro user_alt, label, oldinstr, newinstr, cond -9999: alternative_insn "\oldinstr", "\newinstr", \cond - _asm_extable 9999b, \label -.endm - #endif /* __ASSEMBLY__ */ /* diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 934b9be582d2..4ad22c3135db 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void) #define gic_read_lpir(c) readq_relaxed(c) #define gic_write_lpir(v, c) writeq_relaxed(v, c) -#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) +#define gic_flush_dcache_to_poc(a,l) \ + dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l)) #define gits_read_baser(c) readq_relaxed(c) #define gits_write_baser(v, c) writeq_relaxed(v, c) diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h index 1c9a3a0c5fa5..ec1d9655f885 100644 --- a/arch/arm64/include/asm/asm-prototypes.h +++ b/arch/arm64/include/asm/asm-prototypes.h @@ -23,4 +23,10 @@ long long __ashlti3(long long a, int b); long long __ashrti3(long long a, int b); long long __lshrti3(long long a, int b); +/* + * This function uses a custom calling convention and cannot be called from C so + * this prototype is not entirely accurate. + */ +void __hwasan_tag_mismatch(unsigned long addr, unsigned long access_info); + #endif /* __ASM_PROTOTYPES_H */ diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h index 8ca2dc0661ee..f1bba5fc61c4 100644 --- a/arch/arm64/include/asm/asm_pointer_auth.h +++ b/arch/arm64/include/asm/asm_pointer_auth.h @@ -7,19 +7,7 @@ #include <asm/cpufeature.h> #include <asm/sysreg.h> -#ifdef CONFIG_ARM64_PTR_AUTH -/* - * thread.keys_user.ap* as offset exceeds the #imm offset range - * so use the base value of ldp as thread.keys_user and offset as - * thread.keys_user.ap*. - */ - .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 - mov \tmp1, #THREAD_KEYS_USER - add \tmp1, \tsk, \tmp1 - ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA] - msr_s SYS_APIAKEYLO_EL1, \tmp2 - msr_s SYS_APIAKEYHI_EL1, \tmp3 - .endm +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3 mov \tmp1, #THREAD_KEYS_KERNEL @@ -42,6 +30,33 @@ alternative_if ARM64_HAS_ADDRESS_AUTH alternative_else_nop_endif .endm +#else /* CONFIG_ARM64_PTR_AUTH_KERNEL */ + + .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3 + .endm + + .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3 + .endm + + .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3 + .endm + +#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */ + +#ifdef CONFIG_ARM64_PTR_AUTH +/* + * thread.keys_user.ap* as offset exceeds the #imm offset range + * so use the base value of ldp as thread.keys_user and offset as + * thread.keys_user.ap*. 
+ */ + .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 + mov \tmp1, #THREAD_KEYS_USER + add \tmp1, \tsk, \tmp1 + ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA] + msr_s SYS_APIAKEYLO_EL1, \tmp2 + msr_s SYS_APIAKEYHI_EL1, \tmp3 + .endm + .macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3 mrs \tmp1, id_aa64isar1_el1 ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8 @@ -64,17 +79,11 @@ alternative_else_nop_endif .Lno_addr_auth\@: .endm -#else /* CONFIG_ARM64_PTR_AUTH */ +#else /* !CONFIG_ARM64_PTR_AUTH */ .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3 .endm - .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3 - .endm - - .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3 - .endm - #endif /* CONFIG_ARM64_PTR_AUTH */ #endif /* __ASM_ASM_POINTER_AUTH_H */ diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 8418c1bd8f04..89faca0e740d 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -130,15 +130,27 @@ alternative_endif .endm /* - * Emit an entry into the exception table + * Create an exception table entry for `insn`, which will branch to `fixup` + * when an unhandled fault is taken. */ - .macro _asm_extable, from, to + .macro _asm_extable, insn, fixup .pushsection __ex_table, "a" .align 3 - .long (\from - .), (\to - .) + .long (\insn - .), (\fixup - .) .popsection .endm +/* + * Create an exception table entry for `insn` if `fixup` is provided. Otherwise + * do nothing. + */ + .macro _cond_extable, insn, fixup + .ifnc \fixup, + _asm_extable \insn, \fixup + .endif + .endm + + #define USER(l, x...) \ 9999: x; \ _asm_extable 9999b, l @@ -232,17 +244,25 @@ lr .req x30 // link register * @dst: destination register */ #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__) - .macro this_cpu_offset, dst + .macro get_this_cpu_offset, dst mrs \dst, tpidr_el2 .endm #else - .macro this_cpu_offset, dst + .macro get_this_cpu_offset, dst alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs \dst, tpidr_el1 alternative_else mrs \dst, tpidr_el2 alternative_endif .endm + + .macro set_this_cpu_offset, src +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN + msr tpidr_el1, \src +alternative_else + msr tpidr_el2, \src +alternative_endif + .endm #endif /* @@ -253,7 +273,7 @@ alternative_endif .macro adr_this_cpu, dst, sym, tmp adrp \tmp, \sym add \dst, \tmp, #:lo12:\sym - this_cpu_offset \tmp + get_this_cpu_offset \tmp add \dst, \dst, \tmp .endm @@ -264,7 +284,7 @@ alternative_endif */ .macro ldr_this_cpu dst, sym, tmp adr_l \dst, \sym - this_cpu_offset \tmp + get_this_cpu_offset \tmp ldr \dst, [\dst, \tmp] .endm @@ -375,51 +395,53 @@ alternative_cb_end bfi \tcr, \tmp0, \pos, #3 .endm + .macro __dcache_op_workaround_clean_cache, op, addr +alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE + dc \op, \addr +alternative_else + dc civac, \addr +alternative_endif + .endm + /* * Macro to perform a data cache maintenance for the interval - * [kaddr, kaddr + size) + * [start, end) * * op: operation passed to dc instruction * domain: domain used in dsb instruciton - * kaddr: starting virtual address of the region - * size: size of the region - * Corrupts: kaddr, size, tmp1, tmp2 + * start: starting virtual address of the region + * end: end virtual address of the region + * fixup: optional label to branch to on user fault + * Corrupts: start, end, tmp1, tmp2 */ - .macro __dcache_op_workaround_clean_cache, op, kaddr -alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE - dc \op, \kaddr -alternative_else - dc civac, 
\kaddr -alternative_endif - .endm - - .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2 + .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup dcache_line_size \tmp1, \tmp2 - add \size, \kaddr, \size sub \tmp2, \tmp1, #1 - bic \kaddr, \kaddr, \tmp2 -9998: + bic \start, \start, \tmp2 +.Ldcache_op\@: .ifc \op, cvau - __dcache_op_workaround_clean_cache \op, \kaddr + __dcache_op_workaround_clean_cache \op, \start .else .ifc \op, cvac - __dcache_op_workaround_clean_cache \op, \kaddr + __dcache_op_workaround_clean_cache \op, \start .else .ifc \op, cvap - sys 3, c7, c12, 1, \kaddr // dc cvap + sys 3, c7, c12, 1, \start // dc cvap .else .ifc \op, cvadp - sys 3, c7, c13, 1, \kaddr // dc cvadp + sys 3, c7, c13, 1, \start // dc cvadp .else - dc \op, \kaddr + dc \op, \start .endif .endif .endif .endif - add \kaddr, \kaddr, \tmp1 - cmp \kaddr, \size - b.lo 9998b + add \start, \start, \tmp1 + cmp \start, \end + b.lo .Ldcache_op\@ dsb \domain + + _cond_extable .Ldcache_op\@, \fixup .endm /* @@ -427,20 +449,22 @@ alternative_endif * [start, end) * * start, end: virtual addresses describing the region - * label: A label to branch to on user fault. + * fixup: optional label to branch to on user fault * Corrupts: tmp1, tmp2 */ - .macro invalidate_icache_by_line start, end, tmp1, tmp2, label + .macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup icache_line_size \tmp1, \tmp2 sub \tmp2, \tmp1, #1 bic \tmp2, \start, \tmp2 -9997: -USER(\label, ic ivau, \tmp2) // invalidate I line PoU +.Licache_op\@: + ic ivau, \tmp2 // invalidate I line PoU add \tmp2, \tmp2, \tmp1 cmp \tmp2, \end - b.lo 9997b + b.lo .Licache_op\@ dsb ish isb + + _cond_extable .Licache_op\@, \fixup .endm /* @@ -745,7 +769,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU cbz \tmp, \lbl #endif adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING - this_cpu_offset \tmp2 + get_this_cpu_offset \tmp2 ldr w\tmp, [\tmp, \tmp2] cbnz w\tmp, \lbl // yield on pending softirq in task context .Lnoyield_\@: diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index a074459f8f2f..a9c0716e7440 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -47,7 +47,7 @@ * cache before the transfer is done, causing old data to be seen by * the CPU. */ -#define ARCH_DMA_MINALIGN (128) +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES #ifdef CONFIG_KASAN_SW_TAGS #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 52e5c1623224..543c997eb3b7 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -30,45 +30,58 @@ * the implementation assumes non-aliasing VIPT D-cache and (aliasing) * VIPT I-cache. * - * flush_icache_range(start, end) + * All functions below apply to the interval [start, end) + * - start - virtual start address (inclusive) + * - end - virtual end address (exclusive) * - * Ensure coherency between the I-cache and the D-cache in the - * region described by start, end. - * - start - virtual start address - * - end - virtual end address + * caches_clean_inval_pou(start, end) * - * invalidate_icache_range(start, end) + * Ensure coherency between the I-cache and the D-cache region to + * the Point of Unification. * - * Invalidate the I-cache in the region described by start, end. 
- * - start - virtual start address - * - end - virtual end address + * caches_clean_inval_user_pou(start, end) * - * __flush_cache_user_range(start, end) + * Ensure coherency between the I-cache and the D-cache region to + * the Point of Unification. + * Use only if the region might access user memory. * - * Ensure coherency between the I-cache and the D-cache in the - * region described by start, end. - * - start - virtual start address - * - end - virtual end address + * icache_inval_pou(start, end) * - * __flush_dcache_area(kaddr, size) + * Invalidate I-cache region to the Point of Unification. * - * Ensure that the data held in page is written back. - * - kaddr - page address - * - size - region size + * dcache_clean_inval_poc(start, end) + * + * Clean and invalidate D-cache region to the Point of Coherency. + * + * dcache_inval_poc(start, end) + * + * Invalidate D-cache region to the Point of Coherency. + * + * dcache_clean_poc(start, end) + * + * Clean D-cache region to the Point of Coherency. + * + * dcache_clean_pop(start, end) + * + * Clean D-cache region to the Point of Persistence. + * + * dcache_clean_pou(start, end) + * + * Clean D-cache region to the Point of Unification. */ -extern void __flush_icache_range(unsigned long start, unsigned long end); -extern int invalidate_icache_range(unsigned long start, unsigned long end); -extern void __flush_dcache_area(void *addr, size_t len); -extern void __inval_dcache_area(void *addr, size_t len); -extern void __clean_dcache_area_poc(void *addr, size_t len); -extern void __clean_dcache_area_pop(void *addr, size_t len); -extern void __clean_dcache_area_pou(void *addr, size_t len); -extern long __flush_cache_user_range(unsigned long start, unsigned long end); -extern void sync_icache_aliases(void *kaddr, unsigned long len); +extern void caches_clean_inval_pou(unsigned long start, unsigned long end); +extern void icache_inval_pou(unsigned long start, unsigned long end); +extern void dcache_clean_inval_poc(unsigned long start, unsigned long end); +extern void dcache_inval_poc(unsigned long start, unsigned long end); +extern void dcache_clean_poc(unsigned long start, unsigned long end); +extern void dcache_clean_pop(unsigned long start, unsigned long end); +extern void dcache_clean_pou(unsigned long start, unsigned long end); +extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end); +extern void sync_icache_aliases(unsigned long start, unsigned long end); static inline void flush_icache_range(unsigned long start, unsigned long end) { - __flush_icache_range(start, end); + caches_clean_inval_pou(start, end); /* * IPI all online CPUs so that they undergo a context synchronization @@ -122,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); -static __always_inline void __flush_icache_all(void) +static __always_inline void icache_inval_all_pou(void) { if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) return; diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 7faae6ff3ab4..0f6d16faa540 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -12,26 +12,7 @@ /* * Records attributes of an individual CPU. 
*/ -struct cpuinfo_arm64 { - struct cpu cpu; - struct kobject kobj; - u32 reg_ctr; - u32 reg_cntfrq; - u32 reg_dczid; - u32 reg_midr; - u32 reg_revidr; - - u64 reg_id_aa64dfr0; - u64 reg_id_aa64dfr1; - u64 reg_id_aa64isar0; - u64 reg_id_aa64isar1; - u64 reg_id_aa64mmfr0; - u64 reg_id_aa64mmfr1; - u64 reg_id_aa64mmfr2; - u64 reg_id_aa64pfr0; - u64 reg_id_aa64pfr1; - u64 reg_id_aa64zfr0; - +struct cpuinfo_32bit { u32 reg_id_dfr0; u32 reg_id_dfr1; u32 reg_id_isar0; @@ -54,6 +35,30 @@ struct cpuinfo_arm64 { u32 reg_mvfr0; u32 reg_mvfr1; u32 reg_mvfr2; +}; + +struct cpuinfo_arm64 { + struct cpu cpu; + struct kobject kobj; + u64 reg_ctr; + u64 reg_cntfrq; + u64 reg_dczid; + u64 reg_midr; + u64 reg_revidr; + u64 reg_gmid; + + u64 reg_id_aa64dfr0; + u64 reg_id_aa64dfr1; + u64 reg_id_aa64isar0; + u64 reg_id_aa64isar1; + u64 reg_id_aa64mmfr0; + u64 reg_id_aa64mmfr1; + u64 reg_id_aa64mmfr2; + u64 reg_id_aa64pfr0; + u64 reg_id_aa64pfr1; + u64 reg_id_aa64zfr0; + + struct cpuinfo_32bit aarch32; /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */ u64 reg_zcr; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 338840c00e8e..9bb9d11750d7 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -619,6 +619,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0) return val > 0; } +static inline bool id_aa64pfr1_mte(u64 pfr1) +{ + u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT); + + return val >= ID_AA64PFR1_MTE; +} + void __init setup_cpu_features(void); void check_local_cpu_capabilities(void); @@ -630,9 +637,15 @@ static inline bool cpu_supports_mixed_endian_el0(void) return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); } +const struct cpumask *system_32bit_el0_cpumask(void); +DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0); + static inline bool system_supports_32bit_el0(void) { - return cpus_have_const_cap(ARM64_HAS_32BIT_EL0); + u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + return static_branch_unlikely(&arm64_mismatched_32bit_el0) || + id_aa64pfr0_32bit_el0(pfr0); } static inline bool system_supports_4kb_granule(void) diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index 3c5ddb429ea2..14a19d1141bd 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h @@ -18,4 +18,39 @@ static inline int arm_cpuidle_suspend(int index) return -EOPNOTSUPP; } #endif + +#ifdef CONFIG_ARM64_PSEUDO_NMI +#include <asm/arch_gicv3.h> + +struct arm_cpuidle_irq_context { + unsigned long pmr; + unsigned long daif_bits; +}; + +#define arm_cpuidle_save_irq_context(__c) \ + do { \ + struct arm_cpuidle_irq_context *c = __c; \ + if (system_uses_irq_prio_masking()) { \ + c->daif_bits = read_sysreg(daif); \ + write_sysreg(c->daif_bits | PSR_I_BIT | PSR_F_BIT, \ + daif); \ + c->pmr = gic_read_pmr(); \ + gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); \ + } \ + } while (0) + +#define arm_cpuidle_restore_irq_context(__c) \ + do { \ + struct arm_cpuidle_irq_context *c = __c; \ + if (system_uses_irq_prio_masking()) { \ + gic_write_pmr(c->pmr); \ + write_sysreg(c->daif_bits, daif); \ + } \ + } while (0) +#else +struct arm_cpuidle_irq_context { }; + +#define arm_cpuidle_save_irq_context(c) (void)c +#define arm_cpuidle_restore_irq_context(c) (void)c +#endif #endif diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 3578aba9c608..1bed37eb013a 100644 --- a/arch/arm64/include/asm/efi.h +++ 
b/arch/arm64/include/asm/efi.h @@ -137,7 +137,7 @@ void efi_virtmap_unload(void); static inline void efi_capsule_flush_cache_range(void *addr, int size) { - __flush_dcache_area(addr, size); + dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size); } #endif /* _ASM_EFI_H */ diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 6546158d2f2d..4afbc45b8bb0 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -31,20 +31,35 @@ static inline u32 disr_to_esr(u64 disr) return esr; } -asmlinkage void el1_sync_handler(struct pt_regs *regs); -asmlinkage void el0_sync_handler(struct pt_regs *regs); -asmlinkage void el0_sync_compat_handler(struct pt_regs *regs); +asmlinkage void handle_bad_stack(struct pt_regs *regs); -asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs); -asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs); +asmlinkage void el1t_64_sync_handler(struct pt_regs *regs); +asmlinkage void el1t_64_irq_handler(struct pt_regs *regs); +asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs); +asmlinkage void el1t_64_error_handler(struct pt_regs *regs); + +asmlinkage void el1h_64_sync_handler(struct pt_regs *regs); +asmlinkage void el1h_64_irq_handler(struct pt_regs *regs); +asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs); +asmlinkage void el1h_64_error_handler(struct pt_regs *regs); + +asmlinkage void el0t_64_sync_handler(struct pt_regs *regs); +asmlinkage void el0t_64_irq_handler(struct pt_regs *regs); +asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs); +asmlinkage void el0t_64_error_handler(struct pt_regs *regs); + +asmlinkage void el0t_32_sync_handler(struct pt_regs *regs); +asmlinkage void el0t_32_irq_handler(struct pt_regs *regs); +asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs); +asmlinkage void el0t_32_error_handler(struct pt_regs *regs); + +asmlinkage void call_on_irq_stack(struct pt_regs *regs, + void (*func)(struct pt_regs *)); asmlinkage void enter_from_user_mode(void); asmlinkage void exit_to_user_mode(void); -void arm64_enter_nmi(struct pt_regs *regs); -void arm64_exit_nmi(struct pt_regs *regs); void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs); void do_bti(struct pt_regs *regs); -asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr); void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, struct pt_regs *regs); void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs); @@ -57,4 +72,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs); void do_el0_svc(struct pt_regs *regs); void do_el0_svc_compat(struct pt_regs *regs); void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr); +void do_serror(struct pt_regs *regs, unsigned int esr); + +void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far); #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 2599504674b5..c072161d5c65 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -69,7 +69,7 @@ static inline void *sve_pffr(struct thread_struct *thread) extern void sve_save_state(void *state, u32 *pfpsr); extern void sve_load_state(void const *state, u32 const *pfpsr, unsigned long vq_minus_1); -extern void sve_flush_live(void); +extern void sve_flush_live(unsigned long vq_minus_1); extern void sve_load_from_fpsimd_state(struct user_fpsimd_state 
const *state, unsigned long vq_minus_1); extern unsigned int sve_get_vl(void); diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index a2563992d2dc..059204477ce6 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -213,8 +213,10 @@ mov v\nz\().16b, v\nz\().16b .endm -.macro sve_flush +.macro sve_flush_z _for n, 0, 31, _sve_flush_z \n +.endm +.macro sve_flush_p_ffr _for n, 0, 15, _sve_pfalse \n _sve_wrffr 0 .endm diff --git a/arch/arm64/include/asm/insn-def.h b/arch/arm64/include/asm/insn-def.h new file mode 100644 index 000000000000..2c075f615c6a --- /dev/null +++ b/arch/arm64/include/asm/insn-def.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_INSN_DEF_H +#define __ASM_INSN_DEF_H + +/* A64 instructions are always 32 bits. */ +#define AARCH64_INSN_SIZE 4 + +#endif /* __ASM_INSN_DEF_H */ diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 4ebb9c054ccc..6b776c8667b2 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -10,7 +10,7 @@ #include <linux/build_bug.h> #include <linux/types.h> -#include <asm/alternative.h> +#include <asm/insn-def.h> #ifndef __ASSEMBLY__ /* @@ -30,6 +30,7 @@ */ enum aarch64_insn_encoding_class { AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */ + AARCH64_INSN_CLS_SVE, /* SVE instructions */ AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */ AARCH64_INSN_CLS_DP_REG, /* Data processing - register */ AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */ @@ -294,6 +295,12 @@ __AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000) __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000) __AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000) __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000) +__AARCH64_INSN_FUNCS(store_imm, 0x3FC00000, 0x39000000) +__AARCH64_INSN_FUNCS(load_imm, 0x3FC00000, 0x39400000) +__AARCH64_INSN_FUNCS(store_pre, 0x3FE00C00, 0x38000C00) +__AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00) +__AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400) +__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400) __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) __AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000) __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) @@ -302,6 +309,8 @@ __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000) __AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000) __AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000) __AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000) +__AARCH64_INSN_FUNCS(stp, 0x7FC00000, 0x29000000) +__AARCH64_INSN_FUNCS(ldp, 0x7FC00000, 0x29400000) __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000) __AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000) __AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000) @@ -334,6 +343,7 @@ __AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00) __AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000) __AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000) __AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000) +__AARCH64_INSN_FUNCS(mov_reg, 0x7FE0FFE0, 0x2A0003E0) __AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000) __AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000) __AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000) @@ -368,6 +378,14 @@ __AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF) __AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000) __AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F) __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000) +__AARCH64_INSN_FUNCS(dmb, 0xFFFFF0FF, 0xD50330BF) 
+__AARCH64_INSN_FUNCS(dsb_base, 0xFFFFF0FF, 0xD503309F) +__AARCH64_INSN_FUNCS(dsb_nxs, 0xFFFFF3FF, 0xD503323F) +__AARCH64_INSN_FUNCS(isb, 0xFFFFF0FF, 0xD50330DF) +__AARCH64_INSN_FUNCS(sb, 0xFFFFFFFF, 0xD50330FF) +__AARCH64_INSN_FUNCS(clrex, 0xFFFFF0FF, 0xD503305F) +__AARCH64_INSN_FUNCS(ssbb, 0xFFFFFFFF, 0xD503309F) +__AARCH64_INSN_FUNCS(pssbb, 0xFFFFFFFF, 0xD503349F) #undef __AARCH64_INSN_FUNCS @@ -379,8 +397,47 @@ static inline bool aarch64_insn_is_adr_adrp(u32 insn) return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn); } -int aarch64_insn_read(void *addr, u32 *insnp); -int aarch64_insn_write(void *addr, u32 insn); +static inline bool aarch64_insn_is_dsb(u32 insn) +{ + return aarch64_insn_is_dsb_base(insn) || aarch64_insn_is_dsb_nxs(insn); +} + +static inline bool aarch64_insn_is_barrier(u32 insn) +{ + return aarch64_insn_is_dmb(insn) || aarch64_insn_is_dsb(insn) || + aarch64_insn_is_isb(insn) || aarch64_insn_is_sb(insn) || + aarch64_insn_is_clrex(insn) || aarch64_insn_is_ssbb(insn) || + aarch64_insn_is_pssbb(insn); +} + +static inline bool aarch64_insn_is_store_single(u32 insn) +{ + return aarch64_insn_is_store_imm(insn) || + aarch64_insn_is_store_pre(insn) || + aarch64_insn_is_store_post(insn); +} + +static inline bool aarch64_insn_is_store_pair(u32 insn) +{ + return aarch64_insn_is_stp(insn) || + aarch64_insn_is_stp_pre(insn) || + aarch64_insn_is_stp_post(insn); +} + +static inline bool aarch64_insn_is_load_single(u32 insn) +{ + return aarch64_insn_is_load_imm(insn) || + aarch64_insn_is_load_pre(insn) || + aarch64_insn_is_load_post(insn); +} + +static inline bool aarch64_insn_is_load_pair(u32 insn) +{ + return aarch64_insn_is_ldp(insn) || + aarch64_insn_is_ldp_pre(insn) || + aarch64_insn_is_ldp_post(insn); +} + enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); bool aarch64_insn_uses_literal(u32 insn); bool aarch64_insn_is_branch(u32 insn); @@ -487,9 +544,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base, s32 aarch64_get_branch_offset(u32 insn); u32 aarch64_set_branch_offset(u32 insn, s32 offset); -int aarch64_insn_patch_text_nosync(void *addr, u32 insn); -int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); - s32 aarch64_insn_adrp_get_offset(u32 insn); u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset); @@ -506,6 +560,7 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn); typedef bool (pstate_check_t)(unsigned long); extern pstate_check_t * const aarch32_opcode_cond_checks[16]; + #endif /* __ASSEMBLY__ */ #endif /* __ASM_INSN_H */ diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index d44df9d62fc9..3512184cfec1 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -18,9 +18,9 @@ * 64K (section size = 512M). */ #ifdef CONFIG_ARM64_4K_PAGES -#define ARM64_SWAPPER_USES_SECTION_MAPS 1 +#define ARM64_KERNEL_USES_PMD_MAPS 1 #else -#define ARM64_SWAPPER_USES_SECTION_MAPS 0 +#define ARM64_KERNEL_USES_PMD_MAPS 0 #endif /* @@ -33,7 +33,7 @@ * VA range, so pages required to map highest possible PA are reserved in all * cases. 
*/ -#if ARM64_SWAPPER_USES_SECTION_MAPS +#if ARM64_KERNEL_USES_PMD_MAPS #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1) #define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1) #else @@ -90,9 +90,9 @@ #define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE) /* Initial memory map size */ -#if ARM64_SWAPPER_USES_SECTION_MAPS -#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT -#define SWAPPER_BLOCK_SIZE SECTION_SIZE +#if ARM64_KERNEL_USES_PMD_MAPS +#define SWAPPER_BLOCK_SHIFT PMD_SHIFT +#define SWAPPER_BLOCK_SIZE PMD_SIZE #define SWAPPER_TABLE_SHIFT PUD_SHIFT #else #define SWAPPER_BLOCK_SHIFT PAGE_SHIFT @@ -100,16 +100,13 @@ #define SWAPPER_TABLE_SHIFT PMD_SHIFT #endif -/* The size of the initial kernel direct mapping */ -#define SWAPPER_INIT_MAP_SIZE (_AC(1, UL) << SWAPPER_TABLE_SHIFT) - /* * Initial memory map attributes. */ #define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#if ARM64_SWAPPER_USES_SECTION_MAPS +#if ARM64_KERNEL_USES_PMD_MAPS #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) #else #define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) @@ -125,7 +122,7 @@ #if defined(CONFIG_ARM64_4K_PAGES) #define ARM64_MEMSTART_SHIFT PUD_SHIFT #elif defined(CONFIG_ARM64_16K_PAGES) -#define ARM64_MEMSTART_SHIFT (PMD_SHIFT + 5) +#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT #else #define ARM64_MEMSTART_SHIFT PMD_SHIFT #endif diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 5e9b33cbac51..9f0bf2109be7 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -8,6 +8,7 @@ #define __ARM_KVM_ASM_H__ #include <asm/hyp_image.h> +#include <asm/insn.h> #include <asm/virt.h> #define ARM_EXIT_WITH_SERROR_BIT 31 diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 25ed956f9af1..f4cbfa9025a8 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -180,7 +180,8 @@ static inline void *__kvm_vector_slot2addr(void *base, struct kvm; -#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) +#define kvm_flush_dcache_to_poc(a,l) \ + dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l)) static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { @@ -208,12 +209,12 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn, { if (icache_is_aliasing()) { /* any kind of VIPT cache */ - __flush_icache_all(); + icache_inval_all_pou(); } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) { /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */ void *va = page_address(pfn_to_page(pfn)); - invalidate_icache_range((unsigned long)va, + icache_inval_pou((unsigned long)va, (unsigned long)va + size); } } diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index ba89a9af820a..9906541a6861 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -56,8 +56,16 @@ SYM_FUNC_START_ALIAS(__pi_##x); \ SYM_FUNC_START_WEAK(x) +#define SYM_FUNC_START_WEAK_ALIAS_PI(x) \ + SYM_FUNC_START_ALIAS(__pi_##x); \ + SYM_START(x, SYM_L_WEAK, SYM_A_ALIGN) + #define SYM_FUNC_END_PI(x) \ SYM_FUNC_END(x); \ SYM_FUNC_END_ALIAS(__pi_##x) +#define SYM_FUNC_END_ALIAS_PI(x) \ + SYM_FUNC_END_ALIAS(x); \ + SYM_FUNC_END_ALIAS(__pi_##x) + #endif diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 87b90dc27a43..7b360960cc35 100644 --- 
a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -135,10 +135,8 @@ #define MT_NORMAL 0 #define MT_NORMAL_TAGGED 1 #define MT_NORMAL_NC 2 -#define MT_NORMAL_WT 3 -#define MT_DEVICE_nGnRnE 4 -#define MT_DEVICE_nGnRE 5 -#define MT_DEVICE_GRE 6 +#define MT_DEVICE_nGnRnE 3 +#define MT_DEVICE_nGnRE 4 /* * Memory types for Stage-2 translation diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index d3cef9133539..eeb210997149 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -177,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, return; if (mm == &init_mm) - ttbr = __pa_symbol(reserved_pg_dir); + ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); else - ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; + ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48; WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); } diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h index 810045628c66..a11ccadd47d2 100644 --- a/arch/arm64/include/asm/module.lds.h +++ b/arch/arm64/include/asm/module.lds.h @@ -1,7 +1,20 @@ -#ifdef CONFIG_ARM64_MODULE_PLTS SECTIONS { +#ifdef CONFIG_ARM64_MODULE_PLTS .plt 0 (NOLOAD) : { BYTE(0) } .init.plt 0 (NOLOAD) : { BYTE(0) } .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } -} #endif + +#ifdef CONFIG_KASAN_SW_TAGS + /* + * Outlined checks go into comdat-deduplicated sections named .text.hot. + * Because they are in comdats they are not combined by the linker and + * we otherwise end up with multiple sections with the same .text.hot + * name in the .ko file. The kernel module loader warns if it sees + * multiple sections with the same name so we use this sections + * directive to force them into a single section and silence the + * warning. + */ + .text.hot : { *(.text.hot) } +#endif +} diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h index ddd4d17cf9a0..d952352bd008 100644 --- a/arch/arm64/include/asm/mte-kasan.h +++ b/arch/arm64/include/asm/mte-kasan.h @@ -48,43 +48,84 @@ static inline u8 mte_get_random_tag(void) return mte_get_ptr_tag(addr); } +static inline u64 __stg_post(u64 p) +{ + asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16" + : "+r"(p) + : + : "memory"); + return p; +} + +static inline u64 __stzg_post(u64 p) +{ + asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16" + : "+r"(p) + : + : "memory"); + return p; +} + +static inline void __dc_gva(u64 p) +{ + asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory"); +} + +static inline void __dc_gzva(u64 p) +{ + asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory"); +} + /* * Assign allocation tags for a region of memory based on the pointer tag. * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and - * size must be non-zero and MTE_GRANULE_SIZE aligned. + * size must be MTE_GRANULE_SIZE aligned. */ -static inline void mte_set_mem_tag_range(void *addr, size_t size, - u8 tag, bool init) +static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag, + bool init) { - u64 curr, end; + u64 curr, mask, dczid_bs, end1, end2, end3; - if (!size) - return; + /* Read DC G(Z)VA block size from the system register. */ + dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf); curr = (u64)__tag_set(addr, tag); - end = curr + size; + mask = dczid_bs - 1; + /* STG/STZG up to the end of the first block. 
*/ + end1 = curr | mask; + end3 = curr + size; + /* DC GVA / GZVA in [end1, end2) */ + end2 = end3 & ~mask; /* - * 'asm volatile' is required to prevent the compiler to move - * the statement outside of the loop. + * The following code uses STG on the first DC GVA block even if the + * start address is aligned - it appears to be faster than an alignment + * check + conditional branch. Also, if the range size is at least 2 DC + * GVA blocks, the first two loops can use post-condition to save one + * branch each. */ - if (init) { - do { - asm volatile(__MTE_PREAMBLE "stzg %0, [%0]" - : - : "r" (curr) - : "memory"); - curr += MTE_GRANULE_SIZE; - } while (curr != end); - } else { - do { - asm volatile(__MTE_PREAMBLE "stg %0, [%0]" - : - : "r" (curr) - : "memory"); - curr += MTE_GRANULE_SIZE; - } while (curr != end); - } +#define SET_MEMTAG_RANGE(stg_post, dc_gva) \ + do { \ + if (size >= 2 * dczid_bs) { \ + do { \ + curr = stg_post(curr); \ + } while (curr < end1); \ + \ + do { \ + dc_gva(curr); \ + curr += dczid_bs; \ + } while (curr < end2); \ + } \ + \ + while (curr < end3) \ + curr = stg_post(curr); \ + } while (0) + + if (init) + SET_MEMTAG_RANGE(__stzg_post, __dc_gzva); + else + SET_MEMTAG_RANGE(__stg_post, __dc_gva); +#undef SET_MEMTAG_RANGE } void mte_enable_kernel_sync(void); diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index bc88a1ced0d7..67bf259ae768 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -37,6 +37,7 @@ void mte_free_tag_storage(char *storage); /* track which pages have valid allocation tags */ #define PG_mte_tagged PG_arch_2 +void mte_zero_clear_page_tags(void *addr); void mte_sync_tags(pte_t *ptep, pte_t pte); void mte_copy_page_tags(void *kto, const void *kfrom); void mte_thread_init_user(void); @@ -53,6 +54,9 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request, /* unused if !CONFIG_ARM64_MTE, silence the compiler */ #define PG_mte_tagged 0 +static inline void mte_zero_clear_page_tags(void *addr) +{ +} static inline void mte_sync_tags(pte_t *ptep, pte_t pte) { } diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 012cffc574e8..ed1b9dcf12b2 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -13,6 +13,7 @@ #ifndef __ASSEMBLY__ #include <linux/personality.h> /* for READ_IMPLIES_EXEC */ +#include <linux/types.h> /* for gfp_t */ #include <asm/pgtable-types.h> struct page; @@ -28,9 +29,12 @@ void copy_user_highpage(struct page *to, struct page *from, void copy_highpage(struct page *to, struct page *from); #define __HAVE_ARCH_COPY_HIGHPAGE -#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ - alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) -#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE +struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr); +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE + +void tag_clear_highpage(struct page *to); +#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h new file mode 100644 index 000000000000..6bf5adc56295 --- /dev/null +++ b/arch/arm64/include/asm/patching.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_PATCHING_H +#define __ASM_PATCHING_H + +#include <linux/types.h> + +int aarch64_insn_read(void *addr, u32 
*insnp); +int aarch64_insn_write(void *addr, u32 insn); + +int aarch64_insn_patch_text_nosync(void *addr, u32 insn); +int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); + +#endif /* __ASM_PATCHING_H */ diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 60731f602d3e..4ef6f19331f9 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -239,6 +239,11 @@ /* PMMIR_EL1.SLOTS mask */ #define ARMV8_PMU_SLOTS_MASK 0xff +#define ARMV8_PMU_BUS_SLOTS_SHIFT 8 +#define ARMV8_PMU_BUS_SLOTS_MASK 0xff +#define ARMV8_PMU_BUS_WIDTH_SHIFT 16 +#define ARMV8_PMU_BUS_WIDTH_MASK 0xf + #ifdef CONFIG_PERF_EVENTS struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index b82575a33f8b..40085e53f573 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -72,13 +72,6 @@ #define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT)) /* - * Section address mask and size definitions. - */ -#define SECTION_SHIFT PMD_SHIFT -#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT) -#define SECTION_MASK (~(SECTION_SIZE-1)) - -/* * Contiguous page definitions. */ #define CONT_PTE_SHIFT (CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 938092df76cf..7032f04c8ac6 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -55,7 +55,6 @@ extern bool arm64_use_ng_mappings; #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) -#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) #define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED)) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 0b10204e72fc..11e60d0cd9b6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -511,13 +511,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT)) - #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_TABLE) #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ PMD_TYPE_SECT) #define pmd_leaf(pmd) pmd_sect(pmd) +#define pmd_bad(pmd) (!pmd_table(pmd)) #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE) #define pte_leaf_size(pte) (pte_cont(pte) ? 
CONT_PTE_SIZE : PAGE_SIZE) @@ -604,7 +603,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) #define pud_none(pud) (!pud_val(pud)) -#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT)) +#define pud_bad(pud) (!pud_table(pud)) #define pud_present(pud) pte_present(pud_pte(pud)) #define pud_leaf(pud) pud_sect(pud) #define pud_valid(pud) pte_valid(pud_pte(pud)) diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index d50416be99be..28a78b67d9b4 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -31,10 +31,6 @@ struct ptrauth_keys_user { struct ptrauth_key apga; }; -struct ptrauth_keys_kernel { - struct ptrauth_key apia; -}; - #define __ptrauth_key_install_nosync(k, v) \ do { \ struct ptrauth_key __pki_v = (v); \ @@ -42,6 +38,29 @@ do { \ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \ } while (0) +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL + +struct ptrauth_keys_kernel { + struct ptrauth_key apia; +}; + +static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) +{ + if (system_supports_address_auth()) + get_random_bytes(&keys->apia, sizeof(keys->apia)); +} + +static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys) +{ + if (!system_supports_address_auth()) + return; + + __ptrauth_key_install_nosync(APIA, keys->apia); + isb(); +} + +#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */ + static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys) { if (system_supports_address_auth()) { @@ -69,21 +88,6 @@ static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys) ptrauth_keys_install_user(keys); } -static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys) -{ - if (system_supports_address_auth()) - get_random_bytes(&keys->apia, sizeof(keys->apia)); -} - -static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kernel *keys) -{ - if (!system_supports_address_auth()) - return; - - __ptrauth_key_install_nosync(APIA, keys->apia); - isb(); -} - extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys, @@ -121,11 +125,6 @@ static __always_inline void ptrauth_enable(void) #define ptrauth_thread_switch_user(tsk) \ ptrauth_keys_install_user(&(tsk)->thread.keys_user) -#define ptrauth_thread_init_kernel(tsk) \ - ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel) -#define ptrauth_thread_switch_kernel(tsk) \ - ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel) - #else /* CONFIG_ARM64_PTR_AUTH */ #define ptrauth_enable() #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL) @@ -134,11 +133,19 @@ static __always_inline void ptrauth_enable(void) #define ptrauth_strip_insn_pac(lr) (lr) #define ptrauth_suspend_exit() #define ptrauth_thread_init_user() -#define ptrauth_thread_init_kernel(tsk) #define ptrauth_thread_switch_user(tsk) -#define ptrauth_thread_switch_kernel(tsk) #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL +#define ptrauth_thread_init_kernel(tsk) \ + ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel) +#define ptrauth_thread_switch_kernel(tsk) \ + ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel) +#else +#define ptrauth_thread_init_kernel(tsk) +#define ptrauth_thread_switch_kernel(tsk) +#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */ + #define PR_PAC_ENABLED_KEYS_MASK \ (PR_PAC_APIAKEY 
| PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 9df3feeee890..b6517fd03d7b 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -148,8 +148,10 @@ struct thread_struct { struct debug_info debug; /* debugging */ #ifdef CONFIG_ARM64_PTR_AUTH struct ptrauth_keys_user keys_user; +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL struct ptrauth_keys_kernel keys_kernel; #endif +#endif #ifdef CONFIG_ARM64_MTE u64 gcr_user_excl; #endif @@ -257,8 +259,6 @@ void set_task_sctlr_el1(u64 sctlr); extern struct task_struct *cpu_switch_to(struct task_struct *prev, struct task_struct *next); -asmlinkage void arm64_preempt_schedule_irq(void); - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) @@ -329,13 +329,13 @@ long get_tagged_addr_ctrl(struct task_struct *task); * of header definitions for the use of task_stack_page. */ -#define current_top_of_stack() \ -({ \ - struct stack_info _info; \ - BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \ - _info.high; \ +#define current_top_of_stack() \ +({ \ + struct stack_info _info; \ + BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \ + _info.high; \ }) -#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL)) +#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL)) #endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index eaa2cd92e4c1..8297bccf0784 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -9,18 +9,18 @@ #ifdef CONFIG_SHADOW_CALL_STACK scs_sp .req x18 - .macro scs_load tsk, tmp + .macro scs_load tsk ldr scs_sp, [\tsk, #TSK_TI_SCS_SP] .endm - .macro scs_save tsk, tmp + .macro scs_save tsk str scs_sp, [\tsk, #TSK_TI_SCS_SP] .endm #else - .macro scs_load tsk, tmp + .macro scs_load tsk .endm - .macro scs_save tsk, tmp + .macro scs_save tsk .endm #endif /* CONFIG_SHADOW_CALL_STACK */ diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h index 63e0b92a5fbb..7bea1d705dd6 100644 --- a/arch/arm64/include/asm/sdei.h +++ b/arch/arm64/include/asm/sdei.h @@ -37,13 +37,17 @@ struct sdei_registered_event; asmlinkage unsigned long __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg); +unsigned long do_sdei_event(struct pt_regs *regs, + struct sdei_registered_event *arg); + unsigned long sdei_arch_get_entry_point(int conduit); #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x) struct stack_info; -bool _on_sdei_stack(unsigned long sp, struct stack_info *info); -static inline bool on_sdei_stack(unsigned long sp, +bool _on_sdei_stack(unsigned long sp, unsigned long size, + struct stack_info *info); +static inline bool on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info) { if (!IS_ENABLED(CONFIG_VMAP_STACK)) @@ -51,7 +55,7 @@ static inline bool on_sdei_stack(unsigned long sp, if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) return false; if (in_nmi()) - return _on_sdei_stack(sp, info); + return _on_sdei_stack(sp, size, info); return false; } diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 0e357757c0cc..fc55f5a57a06 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void); /* * Initial data for bringing up a secondary CPU. 
- * @stack - sp for the secondary CPU * @status - Result passed back from the secondary CPU to * indicate failure. */ struct secondary_data { - void *stack; struct task_struct *task; long status; }; diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 4b33ca620679..1801399204d7 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -69,14 +69,14 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); -static inline bool on_stack(unsigned long sp, unsigned long low, - unsigned long high, enum stack_type type, - struct stack_info *info) +static inline bool on_stack(unsigned long sp, unsigned long size, + unsigned long low, unsigned long high, + enum stack_type type, struct stack_info *info) { if (!low) return false; - if (sp < low || sp >= high) + if (sp < low || sp + size < sp || sp + size > high) return false; if (info) { @@ -87,38 +87,38 @@ static inline bool on_stack(unsigned long sp, unsigned long low, return true; } -static inline bool on_irq_stack(unsigned long sp, +static inline bool on_irq_stack(unsigned long sp, unsigned long size, struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr); unsigned long high = low + IRQ_STACK_SIZE; - return on_stack(sp, low, high, STACK_TYPE_IRQ, info); + return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info); } static inline bool on_task_stack(const struct task_struct *tsk, - unsigned long sp, + unsigned long sp, unsigned long size, struct stack_info *info) { unsigned long low = (unsigned long)task_stack_page(tsk); unsigned long high = low + THREAD_SIZE; - return on_stack(sp, low, high, STACK_TYPE_TASK, info); + return on_stack(sp, size, low, high, STACK_TYPE_TASK, info); } #ifdef CONFIG_VMAP_STACK DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); -static inline bool on_overflow_stack(unsigned long sp, +static inline bool on_overflow_stack(unsigned long sp, unsigned long size, struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack); unsigned long high = low + OVERFLOW_STACK_SIZE; - return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info); + return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); } #else -static inline bool on_overflow_stack(unsigned long sp, +static inline bool on_overflow_stack(unsigned long sp, unsigned long size, struct stack_info *info) { return false; } #endif @@ -128,21 +128,21 @@ static inline bool on_overflow_stack(unsigned long sp, * context. 
*/ static inline bool on_accessible_stack(const struct task_struct *tsk, - unsigned long sp, + unsigned long sp, unsigned long size, struct stack_info *info) { if (info) info->type = STACK_TYPE_UNKNOWN; - if (on_task_stack(tsk, sp, info)) + if (on_task_stack(tsk, sp, size, info)) return true; if (tsk != current || preemptible()) return false; - if (on_irq_stack(sp, info)) + if (on_irq_stack(sp, size, info)) return true; - if (on_overflow_stack(sp, info)) + if (on_overflow_stack(sp, size, info)) return true; - if (on_sdei_stack(sp, info)) + if (on_sdei_stack(sp, size, info)) return true; return false; diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 65d15700a168..9ea84bcddf85 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -703,9 +703,7 @@ /* MAIR_ELx memory attributes (used by Linux) */ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) #define MAIR_ATTR_DEVICE_nGnRE UL(0x04) -#define MAIR_ATTR_DEVICE_GRE UL(0x0c) #define MAIR_ATTR_NORMAL_NC UL(0x44) -#define MAIR_ATTR_NORMAL_WT UL(0xbb) #define MAIR_ATTR_NORMAL_TAGGED UL(0xf0) #define MAIR_ATTR_NORMAL UL(0xff) #define MAIR_ATTR_MASK UL(0xff) diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 61c97d3b58c7..c995d1f4594f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb); */ static inline int tlb_get_level(struct mmu_gather *tlb) { + /* The TTL field is only valid for the leaf entry. */ + if (tlb->freed_tables) + return 0; + if (tlb->cleared_ptes && !(tlb->cleared_pmds || tlb->cleared_puds || tlb->cleared_p4ds))
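The stacktrace.h changes above thread an object size through every on_*_stack() helper, so a stack walk accepts an address only if the whole object fits on the stack. A standalone sketch of the new bounds check in plain C, outside the kernel; the name range_on_stack() is made up for this illustration:

#include <stdbool.h>

/* True if the object [sp, sp + size) lies entirely within [low, high). */
static bool range_on_stack(unsigned long sp, unsigned long size,
			   unsigned long low, unsigned long high)
{
	if (!low)
		return false;
	/* "sp + size < sp" rejects a range whose end wraps around the address space. */
	if (sp < low || sp + size < sp || sp + size > high)
		return false;
	return true;
}

Callers that only care about a single address, such as on_thread_stack() and current_top_of_stack() in the processor.h hunk, pass size == 1 to keep the old behaviour.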