Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 25
-rw-r--r--  arch/arm64/Kconfig.platforms | 2
-rw-r--r--  arch/arm64/Makefile | 70
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 5
-rw-r--r--  arch/arm64/include/asm/barrier.h | 11
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 14
-rw-r--r--  arch/arm64/include/asm/efi.h | 2
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 8
-rw-r--r--  arch/arm64/include/asm/esr.h | 1
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 30
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h | 22
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 15
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 14
-rw-r--r--  arch/arm64/include/asm/insn.h | 1
-rw-r--r--  arch/arm64/include/asm/irqflags.h | 191
-rw-r--r--  arch/arm64/include/asm/linkage.h | 4
-rw-r--r--  arch/arm64/include/asm/patching.h | 2
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 8
-rw-r--r--  arch/arm64/include/asm/processor.h | 2
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 2
-rw-r--r--  arch/arm64/include/asm/scs.h | 7
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 106
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 6
-rw-r--r--  arch/arm64/include/uapi/asm/sigcontext.h | 27
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 4
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 114
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 14
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S | 30
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 32
-rw-r--r--  arch/arm64/kernel/entry.S | 41
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 52
-rw-r--r--  arch/arm64/kernel/ftrace.c | 158
-rw-r--r--  arch/arm64/kernel/head.S | 112
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 7
-rw-r--r--  arch/arm64/kernel/idreg-override.c | 1
-rw-r--r--  arch/arm64/kernel/image-vars.h | 7
-rw-r--r--  arch/arm64/kernel/patch-scs.c | 11
-rw-r--r--  arch/arm64/kernel/patching.c | 17
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 4
-rw-r--r--  arch/arm64/kernel/process.c | 21
-rw-r--r--  arch/arm64/kernel/ptrace.c | 64
-rw-r--r--  arch/arm64/kernel/setup.c | 17
-rw-r--r--  arch/arm64/kernel/signal.c | 172
-rw-r--r--  arch/arm64/kernel/sleep.S | 6
-rw-r--r--  arch/arm64/kernel/syscall.c | 8
-rw-r--r--  arch/arm64/kernel/traps.c | 6
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 8
-rw-r--r--  arch/arm64/kvm/debug.c | 2
-rw-r--r--  arch/arm64/kvm/fpsimd.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/entry.S | 2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/debug-sr.c | 2
-rw-r--r--  arch/arm64/mm/cache.S | 1
-rw-r--r--  arch/arm64/mm/mmu.c | 8
-rw-r--r--  arch/arm64/mm/proc.S | 2
-rw-r--r--  arch/arm64/tools/cpucaps | 6
-rw-r--r--  arch/arm64/tools/sysreg | 189
56 files changed, 1301 insertions(+), 394 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 03934808b2ed..619ab046744a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -123,6 +123,8 @@ config ARM64
select DMA_DIRECT_REMAP
select EDAC_SUPPORT
select FRAME_POINTER
+ select FUNCTION_ALIGNMENT_4B
+ select FUNCTION_ALIGNMENT_8B if DYNAMIC_FTRACE_WITH_CALL_OPS
select GENERIC_ALLOCATOR
select GENERIC_ARCH_TOPOLOGY
select GENERIC_CLOCKEVENTS_BROADCAST
@@ -186,6 +188,8 @@ config ARM64
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
if $(cc-option,-fpatchable-function-entry=2)
+ select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
+ if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -1456,10 +1460,23 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
+# include/linux/mmzone.h requires the following to be true:
+#
+# MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS
+#
+# so the maximum value of MAX_ORDER is SECTION_SIZE_BITS + 1 - PAGE_SHIFT:
+#
+# | SECTION_SIZE_BITS | PAGE_SHIFT | max MAX_ORDER | default MAX_ORDER |
+# ----+-------------------+--------------+-----------------+--------------------+
+# 4K | 27 | 12 | 16 | 11 |
+# 16K | 27 | 14 | 14 | 12 |
+# 64K | 29 | 16 | 14 | 14 |
config ARCH_FORCE_MAX_ORDER
- int
+ int "Maximum zone order" if ARM64_4K_PAGES || ARM64_16K_PAGES
default "14" if ARM64_64K_PAGES
+ range 12 14 if ARM64_16K_PAGES
default "12" if ARM64_16K_PAGES
+ range 11 16 if ARM64_4K_PAGES
default "11"
help
The kernel memory allocator divides physically contiguous memory
@@ -1472,7 +1489,7 @@ config ARCH_FORCE_MAX_ORDER
This config option is actually maximum order plus one. For example,
a value of 11 means that the largest free memory block is 2^10 pages.
- We make sure that we can allocate upto a HugePage size for each configuration.
+ We make sure that we can allocate up to a HugePage size for each configuration.
Hence we have :
MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2
@@ -1818,7 +1835,7 @@ config ARM64_PTR_AUTH_KERNEL
bool "Use pointer authentication for kernel"
default y
depends on ARM64_PTR_AUTH
- depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+ depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_ARMV8_3
# Modern compilers insert a .note.gnu.property section note for PAC
# which is only understood by binutils starting with version 2.33.1.
depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
@@ -1843,7 +1860,7 @@ config CC_HAS_SIGN_RETURN_ADDRESS
# GCC 7, 8
def_bool $(cc-option,-msign-return-address=all)
-config AS_HAS_PAC
+config AS_HAS_ARMV8_3
def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a)
config AS_HAS_CFI_NEGATE_RA_STATE
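
As a rough illustration of the constraint documented in the new ARCH_FORCE_MAX_ORDER comment above, the "max MAX_ORDER" column follows directly from MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS. A minimal, standalone C sketch of that arithmetic (not part of this patch; the function name is illustrative only):

    #include <stdio.h>

    /* Reproduces the "max MAX_ORDER" column of the Kconfig comment. */
    static int max_max_order(int section_size_bits, int page_shift)
    {
            /* MAX_ORDER - 1 + PAGE_SHIFT <= SECTION_SIZE_BITS */
            return section_size_bits + 1 - page_shift;
    }

    int main(void)
    {
            printf("4K:  %d\n", max_max_order(27, 12));  /* 16 */
            printf("16K: %d\n", max_max_order(27, 14));  /* 14 */
            printf("64K: %d\n", max_max_order(29, 16));  /* 14 */
            return 0;
    }
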
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index d1970adf80ab..333d0af650d2 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -187,7 +187,7 @@ config ARCH_MVEBU
select PINCTRL_ARMADA_CP110
select PINCTRL_AC5
help
- This enables support for Marvell EBU familly, including:
+ This enables support for Marvell EBU family, including:
- Armada 3700 SoC Family
- Armada 7K SoC Family
- Armada 8K SoC Family
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index d62bd221828f..c33b5da95b4a 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -63,50 +63,37 @@ stack_protector_prepare: prepare0
include/generated/asm-offsets.h))
endif
-ifeq ($(CONFIG_AS_HAS_ARMV8_2), y)
-# make sure to pass the newest target architecture to -march.
-asm-arch := armv8.2-a
-endif
-
-# Ensure that if the compiler supports branch protection we default it
-# off, this will be overridden if we are using branch protection.
-branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
-
-ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
-branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
-# We enable additional protection for leaf functions as there is some
-# narrow potential for ROP protection benefits and no substantial
-# performance impact has been observed.
-PACRET-y := pac-ret+leaf
-
-# Using a shadow call stack in leaf functions is too costly, so avoid PAC there
-# as well when we may be patching PAC into SCS
-PACRET-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) := pac-ret
-
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
-branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=$(PACRET-y)+bti
+ KBUILD_CFLAGS += -mbranch-protection=pac-ret+bti
+else ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
+ ifeq ($(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET),y)
+ KBUILD_CFLAGS += -mbranch-protection=pac-ret
+ else
+ KBUILD_CFLAGS += -msign-return-address=non-leaf
+ endif
else
-branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=$(PACRET-y)
-endif
-# -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the
-# compiler to generate them and consequently to break the single image contract
-# we pass it only to the assembler. This option is utilized only in case of non
-# integrated assemblers.
-ifeq ($(CONFIG_AS_HAS_PAC), y)
-asm-arch := armv8.3-a
-endif
-endif
-
-KBUILD_CFLAGS += $(branch-prot-flags-y)
-
-ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
-# make sure to pass the newest target architecture to -march.
-asm-arch := armv8.4-a
+ KBUILD_CFLAGS += $(call cc-option,-mbranch-protection=none)
endif
+# Tell the assembler to support instructions from the latest target
+# architecture.
+#
+# For non-integrated assemblers we'll pass this on the command line, and for
+# integrated assemblers we'll define ARM64_ASM_ARCH and ARM64_ASM_PREAMBLE for
+# inline usage.
+#
+# We cannot pass the same arch flag to the compiler as this would allow it to
+# freely generate instructions which are not supported by earlier architecture
+# versions, which would prevent a single kernel image from working on earlier
+# hardware.
ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
-# make sure to pass the newest target architecture to -march.
-asm-arch := armv8.5-a
+ asm-arch := armv8.5-a
+else ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
+ asm-arch := armv8.4-a
+else ifeq ($(CONFIG_AS_HAS_ARMV8_3), y)
+ asm-arch := armv8.3-a
+else ifeq ($(CONFIG_AS_HAS_ARMV8_2), y)
+ asm-arch := armv8.2-a
endif
ifdef asm-arch
@@ -139,7 +126,10 @@ endif
CHECKFLAGS += -D__aarch64__
-ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y)
+ KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2
+else ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
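
For context on the new -fpatchable-function-entry=4,2 flag: it asks the compiler for four NOPs per function, two of which are placed before the function's entry point (padding that the CALL_OPS scheme reuses for the per-callsite ftrace_ops pointer literal, as described in the entry-ftrace.S comment further down) and two at the entry itself (later patched to the MOV x9, LR / BL ftrace_caller pair). A hedged sketch using the equivalent per-function GCC/Clang attribute, for a single hypothetical function rather than the kernel-wide Makefile flag:

    /*
     * Illustrative only: request the same 4,2 layout for one function via the
     * patchable_function_entry attribute instead of the global CFLAGS.
     */
    __attribute__((patchable_function_entry(4, 2)))
    void example_traced_function(void)
    {
            /* function body */
    }
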
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 48d4473e8eee..01281a5336cf 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -190,5 +190,10 @@ static inline void gic_arch_enable_irqs(void)
asm volatile ("msr daifclr, #3" : : : "memory");
}
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+ return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 2cfc4245d2e2..3dd8982a9ce3 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -11,6 +11,8 @@
#include <linux/kasan-checks.h>
+#include <asm/alternative-macros.h>
+
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) asm volatile(__nops(n))
@@ -41,10 +43,11 @@
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync() \
do { \
- extern struct static_key_false gic_pmr_sync; \
- \
- if (static_branch_unlikely(&gic_pmr_sync)) \
- dsb(sy); \
+ asm volatile( \
+ ALTERNATIVE_CB("dsb sy", \
+ ARM64_HAS_GIC_PRIO_RELAXED_SYNC, \
+ alt_cb_patch_nops) \
+ ); \
} while(0)
#else
#define pmr_sync() do {} while (0)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 03d1c9d7af82..6bf013fb110d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -769,6 +769,12 @@ static __always_inline bool system_supports_sme(void)
cpus_have_const_cap(ARM64_SME);
}
+static __always_inline bool system_supports_sme2(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SME) &&
+ cpus_have_const_cap(ARM64_SME2);
+}
+
static __always_inline bool system_supports_fa64(void)
{
return IS_ENABLED(CONFIG_ARM64_SME) &&
@@ -806,7 +812,7 @@ static inline bool system_has_full_ptr_auth(void)
static __always_inline bool system_uses_irq_prio_masking(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
- cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
+ cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
}
static inline bool system_supports_mte(void)
@@ -864,7 +870,11 @@ static inline bool cpu_has_hw_af(void)
if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
return false;
- mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ /*
+ * Use cached version to avoid emulated msr operation on KVM
+ * guests.
+ */
+ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
}
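
The cpu_has_hw_af() change above follows a common pattern: read the sanitised, system-wide copy of an ID register instead of issuing a raw MRS, which a hypervisor would otherwise have to trap and emulate. A minimal sketch of that pattern (illustrative only; example_ names are not part of this patch):

    static bool example_has_hafdbs(void)
    {
            u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

            return cpuid_feature_extract_unsigned_field(mmfr1,
                                    ID_AA64MMFR1_EL1_HAFDBS_SHIFT) >= 1;
    }
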
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 31d13a6001df..0f0e729b40ef 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -105,6 +105,8 @@ static inline unsigned long efi_get_kimg_min_align(void)
#define EFI_ALLOC_ALIGN SZ_64K
#define EFI_ALLOC_LIMIT ((1UL << 48) - 1)
+extern unsigned long primary_entry_offset(void);
+
/*
* On ARM systems, virtually remapped UEFI runtime services are set up in two
* distinct stages:
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 668569adf4d3..2cdd010f9524 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -53,10 +53,10 @@
cbz x0, .Lskip_spe_\@ // Skip if SPE not present
mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2,
- and x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
+ and x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
cbnz x0, .Lskip_spe_el2_\@ // then permit sampling of physical
- mov x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
- 1 << SYS_PMSCR_EL2_PA_SHIFT)
+ mov x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
+ 1 << PMSCR_EL2_PA_SHIFT)
msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter
.Lskip_spe_el2_\@:
mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
@@ -177,7 +177,7 @@
/**
* Initialize EL2 registers to sane values. This should be called early on all
* cores that were booted in EL2. Note that everything gets initialised as
- * if VHE was not evailable. The kernel context will be upgraded to VHE
+ * if VHE was not available. The kernel context will be upgraded to VHE
* if possible later on in the boot process
*
* Regs: x0, x1 and x2 are clobbered.
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 15b34fbfca66..5f3271e9d5df 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -341,6 +341,7 @@
#define ESR_ELx_SME_ISS_ILL 1
#define ESR_ELx_SME_ISS_SM_DISABLED 2
#define ESR_ELx_SME_ISS_ZA_DISABLED 3
+#define ESR_ELx_SME_ISS_ZT_DISABLED 4
#ifndef __ASSEMBLY__
#include <asm/types.h>
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index e6fa1e2982c8..67f2fb781f59 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -61,7 +61,7 @@ extern void fpsimd_kvm_prepare(void);
struct cpu_fp_state {
struct user_fpsimd_state *st;
void *sve_state;
- void *za_state;
+ void *sme_state;
u64 *svcr;
unsigned int sve_vl;
unsigned int sme_vl;
@@ -105,6 +105,13 @@ static inline void *sve_pffr(struct thread_struct *thread)
return (char *)thread->sve_state + sve_ffr_offset(vl);
}
+static inline void *thread_zt_state(struct thread_struct *thread)
+{
+ /* The ZT register state is stored immediately after the ZA state */
+ unsigned int sme_vq = sve_vq_from_vl(thread_get_sme_vl(thread));
+ return thread->sme_state + ZA_SIG_REGS_SIZE(sme_vq);
+}
+
extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
int restore_ffr);
@@ -112,12 +119,13 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
extern void sme_set_vq(unsigned long vq_minus_1);
-extern void za_save_state(void *state);
-extern void za_load_state(void const *state);
+extern void sme_save_state(void *state, int zt);
+extern void sme_load_state(void const *state, int zt);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern u64 read_zcr_features(void);
@@ -355,14 +363,20 @@ extern int sme_get_current_vl(void);
/*
* Return how many bytes of memory are required to store the full SME
- * specific state (currently just ZA) for task, given task's currently
- * configured vector length.
+ * specific state for task, given task's currently configured vector
+ * length.
*/
-static inline size_t za_state_size(struct task_struct const *task)
+static inline size_t sme_state_size(struct task_struct const *task)
{
unsigned int vl = task_get_sme_vl(task);
+ size_t size;
+
+ size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+
+ if (system_supports_sme2())
+ size += ZT_SIG_REG_SIZE;
- return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+ return size;
}
#else
@@ -382,7 +396,7 @@ static inline int sme_max_virtualisable_vl(void) { return 0; }
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
-static inline size_t za_state_size(struct task_struct const *task)
+static inline size_t sme_state_size(struct task_struct const *task)
{
return 0;
}
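
The thread_zt_state()/sme_state_size() changes above rely on a simple buffer layout: ZA occupies SVL x SVL bytes for the task's streaming vector length, and on SME2 the ZT0 state follows immediately after it. A small sketch of the offsets, assuming an example streaming VL (the helper names here are illustrative, not part of this patch):

    /*
     * For svl bytes of streaming vector length (e.g. svl = 64 for a 512-bit
     * SVL), ZA takes svl * svl bytes and ZT0 starts right after it, which is
     * what thread_zt_state() depends on.
     */
    static inline size_t example_za_bytes(unsigned int svl)
    {
            return (size_t)svl * svl;       /* ZA_SIG_REGS_SIZE(sve_vq_from_vl(svl)) */
    }

    static inline size_t example_zt0_offset(unsigned int svl)
    {
            return example_za_bytes(svl);   /* ZT0 immediately follows ZA */
    }
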
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 5e0910cf4832..cd03819a3b68 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -221,6 +221,28 @@
.endm
/*
+ * LDR (ZT0)
+ *
+ * LDR ZT0, nx
+ */
+.macro _ldr_zt nx
+ _check_general_reg \nx
+ .inst 0xe11f8000 \
+ | (\nx << 5)
+.endm
+
+/*
+ * STR (ZT0)
+ *
+ * STR ZT0, nx
+ */
+.macro _str_zt nx
+ _check_general_reg \nx
+ .inst 0xe13f8000 \
+ | (\nx << 5)
+.endm
+
+/*
* Zero the entire ZA array
* ZERO ZA
*/
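
The _ldr_zt/_str_zt macros above hand-encode the SME2 LDR/STR (ZT0) instructions with .inst so that older assemblers without SME2 support can still build the file; the only variable part is the base register number, OR'd into bits [9:5]. A C rendering of the same encoding arithmetic (sketch only):

    static inline u32 example_ldr_zt0(unsigned int xn)     /* LDR ZT0, [Xn] */
    {
            return 0xe11f8000 | (xn << 5);
    }

    static inline u32 example_str_zt0(unsigned int xn)     /* STR ZT0, [Xn] */
    {
            return 0xe13f8000 | (xn << 5);
    }
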
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 5664729800ae..1c2672bbbf37 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -62,20 +62,7 @@ extern unsigned long ftrace_graph_call;
extern void return_to_handler(void);
-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
- /*
- * Adjust addr to point at the BL in the callsite.
- * See ftrace_init_nop() for the callsite sequence.
- */
- if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
- return addr + AARCH64_INSN_SIZE;
- /*
- * addr is the address of the mcount call instruction.
- * recordmcount does the necessary offset calculation.
- */
- return addr;
-}
+unsigned long ftrace_call_adjust(unsigned long addr);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct dyn_ftrace;
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 06dd12c514e6..5d45f19fda7f 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -31,12 +31,20 @@
#define COMPAT_HWCAP_VFPD32 (1 << 19)
#define COMPAT_HWCAP_LPAE (1 << 20)
#define COMPAT_HWCAP_EVTSTRM (1 << 21)
+#define COMPAT_HWCAP_FPHP (1 << 22)
+#define COMPAT_HWCAP_ASIMDHP (1 << 23)
+#define COMPAT_HWCAP_ASIMDDP (1 << 24)
+#define COMPAT_HWCAP_ASIMDFHM (1 << 25)
+#define COMPAT_HWCAP_ASIMDBF16 (1 << 26)
+#define COMPAT_HWCAP_I8MM (1 << 27)
#define COMPAT_HWCAP2_AES (1 << 0)
#define COMPAT_HWCAP2_PMULL (1 << 1)
#define COMPAT_HWCAP2_SHA1 (1 << 2)
#define COMPAT_HWCAP2_SHA2 (1 << 3)
#define COMPAT_HWCAP2_CRC32 (1 << 4)
+#define COMPAT_HWCAP2_SB (1 << 5)
+#define COMPAT_HWCAP2_SSBS (1 << 6)
#ifndef __ASSEMBLY__
#include <linux/log2.h>
@@ -123,6 +131,12 @@
#define KERNEL_HWCAP_CSSC __khwcap2_feature(CSSC)
#define KERNEL_HWCAP_RPRFM __khwcap2_feature(RPRFM)
#define KERNEL_HWCAP_SVE2P1 __khwcap2_feature(SVE2P1)
+#define KERNEL_HWCAP_SME2 __khwcap2_feature(SME2)
+#define KERNEL_HWCAP_SME2P1 __khwcap2_feature(SME2P1)
+#define KERNEL_HWCAP_SME_I16I32 __khwcap2_feature(SME_I16I32)
+#define KERNEL_HWCAP_SME_BI32I32 __khwcap2_feature(SME_BI32I32)
+#define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16)
+#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index aaf1f52fbf3e..139a88e4e852 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -420,6 +420,7 @@ __AARCH64_INSN_FUNCS(sb, 0xFFFFFFFF, 0xD50330FF)
__AARCH64_INSN_FUNCS(clrex, 0xFFFFF0FF, 0xD503305F)
__AARCH64_INSN_FUNCS(ssbb, 0xFFFFFFFF, 0xD503309F)
__AARCH64_INSN_FUNCS(pssbb, 0xFFFFFFFF, 0xD503349F)
+__AARCH64_INSN_FUNCS(bti, 0xFFFFFF3F, 0xD503241f)
#undef __AARCH64_INSN_FUNCS
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index b57b9b1e4344..e0f5f6b73edd 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -21,43 +21,77 @@
* exceptions should be unmasked.
*/
-/*
- * CPU interrupt mask handling.
- */
-static inline void arch_local_irq_enable(void)
+static __always_inline bool __irqflags_uses_pmr(void)
{
- if (system_has_prio_mask_debugging()) {
- u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+ alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
+}
+static __always_inline void __daif_local_irq_enable(void)
+{
+ barrier();
+ asm volatile("msr daifclr, #3");
+ barrier();
+}
+
+static __always_inline void __pmr_local_irq_enable(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
+ u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
}
- asm volatile(ALTERNATIVE(
- "msr daifclr, #3 // arch_local_irq_enable",
- __msr_s(SYS_ICC_PMR_EL1, "%0"),
- ARM64_HAS_IRQ_PRIO_MASKING)
- :
- : "r" ((unsigned long) GIC_PRIO_IRQON)
- : "memory");
-
+ barrier();
+ write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
pmr_sync();
+ barrier();
}
-static inline void arch_local_irq_disable(void)
+static inline void arch_local_irq_enable(void)
{
- if (system_has_prio_mask_debugging()) {
- u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+ if (__irqflags_uses_pmr()) {
+ __pmr_local_irq_enable();
+ } else {
+ __daif_local_irq_enable();
+ }
+}
+static __always_inline void __daif_local_irq_disable(void)
+{
+ barrier();
+ asm volatile("msr daifset, #3");
+ barrier();
+}
+
+static __always_inline void __pmr_local_irq_disable(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
+ u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
}
- asm volatile(ALTERNATIVE(
- "msr daifset, #3 // arch_local_irq_disable",
- __msr_s(SYS_ICC_PMR_EL1, "%0"),
- ARM64_HAS_IRQ_PRIO_MASKING)
- :
- : "r" ((unsigned long) GIC_PRIO_IRQOFF)
- : "memory");
+ barrier();
+ write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
+ barrier();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ if (__irqflags_uses_pmr()) {
+ __pmr_local_irq_disable();
+ } else {
+ __daif_local_irq_disable();
+ }
+}
+
+static __always_inline unsigned long __daif_local_save_flags(void)
+{
+ return read_sysreg(daif);
+}
+
+static __always_inline unsigned long __pmr_local_save_flags(void)
+{
+ return read_sysreg_s(SYS_ICC_PMR_EL1);
}
/*
@@ -65,69 +99,108 @@ static inline void arch_local_irq_disable(void)
*/
static inline unsigned long arch_local_save_flags(void)
{
- unsigned long flags;
+ if (__irqflags_uses_pmr()) {
+ return __pmr_local_save_flags();
+ } else {
+ return __daif_local_save_flags();
+ }
+}
- asm volatile(ALTERNATIVE(
- "mrs %0, daif",
- __mrs_s("%0", SYS_ICC_PMR_EL1),
- ARM64_HAS_IRQ_PRIO_MASKING)
- : "=&r" (flags)
- :
- : "memory");
+static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
+{
+ return flags & PSR_I_BIT;
+}
- return flags;
+static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
+{
+ return flags != GIC_PRIO_IRQON;
}
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- int res;
+ if (__irqflags_uses_pmr()) {
+ return __pmr_irqs_disabled_flags(flags);
+ } else {
+ return __daif_irqs_disabled_flags(flags);
+ }
+}
- asm volatile(ALTERNATIVE(
- "and %w0, %w1, #" __stringify(PSR_I_BIT),
- "eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
- ARM64_HAS_IRQ_PRIO_MASKING)
- : "=&r" (res)
- : "r" ((int) flags)
- : "memory");
+static __always_inline bool __daif_irqs_disabled(void)
+{
+ return __daif_irqs_disabled_flags(__daif_local_save_flags());
+}
- return res;
+static __always_inline bool __pmr_irqs_disabled(void)
+{
+ return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
}
-static inline int arch_irqs_disabled(void)
+static inline bool arch_irqs_disabled(void)
{
- return arch_irqs_disabled_flags(arch_local_save_flags());
+ if (__irqflags_uses_pmr()) {
+ return __pmr_irqs_disabled();
+ } else {
+ return __daif_irqs_disabled();
+ }
}
-static inline unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long __daif_local_irq_save(void)
{
- unsigned long flags;
+ unsigned long flags = __daif_local_save_flags();
+
+ __daif_local_irq_disable();
+
+ return flags;
+}
- flags = arch_local_save_flags();
+static __always_inline unsigned long __pmr_local_irq_save(void)
+{
+ unsigned long flags = __pmr_local_save_flags();
/*
* There are too many states with IRQs disabled, just keep the current
* state if interrupts are already disabled/masked.
*/
- if (!arch_irqs_disabled_flags(flags))
- arch_local_irq_disable();
+ if (!__pmr_irqs_disabled_flags(flags))
+ __pmr_local_irq_disable();
return flags;
}
+static inline unsigned long arch_local_irq_save(void)
+{
+ if (__irqflags_uses_pmr()) {
+ return __pmr_local_irq_save();
+ } else {
+ return __daif_local_irq_save();
+ }
+}
+
+static __always_inline void __daif_local_irq_restore(unsigned long flags)
+{
+ barrier();
+ write_sysreg(flags, daif);
+ barrier();
+}
+
+static __always_inline void __pmr_local_irq_restore(unsigned long flags)
+{
+ barrier();
+ write_sysreg_s(flags, SYS_ICC_PMR_EL1);
+ pmr_sync();
+ barrier();
+}
+
/*
* restore saved IRQ state
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile(ALTERNATIVE(
- "msr daif, %0",
- __msr_s(SYS_ICC_PMR_EL1, "%0"),
- ARM64_HAS_IRQ_PRIO_MASKING)
- :
- : "r" (flags)
- : "memory");
-
- pmr_sync();
+ if (__irqflags_uses_pmr()) {
+ __pmr_local_irq_restore(flags);
+ } else {
+ __daif_local_irq_restore(flags);
+ }
}
#endif /* __ASM_IRQFLAGS_H */
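
From a caller's point of view the irqflags.h rework above changes nothing: the usual save/restore pairing still works, and whether the __daif_*() or __pmr_*() bodies run is decided once via the ARM64_HAS_GIC_PRIO_MASKING alternative inside __irqflags_uses_pmr(). A brief caller-side sketch (illustrative only, not part of this patch):

    static void example_critical_section(void)
    {
            unsigned long flags;

            flags = arch_local_irq_save();  /* masks IRQs via DAIF or via PMR */
            /* ... work that must not be interrupted by normal IRQs;
             * under PMR masking, pseudo-NMIs can still be delivered ... */
            arch_local_irq_restore(flags);
    }
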
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 1436fa1cde24..d3acd9c87509 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -5,8 +5,8 @@
#include <asm/assembler.h>
#endif
-#define __ALIGN .align 2
-#define __ALIGN_STR ".align 2"
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT
+#define __ALIGN_STR ".balign " __stringify(CONFIG_FUNCTION_ALIGNMENT)
/*
* When using in-kernel BTI we need to ensure that PCS-conformant
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
index 6bf5adc56295..68908b82b168 100644
--- a/arch/arm64/include/asm/patching.h
+++ b/arch/arm64/include/asm/patching.h
@@ -7,6 +7,8 @@
int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
+int aarch64_insn_write_literal_u64(void *addr, u64 val);
+
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b4bbeed80fb6..832c9c8fb58f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -275,6 +275,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
}
extern void __sync_icache_dcache(pte_t pteval);
+bool pgattr_change_is_safe(u64 old, u64 new);
/*
* PTE bits configuration in the presence of hardware Dirty Bit Management
@@ -292,7 +293,7 @@ extern void __sync_icache_dcache(pte_t pteval);
* PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
*/
-static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
+static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
pte_t pte)
{
pte_t old_pte;
@@ -318,6 +319,9 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
"%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
__func__, pte_val(old_pte), pte_val(pte));
+ VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
+ "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
+ __func__, pte_val(old_pte), pte_val(pte));
}
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -346,7 +350,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
mte_sync_tags(old_pte, pte);
}
- __check_racy_pte_update(mm, ptep, pte);
+ __check_safe_pte_update(mm, ptep, pte);
set_pte(ptep, pte);
}
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d51b32a69309..3918f2a67970 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -161,7 +161,7 @@ struct thread_struct {
enum fp_type fp_type; /* registers FPSIMD or SVE? */
unsigned int fpsimd_cpu;
void *sve_state; /* SVE registers, if any */
- void *za_state; /* ZA register, if any */
+ void *sme_state; /* ZA and ZT state, if any */
unsigned int vl[ARM64_VEC_MAX]; /* vector length */
unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
unsigned long fault_address; /* fault info */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41b332c054ab..47ec58031f11 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -194,7 +194,7 @@ struct pt_regs {
u32 unused2;
#endif
u64 sdei_ttbr1;
- /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
+ /* Only valid when ARM64_HAS_GIC_PRIO_MASKING is enabled. */
u64 pmr_save;
u64 stackframe[2];
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index ff7da1268a52..13df982a0808 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -10,15 +10,16 @@
#ifdef CONFIG_SHADOW_CALL_STACK
scs_sp .req x18
- .macro scs_load tsk
- ldr scs_sp, [\tsk, #TSK_TI_SCS_SP]
+ .macro scs_load_current
+ get_current_task scs_sp
+ ldr scs_sp, [scs_sp, #TSK_TI_SCS_SP]
.endm
.macro scs_save tsk
str scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
#else
- .macro scs_load tsk
+ .macro scs_load_current
.endm
.macro scs_save tsk
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1312fb48f18b..043ecc3405e7 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -216,101 +216,22 @@
#define SYS_PAR_EL1_FST GENMASK(6, 1)
/*** Statistical Profiling Extension ***/
-/* ID registers */
-#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
-#define SYS_PMSIDR_EL1_FE_SHIFT 0
-#define SYS_PMSIDR_EL1_FT_SHIFT 1
-#define SYS_PMSIDR_EL1_FL_SHIFT 2
-#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3
-#define SYS_PMSIDR_EL1_LDS_SHIFT 4
-#define SYS_PMSIDR_EL1_ERND_SHIFT 5
-#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8
-#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL
-#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12
-#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL
-#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16
-#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL
-
-#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
-#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0
-#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU
-#define SYS_PMBIDR_EL1_P_SHIFT 4
-#define SYS_PMBIDR_EL1_F_SHIFT 5
-
-/* Sampling controls */
-#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
-#define SYS_PMSCR_EL1_E0SPE_SHIFT 0
-#define SYS_PMSCR_EL1_E1SPE_SHIFT 1
-#define SYS_PMSCR_EL1_CX_SHIFT 3
-#define SYS_PMSCR_EL1_PA_SHIFT 4
-#define SYS_PMSCR_EL1_TS_SHIFT 5
-#define SYS_PMSCR_EL1_PCT_SHIFT 6
-
-#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0)
-#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0
-#define SYS_PMSCR_EL2_E2SPE_SHIFT 1
-#define SYS_PMSCR_EL2_CX_SHIFT 3
-#define SYS_PMSCR_EL2_PA_SHIFT 4
-#define SYS_PMSCR_EL2_TS_SHIFT 5
-#define SYS_PMSCR_EL2_PCT_SHIFT 6
-
-#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2)
-
-#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3)
-#define SYS_PMSIRR_EL1_RND_SHIFT 0
-#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8
-#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL
-
-/* Filtering controls */
-#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1)
-
-#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4)
-#define SYS_PMSFCR_EL1_FE_SHIFT 0
-#define SYS_PMSFCR_EL1_FT_SHIFT 1
-#define SYS_PMSFCR_EL1_FL_SHIFT 2
-#define SYS_PMSFCR_EL1_B_SHIFT 16
-#define SYS_PMSFCR_EL1_LD_SHIFT 17
-#define SYS_PMSFCR_EL1_ST_SHIFT 18
-
-#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5)
-#define SYS_PMSEVFR_EL1_RES0_8_2 \
+#define PMSEVFR_EL1_RES0_IMP \
(GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\
BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0))
-#define SYS_PMSEVFR_EL1_RES0_8_3 \
- (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
-
-#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6)
-#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0
-
-/* Buffer controls */
-#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
-#define SYS_PMBLIMITR_EL1_E_SHIFT 0
-#define SYS_PMBLIMITR_EL1_FM_SHIFT 1
-#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL
-#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
-
-#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1)
+#define PMSEVFR_EL1_RES0_V1P1 \
+ (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
+#define PMSEVFR_EL1_RES0_V1P2 \
+ (PMSEVFR_EL1_RES0_V1P1 & ~BIT_ULL(6))
/* Buffer error reporting */
-#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3)
-#define SYS_PMBSR_EL1_COLL_SHIFT 16
-#define SYS_PMBSR_EL1_S_SHIFT 17
-#define SYS_PMBSR_EL1_EA_SHIFT 18
-#define SYS_PMBSR_EL1_DL_SHIFT 19
-#define SYS_PMBSR_EL1_EC_SHIFT 26
-#define SYS_PMBSR_EL1_EC_MASK 0x3fUL
-
-#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
-#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
-#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
-
-#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0
-#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL
+#define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT
+#define PMBSR_EL1_FAULT_FSC_MASK PMBSR_EL1_MSS_MASK
-#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0
-#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL
+#define PMBSR_EL1_BUF_BSC_SHIFT PMBSR_EL1_MSS_SHIFT
+#define PMBSR_EL1_BUF_BSC_MASK PMBSR_EL1_MSS_MASK
-#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
+#define PMBSR_EL1_BUF_BSC_FULL 0x1UL
/*** End of Statistical Profiling Extension ***/
@@ -575,6 +496,7 @@
#define SCTLR_ELx_DSSBS (BIT(44))
#define SCTLR_ELx_ATA (BIT(43))
+#define SCTLR_ELx_EE_SHIFT 25
#define SCTLR_ELx_ENIA_SHIFT 31
#define SCTLR_ELx_ITFSB (BIT(37))
@@ -583,7 +505,7 @@
#define SCTLR_ELx_LSMAOE (BIT(29))
#define SCTLR_ELx_nTLSMD (BIT(28))
#define SCTLR_ELx_ENDA (BIT(27))
-#define SCTLR_ELx_EE (BIT(25))
+#define SCTLR_ELx_EE (BIT(SCTLR_ELx_EE_SHIFT))
#define SCTLR_ELx_EIS (BIT(22))
#define SCTLR_ELx_IESB (BIT(21))
#define SCTLR_ELx_TSCXT (BIT(20))
@@ -809,8 +731,8 @@
#define ARM64_FEATURE_FIELD_BITS 4
-/* Create a mask for the feature bits of the specified feature. */
-#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
+/* Defined for compatibility only, do not add new users. */
+#define ARM64_FEATURE_MASK(x) (x##_MASK)
#ifdef __ASSEMBLY__
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index b713d30544f1..69a4fb749c65 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -96,5 +96,11 @@
#define HWCAP2_CSSC (1UL << 34)
#define HWCAP2_RPRFM (1UL << 35)
#define HWCAP2_SVE2P1 (1UL << 36)
+#define HWCAP2_SME2 (1UL << 37)
+#define HWCAP2_SME2P1 (1UL << 38)
+#define HWCAP2_SME_I16I32 (1UL << 39)
+#define HWCAP2_SME_BI32I32 (1UL << 40)
+#define HWCAP2_SME_B16B16 (1UL << 41)
+#define HWCAP2_SME_F16F16 (1UL << 42)
#endif /* _UAPI__ASM_HWCAP_H */
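
Userspace discovers the new HWCAP2_* bits above through the auxiliary vector. A minimal sketch (standard glibc getauxval() API; requires kernel headers new enough to define HWCAP2_SME2):

    #include <sys/auxv.h>
    #include <asm/hwcap.h>

    int have_sme2(void)
    {
            return (getauxval(AT_HWCAP2) & HWCAP2_SME2) != 0;
    }
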
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 9525041e4a14..656a10ea6c67 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -144,6 +144,14 @@ struct sve_context {
#define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */
+/* TPIDR2_EL0 context */
+#define TPIDR2_MAGIC 0x54504902
+
+struct tpidr2_context {
+ struct _aarch64_ctx head;
+ __u64 tpidr2;
+};
+
#define ZA_MAGIC 0x54366345
struct za_context {
@@ -152,6 +160,14 @@ struct za_context {
__u16 __reserved[3];
};
+#define ZT_MAGIC 0x5a544e01
+
+struct zt_context {
+ struct _aarch64_ctx head;
+ __u16 nregs;
+ __u16 __reserved[3];
+};
+
#endif /* !__ASSEMBLY__ */
#include <asm/sve_context.h>
@@ -304,4 +320,15 @@ struct za_context {
#define ZA_SIG_CONTEXT_SIZE(vq) \
(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
+#define ZT_SIG_REG_SIZE 512
+
+#define ZT_SIG_REG_BYTES (ZT_SIG_REG_SIZE / 8)
+
+#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
+
+#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
+
+#define ZT_SIG_CONTEXT_SIZE(n) \
+ (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))
+
#endif /* _UAPI__ASM_SIGCONTEXT_H */
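
A worked size for the new ZT signal record, assuming nregs == 1 (the only case SME2 currently defines): sizeof(struct zt_context) is 8 (_aarch64_ctx) + 2 (nregs) + 6 (reserved) = 16 bytes, ZT_SIG_REGS_SIZE(1) is 512/8 = 64 bytes, so ZT_SIG_CONTEXT_SIZE(1) is 80 bytes. A tiny userspace check (illustrative; needs headers that already carry these definitions):

    #include <stdio.h>
    #include <asm/sigcontext.h>

    int main(void)
    {
            printf("%zu\n", (size_t)ZT_SIG_CONTEXT_SIZE(1));   /* 16 + 64 = 80 */
            return 0;
    }
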
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 2234624536d9..ae345b06e9f7 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -9,6 +9,7 @@
#include <linux/arm_sdei.h>
#include <linux/sched.h>
+#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
@@ -194,5 +195,8 @@ int main(void)
DEFINE(KIMAGE_START, offsetof(struct kimage, start));
BLANK();
#endif
+#ifdef CONFIG_FUNCTION_TRACER
+ DEFINE(FTRACE_OPS_FUNC, offsetof(struct ftrace_ops, func));
+#endif
return 0;
}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a77315b338e6..da0f656079ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -65,6 +65,7 @@
#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
+#include <linux/kstrtox.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
@@ -283,16 +284,26 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0),
ARM64_FTR_END,
};
@@ -444,8 +455,8 @@ static const struct arm64_ftr_bits ftr_mvfr0[] = {
static const struct arm64_ftr_bits ftr_mvfr1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0),
@@ -529,12 +540,12 @@ static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
};
static const struct arm64_ftr_bits ftr_id_isar6[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -562,7 +573,7 @@ static const struct arm64_ftr_bits ftr_id_pfr1[] = {
};
static const struct arm64_ftr_bits ftr_id_pfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1795,7 +1806,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
static int __init parse_kpti(char *str)
{
bool enabled;
- int ret = strtobool(str, &enabled);
+ int ret = kstrtobool(str, &enabled);
if (ret)
return ret;
@@ -2039,14 +2050,50 @@ static bool enable_pseudo_nmi;
static int __init early_enable_pseudo_nmi(char *p)
{
- return strtobool(p, &enable_pseudo_nmi);
+ return kstrtobool(p, &enable_pseudo_nmi);
}
early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
int scope)
{
- return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
+ /*
+ * ARM64_HAS_GIC_CPUIF_SYSREGS has a lower index, and is a boot CPU
+ * feature, so will be detected earlier.
+ */
+ BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_MASKING <= ARM64_HAS_GIC_CPUIF_SYSREGS);
+ if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS))
+ return false;
+
+ return enable_pseudo_nmi;
+}
+
+static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ /*
+ * If we're not using priority masking then we won't be poking PMR_EL1,
+ * and there's no need to relax synchronization of writes to it, and
+ * ICC_CTLR_EL1 might not be accessible and we must avoid reads from
+ * that.
+ *
+ * ARM64_HAS_GIC_PRIO_MASKING has a lower index, and is a boot CPU
+ * feature, so will be detected earlier.
+ */
+ BUILD_BUG_ON(ARM64_HAS_GIC_PRIO_RELAXED_SYNC <= ARM64_HAS_GIC_PRIO_MASKING);
+ if (!cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING))
+ return false;
+
+ /*
+ * When Priority Mask Hint Enable (PMHE) == 0b0, PMR is not used as a
+ * hint for interrupt distribution, a DSB is not necessary when
+ * unmasking IRQs via PMR, and we can relax the barrier to a NOP.
+ *
+ * Linux itself doesn't use 1:N distribution, so has no need to
+ * set PMHE. The only reason to have it set is if EL3 requires it
+ * (and we can't change it).
+ */
+ return (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) == 0;
}
#endif
@@ -2142,7 +2189,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
},
{
.desc = "GIC system register CPU interface",
- .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+ .capability = ARM64_HAS_GIC_CPUIF_SYSREGS,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
@@ -2534,14 +2581,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
* Depends on having GICv3
*/
.desc = "IRQ priority masking",
- .capability = ARM64_HAS_IRQ_PRIO_MASKING,
+ .capability = ARM64_HAS_GIC_PRIO_MASKING,
.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = can_use_gic_priorities,
- .sys_reg = SYS_ID_AA64PFR0_EL1,
- .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = 1,
+ },
+ {
+ /*
+ * Depends on ARM64_HAS_GIC_PRIO_MASKING
+ */
+ .capability = ARM64_HAS_GIC_PRIO_RELAXED_SYNC,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = has_gic_prio_relaxed_sync,
},
#endif
#ifdef CONFIG_ARM64_E0PD
@@ -2649,6 +2699,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.cpu_enable = fa64_kernel_enable,
},
+ {
+ .desc = "SME2",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_SME2,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
+ .field_width = ID_AA64PFR1_EL1_SME_WIDTH,
+ .min_field_value = ID_AA64PFR1_EL1_SME_SME2,
+ .matches = has_cpuid_feature,
+ .cpu_enable = sme2_kernel_enable,
+ },
#endif /* CONFIG_ARM64_SME */
{
.desc = "WFx with timeout",
@@ -2777,7 +2839,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
@@ -2827,11 +2889,17 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#ifdef CONFIG_ARM64_SME
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME_IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_SMEver_SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_SMEver_SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16B16_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F16_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I8I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_BI32I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F32F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
#endif /* CONFIG_ARM64_SME */
{},
@@ -2866,11 +2934,19 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_EL1_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
+ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_FPHP_SHIFT, 4, FTR_UNSIGNED, 3, CAP_COMPAT_HWCAP, COMPAT_HWCAP_FPHP),
+ HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_EL1_SIMDHP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDHP),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
+ HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDDP),
+ HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDFHM),
+ HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SB),
+ HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_ASIMDBF16),
+ HWCAP_CAP(SYS_ID_ISAR6_EL1, ID_ISAR6_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_I8MM),
+ HWCAP_CAP(SYS_ID_PFR2_EL1, ID_PFR2_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SSBS),
#endif
{},
};
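
To summarise the two new GIC capabilities wired up above: priority masking is only used when pseudo-NMI is requested on the command line and the GIC CPU interface system registers are present, and the relaxed-sync capability additionally requires ICC_CTLR_EL1.PMHE == 0 so the DSB after PMR writes can be patched to a NOP. A condensed sketch of those decisions (illustrative only; example_ names are not part of this patch):

    static bool example_use_pmr_masking(void)
    {
            return enable_pseudo_nmi && cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS);
    }

    static bool example_relaxed_pmr_sync(void)
    {
            /* With PMHE clear, PMR is purely CPU-local and needs no DSB. */
            return cpus_have_cap(ARM64_HAS_GIC_PRIO_MASKING) &&
                   !(gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK);
    }
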
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 379695262b77..eb4378c23b3c 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -119,6 +119,12 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_CSSC] = "cssc",
[KERNEL_HWCAP_RPRFM] = "rprfm",
[KERNEL_HWCAP_SVE2P1] = "sve2p1",
+ [KERNEL_HWCAP_SME2] = "sme2",
+ [KERNEL_HWCAP_SME2P1] = "sme2p1",
+ [KERNEL_HWCAP_SME_I16I32] = "smei16i32",
+ [KERNEL_HWCAP_SME_BI32I32] = "smebi32i32",
+ [KERNEL_HWCAP_SME_B16B16] = "smeb16b16",
+ [KERNEL_HWCAP_SME_F16F16] = "smef16f16",
};
#ifdef CONFIG_COMPAT
@@ -146,6 +152,12 @@ static const char *const compat_hwcap_str[] = {
[COMPAT_KERNEL_HWCAP(VFPD32)] = NULL, /* Not possible on arm64 */
[COMPAT_KERNEL_HWCAP(LPAE)] = "lpae",
[COMPAT_KERNEL_HWCAP(EVTSTRM)] = "evtstrm",
+ [COMPAT_KERNEL_HWCAP(FPHP)] = "fphp",
+ [COMPAT_KERNEL_HWCAP(ASIMDHP)] = "asimdhp",
+ [COMPAT_KERNEL_HWCAP(ASIMDDP)] = "asimddp",
+ [COMPAT_KERNEL_HWCAP(ASIMDFHM)] = "asimdfhm",
+ [COMPAT_KERNEL_HWCAP(ASIMDBF16)] = "asimdbf16",
+ [COMPAT_KERNEL_HWCAP(I8MM)] = "i8mm",
};
#define COMPAT_KERNEL_HWCAP2(x) const_ilog2(COMPAT_HWCAP2_ ## x)
@@ -155,6 +167,8 @@ static const char *const compat_hwcap2_str[] = {
[COMPAT_KERNEL_HWCAP2(SHA1)] = "sha1",
[COMPAT_KERNEL_HWCAP2(SHA2)] = "sha2",
[COMPAT_KERNEL_HWCAP2(CRC32)] = "crc32",
+ [COMPAT_KERNEL_HWCAP2(SB)] = "sb",
+ [COMPAT_KERNEL_HWCAP2(SSBS)] = "ssbs",
};
#endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 229436f33df5..6325db1a2179 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -100,25 +100,35 @@ SYM_FUNC_START(sme_set_vq)
SYM_FUNC_END(sme_set_vq)
/*
- * Save the SME state
+ * Save the ZA and ZT state
*
* x0 - pointer to buffer for state
+ * x1 - number of ZT registers to save
*/
-SYM_FUNC_START(za_save_state)
- _sme_rdsvl 1, 1 // x1 = VL/8
- sme_save_za 0, x1, 12
+SYM_FUNC_START(sme_save_state)
+ _sme_rdsvl 2, 1 // x2 = VL/8
+ sme_save_za 0, x2, 12 // Leaves x0 pointing to the end of ZA
+
+ cbz x1, 1f
+ _str_zt 0
+1:
ret
-SYM_FUNC_END(za_save_state)
+SYM_FUNC_END(sme_save_state)
/*
- * Load the SME state
+ * Load the ZA and ZT state
*
* x0 - pointer to buffer for state
+ * x1 - number of ZT registers to save
*/
-SYM_FUNC_START(za_load_state)
- _sme_rdsvl 1, 1 // x1 = VL/8
- sme_load_za 0, x1, 12
+SYM_FUNC_START(sme_load_state)
+ _sme_rdsvl 2, 1 // x2 = VL/8
+ sme_load_za 0, x2, 12 // Leaves x0 pointing to the end of ZA
+
+ cbz x1, 1f
+ _ldr_zt 0
+1:
ret
-SYM_FUNC_END(za_load_state)
+SYM_FUNC_END(sme_load_state)
#endif /* CONFIG_ARM64_SME */
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 3b625f76ffba..350ed81324ac 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -65,13 +65,35 @@ SYM_CODE_START(ftrace_caller)
stp x29, x30, [sp, #FREGS_SIZE]
add x29, sp, #FREGS_SIZE
- sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
- mov x1, x9 // parent_ip (callsite's LR)
- ldr_l x2, function_trace_op // op
- mov x3, sp // regs
+ /* Prepare arguments for the tracer func */
+ sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
+ mov x1, x9 // parent_ip (callsite's LR)
+ mov x3, sp // regs
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ /*
+ * The literal pointer to the ops is at an 8-byte aligned boundary
+ * which is either 12 or 16 bytes before the BL instruction in the call
+ * site. See ftrace_call_adjust() for details.
+ *
+ * Therefore here the LR points at `literal + 16` or `literal + 20`,
+ * and we can find the address of the literal in either case by
+ * aligning to an 8-byte boundary and subtracting 16. We do the
+ * alignment first as this allows us to fold the subtraction into the
+ * LDR.
+ */
+ bic x2, x30, 0x7
+ ldr x2, [x2, #-16] // op
+
+ ldr x4, [x2, #FTRACE_OPS_FUNC] // op->func
+ blr x4 // op->func(ip, parent_ip, op, regs)
+
+#else
+ ldr_l x2, function_trace_op // op
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
- bl ftrace_stub
+ bl ftrace_stub // func(ip, parent_ip, op, regs)
+#endif
/*
* At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
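
The BIC+LDR sequence added above can be easier to follow in C. A sketch of the same address arithmetic, assuming the callsite layout described in the comment (8-byte-aligned ops literal 12 or 16 bytes before the BL, with LR pointing just past the BL); the helper name is illustrative only:

    static struct ftrace_ops *example_callsite_ops(unsigned long lr)
    {
            /* bic x2, x30, 0x7 ; ldr x2, [x2, #-16] */
            unsigned long literal = (lr & ~7UL) - 16;

            return *(struct ftrace_ops **)literal;
    }
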
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 11cb99c4d298..ab2a6e33c052 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -275,7 +275,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
alternative_else_nop_endif
1:
- scs_load tsk
+ scs_load_current
.else
add x21, sp, #PT_REGS_SIZE
get_current_task tsk
@@ -311,13 +311,16 @@ alternative_else_nop_endif
.endif
#ifdef CONFIG_ARM64_PSEUDO_NMI
- /* Save pmr */
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+ b .Lskip_pmr_save\@
+alternative_else_nop_endif
+
mrs_s x20, SYS_ICC_PMR_EL1
str x20, [sp, #S_PMR_SAVE]
mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
msr_s SYS_ICC_PMR_EL1, x20
-alternative_else_nop_endif
+
+.Lskip_pmr_save\@:
#endif
/*
@@ -336,15 +339,19 @@ alternative_else_nop_endif
.endif
#ifdef CONFIG_ARM64_PSEUDO_NMI
- /* Restore pmr */
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
+ b .Lskip_pmr_restore\@
+alternative_else_nop_endif
+
ldr x20, [sp, #S_PMR_SAVE]
msr_s SYS_ICC_PMR_EL1, x20
- mrs_s x21, SYS_ICC_CTLR_EL1
- tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE
- dsb sy // Ensure priority change is seen by redistributor
-.L__skip_pmr_sync\@:
+
+ /* Ensure priority change is seen by redistributor */
+alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
+ dsb sy
alternative_else_nop_endif
+
+.Lskip_pmr_restore\@:
#endif
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
@@ -848,7 +855,7 @@ SYM_FUNC_START(cpu_switch_to)
msr sp_el0, x1
ptrauth_keys_install_kernel x1, x8, x9, x10
scs_save x0
- scs_load x1
+ scs_load_current
ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
@@ -876,19 +883,19 @@ NOKPROBE(ret_from_fork)
*/
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
- stp scs_sp, xzr, [sp, #-16]!
+ get_current_task x16
+ scs_save x16
ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif
+
/* Create a frame record to save our LR and SP (implicit in FP) */
stp x29, x30, [sp, #-16]!
mov x29, sp
ldr_this_cpu x16, irq_stack_ptr, x17
- mov x15, #IRQ_STACK_SIZE
- add x16, x16, x15
/* Move to the new stack and call the function there */
- mov sp, x16
+ add sp, x16, #IRQ_STACK_SIZE
blr x1
/*
@@ -897,9 +904,7 @@ SYM_FUNC_START(call_on_irq_stack)
*/
mov sp, x29
ldp x29, x30, [sp], #16
-#ifdef CONFIG_SHADOW_CALL_STACK
- ldp scs_sp, xzr, [sp], #16
-#endif
+ scs_load_current
ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index dcc81e7200d4..c11cb445ffca 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -299,7 +299,7 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
/*
* TIF_SME controls whether a task can use SME without trapping while
* in userspace, when TIF_SME is set then we must have storage
- * alocated in sve_state and za_state to store the contents of both ZA
+ * allocated in sve_state and sme_state to store the contents of both ZA
* and the SVE registers for both streaming and non-streaming modes.
*
* If both SVCR.ZA and SVCR.SM are disabled then at any point we
@@ -429,7 +429,8 @@ static void task_fpsimd_load(void)
write_sysreg_s(current->thread.svcr, SYS_SVCR);
if (thread_za_enabled(&current->thread))
- za_load_state(current->thread.za_state);
+ sme_load_state(current->thread.sme_state,
+ system_supports_sme2());
if (thread_sm_enabled(&current->thread))
restore_ffr = system_supports_fa64();
@@ -490,7 +491,8 @@ static void fpsimd_save(void)
*svcr = read_sysreg_s(SYS_SVCR);
if (*svcr & SVCR_ZA_MASK)
- za_save_state(last->za_state);
+ sme_save_state(last->sme_state,
+ system_supports_sme2());
/* If we are in streaming mode override regular SVE. */
if (*svcr & SVCR_SM_MASK) {
@@ -1257,30 +1259,30 @@ void fpsimd_release_task(struct task_struct *dead_task)
#ifdef CONFIG_ARM64_SME
/*
- * Ensure that task->thread.za_state is allocated and sufficiently large.
+ * Ensure that task->thread.sme_state is allocated and sufficiently large.
*
* This function should be used only in preparation for replacing
- * task->thread.za_state with new data. The memory is always zeroed
+ * task->thread.sme_state with new data. The memory is always zeroed
* here to prevent stale data from showing through: this is done in
* the interest of testability and predictability, the architecture
* guarantees that when ZA is enabled it will be zeroed.
*/
void sme_alloc(struct task_struct *task)
{
- if (task->thread.za_state) {
- memset(task->thread.za_state, 0, za_state_size(task));
+ if (task->thread.sme_state) {
+ memset(task->thread.sme_state, 0, sme_state_size(task));
return;
}
/* This could potentially be up to 64K. */
- task->thread.za_state =
- kzalloc(za_state_size(task), GFP_KERNEL);
+ task->thread.sme_state =
+ kzalloc(sme_state_size(task), GFP_KERNEL);
}
static void sme_free(struct task_struct *task)
{
- kfree(task->thread.za_state);
- task->thread.za_state = NULL;
+ kfree(task->thread.sme_state);
+ task->thread.sme_state = NULL;
}
void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
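The rename from za_state to sme_state is not cosmetic: with SME2 the buffer also has to carry ZT0, so its size depends on both the SME vector length and system_supports_sme2(). A hedged sketch of the layout the rest of this series appears to rely on; the real helpers live in asm/fpsimd.h and asm/processor.h, and this sketch only restates the assumption:

    /*
     * Assumed layout of thread.sme_state: the ZA matrix (vl x vl bytes)
     * first, then ZT0 when SME2 is present. thread_zt_state() is taken to
     * return sme_state + ZA_SIG_REGS_SIZE(vq). Sketch only.
     */
    static size_t sme_state_size_sketch(const struct task_struct *task)
    {
            unsigned int vq = sve_vq_from_vl(task_get_sme_vl(task));
            size_t size = ZA_SIG_REGS_SIZE(vq);

            if (system_supports_sme2())
                    size += ZT_SIG_REG_BYTES;

            return size;
    }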
@@ -1302,6 +1304,17 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
* This must be called after sme_kernel_enable(), we rely on the
* feature table being sorted to ensure this.
*/
+void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+{
+ /* Allow use of ZT0 */
+ write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
+ SYS_SMCR_EL1);
+}
+
+/*
+ * This must be called after sme_kernel_enable(), we rely on the
+ * feature table being sorted to ensure this.
+ */
void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
/* Allow use of FA64 */
@@ -1322,7 +1335,6 @@ u64 read_smcr_features(void)
unsigned int vq_max;
sme_kernel_enable(NULL);
- sme_smstart_sm();
/*
* Set the maximum possible VL.
@@ -1332,11 +1344,9 @@ u64 read_smcr_features(void)
smcr = read_sysreg_s(SYS_SMCR_EL1);
smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */
- vq_max = sve_vq_from_vl(sve_get_vl());
+ vq_max = sve_vq_from_vl(sme_get_vl());
smcr |= vq_max - 1; /* set LEN field to maximum effective value */
- sme_smstop_sm();
-
return smcr;
}
@@ -1488,7 +1498,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
sve_alloc(current, false);
sme_alloc(current);
- if (!current->thread.sve_state || !current->thread.za_state) {
+ if (!current->thread.sve_state || !current->thread.sme_state) {
force_sig(SIGKILL);
return;
}
@@ -1609,7 +1619,7 @@ static void fpsimd_flush_thread_vl(enum vec_type type)
void fpsimd_flush_thread(void)
{
void *sve_state = NULL;
- void *za_state = NULL;
+ void *sme_state = NULL;
if (!system_supports_fpsimd())
return;
@@ -1634,8 +1644,8 @@ void fpsimd_flush_thread(void)
clear_thread_flag(TIF_SME);
/* Defer kfree() while in atomic context */
- za_state = current->thread.za_state;
- current->thread.za_state = NULL;
+ sme_state = current->thread.sme_state;
+ current->thread.sme_state = NULL;
fpsimd_flush_thread_vl(ARM64_VEC_SME);
current->thread.svcr = 0;
@@ -1645,7 +1655,7 @@ void fpsimd_flush_thread(void)
put_cpu_fpsimd_context();
kfree(sve_state);
- kfree(za_state);
+ kfree(sme_state);
}
/*
@@ -1711,7 +1721,7 @@ static void fpsimd_bind_task_to_cpu(void)
WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
- last->za_state = current->thread.za_state;
+ last->sme_state = current->thread.sme_state;
last->sve_vl = task_get_sve_vl(current);
last->sme_vl = task_get_sme_vl(current);
last->svcr = &current->thread.svcr;
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index b30b955a8921..5545fe1a9012 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -60,6 +60,89 @@ int ftrace_regs_query_register_offset(const char *name)
}
#endif
+unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /*
+ * When using mcount, addr is the address of the mcount call
+ * instruction, and no adjustment is necessary.
+ */
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
+ return addr;
+
+ /*
+ * When using patchable-function-entry without pre-function NOPs, addr
+ * is the address of the first NOP after the function entry point.
+ *
+ * The compiler has either generated:
+ *
+ * addr+00: func: NOP // To be patched to MOV X9, LR
+ * addr+04: NOP // To be patched to BL <caller>
+ *
+ * Or:
+ *
+ * addr-04: BTI C
+ * addr+00: func: NOP // To be patched to MOV X9, LR
+ * addr+04: NOP // To be patched to BL <caller>
+ *
+ * We must adjust addr to the address of the NOP which will be patched
+ * to `BL <caller>`, which is at `addr + 4` bytes in either case.
+ *
+ */
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+ return addr + AARCH64_INSN_SIZE;
+
+ /*
+ * When using patchable-function-entry with pre-function NOPs, addr is
+ * the address of the first pre-function NOP.
+ *
+ * Starting from an 8-byte aligned base, the compiler has either
+ * generated:
+ *
+ * addr+00: NOP // Literal (first 32 bits)
+ * addr+04: NOP // Literal (last 32 bits)
+ * addr+08: func: NOP // To be patched to MOV X9, LR
+ * addr+12: NOP // To be patched to BL <caller>
+ *
+ * Or:
+ *
+ * addr+00: NOP // Literal (first 32 bits)
+ * addr+04: NOP // Literal (last 32 bits)
+ * addr+08: func: BTI C
+ * addr+12: NOP // To be patched to MOV X9, LR
+ * addr+16: NOP // To be patched to BL <caller>
+ *
+ * We must adjust addr to the address of the NOP which will be patched
+ * to `BL <caller>`, which is at either addr+12 or addr+16 depending on
+ * whether there is a BTI.
+ */
+
+ if (!IS_ALIGNED(addr, sizeof(unsigned long))) {
+ WARN_RATELIMIT(1, "Misaligned patch-site %pS\n",
+ (void *)(addr + 8));
+ return 0;
+ }
+
+ /* Skip the NOPs placed before the function entry point */
+ addr += 2 * AARCH64_INSN_SIZE;
+
+ /* Skip any BTI */
+ if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
+ u32 insn = le32_to_cpu(*(__le32 *)addr);
+
+ if (aarch64_insn_is_bti(insn)) {
+ addr += AARCH64_INSN_SIZE;
+ } else if (insn != aarch64_insn_gen_nop()) {
+ WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n",
+ (void *)addr, insn);
+ }
+ }
+
+ /* Skip the first NOP after function entry */
+ addr += AARCH64_INSN_SIZE;
+
+ return addr;
+}
+
/*
* Replace a single instruction, which may be a branch or NOP.
* If @validate == true, a replaced instruction is checked against 'old'.
@@ -98,6 +181,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned long pc;
u32 new;
+ /*
+ * When using CALL_OPS, the function to call is associated with the
+ * call site, and we don't have a global function pointer to update.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+ return 0;
+
pc = (unsigned long)ftrace_call;
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
AARCH64_INSN_BRANCH_LINK);
@@ -176,6 +266,44 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
return true;
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec)
+{
+ const struct ftrace_ops *ops = NULL;
+
+ if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+ ops = ftrace_find_unique_ops(rec);
+ WARN_ON_ONCE(!ops);
+ }
+
+ if (!ops)
+ ops = &ftrace_list_ops;
+
+ return ops;
+}
+
+static int ftrace_rec_set_ops(const struct dyn_ftrace *rec,
+ const struct ftrace_ops *ops)
+{
+ unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8);
+ return aarch64_insn_write_literal_u64((void *)literal,
+ (unsigned long)ops);
+}
+
+static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
+{
+ return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
+}
+
+static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
+{
+ return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec));
+}
+#else
+static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
+static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
+#endif
+
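On the writer side, rec->ip points at the BL slot (after ftrace_call_adjust()), so the literal sits either 12 bytes (no BTI) or 16 bytes (BTI) earlier, and ALIGN_DOWN(rec->ip - 12, 8) lands on it in both cases. A quick check with the same invented addresses used earlier: 0x100c - 12 = 0x1000, and 0x1010 - 12 = 0x1004, which rounds down to 0x1000.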
/*
* Turn on the call to ftrace_caller() in instrumented function
*/
@@ -183,6 +311,11 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long pc = rec->ip;
u32 old, new;
+ int ret;
+
+ ret = ftrace_rec_update_ops(rec);
+ if (ret)
+ return ret;
if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;
@@ -193,6 +326,19 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_modify_code(pc, old, new, true);
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ if (WARN_ON_ONCE(old_addr != (unsigned long)ftrace_caller))
+ return -EINVAL;
+ if (WARN_ON_ONCE(addr != (unsigned long)ftrace_caller))
+ return -EINVAL;
+
+ return ftrace_rec_update_ops(rec);
+}
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
* The compiler has inserted two NOPs before the regular function prologue.
@@ -209,7 +355,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* | NOP | MOV X9, LR | MOV X9, LR |
* | NOP | NOP | BL <entry> |
*
- * The LR value will be recovered by ftrace_regs_entry, and restored into LR
+ * The LR value will be recovered by ftrace_caller, and restored into LR
* before returning to the regular function prologue. When a function is not
* being traced, the MOV is not harmful given x9 is not live per the AAPCS.
*
@@ -220,6 +366,11 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
u32 old, new;
+ int ret;
+
+ ret = ftrace_rec_set_nop_ops(rec);
+ if (ret)
+ return ret;
old = aarch64_insn_gen_nop();
new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
@@ -237,9 +388,14 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
{
unsigned long pc = rec->ip;
u32 old = 0, new;
+ int ret;
new = aarch64_insn_gen_nop();
+ ret = ftrace_rec_set_nop_ops(rec);
+ if (ret)
+ return ret;
+
/*
* When using mcount, callsites in modules may have been initalized to
* call an arbitrary module PLT (which redirects to the _mcount stub)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 952e17bd1c0b..212d93aca5e6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -70,13 +70,14 @@
__EFI_PE_HEADER
- __INIT
+ .section ".idmap.text","awx"
/*
* The following callee saved general purpose registers are used on the
* primary lowlevel boot path:
*
* Register Scope Purpose
+ * x19 primary_entry() .. start_kernel() whether we entered with the MMU on
* x20 primary_entry() .. __primary_switch() CPU boot mode
* x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0
* x22 create_idmap() .. start_kernel() ID map VA of the DT blob
@@ -86,10 +87,22 @@
* x28 create_idmap() callee preserved temp register
*/
SYM_CODE_START(primary_entry)
+ bl record_mmu_state
bl preserve_boot_args
+ bl create_idmap
+
+ /*
+ * If we entered with the MMU and caches on, clean the ID mapped part
+ * of the primary boot code to the PoC so we can safely execute it with
+ * the MMU off.
+ */
+ cbz x19, 0f
+ adrp x0, __idmap_text_start
+ adr_l x1, __idmap_text_end
+ bl dcache_clean_poc
+0: mov x0, x19
bl init_kernel_el // w0=cpu_boot_mode
mov x20, x0
- bl create_idmap
/*
* The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -109,6 +122,40 @@ SYM_CODE_START(primary_entry)
b __primary_switch
SYM_CODE_END(primary_entry)
+ __INIT
+SYM_CODE_START_LOCAL(record_mmu_state)
+ mrs x19, CurrentEL
+ cmp x19, #CurrentEL_EL2
+ mrs x19, sctlr_el1
+ b.ne 0f
+ mrs x19, sctlr_el2
+0:
+CPU_LE( tbnz x19, #SCTLR_ELx_EE_SHIFT, 1f )
+CPU_BE( tbz x19, #SCTLR_ELx_EE_SHIFT, 1f )
+ tst x19, #SCTLR_ELx_C // Z := (C == 0)
+ and x19, x19, #SCTLR_ELx_M // isolate M bit
+ csel x19, xzr, x19, eq // clear x19 if Z
+ ret
+
+ /*
+ * Set the correct endianness early so all memory accesses issued
+ * before init_kernel_el() occur in the correct byte order. Note that
+ * this means the MMU must be disabled, or the active ID map will end
+ * up getting interpreted with the wrong byte order.
+ */
+1: eor x19, x19, #SCTLR_ELx_EE
+ bic x19, x19, #SCTLR_ELx_M
+ b.ne 2f
+ pre_disable_mmu_workaround
+ msr sctlr_el2, x19
+ b 3f
+ pre_disable_mmu_workaround
+2: msr sctlr_el1, x19
+3: isb
+ mov x19, xzr
+ ret
+SYM_CODE_END(record_mmu_state)
+
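record_mmu_state is easier to follow as C: it returns (in x19) a non-zero value only when the CPU entered with the MMU on, the D-cache enabled and SCTLR_ELx.EE matching the kernel's endianness; otherwise it returns zero, after first correcting EE and switching the MMU off if the endianness was wrong. An illustrative paraphrase (ee_matches stands in for the CPU_LE/CPU_BE test and is not a real helper):

    static unsigned long record_mmu_state_sketch(unsigned long sctlr, bool ee_matches)
    {
            if (!ee_matches) {
                    /* asm: flip SCTLR_ELx.EE, clear SCTLR_ELx.M, write back, isb */
                    return 0;
            }

            if (!(sctlr & SCTLR_ELx_C))
                    return 0;               /* D-cache off: treat as an MMU-off boot */

            return sctlr & SCTLR_ELx_M;     /* non-zero iff the MMU was on at entry */
    }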
/*
* Preserve the arguments passed by the bootloader in x0 .. x3
*/
@@ -119,11 +166,14 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
stp x21, x1, [x0] // x0 .. x3 at kernel entry
stp x2, x3, [x0, #16]
+ cbnz x19, 0f // skip cache invalidation if MMU is on
dmb sy // needed before dc ivac with
// MMU off
add x1, x0, #0x20 // 4 x 8 bytes
b dcache_inval_poc // tail call
+0: str_l x19, mmu_enabled_at_boot, x0
+ ret
SYM_CODE_END(preserve_boot_args)
SYM_FUNC_START_LOCAL(clear_page_tables)
@@ -360,12 +410,13 @@ SYM_FUNC_START_LOCAL(create_idmap)
* accesses (MMU disabled), invalidate those tables again to
* remove any speculatively loaded cache lines.
*/
+ cbnz x19, 0f // skip cache invalidation if MMU is on
dmb sy
adrp x0, init_idmap_pg_dir
adrp x1, init_idmap_pg_end
bl dcache_inval_poc
- ret x28
+0: ret x28
SYM_FUNC_END(create_idmap)
SYM_FUNC_START_LOCAL(create_kernel_mapping)
@@ -404,7 +455,7 @@ SYM_FUNC_END(create_kernel_mapping)
stp xzr, xzr, [sp, #S_STACKFRAME]
add x29, sp, #S_STACKFRAME
- scs_load \tsk
+ scs_load_current
adr_l \tmp1, __per_cpu_offset
ldr w\tmp2, [\tsk, #TSK_TI_CPU]
@@ -489,14 +540,17 @@ SYM_FUNC_END(__primary_switched)
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
* booted in EL1 or EL2 respectively, with the top 32 bits containing
* potential context flags. These flags are *not* stored in __boot_cpu_mode.
+ *
+ * x0: whether we are being called from the primary boot path with the MMU on
*/
SYM_FUNC_START(init_kernel_el)
- mrs x0, CurrentEL
- cmp x0, #CurrentEL_EL2
+ mrs x1, CurrentEL
+ cmp x1, #CurrentEL_EL2
b.eq init_el2
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ pre_disable_mmu_workaround
msr sctlr_el1, x0
isb
mov_q x0, INIT_PSTATE_EL1
@@ -506,6 +560,14 @@ SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
eret
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
+ msr elr_el2, lr
+
+ // clean all HYP code to the PoC if we booted at EL2 with the MMU on
+ cbz x0, 0f
+ adrp x0, __hyp_idmap_text_start
+ adr_l x1, __hyp_text_end
+ bl dcache_clean_poc
+0:
mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
isb
@@ -529,38 +591,27 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
cbz x0, 1f
/* Set a sane SCTLR_EL1, the VHE way */
+ pre_disable_mmu_workaround
msr_s SYS_SCTLR_EL12, x1
mov x2, #BOOT_CPU_FLAG_E2H
b 2f
1:
+ pre_disable_mmu_workaround
msr sctlr_el1, x1
mov x2, xzr
2:
- msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2
orr x0, x0, x2
eret
SYM_FUNC_END(init_kernel_el)
-/*
- * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
- * in w0. See arch/arm64/include/asm/virt.h for more info.
- */
-SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
- adr_l x1, __boot_cpu_mode
- cmp w0, #BOOT_CPU_MODE_EL2
- b.ne 1f
- add x1, x1, #4
-1: str w0, [x1] // Save CPU boot mode
- ret
-SYM_FUNC_END(set_cpu_boot_mode_flag)
-
/*
* This provides a "holding pen" for platforms to hold all secondary
* cores are held until we're ready for them to initialise.
*/
SYM_FUNC_START(secondary_holding_pen)
+ mov x0, xzr
bl init_kernel_el // w0=cpu_boot_mode
mrs x2, mpidr_el1
mov_q x1, MPIDR_HWID_BITMASK
@@ -578,6 +629,7 @@ SYM_FUNC_END(secondary_holding_pen)
* be used where CPUs are brought online dynamically by the kernel.
*/
SYM_FUNC_START(secondary_entry)
+ mov x0, xzr
bl init_kernel_el // w0=cpu_boot_mode
b secondary_startup
SYM_FUNC_END(secondary_entry)
@@ -587,7 +639,6 @@ SYM_FUNC_START_LOCAL(secondary_startup)
* Common entry point for secondary CPUs.
*/
mov x20, x0 // preserve boot mode
- bl finalise_el2
bl __cpu_secondary_check52bitva
#if VA_BITS > 48
ldr_l x0, vabits_actual
@@ -600,9 +651,14 @@ SYM_FUNC_START_LOCAL(secondary_startup)
br x8
SYM_FUNC_END(secondary_startup)
+ .text
SYM_FUNC_START_LOCAL(__secondary_switched)
mov x0, x20
bl set_cpu_boot_mode_flag
+
+ mov x0, x20
+ bl finalise_el2
+
str_l xzr, __early_cpu_boot_status, x3
adr_l x5, vectors
msr vbar_el1, x5
@@ -629,6 +685,19 @@ SYM_FUNC_START_LOCAL(__secondary_too_slow)
SYM_FUNC_END(__secondary_too_slow)
/*
+ * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
+ * in w0. See arch/arm64/include/asm/virt.h for more info.
+ */
+SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
+ adr_l x1, __boot_cpu_mode
+ cmp w0, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+1: str w0, [x1] // Save CPU boot mode
+ ret
+SYM_FUNC_END(set_cpu_boot_mode_flag)
+
+/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
*
@@ -659,6 +728,7 @@ SYM_FUNC_END(__secondary_too_slow)
* Checks if the selected granule size is supported by the CPU.
* If it isn't, park the CPU
*/
+ .section ".idmap.text","awx"
SYM_FUNC_START(__enable_mmu)
mrs x3, ID_AA64MMFR0_EL1
ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 2ee18c860f2a..111ff33d93ee 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -132,6 +132,13 @@ SYM_CODE_START_LOCAL(__finalise_el2)
orr x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64:
+ // ZT0 available?
+ mrs_s x1, SYS_ID_AA64SMFR0_EL1
+ __check_override id_aa64smfr0 ID_AA64SMFR0_EL1_SMEver_SHIFT 4 .Linit_sme_zt0 .Lskip_sme_zt0
+.Linit_sme_zt0:
+ orr x0, x0, SMCR_ELx_EZT0_MASK
+.Lskip_sme_zt0:
+
orr x0, x0, #SMCR_ELx_LEN_MASK // Enable full SME vector
msr_s SYS_SMCR_EL2, x0 // length for EL1.
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 95133765ed29..d833d78a7f31 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -131,6 +131,7 @@ static const struct ftr_set_desc smfr0 __initconst = {
.name = "id_aa64smfr0",
.override = &id_aa64smfr0_override,
.fields = {
+ FIELD("smever", ID_AA64SMFR0_EL1_SMEver_SHIFT, NULL),
/* FA64 is a one bit field... :-/ */
{ "fa64", ID_AA64SMFR0_EL1_FA64_SHIFT, 1, },
{}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index d0e9bb5c91fc..8309197c0ebd 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -10,7 +10,7 @@
#error This file should only be included in vmlinux.lds.S
#endif
-PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
+PROVIDE(__efistub_primary_entry = primary_entry);
/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
@@ -21,10 +21,11 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
* linked at. The routines below are all implemented in assembler in a
* position independent manner
*/
-PROVIDE(__efistub_dcache_clean_poc = __pi_dcache_clean_poc);
+PROVIDE(__efistub_caches_clean_inval_pou = __pi_caches_clean_inval_pou);
PROVIDE(__efistub__text = _text);
PROVIDE(__efistub__end = _end);
+PROVIDE(__efistub___inittext_end = __inittext_end);
PROVIDE(__efistub__edata = _edata);
PROVIDE(__efistub_screen_info = screen_info);
PROVIDE(__efistub__ctype = _ctype);
@@ -67,9 +68,7 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
-/* Static key checked in pmr_sync(). */
#ifdef CONFIG_ARM64_PSEUDO_NMI
-KVM_NVHE_ALIAS(gic_pmr_sync);
/* Static key checked in GIC_PRIO_IRQOFF. */
KVM_NVHE_ALIAS(gic_nonsecure_priorities);
#endif
diff --git a/arch/arm64/kernel/patch-scs.c b/arch/arm64/kernel/patch-scs.c
index 1b3da02d5b74..a1fe4b4ff591 100644
--- a/arch/arm64/kernel/patch-scs.c
+++ b/arch/arm64/kernel/patch-scs.c
@@ -130,7 +130,8 @@ struct eh_frame {
static int noinstr scs_handle_fde_frame(const struct eh_frame *frame,
bool fde_has_augmentation_data,
- int code_alignment_factor)
+ int code_alignment_factor,
+ bool dry_run)
{
int size = frame->size - offsetof(struct eh_frame, opcodes) + 4;
u64 loc = (u64)offset_to_ptr(&frame->initial_loc);
@@ -184,7 +185,8 @@ static int noinstr scs_handle_fde_frame(const struct eh_frame *frame,
break;
case DW_CFA_negate_ra_state:
- scs_patch_loc(loc - 4);
+ if (!dry_run)
+ scs_patch_loc(loc - 4);
break;
case 0x40 ... 0x7f:
@@ -235,9 +237,12 @@ int noinstr scs_patch(const u8 eh_frame[], int size)
} else {
ret = scs_handle_fde_frame(frame,
fde_has_augmentation_data,
- code_alignment_factor);
+ code_alignment_factor,
+ true);
if (ret)
return ret;
+ scs_handle_fde_frame(frame, fde_has_augmentation_data,
+ code_alignment_factor, false);
}
p += sizeof(frame->size) + frame->size;
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
index 33e0fabc0b79..b4835f6d594b 100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -88,6 +88,23 @@ int __kprobes aarch64_insn_write(void *addr, u32 insn)
return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
+noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val)
+{
+ u64 *waddr;
+ unsigned long flags;
+ int ret;
+
+ raw_spin_lock_irqsave(&patch_lock, flags);
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ ret = copy_to_kernel_nofault(waddr, &val, sizeof(val));
+
+ patch_unmap(FIX_TEXT_POKE0);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+ return ret;
+}
+
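This helper is what lets a callsite's ops pointer be switched while the callsite may be executing: the literal is a naturally aligned 64-bit slot, so, assuming the copy is carried out as a single 64-bit store, a concurrent ftrace_caller observes either the old or the new pointer, never a torn value. A hedged sketch of a caller, essentially restating ftrace_rec_set_ops() from the ftrace.c hunk above; set_callsite_ops() is illustrative, not a function in the tree:

    static int set_callsite_ops(unsigned long bl_ip, const struct ftrace_ops *ops)
    {
            /* The literal is 12 (no BTI) or 16 (BTI) bytes before the BL. */
            unsigned long literal = ALIGN_DOWN(bl_ip - 12, 8);

            return aarch64_insn_write_literal_u64((void *)literal,
                                                  (unsigned long)ops);
    }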
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
u32 *tp = addr;
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index f35d059a9a36..70b91a8c6bb3 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -387,10 +387,6 @@ int __init arch_populate_kprobe_blacklist(void)
(unsigned long)__irqentry_text_end);
if (ret)
return ret;
- ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
- (unsigned long)__idmap_text_end);
- if (ret)
- return ret;
ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
(unsigned long)__hyp_text_end);
if (ret || is_kernel_in_hyp_mode())
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 269ac1c25ae2..71d59b5abede 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -307,27 +307,28 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* In the unlikely event that we create a new thread with ZA
- * enabled we should retain the ZA state so duplicate it here.
- * This may be shortly freed if we exec() or if CLONE_SETTLS
- * but it's simpler to do it here. To avoid confusing the rest
- * of the code ensure that we have a sve_state allocated
- * whenever za_state is allocated.
+ * enabled we should retain the ZA and ZT state so duplicate
+ * it here. This may be shortly freed if we exec() or if
+ * CLONE_SETTLS but it's simpler to do it here. To avoid
+ * confusing the rest of the code ensure that we have a
+ * sve_state allocated whenever sme_state is allocated.
*/
if (thread_za_enabled(&src->thread)) {
dst->thread.sve_state = kzalloc(sve_state_size(src),
GFP_KERNEL);
if (!dst->thread.sve_state)
return -ENOMEM;
- dst->thread.za_state = kmemdup(src->thread.za_state,
- za_state_size(src),
- GFP_KERNEL);
- if (!dst->thread.za_state) {
+
+ dst->thread.sme_state = kmemdup(src->thread.sme_state,
+ sme_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sme_state) {
kfree(dst->thread.sve_state);
dst->thread.sve_state = NULL;
return -ENOMEM;
}
} else {
- dst->thread.za_state = NULL;
+ dst->thread.sme_state = NULL;
clear_tsk_thread_flag(dst, TIF_SME);
}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 2686ab157601..143a971d7511 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -683,7 +683,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
unsigned long tls[2];
tls[0] = target->thread.uw.tp_value;
- if (system_supports_sme())
+ if (system_supports_tpidr2())
tls[1] = target->thread.tpidr2_el0;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
@@ -691,7 +691,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
return ret;
target->thread.uw.tp_value = tls[0];
- if (system_supports_sme())
+ if (system_supports_tpidr2())
target->thread.tpidr2_el0 = tls[1];
return ret;
@@ -1045,7 +1045,7 @@ static int za_get(struct task_struct *target,
if (thread_za_enabled(&target->thread)) {
start = end;
end = ZA_PT_SIZE(vq);
- membuf_write(&to, target->thread.za_state, end - start);
+ membuf_write(&to, target->thread.sme_state, end - start);
}
/* Zero any trailing padding */
@@ -1099,7 +1099,7 @@ static int za_set(struct task_struct *target,
/* Allocate/reinit ZA storage */
sme_alloc(target);
- if (!target->thread.za_state) {
+ if (!target->thread.sme_state) {
ret = -ENOMEM;
goto out;
}
@@ -1124,7 +1124,7 @@ static int za_set(struct task_struct *target,
start = ZA_PT_ZA_OFFSET;
end = ZA_PT_SIZE(vq);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- target->thread.za_state,
+ target->thread.sme_state,
start, end);
if (ret)
goto out;
@@ -1138,6 +1138,51 @@ out:
return ret;
}
+static int zt_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!system_supports_sme2())
+ return -EINVAL;
+
+ /*
+ * If PSTATE.ZA is not set then ZT will be zeroed when it is
+ * enabled so report the current register value as zero.
+ */
+ if (thread_za_enabled(&target->thread))
+ membuf_write(&to, thread_zt_state(&target->thread),
+ ZT_SIG_REG_BYTES);
+ else
+ membuf_zero(&to, ZT_SIG_REG_BYTES);
+
+ return 0;
+}
+
+static int zt_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!system_supports_sme2())
+ return -EINVAL;
+
+ if (!thread_za_enabled(&target->thread)) {
+ sme_alloc(target);
+ if (!target->thread.sme_state)
+ return -ENOMEM;
+ }
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ thread_zt_state(&target->thread),
+ 0, ZT_SIG_REG_BYTES);
+ if (ret == 0)
+ target->thread.svcr |= SVCR_ZA_MASK;
+
+ return ret;
+}
+
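From userspace the new regset is reached with the usual PTRACE_GETREGSET convention. A minimal, hedged sketch of reading a stopped tracee's ZT0, assuming NT_ARM_ZT and ZT_SIG_REG_BYTES come from the UAPI headers added alongside this series:

    #include <elf.h>                /* NT_ARM_ZT (assumed exported by this series) */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/sigcontext.h>     /* ZT_SIG_REG_BYTES */

    static long read_zt0(pid_t pid, unsigned char buf[ZT_SIG_REG_BYTES])
    {
            struct iovec iov = {
                    .iov_base = buf,
                    .iov_len  = ZT_SIG_REG_BYTES,
            };

            /*
             * Fails with EINVAL when the system has no SME2; reads back
             * zeroes when the tracee's PSTATE.ZA is clear, per zt_get().
             */
            return ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_ZT, &iov);
    }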
#endif /* CONFIG_ARM64_SME */
#ifdef CONFIG_ARM64_PTR_AUTH
@@ -1360,6 +1405,7 @@ enum aarch64_regset {
#ifdef CONFIG_ARM64_SVE
REGSET_SSVE,
REGSET_ZA,
+ REGSET_ZT,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
REGSET_PAC_MASK,
@@ -1467,6 +1513,14 @@ static const struct user_regset aarch64_regsets[] = {
.regset_get = za_get,
.set = za_set,
},
+ [REGSET_ZT] = { /* SME ZT */
+ .core_note_type = NT_ARM_ZT,
+ .n = 1,
+ .size = ZT_SIG_REG_BYTES,
+ .align = sizeof(u64),
+ .regset_get = zt_get,
+ .set = zt_set,
+ },
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
[REGSET_PAC_MASK] = {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 12cfe9d0d3fa..b8ec7b3ac9cb 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -58,6 +58,7 @@ static int num_standard_resources;
static struct resource *standard_resources;
phys_addr_t __fdt_pointer __initdata;
+u64 mmu_enabled_at_boot __initdata;
/*
* Standard memory resources
@@ -332,8 +333,12 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
xen_early_init();
efi_init();
- if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
- pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+ if (!efi_enabled(EFI_BOOT)) {
+ if ((u64)_text % MIN_KIMG_ALIGN)
+ pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+ WARN_TAINT(mmu_enabled_at_boot, TAINT_FIRMWARE_WORKAROUND,
+ FW_BUG "Booted with MMU enabled!");
+ }
arm64_memblock_init();
@@ -442,3 +447,11 @@ static int __init register_arm64_panic_block(void)
return 0;
}
device_initcall(register_arm64_panic_block);
+
+static int __init check_mmu_enabled_at_boot(void)
+{
+ if (!efi_enabled(EFI_BOOT) && mmu_enabled_at_boot)
+ panic("Non-EFI boot detected with MMU and caches enabled");
+ return 0;
+}
+device_initcall_sync(check_mmu_enabled_at_boot);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index e0d09bf5b01b..ed692284f199 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -56,7 +56,9 @@ struct rt_sigframe_user_layout {
unsigned long fpsimd_offset;
unsigned long esr_offset;
unsigned long sve_offset;
+ unsigned long tpidr2_offset;
unsigned long za_offset;
+ unsigned long zt_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
@@ -220,7 +222,9 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
struct user_ctxs {
struct fpsimd_context __user *fpsimd;
struct sve_context __user *sve;
+ struct tpidr2_context __user *tpidr2;
struct za_context __user *za;
+ struct zt_context __user *zt;
};
#ifdef CONFIG_ARM64_SVE
@@ -361,6 +365,32 @@ extern int preserve_sve_context(void __user *ctx);
#ifdef CONFIG_ARM64_SME
+static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
+{
+ int err = 0;
+
+ current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
+
+ __put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
+ __put_user_error(sizeof(*ctx), &ctx->head.size, err);
+ __put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);
+
+ return err;
+}
+
+static int restore_tpidr2_context(struct user_ctxs *user)
+{
+ u64 tpidr2_el0;
+ int err = 0;
+
+ /* Magic and size were validated deciding to call this function */
+ __get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
+ if (!err)
+ current->thread.tpidr2_el0 = tpidr2_el0;
+
+ return err;
+}
+
static int preserve_za_context(struct za_context __user *ctx)
{
int err = 0;
@@ -389,7 +419,7 @@ static int preserve_za_context(struct za_context __user *ctx)
* fpsimd_signal_preserve_current_state().
*/
err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
- current->thread.za_state,
+ current->thread.sme_state,
ZA_SIG_REGS_SIZE(vq));
}
@@ -420,7 +450,7 @@ static int restore_za_context(struct user_ctxs *user)
/*
* Careful: we are about __copy_from_user() directly into
- * thread.za_state with preemption enabled, so protection is
+ * thread.sme_state with preemption enabled, so protection is
* needed to prevent a racing context switch from writing stale
* registers back over the new data.
*/
@@ -429,13 +459,13 @@ static int restore_za_context(struct user_ctxs *user)
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
sme_alloc(current);
- if (!current->thread.za_state) {
+ if (!current->thread.sme_state) {
current->thread.svcr &= ~SVCR_ZA_MASK;
clear_thread_flag(TIF_SME);
return -ENOMEM;
}
- err = __copy_from_user(current->thread.za_state,
+ err = __copy_from_user(current->thread.sme_state,
(char __user const *)user->za +
ZA_SIG_REGS_OFFSET,
ZA_SIG_REGS_SIZE(vq));
@@ -447,11 +477,83 @@ static int restore_za_context(struct user_ctxs *user)
return 0;
}
+
+static int preserve_zt_context(struct zt_context __user *ctx)
+{
+ int err = 0;
+ u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+
+ if (WARN_ON(!thread_za_enabled(&current->thread)))
+ return -EINVAL;
+
+ memset(reserved, 0, sizeof(reserved));
+
+ __put_user_error(ZT_MAGIC, &ctx->head.magic, err);
+ __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
+ &ctx->head.size, err);
+ __put_user_error(1, &ctx->nregs, err);
+ BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
+ err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
+
+ /*
+ * This assumes that the ZT state has already been saved to
+ * the task struct by calling the function
+ * fpsimd_signal_preserve_current_state().
+ */
+ err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
+ thread_zt_state(&current->thread),
+ ZT_SIG_REGS_SIZE(1));
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_zt_context(struct user_ctxs *user)
+{
+ int err;
+ struct zt_context zt;
+
+ /* ZA must be restored first for this check to be valid */
+ if (!thread_za_enabled(&current->thread))
+ return -EINVAL;
+
+ if (__copy_from_user(&zt, user->zt, sizeof(zt)))
+ return -EFAULT;
+
+ if (zt.nregs != 1)
+ return -EINVAL;
+
+ if (zt.head.size != ZT_SIG_CONTEXT_SIZE(zt.nregs))
+ return -EINVAL;
+
+ /*
+ * Careful: we are about to __copy_from_user() directly into
+ * thread.zt_state with preemption enabled, so protection is
+ * needed to prevent a racing context switch from writing stale
+ * registers back over the new data.
+ */
+
+ fpsimd_flush_task_state(current);
+ /* From now, fpsimd_thread_switch() won't touch ZT in thread state */
+
+ err = __copy_from_user(thread_zt_state(&current->thread),
+ (char __user const *)user->zt +
+ ZT_SIG_REGS_OFFSET,
+ ZT_SIG_REGS_SIZE(1));
+ if (err)
+ return -EFAULT;
+
+ return 0;
+}
+
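On the signal side, userspace finds the new TPIDR2 and ZT records the same way as any other extra context: by walking the _aarch64_ctx headers in uc_mcontext.__reserved[] until the zero terminator. A hedged sketch; it ignores the EXTRA_MAGIC indirection used when records spill out of __reserved, and assumes ZT_MAGIC and struct zt_context from the UAPI sigcontext.h in this series:

    #include <stddef.h>
    #include <ucontext.h>
    #include <asm/sigcontext.h>     /* ZT_MAGIC, struct zt_context (assumed) */

    static struct zt_context *find_zt(ucontext_t *uc)
    {
            struct _aarch64_ctx *head =
                    (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

            while (head->magic) {
                    if (head->magic == ZT_MAGIC)
                            return (struct zt_context *)head;
                    head = (struct _aarch64_ctx *)((char *)head + head->size);
            }

            return NULL;    /* no ZT record: ZA was off when the signal was taken */
    }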
#else /* ! CONFIG_ARM64_SME */
/* Turn any non-optimised out attempts to use these into a link error: */
+extern int preserve_tpidr2_context(void __user *ctx);
+extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
+extern int preserve_zt_context(void __user *ctx);
+extern int restore_zt_context(struct user_ctxs *user);
#endif /* ! CONFIG_ARM64_SME */
@@ -468,7 +570,9 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->fpsimd = NULL;
user->sve = NULL;
+ user->tpidr2 = NULL;
user->za = NULL;
+ user->zt = NULL;
if (!IS_ALIGNED((unsigned long)base, 16))
goto invalid;
@@ -534,6 +638,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->sve = (struct sve_context __user *)head;
break;
+ case TPIDR2_MAGIC:
+ if (!system_supports_sme())
+ goto invalid;
+
+ if (user->tpidr2)
+ goto invalid;
+
+ if (size != sizeof(*user->tpidr2))
+ goto invalid;
+
+ user->tpidr2 = (struct tpidr2_context __user *)head;
+ break;
+
case ZA_MAGIC:
if (!system_supports_sme())
goto invalid;
@@ -547,6 +664,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->za = (struct za_context __user *)head;
break;
+ case ZT_MAGIC:
+ if (!system_supports_sme2())
+ goto invalid;
+
+ if (user->zt)
+ goto invalid;
+
+ if (size < sizeof(*user->zt))
+ goto invalid;
+
+ user->zt = (struct zt_context __user *)head;
+ break;
+
case EXTRA_MAGIC:
if (have_extra_context)
goto invalid;
@@ -666,9 +796,15 @@ static int restore_sigframe(struct pt_regs *regs,
err = restore_fpsimd_context(user.fpsimd);
}
+ if (err == 0 && system_supports_sme() && user.tpidr2)
+ err = restore_tpidr2_context(&user);
+
if (err == 0 && system_supports_sme() && user.za)
err = restore_za_context(&user);
+ if (err == 0 && system_supports_sme2() && user.zt)
+ err = restore_zt_context(&user);
+
return err;
}
@@ -760,6 +896,11 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
else
vl = task_get_sme_vl(current);
+ err = sigframe_alloc(user, &user->tpidr2_offset,
+ sizeof(struct tpidr2_context));
+ if (err)
+ return err;
+
if (thread_za_enabled(&current->thread))
vq = sve_vq_from_vl(vl);
@@ -769,6 +910,15 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
return err;
}
+ if (system_supports_sme2()) {
+ if (add_all || thread_za_enabled(&current->thread)) {
+ err = sigframe_alloc(user, &user->zt_offset,
+ ZT_SIG_CONTEXT_SIZE(1));
+ if (err)
+ return err;
+ }
+ }
+
return sigframe_alloc_end(user);
}
@@ -817,6 +967,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
err |= preserve_sve_context(sve_ctx);
}
+ /* TPIDR2 if supported */
+ if (system_supports_sme() && err == 0) {
+ struct tpidr2_context __user *tpidr2_ctx =
+ apply_user_offset(user, user->tpidr2_offset);
+ err |= preserve_tpidr2_context(tpidr2_ctx);
+ }
+
/* ZA state if present */
if (system_supports_sme() && err == 0 && user->za_offset) {
struct za_context __user *za_ctx =
@@ -824,6 +981,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
err |= preserve_za_context(za_ctx);
}
+ /* ZT state if present */
+ if (system_supports_sme2() && err == 0 && user->zt_offset) {
+ struct zt_context __user *zt_ctx =
+ apply_user_offset(user, user->zt_offset);
+ err |= preserve_zt_context(zt_ctx);
+ }
+
if (err == 0 && user->extra_offset) {
char __user *sfp = (char __user *)user->sigframe;
char __user *userp =
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 97c9de57725d..2ae7cff1953a 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -99,8 +99,9 @@ SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
+ mov x0, xzr
bl init_kernel_el
- bl finalise_el2
+ mov x19, x0 // preserve boot mode
#if VA_BITS > 48
ldr_l x0, vabits_actual
#endif
@@ -116,6 +117,9 @@ SYM_CODE_END(cpu_resume)
.popsection
SYM_FUNC_START(_cpu_resume)
+ mov x0, x19
+ bl finalise_el2
+
mrs x1, mpidr_el1
adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index a5de47e3df2b..da84cf855c44 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -173,12 +173,8 @@ static inline void fp_user_discard(void)
* register state to track, if this changes the KVM code will
* need updating.
*/
- if (system_supports_sme() && test_thread_flag(TIF_SME)) {
- u64 svcr = read_sysreg_s(SYS_SVCR);
-
- if (svcr & SVCR_SM_MASK)
- sme_smstop_sm();
- }
+ if (system_supports_sme())
+ sme_smstop_sm();
if (!system_supports_sve())
return;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 4c0caa589e12..0ccc063daccb 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -162,10 +162,8 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
if (!bad)
p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
- else {
- p += sprintf(p, "bad PC value");
- break;
- }
+ else
+ p += sprintf(p, i == 0 ? "(????????) " : "???????? ");
}
printk("%sCode: %s\n", lvl, str);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 4c13dafc98b8..1a43df27a204 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -93,6 +93,7 @@ jiffies = jiffies_64;
#ifdef CONFIG_HIBERNATION
#define HIBERNATE_TEXT \
+ ALIGN_FUNCTION(); \
__hibernate_exit_text_start = .; \
*(.hibernate_exit.text) \
__hibernate_exit_text_end = .;
@@ -102,6 +103,7 @@ jiffies = jiffies_64;
#ifdef CONFIG_KEXEC_CORE
#define KEXEC_TEXT \
+ ALIGN_FUNCTION(); \
__relocate_new_kernel_start = .; \
*(.kexec_relocate.text) \
__relocate_new_kernel_end = .;
@@ -179,7 +181,6 @@ SECTIONS
LOCK_TEXT
KPROBES_TEXT
HYPERVISOR_TEXT
- IDMAP_TEXT
*(.gnu.warning)
. = ALIGN(16);
*(.got) /* Global offset table */
@@ -206,6 +207,7 @@ SECTIONS
TRAMP_TEXT
HIBERNATE_TEXT
KEXEC_TEXT
+ IDMAP_TEXT
. = ALIGN(PAGE_SIZE);
}
@@ -355,6 +357,8 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
#ifdef CONFIG_HIBERNATION
ASSERT(__hibernate_exit_text_end - __hibernate_exit_text_start <= SZ_4K,
"Hibernate exit text is bigger than 4 KiB")
+ASSERT(__hibernate_exit_text_start == swsusp_arch_suspend_exit,
+ "Hibernate exit text does not start with swsusp_arch_suspend_exit")
#endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE,
@@ -381,4 +385,6 @@ ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
ASSERT(__relocate_new_kernel_end - __relocate_new_kernel_start <= SZ_4K,
"kexec relocation code is bigger than 4 KiB")
ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
+ASSERT(__relocate_new_kernel_start == arm64_relocate_new_kernel,
+ "kexec control page does not start with arm64_relocate_new_kernel")
#endif
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index fccf9ec01813..55f80fb93925 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -328,7 +328,7 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
* we may need to check if the host state needs to be saved.
*/
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
- !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
+ !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT)))
vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
/* Check if we have TRBE implemented and available at the host */
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 02dd7e9ebd39..235775d0c825 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -143,7 +143,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
fp_state.st = &vcpu->arch.ctxt.fp_regs;
fp_state.sve_state = vcpu->arch.sve_state;
fp_state.sve_vl = vcpu->arch.sve_max_vl;
- fp_state.za_state = NULL;
+ fp_state.sme_state = NULL;
fp_state.svcr = &vcpu->arch.svcr;
fp_state.fp_type = &vcpu->arch.fp_type;
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 435346ea1504..f3aa7738b477 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -171,7 +171,7 @@ alternative_else
dsb sy // Synchronize against in-flight ld/st
isb // Prevent an early read of side-effect free ISR
mrs x2, isr_el1
- tbnz x2, #8, 2f // ISR_EL1.A
+ tbnz x2, #ISR_EL1_A_SHIFT, 2f
ret
nop
2:
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index e17455773b98..2673bde62fad 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -27,7 +27,7 @@ static void __debug_save_spe(u64 *pmscr_el1)
* Check if the host is actually using it ?
*/
reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
- if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
+ if (!(reg & BIT(PMBLIMITR_EL1_E_SHIFT)))
return;
/* Yes; save the control register and disable data generation */
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 081058d4e436..503567c864fd 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -56,6 +56,7 @@ SYM_FUNC_START(caches_clean_inval_pou)
caches_clean_inval_pou_macro
ret
SYM_FUNC_END(caches_clean_inval_pou)
+SYM_FUNC_ALIAS(__pi_caches_clean_inval_pou, caches_clean_inval_pou)
/*
* caches_clean_inval_user_pou(start,end)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 14c87e8d69d8..34d5f7c4c64e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -133,7 +133,7 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
return phys;
}
-static bool pgattr_change_is_safe(u64 old, u64 new)
+bool pgattr_change_is_safe(u64 old, u64 new)
{
/*
* The following mapping attributes may be updated in live
@@ -142,9 +142,13 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
/* creating or taking down mappings is always safe */
- if (old == 0 || new == 0)
+ if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
return true;
+ /* A live entry's pfn should not change */
+ if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
+ return false;
+
/* live contiguous mappings may not be manipulated at all */
if ((old | new) & PTE_CONT)
return false;
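As a rough illustration of what the strengthened check permits and rejects, using kernel-internal helpers such as pfn_pte() and the PAGE_KERNEL* protections (this is not a test that exists in the tree): permission-bit changes on a live mapping stay within the safe mask, while moving the output address does not.

    static void pgattr_change_example(unsigned long pfn)
    {
            pteval_t old = pte_val(pfn_pte(pfn, PAGE_KERNEL));
            pteval_t ro  = pte_val(pfn_pte(pfn, PAGE_KERNEL_RO));
            pteval_t far = pte_val(pfn_pte(pfn + 1, PAGE_KERNEL));

            /* Only PTE_WRITE/PTE_RDONLY differ: expected to be allowed. */
            WARN_ON(!pgattr_change_is_safe(old, ro));

            /* Same attributes, different pfn: expected to be rejected. */
            WARN_ON(pgattr_change_is_safe(old, far));
    }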
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 066fa60b93d2..91410f488090 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -110,7 +110,6 @@ SYM_FUNC_END(cpu_do_suspend)
*
* x0: Address of context pointer
*/
- .pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
ldp x2, x3, [x0]
ldp x4, x5, [x0, #16]
@@ -166,7 +165,6 @@ alternative_else_nop_endif
isb
ret
SYM_FUNC_END(cpu_do_resume)
- .popsection
#endif
.pushsection ".idmap.text", "awx"
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index a86ee376920a..b98f71120588 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -28,7 +28,9 @@ HAS_GENERIC_AUTH
HAS_GENERIC_AUTH_ARCH_QARMA3
HAS_GENERIC_AUTH_ARCH_QARMA5
HAS_GENERIC_AUTH_IMP_DEF
-HAS_IRQ_PRIO_MASKING
+HAS_GIC_CPUIF_SYSREGS
+HAS_GIC_PRIO_MASKING
+HAS_GIC_PRIO_RELAXED_SYNC
HAS_LDAPR
HAS_LSE_ATOMICS
HAS_NO_FPSIMD
@@ -38,7 +40,6 @@ HAS_RAS_EXTN
HAS_RNG
HAS_SB
HAS_STAGE2_FWB
-HAS_SYSREG_GIC_CPUIF
HAS_TIDCP1
HAS_TLB_RANGE
HAS_VIRT_HOST_EXTN
@@ -50,6 +51,7 @@ MTE
MTE_ASYMM
SME
SME_FA64
+SME2
SPECTRE_V2
SPECTRE_V3A
SPECTRE_V4
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 184e58fd5631..ef9995cea1b4 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -689,17 +689,17 @@ EndEnum
Enum 11:8 FPDP
0b0000 NI
0b0001 VFPv2
- 0b0001 VFPv3
+ 0b0010 VFPv3
EndEnum
Enum 7:4 FPSP
0b0000 NI
0b0001 VFPv2
- 0b0001 VFPv3
+ 0b0010 VFPv3
EndEnum
Enum 3:0 SIMDReg
0b0000 NI
0b0001 IMP_16x64
- 0b0001 IMP_32x64
+ 0b0010 IMP_32x64
EndEnum
EndSysreg
@@ -718,7 +718,7 @@ EndEnum
Enum 23:20 SIMDHP
0b0000 NI
0b0001 SIMDHP
- 0b0001 SIMDHP_FLOAT
+ 0b0010 SIMDHP_FLOAT
EndEnum
Enum 19:16 SIMDSP
0b0000 NI
@@ -894,6 +894,7 @@ EndEnum
Enum 27:24 SME
0b0000 NI
0b0001 IMP
+ 0b0010 SME2
EndEnum
Res0 23:20
Enum 19:16 MPAM_frac
@@ -975,7 +976,9 @@ Enum 63 FA64
EndEnum
Res0 62:60
Enum 59:56 SMEver
- 0b0000 IMP
+ 0b0000 SME
+ 0b0001 SME2
+ 0b0010 SME2p1
EndEnum
Enum 55:52 I16I64
0b0000 NI
@@ -986,7 +989,19 @@ Enum 48 F64F64
0b0 NI
0b1 IMP
EndEnum
-Res0 47:40
+Enum 47:44 I16I32
+ 0b0000 NI
+ 0b0101 IMP
+EndEnum
+Enum 43 B16B16
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+Enum 42 F16F16
+ 0b0 NI
+ 0b1 IMP
+EndEnum
+Res0 41:40
Enum 39:36 I8I32
0b0000 NI
0b1111 IMP
@@ -999,7 +1014,10 @@ Enum 34 B16F32
0b0 NI
0b1 IMP
EndEnum
-Res0 33
+Enum 33 BI32I32
+ 0b0 NI
+ 0b1 IMP
+EndEnum
Enum 32 F32F32
0b0 NI
0b1 IMP
@@ -1599,7 +1617,8 @@ EndSysreg
SysregFields SMCR_ELx
Res0 63:32
Field 31 FA64
-Res0 30:9
+Field 30 EZT0
+Res0 29:9
Raz 8:4
Field 3:0 LEN
EndSysregFields
@@ -1618,6 +1637,130 @@ Sysreg FAR_EL1 3 0 6 0 0
Field 63:0 ADDR
EndSysreg
+Sysreg PMSCR_EL1 3 0 9 9 0
+Res0 63:8
+Field 7:6 PCT
+Field 5 TS
+Field 4 PA
+Field 3 CX
+Res0 2
+Field 1 E1SPE
+Field 0 E0SPE
+EndSysreg
+
+Sysreg PMSNEVFR_EL1 3 0 9 9 1
+Field 63:0 E
+EndSysreg
+
+Sysreg PMSICR_EL1 3 0 9 9 2
+Field 63:56 ECOUNT
+Res0 55:32
+Field 31:0 COUNT
+EndSysreg
+
+Sysreg PMSIRR_EL1 3 0 9 9 3
+Res0 63:32
+Field 31:8 INTERVAL
+Res0 7:1
+Field 0 RND
+EndSysreg
+
+Sysreg PMSFCR_EL1 3 0 9 9 4
+Res0 63:19
+Field 18 ST
+Field 17 LD
+Field 16 B
+Res0 15:4
+Field 3 FnE
+Field 2 FL
+Field 1 FT
+Field 0 FE
+EndSysreg
+
+Sysreg PMSEVFR_EL1 3 0 9 9 5
+Field 63:0 E
+EndSysreg
+
+Sysreg PMSLATFR_EL1 3 0 9 9 6
+Res0 63:16
+Field 15:0 MINLAT
+EndSysreg
+
+Sysreg PMSIDR_EL1 3 0 9 9 7
+Res0 63:25
+Field 24 PBT
+Field 23:20 FORMAT
+Enum 19:16 COUNTSIZE
+ 0b0010 12_BIT_SAT
+ 0b0011 16_BIT_SAT
+EndEnum
+Field 15:12 MAXSIZE
+Enum 11:8 INTERVAL
+ 0b0000 256
+ 0b0010 512
+ 0b0011 768
+ 0b0100 1024
+ 0b0101 1536
+ 0b0110 2048
+ 0b0111 3072
+ 0b1000 4096
+EndEnum
+Res0 7
+Field 6 FnE
+Field 5 ERND
+Field 4 LDS
+Field 3 ARCHINST
+Field 2 FL
+Field 1 FT
+Field 0 FE
+EndSysreg
+
+Sysreg PMBLIMITR_EL1 3 0 9 10 0
+Field 63:12 LIMIT
+Res0 11:6
+Field 5 PMFZ
+Res0 4:3
+Enum 2:1 FM
+ 0b00 FILL
+ 0b10 DISCARD
+EndEnum
+Field 0 E
+EndSysreg
+
+Sysreg PMBPTR_EL1 3 0 9 10 1
+Field 63:0 PTR
+EndSysreg
+
+Sysreg PMBSR_EL1 3 0 9 10 3
+Res0 63:32
+Enum 31:26 EC
+ 0b000000 BUF
+ 0b100100 FAULT_S1
+ 0b100101 FAULT_S2
+ 0b011110 FAULT_GPC
+ 0b011111 IMP_DEF
+EndEnum
+Res0 25:20
+Field 19 DL
+Field 18 EA
+Field 17 S
+Field 16 COLL
+Field 15:0 MSS
+EndSysreg
+
+Sysreg PMBIDR_EL1 3 0 9 10 7
+Res0 63:12
+Enum 11:8 EA
+ 0b0000 NotDescribed
+ 0b0001 Ignored
+ 0b0010 SError
+EndEnum
+Res0 7:6
+Field 5 F
+Field 4 P
+Field 3:0 ALIGN
+EndSysreg
+
SysregFields CONTEXTIDR_ELx
Res0 63:32
Field 31:0 PROCID
@@ -1772,6 +1915,21 @@ Sysreg FAR_EL2 3 4 6 0 0
Field 63:0 ADDR
EndSysreg
+Sysreg PMSCR_EL2 3 4 9 9 0
+Res0 63:8
+Enum 7:6 PCT
+ 0b00 VIRT
+ 0b01 PHYS
+ 0b11 GUEST
+EndEnum
+Field 5 TS
+Field 4 PA
+Field 3 CX
+Res0 2
+Field 1 E2SPE
+Field 0 E0HSPE
+EndSysreg
+
Sysreg CONTEXTIDR_EL2 3 4 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
@@ -1842,3 +2000,18 @@ Field 23:16 LD
Res0 15:8
Field 7:0 LR
EndSysreg
+
+Sysreg ISR_EL1 3 0 12 1 0
+Res0 63:11
+Field 10 IS
+Field 9 FS
+Field 8 A
+Field 7 I
+Field 6 F
+Res0 5:0
+EndSysreg
+
+Sysreg ICC_NMIAR1_EL1 3 0 12 9 5
+Res0 63:24
+Field 23:0 INTID
+EndSysreg