author     Will Deacon <will@kernel.org>  2020-09-15 23:11:13 +0200
committer  Will Deacon <will@kernel.org>  2020-09-29 17:08:15 +0200
commit     6e5f0927846adf39aebee450f13871e3cb4ab012 (patch)
tree       261a6871c7b1a509831ad48b9ec2ea0fe9204675 /arch/arm64/kernel
parent     arm64: Run ARCH_WORKAROUND_2 enabling code on all CPUs (diff)
arm64: Remove Spectre-related CONFIG_* options
The Spectre mitigations are too configurable for their own good, leading
to confusing logic trying to figure out when we should mitigate and when
we shouldn't. Although the plethora of command-line options needs to
stick around for backwards compatibility, the default-on CONFIG options
that depend on EXPERT can be dropped: the mitigations only do anything
if the system is vulnerable, a mitigation is available, and the command
line hasn't disabled it.

Remove CONFIG_HARDEN_BRANCH_PREDICTOR and CONFIG_ARM64_SSBD in favour of
enabling this code unconditionally.

Signed-off-by: Will Deacon <will@kernel.org>
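[Editor's note] To make the resulting policy concrete, here is a minimal,
self-contained C sketch of the runtime gating the message describes. It is
an illustration under stated assumptions, not kernel code:
system_is_vulnerable(), mitigation_available() and cmdline_mitigations_off()
are hypothetical stand-ins for the real per-erratum checks; the actual
command-line gates visible in this diff are __nospectre_v2 and
cpu_mitigations_off().

/*
 * Illustrative sketch only (not kernel code): the mitigation code is
 * now built unconditionally, but still does nothing unless all three
 * runtime conditions hold. The helpers below are hypothetical
 * stand-ins for the real per-erratum checks in cpu_errata.c.
 */
#include <stdbool.h>
#include <stdio.h>

static bool system_is_vulnerable(void)    { return true;  } /* hypothetical */
static bool mitigation_available(void)    { return true;  } /* hypothetical */
static bool cmdline_mitigations_off(void) { return false; } /* stands in for
						nospectre_v2 and the real
						cpu_mitigations_off() */

static void maybe_enable_mitigation(void)
{
	if (!system_is_vulnerable())
		return;		/* safe CPU: nothing to mitigate */
	if (!mitigation_available())
		return;		/* no CPU/firmware support to use */
	if (cmdline_mitigations_off())
		return;		/* user opted out at boot */
	printf("mitigation enabled\n");	/* real code patches in the workaround */
}

int main(void)
{
	maybe_enable_mitigation();
	return 0;
}

The point of the sketch: with all three gates enforced at runtime, a
compile-time switch adds no safety, only configuration surface.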
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile      |  3
-rw-r--r--  arch/arm64/kernel/cpu_errata.c  | 19
-rw-r--r--  arch/arm64/kernel/cpufeature.c  |  4
-rw-r--r--  arch/arm64/kernel/entry.S       |  4
4 files changed, 3 insertions(+), 27 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index a561cbb91d4d..ed8799bdd41f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -19,7 +19,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
 			   return_address.o cpuinfo.o cpu_errata.o		\
 			   cpufeature.o alternative.o cacheinfo.o		\
 			   smp.o smp_spin_table.o topology.o smccc-call.o	\
-			   syscall.o
+			   ssbd.o syscall.o
 
 targets += efi-entry.o
@@ -59,7 +59,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)		+= sdei.o
-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)		+= scs.o
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index abfef5f3b5fd..dd9103915f1e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -254,9 +254,7 @@ static int detect_harden_bp_fw(void)
 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
 		cb = qcom_link_stack_sanitization;
 
-	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
-		install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
+	install_bp_hardening_cb(cb, smccc_start, smccc_end);
 	return 1;
 }
@@ -335,11 +333,6 @@ void arm64_set_ssbd_mitigation(bool state)
 {
 	int conduit;
 
-	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
-		pr_info_once("SSBD disabled by kernel configuration\n");
-		return;
-	}
-
 	if (this_cpu_has_cap(ARM64_SSBS)) {
 		if (state)
 			asm volatile(SET_PSTATE_SSBS(0));
@@ -584,12 +577,6 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
 
 	__spectrev2_safe = false;
 
-	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
-		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
-		__hardenbp_enab = false;
-		return false;
-	}
-
 	/* forced off */
 	if (__nospectre_v2 || cpu_mitigations_off()) {
 		pr_info_once("spectrev2 mitigation disabled by command line option\n");
@@ -1004,9 +991,7 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev,
 	switch (ssbd_state) {
 	case ARM64_SSBD_KERNEL:
 	case ARM64_SSBD_FORCE_ENABLE:
-		if (IS_ENABLED(CONFIG_ARM64_SSBD))
-			return sprintf(buf,
-				"Mitigation: Speculative Store Bypass disabled via prctl\n");
+		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
 	}
 
 	return sprintf(buf, "Vulnerable\n");
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6424584be01e..4bb45b1d4ae4 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1583,7 +1583,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
 	WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-#ifdef CONFIG_ARM64_SSBD
 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
 {
 	if (user_mode(regs))
@@ -1623,7 +1622,6 @@ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
 		arm64_set_ssbd_mitigation(true);
 	}
 }
-#endif /* CONFIG_ARM64_SSBD */
 
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
@@ -1976,7 +1974,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
 		.min_field_value = 1,
 	},
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypassing Safe (SSBS)",
 		.capability = ARM64_SSBS,
@@ -1988,7 +1985,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
 		.cpu_enable = cpu_enable_ssbs,
 	},
-#endif
 #ifdef CONFIG_ARM64_CNP
 	{
 		.desc = "Common not Private translations",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 55af8b504b65..81b709349d7b 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -132,7 +132,6 @@ alternative_else_nop_endif
 	 * them if required.
 	 */
 	.macro	apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
 alternative_cb	arm64_enable_wa2_handling
 	b	.L__asm_ssbd_skip\@
 alternative_cb_end
@@ -146,7 +145,6 @@ alternative_cb arm64_update_smccc_conduit
 	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
-#endif
 	.endm
 
 	.macro	kernel_entry, el, regsize = 64
@@ -697,11 +695,9 @@ el0_irq_naked:
 	bl	trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	tbz	x22, #55, 1f
 	bl	do_el0_irq_bp_hardening
 1:
-#endif
 	irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS