From e5ce5e7267ddcbe13ab9ead2542524e1b7993e5a Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 15 Apr 2019 16:21:20 -0500 Subject: arm64: Provide a command line to disable spectre_v2 mitigation There are various reasons, such as benchmarking, to disable spectrev2 mitigation on a machine. Provide a command-line option to do so. Signed-off-by: Jeremy Linton Reviewed-by: Suzuki K Poulose Reviewed-by: Andre Przywara Reviewed-by: Catalin Marinas Tested-by: Stefan Wahren Cc: Jonathan Corbet Cc: linux-doc@vger.kernel.org Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 9950bb0cbd52..d2b2c69d31bb 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -220,6 +220,14 @@ static void qcom_link_stack_sanitization(void) : "=&r" (tmp)); } +static bool __nospectre_v2; +static int __init parse_nospectre_v2(char *str) +{ + __nospectre_v2 = true; + return 0; +} +early_param("nospectre_v2", parse_nospectre_v2); + static void enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) { @@ -231,6 +239,11 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) if (!entry->matches(entry, SCOPE_LOCAL_CPU)) return; + if (__nospectre_v2) { + pr_info_once("spectrev2 mitigation disabled by command line option\n"); + return; + } + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) return; -- cgit v1.2.3 From 3891ebccace188af075ce143d8b072b65e90f695 Mon Sep 17 00:00:00 2001 From: Mian Yousaf Kaukab Date: Mon, 15 Apr 2019 16:21:21 -0500 Subject: arm64: Add sysfs vulnerability show for spectre-v1 spectre-v1 has been mitigated and the mitigation is always active. Report this to userspace via sysfs Signed-off-by: Mian Yousaf Kaukab Signed-off-by: Jeremy Linton Reviewed-by: Andre Przywara Reviewed-by: Catalin Marinas Tested-by: Stefan Wahren Acked-by: Suzuki K Poulose Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index d2b2c69d31bb..cf623657cf3c 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -755,3 +755,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { } }; + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} -- cgit v1.2.3 From 1b3ccf4be0e7be8c4bd8522066b6cbc92591e912 Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 15 Apr 2019 16:21:22 -0500 Subject: arm64: add sysfs vulnerability show for meltdown We implement page table isolation as a mitigation for meltdown. Report this to userspace via sysfs. 
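For context, the cpu_show_meltdown() definition added below in arch/arm64/kernel/cpufeature.c is all the arch code needs to provide: the generic driver core already exposes /sys/devices/system/cpu/vulnerabilities/meltdown and falls back to a weak default when no architecture override exists. A rough sketch of that generic side, paraphrased from memory of drivers/base/cpu.c rather than quoted, so treat the exact attribute wiring as an assumption:

#include <linux/device.h>
#include <linux/kernel.h>

/*
 * Weak fallback that the strong definition added below in
 * arch/arm64/kernel/cpufeature.c overrides.
 */
ssize_t __weak cpu_show_meltdown(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "Not affected\n");
}

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);

Because the symbol is weak, no registration changes are needed on the arm64 side; defining the function is enough to change what the existing sysfs file reports.
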
Signed-off-by: Jeremy Linton Reviewed-by: Suzuki K Poulose Reviewed-by: Andre Przywara Reviewed-by: Catalin Marinas Tested-by: Stefan Wahren Signed-off-by: Will Deacon --- arch/arm64/kernel/cpufeature.c | 58 ++++++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 14 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4061de10cea6..703ee8564fbd 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -947,7 +947,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) return has_cpuid_feature(entry, scope); } -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static bool __meltdown_safe = true; static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, @@ -967,6 +967,16 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, { /* sentinel */ } }; char const *str = "command line option"; + bool meltdown_safe; + + meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list); + + /* Defer to CPU feature registers */ + if (has_cpuid_feature(entry, scope)) + meltdown_safe = true; + + if (!meltdown_safe) + __meltdown_safe = false; /* * For reasons that aren't entirely clear, enabling KPTI on Cavium @@ -978,6 +988,19 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, __kpti_forced = -1; } + /* Useful for KASLR robustness */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) { + if (!__kpti_forced) { + str = "KASLR"; + __kpti_forced = 1; + } + } + + if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) { + pr_info_once("kernel page table isolation disabled by kernel configuration\n"); + return false; + } + /* Forced? 
*/ if (__kpti_forced) { pr_info_once("kernel page table isolation forced %s by %s\n", @@ -985,18 +1008,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, return __kpti_forced > 0; } - /* Useful for KASLR robustness */ - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) - return kaslr_offset() > 0; - - /* Don't force KPTI for CPUs that are not vulnerable */ - if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list)) - return false; - - /* Defer to CPU feature registers */ - return !has_cpuid_feature(entry, scope); + return !meltdown_safe; } +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 static void kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) { @@ -1026,6 +1041,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) return; } +#else +static void +kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) +{ +} +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ static int __init parse_kpti(char *str) { @@ -1039,7 +1060,6 @@ static int __init parse_kpti(char *str) return 0; } early_param("kpti", parse_kpti); -#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ #ifdef CONFIG_ARM64_HW_AFDBM static inline void __cpu_enable_hw_dbm(void) @@ -1306,7 +1326,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64PFR0_EL0_SHIFT, .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT, }, -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 { .desc = "Kernel page table isolation (KPTI)", .capability = ARM64_UNMAP_KERNEL_AT_EL0, @@ -1322,7 +1341,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = unmap_kernel_at_el0, .cpu_enable = kpti_install_ng_mappings, }, -#endif { /* FP/SIMD is not implemented */ .capability = ARM64_HAS_NO_FPSIMD, @@ -2101,3 +2119,15 @@ static int __init enable_mrs_emulation(void) } core_initcall(enable_mrs_emulation); + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, + char *buf) +{ + if (__meltdown_safe) + return sprintf(buf, "Not affected\n"); + + if (arm64_kernel_unmapped_at_el0()) + return sprintf(buf, "Mitigation: PTI\n"); + + return sprintf(buf, "Vulnerable\n"); +} -- cgit v1.2.3 From 73f38166095947f3b86b02fbed6bd592223a7ac8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 15 Apr 2019 16:21:23 -0500 Subject: arm64: Advertise mitigation of Spectre-v2, or lack thereof We currently have a list of CPUs affected by Spectre-v2, for which we check that the firmware implements ARCH_WORKAROUND_1. It turns out that not all firmwares do implement the required mitigation, and that we fail to let the user know about it. Instead, let's slightly revamp our checks, and rely on a whitelist of cores that are known to be non-vulnerable, and let the user know the status of the mitigation in the kernel log. 
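The new detection order is easier to follow without the diff markers. The following is a condensed restatement of the check_branch_predictor() hunk below (the WARN_ON() and the nospectre_v2 command-line override are omitted for brevity; this is an illustration, not a replacement for the hunk):

/* Condensed from check_branch_predictor() as added below. */
static bool check_branch_predictor(const struct arm64_cpu_capabilities *entry,
                                   int scope)
{
        int need_wa;

        /* 1. A CPU advertising CSV2 is not vulnerable. */
        if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
                                                 ID_AA64PFR0_CSV2_SHIFT))
                return false;

        /* 2. Otherwise, consult the spectre_v2_safe_list whitelist. */
        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
                return false;

        /* 3. Fall back to the firmware probe; warn when no workaround exists. */
        need_wa = detect_harden_bp_fw();
        if (need_wa < 0)
                pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");

        return need_wa > 0;
}

The capability entry is also switched from a MIDR range list to this .matches callback, so every booted CPU is classified, not just the ones previously listed as needing the firmware call.
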
Signed-off-by: Marc Zyngier Signed-off-by: Jeremy Linton Reviewed-by: Andre Przywara Reviewed-by: Suzuki K Poulose Reviewed-by: Catalin Marinas Tested-by: Stefan Wahren Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 109 +++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 53 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index cf623657cf3c..032f1a4dbea2 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -131,9 +131,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); } -static void __install_bp_hardening_cb(bp_hardening_cb_t fn, - const char *hyp_vecs_start, - const char *hyp_vecs_end) +static void install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) { static DEFINE_RAW_SPINLOCK(bp_lock); int cpu, slot = -1; @@ -169,7 +169,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, #define __smccc_workaround_1_smc_start NULL #define __smccc_workaround_1_smc_end NULL -static void __install_bp_hardening_cb(bp_hardening_cb_t fn, +static void install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, const char *hyp_vecs_end) { @@ -177,23 +177,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, } #endif /* CONFIG_KVM_INDIRECT_VECTORS */ -static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, - bp_hardening_cb_t fn, - const char *hyp_vecs_start, - const char *hyp_vecs_end) -{ - u64 pfr0; - - if (!entry->matches(entry, SCOPE_LOCAL_CPU)) - return; - - pfr0 = read_cpuid(ID_AA64PFR0_EL1); - if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) - return; - - __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); -} - #include #include #include @@ -228,31 +211,27 @@ static int __init parse_nospectre_v2(char *str) } early_param("nospectre_v2", parse_nospectre_v2); -static void -enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) +/* + * -1: No workaround + * 0: No workaround required + * 1: Workaround installed + */ +static int detect_harden_bp_fw(void) { bp_hardening_cb_t cb; void *smccc_start, *smccc_end; struct arm_smccc_res res; u32 midr = read_cpuid_id(); - if (!entry->matches(entry, SCOPE_LOCAL_CPU)) - return; - - if (__nospectre_v2) { - pr_info_once("spectrev2 mitigation disabled by command line option\n"); - return; - } - if (psci_ops.smccc_version == SMCCC_VERSION_1_0) - return; + return -1; switch (psci_ops.conduit) { case PSCI_CONDUIT_HVC: arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); if ((int)res.a0 < 0) - return; + return -1; cb = call_hvc_arch_workaround_1; /* This is a guest, no need to patch KVM vectors */ smccc_start = NULL; @@ -263,23 +242,23 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); if ((int)res.a0 < 0) - return; + return -1; cb = call_smc_arch_workaround_1; smccc_start = __smccc_workaround_1_smc_start; smccc_end = __smccc_workaround_1_smc_end; break; default: - return; + return -1; } if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) cb = qcom_link_stack_sanitization; - install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); + install_bp_hardening_cb(cb, smccc_start, 
smccc_end); - return; + return 1; } #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ @@ -521,24 +500,48 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) CAP_MIDR_RANGE_LIST(midr_list) #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR - /* - * List of CPUs where we need to issue a psci call to - * harden the branch predictor. + * List of CPUs that do not need any Spectre-v2 mitigation at all. */ -static const struct midr_range arm64_bp_harden_smccc_cpus[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), - MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), - MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), - MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER), - {}, +static const struct midr_range spectre_v2_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + { /* sentinel */ } }; +static bool __maybe_unused +check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) +{ + int need_wa; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + /* If the CPU has CSV2 set, we're safe */ + if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1), + ID_AA64PFR0_CSV2_SHIFT)) + return false; + + /* Alternatively, we have a list of unaffected CPUs */ + if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list)) + return false; + + /* Fallback to firmware detection */ + need_wa = detect_harden_bp_fw(); + if (!need_wa) + return false; + + /* forced off */ + if (__nospectre_v2) { + pr_info_once("spectrev2 mitigation disabled by command line option\n"); + return false; + } + + if (need_wa < 0) + pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n"); + + return (need_wa > 0); +} #endif #ifdef CONFIG_HARDEN_EL2_VECTORS @@ -717,8 +720,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, - .cpu_enable = enable_smccc_arch_workaround_1, - ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus), + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = check_branch_predictor, }, #endif #ifdef CONFIG_HARDEN_EL2_VECTORS -- cgit v1.2.3 From 517953c2c47f9c00a002f588ac856a5bc70cede3 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 15 Apr 2019 16:21:24 -0500 Subject: arm64: Use firmware to detect CPUs that are not affected by Spectre-v2 The SMCCC ARCH_WORKAROUND_1 service can indicate that although the firmware knows about the Spectre-v2 mitigation, this particular CPU is not vulnerable, and it is thus not necessary to call the firmware on this CPU. Let's use this information to our benefit. 
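In outline, the switch statements below turn the previous two-way check of res.a0 into a three-way one. A condensed sketch of the HVC conduit follows (the SMC conduit is identical apart from arm_smccc_1_1_smc(); the helper name here is illustrative only, the real logic stays inside detect_harden_bp_fw()):

/* Illustrative outline only; mirrors the hunk below. */
static int probe_arch_workaround_1_hvc(void)
{
        struct arm_smccc_res res;

        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);

        switch ((int)res.a0) {
        case 1:                 /* firmware: this CPU is not affected */
                return 0;       /* no workaround required */
        case 0:                 /* firmware implements the workaround */
                return 1;       /* caller installs call_hvc_arch_workaround_1 */
        default:                /* negative: service not supported */
                return -1;      /* no workaround available */
        }
}

The 0/1/-1 values match the convention already documented on detect_harden_bp_fw() earlier in the series, which is what later lets the sysfs code distinguish "not affected" from "mitigated" from "vulnerable".
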
Signed-off-by: Marc Zyngier Signed-off-by: Jeremy Linton Reviewed-by: Andre Przywara Reviewed-by: Catalin Marinas Tested-by: Stefan Wahren Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 032f1a4dbea2..60cf87c4deb7 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -230,22 +230,36 @@ static int detect_harden_bp_fw(void) case PSCI_CONDUIT_HVC: arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 < 0) + switch ((int)res.a0) { + case 1: + /* Firmware says we're just fine */ + return 0; + case 0: + cb = call_hvc_arch_workaround_1; + /* This is a guest, no need to patch KVM vectors */ + smccc_start = NULL; + smccc_end = NULL; + break; + default: return -1; - cb = call_hvc_arch_workaround_1; - /* This is a guest, no need to patch KVM vectors */ - smccc_start = NULL; - smccc_end = NULL; + } break; case PSCI_CONDUIT_SMC: arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 < 0) + switch ((int)res.a0) { + case 1: + /* Firmware says we're just fine */ + return 0; + case 0: + cb = call_smc_arch_workaround_1; + smccc_start = __smccc_workaround_1_smc_start; + smccc_end = __smccc_workaround_1_smc_end; + break; + default: return -1; - cb = call_smc_arch_workaround_1; - smccc_start = __smccc_workaround_1_smc_start; - smccc_end = __smccc_workaround_1_smc_end; + } break; default: -- cgit v1.2.3 From 8c1e3d2bb44cbb998cb28ff9a18f105fee7f1eb3 Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 15 Apr 2019 16:21:25 -0500 Subject: arm64: Always enable spectre-v2 vulnerability detection Ensure we are always able to detect whether or not the CPU is affected by Spectre-v2, so that we can later advertise this to userspace. Signed-off-by: Jeremy Linton Reviewed-by: Andre Przywara Reviewed-by: Catalin Marinas Tested-by: Stefan Wahren Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 60cf87c4deb7..a9c3ad4f7948 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -109,7 +109,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR #include #include @@ -270,11 +269,11 @@ static int detect_harden_bp_fw(void) ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) cb = qcom_link_stack_sanitization; - install_bp_hardening_cb(cb, smccc_start, smccc_end); + if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) + install_bp_hardening_cb(cb, smccc_start, smccc_end); return 1; } -#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ #ifdef CONFIG_ARM64_SSBD DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); @@ -513,7 +512,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ CAP_MIDR_RANGE_LIST(midr_list) -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR /* * List of CPUs that do not need any Spectre-v2 mitigation at all. 
*/ @@ -545,6 +543,12 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) if (!need_wa) return false; + if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) { + pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n"); + __hardenbp_enab = false; + return false; + } + /* forced off */ if (__nospectre_v2) { pr_info_once("spectrev2 mitigation disabled by command line option\n"); @@ -556,7 +560,6 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) return (need_wa > 0); } -#endif #ifdef CONFIG_HARDEN_EL2_VECTORS @@ -731,13 +734,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), }, #endif -#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches = check_branch_predictor, }, -#endif #ifdef CONFIG_HARDEN_EL2_VECTORS { .desc = "EL2 vector hardening", -- cgit v1.2.3 From d2532e27b5638bb2e2dd52b80b7ea2ec65135377 Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 15 Apr 2019 16:21:26 -0500 Subject: arm64: add sysfs vulnerability show for spectre-v2 Track whether all the cores in the machine are vulnerable to Spectre-v2, and whether all the vulnerable cores have been mitigated. We then expose this information to userspace via sysfs. Signed-off-by: Jeremy Linton Reviewed-by: Andre Przywara Reviewed-by: Catalin Marinas Tested-by: Stefan Wahren Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index a9c3ad4f7948..d2bbafa04b3c 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -512,6 +512,10 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ CAP_MIDR_RANGE_LIST(midr_list) +/* Track overall mitigation state. We are only mitigated if all cores are ok */ +static bool __hardenbp_enab = true; +static bool __spectrev2_safe = true; + /* * List of CPUs that do not need any Spectre-v2 mitigation at all. */ @@ -522,6 +526,10 @@ static const struct midr_range spectre_v2_safe_list[] = { { /* sentinel */ } }; +/* + * Track overall bp hardening for all heterogeneous cores in the machine. + * We are only considered "safe" if all booted cores are known safe. 
+ */ static bool __maybe_unused check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) { @@ -543,6 +551,8 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) if (!need_wa) return false; + __spectrev2_safe = false; + if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) { pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n"); __hardenbp_enab = false; @@ -552,11 +562,14 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) /* forced off */ if (__nospectre_v2) { pr_info_once("spectrev2 mitigation disabled by command line option\n"); + __hardenbp_enab = false; return false; } - if (need_wa < 0) + if (need_wa < 0) { pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n"); + __hardenbp_enab = false; + } return (need_wa > 0); } @@ -779,3 +792,15 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, { return sprintf(buf, "Mitigation: __user pointer sanitization\n"); } + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, + char *buf) +{ + if (__spectrev2_safe) + return sprintf(buf, "Not affected\n"); + + if (__hardenbp_enab) + return sprintf(buf, "Mitigation: Branch predictor hardening\n"); + + return sprintf(buf, "Vulnerable\n"); +} -- cgit v1.2.3 From d42281b6e49510f078ace15a8ea10f71e6262581 Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 15 Apr 2019 16:21:27 -0500 Subject: arm64: Always enable ssb vulnerability detection Ensure we are always able to detect whether or not the CPU is affected by SSB, so that we can later advertise this to userspace. Signed-off-by: Jeremy Linton Reviewed-by: Andre Przywara Reviewed-by: Catalin Marinas Tested-by: Stefan Wahren [will: Use IS_ENABLED instead of #ifdef] Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 4 ---- arch/arm64/kernel/cpu_errata.c | 9 +++++---- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index e505e1fbd2b9..6ccdc97e5d6a 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -638,11 +638,7 @@ static inline int arm64_get_ssbd_state(void) #endif } -#ifdef CONFIG_ARM64_SSBD void arm64_set_ssbd_mitigation(bool state); -#else -static inline void arm64_set_ssbd_mitigation(bool state) {} -#endif extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index d2bbafa04b3c..b6132783e8cf 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -275,7 +275,6 @@ static int detect_harden_bp_fw(void) return 1; } -#ifdef CONFIG_ARM64_SSBD DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; @@ -348,6 +347,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt, void arm64_set_ssbd_mitigation(bool state) { + if (!IS_ENABLED(CONFIG_ARM64_SSBD)) { + pr_info_once("SSBD disabled by kernel configuration\n"); + return; + } + if (this_cpu_has_cap(ARM64_SSBS)) { if (state) asm volatile(SET_PSTATE_SSBS(0)); @@ -467,7 +471,6 @@ out_printmsg: return required; } -#endif /* CONFIG_ARM64_SSBD */ static void __maybe_unused cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) @@ -759,14 +762,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), }, #endif -#ifdef 
CONFIG_ARM64_SSBD { .desc = "Speculative Store Bypass Disable", .capability = ARM64_SSBD, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches = has_ssbd_mitigation, }, -#endif #ifdef CONFIG_ARM64_ERRATUM_1188873 { /* Cortex-A76 r0p0 to r2p0 */ -- cgit v1.2.3 From 526e065dbca6df0b5a130b84b836b8b3c9f54e21 Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 15 Apr 2019 16:21:28 -0500 Subject: arm64: add sysfs vulnerability show for speculative store bypass Return status based on ssbd_state and __ssb_safe. If the mitigation is disabled, or the firmware isn't responding then return the expected machine state based on a whitelist of known good cores. Given a heterogeneous machine, the overall machine vulnerability defaults to safe but is reset to unsafe when we miss the whitelist and the firmware doesn't explicitly tell us the core is safe. In order to make that work we delay transitioning to vulnerable until we know the firmware isn't responding to avoid a case where we miss the whitelist, but the firmware goes ahead and reports the core is not vulnerable. If all the cores in the machine have SSBS, then __ssb_safe will remain true. Tested-by: Stefan Wahren Signed-off-by: Jeremy Linton Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index b6132783e8cf..4bb0f7cad418 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -278,6 +278,7 @@ static int detect_harden_bp_fw(void) DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; +static bool __ssb_safe = true; static const struct ssbd_options { const char *str; @@ -381,6 +382,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, struct arm_smccc_res res; bool required = true; s32 val; + bool this_cpu_safe = false; WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); @@ -389,8 +391,14 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, goto out_printmsg; } + /* delay setting __ssb_safe until we get a firmware response */ + if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) + this_cpu_safe = true; + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; } @@ -407,6 +415,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, default: ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; } @@ -415,14 +425,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, switch (val) { case SMCCC_RET_NOT_SUPPORTED: ssbd_state = ARM64_SSBD_UNKNOWN; + if (!this_cpu_safe) + __ssb_safe = false; return false; + /* machines with mixed mitigation requirements must not return this */ case SMCCC_RET_NOT_REQUIRED: pr_info_once("%s mitigation not required\n", entry->desc); ssbd_state = ARM64_SSBD_MITIGATED; return false; case SMCCC_RET_SUCCESS: + __ssb_safe = false; required = true; break; @@ -432,6 +446,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, default: WARN_ON(1); + if (!this_cpu_safe) + __ssb_safe = false; return false; } @@ -472,6 +488,14 @@ out_printmsg: return required; } +/* known invulnerable cores */ +static const struct midr_range arm64_ssb_cpus[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + 
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + {}, +}; + static void __maybe_unused cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) { @@ -767,6 +791,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_SSBD, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches = has_ssbd_mitigation, + .midr_range_list = arm64_ssb_cpus, }, #ifdef CONFIG_ARM64_ERRATUM_1188873 { @@ -805,3 +830,20 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, return sprintf(buf, "Vulnerable\n"); } + +ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (__ssb_safe) + return sprintf(buf, "Not affected\n"); + + switch (ssbd_state) { + case ARM64_SSBD_KERNEL: + case ARM64_SSBD_FORCE_ENABLE: + if (IS_ENABLED(CONFIG_ARM64_SSBD)) + return sprintf(buf, + "Mitigation: Speculative Store Bypass disabled via prctl\n"); + } + + return sprintf(buf, "Vulnerable\n"); +} -- cgit v1.2.3 From eb337cdfcd5dd3b10522c2f34140a73a4c285c30 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 30 Apr 2019 16:58:56 +0100 Subject: arm64: ssbs: Don't treat CPUs with SSBS as unaffected by SSB SSBS provides a relatively cheap mitigation for SSB, but it is still a mitigation and its presence does not indicate that the CPU is unaffected by the vulnerability. Tweak the mitigation logic so that we report the correct string in sysfs. Signed-off-by: Will Deacon --- arch/arm64/kernel/cpu_errata.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 4bb0f7cad418..44ef98be001e 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -386,15 +386,17 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + /* delay setting __ssb_safe until we get a firmware response */ + if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) + this_cpu_safe = true; + if (this_cpu_has_cap(ARM64_SSBS)) { + if (!this_cpu_safe) + __ssb_safe = false; required = false; goto out_printmsg; } - /* delay setting __ssb_safe until we get a firmware response */ - if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) - this_cpu_safe = true; - if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { ssbd_state = ARM64_SSBD_UNKNOWN; if (!this_cpu_safe) -- cgit v1.2.3 From a111b7c0f20e13b54df2fa959b3dc0bdf1925ae6 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 12 Apr 2019 15:39:32 -0500 Subject: arm64/speculation: Support 'mitigations=' cmdline option Configure arm64 runtime CPU speculation bug mitigations in accordance with the 'mitigations=' cmdline option. This affects Meltdown, Spectre v2, and Speculative Store Bypass. The default behavior is unchanged. 
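All three arm64 hooks in the diff below key off cpu_mitigations_off(), the existing generic helper behind the 'mitigations=' parameter; nothing arch-specific is added on the parsing side. Roughly, and paraphrased from memory of the generic cpu code rather than quoted, the helper amounts to:

/* Sketch of the generic helper consumed below (not part of this patch). */
enum cpu_mitigations {
        CPU_MITIGATIONS_OFF,
        CPU_MITIGATIONS_AUTO,
        CPU_MITIGATIONS_AUTO_NOSMT,
};

/* set by early_param("mitigations", ...); defaults to AUTO */
extern enum cpu_mitigations cpu_mitigations;

static inline bool cpu_mitigations_off(void)
{
        return cpu_mitigations == CPU_MITIGATIONS_OFF;
}

With that in place, 'mitigations=off' forces ssbd_state to ARM64_SSBD_FORCE_DISABLE, disables the Spectre-v2 branch-predictor hardening, and counts as kpti forced off unless KASLR has already forced it on, which matches the set of equivalences the kernel-parameters.txt hunk spells out.
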
Signed-off-by: Josh Poimboeuf [will: reorder checks so KASLR implies KPTI and SSBS is affected by cmdline] Signed-off-by: Will Deacon --- Documentation/admin-guide/kernel-parameters.txt | 8 +++++--- arch/arm64/kernel/cpu_errata.c | 6 +++++- arch/arm64/kernel/cpufeature.c | 8 +++++++- 3 files changed, 17 insertions(+), 5 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 6a929258faf7..ce226f7ee566 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2545,8 +2545,8 @@ http://repo.or.cz/w/linux-2.6/mini2440.git mitigations= - [X86,PPC,S390] Control optional mitigations for CPU - vulnerabilities. This is a set of curated, + [X86,PPC,S390,ARM64] Control optional mitigations for + CPU vulnerabilities. This is a set of curated, arch-independent options, each of which is an aggregation of existing arch-specific options. @@ -2555,11 +2555,13 @@ improves system performance, but it may also expose users to several CPU vulnerabilities. Equivalent to: nopti [X86,PPC] + kpti=0 [ARM64] nospectre_v1 [PPC] nobp=0 [S390] - nospectre_v2 [X86,PPC,S390] + nospectre_v2 [X86,PPC,S390,ARM64] spectre_v2_user=off [X86] spec_store_bypass_disable=off [X86,PPC] + ssbd=force-off [ARM64] l1tf=off [X86] auto (default) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 44ef98be001e..1b9ce0fdd81d 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -386,6 +387,9 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + if (cpu_mitigations_off()) + ssbd_state = ARM64_SSBD_FORCE_DISABLE; + /* delay setting __ssb_safe until we get a firmware response */ if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) this_cpu_safe = true; @@ -589,7 +593,7 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) } /* forced off */ - if (__nospectre_v2) { + if (__nospectre_v2 || cpu_mitigations_off()) { pr_info_once("spectrev2 mitigation disabled by command line option\n"); __hardenbp_enab = false; return false; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 703ee8564fbd..f3b32d88f165 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -966,7 +967,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), { /* sentinel */ } }; - char const *str = "command line option"; + char const *str = "kpti command line option"; bool meltdown_safe; meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list); @@ -996,6 +997,11 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, } } + if (cpu_mitigations_off() && !__kpti_forced) { + str = "mitigations=off"; + __kpti_forced = -1; + } + if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) { pr_info_once("kernel page table isolation disabled by kernel configuration\n"); return false; -- cgit v1.2.3
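
End result of the series, from the userspace side: one file per issue under the standard vulnerabilities directory. A minimal reader, assuming the usual sysfs mount point and the entry names used by the patches above (entries absent on a given kernel are simply skipped):

/* Minimal sketch of a userspace consumer of the files added above. */
#include <stdio.h>

int main(void)
{
        static const char *entries[] = {
                "meltdown", "spectre_v1", "spectre_v2", "spec_store_bypass",
        };
        char path[128], line[128];
        unsigned int i;

        for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/vulnerabilities/%s",
                         entries[i]);
                f = fopen(path, "r");
                if (!f)
                        continue;       /* not exposed on this kernel */
                if (fgets(line, sizeof(line), f))
                        printf("%-17s: %s", entries[i], line);
                fclose(f);
        }
        return 0;
}

Typical strings are the ones returned by the cpu_show_*() helpers above, e.g. "Not affected", "Mitigation: PTI" or "Vulnerable".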