author     Will Deacon <will@kernel.org>  2020-09-16 00:00:31 +0200
committer  Will Deacon <will@kernel.org>  2020-09-29 17:08:15 +0200
commit     688f1e4b6d8f792c44bec3a448664fd9e8949964 (patch)
tree       009c75b4e887f954e97244795411b7205d29a6a2
parent     KVM: arm64: Simplify install_bp_hardening_cb() (diff)
arm64: Rename ARM64_HARDEN_BRANCH_PREDICTOR to ARM64_SPECTRE_V2
For better or worse, the world knows about "Spectre" and not about "Branch predictor hardening". Rename ARM64_HARDEN_BRANCH_PREDICTOR to ARM64_SPECTRE_V2 as part of moving all of the Spectre mitigations into their own little corner.

Signed-off-by: Will Deacon <will@kernel.org>
-rw-r--r--  arch/arm64/include/asm/cpucaps.h  |  2
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h  | 27
-rw-r--r--  arch/arm64/include/asm/mmu.h      |  2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c    |  2
4 files changed, 16 insertions(+), 17 deletions(-)
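Each ARM64_* constant in cpucaps.h indexes the kernel's CPU-capability bitmap, and affected code guards itself with cpus_have_const_cap(), as the hunks below show. What follows is a minimal user-space sketch of that pattern, with simplified stand-ins for the kernel's bitmap and helpers; only the constant value 24 comes from the diff itself.

/*
 * Minimal user-space sketch (NOT kernel code) of the capability-check
 * pattern this patch renames: a numbered capability indexes a bitmap,
 * and mitigation code is guarded on that bit.
 */
#include <stdbool.h>
#include <stdio.h>

#define ARM64_SPECTRE_V2	24	/* was ARM64_HARDEN_BRANCH_PREDICTOR */
#define ARM64_NCAPS		64	/* illustrative table size */
#define BITS_PER_LONG		(8 * sizeof(unsigned long))

static unsigned long cpu_caps[(ARM64_NCAPS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void set_cpu_cap(int cap)
{
	cpu_caps[cap / BITS_PER_LONG] |= 1UL << (cap % BITS_PER_LONG);
}

static bool cpus_have_const_cap(int cap)
{
	return cpu_caps[cap / BITS_PER_LONG] & (1UL << (cap % BITS_PER_LONG));
}

int main(void)
{
	set_cpu_cap(ARM64_SPECTRE_V2);	/* pretend CPU detection matched */

	if (cpus_have_const_cap(ARM64_SPECTRE_V2))
		printf("Spectre-v2 mitigation path taken\n");
	return 0;
}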
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 07b643a70710..348bfcf6c818 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -31,7 +31,7 @@
#define ARM64_HAS_DCPOP 21
#define ARM64_SVE 22
#define ARM64_UNMAP_KERNEL_AT_EL0 23
-#define ARM64_HARDEN_BRANCH_PREDICTOR 24
+#define ARM64_SPECTRE_V2 24
#define ARM64_HAS_RAS_EXTN 25
#define ARM64_WORKAROUND_843419 26
#define ARM64_HAS_CACHE_IDC 27
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index dfd176b0642e..873e12430ac7 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -435,14 +435,13 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
* EL2 vectors can be mapped and rerouted in a number of ways,
* depending on the kernel configuration and CPU present:
*
- * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
- * hardening sequence is placed in one of the vector slots, which is
- * executed before jumping to the real vectors.
+ * - If the CPU is affected by Spectre-v2, the hardening sequence is
+ * placed in one of the vector slots, which is executed before jumping
+ * to the real vectors.
*
- * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
- * ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
- * hardening sequence is mapped next to the idmap page, and executed
- * before jumping to the real vectors.
+ * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ * containing the hardening sequence is mapped next to the idmap page,
+ * and executed before jumping to the real vectors.
*
* - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
* empty slot is selected, mapped next to the idmap page, and
@@ -464,7 +463,7 @@ static inline void *kvm_get_hyp_vector(void)
void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
int slot = -1;

- if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+ if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
slot = data->hyp_vectors_slot;
}
@@ -485,15 +484,15 @@ static inline void *kvm_get_hyp_vector(void)
static inline int kvm_map_vectors(void)
{
/*
- * HBP = ARM64_HARDEN_BRANCH_PREDICTOR
+ * SV2 = ARM64_SPECTRE_V2
* HEL2 = ARM64_HARDEN_EL2_VECTORS
*
- * !HBP + !HEL2 -> use direct vectors
- * HBP + !HEL2 -> use hardened vectors in place
- * !HBP + HEL2 -> allocate one vector slot and use exec mapping
- * HBP + HEL2 -> use hardened vectors and use exec mapping
+ * !SV2 + !HEL2 -> use direct vectors
+ * SV2 + !HEL2 -> use hardened vectors in place
+ * !SV2 + HEL2 -> allocate one vector slot and use exec mapping
+ * SV2 + HEL2 -> use hardened vectors and use exec mapping
*/
- if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
+ if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
}
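The SV2/HEL2 comment in the hunk above is a truth table over two independent capabilities. A small sketch of the same selection logic, using invented names for clarity (the enum and helper below are not kernel API):

/*
 * Illustrative restatement of the SV2/HEL2 matrix in kvm_map_vectors();
 * invented names, not the kernel's.
 */
#include <stdbool.h>

enum hyp_vector_choice {
	DIRECT_VECTORS,		/* !SV2 + !HEL2 */
	HARDENED_IN_PLACE,	/*  SV2 + !HEL2 */
	EMPTY_SLOT_EXEC_MAP,	/* !SV2 +  HEL2 */
	HARDENED_EXEC_MAP,	/*  SV2 +  HEL2 */
};

static enum hyp_vector_choice choose_hyp_vectors(bool sv2, bool hel2)
{
	if (!hel2)
		return sv2 ? HARDENED_IN_PLACE : DIRECT_VECTORS;
	return sv2 ? HARDENED_EXEC_MAP : EMPTY_SLOT_EXEC_MAP;
}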
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index f5e3efeb5b97..cbff2d42c1d8 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -56,7 +56,7 @@ static inline void arm64_apply_bp_hardening(void)
{
struct bp_hardening_data *d;

- if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
return;

d = arm64_get_bp_hardening_data();
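arm64_apply_bp_hardening() has the usual mitigation shape: bail out early when the capability bit is clear (a static-key fast path in the kernel), then make an indirect call through per-CPU hardening data. A simplified stand-alone sketch, with stand-in types and globals rather than the kernel's exact definitions:

#include <stdbool.h>

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int hyp_vectors_slot;
	bp_hardening_cb_t fn;
};

static bool have_spectre_v2;			/* stand-in for cpus_have_const_cap() */
static struct bp_hardening_data bp_data;	/* stand-in for the per-CPU data */

static void apply_bp_hardening(void)
{
	if (!have_spectre_v2)
		return;				/* unaffected CPUs pay (almost) nothing */

	if (bp_data.fn)
		bp_data.fn();			/* run the installed hardening sequence */
}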
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a72ca57f5630..b275f2d5e7a3 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -877,7 +877,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
#endif
{
.desc = "Branch predictor hardening",
- .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ .capability = ARM64_SPECTRE_V2,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = check_branch_predictor,
.cpu_enable = cpu_enable_branch_predictor_hardening,
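The cpu_errata.c entry above is the other half of the pattern: a table row tying the renamed capability to a detection hook (.matches) and a per-CPU enable hook (.cpu_enable). At boot the kernel walks such a table, sets the capability bit when a match fires, and later invokes the enable hook on each CPU. A self-contained sketch with simplified types and stub hooks standing in for the kernel's:

#include <stdbool.h>

#define ARM64_SPECTRE_V2 24

struct cpu_capability {
	const char *desc;
	int capability;
	bool (*matches)(void);		/* does this CPU need the mitigation? */
	void (*cpu_enable)(void);	/* install it on each CPU that does */
};

static bool check_branch_predictor(void) { return true; }	/* stub detection */
static void cpu_enable_branch_predictor_hardening(void) { }	/* stub enable */

static const struct cpu_capability errata[] = {
	{
		.desc		= "Branch predictor hardening",
		.capability	= ARM64_SPECTRE_V2,
		.matches	= check_branch_predictor,
		.cpu_enable	= cpu_enable_branch_predictor_hardening,
	},
};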