From 9ef2b48be9bba70ded9372fc81e678e707306060 Mon Sep 17 00:00:00 2001
From: Will Deacon <will@kernel.org>
Date: Mon, 28 Sep 2020 11:45:24 +0100
Subject: KVM: arm64: Allow patching EL2 vectors even when KASLR is not enabled

Patching the EL2 exception vectors is integral to the Spectre-v2
workaround, where it can be necessary to execute CPU-specific sequences
to nobble the branch predictor before running the hypervisor text
proper.

Remove the dependency on CONFIG_RANDOMIZE_BASE and allow the EL2
vectors to be patched even when KASLR is not enabled.

Fixes: 7a132017e7a5 ("KVM: arm64: Replace CONFIG_KVM_INDIRECT_VECTORS with CONFIG_RANDOMIZE_BASE")
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/r/202009221053.Jv1XsQUZ%lkp@intel.com
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/kvm_mmu.h | 51 +---------------------------------------
 1 file changed, 1 insertion(+), 50 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 36606ef9e435..cff1cebc7590 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -9,6 +9,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/cpufeature.h>
 
 /*
@@ -430,7 +431,6 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }
 
-#ifdef CONFIG_RANDOMIZE_BASE
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
@@ -451,12 +451,9 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
-#include <asm/mmu.h>
-
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
-/* This is called on both VHE and !VHE systems */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
@@ -480,52 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
 	return vect;
 }
 
-/* This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
-	/*
-	 * SV2  = ARM64_SPECTRE_V2
-	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
-	 *
-	 * !SV2 + !HEL2 -> use direct vectors
-	 *  SV2 + !HEL2 -> use hardened vectors in place
-	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
-	 *  SV2 +  HEL2 -> use hardened vertors and use exec mapping
-	 */
-	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-	}
-
-	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
-		/*
-		 * Always allocate a spare vector slot, as we don't
-		 * know yet which CPUs have a BP hardening slot that
-		 * we can reuse.
-		 */
-		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-		return create_hyp_exec_mappings(vect_pa, size,
-						&__kvm_bp_vect_base);
-	}
-
-	return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
-	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-
-static inline int kvm_map_vectors(void)
-{
-	return 0;
-}
-#endif
-
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 /*
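
Note: the four configurations enumerated in the removed kvm_map_vectors()
comment can be modelled with a small stand-alone C sketch. Everything
below (vector_choice, pick_hyp_vector, SLOT_SIZE and the placeholder slot
numbers) is invented for illustration; it only models the decision table
and is not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define SLOT_SIZE 0x800	/* each vector slot is 2KiB (SZ_2K) */

struct vector_choice {
	const char *base;	/* which vector base ends up being used */
	int slot;		/* hardening slot within that base, -1 if none */
	bool exec_mapping;	/* needs an exec mapping next to the idmap */
};

/* Mirrors the SV2/HEL2 truth table from the removed comment. */
static struct vector_choice pick_hyp_vector(bool sv2, bool hel2)
{
	struct vector_choice c = { "__kvm_hyp_vector", -1, false };

	if (sv2) {
		/* SV2: hardening sequence lives in a BP-hardening slot */
		c.base = "__bp_harden_hyp_vecs";
		c.slot = 0;	/* placeholder for data->hyp_vectors_slot */
	}
	if (hel2) {
		/* HEL2: vectors are re-mapped next to the idmap page */
		c.exec_mapping = true;
		if (c.slot == -1)
			c.slot = 1;	/* placeholder for the spare slot */
	}
	return c;
}

int main(void)
{
	for (int sv2 = 0; sv2 <= 1; sv2++) {
		for (int hel2 = 0; hel2 <= 1; hel2++) {
			struct vector_choice c = pick_hyp_vector(sv2, hel2);

			printf("SV2=%d HEL2=%d -> base=%s slot=%d offset=%#x exec_map=%d\n",
			       sv2, hel2, c.base, c.slot,
			       c.slot < 0 ? 0 : c.slot * SLOT_SIZE,
			       c.exec_mapping);
		}
	}
	return 0;
}

The slot index here stands in for what kvm_get_hyp_vector() (whose body
sits between the second and third hunks above) turns into a byte offset
into the vector page, one 2KiB slot at a time.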