author		Oliver Upton <oliver.upton@linux.dev>	2023-06-15 15:02:49 +0200
committer	Oliver Upton <oliver.upton@linux.dev>	2023-06-15 15:02:49 +0200
commit		b710fe0d30dd0ad0402ab4936a5ab16b87987e70
tree		66cd099cf3309b38071e4dfb2313486e2e8ffc59 /arch/arm64/include/asm
parent		Merge branch kvm-arm64/ffa-proxy into kvmarm/next
parent		KVM: arm64: Fix hVHE init on CPUs where HCR_EL2.E2H is not RES1
Merge branch kvm-arm64/hvhe into kvmarm/next
* kvm-arm64/hvhe:
: Support for running split-hypervisor w/VHE, courtesy of Marc Zyngier
:
: From the cover letter:
:
: KVM (on ARMv8.0) and pKVM (on all revisions of the architecture) use
: the split hypervisor model that makes the EL2 code more or less
: standalone. In the latter case, we totally ignore the VHE mode and
: stick with the good old v8.0 EL2 setup.
:
: We introduce a new "mode" for KVM called hVHE, in reference to the
: nVHE mode, indicating that only the hypervisor is using VHE.
KVM: arm64: Fix hVHE init on CPUs where HCR_EL2.E2H is not RES1
arm64: Allow arm64_sw.hvhe on command line
KVM: arm64: Force HCR_E2H in guest context when ARM64_KVM_HVHE is set
KVM: arm64: Program the timer traps with VHE layout in hVHE mode
KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration
KVM: arm64: Adjust EL2 stage-1 leaf AP bits when ARM64_KVM_HVHE is set
KVM: arm64: Disable TTBR1_EL2 when using ARM64_KVM_HVHE
KVM: arm64: Force HCR_EL2.E2H when ARM64_KVM_HVHE is set
KVM: arm64: Key use of VHE instructions in nVHE code off ARM64_KVM_HVHE
KVM: arm64: Remove alternatives from sysreg accessors in VHE hypervisor context
arm64: Use CPACR_EL1 format to set CPTR_EL2 when E2H is set
arm64: Allow EL1 physical timer access when running VHE
arm64: Don't enable VHE for the kernel if OVERRIDE_HVHE is set
arm64: Add KVM_HVHE capability and has_hvhe() predicate
arm64: Turn kaslr_feature_override into a generic SW feature override
arm64: Prevent the use of is_kernel_in_hyp_mode() in hypervisor code
KVM: arm64: Drop is_kernel_in_hyp_mode() from __invalidate_icache_guest_page()
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
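
As a reading aid, the sketch below restates the three hypervisor configurations the cover letter describes as C predicates. It is illustrative only and not code from this merge: cpu_has_vhe_cap() and cpu_has_hvhe_cap() are hypothetical stand-ins for the kernel's cpucap machinery, and the real predicates (has_vhe(), has_hvhe()) live in arch/arm64/include/asm/virt.h and appear in the diff below.

	#include <stdbool.h>

	/* Hypothetical stand-ins for the kernel's cpucap queries. */
	extern bool cpu_has_vhe_cap(void);	/* ARM64_HAS_VIRT_HOST_EXTN */
	extern bool cpu_has_hvhe_cap(void);	/* ARM64_KVM_HVHE */

	/* VHE: kernel and hypervisor both run at EL2, HCR_EL2.E2H set. */
	static bool has_vhe(void)
	{
		return cpu_has_vhe_cap();
	}

	/*
	 * hVHE: the kernel runs at EL1 as in nVHE/pKVM, but the split
	 * hypervisor at EL2 keeps HCR_EL2.E2H set, so EL2 uses the VHE
	 * register layout (e.g. CPTR_EL2 takes the CPACR_EL1 format).
	 */
	static bool has_hvhe(void)
	{
		return !has_vhe() && cpu_has_hvhe_cap();
	}

	/* nVHE: the classic ARMv8.0 EL2 setup, E2H clear or unimplemented. */
	static bool is_nvhe(void)
	{
		return !has_vhe() && !has_hvhe();
	}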
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--	arch/arm64/include/asm/cpufeature.h	 5
-rw-r--r--	arch/arm64/include/asm/el2_setup.h	27
-rw-r--r--	arch/arm64/include/asm/kvm_arm.h	 4
-rw-r--r--	arch/arm64/include/asm/kvm_emulate.h	33
-rw-r--r--	arch/arm64/include/asm/kvm_hyp.h	37
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	 3
-rw-r--r--	arch/arm64/include/asm/virt.h		12
7 files changed, 103 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 6bf013fb110d..3d4b547ae312 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -15,6 +15,9 @@
 #define MAX_CPU_FEATURES	128
 #define cpu_feature(x)		KERNEL_HWCAP_ ## x
 
+#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
+#define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
+
 #ifndef __ASSEMBLY__
 
 #include <linux/bug.h>
@@ -925,6 +928,8 @@ extern struct arm64_ftr_override id_aa64smfr0_override;
 extern struct arm64_ftr_override id_aa64isar1_override;
 extern struct arm64_ftr_override id_aa64isar2_override;
 
+extern struct arm64_ftr_override arm64_sw_feature_override;
+
 u32 get_kvm_ipa_limit(void);
 void dump_cpu_features(void);
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 037724b19c5c..5a353f94e9cd 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -34,6 +34,11 @@
  */
 .macro __init_el2_timers
 	mov	x0, #3				// Enable EL1 physical timers
+	mrs	x1, hcr_el2
+	and	x1, x1, #HCR_E2H
+	cbz	x1, .LnVHE_\@
+	lsl	x0, x0, #10
+.LnVHE_\@:
 	msr	cnthctl_el2, x0
 	msr	cntvoff_el2, xzr		// Clear virtual offset
 .endm
@@ -124,8 +129,15 @@
 .endm
 
 /* Coprocessor traps */
-.macro __init_el2_nvhe_cptr
+.macro __init_el2_cptr
+	mrs	x1, hcr_el2
+	and	x1, x1, #HCR_E2H
+	cbz	x1, .LnVHE_\@
+	mov	x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
+	b	.Lset_cptr_\@
+.LnVHE_\@:
 	mov	x0, #0x33ff
+.Lset_cptr_\@:
 	msr	cptr_el2, x0			// Disable copro. traps to EL2
 .endm
 
@@ -191,9 +203,8 @@
 	__init_el2_gicv3
 	__init_el2_hstr
 	__init_el2_nvhe_idregs
-	__init_el2_nvhe_cptr
+	__init_el2_cptr
 	__init_el2_fgt
-	__init_el2_nvhe_prepare_eret
 .endm
 
 #ifndef __KVM_NVHE_HYPERVISOR__
@@ -239,7 +250,17 @@
 
 .Linit_sve_\@:	/* SVE register access */
 	mrs	x0, cptr_el2			// Disable SVE traps
+	mrs	x1, hcr_el2
+	and	x1, x1, #HCR_E2H
+	cbz	x1, .Lcptr_nvhe_\@
+
+	// VHE case
+	orr	x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+	b	.Lset_cptr_\@
+
+.Lcptr_nvhe_\@: // nVHE case
 	bic	x0, x0, #CPTR_EL2_TZ
+.Lset_cptr_\@:
 	msr	cptr_el2, x0
 	isb
 	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index baef29fcbeee..e448f8f7fd7e 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -285,7 +285,6 @@
 #define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
 #define CPTR_EL2_TZ	(1 << 8)
 #define CPTR_NVHE_EL2_RES1	0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
-#define CPTR_EL2_DEFAULT	CPTR_NVHE_EL2_RES1
 #define CPTR_NVHE_EL2_RES0	(GENMASK(63, 32) |	\
 				 GENMASK(29, 21) |	\
 				 GENMASK(19, 14) |	\
@@ -347,8 +346,7 @@
 	ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
 	ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)
 
-#define CPACR_EL1_DEFAULT	(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |\
-				 CPACR_EL1_ZEN_EL1EN)
+#define CPACR_EL1_TTA		(1 << 28)
 
 #define kvm_mode_names				\
 	{ PSR_MODE_EL0t,	"EL0t" },	\
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index b31b32ecbe2d..cf40d19a72f8 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -74,7 +74,7 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
-	if (is_kernel_in_hyp_mode())
+	if (has_vhe() || has_hvhe())
 		vcpu->arch.hcr_el2 |= HCR_E2H;
 	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
 		/* route synchronous external abort exceptions to EL2 */
@@ -570,4 +570,35 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
 	return test_bit(feature, vcpu->arch.features);
 }
 
+static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
+{
+	u64 val;
+
+	if (has_vhe()) {
+		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
+		       CPACR_EL1_ZEN_EL1EN);
+	} else if (has_hvhe()) {
+		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+	} else {
+		val = CPTR_NVHE_EL2_RES1;
+
+		if (vcpu_has_sve(vcpu) &&
+		    (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+			val |= CPTR_EL2_TZ;
+		if (cpus_have_final_cap(ARM64_SME))
+			val &= ~CPTR_EL2_TSM;
+	}
+
+	return val;
+}
+
+static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
+{
+	u64 val = kvm_get_reset_cptr_el2(vcpu);
+
+	if (has_vhe() || has_hvhe())
+		write_sysreg(val, cpacr_el1);
+	else
+		write_sysreg(val, cptr_el2);
+}
 #endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index bdd9cf546d95..b7238c72a04c 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -16,12 +16,35 @@ DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
 DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
+/*
+ * Unified accessors for registers that have a different encoding
+ * between VHE and non-VHE. They must be specified without their "ELx"
+ * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
+ */
+
+#if defined(__KVM_VHE_HYPERVISOR__)
+
+#define read_sysreg_el0(r)	read_sysreg_s(r##_EL02)
+#define write_sysreg_el0(v,r)	write_sysreg_s(v, r##_EL02)
+#define read_sysreg_el1(r)	read_sysreg_s(r##_EL12)
+#define write_sysreg_el1(v,r)	write_sysreg_s(v, r##_EL12)
+#define read_sysreg_el2(r)	read_sysreg_s(r##_EL1)
+#define write_sysreg_el2(v,r)	write_sysreg_s(v, r##_EL1)
+
+#else // !__KVM_VHE_HYPERVISOR__
+
+#if defined(__KVM_NVHE_HYPERVISOR__)
+#define VHE_ALT_KEY	ARM64_KVM_HVHE
+#else
+#define VHE_ALT_KEY	ARM64_HAS_VIRT_HOST_EXTN
+#endif
+
 #define read_sysreg_elx(r,nvh,vh)					\
 	({								\
 		u64 reg;						\
-		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),	\
+		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
 					 __mrs_s("%0", r##vh),		\
-					 ARM64_HAS_VIRT_HOST_EXTN)	\
+					 VHE_ALT_KEY)			\
 			     : "=r" (reg));				\
 		reg;							\
 	})
@@ -31,16 +54,10 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 		u64 __val = (u64)(v);					\
 		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
 					 __msr_s(r##vh, "%x0"),		\
-					 ARM64_HAS_VIRT_HOST_EXTN)	\
+					 VHE_ALT_KEY)			\
 			     : : "rZ" (__val));				\
 	} while (0)
 
-/*
- * Unified accessors for registers that have a different encoding
- * between VHE and non-VHE. They must be specified without their "ELx"
- * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
- */
-
 #define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
 #define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
 #define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
@@ -48,6 +65,8 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 #define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
 
+#endif	// __KVM_VHE_HYPERVISOR__
+
 /*
  * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
  * static inline can allow the compiler to out-of-line this. KVM always wants
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 20c50e00496d..0e1e1ab17b4d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -228,7 +228,8 @@ static inline void __invalidate_icache_guest_page(void *va, size_t size)
 	if (icache_is_aliasing()) {
 		/* any kind of VIPT cache */
 		icache_inval_all_pou();
-	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
+	} else if (read_sysreg(CurrentEL) != CurrentEL_EL1 ||
+		   !icache_is_vpipt()) {
 		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
 		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
 	}
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 4eb601e7de50..5227db7640c8 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -110,8 +110,10 @@ static inline bool is_hyp_mode_mismatched(void)
 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
-static inline bool is_kernel_in_hyp_mode(void)
+static __always_inline bool is_kernel_in_hyp_mode(void)
 {
+	BUILD_BUG_ON(__is_defined(__KVM_NVHE_HYPERVISOR__) ||
+		     __is_defined(__KVM_VHE_HYPERVISOR__));
 	return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
@@ -140,6 +142,14 @@ static __always_inline bool is_protected_kvm_enabled(void)
 	return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
 }
 
+static __always_inline bool has_hvhe(void)
+{
+	if (is_vhe_hyp_code())
+		return false;
+
+	return cpus_have_final_cap(ARM64_KVM_HVHE);
+}
+
 static inline bool is_hyp_nvhe(void)
 {
 	return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
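
The kvm_emulate.h hunk above is the heart of the layout switch: once HCR_EL2.E2H is set (VHE or hVHE), CPTR_EL2 is accessed using the CPACR_EL1 format, where FP/SVE bits are positive enables rather than nVHE's trap bits. The standalone sketch below restates that selection logic; it is not kernel code. The reset_cptr_value() helper and its parameters are hypothetical, the bit values mirror the kernel's CPACR_EL1/CPTR_EL2 definitions, and has_vhe()/has_hvhe() are stubs standing in for the real predicates in asm/virt.h.

	#include <stdbool.h>
	#include <stdint.h>

	/* Bit values mirroring the kernel's CPACR_EL1/CPTR_EL2 definitions. */
	#define CPACR_EL1_ZEN_EL1EN	(UINT64_C(1) << 16)
	#define CPACR_EL1_FPEN_EL1EN	(UINT64_C(1) << 20)
	#define CPACR_EL1_FPEN_EL0EN	(UINT64_C(1) << 21)
	#define CPTR_NVHE_EL2_RES1	UINT64_C(0x000032ff)	/* known RES1 bits (nVHE) */
	#define CPTR_EL2_TZ		(UINT64_C(1) << 8)	/* SVE trap bit (nVHE layout) */

	/* Hypothetical mode predicates; see the virt.h hunk for the real ones. */
	extern bool has_vhe(void);
	extern bool has_hvhe(void);

	static uint64_t reset_cptr_value(bool guest_has_sve, bool guest_owns_fp)
	{
		/* E2H set: CPACR_EL1 layout, bits are positive enables. */
		if (has_vhe())
			return CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
			       CPACR_EL1_ZEN_EL1EN;

		/* hVHE: same layout, but ZEN stays clear so SVE stays trapped. */
		if (has_hvhe())
			return CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;

		/* nVHE layout: start from the RES1 bits and add trap bits. */
		uint64_t val = CPTR_NVHE_EL2_RES1;

		if (guest_has_sve && guest_owns_fp)
			val |= CPTR_EL2_TZ;	/* trap SVE while the guest owns FP */
		return val;
	}

A caller would then write the chosen value to cpacr_el1 or cptr_el2 depending on the layout, which is exactly what the new kvm_reset_cptr_el2() helper does; the real kvm_get_reset_cptr_el2() additionally clears CPTR_EL2_TSM when SME is implemented, which the sketch omits.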