| author | Oliver Upton <oliver.upton@linux.dev> | 2023-10-30 21:21:19 +0100 |
|---|---|---|
| committer | Oliver Upton <oliver.upton@linux.dev> | 2023-10-30 21:21:19 +0100 |
| commit | 53ce49ea75602b51a1feb3844d535ced42b2d8c2 (patch) | |
| tree | e454a1245943f9d7927a786cb343f946ab03525a /arch/arm64 | |
| parent | Merge branch kvm-arm64/writable-id-regs into kvmarm/next (diff) | |
| parent | KVM: arm64: Expose MOPS instructions to guests (diff) | |
Merge branch kvm-arm64/mops into kvmarm/next
* kvm-arm64/mops:
: KVM support for MOPS, courtesy of Kristina Martsenko
:
: MOPS adds new instructions for accelerating memcpy(), memset(), and
: memmove() operations in hardware. This series brings virtualization
: support for KVM guests, and allows VMs to run on asymmetric systems
: that may have different MOPS implementations.
KVM: arm64: Expose MOPS instructions to guests
KVM: arm64: Add handler for MOPS exceptions
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
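
For context, a minimal user-space sketch of the instruction sequence this series virtualizes; it is not part of the merge itself. CPYP (prologue), CPYM (main) and CPYE (epilogue) together implement a memcpy(). The mops_memcpy() wrapper name is hypothetical, and the code assumes a toolchain and CPU supporting FEAT_MOPS (e.g. -march=armv8.8-a):

/* Hypothetical example, not taken from this series: the MOPS copy
 * sequence that the handlers below may have to restart. Each
 * instruction updates the pointer and size registers in place, so an
 * exception taken mid-sequence leaves them in the implementation's
 * intermediate format (Option A or Option B). */
static inline void mops_memcpy(void *dst, const void *src, unsigned long n)
{
	asm volatile("cpyp	[%0]!, [%1]!, %2!\n\t"
		     "cpym	[%0]!, [%1]!, %2!\n\t"
		     "cpye	[%0]!, [%1]!, %2!"
		     : "+r" (dst), "+r" (src), "+r" (n)
		     :
		     : "cc", "memory");
}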
Diffstat (limited to 'arch/arm64')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | arch/arm64/include/asm/kvm_arm.h | 4 |
| -rw-r--r-- | arch/arm64/include/asm/traps.h | 54 |
| -rw-r--r-- | arch/arm64/kernel/traps.c | 48 |
| -rw-r--r-- | arch/arm64/kvm/hyp/include/hyp/switch.h | 17 |
| -rw-r--r-- | arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 3 |
| -rw-r--r-- | arch/arm64/kvm/hyp/nvhe/switch.c | 2 |
| -rw-r--r-- | arch/arm64/kvm/hyp/vhe/switch.c | 1 |
| -rw-r--r-- | arch/arm64/kvm/sys_regs.c | 2 |

8 files changed, 78 insertions(+), 53 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 5882b2415596..2186927b1d21 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -102,7 +102,9 @@
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
-#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
+#define HCRX_GUEST_FLAGS \
+	(HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
+	 (cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
 #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
 
 /* TCR_EL2 Registers bits */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d66dfb3a72dd..eefe766d6161 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -9,10 +9,9 @@
 #include <linux/list.h>
 
 #include <asm/esr.h>
+#include <asm/ptrace.h>
 #include <asm/sections.h>
 
-struct pt_regs;
-
 #ifdef CONFIG_ARMV8_DEPRECATED
 bool try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn);
 #else
@@ -101,4 +100,55 @@ static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr)
 
 bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr);
 void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr);
+
+static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned long esr)
+{
+	bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
+	bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
+	int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
+	int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
+	int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
+	unsigned long dst, src, size;
+
+	dst = regs->regs[dstreg];
+	src = regs->regs[srcreg];
+	size = regs->regs[sizereg];
+
+	/*
+	 * Put the registers back in the original format suitable for a
+	 * prologue instruction, using the generic return routine from the
+	 * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
+	 */
+	if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
+		/* SET* instruction */
+		if (option_a ^ wrong_option) {
+			/* Format is from Option A; forward set */
+			regs->regs[dstreg] = dst + size;
+			regs->regs[sizereg] = -size;
+		}
+	} else {
+		/* CPY* instruction */
+		if (!(option_a ^ wrong_option)) {
+			/* Format is from Option B */
+			if (regs->pstate & PSR_N_BIT) {
+				/* Backward copy */
+				regs->regs[dstreg] = dst - size;
+				regs->regs[srcreg] = src - size;
+			}
+		} else {
+			/* Format is from Option A */
+			if (size & BIT(63)) {
+				/* Forward copy */
+				regs->regs[dstreg] = dst + size;
+				regs->regs[srcreg] = src + size;
+				regs->regs[sizereg] = -size;
+			}
+		}
+	}
+
+	if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
+		regs->pc -= 8;
+	else
+		regs->pc -= 4;
+}
 #endif
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8b70759cdbb9..ede65a20e7dc 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -516,53 +516,7 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
 
 void do_el0_mops(struct pt_regs *regs, unsigned long esr)
 {
-	bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
-	bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
-	int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
-	int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
-	int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
-	unsigned long dst, src, size;
-
-	dst = pt_regs_read_reg(regs, dstreg);
-	src = pt_regs_read_reg(regs, srcreg);
-	size = pt_regs_read_reg(regs, sizereg);
-
-	/*
-	 * Put the registers back in the original format suitable for a
-	 * prologue instruction, using the generic return routine from the
-	 * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
-	 */
-	if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
-		/* SET* instruction */
-		if (option_a ^ wrong_option) {
-			/* Format is from Option A; forward set */
-			pt_regs_write_reg(regs, dstreg, dst + size);
-			pt_regs_write_reg(regs, sizereg, -size);
-		}
-	} else {
-		/* CPY* instruction */
-		if (!(option_a ^ wrong_option)) {
-			/* Format is from Option B */
-			if (regs->pstate & PSR_N_BIT) {
-				/* Backward copy */
-				pt_regs_write_reg(regs, dstreg, dst - size);
-				pt_regs_write_reg(regs, srcreg, src - size);
-			}
-		} else {
-			/* Format is from Option A */
-			if (size & BIT(63)) {
-				/* Forward copy */
-				pt_regs_write_reg(regs, dstreg, dst + size);
-				pt_regs_write_reg(regs, srcreg, src + size);
-				pt_regs_write_reg(regs, sizereg, -size);
-			}
-		}
-	}
-
-	if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
-		regs->pc -= 8;
-	else
-		regs->pc -= 4;
+	arm64_mops_reset_regs(&regs->user_regs, esr);
 
 	/*
 	 * If single stepping then finish the step before executing the
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 9cfe6bd1dbe4..f99d8af0b9af 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -30,6 +30,7 @@
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>
+#include <asm/traps.h>
 
 struct kvm_exception_table_entry {
 	int insn, fixup;
@@ -265,6 +266,22 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
 }
 
+static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
+	arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
+	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
+
+	/*
+	 * Finish potential single step before executing the prologue
+	 * instruction.
+	 */
+	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+
+	return true;
+}
+
 static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
 {
 	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 37440e1dda93..e91922daa8ca 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -197,7 +197,8 @@
 
 #define PVM_ID_AA64ISAR2_ALLOW (\
 	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) \
+	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \
 	)
 
 u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c353a06ee7e6..c50f8459e4fc 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -192,6 +192,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn pvm_exit_handlers[] = {
@@ -203,6 +204,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index b0cafd7c5f8f..c8e70e8ceeeb 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -139,6 +139,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2c4923fce573..d541ce9f48b9 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1348,7 +1348,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 			 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
 		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
-		val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
 		break;
 	case SYS_ID_AA64MMFR2_EL1:
 		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
@@ -2099,7 +2098,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 				       ID_AA64ISAR1_EL1_API |
 				       ID_AA64ISAR1_EL1_APA)),
 	ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
-					ID_AA64ISAR2_EL1_MOPS |
 					ID_AA64ISAR2_EL1_APA3 |
 					ID_AA64ISAR2_EL1_GPA3)),
 	ID_UNALLOCATED(6,3),
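
To make the register rewind in arm64_mops_reset_regs() above concrete, here is a standalone worked example of the Option A forward-copy case (illustrative only, not kernel code; the concrete addresses and sizes are assumptions). Mid-operation, the destination and source registers point past the end of the buffers and the size register holds the negated count of bytes still outstanding, so converting back to prologue format means adding the negative size onto both pointers and negating the size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed mid-copy state for memcpy(0x1000, 0x2000, 0x100) in
	 * Option A format, with 0x40 bytes still outstanding: the
	 * prologue advanced Xd/Xs past the buffer ends and Xn counts up
	 * towards zero from -size. */
	uint64_t xd = 0x1100, xs = 0x2100, xn = (uint64_t)-0x40;

	/* The reset: regs[dstreg] = dst + size, regs[srcreg] = src + size,
	 * regs[sizereg] = -size, mirroring the Option A branch above. */
	xd += xn;	/* 0x10c0: first byte not yet copied */
	xs += xn;	/* 0x20c0: first byte not yet read */
	xn = -xn;	/* 0x40: positive bytes remaining */

	/* Re-running the prologue with these values (after the PC is
	 * backed up) redoes exactly the outstanding tail of the copy. */
	printf("restart: dst=%#llx src=%#llx size=%#llx\n",
	       (unsigned long long)xd, (unsigned long long)xs,
	       (unsigned long long)xn);
	return 0;
}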