author		Paolo Bonzini <pbonzini@redhat.com>	2022-02-05 06:58:25 +0100
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-02-05 06:58:25 +0100
commit		7e6a6b400db8048bd1c06e497e338388413cf5bc (patch)
tree		794f9fcdc7a1bfb9a2812e90fc76809d810203b2 /arch/x86/kvm/vmx
parent		KVM: x86: Use ERR_PTR_USR() to return -EFAULT as a __user pointer (diff)
parent		KVM: arm64: Workaround Cortex-A510's single-step and PAC trap errata (diff)
download	linux-7e6a6b400db8048bd1c06e497e338388413cf5bc.tar.xz
		linux-7e6a6b400db8048bd1c06e497e338388413cf5bc.zip
Merge tag 'kvmarm-fixes-5.17-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for 5.17, take #2
- A couple of fixes when handling an exception while an SError has been
  delivered
- Workaround for Cortex-A510's single-step erratum
Diffstat (limited to 'arch/x86/kvm/vmx')
-rw-r--r--	arch/x86/kvm/vmx/vmenter.S	14
-rw-r--r--	arch/x86/kvm/vmx/vmx.c		25
-rw-r--r--	arch/x86/kvm/vmx/vmx_ops.h	16
3 files changed, 38 insertions, 17 deletions
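
The vmenter.S hunks below convert every bare "ret" to the RET macro. For
orientation (not part of this diff): a sketch of how that macro is defined
for assembly files, assuming the straight-line-speculation mitigation as it
appears in arch/x86/include/asm/linkage.h around v5.17:

	#ifdef CONFIG_SLS
	#define RET	ret; int3	/* int3 traps straight-line speculation past the return */
	#else
	#define RET	ret
	#endif

With CONFIG_SLS disabled the macro is just "ret", so the conversion is
behaviorally neutral there; the point is that call sites no longer need to
change when the mitigation is on.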
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 3a6461694fc2..435c187927c4 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -49,14 +49,14 @@ SYM_FUNC_START_LOCAL(vmx_vmenter)
 	je 2f

1:	vmresume
-	ret
+	RET

2:	vmlaunch
-	ret
+	RET

3:	cmpb $0, kvm_rebooting
 	je 4f
-	ret
+	RET
4:	ud2

 	_ASM_EXTABLE(1b, 3b)
@@ -89,7 +89,7 @@ SYM_FUNC_START(vmx_vmexit)
 	pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
-	ret
+	RET
SYM_FUNC_END(vmx_vmexit)

/**
@@ -228,7 +228,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	pop %edi
#endif
 	pop %_ASM_BP
-	ret
+	RET

 	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:	mov $1, %eax
@@ -293,7 +293,7 @@ SYM_FUNC_START(vmread_error_trampoline)
 	pop %_ASM_AX
 	pop %_ASM_BP

-	ret
+	RET
SYM_FUNC_END(vmread_error_trampoline)

SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
@@ -326,5 +326,5 @@ SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
 	 */
 	mov %_ASM_BP, %_ASM_SP
 	pop %_ASM_BP
-	ret
+	RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c0c256c33d21..6c27bd0c89e1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6495,7 +6495,9 @@ void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
 static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
 					unsigned long entry)
 {
-	kvm_before_interrupt(vcpu);
+	bool is_nmi = entry == (unsigned long)asm_exc_nmi_noist;
+
+	kvm_before_interrupt(vcpu, is_nmi ? KVM_HANDLING_NMI : KVM_HANDLING_IRQ);
 	vmx_do_interrupt_nmi_irqoff(entry);
 	kvm_after_interrupt(vcpu);
 }
@@ -7839,6 +7841,20 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 };

+static unsigned int vmx_handle_intel_pt_intr(void)
+{
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+	/* '0' on failure so that the !PT case can use a RET0 static call. */
+	if (!kvm_arch_pmi_in_guest(vcpu))
+		return 0;
+
+	kvm_make_request(KVM_REQ_PMI, vcpu);
+	__set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
+		  (unsigned long *)&vcpu->arch.pmu.global_status);
+	return 1;
+}
+
 static __init void vmx_setup_user_return_msrs(void)
 {
@@ -7865,6 +7881,8 @@ static __init void vmx_setup_user_return_msrs(void)
 		kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
 }

+static struct kvm_x86_init_ops vmx_init_ops __initdata;
+
 static __init int hardware_setup(void)
 {
 	unsigned long host_bndcfgs;
@@ -8015,6 +8033,10 @@ static __init int hardware_setup(void)
 		return -EINVAL;
 	if (!enable_ept || !cpu_has_vmx_intel_pt())
 		pt_mode = PT_MODE_SYSTEM;
+	if (pt_mode == PT_MODE_HOST_GUEST)
+		vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
+	else
+		vmx_init_ops.handle_intel_pt_intr = NULL;

 	setup_default_sgx_lepubkeyhash();

@@ -8043,6 +8065,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
 	.disabled_by_bios = vmx_disabled_by_bios,
 	.check_processor_compatibility = vmx_check_processor_compat,
 	.hardware_setup = hardware_setup,
+	.handle_intel_pt_intr = NULL,

 	.runtime_ops = &vmx_x86_ops,
 };
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 67f745250e50..5e7f41225780 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -105,9 +105,11 @@ do_exception:
 		     * @field, and bounce through the trampoline to preserve
 		     * volatile registers.
 		     */
-		     "push $0\n\t"
+		     "xorl %k1, %k1\n\t"
+		     "2:\n\t"
+		     "push %1\n\t"
 		     "push %2\n\t"
-		     "2:call vmread_error_trampoline\n\t"
+		     "call vmread_error_trampoline\n\t"

 		     /*
 		      * Unwind the stack.  Note, the trampoline zeros out the
@@ -118,13 +120,9 @@ do_exception:
 		     "3:\n\t"

 		     /* VMREAD faulted.  As above, except push '1' for @fault. */
-		     ".pushsection .fixup, \"ax\"\n\t"
-		     "4: push $1\n\t"
-		     "push %2\n\t"
-		     "jmp 2b\n\t"
-		     ".popsection\n\t"
-		     _ASM_EXTABLE(1b, 4b)
-		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
+		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)
+
+		     : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
 	return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
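
A few notes on the x86 hunks above.

First, handle_interrupt_nmi_irqoff() now tells KVM whether it is forwarding
an NMI or an ordinary IRQ. A sketch of the bookkeeping helpers this feeds,
assuming the v5.17 definitions in arch/x86/kvm/x86.h (shown for context;
treat the exact wording as a reconstruction, not a quote):

	enum kvm_intr_type {
		/* Values are arbitrary, but must be non-zero. */
		KVM_HANDLING_IRQ = 1,
		KVM_HANDLING_NMI,
	};

	/* Record what kind of host interrupt KVM is forwarding from the guest. */
	static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						enum kvm_intr_type intr)
	{
		WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
	}

	static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
	{
		WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
	}

Recording the type matters because PMIs arrive as NMIs: kvm_arch_pmi_in_guest()
can attribute a PMI to the guest only while an interrupt from the guest is
actually being handled, which is exactly what vmx_handle_intel_pt_intr() checks.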
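Second, vmx_handle_intel_pt_intr() returns 0 on failure "so that the !PT case
can use a RET0 static call". An illustrative sketch of that pattern on the
perf side, using the generic static-call API; the names below are assumptions
for illustration, not the kernel's actual declarations:

	#include <linux/static_call.h>

	unsigned int pt_intr_handler(void);	/* prototype only, used for typeof() */

	/* Default target is __static_call_return0: "no handler" is a bare return 0. */
	DEFINE_STATIC_CALL_RET0(guest_handle_intel_pt_intr, pt_intr_handler);

	static void register_pt_handler(unsigned int (*fn)(void))
	{
		if (fn)
			static_call_update(guest_handle_intel_pt_intr, fn);
		else	/* e.g. pt_mode != PT_MODE_HOST_GUEST: patch back to return-0 */
			static_call_update(guest_handle_intel_pt_intr,
					   (void *)__static_call_return0);
	}

	static unsigned int handle_pt_pmi(void)
	{
		/* Costs a direct call, or effectively "xor eax, eax; ret" when unset. */
		return static_call(guest_handle_intel_pt_intr)();
	}

This is why hardware_setup() leaves handle_intel_pt_intr NULL unless
pt_mode == PT_MODE_HOST_GUEST: an unregistered hook stays on the zero-cost
return-0 target.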
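Third, the vmx_ops.h hunk drops the .fixup-section trampoline in favour of an
EX_TYPE_ONE_REG exception-table entry: on a faulting VMREAD the fixup loads
the constant 1 into the register bound to operand %1 and resumes at label 2:,
while the VM-Fail path reaches the same label after zeroing that register in
line (hence the new "=&r" early-clobber on @value). A plain-C model of the
resulting control flow, with hypothetical stub names standing in for the
inline asm:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the VMREAD: true on success; else distinguishes fault vs VM-Fail. */
	static bool stub_vmread(unsigned long field, unsigned long *value, bool *faulted)
	{
		*value = 0;
		*faulted = false;
		return true;		/* pretend the VMREAD succeeded */
	}

	static unsigned long stub_vmread_error(unsigned long field, unsigned long fault)
	{
		fprintf(stderr, "vmread failed: field=0x%lx fault=%lu\n", field, fault);
		return 0;
	}

	static unsigned long vmcs_read_model(unsigned long field)
	{
		unsigned long value;	/* plays the role of asm operand %1 */
		bool faulted;

		if (stub_vmread(field, &value, &faulted))
			return value;			/* label 1: VMREAD succeeded */

		/*
		 * VM-Fail clears the flag in line ("xorl %k1, %k1") and falls
		 * through to label 2:; a fault instead has the EX_TYPE_ONE_REG
		 * fixup set the same register to 1 and resume at 2:.  Either
		 * way there is one shared call into the error trampoline.
		 */
		value = faulted ? 1 : 0;
		return stub_vmread_error(field, value);	/* label 2: shared path */
	}

	int main(void)
	{
		/* 0x6c00 is an arbitrary example field encoding. */
		printf("value = %lu\n", vmcs_read_model(0x6c00));
		return 0;
	}

The win is that the out-of-line .fixup stub (old label 4:) disappears
entirely; the exception table itself carries enough information to set the
@fault flag and rejoin the in-line error path.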