author     Paolo Bonzini <pbonzini@redhat.com>   2020-05-18 19:08:37 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>   2020-06-01 10:26:03 +0200
commit     c513f484c5582a8efadf3d72298e2285b041536e (patch)
tree       b8450accf8e427bb6e7ceaa9c4711bae0bc2d6c9 /arch/x86/kvm/svm
parent     KVM: nSVM: split nested_vmcb_check_controls (diff)
KVM: nSVM: leave guest mode when clearing EFER.SVME
According to the AMD manual, the effect of turning off EFER.SVME while a guest is running is undefined. We make it leave guest mode immediately, similar to the effect of clearing the VMX bit in MSR_IA32_FEAT_CTL.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
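One way this path is reached in practice is a userspace-driven vCPU reset: KVM_SET_SREGS ends up in svm_set_efer(), so writing an EFER value with SVME cleared while the vCPU is in guest mode now goes through svm_leave_nested(). Below is a minimal userspace sketch of that sequence; it is illustrative only and not part of the patch, vcpu_fd is an assumed already-initialized KVM vCPU file descriptor, and clear_svme() is a hypothetical helper name.

/*
 * Illustrative sketch only: clear EFER.SVME on a vCPU via KVM_SET_SREGS.
 * With this patch, a vCPU that is in guest mode is forced back out of it
 * instead of being left in an undefined state.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

#define EFER_SVME	(1ULL << 12)	/* architectural EFER.SVME bit */

static int clear_svme(int vcpu_fd)
{
	struct kvm_sregs sregs;

	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return -1;

	sregs.efer &= ~EFER_SVME;

	/* Before this patch the effect on a vCPU in guest mode was
	 * undefined; now KVM leaves guest mode first. */
	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}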
Diffstat (limited to 'arch/x86/kvm/svm')
-rw-r--r--  arch/x86/kvm/svm/nested.c | 16
-rw-r--r--  arch/x86/kvm/svm/svm.c    | 10
-rw-r--r--  arch/x86/kvm/svm/svm.h    |  1
3 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index bd3a89cd4070..369eca73fe3e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -618,6 +618,22 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	return 0;
 }
 
+/*
+ * Forcibly leave nested mode in order to be able to reset the VCPU later on.
+ */
+void svm_leave_nested(struct vcpu_svm *svm)
+{
+	if (is_guest_mode(&svm->vcpu)) {
+		struct vmcb *hsave = svm->nested.hsave;
+		struct vmcb *vmcb = svm->vmcb;
+
+		svm->nested.nested_run_pending = 0;
+		leave_guest_mode(&svm->vcpu);
+		copy_vmcb_control_area(&vmcb->control, &hsave->control);
+		nested_svm_uninit_mmu_context(&svm->vcpu);
+	}
+}
+
 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 {
 	u32 offset, msr, value;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index bc08221f6743..b4db9a980469 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -265,6 +265,7 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
 
 void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	vcpu->arch.efer = efer;
 
 	if (!npt_enabled) {
@@ -275,8 +276,13 @@ void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 			efer &= ~EFER_LME;
 	}
 
-	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
-	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+	if (!(efer & EFER_SVME)) {
+		svm_leave_nested(svm);
+		svm_set_gif(svm, true);
+	}
+
+	svm->vmcb->save.efer = efer | EFER_SVME;
+	mark_dirty(svm->vmcb, VMCB_CR);
 }
 
 static int is_external_interrupt(u32 info)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be8e830f83fa..6ac4c00a5d82 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -389,6 +389,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 			  struct vmcb *nested_vmcb);
+void svm_leave_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct vcpu_svm *svm);
 void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);