author     Paolo Bonzini <pbonzini@redhat.com>    2020-05-13 18:57:26 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>    2020-06-01 10:24:32 +0200
commit     69c9dfa24bb7bac5c9e2bd4d3f631e35b91e3733
tree       8b0a08cbf4bce6840707ddbf717f76d5c716899e /arch/x86/kvm/svm
parent     KVM: nVMX: always update CR3 in VMCS
KVM: nSVM: move map argument out of enter_svm_guest_mode
Unmapping the nested VMCB in enter_svm_guest_mode is a bit of a wart,
since the map argument is not used elsewhere in the function. There are
just two callers, and those are also the places where kvm_vcpu_map is
called, so it is cleaner to unmap there.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
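The pattern behind the change is ordinary resource ownership: the code that establishes a mapping should also tear it down, so the callee never has to see the map object at all. Below is a minimal standalone sketch of that rule, not KVM code; the names guest_map, map_guest_page, unmap_guest_page and enter_guest are hypothetical stand-ins for kvm_vcpu_map()/kvm_vcpu_unmap() and enter_svm_guest_mode().

/*
 * Sketch only: the caller that maps a page is the one that unmaps it.
 * All identifiers here are made up for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct guest_map {              /* stand-in for struct kvm_host_map */
        void *hva;              /* host virtual address of the mapping */
};

static int map_guest_page(struct guest_map *map)
{
        map->hva = malloc(4096);        /* pretend this maps guest memory */
        return map->hva ? 0 : -1;
}

static void unmap_guest_page(struct guest_map *map)
{
        free(map->hva);                 /* pretend this releases the mapping */
        map->hva = NULL;
}

/* After the commit, the callee only sees the mapped data, not the map. */
static void enter_guest(void *nested_vmcb)
{
        printf("entering guest with control block at %p\n", nested_vmcb);
}

int main(void)
{
        struct guest_map map;

        if (map_guest_page(&map))
                return 1;

        enter_guest(map.hva);           /* callee no longer unmaps */

        /* The caller that created the mapping tears it down. */
        unmap_guest_page(&map);
        return 0;
}

Keeping map and unmap in the same function also makes the error paths in nested_svm_vmrun() easier to audit, which is what the new out: label in the diff below relies on.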
Diffstat (limited to 'arch/x86/kvm/svm')
 arch/x86/kvm/svm/nested.c | 14 ++++++--------
 arch/x86/kvm/svm/svm.c    |  3 ++-
 arch/x86/kvm/svm/svm.h    |  2 +-
 3 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 8756c9f463fd..8e98d5e420a5 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -229,7 +229,7 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 }
 
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-                          struct vmcb *nested_vmcb, struct kvm_host_map *map)
+                          struct vmcb *nested_vmcb)
 {
         bool evaluate_pending_interrupts =
                 is_intercept(svm, INTERCEPT_VINTR) ||
@@ -304,8 +304,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
         svm->vmcb->control.pause_filter_thresh =
                 nested_vmcb->control.pause_filter_thresh;
 
-        kvm_vcpu_unmap(&svm->vcpu, map, true);
-
         /* Enter Guest-Mode */
         enter_guest_mode(&svm->vcpu);
 
@@ -368,10 +366,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
                 nested_vmcb->control.exit_code_hi = 0;
                 nested_vmcb->control.exit_info_1 = 0;
                 nested_vmcb->control.exit_info_2 = 0;
-
-                kvm_vcpu_unmap(&svm->vcpu, &map, true);
-
-                return ret;
+                goto out;
         }
 
         trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
@@ -414,7 +409,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
         copy_vmcb_control_area(hsave, vmcb);
 
         svm->nested.nested_run_pending = 1;
-        enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
+        enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);
 
         if (!nested_svm_vmrun_msrpm(svm)) {
                 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
@@ -425,6 +420,9 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
                 nested_svm_vmexit(svm);
         }
 
+out:
+        kvm_vcpu_unmap(&svm->vcpu, &map, true);
+
         return ret;
 }
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index feb96a410f2d..76b3f553815e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3814,7 +3814,8 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                 if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
                         return 1;
                 nested_vmcb = map.hva;
-                enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
+                enter_svm_guest_mode(svm, vmcb, nested_vmcb);
+                kvm_vcpu_unmap(&svm->vcpu, &map, true);
         }
         return 0;
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 89fab75dd4f5..33e3f09d7a8e 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -395,7 +395,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
 }
 
 void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-                          struct vmcb *nested_vmcb, struct kvm_host_map *map);
+                          struct vmcb *nested_vmcb);
 int nested_svm_vmrun(struct vcpu_svm *svm);
 void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);
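As a side note, the nested_svm_vmrun() hunks also convert the early-error return into the kernel's usual single-exit cleanup idiom: the failed-consistency-check path and the normal path now both fall through the one kvm_vcpu_unmap() at the out: label. The following is a minimal sketch of that idiom under the same caveat as above; mapping, acquire_map, release_map and do_vmrun are hypothetical names, not the real KVM helpers.

/*
 * Sketch only: a single "out:" label releases the mapping on every
 * exit path, success or error. All identifiers are made up.
 */
#include <stdio.h>
#include <stdlib.h>

struct mapping {
        void *hva;
};

static int acquire_map(struct mapping *m)
{
        m->hva = malloc(4096);          /* pretend this maps a guest page */
        return m->hva ? 0 : -1;
}

static void release_map(struct mapping *m)
{
        free(m->hva);                   /* pretend this unmaps it */
}

static int do_vmrun(int checks_pass)
{
        struct mapping map;
        int ret = 0;                    /* placeholder return code */

        if (acquire_map(&map))
                return -1;              /* nothing mapped yet, nothing to clean up */

        if (!checks_pass) {
                printf("consistency check failed, reflecting error to guest\n");
                ret = -1;
                goto out;               /* error path: skip the run, still unmap */
        }

        printf("running nested guest, control block at %p\n", map.hva);

out:
        release_map(&map);              /* single unmap shared by all paths */
        return ret;
}

int main(void)
{
        do_vmrun(0);                    /* error path exercises the out: label */
        return do_vmrun(1) == 0 ? 0 : 1;
}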