author     Sean Christopherson <seanjc@google.com>  2024-10-10 20:23:52 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>      2024-10-25 19:00:48 +0200
commit     cb444acb697943c0aaeab085c43e07727cb0b85c (patch)
tree       2a4c546dced04a6f0cf2d624955fc2c664411a9c /arch/x86/kvm/vmx/vmx.c
parent     KVM: Move x86's API to release a faultin page to common KVM (diff)
KVM: VMX: Hold mmu_lock until page is released when updating APIC access page
Hold mmu_lock across kvm_release_pfn_clean() when refreshing the APIC
access page address to ensure that KVM doesn't mark a page/folio as
accessed after it has been unmapped.  Practically speaking, marking a
folio accessed is benign in this scenario, as KVM does hold a reference
(it's really just marking folios dirty that is problematic), but there's
no reason not to be paranoid (moving the APIC access page isn't a hot
path), and no reason to be different from other mmu_notifier-protected
flows in KVM.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-51-seanjc@google.com>
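For reference, the resulting ordering after this patch, pieced together from the hunk below, is roughly the sketch that follows. The earlier part of vmx_set_apic_access_page_addr() that resolves gfn, mmu_seq, and pfn is elided, and the comments paraphrase the changelog rather than the in-tree comments.

	read_lock(&vcpu->kvm->mmu_lock);

	/* Only install the pfn if no invalidation ran after mmu_seq was snapshotted. */
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
	else
		vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));

	/*
	 * Release the pfn (which may mark the folio accessed) while mmu_lock
	 * is still held, so KVM can't mark the folio accessed after an
	 * mmu_notifier invalidation has already unmapped it.
	 */
	kvm_release_pfn_clean(pfn);

	read_unlock(&vcpu->kvm->mmu_lock);

In the old flow, kvm_release_pfn_clean() ran only after read_unlock() (via the out: label), leaving a window in which the release could race with an invalidation of the same range.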
Diffstat (limited to 'arch/x86/kvm/vmx/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx/vmx.c  21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 81ed596e4454..bee90089c8a7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6832,25 +6832,22 @@ void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
 		return;
 
 	read_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
+	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
 		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
-		read_unlock(&vcpu->kvm->mmu_lock);
-		goto out;
-	}
-
-	vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
-	read_unlock(&vcpu->kvm->mmu_lock);
+	else
+		vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
 
 	/*
-	 * No need for a manual TLB flush at this point, KVM has already done a
-	 * flush if there were SPTEs pointing at the previous page.
-	 */
-out:
-	/*
 	 * Do not pin apic access page in memory, the MMU notifier
 	 * will call us again if it is migrated or swapped out.
 	 */
 	kvm_release_pfn_clean(pfn);
+
+	/*
+	 * No need for a manual TLB flush at this point, KVM has already done a
+	 * flush if there were SPTEs pointing at the previous page.
+	 */
+	read_unlock(&vcpu->kvm->mmu_lock);
 }
 
 void vmx_hwapic_isr_update(int max_isr)