author    | Sean Christopherson <sean.j.christopherson@intel.com> | 2019-02-05 22:01:37 +0100
committer | Paolo Bonzini <pbonzini@redhat.com> | 2019-02-20 22:48:49 +0100
commit    | 8ab3c471eef20925bf64c6d4fa46e88cdb4e86d5
tree      | 45347f67e38f133ed4caa13824c2f8011049b72c /arch/x86/kvm/mmu.c
parent    | KVM: x86/mmu: WARN if zapping a MMIO spte results in zapping children
KVM: x86/mmu: Consolidate kvm_mmu_zap_all() and kvm_mmu_zap_mmio_sptes()
...via a new helper, __kvm_mmu_zap_all(). An alternative to passing a
'bool mmio_only' would be to pass a callback function to filter the
shadow pages, i.e. to make __kvm_mmu_zap_all() generic and reusable, but
zapping all shadow pages is a last resort, so making the helper less
extensible is a feature of sorts. And the explicit MMIO parameter makes
it easy to preserve the WARN_ON_ONCE() if a restart is triggered when
zapping MMIO sptes.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 33
1 file changed, 10 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 81618070367b..8d43b7c0f56f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5840,7 +5840,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
-void kvm_mmu_zap_all(struct kvm *kvm)
+static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
@@ -5849,30 +5849,12 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	spin_lock(&kvm->mmu_lock);
 restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (sp->role.invalid && sp->root_count)
+		if (mmio_only && !sp->mmio_cached)
 			continue;
-		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign) ||
-		    cond_resched_lock(&kvm->mmu_lock))
-			goto restart;
-	}
-
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-	spin_unlock(&kvm->mmu_lock);
-}
-
-static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
-{
-	struct kvm_mmu_page *sp, *node;
-	LIST_HEAD(invalid_list);
-	int ign;
-
-	spin_lock(&kvm->mmu_lock);
-restart:
-	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-		if (!sp->mmio_cached)
+		if (sp->role.invalid && sp->root_count)
 			continue;
 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
-			WARN_ON_ONCE(1);
+			WARN_ON_ONCE(mmio_only);
 			goto restart;
 		}
 		if (cond_resched_lock(&kvm->mmu_lock))
@@ -5883,6 +5865,11 @@ restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
+void kvm_mmu_zap_all(struct kvm *kvm)
+{
+	return __kvm_mmu_zap_all(kvm, false);
+}
+
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
@@ -5904,7 +5891,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	 */
 	if (unlikely(gen == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-		kvm_mmu_zap_mmio_sptes(kvm);
+		__kvm_mmu_zap_all(kvm, true);
 	}
 }
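For reference, a sketch of how the consolidated helper reads with this patch applied, reconstructed from the diff above. The lines that fall between the hunks (the loop tail and kvm_mmu_commit_zap_page()) are assumed unchanged from the pre-patch code, and the comments are editorial annotations, not part of the commit:

static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		/* When only MMIO sptes are targeted, skip every other page. */
		if (mmio_only && !sp->mmio_cached)
			continue;
		/* Invalid roots are already being torn down; leave them be. */
		if (sp->role.invalid && sp->root_count)
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
			/* Zapping an MMIO spte should never zap children. */
			WARN_ON_ONCE(mmio_only);
			goto restart;
		}
		if (cond_resched_lock(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	return __kvm_mmu_zap_all(kvm, false);
}

With this shape, kvm_mmu_invalidate_mmio_sptes() is the only mmio_only=true caller, which preserves the old WARN_ON_ONCE(1) behaviour for the MMIO wraparound path while keeping full zaps silent, exactly as the commit message describes.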