author     Ben Gardon <bgardon@google.com>            2021-04-02 01:37:32 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>        2021-04-19 15:06:04 +0200
commit     6103bc074048876794fa6d21fd8989331690ccbd
tree       3a54f5ec8e134850966f65da895271da5df94455 /arch/x86/kvm/mmu/tdp_mmu.h
parent     KVM: x86/mmu: Protect the tdp_mmu_roots list with RCU
KVM: x86/mmu: Allow zap gfn range to operate under the mmu read lock
To reduce lock contention and interference with page fault handlers,
allow the TDP MMU's GFN-range zap function to operate under the MMU
read lock.
Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210401233736.638171-10-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.h | 14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 25268c4ba03b..2e1913bbc0ba 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -13,14 +13,18 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
 	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
 }
 
-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+			  bool shared);
 
 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
-				 gfn_t end, bool can_yield, bool flush);
+				 gfn_t end, bool can_yield, bool flush,
+				 bool shared);
 static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
-					     gfn_t start, gfn_t end, bool flush)
+					     gfn_t start, gfn_t end, bool flush,
+					     bool shared)
 {
-	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
+					   shared);
 }
 static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
@@ -37,7 +41,7 @@ static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 	 */
 	lockdep_assert_held_write(&kvm->mmu_lock);
 	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
-					   sp->gfn, end, false, false);
+					   sp->gfn, end, false, false, false);
 }
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
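For context, a minimal sketch of how a caller could use the new shared
parameter once this change is in place. The wrapper name example_zap_range()
and the TLB-flush handling are illustrative assumptions, not part of this
patch; the patch itself only changes the declarations above. Note that
kvm_tdp_mmu_zap_sp() keeps passing shared = false because, per the lockdep
assertion in the second hunk, it still runs with mmu_lock held for write.

/*
 * Hypothetical caller, for illustration only: zap a GFN range while
 * holding mmu_lock for read, so page fault handlers can run in
 * parallel instead of contending on the write lock.
 */
static void example_zap_range(struct kvm *kvm, int as_id,
			      gfn_t start, gfn_t end)
{
	bool flush;

	read_lock(&kvm->mmu_lock);

	/*
	 * shared = true tells the TDP MMU that mmu_lock is held for
	 * read; flush = false since no flush is pending on entry.
	 */
	flush = kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end,
					  false, true);

	/* Flush remote TLBs if the zap dropped any SPTEs. */
	if (flush)
		kvm_flush_remote_tlbs(kvm);

	read_unlock(&kvm->mmu_lock);
}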