author    | Jing Zhang <jingzhangos@google.com> | 2022-01-18 02:57:02 +0100
committer | Marc Zyngier <maz@kernel.org>       | 2022-02-08 15:27:53 +0100
commit    | f783ef1c0e82e4fc311a972472ff61f6d1d0e22d (patch)
tree      | e8ac9502306803791cb891285006752d6ca1254d /arch
parent    | KVM: arm64: Use read/write spin lock for MMU protection (diff)
download  | linux-f783ef1c0e82e4fc311a972472ff61f6d1d0e22d.tar.xz linux-f783ef1c0e82e4fc311a972472ff61f6d1d0e22d.zip
KVM: arm64: Add fast path to handle permission relaxation during dirty logging
To reduce MMU lock contention during dirty logging, all permission
relaxation operations are now performed under the read lock.
Signed-off-by: Jing Zhang <jingzhangos@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220118015703.3630552-3-jingzhangos@google.com
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm64/kvm/mmu.c | 17
1 file changed, 15 insertions, 2 deletions
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index cafd5813c949..10df5d855d54 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1080,6 +1080,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	gfn_t gfn;
 	kvm_pfn_t pfn;
 	bool logging_active = memslot_is_logging(memslot);
+	bool logging_perm_fault = false;
 	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
 	unsigned long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1114,6 +1115,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (logging_active) {
 		force_pte = true;
 		vma_shift = PAGE_SHIFT;
+		logging_perm_fault = (fault_status == FSC_PERM && write_fault);
 	} else {
 		vma_shift = get_vma_page_shift(vma, hva);
 	}
@@ -1212,7 +1214,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (exec_fault && device)
 		return -ENOEXEC;
 
-	write_lock(&kvm->mmu_lock);
+	/*
+	 * To reduce MMU contention and enhance concurrency during
+	 * dirty logging, only acquire the read lock for permission
+	 * relaxation.
+	 */
+	if (logging_perm_fault)
+		read_lock(&kvm->mmu_lock);
+	else
+		write_lock(&kvm->mmu_lock);
 	pgt = vcpu->arch.hw_mmu->pgt;
 	if (mmu_notifier_retry(kvm, mmu_seq))
 		goto out_unlock;
@@ -1271,7 +1281,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 out_unlock:
-	write_unlock(&kvm->mmu_lock);
+	if (logging_perm_fault)
+		read_unlock(&kvm->mmu_lock);
+	else
+		write_unlock(&kvm->mmu_lock);
 	kvm_set_pfn_accessed(pfn);
 	kvm_release_pfn_clean(pfn);
 	return ret != -EAGAIN ? ret : 0;
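For readers who want to experiment with the locking pattern outside the kernel, below is a minimal userspace sketch of the same fast-path idea: take the read lock when the fault only relaxes permissions on an existing mapping, and fall back to the write lock for everything else. It uses pthread rwlocks in place of the kernel's rwlock_t; the struct fault layout and the handle_fault helper are hypothetical illustrations, not kernel or KVM API.

/*
 * Hypothetical userspace analogue of the read-lock fast path above.
 * pthread_rwlock_t stands in for the kernel's rwlock_t.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fault {
	bool logging_active;	/* dirty logging enabled on the memslot */
	bool perm_fault;	/* analogue of fault_status == FSC_PERM */
	bool write_fault;	/* the guest access was a write */
};

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

static void handle_fault(const struct fault *f)
{
	/*
	 * Permission relaxation only flips bits in an existing leaf
	 * entry; it never changes the page-table structure, so the
	 * read lock suffices (assuming the entry update is atomic).
	 */
	bool perm_relax = f->logging_active && f->perm_fault && f->write_fault;

	if (perm_relax)
		pthread_rwlock_rdlock(&mmu_lock);
	else
		pthread_rwlock_wrlock(&mmu_lock);

	/* ... install a new mapping or relax permissions here ... */
	printf("fault handled under %s lock\n", perm_relax ? "read" : "write");

	pthread_rwlock_unlock(&mmu_lock);
}

int main(void)
{
	struct fault dirty_log_write = { true, true, true };	/* fast path */
	struct fault first_touch = { true, false, true };	/* slow path */

	handle_fault(&dirty_log_write);
	handle_fault(&first_touch);
	return 0;
}

The property that makes the fast path worthwhile is that, during dirty logging, write-permission faults on already-present pages dominate, and many vCPUs can relax permissions concurrently under the read lock, while structural changes (new mappings, unmaps) still serialize under the write lock.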