author    | Paolo Bonzini <pbonzini@redhat.com> | 2020-01-20 16:14:37 +0100
committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-01-23 09:51:08 +0100
commit    | 4425f567b0dd2cb5ad2b16ce03a8951d0ccf935d
tree      | 4d71ef4f94792745998ee66b4c7d8df3c4a587d0 /virt
parent    | KVM: x86: list MSR_IA32_UCODE_REV as an emulated MSR
KVM: async_pf: drop kvm_arch_async_page_present wrappers
The wrappers make it less clear that the position of the call
to kvm_arch_async_page_present depends on the architecture, and
that only one of the two call sites will actually be active.
Remove them.
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
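
For readers unfamiliar with the idiom the patch switches to, here is a minimal, self-contained sketch. The macro and config symbol below (DEMO_IS_ENABLED, CONFIG_DEMO_FEATURE) are simplified stand-ins invented for illustration, not the kernel's actual IS_ENABLED() from <linux/kconfig.h>, which also copes with undefined and tristate options. The point is only that a compile-time 0/1 constant in an ordinary `if` keeps the call site visible and type-checked in every configuration, while the optimizer drops the dead branch.

```c
#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED(): here the "config
 * option" is just a macro that expands to 0 or 1. */
#define CONFIG_DEMO_FEATURE 0
#define DEMO_IS_ENABLED(option) (option)

/* Stand-in for an arch hook such as kvm_arch_async_page_present(). */
static void notify(void)
{
	puts("feature notification");
}

int main(void)
{
	/* Unlike code hidden behind #ifdef, this call is always parsed and
	 * type-checked; because the condition is a compile-time constant,
	 * the optimizer simply drops the dead call when the option is 0. */
	if (DEMO_IS_ENABLED(CONFIG_DEMO_FEATURE))
		notify();

	return 0;
}
```

This is why the patch can delete the #ifdef wrappers without adding any runtime overhead: the configuration check folds away at compile time, but both call sites stay in plain view.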
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/async_pf.c | 21
1 file changed, 4 insertions(+), 17 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index d8ef708a2ef6..15e5b037f92d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -17,21 +17,6 @@
 #include "async_pf.h"
 #include <trace/events/kvm.h>
 
-static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
-                                               struct kvm_async_pf *work)
-{
-#ifdef CONFIG_KVM_ASYNC_PF_SYNC
-        kvm_arch_async_page_present(vcpu, work);
-#endif
-}
-static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
-                                                struct kvm_async_pf *work)
-{
-#ifndef CONFIG_KVM_ASYNC_PF_SYNC
-        kvm_arch_async_page_present(vcpu, work);
-#endif
-}
-
 static struct kmem_cache *async_pf_cache;
 
 int kvm_async_pf_init(void)
@@ -80,7 +65,8 @@ static void async_pf_execute(struct work_struct *work)
         if (locked)
                 up_read(&mm->mmap_sem);
 
-        kvm_async_page_present_sync(vcpu, apf);
+        if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+                kvm_arch_async_page_present(vcpu, apf);
 
         spin_lock(&vcpu->async_pf.lock);
         list_add_tail(&apf->link, &vcpu->async_pf.done);
@@ -157,7 +143,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
                 spin_unlock(&vcpu->async_pf.lock);
 
                 kvm_arch_async_page_ready(vcpu, work);
-                kvm_async_page_present_async(vcpu, work);
+                if (!IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
+                        kvm_arch_async_page_present(vcpu, work);
 
                 list_del(&work->queue);
                 vcpu->async_pf.queued--;
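
To make the control flow of the two changed hunks easier to follow, here is a hedged, self-contained analogy in plain C. Every name below (demo_worker, demo_check_completion, fake_page_present, DEMO_ASYNC_PF_SYNC, ...) is invented for illustration and none of it is kernel code; it only mirrors the shape of async_pf_execute() and kvm_check_async_pf_completion(), where exactly one of the two kvm_arch_async_page_present() call sites is active for a given configuration.

```c
#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins; setting this to 1 mimics a CONFIG_KVM_ASYNC_PF_SYNC=y
 * build, 0 mimics a build without it. */
#define DEMO_ASYNC_PF_SYNC 0
#define DEMO_IS_ENABLED(option) (option)

struct demo_apf {
	bool done;		/* set by the "worker" when the fault is resolved */
	const char *token;	/* what we would report to the guest */
};

static void fake_page_ready(struct demo_apf *apf)
{
	printf("page ready:   %s\n", apf->token);
}

static void fake_page_present(struct demo_apf *apf)
{
	printf("page present: %s\n", apf->token);
}

/* Counterpart of async_pf_execute(): runs in "worker" context. */
static void demo_worker(struct demo_apf *apf)
{
	apf->done = true;
	if (DEMO_IS_ENABLED(DEMO_ASYNC_PF_SYNC))
		fake_page_present(apf);		/* "sync" configurations */
}

/* Counterpart of kvm_check_async_pf_completion(): runs in "vcpu" context. */
static void demo_check_completion(struct demo_apf *apf)
{
	if (!apf->done)
		return;
	fake_page_ready(apf);
	if (!DEMO_IS_ENABLED(DEMO_ASYNC_PF_SYNC))
		fake_page_present(apf);		/* "async" configurations */
}

int main(void)
{
	struct demo_apf apf = { .done = false, .token = "token-42" };

	demo_worker(&apf);		/* completes the fault */
	demo_check_completion(&apf);	/* vcpu notices the completion */
	return 0;
}
```

Flipping DEMO_ASYNC_PF_SYNC to 1 moves the "page present" message from the vcpu-side completion check into the worker path, mirroring how CONFIG_KVM_ASYNC_PF_SYNC selects which of the two call sites in the patch is live.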