author		Vitaly Kuznetsov <vkuznets@redhat.com>	2020-06-10 19:55:31 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2020-06-11 18:35:19 +0200
commit		7863e346e1089b40cac1c7d9098314c405e2e1e3 (patch)
tree		fa4c2c9d3b1d63d7d0ec19e60e02af9dcd7f477d /virt
parent		kvm: i8254: remove redundant assignment to pointer s (diff)
download	linux-7863e346e1089b40cac1c7d9098314c405e2e1e3.tar.xz
		linux-7863e346e1089b40cac1c7d9098314c405e2e1e3.zip
KVM: async_pf: Cleanup kvm_setup_async_pf()
schedule_work() returns 'false' only when the work is already on the queue, and this can't happen as kvm_setup_async_pf() always allocates a new one. Also, to avoid a potential race, it makes sense to call schedule_work() at the very end, after the work has been added to the queue.

While at it, do some minor cleanup: gfn_to_pfn_async(), mentioned in a comment, does not currently exist and, moreover, we can check kvm_is_error_hva() at the very beginning, before we try to allocate work, so the 'retry_sync' label can go away completely.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200610175532.779793-1-vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
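For illustration, here is a simplified excerpt of the tail of kvm_setup_async_pf() after this change, mirroring the resulting code in the diff below (not a standalone compilable unit): the work item is queued and announced to the guest before schedule_work() can hand it to a worker.

	INIT_WORK(&work->work, async_pf_execute);

	/* Queue and announce the async PF before the work can run. */
	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);

	/* Last step: once scheduled, async_pf_execute() may run immediately. */
	schedule_work(&work->work);

	return 1;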
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/async_pf.c	19
1 file changed, 6 insertions, 13 deletions
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index f1e07fae84e9..ba080088da76 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -164,7 +164,9 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
 		return 0;
 
-	/* setup delayed work */
+	/* Arch specific code should not do async PF in this case */
+	if (unlikely(kvm_is_error_hva(hva)))
+		return 0;
 
 	/*
 	 * do alloc nowait since if we are going to sleep anyway we
@@ -183,24 +185,15 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	mmget(work->mm);
 	kvm_get_kvm(work->vcpu->kvm);
 
-	/* this can't really happen otherwise gfn_to_pfn_async
-	   would succeed */
-	if (unlikely(kvm_is_error_hva(work->addr)))
-		goto retry_sync;
-
 	INIT_WORK(&work->work, async_pf_execute);
-	if (!schedule_work(&work->work))
-		goto retry_sync;
 
 	list_add_tail(&work->queue, &vcpu->async_pf.queue);
 	vcpu->async_pf.queued++;
 	kvm_arch_async_page_not_present(vcpu, work);
+
+	schedule_work(&work->work);
+
 	return 1;
-retry_sync:
-	kvm_put_kvm(work->vcpu->kvm);
-	mmput(work->mm);
-	kmem_cache_free(async_pf_cache, work);
-	return 0;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)