author     Bhaktipriya Shridhar <bhaktipriya96@gmail.com>  2016-08-30 19:59:51 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>             2016-09-07 19:34:28 +0200
commit     3706feacd007c89f49bdb20c5c1dd17c8badfc43 (patch)
tree       a719e04c4f1871e8271eb3d2091cef727a3288db /virt/kvm/eventfd.c
parent     KVM: nVMX: make emulated nested preemption timer pinned (diff)
download   linux-3706feacd007c89f49bdb20c5c1dd17c8badfc43.tar.xz
           linux-3706feacd007c89f49bdb20c5c1dd17c8badfc43.zip
KVM: Remove deprecated create_singlethread_workqueue
The workqueue "irqfd_cleanup_wq" queues a single work item &irqfd->shutdown and hence doesn't require ordering. It is a host-wide workqueue for issuing deferred shutdown requests aggregated from all vm* instances. It is not being used on a memory reclaim path. Hence, it has been converted to use system_wq. The work item has been flushed in kvm_irqfd_release(). The workqueue "wqueue" queues a single work item &timer->expired and hence doesn't require ordering. Also, it is not being used on a memory reclaim path. Hence, it has been converted to use system_wq. System workqueues have been able to handle high level of concurrency for a long time now and hence it's not required to have a singlethreaded workqueue just to gain concurrency. Unlike a dedicated per-cpu workqueue created with create_singlethread_workqueue(), system_wq allows multiple work items to overlap executions even on the same CPU; however, a per-cpu workqueue doesn't have any CPU locality or global ordering guarantee unless the target CPU is explicitly specified and thus the increase of local concurrency shouldn't make any difference. Signed-off-by: Bhaktipriya Shridhar <bhaktipriya96@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt/kvm/eventfd.c')
-rw-r--r--   virt/kvm/eventfd.c   22
1 file changed, 3 insertions(+), 19 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index e469b6012471..f397e9b20370 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,7 +42,6 @@
#ifdef CONFIG_HAVE_KVM_IRQFD
-static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
@@ -168,7 +167,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
list_del_init(&irqfd->list);
- queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
+ schedule_work(&irqfd->shutdown);
}
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -555,7 +554,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
* so that we guarantee there will not be any more interrupts on this
* gsi once this deassign function returns.
*/
- flush_workqueue(irqfd_cleanup_wq);
+ flush_work(&irqfd->shutdown);
return 0;
}
@@ -592,7 +591,7 @@ kvm_irqfd_release(struct kvm *kvm)
* Block until we know all outstanding shutdown jobs have completed
* since we do not take a kvm* reference.
*/
- flush_workqueue(irqfd_cleanup_wq);
+ flush_work(&irqfd->shutdown);
}
@@ -622,23 +621,8 @@ void kvm_irq_routing_update(struct kvm *kvm)
spin_unlock_irq(&kvm->irqfds.lock);
}
-/*
- * create a host-wide workqueue for issuing deferred shutdown requests
- * aggregated from all vm* instances. We need our own isolated single-thread
- * queue to prevent deadlock against flushing the normal work-queue.
- */
-int kvm_irqfd_init(void)
-{
- irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
- if (!irqfd_cleanup_wq)
- return -ENOMEM;
-
- return 0;
-}
-
void kvm_irqfd_exit(void)
{
- destroy_workqueue(irqfd_cleanup_wq);
}
#endif