author		Mike Christie <michael.christie@oracle.com>	2023-06-27 01:22:55 +0200
committer	Michael S. Tsirkin <mst@redhat.com>		2023-07-03 18:15:13 +0200
commit		0921dddcb5898030f0951816ed685a958acfbde2
tree		49ea22eb9870f07f50f70aff487d29f0f6d163b4 /drivers/vhost/vhost.c
parent		vhost, vhost_net: add helper to check if vq has work
vhost: take worker or vq instead of dev for queueing
This patch has the core work queueing function take a worker, in preparation
for supporting multiple workers. It also adds a helper that takes a vq during
queueing, so modules can control which vq/worker to queue work on.

This temporarily leaves vhost_work_queue in place. It will be removed when the
drivers are converted in the next patches.
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Message-Id: <20230626232307.97930-6-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vhost/vhost.c')
-rw-r--r--	drivers/vhost/vhost.c	44
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index aafb23e12477..611e495eeb3c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -231,21 +231,10 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-void vhost_dev_flush(struct vhost_dev *dev)
+static bool vhost_worker_queue(struct vhost_worker *worker,
+			       struct vhost_work *work)
 {
-	struct vhost_flush_struct flush;
-
-	init_completion(&flush.wait_event);
-	vhost_work_init(&flush.work, vhost_flush_work);
-
-	if (vhost_work_queue(dev, &flush.work))
-		wait_for_completion(&flush.wait_event);
-}
-EXPORT_SYMBOL_GPL(vhost_dev_flush);
-
-bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
-{
-	if (!dev->worker)
+	if (!worker)
 		return false;
 	/*
 	 * vsock can queue while we do a VHOST_SET_OWNER, so we have a smp_wmb
@@ -257,14 +246,37 @@ bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 	 * sure it was not in the list.
 	 * test_and_set_bit() implies a memory barrier.
 	 */
-		llist_add(&work->node, &dev->worker->work_list);
-		vhost_task_wake(dev->worker->vtsk);
+		llist_add(&work->node, &worker->work_list);
+		vhost_task_wake(worker->vtsk);
 	}
 
 	return true;
 }
+
+bool vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
+{
+	return vhost_worker_queue(dev->worker, work);
+}
 EXPORT_SYMBOL_GPL(vhost_work_queue);
 
+bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
+{
+	return vhost_worker_queue(vq->worker, work);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+
+void vhost_dev_flush(struct vhost_dev *dev)
+{
+	struct vhost_flush_struct flush;
+
+	init_completion(&flush.wait_event);
+	vhost_work_init(&flush.work, vhost_flush_work);
+
+	if (vhost_work_queue(dev, &flush.work))
+		wait_for_completion(&flush.wait_event);
+}
+EXPORT_SYMBOL_GPL(vhost_dev_flush);
+
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 {
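
For illustration only, not part of the patch: a minimal driver-side sketch of how a module converted in the follow-up patches might switch from the device-wide vhost_work_queue() to the new vhost_vq_work_queue() helper described in the commit message, so the work lands on the worker attached to a specific vq. The my_handle_kick() and my_queue_on_vq() names are hypothetical.

#include <linux/printk.h>
#include "vhost.h"

/* Hypothetical work function; a real driver would process its vq here. */
static void my_handle_kick(struct vhost_work *work)
{
	/* ... drain the virtqueue that owns this work item ... */
}

/* Hypothetical helper contrasting the old and new queueing styles. */
static void my_queue_on_vq(struct vhost_virtqueue *vq, struct vhost_work *work)
{
	vhost_work_init(work, my_handle_kick);

	/* Old style: always queues on the device-wide dev->worker. */
	/* vhost_work_queue(vq->dev, work); */

	/*
	 * New style: queues on the worker bound to this vq, which may
	 * differ per vq once multiple workers are supported. Returns
	 * false if no worker has been set up yet.
	 */
	if (!vhost_vq_work_queue(vq, work))
		pr_debug("vhost worker not set up, work not queued\n");
}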