diff options
author:    Michael S. Tsirkin <mst@redhat.com> | 2012-11-01 10:16:55 +0100
committer: David S. Miller <davem@davemloft.net> | 2012-11-03 02:29:58 +0100
commit:    24eb21a14896e5c1c7cbca8e9a40405a51745f1d (patch)
tree:      fc754d17a01a4f56272401fd4d73ebe5ee60ede6 /drivers/vhost/net.c
parent:    vhost-net: select tx zero copy dynamically (diff)
download:  linux-24eb21a14896e5c1c7cbca8e9a40405a51745f1d.tar.xz linux-24eb21a14896e5c1c7cbca8e9a40405a51745f1d.zip
vhost-net: reduce vq polling on tx zerocopy
It seems that to avoid deadlocks it is enough to poll vq before
we are going to use the last buffer. This is faster than
c70aa540c7a9f67add11ad3161096fb95233aa2e.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/vhost/net.c')
-rw-r--r--  drivers/vhost/net.c | 14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 93f2d6741f34..28ad7752e0f3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -197,8 +197,18 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
 {
 	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
 	struct vhost_virtqueue *vq = ubufs->vq;
-
-	vhost_poll_queue(&vq->poll);
+	int cnt = atomic_read(&ubufs->kref.refcount);
+
+	/*
+	 * Trigger polling thread if guest stopped submitting new buffers:
+	 * in this case, the refcount after decrement will eventually reach 1
+	 * so here it is 2.
+	 * We also trigger polling periodically after each 16 packets
+	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
+	 * less than 10% of times).
+	 */
+	if (cnt <= 2 || !(cnt % 16))
+		vhost_poll_queue(&vq->poll);
 	/* set len to mark this desc buffers done DMA */
 	vq->heads[ubuf->desc].len = success ?
 		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;