Diffstat (limited to 'drivers/virtio')
-rw-r--r--  drivers/virtio/virtio_balloon.c | 108
-rw-r--r--  drivers/virtio/virtio_mmio.c    |   4
-rw-r--r--  drivers/virtio/virtio_pci.c     | 110
-rw-r--r--  drivers/virtio/virtio_ring.c    | 249
4 files changed, 402 insertions, 69 deletions
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 94fd738a7741..95aeedf198f8 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -1,4 +1,5 @@ -/* Virtio balloon implementation, inspired by Dor Loar and Marcelo +/* + * Virtio balloon implementation, inspired by Dor Laor and Marcelo * Tosatti's implementations. * * Copyright 2008 Rusty Russell IBM Corporation @@ -17,7 +18,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -//#define DEBUG + #include <linux/virtio.h> #include <linux/virtio_balloon.h> #include <linux/swap.h> @@ -87,7 +88,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) init_completion(&vb->acked); /* We should always be able to add one buffer to an empty queue. */ - if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0) + if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0) BUG(); virtqueue_kick(vq); @@ -149,7 +150,6 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) vb->num_pages--; } - /* * Note that if * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); @@ -220,7 +220,7 @@ static void stats_handle_request(struct virtio_balloon *vb) vq = vb->stats_vq; sg_init_one(&sg, vb->stats, sizeof(vb->stats)); - if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0) + if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0) BUG(); virtqueue_kick(vq); } @@ -275,32 +275,21 @@ static int balloon(void *_vballoon) return 0; } -static int virtballoon_probe(struct virtio_device *vdev) +static int init_vqs(struct virtio_balloon *vb) { - struct virtio_balloon *vb; struct virtqueue *vqs[3]; vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request }; const char *names[] = { "inflate", "deflate", "stats" }; int err, nvqs; - vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); - if (!vb) { - err = -ENOMEM; - goto out; - } - - INIT_LIST_HEAD(&vb->pages); - vb->num_pages = 0; - init_waitqueue_head(&vb->config_change); - vb->vdev = vdev; - vb->need_stats_update = 0; - - /* We expect two virtqueues: inflate and deflate, - * and optionally stat. */ + /* + * We expect two virtqueues: inflate and deflate, and + * optionally stat. + */ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2; - err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); + err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names); if (err) - goto out_free_vb; + return err; vb->inflate_vq = vqs[0]; vb->deflate_vq = vqs[1]; @@ -313,10 +302,34 @@ static int virtballoon_probe(struct virtio_device *vdev) * use it to signal us later. 
*/ sg_init_one(&sg, vb->stats, sizeof vb->stats); - if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0) + if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL) + < 0) BUG(); virtqueue_kick(vb->stats_vq); } + return 0; +} + +static int virtballoon_probe(struct virtio_device *vdev) +{ + struct virtio_balloon *vb; + int err; + + vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); + if (!vb) { + err = -ENOMEM; + goto out; + } + + INIT_LIST_HEAD(&vb->pages); + vb->num_pages = 0; + init_waitqueue_head(&vb->config_change); + vb->vdev = vdev; + vb->need_stats_update = 0; + + err = init_vqs(vb); + if (err) + goto out_free_vb; vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { @@ -351,6 +364,48 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev) kfree(vb); } +#ifdef CONFIG_PM +static int virtballoon_freeze(struct virtio_device *vdev) +{ + /* + * The kthread is already frozen by the PM core before this + * function is called. + */ + + /* Ensure we don't get any more requests from the host */ + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + return 0; +} + +static int virtballoon_thaw(struct virtio_device *vdev) +{ + return init_vqs(vdev->priv); +} + +static int virtballoon_restore(struct virtio_device *vdev) +{ + struct virtio_balloon *vb = vdev->priv; + struct page *page, *page2; + + /* We're starting from a clean slate */ + vb->num_pages = 0; + + /* + * If a request wasn't complete at the time of freezing, this + * could have been set. + */ + vb->need_stats_update = 0; + + /* We don't have these pages in the balloon anymore! */ + list_for_each_entry_safe(page, page2, &vb->pages, lru) { + list_del(&page->lru); + totalram_pages++; + } + return init_vqs(vdev->priv); +} +#endif + static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST, VIRTIO_BALLOON_F_STATS_VQ, @@ -365,6 +420,11 @@ static struct virtio_driver virtio_balloon_driver = { .probe = virtballoon_probe, .remove = __devexit_p(virtballoon_remove), .config_changed = virtballoon_changed, +#ifdef CONFIG_PM + .freeze = virtballoon_freeze, + .restore = virtballoon_restore, + .thaw = virtballoon_thaw, +#endif }; static int __init init(void) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 0269717436af..01d6dc250d5c 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -310,8 +310,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); /* Create the vring */ - vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, - vdev, info->queue, vm_notify, callback, name); + vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, + true, info->queue, vm_notify, callback, name); if (!vq) { err = -ENOMEM; goto error_new_virtqueue; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index baabb7937ec2..635e1efb3792 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -55,6 +55,10 @@ struct virtio_pci_device unsigned msix_vectors; /* Vectors allocated, excluding per-vq vectors if any */ unsigned msix_used_vectors; + + /* Status saved during hibernate/restore */ + u8 saved_status; + /* Whether we have vector per vq */ bool per_vq_vectors; }; @@ -414,8 +418,8 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); /* create the vring */ - vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, - vdev, info->queue, vp_notify, callback, 
name); + vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev, + true, info->queue, vp_notify, callback, name); if (!vq) { err = -ENOMEM; goto out_activate_queue; @@ -716,19 +720,114 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) } #ifdef CONFIG_PM -static int virtio_pci_suspend(struct pci_dev *pci_dev, pm_message_t state) +static int virtio_pci_suspend(struct device *dev) { + struct pci_dev *pci_dev = to_pci_dev(dev); + pci_save_state(pci_dev); pci_set_power_state(pci_dev, PCI_D3hot); return 0; } -static int virtio_pci_resume(struct pci_dev *pci_dev) +static int virtio_pci_resume(struct device *dev) { + struct pci_dev *pci_dev = to_pci_dev(dev); + pci_restore_state(pci_dev); pci_set_power_state(pci_dev, PCI_D0); return 0; } + +static int virtio_pci_freeze(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + struct virtio_driver *drv; + int ret; + + drv = container_of(vp_dev->vdev.dev.driver, + struct virtio_driver, driver); + + ret = 0; + vp_dev->saved_status = vp_get_status(&vp_dev->vdev); + if (drv && drv->freeze) + ret = drv->freeze(&vp_dev->vdev); + + if (!ret) + pci_disable_device(pci_dev); + return ret; +} + +static int restore_common(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + int ret; + + ret = pci_enable_device(pci_dev); + if (ret) + return ret; + pci_set_master(pci_dev); + vp_finalize_features(&vp_dev->vdev); + + return ret; +} + +static int virtio_pci_thaw(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + struct virtio_driver *drv; + int ret; + + ret = restore_common(dev); + if (ret) + return ret; + + drv = container_of(vp_dev->vdev.dev.driver, + struct virtio_driver, driver); + + if (drv && drv->thaw) + ret = drv->thaw(&vp_dev->vdev); + else if (drv && drv->restore) + ret = drv->restore(&vp_dev->vdev); + + /* Finally, tell the device we're all set */ + if (!ret) + vp_set_status(&vp_dev->vdev, vp_dev->saved_status); + + return ret; +} + +static int virtio_pci_restore(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + struct virtio_driver *drv; + int ret; + + drv = container_of(vp_dev->vdev.dev.driver, + struct virtio_driver, driver); + + ret = restore_common(dev); + if (!ret && drv && drv->restore) + ret = drv->restore(&vp_dev->vdev); + + /* Finally, tell the device we're all set */ + if (!ret) + vp_set_status(&vp_dev->vdev, vp_dev->saved_status); + + return ret; +} + +static const struct dev_pm_ops virtio_pci_pm_ops = { + .suspend = virtio_pci_suspend, + .resume = virtio_pci_resume, + .freeze = virtio_pci_freeze, + .thaw = virtio_pci_thaw, + .restore = virtio_pci_restore, + .poweroff = virtio_pci_suspend, +}; #endif static struct pci_driver virtio_pci_driver = { @@ -737,8 +836,7 @@ static struct pci_driver virtio_pci_driver = { .probe = virtio_pci_probe, .remove = __devexit_p(virtio_pci_remove), #ifdef CONFIG_PM - .suspend = virtio_pci_suspend, - .resume = virtio_pci_resume, + .driver.pm = &virtio_pci_pm_ops, #endif }; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index c7a2c208f6ea..5aa43c3392a2 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -22,23 +22,27 @@ #include <linux/device.h> #include <linux/slab.h> #include <linux/module.h> +#include 
<linux/hrtimer.h> /* virtio guest is communicating with a virtual "device" that actually runs on * a host processor. Memory barriers are used to control SMP effects. */ #ifdef CONFIG_SMP /* Where possible, use SMP barriers which are more lightweight than mandatory * barriers, because mandatory barriers control MMIO effects on accesses - * through relaxed memory I/O windows (which virtio does not use). */ -#define virtio_mb() smp_mb() -#define virtio_rmb() smp_rmb() -#define virtio_wmb() smp_wmb() + * through relaxed memory I/O windows (which virtio-pci does not use). */ +#define virtio_mb(vq) \ + do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0) +#define virtio_rmb(vq) \ + do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0) +#define virtio_wmb(vq) \ + do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0) #else /* We must force memory ordering even if guest is UP since host could be * running on another CPU, but SMP barriers are defined to barrier() in that * configuration. So fall back to mandatory barriers instead. */ -#define virtio_mb() mb() -#define virtio_rmb() rmb() -#define virtio_wmb() wmb() +#define virtio_mb(vq) mb() +#define virtio_rmb(vq) rmb() +#define virtio_wmb(vq) wmb() #endif #ifdef DEBUG @@ -77,6 +81,9 @@ struct vring_virtqueue /* Actual memory layout for this queue */ struct vring vring; + /* Can we use weak barriers? */ + bool weak_barriers; + /* Other side has made a mess, don't try any more. */ bool broken; @@ -102,6 +109,10 @@ struct vring_virtqueue #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; #endif /* Tokens for callbacks. */ @@ -160,12 +171,29 @@ static int vring_add_indirect(struct vring_virtqueue *vq, return head; } -int virtqueue_add_buf_gfp(struct virtqueue *_vq, - struct scatterlist sg[], - unsigned int out, - unsigned int in, - void *data, - gfp_t gfp) +/** + * virtqueue_add_buf - expose buffer to other end + * @vq: the struct virtqueue we're talking about. + * @sg: the description of the buffer(s). + * @out_num: the number of sg readable by other side + * @in_num: the number of sg which are writable (after readable ones) + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns remaining capacity of queue or a negative error + * (ie. ENOSPC). Note that it only really makes sense to treat all + * positive return values as "available": indirect buffers mean that + * we can put an entire sg[] array inside a single queue entry. + */ +int virtqueue_add_buf(struct virtqueue *_vq, + struct scatterlist sg[], + unsigned int out, + unsigned int in, + void *data, + gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i, avail, uninitialized_var(prev); @@ -175,6 +203,19 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq, BUG_ON(data == NULL); +#ifdef DEBUG + { + ktime_t now = ktime_get(); + + /* No kick or get, with .1 second between? Warn. */ + if (vq->last_add_time_valid) + WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) + > 100); + vq->last_add_time = now; + vq->last_add_time_valid = true; + } +#endif + /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. 
FIXME: tune this threshold */ if (vq->indirect && (out + in) > 1 && vq->num_free) { @@ -227,40 +268,102 @@ add_head: vq->data[head] = data; /* Put entry in available array (but don't update avail->idx until they - * do sync). FIXME: avoid modulus here? */ - avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num; + * do sync). */ + avail = (vq->vring.avail->idx & (vq->vring.num-1)); vq->vring.avail->ring[avail] = head; + /* Descriptors and available array need to be set before we expose the + * new available array entries. */ + virtio_wmb(vq); + vq->vring.avail->idx++; + vq->num_added++; + + /* This is very unlikely, but theoretically possible. Kick + * just in case. */ + if (unlikely(vq->num_added == (1 << 16) - 1)) + virtqueue_kick(_vq); + pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); return vq->num_free; } -EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); +EXPORT_SYMBOL_GPL(virtqueue_add_buf); -void virtqueue_kick(struct virtqueue *_vq) +/** + * virtqueue_kick_prepare - first half of split virtqueue_kick call. + * @vq: the struct virtqueue + * + * Instead of virtqueue_kick(), you can do: + * if (virtqueue_kick_prepare(vq)) + * virtqueue_notify(vq); + * + * This is sometimes useful because the virtqueue_kick_prepare() needs + * to be serialized, but the actual virtqueue_notify() call does not. + */ +bool virtqueue_kick_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); u16 new, old; + bool needs_kick; + START_USE(vq); - /* Descriptors and available array need to be set before we expose the - * new available array entries. */ - virtio_wmb(); + /* We need to expose available array entries before checking avail + * event. */ + virtio_mb(vq); - old = vq->vring.avail->idx; - new = vq->vring.avail->idx = old + vq->num_added; + old = vq->vring.avail->idx - vq->num_added; + new = vq->vring.avail->idx; vq->num_added = 0; - /* Need to update avail index before checking if we should notify */ - virtio_mb(); - - if (vq->event ? - vring_need_event(vring_avail_event(&vq->vring), new, old) : - !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY)) - /* Prod other side to tell it about changes. */ - vq->notify(&vq->vq); +#ifdef DEBUG + if (vq->last_add_time_valid) { + WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), + vq->last_add_time)) > 100); + } + vq->last_add_time_valid = false; +#endif + if (vq->event) { + needs_kick = vring_need_event(vring_avail_event(&vq->vring), + new, old); + } else { + needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); + } END_USE(vq); + return needs_kick; +} +EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); + +/** + * virtqueue_notify - second half of split virtqueue_kick call. + * @vq: the struct virtqueue + * + * This does not need to be serialized. + */ +void virtqueue_notify(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + /* Prod other side to tell it about changes. */ + vq->notify(_vq); +} +EXPORT_SYMBOL_GPL(virtqueue_notify); + +/** + * virtqueue_kick - update after add_buf + * @vq: the struct virtqueue + * + * After one or more virtqueue_add_buf calls, invoke this to kick + * the other side. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). 
+ */ +void virtqueue_kick(struct virtqueue *vq) +{ + if (virtqueue_kick_prepare(vq)) + virtqueue_notify(vq); } EXPORT_SYMBOL_GPL(virtqueue_kick); @@ -294,11 +397,28 @@ static inline bool more_used(const struct vring_virtqueue *vq) return vq->last_used_idx != vq->vring.used->idx; } +/** + * virtqueue_get_buf - get the next used buffer + * @vq: the struct virtqueue we're talking about. + * @len: the length written into the buffer + * + * If the driver wrote data into the buffer, @len will be set to the + * amount written. This means you don't need to clear the buffer + * beforehand to ensure there's no data leakage in the case of short + * writes. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + * + * Returns NULL if there are no used buffers, or the "data" token + * handed to virtqueue_add_buf(). + */ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) { struct vring_virtqueue *vq = to_vvq(_vq); void *ret; unsigned int i; + u16 last_used; START_USE(vq); @@ -314,10 +434,11 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) } /* Only get used array entries after they have been exposed by host. */ - virtio_rmb(); + virtio_rmb(vq); - i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; - *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; + last_used = (vq->last_used_idx & (vq->vring.num - 1)); + i = vq->vring.used->ring[last_used].id; + *len = vq->vring.used->ring[last_used].len; if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); @@ -337,14 +458,27 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) * the read in the next get_buf call. */ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { vring_used_event(&vq->vring) = vq->last_used_idx; - virtio_mb(); + virtio_mb(vq); } +#ifdef DEBUG + vq->last_add_time_valid = false; +#endif + END_USE(vq); return ret; } EXPORT_SYMBOL_GPL(virtqueue_get_buf); +/** + * virtqueue_disable_cb - disable callbacks + * @vq: the struct virtqueue we're talking about. + * + * Note that this is not necessarily synchronous, hence unreliable and only + * useful as an optimization. + * + * Unlike other operations, this need not be serialized. + */ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -353,6 +487,17 @@ void virtqueue_disable_cb(struct virtqueue *_vq) } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); +/** + * virtqueue_enable_cb - restart callbacks after disable_cb. + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns "false" if there are pending + * buffers in the queue, to detect a possible race between the driver + * checking for more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ bool virtqueue_enable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -366,7 +511,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq) * entry. Always do both to keep code simple. */ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; vring_used_event(&vq->vring) = vq->last_used_idx; - virtio_mb(); + virtio_mb(vq); if (unlikely(more_used(vq))) { END_USE(vq); return false; @@ -377,6 +522,19 @@ bool virtqueue_enable_cb(struct virtqueue *_vq) } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); +/** + * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. 
+ * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks but hints to the other side to delay + * interrupts until most of the available buffers have been processed; + * it returns "false" if there are many pending buffers in the queue, + * to detect a possible race between the driver checking for more work, + * and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -393,7 +551,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) /* TODO: tune this threshold */ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; vring_used_event(&vq->vring) = vq->last_used_idx + bufs; - virtio_mb(); + virtio_mb(vq); if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { END_USE(vq); return false; @@ -404,6 +562,14 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) } EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); +/** + * virtqueue_detach_unused_buf - detach first unused buffer + * @vq: the struct virtqueue we're talking about. + * + * Returns NULL or the "data" token handed to virtqueue_add_buf(). + * This is not valid on an active queue; it is useful only for device + * shutdown. + */ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -453,6 +619,7 @@ EXPORT_SYMBOL_GPL(vring_interrupt); struct virtqueue *vring_new_virtqueue(unsigned int num, unsigned int vring_align, struct virtio_device *vdev, + bool weak_barriers, void *pages, void (*notify)(struct virtqueue *), void (*callback)(struct virtqueue *), @@ -476,12 +643,14 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, vq->vq.vdev = vdev; vq->vq.name = name; vq->notify = notify; + vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; + vq->last_add_time_valid = false; #endif vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); @@ -530,7 +699,13 @@ void vring_transport_features(struct virtio_device *vdev) } EXPORT_SYMBOL_GPL(vring_transport_features); -/* return the size of the vring within the virtqueue */ +/** + * virtqueue_get_vring_size - return the size of the virtqueue's vring + * @vq: the struct virtqueue containing the vring of interest. + * + * Returns the size of the vring. This is mainly used for boasting to + * userspace. Unlike other operations, this need not be serialized. + */ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) { |
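
For reference, the driver-facing pattern this patch introduces -- virtqueue_add_buf() taking an explicit gfp_t, and virtqueue_kick() optionally split into a serialized prepare step plus an unserialized notify -- would be used roughly as follows. This is an illustrative sketch, not part of the patch; example_send, my_vq, buf and token are placeholder names.

    static int example_send(struct virtqueue *my_vq, void *buf,
                            unsigned int len, void *token)
    {
            struct scatterlist sg;
            int ret;

            sg_init_one(&sg, buf, len);

            /* One readable (out) entry, no writable (in) entries. */
            ret = virtqueue_add_buf(my_vq, &sg, 1, 0, token, GFP_KERNEL);
            if (ret < 0)
                    return ret;

            /*
             * The prepare step must be serialized against other operations
             * on this virtqueue; the notify itself need not be, so it may
             * be issued after dropping a lock.
             */
            if (virtqueue_kick_prepare(my_vq))
                    virtqueue_notify(my_vq);

            return 0;
    }

Callers that do not need the split can keep calling virtqueue_kick(), which the patch reimplements as exactly this prepare-then-notify sequence.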
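
The ring code also stops wrapping the available and used indices with a modulus and masks with (vring.num - 1) instead. That is only equivalent when the ring size is a power of two, a constraint vring_new_virtqueue() is expected to enforce. A small illustrative check, not from the patch:

    /* Masking wraps exactly like the old modulus for power-of-two sizes. */
    static inline bool vring_size_is_pow2(unsigned int num)
    {
            return num != 0 && (num & (num - 1)) == 0;
    }

    /* e.g. for num == 256: (idx % num) == (idx & (num - 1)) for any idx. */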