author     Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 23:12:59 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 23:12:59 +0200
commit     35c99ffa20edd3c24be352d28a63cd3a23121282 (patch)
tree       4a844d58d3a5648025f48821198ef04f643a857e /drivers
parent     Add gitignore file for samples/vfs/ generated files (diff)
parent     virtio/s390: enable packed ring (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:

 - enable packed ring support for s390

 - several fixes

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio/s390: enable packed ring
  virtio/s390: DMA support for virtio-ccw
  virtio/s390: use vring_create_virtqueue
  virtio/virtio_ring: do some comment fixes
  vhost-scsi: remove incorrect memory barrier
  tools/virtio/ringtest: Remove bogus definition of BUG_ON()
  virtio_ring: Fix potential mem leak in virtqueue_add_indirect_packed
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c  52
-rw-r--r--  drivers/vhost/scsi.c               1
-rw-r--r--  drivers/virtio/virtio_ring.c      28
3 files changed, 40 insertions(+), 41 deletions(-)
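
The heart of the s390 side of this pull is switching virtio_ccw from hand-allocated split rings to the common ring allocator, which is what makes packed ring possible there. As a point of reference, here is a minimal, hypothetical transport-driver sketch of that pattern, using only the helpers visible in the hunks below (vring_create_virtqueue() plus the virtqueue_get_*_addr() and virtqueue_get_vring_size() accessors); the alignment value and the function itself are illustrative, not the driver's actual code. In the real driver the registration step is the CCW_CMD_SET_VQ channel program seen in the virtio_ccw.c hunks.

	#include <linux/virtio.h>
	#include <linux/virtio_ring.h>

	/*
	 * Hypothetical transport fragment: let the virtio core allocate the
	 * ring (split or packed) and query the resulting DMA addresses,
	 * instead of calling alloc_pages_exact()/vring_new_virtqueue() by hand.
	 */
	static struct virtqueue *setup_vq_sketch(struct virtio_device *vdev,
						 unsigned int index,
						 unsigned int num,
						 bool may_reduce, bool ctx,
						 bool (*notify)(struct virtqueue *),
						 void (*callback)(struct virtqueue *),
						 const char *name)
	{
		struct virtqueue *vq;
		dma_addr_t desc, avail, used;

		/* PAGE_SIZE alignment is an arbitrary choice for this sketch. */
		vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev,
					    true /* weak_barriers */, may_reduce,
					    ctx, notify, callback, name);
		if (!vq)
			return NULL;

		/* The core may have reduced the ring; re-read the real size. */
		num = virtqueue_get_vring_size(vq);

		/* Addresses a transport would register with its device. */
		desc  = virtqueue_get_desc_addr(vq);
		avail = virtqueue_get_avail_addr(vq);
		used  = virtqueue_get_used_addr(vq);

		/* ... transport-specific registration of desc/avail/used/num ... */

		return vq;
	}
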
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 991420caa4f2..6a3076881321 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -66,6 +66,7 @@ struct virtio_ccw_device {
bool device_lost;
unsigned int config_ready;
void *airq_info;
+ u64 dma_mask;
};
struct vq_info_block_legacy {
@@ -108,7 +109,6 @@ struct virtio_rev_info {
struct virtio_ccw_vq_info {
struct virtqueue *vq;
int num;
- void *queue;
union {
struct vq_info_block s;
struct vq_info_block_legacy l;
@@ -423,7 +423,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
struct virtio_ccw_vq_info *info = vq->priv;
unsigned long flags;
- unsigned long size;
int ret;
unsigned int index = vq->index;
@@ -461,8 +460,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
ret, index);
vring_del_virtqueue(vq);
- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
- free_pages_exact(info->queue, size);
kfree(info->info_block);
kfree(info);
}
@@ -494,8 +491,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
int err;
struct virtqueue *vq = NULL;
struct virtio_ccw_vq_info *info;
- unsigned long size = 0; /* silence the compiler */
+ u64 queue;
unsigned long flags;
+ bool may_reduce;
/* Allocate queue. */
info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
@@ -516,37 +514,34 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
err = info->num;
goto out_err;
}
- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
- info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
- if (info->queue == NULL) {
- dev_warn(&vcdev->cdev->dev, "no queue\n");
- err = -ENOMEM;
- goto out_err;
- }
+ may_reduce = vcdev->revision > 0;
+ vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
+ vdev, true, may_reduce, ctx,
+ virtio_ccw_kvm_notify, callback, name);
- vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
- true, ctx, info->queue, virtio_ccw_kvm_notify,
- callback, name);
if (!vq) {
/* For now, we fail if we can't get the requested size. */
dev_warn(&vcdev->cdev->dev, "no vq\n");
err = -ENOMEM;
goto out_err;
}
+ /* it may have been reduced */
+ info->num = virtqueue_get_vring_size(vq);
/* Register it with the host. */
+ queue = virtqueue_get_desc_addr(vq);
if (vcdev->revision == 0) {
- info->info_block->l.queue = (__u64)info->queue;
+ info->info_block->l.queue = queue;
info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
info->info_block->l.index = i;
info->info_block->l.num = info->num;
ccw->count = sizeof(info->info_block->l);
} else {
- info->info_block->s.desc = (__u64)info->queue;
+ info->info_block->s.desc = queue;
info->info_block->s.index = i;
info->info_block->s.num = info->num;
- info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
- info->info_block->s.used = (__u64)virtqueue_get_used(vq);
+ info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
+ info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
@@ -572,8 +567,6 @@ out_err:
if (vq)
vring_del_virtqueue(vq);
if (info) {
- if (info->queue)
- free_pages_exact(info->queue, size);
kfree(info->info_block);
}
kfree(info);
@@ -780,12 +773,8 @@ out_free:
static void ccw_transport_features(struct virtio_device *vdev)
{
/*
- * Packed ring isn't enabled on virtio_ccw for now,
- * because virtio_ccw uses some legacy accessors,
- * e.g. virtqueue_get_avail() and virtqueue_get_used()
- * which aren't available in packed ring currently.
+ * Currently nothing to do here.
*/
- __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
@@ -1266,6 +1255,16 @@ static int virtio_ccw_online(struct ccw_device *cdev)
ret = -ENOMEM;
goto out_free;
}
+
+ vcdev->vdev.dev.parent = &cdev->dev;
+ cdev->dev.dma_mask = &vcdev->dma_mask;
+ /* we are fine with common virtio infrastructure using 64 bit DMA */
+ ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
+ goto out_free;
+ }
+
vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
GFP_DMA | GFP_KERNEL);
if (!vcdev->config_block) {
@@ -1280,7 +1279,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
- vcdev->vdev.dev.parent = &cdev->dev;
vcdev->vdev.dev.release = virtio_ccw_release_dev;
vcdev->vdev.config = &virtio_ccw_config_ops;
vcdev->cdev = cdev;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 618fb6461017..c090d177bd75 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1443,7 +1443,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
vs_tpg[tpg->tport_tpgt] = tpg;
- smp_mb__after_atomic();
match = true;
}
mutex_unlock(&tpg->tv_tpg_mutex);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5df92c308286..0a7b3ce3fb75 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1004,6 +1004,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
if (unlikely(vq->vq.num_free < 1)) {
pr_debug("Can't add buf len 1 - avail = 0\n");
+ kfree(desc);
END_USE(vq);
return -ENOSPC;
}
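
The hunk above closes a leak: the indirect descriptor table is allocated before the free-slot check, so the -ENOSPC early return has to release it. A stripped-down illustration of the pattern, with hypothetical demo_* types rather than the real virtio_ring internals:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_desc { u64 addr; u32 len; u16 flags; };	/* hypothetical */
	struct demo_ring { unsigned int num_free; };		/* hypothetical */

	static int add_indirect_sketch(struct demo_ring *r, unsigned int total_sg,
				       struct demo_desc **out)
	{
		struct demo_desc *desc;

		desc = kmalloc_array(total_sg, sizeof(*desc), GFP_ATOMIC);
		if (!desc)
			return -ENOMEM;

		if (r->num_free < 1) {
			kfree(desc);	/* the fix: don't leak on the full-ring path */
			return -ENOSPC;
		}

		/* ... fill desc[] from the scatterlists ... */
		*out = desc;		/* ownership passes to the caller/ring */
		return 0;
	}
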
@@ -1718,10 +1719,10 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/**
* virtqueue_add_sgs - expose buffers to other end
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @sgs: array of terminated scatterlists.
- * @out_num: the number of scatterlists readable by other side
- * @in_num: the number of scatterlists which are writable (after readable ones)
+ * @out_sgs: the number of scatterlists readable by other side
+ * @in_sgs: the number of scatterlists which are writable (after readable ones)
* @data: the token identifying the buffer.
* @gfp: how to do memory allocations (if necessary).
*
@@ -1821,7 +1822,7 @@ EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
*
* Instead of virtqueue_kick(), you can do:
* if (virtqueue_kick_prepare(vq))
@@ -1841,7 +1842,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
* virtqueue_notify - second half of split virtqueue_kick call.
- * @vq: the struct virtqueue
+ * @_vq: the struct virtqueue
*
* This does not need to be serialized.
*
@@ -1885,8 +1886,9 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
/**
* virtqueue_get_buf - get the next used buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
+ * @ctx: extra context for the token
*
* If the device wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer
@@ -1916,7 +1918,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
* virtqueue_disable_cb - disable callbacks
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
@@ -1936,7 +1938,7 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
* virtqueue_enable_cb_prepare - restart callbacks after disable_cb
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns current queue state
* in an opaque unsigned value. This value should be later tested by
@@ -1957,7 +1959,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
* virtqueue_poll - query pending used buffers
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
* @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
*
* Returns "true" if there are pending used buffers in the queue.
@@ -1976,7 +1978,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
/**
* virtqueue_enable_cb - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
@@ -1995,7 +1997,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
* virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks but hints to the other side to delay
* interrupts until most of the available buffers have been processed;
@@ -2017,7 +2019,7 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
* virtqueue_detach_unused_buf - detach first unused buffer
- * @vq: the struct virtqueue we're talking about.
+ * @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
* This is not valid on an active queue; it is useful only for device
@@ -2249,7 +2251,7 @@ EXPORT_SYMBOL_GPL(vring_transport_features);
/**
* virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @vq: the struct virtqueue containing the vring of interest.
+ * @_vq: the struct virtqueue containing the vring of interest.
*
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.