author:    Linus Torvalds <torvalds@linux-foundation.org>  2016-12-16 03:13:41 +0100
committer: Linus Torvalds <torvalds@linux-foundation.org>  2016-12-16 03:13:41 +0100
commit:    ed3c5a0be38c180ab0899a0f52719e81f36b87a1
tree:      684eb66d1e8513b4584c680e157f3887c04c58ba /drivers/vhost
parent:    Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
parent:    Makefile: drop -D__CHECK_ENDIAN__ from cflags
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
"virtio, vhost: new device, fixes, speedups
This includes the new virtio crypto device, and fixes all over the
place. In particular enabling endian-ness checks for sparse builds
found some bugs which this fixes. And it appears that everyone is in
agreement that disabling endian-ness sparse checks shouldn't be
necessary any longer.
So this enables them for everyone, and drops the __CHECK_ENDIAN__ and
__bitwise__ APIs.
IRQ handling in virtio has been refactored somewhat, the larger switch
to IRQ_SHARED will have to wait as it proved too aggressive"
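
The __bitwise machinery being unified here is small enough to show in isolation. The sketch below is a standalone illustration, not kernel code: wire16 and wire16_to_cpu stand in for the kernel's __le16 and le16_to_cpu, and the attribute definitions simplify include/uapi/linux/types.h. It compiles with plain gcc (the attributes vanish), while running `sparse` on it flags the unconverted assignment, which is the class of bug the message says these checks caught.

/* Standalone sketch of a sparse bitwise endian check. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

/* A value that is little-endian on the wire; mirrors the kernel's __le16. */
typedef unsigned short __bitwise wire16;

static unsigned short wire16_to_cpu(wire16 v)
{
	/* A real helper would byte-swap on big-endian hosts. */
	return (__force unsigned short)v;
}

int main(void)
{
	wire16 w = (__force wire16)0x1234;	/* explicit conversion: clean */
	unsigned short ok = wire16_to_cpu(w);	/* converted: clean */
	unsigned short bad = w;			/* sparse warns: different base types */
	return ok + bad;
}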
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (34 commits)
Makefile: drop -D__CHECK_ENDIAN__ from cflags
fs/logfs: drop __CHECK_ENDIAN__
Documentation/sparse: drop __CHECK_ENDIAN__
linux: drop __bitwise__ everywhere
checkpatch: replace __bitwise__ with __bitwise
Documentation/sparse: drop __bitwise__
tools: enable endian checks for all sparse builds
linux/types.h: enable endian checks for all sparse builds
virtio_mmio: Set dev.release() to avoid warning
vhost: remove unused feature bit
virtio_ring: fix description of virtqueue_get_buf
vhost/scsi: Remove unused but set variable
tools/virtio: use {READ,WRITE}_ONCE() in uaccess.h
vringh: kill off ACCESS_ONCE()
tools/virtio: fix READ_ONCE()
crypto: add virtio-crypto driver
vhost: cache used event for better performance
vsock: lookup and setup guest_cid inside vhost_vsock_lock
virtio_pci: split vp_try_to_find_vqs into INTx and MSI-X variants
virtio_pci: merge vp_free_vectors into vp_del_vqs
...
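
Several commits above ("vringh: kill off ACCESS_ONCE()", "tools/virtio: use {READ,WRITE}_ONCE() in uaccess.h") retire ACCESS_ONCE(). The user-space sketch below is simplified from the kernel's <linux/compiler.h>: the real READ_ONCE()/WRITE_ONCE() also cope with aggregate types, which the single volatile-cast lvalue of ACCESS_ONCE() could not do reliably on older GCC. The scalar case shown here captures the intent: one untorn access the compiler may not merge, split, or re-read.

#include <stdint.h>
#include <stdio.h>

/* Simplified scalar-only versions of the kernel accessors. */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

static uint16_t ring_idx;	/* imagine another thread updating this */

int main(void)
{
	WRITE_ONCE(ring_idx, 5);		/* exactly one store */
	uint16_t snap = READ_ONCE(ring_idx);	/* exactly one load */
	printf("%u\n", (unsigned)snap);
	return 0;
}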
Diffstat (limited to 'drivers/vhost')
 drivers/vhost/scsi.c   |  2
 drivers/vhost/vhost.c  | 40
 drivers/vhost/vhost.h  |  3
 drivers/vhost/vringh.c |  5
 drivers/vhost/vsock.c  | 25
 5 files changed, 51 insertions(+), 24 deletions(-)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6e29d053843d..e2be447752c2 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1749,7 +1749,6 @@ out:
 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
 				 const char *name)
 {
-	struct se_portal_group *se_tpg;
 	struct vhost_scsi_nexus *tv_nexus;
 
 	mutex_lock(&tpg->tv_tpg_mutex);
@@ -1758,7 +1757,6 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
 		pr_debug("tpg->tpg_nexus already exists\n");
 		return -EEXIST;
 	}
-	se_tpg = &tpg->se_tpg;
 
 	tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
 	if (!tv_nexus) {
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 266354390c8f..c0f81e8cb50c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -49,7 +49,7 @@ enum {
 
 INTERVAL_TREE_DEFINE(struct vhost_umem_node, rb, __u64, __subtree_last,
-		     START, LAST, , vhost_umem_interval_tree);
+		     START, LAST, static inline, vhost_umem_interval_tree);
 
 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
 {
@@ -290,6 +290,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->avail = NULL;
 	vq->used = NULL;
 	vq->last_avail_idx = 0;
+	vq->last_used_event = 0;
 	vq->avail_idx = 0;
 	vq->last_used_idx = 0;
 	vq->signalled_used = 0;
@@ -719,7 +720,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 			  struct iovec iov[], int iov_size, int access);
 
-static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
 			      const void *from, unsigned size)
 {
 	int ret;
@@ -749,7 +750,7 @@ out:
 }
 
 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
-				void *from, unsigned size)
+				void __user *from, unsigned size)
 {
 	int ret;
 
@@ -783,7 +784,7 @@ out:
 }
 
 static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
-				     void *addr, unsigned size)
+				     void __user *addr, unsigned size)
 {
 	int ret;
 
@@ -934,8 +935,8 @@ static int umem_access_ok(u64 uaddr, u64 size, int access)
 	return 0;
 }
 
-int vhost_process_iotlb_msg(struct vhost_dev *dev,
-			    struct vhost_iotlb_msg *msg)
+static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+				   struct vhost_iotlb_msg *msg)
 {
 	int ret = 0;
 
@@ -1324,7 +1325,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 			r = -EINVAL;
 			break;
 		}
-		vq->last_avail_idx = s.num;
+		vq->last_avail_idx = vq->last_used_event = s.num;
 		/* Forget the cached index value. */
 		vq->avail_idx = vq->last_avail_idx;
 		break;
@@ -2159,10 +2160,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	__u16 old, new;
 	__virtio16 event;
 	bool v;
-	/* Flush out used index updates. This is paired
-	 * with the barrier that the Guest executes when enabling
-	 * interrupts. */
-	smp_mb();
 
 	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2170,6 +2167,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 
 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__virtio16 flags;
+		/* Flush out used index updates. This is paired
+		 * with the barrier that the Guest executes when enabling
+		 * interrupts. */
+		smp_mb();
 		if (vhost_get_user(vq, flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
@@ -2184,11 +2185,26 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!v))
 		return true;
 
+	/* We're sure if the following conditions are met, there's no
+	 * need to notify guest:
+	 * 1) cached used event is ahead of new
+	 * 2) old to new updating does not cross cached used event. */
+	if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
+	    !vring_need_event(vq->last_used_event, new, old))
+		return false;
+
+	/* Flush out used index updates. This is paired
+	 * with the barrier that the Guest executes when enabling
+	 * interrupts. */
+	smp_mb();
+
 	if (vhost_get_user(vq, event, vhost_used_event(vq))) {
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
-	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
+	vq->last_used_event = vhost16_to_cpu(vq, event);
+
+	return vring_need_event(vq->last_used_event, new, old);
 }
 
 /* This actually signals the guest, using eventfd. */
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 78f3c5fc02e4..a9cbbb148f46 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -107,6 +107,9 @@ struct vhost_virtqueue {
 	/* Last index we used. */
 	u16 last_used_idx;
 
+	/* Last used event we've seen */
+	u16 last_used_event;
+
 	/* Used flags */
 	u16 used_flags;
 
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 3bb02c60a2f5..bb8971f2a634 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
  *
  * Since these may be in userspace, we use (inline) accessors.
  */
+#include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/vringh.h>
 #include <linux/virtio_ring.h>
@@ -820,13 +821,13 @@ EXPORT_SYMBOL(vringh_need_notify_user);
 static inline int getu16_kern(const struct vringh *vrh,
 			      u16 *val, const __virtio16 *p)
 {
-	*val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p));
+	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
 	return 0;
 }
 
 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
 {
-	ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
+	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
 	return 0;
 }
 
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e6b70966c19d..bbbf588540ed 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void)
 	return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
 {
 	struct vhost_vsock *vsock;
 
-	spin_lock_bh(&vhost_vsock_lock);
 	list_for_each_entry(vsock, &vhost_vsock_list, list) {
 		u32 other_cid = vsock->guest_cid;
 
@@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
 			continue;
 
 		if (other_cid == guest_cid) {
-			spin_unlock_bh(&vhost_vsock_lock);
 			return vsock;
 		}
 	}
-	spin_unlock_bh(&vhost_vsock_lock);
 
 	return NULL;
 }
 
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+	struct vhost_vsock *vsock;
+
+	spin_lock_bh(&vhost_vsock_lock);
+	vsock = __vhost_vsock_get(guest_cid);
+	spin_unlock_bh(&vhost_vsock_lock);
+
+	return vsock;
+}
+
 static void
 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			    struct vhost_virtqueue *vq)
@@ -559,11 +567,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 		return -EINVAL;
 
 	/* Refuse if CID is already in use */
-	other = vhost_vsock_get(guest_cid);
-	if (other && other != vsock)
-		return -EADDRINUSE;
-
 	spin_lock_bh(&vhost_vsock_lock);
+	other = __vhost_vsock_get(guest_cid);
+	if (other && other != vsock) {
+		spin_unlock_bh(&vhost_vsock_lock);
+		return -EADDRINUSE;
+	}
 	vsock->guest_cid = guest_cid;
 	spin_unlock_bh(&vhost_vsock_lock);
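
For reference, the skip-notification test added to vhost_notify() above leans on vring_need_event(), which reports whether the guest's event index was crossed while the used index moved from old to new. The standalone sketch below copies vring_need_event() from include/uapi/linux/virtio_ring.h verbatim; the surrounding values are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/* Copied from include/uapi/linux/virtio_ring.h: true iff event_idx was
 * crossed moving the index from old to new_idx (mod 2^16). */
static int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	uint16_t cached = 10;	/* last_used_event read on an earlier pass */
	uint16_t old = 5, new = 8, num = 256;	/* ring of 256 entries */

	/* Same two conditions as the patch: the cached event lies ahead of
	 * new within one ring's worth, and old->new did not cross it, so
	 * the barrier and the userspace re-read of used_event are skipped. */
	if (vring_need_event(cached, new + num, new) &&
	    !vring_need_event(cached, new, old))
		puts("skip notify; no need to re-read used_event");
	else
		puts("re-read used_event and maybe notify");
	return 0;
}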