path: root/drivers/virtio/virtio_ring.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2023-04-28 02:05:34 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-04-28 02:05:34 +0200
commit		8ccd54fe45713cd458015b5b08d6098545e70543 (patch)
tree		12a034b2ee42d681b2e915815c4529d6f246bcc4 /drivers/virtio/virtio_ring.c
parent		Merge tag 'pstore-v6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/... (diff)
parent		vhost_vdpa: fix unmap process in no-batch mode (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
 "virtio,vhost,vdpa: features, fixes, and cleanups:

  - reduction in interrupt rate in virtio
  - perf improvement for VDUSE
  - scalability for vhost-scsi
  - non power of 2 ring support for packed rings
  - better management for mlx5 vdpa
  - suspend for snet
  - VIRTIO_F_NOTIFICATION_DATA
  - shared backend with vdpa-sim-blk
  - user VA support in vdpa-sim
  - better struct packing for virtio

  and fixes, cleanups all over the place"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (52 commits)
  vhost_vdpa: fix unmap process in no-batch mode
  MAINTAINERS: make me a reviewer of VIRTIO CORE AND NET DRIVERS
  tools/virtio: fix build caused by virtio_ring changes
  virtio_ring: add a struct device forward declaration
  vdpa_sim_blk: support shared backend
  vdpa_sim: move buffer allocation in the devices
  vdpa/snet: use likely/unlikely macros in hot functions
  vdpa/snet: implement kick_vq_with_data callback
  virtio-vdpa: add VIRTIO_F_NOTIFICATION_DATA feature support
  virtio: add VIRTIO_F_NOTIFICATION_DATA feature support
  vdpa/snet: support the suspend vDPA callback
  vdpa/snet: support getting and setting VQ state
  MAINTAINERS: add vringh.h to Virtio Core and Net Drivers
  vringh: address kdoc warnings
  vdpa: address kdoc warnings
  virtio_ring: don't update event idx on get_buf
  vdpa_sim: add support for user VA
  vdpa_sim: replace the spinlock with a mutex to protect the state
  vdpa_sim: use kthread worker
  vdpa_sim: make devices agnostic for work management
  ...
Diffstat (limited to 'drivers/virtio/virtio_ring.c')
-rw-r--r--	drivers/virtio/virtio_ring.c	89
1 file changed, 59 insertions, 30 deletions
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 41144b5246a8..c5310eaf8b46 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -231,10 +231,10 @@ static void vring_free(struct virtqueue *_vq);
* Helpers.
*/
-#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)
-static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
- unsigned int total_sg)
+static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
+ unsigned int total_sg)
{
/*
* If the host supports indirect descriptor tables, and we have multiple
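The to_vvq() change above relies on container_of_const(), which preserves the const qualification of the pointer it is given, so one macro serves both the const and non-const helpers in this file. A minimal sketch of what that buys (the function name is hypothetical, not part of the patch):

/* container_of_const() hands back a const vring_virtqueue pointer for a
 * const virtqueue input, so const-correct helpers need no cast.
 */
static bool example_is_packed(const struct virtqueue *_vq)
{
	const struct vring_virtqueue *vq = to_vvq(_vq);	/* constness preserved */

	return vq->packed_ring;
}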
@@ -269,7 +269,7 @@ static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
* unconditionally on data path.
*/
-static bool vring_use_dma_api(struct virtio_device *vdev)
+static bool vring_use_dma_api(const struct virtio_device *vdev)
{
if (!virtio_has_dma_quirk(vdev))
return true;
@@ -289,7 +289,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
return false;
}
-size_t virtio_max_dma_size(struct virtio_device *vdev)
+size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
size_t max_segment_size = SIZE_MAX;
@@ -349,7 +349,7 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
* making all of the arch DMA ops work on the vring device itself
* is a mess.
*/
-static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
return vq->dma_dev;
}
@@ -423,7 +423,7 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
*/
static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
- struct vring_desc *desc)
+ const struct vring_desc *desc)
{
u16 flags;
@@ -784,7 +784,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
}
}
-static inline bool more_used_split(const struct vring_virtqueue *vq)
+static bool more_used_split(const struct vring_virtqueue *vq)
{
return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
vq->split.vring.used->idx);
@@ -854,6 +854,14 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+
+ /*
+ * If device triggered an event already it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
if (vq->event)
/* TODO: this is a hack. Figure out a cleaner value to write. */
vring_used_event(&vq->split.vring) = 0x0;
@@ -1172,18 +1180,18 @@ err:
/*
* Packed ring specific functions - *_packed().
*/
-static inline bool packed_used_wrap_counter(u16 last_used_idx)
+static bool packed_used_wrap_counter(u16 last_used_idx)
{
return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
-static inline u16 packed_last_used(u16 last_used_idx)
+static u16 packed_last_used(u16 last_used_idx)
{
return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}
static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
- struct vring_desc_extra *extra)
+ const struct vring_desc_extra *extra)
{
u16 flags;
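The packed_used_wrap_counter() and packed_last_used() helpers above split the combined last_used_idx word into its wrap-counter bit (VRING_PACKED_EVENT_F_WRAP_CTR, bit 15) and the 15-bit index below it. A worked decode, for illustration only:

/* Illustration, not part of the patch: decode a combined value. */
u16  last_used_idx = 0x8003;				/* wrap counter set, index 3 */
bool wrap = packed_used_wrap_counter(last_used_idx);	/* true: bit 15 is set */
u16  idx  = packed_last_used(last_used_idx);		/* 3: bits 0..14 */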
@@ -1206,7 +1214,7 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
}
static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
- struct vring_packed_desc *desc)
+ const struct vring_packed_desc *desc)
{
u16 flags;
@@ -1612,7 +1620,7 @@ static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
return avail == used && used == used_wrap_counter;
}
-static inline bool more_used_packed(const struct vring_virtqueue *vq)
+static bool more_used_packed(const struct vring_virtqueue *vq)
{
u16 last_used;
u16 last_used_idx;
@@ -1699,6 +1707,14 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+
+ /*
+ * If device triggered an event already it won't trigger one again:
+ * no need to disable.
+ */
+ if (vq->event_triggered)
+ return;
+
vq->packed.vring.driver->flags =
cpu_to_le16(vq->packed.event_flags_shadow);
}
@@ -2330,12 +2346,6 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- /* If device triggered an event already it won't trigger one again:
- * no need to disable.
- */
- if (vq->event_triggered)
- return;
-
if (vq->packed_ring)
virtqueue_disable_cb_packed(_vq);
else
virtqueue_disable_cb_split(_vq);
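The event_triggered check moves out of virtqueue_disable_cb() and into the per-format helpers, so the shadow flags are still updated even when the write to the ring is skipped. For context, the drain loop that makes this path hot in drivers typically looks like the sketch below (process_buf() is a hypothetical consumer, not from this patch):

/* Illustrative driver-side pattern: disable callbacks while draining,
 * then re-enable and re-check to close the race with a late interrupt.
 */
static void example_drain_vq(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)))
			process_buf(buf, len);
	} while (!virtqueue_enable_cb(vq));
}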
@@ -2752,6 +2762,23 @@ void vring_del_virtqueue(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
+u32 vring_notification_data(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 next;
+
+ if (vq->packed_ring)
+ next = (vq->packed.next_avail_idx &
+ ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
+ vq->packed.avail_wrap_counter <<
+ VRING_PACKED_EVENT_F_WRAP_CTR;
+ else
+ next = vq->split.avail_idx_shadow;
+
+ return next << 16 | _vq->index;
+}
+EXPORT_SYMBOL_GPL(vring_notification_data);
+
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
@@ -2771,6 +2798,8 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_ORDER_PLATFORM:
break;
+ case VIRTIO_F_NOTIFICATION_DATA:
+ break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
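With VIRTIO_F_NOTIFICATION_DATA accepted by vring_transport_features() above, a transport that negotiates the feature can kick the device with the value returned by vring_notification_data() instead of the bare queue index: the low 16 bits carry the queue index and the high 16 bits the next available index (including, for packed rings, the wrap counter). A hypothetical MMIO-style notify callback, assuming a doorbell register mapped at example_notify_base:

/* Sketch only: example_notify_base is an assumed ioremap()ed doorbell. */
static void __iomem *example_notify_base;

static bool example_notify_with_data(struct virtqueue *vq)
{
	writel(vring_notification_data(vq), example_notify_base);
	return true;
}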
@@ -2786,10 +2815,10 @@ EXPORT_SYMBOL_GPL(vring_transport_features);
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.
*/
-unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
+unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
@@ -2819,9 +2848,9 @@ void __virtqueue_unbreak(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
-bool virtqueue_is_broken(struct virtqueue *_vq)
+bool virtqueue_is_broken(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
return READ_ONCE(vq->broken);
}
@@ -2868,9 +2897,9 @@ void __virtio_unbreak_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
-dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2881,9 +2910,9 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
-dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2895,9 +2924,9 @@ dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
-dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
@@ -2910,7 +2939,7 @@ dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
/* Only available for split ring */
-const struct vring *virtqueue_get_vring(struct virtqueue *vq)
+const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
{
return &to_vvq(vq)->split.vring;
}