Diffstat (limited to 'drivers/virtio')
 drivers/virtio/Kconfig              |  12
 drivers/virtio/virtio.c             |  19
 drivers/virtio/virtio_dma_buf.c     |   2
 drivers/virtio/virtio_mmio.c        |   2
 drivers/virtio/virtio_pci_common.c  |  24
 drivers/virtio/virtio_pci_common.h  |  20
 drivers/virtio/virtio_pci_modern.c  | 469
 drivers/virtio/virtio_ring.c        | 593
 drivers/virtio/virtio_vdpa.c        |   3
 9 files changed, 831 insertions(+), 313 deletions(-)
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 42a48ac763ee..2eb747311bfd 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -122,7 +122,7 @@ config VIRTIO_BALLOON config VIRTIO_MEM tristate "Virtio mem driver" - depends on X86_64 || ARM64 || RISCV + depends on X86_64 || ARM64 || RISCV || S390 depends on VIRTIO depends on MEMORY_HOTPLUG depends on MEMORY_HOTREMOVE @@ -132,11 +132,11 @@ config VIRTIO_MEM This driver provides access to virtio-mem paravirtualized memory devices, allowing to hotplug and hotunplug memory. - This driver currently only supports x86-64 and arm64. Although it - should compile on other architectures that implement memory - hot(un)plug, architecture-specific and/or common - code changes may be required for virtio-mem, kdump and kexec to work as - expected. + This driver currently supports x86-64, arm64, riscv and s390. + Although it should compile on other architectures that implement + memory hot(un)plug, architecture-specific and/or common + code changes may be required for virtio-mem, kdump and kexec to + work as expected. If unsure, say M. diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index b9095751e43b..b10ed9f5b543 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -377,6 +377,24 @@ static void virtio_dev_remove(struct device *_d) of_node_put(dev->dev.of_node); } +/* + * virtio_irq_get_affinity - get IRQ affinity mask for device + * @_d: ptr to dev structure + * @irq_vec: interrupt vector number + * + * Return the CPU affinity mask for @_d and @irq_vec. + */ +static const struct cpumask *virtio_irq_get_affinity(struct device *_d, + unsigned int irq_vec) +{ + struct virtio_device *dev = dev_to_virtio(_d); + + if (!dev->config->get_vq_affinity) + return NULL; + + return dev->config->get_vq_affinity(dev, irq_vec); +} + static const struct bus_type virtio_bus = { .name = "virtio", .match = virtio_dev_match, @@ -384,6 +402,7 @@ static const struct bus_type virtio_bus = { .uevent = virtio_uevent, .probe = virtio_dev_probe, .remove = virtio_dev_remove, + .irq_get_affinity = virtio_irq_get_affinity, }; int __register_virtio_driver(struct virtio_driver *driver, struct module *owner) diff --git a/drivers/virtio/virtio_dma_buf.c b/drivers/virtio/virtio_dma_buf.c index 3034a2f605c8..3fe1d03b0645 100644 --- a/drivers/virtio/virtio_dma_buf.c +++ b/drivers/virtio/virtio_dma_buf.c @@ -87,4 +87,4 @@ EXPORT_SYMBOL(virtio_dma_buf_get_uuid); MODULE_DESCRIPTION("dma-bufs for virtio exported objects"); MODULE_LICENSE("GPL"); -MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS("DMA_BUF"); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 90e784e7b721..5d78c2d572ab 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -845,7 +845,7 @@ MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match); static struct platform_driver virtio_mmio_driver = { .probe = virtio_mmio_probe, - .remove_new = virtio_mmio_remove, + .remove = virtio_mmio_remove, .driver = { .name = "virtio-mmio", .of_match_table = virtio_mmio_match, diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index c44d8ba00c02..88074451dd61 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -24,6 +24,16 @@ MODULE_PARM_DESC(force_legacy, "Force legacy mode for transitional virtio 1 devices"); #endif +bool vp_is_avq(struct virtio_device *vdev, unsigned int index) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + if 
(!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) + return false; + + return index == vp_dev->admin_vq.vq_index; +} + /* wait for pending irq handlers */ void vp_synchronize_vectors(struct virtio_device *vdev) { @@ -234,10 +244,9 @@ out_info: return vq; } -static void vp_del_vq(struct virtqueue *vq) +static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; unsigned long flags; /* @@ -258,13 +267,16 @@ static void vp_del_vq(struct virtqueue *vq) void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info; struct virtqueue *vq, *n; int i; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - if (vp_dev->per_vq_vectors) { - int v = vp_dev->vqs[vq->index]->msix_vector; + info = vp_is_avq(vdev, vq->index) ? vp_dev->admin_vq.info : + vp_dev->vqs[vq->index]; + if (vp_dev->per_vq_vectors) { + int v = info->msix_vector; if (v != VIRTIO_MSI_NO_VECTOR && !vp_is_slow_path_vector(v)) { int irq = pci_irq_vector(vp_dev->pci_dev, v); @@ -273,7 +285,7 @@ void vp_del_vqs(struct virtio_device *vdev) free_irq(irq, vq); } } - vp_del_vq(vq); + vp_del_vq(vq, info); } vp_dev->per_vq_vectors = false; @@ -354,7 +366,7 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vq); if (err) { - vp_del_vq(vq); + vp_del_vq(vq, *p_info); return ERR_PTR(err); } diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 1d9c49947f52..8cd01de27baf 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -48,6 +48,9 @@ struct virtio_pci_admin_vq { /* Protects virtqueue access. */ spinlock_t lock; u64 supported_cmds; + u64 supported_caps; + u8 max_dev_parts_objects; + struct ida dev_parts_ida; /* Name of the admin queue: avq.$vq_index. */ char name[10]; u16 vq_index; @@ -167,17 +170,30 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev); BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO)) +#define VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP \ + (BIT_ULL(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_GET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_SET) | \ + BIT_ULL(VIRTIO_ADMIN_CMD_DEV_MODE_SET)) + /* Unlike modern drivers which support hardware virtio devices, legacy drivers * assume software-based devices: e.g. they don't use proper memory barriers * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more * or less by chance. For now, only support legacy IO on X86. 
*/ #ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY -#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_LEGACY_ADMIN_CMD_BITMAP +#define VIRTIO_ADMIN_CMD_BITMAP (VIRTIO_LEGACY_ADMIN_CMD_BITMAP | \ + VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) #else -#define VIRTIO_ADMIN_CMD_BITMAP 0 +#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP #endif +bool vp_is_avq(struct virtio_device *vdev, unsigned int index); void vp_modern_avq_done(struct virtqueue *vq); int vp_modern_admin_cmd_exec(struct virtio_device *vdev, struct virtio_admin_cmd *cmd); diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 9193c30d640a..5eaade757860 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -15,6 +15,7 @@ */ #include <linux/delay.h> +#include <linux/virtio_pci_admin.h> #define VIRTIO_PCI_NO_LEGACY #define VIRTIO_RING_NO_LEGACY #include "virtio_pci_common.h" @@ -43,16 +44,6 @@ static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num) return 0; } -static bool vp_is_avq(struct virtio_device *vdev, unsigned int index) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) - return false; - - return index == vp_dev->admin_vq.vq_index; -} - void vp_modern_avq_done(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); @@ -64,8 +55,10 @@ void vp_modern_avq_done(struct virtqueue *vq) spin_lock_irqsave(&admin_vq->lock, flags); do { virtqueue_disable_cb(vq); - while ((cmd = virtqueue_get_buf(vq, &len))) + while ((cmd = virtqueue_get_buf(vq, &len))) { + cmd->result_sg_size = len; complete(&cmd->completion); + } } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&admin_vq->lock, flags); } @@ -228,12 +221,117 @@ end: kfree(data); } +static void +virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev); + struct virtio_admin_cmd_cap_get_data *get_data; + struct virtio_admin_cmd_cap_set_data *set_data; + struct virtio_dev_parts_cap *result; + struct virtio_admin_cmd cmd = {}; + struct scatterlist result_sg; + struct scatterlist data_sg; + u8 resource_objects_limit; + u16 set_data_size; + int ret; + + get_data = kzalloc(sizeof(*get_data), GFP_KERNEL); + if (!get_data) + return; + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) + goto end; + + get_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP); + sg_init_one(&data_sg, get_data, sizeof(*get_data)); + sg_init_one(&result_sg, result, sizeof(*result)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.data_sg = &data_sg; + cmd.result_sg = &result_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto err_get; + + set_data_size = sizeof(*set_data) + sizeof(*result); + set_data = kzalloc(set_data_size, GFP_KERNEL); + if (!set_data) + goto err_get; + + set_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP); + + /* Set the limit to the minimum value between the GET and SET values + * supported by the device. Since the obj_id for VIRTIO_DEV_PARTS_CAP + * is a globally unique value per PF, there is no possibility of + * overlap between GET and SET operations. 
+ */ + resource_objects_limit = min(result->get_parts_resource_objects_limit, + result->set_parts_resource_objects_limit); + result->get_parts_resource_objects_limit = resource_objects_limit; + result->set_parts_resource_objects_limit = resource_objects_limit; + memcpy(set_data->cap_specific_data, result, sizeof(*result)); + sg_init_one(&data_sg, set_data, set_data_size); + cmd.data_sg = &data_sg; + cmd.result_sg = NULL; + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET); + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto err_set; + + /* Allocate IDR to manage the dev caps objects */ + ida_init(&vp_dev->admin_vq.dev_parts_ida); + vp_dev->admin_vq.max_dev_parts_objects = resource_objects_limit; + +err_set: + kfree(set_data); +err_get: + kfree(result); +end: + kfree(get_data); +} + +static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev); + struct virtio_admin_cmd_query_cap_id_result *data; + struct virtio_admin_cmd cmd = {}; + struct scatterlist result_sg; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + + sg_init_one(&result_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.result_sg = &result_sg; + + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (ret) + goto end; + + /* Max number of caps fits into a single u64 */ + BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64)); + + vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]); + + if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP))) + goto end; + + virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev); +end: + kfree(data); +} + static void vp_modern_avq_activate(struct virtio_device *vdev) { if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) return; virtio_pci_admin_cmd_list_init(vdev); + virtio_pci_admin_cmd_cap_init(vdev); } static void vp_modern_avq_cleanup(struct virtio_device *vdev) @@ -245,7 +343,7 @@ static void vp_modern_avq_cleanup(struct virtio_device *vdev) if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) return; - vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq; + vq = vp_dev->admin_vq.info->vq; if (!vq) return; @@ -768,6 +866,353 @@ static bool vp_get_shm_region(struct virtio_device *vdev, return true; } +/* + * virtio_pci_admin_has_dev_parts - Checks whether the device parts + * functionality is supported + * @pdev: VF pci_dev + * + * Returns true on success. + */ +bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_pci_device *vp_dev; + + if (!virtio_dev) + return false; + + if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ)) + return false; + + vp_dev = to_vp_device(virtio_dev); + + if (!((vp_dev->admin_vq.supported_cmds & VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) == + VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP)) + return false; + + return vp_dev->admin_vq.max_dev_parts_objects; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_has_dev_parts); + +/* + * virtio_pci_admin_mode_set - Sets the mode of a member device + * @pdev: VF pci_dev + * @flags: device mode's flags + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
+ */ +int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 flags) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_mode_set_data *data; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->flags = flags; + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_MODE_SET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_mode_set); + +/* + * virtio_pci_admin_obj_create - Creates an object for a given type and operation, + * following the max objects that can be created for that request. + * @pdev: VF pci_dev + * @obj_type: Object type + * @operation_type: Operation type + * @obj_id: Output unique object id + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type, + u32 *obj_id) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + u16 data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data); + struct virtio_admin_cmd_resource_obj_create_data *obj_create_data; + struct virtio_resource_obj_dev_parts obj_dev_parts = {}; + struct virtio_pci_admin_vq *avq; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + void *data; + int id = -1; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS) + return -EOPNOTSUPP; + + if (operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET && + operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET) + return -EINVAL; + + avq = &to_vp_device(virtio_dev)->admin_vq; + if (!avq->max_dev_parts_objects) + return -EOPNOTSUPP; + + id = ida_alloc_range(&avq->dev_parts_ida, 0, + avq->max_dev_parts_objects - 1, GFP_KERNEL); + if (id < 0) + return id; + + *obj_id = id; + data_size += sizeof(obj_dev_parts); + data = kzalloc(data_size, GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto end; + } + + obj_create_data = data; + obj_create_data->hdr.type = cpu_to_le16(obj_type); + obj_create_data->hdr.id = cpu_to_le32(*obj_id); + obj_dev_parts.type = operation_type; + memcpy(obj_create_data->resource_obj_specific_data, &obj_dev_parts, + sizeof(obj_dev_parts)); + sg_init_one(&data_sg, data, data_size); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + + kfree(data); +end: + if (ret) + ida_free(&avq->dev_parts_ida, id); + + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_create); + +/* + * virtio_pci_admin_obj_destroy - Destroys an object of a given type and id + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
+ */ +int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_resource_obj_cmd_hdr *data; + struct virtio_pci_device *vp_dev; + struct virtio_admin_cmd cmd = {}; + struct scatterlist data_sg; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS) + return -EINVAL; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->type = cpu_to_le16(obj_type); + data->id = cpu_to_le32(id); + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) { + vp_dev = to_vp_device(virtio_dev); + ida_free(&vp_dev->admin_vq.dev_parts_ida, id); + } + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_destroy); + +/* + * virtio_pci_admin_dev_parts_metadata_get - Gets the metadata of the device parts + * identified by the below attributes. + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * @metadata_type: Metadata type + * @out: Upon success holds the output for 'metadata type size' + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type, + u32 id, u8 metadata_type, u32 *out) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_parts_metadata_result *result; + struct virtio_admin_cmd_dev_parts_metadata_data *data; + struct scatterlist data_sg, result_sg; + struct virtio_admin_cmd cmd = {}; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + if (metadata_type != VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE) + return -EOPNOTSUPP; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + result = kzalloc(sizeof(*result), GFP_KERNEL); + if (!result) { + ret = -ENOMEM; + goto end; + } + + data->hdr.type = cpu_to_le16(obj_type); + data->hdr.id = cpu_to_le32(id); + data->type = metadata_type; + sg_init_one(&data_sg, data, sizeof(*data)); + sg_init_one(&result_sg, result, sizeof(*result)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + cmd.result_sg = &result_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) + *out = le32_to_cpu(result->parts_size.size); + + kfree(result); +end: + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_metadata_get); + +/* + * virtio_pci_admin_dev_parts_get - Gets the device parts identified by the below attributes. + * @pdev: VF pci_dev + * @obj_type: Object type + * @id: Object id + * @get_type: Get type + * @res_sg: Upon success holds the output result data + * @res_size: Upon success holds the output result size + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. 
+ */ +int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id, + u8 get_type, struct scatterlist *res_sg, + u32 *res_size) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd_dev_parts_get_data *data; + struct scatterlist data_sg; + struct virtio_admin_cmd cmd = {}; + int vf_id; + int ret; + + if (!virtio_dev) + return -ENODEV; + + if (get_type != VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL) + return -EOPNOTSUPP; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->hdr.type = cpu_to_le16(obj_type); + data->hdr.id = cpu_to_le32(id); + data->type = get_type; + sg_init_one(&data_sg, data, sizeof(*data)); + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_GET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = &data_sg; + cmd.result_sg = res_sg; + ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd); + if (!ret) + *res_size = cmd.result_sg_size; + + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_get); + +/* + * virtio_pci_admin_dev_parts_set - Sets the device parts identified by the below attributes. + * @pdev: VF pci_dev + * @data_sg: The device parts data, its layout follows struct virtio_admin_cmd_dev_parts_set_data + * + * Note: caller must serialize access for the given device. + * Returns 0 on success, or negative on failure. + */ +int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg) +{ + struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev); + struct virtio_admin_cmd cmd = {}; + int vf_id; + + if (!virtio_dev) + return -ENODEV; + + vf_id = pci_iov_vf_id(pdev); + if (vf_id < 0) + return vf_id; + + cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_SET); + cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV); + cmd.group_member_id = cpu_to_le64(vf_id + 1); + cmd.data_sg = data_sg; + return vp_modern_admin_cmd_exec(virtio_dev, &cmd); +} +EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_set); + static const struct virtio_config_ops virtio_pci_config_nodev_ops = { .get = NULL, .set = NULL, diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 98374ed7c577..fdd2d2b07b5a 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -69,12 +69,20 @@ struct vring_desc_state_split { void *data; /* Data for callback. */ - struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ + + /* Indirect desc table and extra table, if any. These two will be + * allocated together. So we won't stress more to the memory allocator. + */ + struct vring_desc *indir_desc; }; struct vring_desc_state_packed { void *data; /* Data for callback. */ - struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */ + + /* Indirect desc table and extra table, if any. These two will be + * allocated together. So we won't stress more to the memory allocator. + */ + struct vring_packed_desc *indir_desc; u16 num; /* Descriptor list length. */ u16 last; /* The last desc state in a list. */ }; @@ -172,14 +180,6 @@ struct vring_virtqueue { /* Host publishes avail event idx */ bool event; - /* Do DMA mapping by driver */ - bool premapped; - - /* Do unmap or not for desc. Just when premapped is False and - * use_dma_api is true, this is true. - */ - bool do_unmap; - /* Head of free buffer list. 
*/ unsigned int free_head; /* Number we've added since last sync. */ @@ -223,15 +223,6 @@ struct vring_virtqueue { #endif }; -static struct virtqueue *__vring_new_virtqueue(unsigned int index, - struct vring_virtqueue_split *vring_split, - struct virtio_device *vdev, - bool weak_barriers, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev); static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num); static void vring_free(struct virtqueue *_vq); @@ -297,6 +288,12 @@ static bool vring_use_dma_api(const struct virtio_device *vdev) return false; } +static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring, + const struct vring_desc_extra *extra) +{ + return vring->use_dma_api && (extra->addr != DMA_MAPPING_ERROR); +} + size_t virtio_max_dma_size(const struct virtio_device *vdev) { size_t max_segment_size = SIZE_MAX; @@ -364,13 +361,17 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq) /* Map one sg entry. */ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg, - enum dma_data_direction direction, dma_addr_t *addr) + enum dma_data_direction direction, dma_addr_t *addr, + u32 *len, bool premapped) { - if (vq->premapped) { + if (premapped) { *addr = sg_dma_address(sg); + *len = sg_dma_len(sg); return 0; } + *len = sg->length; + if (!vq->use_dma_api) { /* * If DMA is not used, KMSAN doesn't know that the scatterlist @@ -440,61 +441,44 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num) * Split ring specific functions - *_split(). */ -static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq, - const struct vring_desc *desc) -{ - u16 flags; - - if (!vq->do_unmap) - return; - - flags = virtio16_to_cpu(vq->vq.vdev, desc->flags); - - dma_unmap_page(vring_dma_dev(vq), - virtio64_to_cpu(vq->vq.vdev, desc->addr), - virtio32_to_cpu(vq->vq.vdev, desc->len), - (flags & VRING_DESC_F_WRITE) ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); -} - static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, - unsigned int i) + struct vring_desc_extra *extra) { - struct vring_desc_extra *extra = vq->split.desc_extra; u16 flags; - flags = extra[i].flags; + flags = extra->flags; if (flags & VRING_DESC_F_INDIRECT) { if (!vq->use_dma_api) goto out; dma_unmap_single(vring_dma_dev(vq), - extra[i].addr, - extra[i].len, + extra->addr, + extra->len, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { - if (!vq->do_unmap) + if (!vring_need_unmap_buffer(vq, extra)) goto out; dma_unmap_page(vring_dma_dev(vq), - extra[i].addr, - extra[i].len, + extra->addr, + extra->len, (flags & VRING_DESC_F_WRITE) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); } out: - return extra[i].next; + return extra->next; } static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, unsigned int total_sg, gfp_t gfp) { + struct vring_desc_extra *extra; struct vring_desc *desc; - unsigned int i; + unsigned int i, size; /* * We require lowmem mappings for the descriptors because @@ -503,40 +487,41 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq, */ gfp &= ~__GFP_HIGHMEM; - desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp); + size = sizeof(*desc) * total_sg + sizeof(*extra) * total_sg; + + desc = kmalloc(size, gfp); if (!desc) return NULL; + extra = (struct vring_desc_extra *)&desc[total_sg]; + for (i = 0; i < total_sg; i++) - desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); + extra[i].next = i + 1; + return desc; } static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, struct vring_desc *desc, + struct vring_desc_extra *extra, unsigned int i, dma_addr_t addr, unsigned int len, - u16 flags, - bool indirect) + u16 flags, bool premapped) { - struct vring_virtqueue *vring = to_vvq(vq); - struct vring_desc_extra *extra = vring->split.desc_extra; u16 next; desc[i].flags = cpu_to_virtio16(vq->vdev, flags); desc[i].addr = cpu_to_virtio64(vq->vdev, addr); desc[i].len = cpu_to_virtio32(vq->vdev, len); - if (!indirect) { - next = extra[i].next; - desc[i].next = cpu_to_virtio16(vq->vdev, next); + extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr; + extra[i].len = len; + extra[i].flags = flags; + + next = extra[i].next; - extra[i].addr = addr; - extra[i].len = len; - extra[i].flags = flags; - } else - next = virtio16_to_cpu(vq->vdev, desc[i].next); + desc[i].next = cpu_to_virtio16(vq->vdev, next); return next; } @@ -548,9 +533,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, unsigned int in_sgs, void *data, void *ctx, + bool premapped, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); + struct vring_desc_extra *extra; struct scatterlist *sg; struct vring_desc *desc; unsigned int i, n, avail, descs_used, prev, err_idx; @@ -586,9 +573,11 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, /* Set up rest to use this indirect table. */ i = 0; descs_used = 1; + extra = (struct vring_desc_extra *)&desc[total_sg]; } else { indirect = false; desc = vq->split.vring.desc; + extra = vq->split.desc_extra; i = head; descs_used = total_sg; } @@ -610,40 +599,41 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr; + u32 len; - if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr)) + if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped)) goto unmap_release; prev = i; /* Note that we trust indirect descriptor * table since it use stream DMA mapping. */ - i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length, + i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len, VRING_DESC_F_NEXT, - indirect); + premapped); } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { dma_addr_t addr; + u32 len; - if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr)) + if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped)) goto unmap_release; prev = i; /* Note that we trust indirect descriptor * table since it use stream DMA mapping. 
*/ - i = virtqueue_add_desc_split(_vq, desc, i, addr, - sg->length, + i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, - indirect); + premapped); } } /* Last one doesn't continue. */ desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); - if (!indirect && vq->do_unmap) + if (!indirect && vring_need_unmap_buffer(vq, &extra[prev])) vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= ~VRING_DESC_F_NEXT; @@ -652,18 +642,14 @@ static inline int virtqueue_add_split(struct virtqueue *_vq, dma_addr_t addr = vring_map_single( vq, desc, total_sg * sizeof(struct vring_desc), DMA_TO_DEVICE); - if (vring_mapping_error(vq, addr)) { - if (vq->premapped) - goto free_indirect; - + if (vring_mapping_error(vq, addr)) goto unmap_release; - } virtqueue_add_desc_split(_vq, vq->split.vring.desc, + vq->split.desc_extra, head, addr, total_sg * sizeof(struct vring_desc), - VRING_DESC_F_INDIRECT, - false); + VRING_DESC_F_INDIRECT, false); } /* We're using some buffers from the free list. */ @@ -716,14 +702,10 @@ unmap_release: for (n = 0; n < total_sg; n++) { if (i == err_idx) break; - if (indirect) { - vring_unmap_one_split_indirect(vq, &desc[i]); - i = virtio16_to_cpu(_vq->vdev, desc[i].next); - } else - i = vring_unmap_one_split(vq, i); + + i = vring_unmap_one_split(vq, &extra[i]); } -free_indirect: if (indirect) kfree(desc); @@ -765,22 +747,25 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq) static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, void **ctx) { + struct vring_desc_extra *extra; unsigned int i, j; __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); /* Clear data ptr. */ vq->split.desc_state[head].data = NULL; + extra = vq->split.desc_extra; + /* Put back on free list: unmap first-level descriptors and find end */ i = head; while (vq->split.vring.desc[i].flags & nextflag) { - vring_unmap_one_split(vq, i); + vring_unmap_one_split(vq, &extra[i]); i = vq->split.desc_extra[i].next; vq->vq.num_free++; } - vring_unmap_one_split(vq, i); + vring_unmap_one_split(vq, &extra[i]); vq->split.desc_extra[i].next = vq->free_head; vq->free_head = head; @@ -790,21 +775,24 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, if (vq->indirect) { struct vring_desc *indir_desc = vq->split.desc_state[head].indir_desc; - u32 len; + u32 len, num; /* Free the indirect table, if any, now that it's unmapped. 
*/ if (!indir_desc) return; - len = vq->split.desc_extra[head].len; BUG_ON(!(vq->split.desc_extra[head].flags & VRING_DESC_F_INDIRECT)); BUG_ON(len == 0 || len % sizeof(struct vring_desc)); - if (vq->do_unmap) { - for (j = 0; j < len / sizeof(struct vring_desc); j++) - vring_unmap_one_split_indirect(vq, &indir_desc[j]); + num = len / sizeof(struct vring_desc); + + extra = (struct vring_desc_extra *)&indir_desc[num]; + + if (vq->use_dma_api) { + for (j = 0; j < num; j++) + vring_unmap_one_split(vq, &extra[j]); } kfree(indir_desc); @@ -1138,6 +1126,64 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split, return 0; } +static struct virtqueue *__vring_new_virtqueue_split(unsigned int index, + struct vring_virtqueue_split *vring_split, + struct virtio_device *vdev, + bool weak_barriers, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) +{ + struct vring_virtqueue *vq; + int err; + + vq = kmalloc(sizeof(*vq), GFP_KERNEL); + if (!vq) + return NULL; + + vq->packed_ring = false; + vq->vq.callback = callback; + vq->vq.vdev = vdev; + vq->vq.name = name; + vq->vq.index = index; + vq->vq.reset = false; + vq->we_own_ring = false; + vq->notify = notify; + vq->weak_barriers = weak_barriers; +#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION + vq->broken = true; +#else + vq->broken = false; +#endif + vq->dma_dev = dma_dev; + vq->use_dma_api = vring_use_dma_api(vdev); + + vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && + !context; + vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + + if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) + vq->weak_barriers = false; + + err = vring_alloc_state_extra_split(vring_split); + if (err) { + kfree(vq); + return NULL; + } + + virtqueue_vring_init_split(vring_split, vq); + + virtqueue_init(vq, vring_split->vring.num); + virtqueue_vring_attach_split(vq, vring_split); + + spin_lock(&vdev->vqs_list_lock); + list_add_tail(&vq->vq.list, &vdev->vqs); + spin_unlock(&vdev->vqs_list_lock); + return &vq->vq; +} + static struct virtqueue *vring_create_virtqueue_split( unsigned int index, unsigned int num, @@ -1160,7 +1206,7 @@ static struct virtqueue *vring_create_virtqueue_split( if (err) return NULL; - vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, + vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers, context, notify, callback, name, dma_dev); if (!vq) { vring_free_split(&vring_split, vdev, dma_dev); @@ -1236,7 +1282,7 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq, (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { - if (!vq->do_unmap) + if (!vring_need_unmap_buffer(vq, extra)) return; dma_unmap_page(vring_dma_dev(vq), @@ -1246,27 +1292,12 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq, } } -static void vring_unmap_desc_packed(const struct vring_virtqueue *vq, - const struct vring_packed_desc *desc) -{ - u16 flags; - - if (!vq->do_unmap) - return; - - flags = le16_to_cpu(desc->flags); - - dma_unmap_page(vring_dma_dev(vq), - le64_to_cpu(desc->addr), - le32_to_cpu(desc->len), - (flags & VRING_DESC_F_WRITE) ? 
- DMA_FROM_DEVICE : DMA_TO_DEVICE); -} - static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg, gfp_t gfp) { + struct vring_desc_extra *extra; struct vring_packed_desc *desc; + int i, size; /* * We require lowmem mappings for the descriptors because @@ -1275,7 +1306,16 @@ static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg, */ gfp &= ~__GFP_HIGHMEM; - desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp); + size = (sizeof(*desc) + sizeof(*extra)) * total_sg; + + desc = kmalloc(size, gfp); + if (!desc) + return NULL; + + extra = (struct vring_desc_extra *)&desc[total_sg]; + + for (i = 0; i < total_sg; i++) + extra[i].next = i + 1; return desc; } @@ -1286,11 +1326,13 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, unsigned int out_sgs, unsigned int in_sgs, void *data, + bool premapped, gfp_t gfp) { + struct vring_desc_extra *extra; struct vring_packed_desc *desc; struct scatterlist *sg; - unsigned int i, n, err_idx; + unsigned int i, n, err_idx, len; u16 head, id; dma_addr_t addr; @@ -1299,6 +1341,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, if (!desc) return -ENOMEM; + extra = (struct vring_desc_extra *)&desc[total_sg]; + if (unlikely(vq->vq.num_free < 1)) { pr_debug("Can't add buf len 1 - avail = 0\n"); kfree(desc); @@ -1313,13 +1357,21 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, for (n = 0; n < out_sgs + in_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { if (vring_map_one_sg(vq, sg, n < out_sgs ? - DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr)) + DMA_TO_DEVICE : DMA_FROM_DEVICE, + &addr, &len, premapped)) goto unmap_release; desc[i].flags = cpu_to_le16(n < out_sgs ? 0 : VRING_DESC_F_WRITE); desc[i].addr = cpu_to_le64(addr); - desc[i].len = cpu_to_le32(sg->length); + desc[i].len = cpu_to_le32(len); + + if (unlikely(vq->use_dma_api)) { + extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr; + extra[i].len = len; + extra[i].flags = n < out_sgs ? 
0 : VRING_DESC_F_WRITE; + } + i++; } } @@ -1328,12 +1380,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, addr = vring_map_single(vq, desc, total_sg * sizeof(struct vring_packed_desc), DMA_TO_DEVICE); - if (vring_mapping_error(vq, addr)) { - if (vq->premapped) - goto free_desc; - + if (vring_mapping_error(vq, addr)) goto unmap_release; - } vq->packed.vring.desc[head].addr = cpu_to_le64(addr); vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * @@ -1389,9 +1437,8 @@ unmap_release: err_idx = i; for (i = 0; i < err_idx; i++) - vring_unmap_desc_packed(vq, &desc[i]); + vring_unmap_extra_packed(vq, &extra[i]); -free_desc: kfree(desc); END_USE(vq); @@ -1405,12 +1452,13 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, unsigned int in_sgs, void *data, void *ctx, + bool premapped, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); struct vring_packed_desc *desc; struct scatterlist *sg; - unsigned int i, n, c, descs_used, err_idx; + unsigned int i, n, c, descs_used, err_idx, len; __le16 head_flags, flags; u16 head, id, prev, curr, avail_used_flags; int err; @@ -1431,7 +1479,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, if (virtqueue_use_indirect(vq, total_sg)) { err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, - in_sgs, data, gfp); + in_sgs, data, premapped, gfp); if (err != -ENOMEM) { END_USE(vq); return err; @@ -1466,7 +1514,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, dma_addr_t addr; if (vring_map_one_sg(vq, sg, n < out_sgs ? - DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr)) + DMA_TO_DEVICE : DMA_FROM_DEVICE, + &addr, &len, premapped)) goto unmap_release; flags = cpu_to_le16(vq->packed.avail_used_flags | @@ -1478,12 +1527,13 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, desc[i].flags = flags; desc[i].addr = cpu_to_le64(addr); - desc[i].len = cpu_to_le32(sg->length); + desc[i].len = cpu_to_le32(len); desc[i].id = cpu_to_le16(id); if (unlikely(vq->use_dma_api)) { - vq->packed.desc_extra[curr].addr = addr; - vq->packed.desc_extra[curr].len = sg->length; + vq->packed.desc_extra[curr].addr = premapped ? + DMA_MAPPING_ERROR : addr; + vq->packed.desc_extra[curr].len = len; vq->packed.desc_extra[curr].flags = le16_to_cpu(flags); } @@ -1625,18 +1675,22 @@ static void detach_buf_packed(struct vring_virtqueue *vq, } if (vq->indirect) { - u32 len; + struct vring_desc_extra *extra; + u32 len, num; /* Free the indirect table, if any, now that it's unmapped. 
*/ desc = state->indir_desc; if (!desc) return; - if (vq->do_unmap) { + if (vq->use_dma_api) { len = vq->packed.desc_extra[id].len; - for (i = 0; i < len / sizeof(struct vring_packed_desc); - i++) - vring_unmap_desc_packed(vq, &desc[i]); + num = len / sizeof(struct vring_packed_desc); + + extra = (struct vring_desc_extra *)&desc[num]; + + for (i = 0; i < num; i++) + vring_unmap_extra_packed(vq, &extra[i]); } kfree(desc); state->indir_desc = NULL; @@ -2050,36 +2104,29 @@ static void virtqueue_reinit_packed(struct vring_virtqueue *vq) virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); } -static struct virtqueue *vring_create_virtqueue_packed( - unsigned int index, - unsigned int num, - unsigned int vring_align, - struct virtio_device *vdev, - bool weak_barriers, - bool may_reduce_num, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev) +static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index, + struct vring_virtqueue_packed *vring_packed, + struct virtio_device *vdev, + bool weak_barriers, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) { - struct vring_virtqueue_packed vring_packed = {}; struct vring_virtqueue *vq; int err; - if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) - goto err_ring; - vq = kmalloc(sizeof(*vq), GFP_KERNEL); if (!vq) - goto err_vq; + return NULL; vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; vq->vq.index = index; vq->vq.reset = false; - vq->we_own_ring = true; + vq->we_own_ring = false; vq->notify = notify; vq->weak_barriers = weak_barriers; #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION @@ -2090,8 +2137,6 @@ static struct virtqueue *vring_create_virtqueue_packed( vq->packed_ring = true; vq->dma_dev = dma_dev; vq->use_dma_api = vring_use_dma_api(vdev); - vq->premapped = false; - vq->do_unmap = vq->use_dma_api; vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && !context; @@ -2100,26 +2145,52 @@ static struct virtqueue *vring_create_virtqueue_packed( if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) vq->weak_barriers = false; - err = vring_alloc_state_extra_packed(&vring_packed); - if (err) - goto err_state_extra; + err = vring_alloc_state_extra_packed(vring_packed); + if (err) { + kfree(vq); + return NULL; + } - virtqueue_vring_init_packed(&vring_packed, !!callback); + virtqueue_vring_init_packed(vring_packed, !!callback); - virtqueue_init(vq, num); - virtqueue_vring_attach_packed(vq, &vring_packed); + virtqueue_init(vq, vring_packed->vring.num); + virtqueue_vring_attach_packed(vq, vring_packed); spin_lock(&vdev->vqs_list_lock); list_add_tail(&vq->vq.list, &vdev->vqs); spin_unlock(&vdev->vqs_list_lock); return &vq->vq; +} -err_state_extra: - kfree(vq); -err_vq: - vring_free_packed(&vring_packed, vdev, dma_dev); -err_ring: - return NULL; +static struct virtqueue *vring_create_virtqueue_packed( + unsigned int index, + unsigned int num, + unsigned int vring_align, + struct virtio_device *vdev, + bool weak_barriers, + bool may_reduce_num, + bool context, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name, + struct device *dma_dev) +{ + struct vring_virtqueue_packed vring_packed = {}; + struct virtqueue *vq; + + if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev)) + return NULL; + + vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, 
weak_barriers, + context, notify, callback, name, dma_dev); + if (!vq) { + vring_free_packed(&vring_packed, vdev, dma_dev); + return NULL; + } + + to_vvq(vq)->we_own_ring = true; + + return vq; } static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num) @@ -2201,14 +2272,15 @@ static inline int virtqueue_add(struct virtqueue *_vq, unsigned int in_sgs, void *data, void *ctx, + bool premapped, gfp_t gfp) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, - out_sgs, in_sgs, data, ctx, gfp) : + out_sgs, in_sgs, data, ctx, premapped, gfp) : virtqueue_add_split(_vq, sgs, total_sg, - out_sgs, in_sgs, data, ctx, gfp); + out_sgs, in_sgs, data, ctx, premapped, gfp); } /** @@ -2242,7 +2314,7 @@ int virtqueue_add_sgs(struct virtqueue *_vq, total_sg++; } return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, - data, NULL, gfp); + data, NULL, false, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); @@ -2264,11 +2336,34 @@ int virtqueue_add_outbuf(struct virtqueue *vq, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); + return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** + * virtqueue_add_outbuf_premapped - expose output buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg readable by other side + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Return: + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). + */ +int virtqueue_add_outbuf_premapped(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped); + +/** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) @@ -2286,7 +2381,7 @@ int virtqueue_add_inbuf(struct virtqueue *vq, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); + return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); @@ -2310,11 +2405,36 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq, void *ctx, gfp_t gfp) { - return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); + return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx); /** + * virtqueue_add_inbuf_premapped - expose input buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg writable by other side + * @data: the token identifying the buffer. + * @ctx: extra context for the token + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Return: + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 
+ */ +int virtqueue_add_inbuf_premapped(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + void *ctx, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped); + +/** * virtqueue_dma_dev - get the dma dev * @_vq: the struct virtqueue we're talking about. * @@ -2598,70 +2718,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq) } EXPORT_SYMBOL_GPL(vring_interrupt); -/* Only available for split ring */ -static struct virtqueue *__vring_new_virtqueue(unsigned int index, - struct vring_virtqueue_split *vring_split, - struct virtio_device *vdev, - bool weak_barriers, - bool context, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name, - struct device *dma_dev) -{ - struct vring_virtqueue *vq; - int err; - - if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) - return NULL; - - vq = kmalloc(sizeof(*vq), GFP_KERNEL); - if (!vq) - return NULL; - - vq->packed_ring = false; - vq->vq.callback = callback; - vq->vq.vdev = vdev; - vq->vq.name = name; - vq->vq.index = index; - vq->vq.reset = false; - vq->we_own_ring = false; - vq->notify = notify; - vq->weak_barriers = weak_barriers; -#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION - vq->broken = true; -#else - vq->broken = false; -#endif - vq->dma_dev = dma_dev; - vq->use_dma_api = vring_use_dma_api(vdev); - vq->premapped = false; - vq->do_unmap = vq->use_dma_api; - - vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && - !context; - vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); - - if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) - vq->weak_barriers = false; - - err = vring_alloc_state_extra_split(vring_split); - if (err) { - kfree(vq); - return NULL; - } - - virtqueue_vring_init_split(vring_split, vq); - - virtqueue_init(vq, vring_split->vring.num); - virtqueue_vring_attach_split(vq, vring_split); - - spin_lock(&vdev->vqs_list_lock); - list_add_tail(&vq->vq.list, &vdev->vqs); - spin_unlock(&vdev->vqs_list_lock); - return &vq->vq; -} - struct virtqueue *vring_create_virtqueue( unsigned int index, unsigned int num, @@ -2716,6 +2772,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma); * @_vq: the struct virtqueue we're talking about. * @num: new ring num * @recycle: callback to recycle unused buffers + * @recycle_done: callback to be invoked when recycle for all unused buffers done * * When it is really necessary to create a new vring, it will set the current vq * into the reset state. Then call the passed callback to recycle the buffer @@ -2736,7 +2793,8 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma); * */ int virtqueue_resize(struct virtqueue *_vq, u32 num, - void (*recycle)(struct virtqueue *vq, void *buf)) + void (*recycle)(struct virtqueue *vq, void *buf), + void (*recycle_done)(struct virtqueue *vq)) { struct vring_virtqueue *vq = to_vvq(_vq); int err; @@ -2753,6 +2811,8 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num, err = virtqueue_disable_and_recycle(_vq, recycle); if (err) return err; + if (recycle_done) + recycle_done(_vq); if (vq->packed_ring) err = virtqueue_resize_packed(_vq, num); @@ -2764,53 +2824,10 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num, EXPORT_SYMBOL_GPL(virtqueue_resize); /** - * virtqueue_set_dma_premapped - set the vring premapped mode - * @_vq: the struct virtqueue we're talking about. - * - * Enable the premapped mode of the vq. 
- * - * The vring in premapped mode does not do dma internally, so the driver must - * do dma mapping in advance. The driver must pass the dma_address through - * dma_address of scatterlist. When the driver got a used buffer from - * the vring, it has to unmap the dma address. - * - * This function must be called immediately after creating the vq, or after vq - * reset, and before adding any buffers to it. - * - * Caller must ensure we don't call this with other virtqueue operations - * at the same time (except where noted). - * - * Returns zero or a negative error. - * 0: success. - * -EINVAL: too late to enable premapped mode, the vq already contains buffers. - */ -int virtqueue_set_dma_premapped(struct virtqueue *_vq) -{ - struct vring_virtqueue *vq = to_vvq(_vq); - u32 num; - - START_USE(vq); - - num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; - - if (num != vq->vq.num_free) { - END_USE(vq); - return -EINVAL; - } - - vq->premapped = true; - vq->do_unmap = false; - - END_USE(vq); - - return 0; -} -EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped); - -/** * virtqueue_reset - detach and recycle all unused buffers * @_vq: the struct virtqueue we're talking about. * @recycle: callback to recycle unused buffers + * @recycle_done: callback to be invoked when recycle for all unused buffers done * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). @@ -2822,7 +2839,8 @@ EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped); * -EPERM: Operation not permitted */ int virtqueue_reset(struct virtqueue *_vq, - void (*recycle)(struct virtqueue *vq, void *buf)) + void (*recycle)(struct virtqueue *vq, void *buf), + void (*recycle_done)(struct virtqueue *vq)) { struct vring_virtqueue *vq = to_vvq(_vq); int err; @@ -2830,6 +2848,8 @@ int virtqueue_reset(struct virtqueue *_vq, err = virtqueue_disable_and_recycle(_vq, recycle); if (err) return err; + if (recycle_done) + recycle_done(_vq); if (vq->packed_ring) virtqueue_reinit_packed(vq); @@ -2840,7 +2860,6 @@ int virtqueue_reset(struct virtqueue *_vq, } EXPORT_SYMBOL_GPL(virtqueue_reset); -/* Only available for split ring */ struct virtqueue *vring_new_virtqueue(unsigned int index, unsigned int num, unsigned int vring_align, @@ -2854,11 +2873,19 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, { struct vring_virtqueue_split vring_split = {}; - if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) - return NULL; + if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) { + struct vring_virtqueue_packed vring_packed = {}; + + vring_packed.vring.num = num; + vring_packed.vring.desc = pages; + return __vring_new_virtqueue_packed(index, &vring_packed, + vdev, weak_barriers, + context, notify, callback, + name, vdev->dev.parent); + } vring_init(&vring_split.vring, num, pages, vring_align); - return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, + return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers, context, notify, callback, name, vdev->dev.parent); } diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index 7364bd53e38d..1f60c9d5cb18 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -364,14 +364,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev); struct vdpa_device *vdpa = vd_get_vdpa(vdev); const struct vdpa_config_ops *ops = vdpa->config; - struct irq_affinity default_affd = { 0 }; struct cpumask 
*masks; struct vdpa_callback cb; bool has_affinity = desc && ops->set_vq_affinity; int i, err, queue_idx = 0; if (has_affinity) { - masks = create_affinity_masks(nvqs, desc ? desc : &default_affd); + masks = create_affinity_masks(nvqs, desc); if (!masks) return -ENOMEM; } |
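
Usage sketch (not part of the diff above): the new bus-level .irq_get_affinity hook lets generic code ask a virtio device for the CPU affinity of a given interrupt vector through the driver core; the virtio bus answers via the transport's get_vq_affinity hook. A hypothetical caller, assuming it already knows the vector index, might look like:

static const struct cpumask *example_vector_affinity(struct device *dev,
						     unsigned int vec)
{
	/* Falls back to NULL when the bus or transport has no affinity info. */
	if (!dev->bus || !dev->bus->irq_get_affinity)
		return NULL;

	return dev->bus->irq_get_affinity(dev, vec);
}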
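Usage sketch (not part of the diff above): the new device-parts admin helpers are intended to be driven from a VF-side consumer such as a live-migration driver. Reading a member device's parts could look roughly like the hypothetical helper below; buffer handling is simplified, and a real caller would typically freeze the device first via virtio_pci_admin_mode_set().

static int example_read_dev_parts(struct pci_dev *vf, void *buf, u32 buf_len,
				  u32 *out_len)
{
	struct scatterlist sg;
	u32 obj_id, parts_size;
	int ret;

	if (!virtio_pci_admin_has_dev_parts(vf))
		return -EOPNOTSUPP;

	/* Create a GET-type device-parts object on the PF for this VF. */
	ret = virtio_pci_admin_obj_create(vf, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
					  VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET,
					  &obj_id);
	if (ret)
		return ret;

	/* Query how much data the device will hand back. */
	ret = virtio_pci_admin_dev_parts_metadata_get(vf,
				VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
				VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE,
				&parts_size);
	if (ret)
		goto destroy;
	if (parts_size > buf_len) {
		ret = -ENOSPC;
		goto destroy;
	}

	/* Pull the parts into the caller's buffer. */
	sg_init_one(&sg, buf, buf_len);
	ret = virtio_pci_admin_dev_parts_get(vf, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
					     obj_id,
					     VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL,
					     &sg, out_len);

destroy:
	virtio_pci_admin_obj_destroy(vf, VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id);
	return ret;
}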
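Usage sketch (not part of the diff above): premapped operation is now selected per buffer through the new virtqueue_add_inbuf_premapped()/virtqueue_add_outbuf_premapped() entry points instead of the removed virtqueue_set_dma_premapped(). A driver that manages its own DMA mappings might queue a receive buffer as below; the helper name and error handling are illustrative, while virtqueue_dma_map_single_attrs() and virtqueue_dma_mapping_error() are the existing core helpers for driver-side mappings.

static int example_add_premapped_rx(struct virtqueue *vq, void *buf, size_t len)
{
	struct scatterlist sg;
	dma_addr_t addr;

	/* The driver owns the mapping; the vring core neither maps nor unmaps it. */
	addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_FROM_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, addr))
		return -ENOMEM;

	sg_init_one(&sg, buf, len);
	sg_dma_address(&sg) = addr;
	sg_dma_len(&sg) = len;

	/* The core records DMA_MAPPING_ERROR in desc_extra for premapped adds,
	 * so vring_need_unmap_buffer() skips the unmap on completion. */
	return virtqueue_add_inbuf_premapped(vq, &sg, 1, buf, NULL, GFP_ATOMIC);
}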
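Usage sketch (not part of the diff above): virtqueue_resize() and virtqueue_reset() now take an additional recycle_done callback, invoked once after every unused buffer has been recycled; callers that do not need the notification can pass NULL. The helpers below are hypothetical driver code.

static void example_recycle(struct virtqueue *vq, void *buf)
{
	/* Return one unused buffer to the driver's pool or free it. */
}

static void example_recycle_done(struct virtqueue *vq)
{
	/* Runs once after all unused buffers are back, e.g. to tear down
	 * per-queue premapped DMA state before the ring is rebuilt. */
}

static int example_resize(struct virtqueue *vq, u32 new_num)
{
	return virtqueue_resize(vq, new_num, example_recycle,
				example_recycle_done);
}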