author    Linus Torvalds <torvalds@linux-foundation.org>  2022-01-18 09:05:48 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-01-18 09:05:48 +0100
commit    3bf6a9e36e441714928d73a5adbc59562eb7ef19 (patch)
tree      736a123fe4be1ef70ec02ce52f6bc25f39a1f87c /drivers/vdpa
parent    Merge tag 'rproc-v5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/remo... (diff)
parent    vdpa/mlx5: Fix tracking of current number of VQs (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
 "virtio,vdpa,qemu_fw_cfg: features, cleanups, and fixes.

  - partial support for < MAX_ORDER - 1 granularity for virtio-mem

  - driver_override for vdpa

  - sysfs ABI documentation for vdpa

  - multiqueue config support for mlx5 vdpa

  - and misc fixes, cleanups"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (42 commits)
  vdpa/mlx5: Fix tracking of current number of VQs
  vdpa/mlx5: Fix is_index_valid() to refer to features
  vdpa: Protect vdpa reset with cf_mutex
  vdpa: Avoid taking cf_mutex lock on get status
  vdpa/vdpa_sim_net: Report max device capabilities
  vdpa: Use BIT_ULL for bit operations
  vdpa/vdpa_sim: Configure max supported virtqueues
  vdpa/mlx5: Report max device capabilities
  vdpa: Support reporting max device capabilities
  vdpa/mlx5: Restore cur_num_vqs in case of failure in change_num_qps()
  vdpa: Add support for returning device configuration information
  vdpa/mlx5: Support configuring max data virtqueue
  vdpa/mlx5: Fix config_attr_mask assignment
  vdpa: Allow to configure max data virtqueues
  vdpa: Read device configuration only if FEATURES_OK
  vdpa: Sync calls set/get config/status with cf_mutex
  vdpa/mlx5: Distribute RX virtqueues in RQT object
  vdpa: Provide interface to read driver features
  vdpa: clean up get_config_size ret value handling
  virtio_ring: mark ring unused on error
  ...
Diffstat (limited to 'drivers/vdpa')
-rw-r--r--  drivers/vdpa/alibaba/eni_vdpa.c        28
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.c        41
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.h         9
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c        40
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c     156
-rw-r--r--  drivers/vdpa/vdpa.c                   163
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.c       21
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim_net.c    2
-rw-r--r--  drivers/vdpa/vdpa_user/vduse_dev.c     19
-rw-r--r--  drivers/vdpa/virtio_pci/vp_vdpa.c      16
10 files changed, 350 insertions, 145 deletions
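
The change that recurs in every driver below is an API split in struct vdpa_config_ops: the old get_features/set_features pair becomes get_device_features (what the device offers), set_driver_features, and the new get_driver_features (what the driver negotiated), so the core can read negotiated features back. A minimal sketch of how a backend wires these up; my_vdpa and my_dev() are hypothetical stand-ins, not a real driver:

    /* Sketch only: my_dev() is a hypothetical accessor for driver state. */
    static u64 my_vdpa_get_device_features(struct vdpa_device *vdpa)
    {
            /* Feature bits the device offers, e.g. BIT_ULL(VIRTIO_F_VERSION_1). */
            return my_dev(vdpa)->device_features;
    }

    static int my_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
    {
            my_dev(vdpa)->driver_features = features; /* negotiated result */
            return 0;
    }

    static u64 my_vdpa_get_driver_features(struct vdpa_device *vdpa)
    {
            /* New callback: lets the vdpa core report negotiated features. */
            return my_dev(vdpa)->driver_features;
    }

    static const struct vdpa_config_ops my_vdpa_ops = {
            .get_device_features = my_vdpa_get_device_features,
            .set_driver_features = my_vdpa_set_driver_features,
            .get_driver_features = my_vdpa_get_driver_features,
            /* ... remaining ops ... */
    };
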
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index 3f788794571a..f480d54f308c 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -58,7 +58,7 @@ static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
return &eni_vdpa->ldev;
}
-static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
u64 features = vp_legacy_get_features(ldev);
@@ -69,7 +69,7 @@ static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
return features;
}
-static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
@@ -84,6 +84,13 @@ static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_driver_features(ldev);
+}
+
static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
@@ -401,8 +408,9 @@ static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
}
static const struct vdpa_config_ops eni_vdpa_ops = {
- .get_features = eni_vdpa_get_features,
- .set_features = eni_vdpa_set_features,
+ .get_device_features = eni_vdpa_get_device_features,
+ .set_driver_features = eni_vdpa_set_driver_features,
+ .get_driver_features = eni_vdpa_get_driver_features,
.get_status = eni_vdpa_get_status,
.set_status = eni_vdpa_set_status,
.reset = eni_vdpa_reset,
@@ -450,11 +458,6 @@ static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
return num;
}
-static void eni_vdpa_free_irq_vectors(void *data)
-{
- pci_free_irq_vectors(data);
-}
-
static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -488,13 +491,6 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
eni_vdpa->vdpa.dma_dev = &pdev->dev;
eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);
- ret = devm_add_action_or_reset(dev, eni_vdpa_free_irq_vectors, pdev);
- if (ret) {
- ENI_ERR(pdev,
- "failed for adding devres for freeing irq vectors\n");
- goto err;
- }
-
eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
sizeof(*eni_vdpa->vring),
GFP_KERNEL);
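
For the legacy PCI transport behind eni_vdpa, the new get_driver_features callback maps to the guest-features register in legacy I/O space. A plausible sketch of what vp_legacy_get_driver_features() reads underneath, per the legacy virtio-PCI register layout (not the verbatim implementation):

    /* Legacy virtio PCI: host (device) features at offset 0, guest at 4. */
    #define VIRTIO_PCI_HOST_FEATURES   0
    #define VIRTIO_PCI_GUEST_FEATURES  4

    static u32 legacy_get_guest_features(void __iomem *ioaddr)
    {
            /* Legacy devices expose only 32 feature bits. */
            return ioread32(ioaddr + VIRTIO_PCI_GUEST_FEATURES);
    }
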
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 2808f1ba9f7b..7d41dfe48ade 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -143,8 +143,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
- hw->net_cfg = get_cap_addr(hw, &cap);
- IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
+ hw->dev_cfg = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
@@ -153,7 +153,7 @@ next:
}
if (hw->common_cfg == NULL || hw->notify_base == NULL ||
- hw->isr == NULL || hw->net_cfg == NULL) {
+ hw->isr == NULL || hw->dev_cfg == NULL) {
IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
return -EIO;
}
@@ -174,7 +174,7 @@ next:
IFCVF_DBG(pdev,
"PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n",
hw->common_cfg, hw->notify_base, hw->isr,
- hw->net_cfg, hw->notify_off_multiplier);
+ hw->dev_cfg, hw->notify_off_multiplier);
return 0;
}
@@ -242,33 +242,54 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
return 0;
}
-void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+{
+ struct ifcvf_adapter *adapter;
+ u32 config_size;
+
+ adapter = vf_to_adapter(hw);
+ switch (hw->dev_type) {
+ case VIRTIO_ID_NET:
+ config_size = sizeof(struct virtio_net_config);
+ break;
+ case VIRTIO_ID_BLOCK:
+ config_size = sizeof(struct virtio_blk_config);
+ break;
+ default:
+ config_size = 0;
+ IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
+ }
+
+ return config_size;
+}
+
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
void *dst, int length)
{
u8 old_gen, new_gen, *p;
int i;
- WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ WARN_ON(offset + length > hw->config_size);
do {
old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
p = dst;
for (i = 0; i < length; i++)
- *p++ = ifc_ioread8(hw->net_cfg + offset + i);
+ *p++ = ifc_ioread8(hw->dev_cfg + offset + i);
new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
} while (old_gen != new_gen);
}
-void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length)
{
const u8 *p;
int i;
p = src;
- WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ WARN_ON(offset + length > hw->config_size);
for (i = 0; i < length; i++)
- ifc_iowrite8(*p++, hw->net_cfg + offset + i);
+ ifc_iowrite8(*p++, hw->dev_cfg + offset + i);
}
static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
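
The renamed read path keeps ifcvf's generation-counter retry loop: if the device updates its config space while the bytes are being copied, config_generation changes and the copy restarts, so callers never observe a torn snapshot. The pattern in isolation (a sketch; struct my_hw and its fields are hypothetical):

    /* Copy `len` config bytes consistently using the generation counter. */
    static void read_config_snapshot(struct my_hw *hw, void *dst, int len)
    {
            u8 old_gen, new_gen, *p;
            int i;

            do {
                    old_gen = ioread8(hw->config_generation);
                    p = dst;
                    for (i = 0; i < len; i++)
                            *p++ = ioread8(hw->dev_cfg + i);
                    new_gen = ioread8(hw->config_generation);
            } while (old_gen != new_gen); /* config changed mid-copy: retry */
    }
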
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 09918af3ecf8..c486873f370a 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -71,12 +71,14 @@ struct ifcvf_hw {
u64 hw_features;
u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
- void __iomem *net_cfg;
+ void __iomem *dev_cfg;
struct vring_info vring[IFCVF_MAX_QUEUES];
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
unsigned int config_irq;
+ /* virtio-net or virtio-blk device config size */
+ u32 config_size;
};
struct ifcvf_adapter {
@@ -105,9 +107,9 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
int ifcvf_start_hw(struct ifcvf_hw *hw);
void ifcvf_stop_hw(struct ifcvf_hw *hw);
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
-void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
void *dst, int length);
-void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length);
u8 ifcvf_get_status(struct ifcvf_hw *hw);
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
@@ -120,4 +122,5 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 6dc75ca70b37..d1a6b5ab543c 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -169,7 +169,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
return &adapter->vf;
}
-static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
+static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
@@ -187,7 +187,7 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
return features;
}
-static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
+static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
int ret;
@@ -201,6 +201,13 @@ static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
return 0;
}
+static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->req_features;
+}
+
static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
@@ -366,24 +373,9 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
- struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- struct pci_dev *pdev = adapter->pdev;
- size_t size;
-
- switch (vf->dev_type) {
- case VIRTIO_ID_NET:
- size = sizeof(struct virtio_net_config);
- break;
- case VIRTIO_ID_BLOCK:
- size = sizeof(struct virtio_blk_config);
- break;
- default:
- size = 0;
- IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
- }
- return size;
+ return vf->config_size;
}
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
@@ -392,8 +384,7 @@ static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- WARN_ON(offset + len > sizeof(struct virtio_net_config));
- ifcvf_read_net_config(vf, offset, buf, len);
+ ifcvf_read_dev_config(vf, offset, buf, len);
}
static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
@@ -402,8 +393,7 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- WARN_ON(offset + len > sizeof(struct virtio_net_config));
- ifcvf_write_net_config(vf, offset, buf, len);
+ ifcvf_write_dev_config(vf, offset, buf, len);
}
static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
@@ -443,8 +433,9 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
- .get_features = ifcvf_vdpa_get_features,
- .set_features = ifcvf_vdpa_set_features,
+ .get_device_features = ifcvf_vdpa_get_device_features,
+ .set_driver_features = ifcvf_vdpa_set_driver_features,
+ .get_driver_features = ifcvf_vdpa_get_driver_features,
.get_status = ifcvf_vdpa_get_status,
.set_status = ifcvf_vdpa_set_status,
.reset = ifcvf_vdpa_reset,
@@ -542,6 +533,7 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
vf->vring[i].irq = -EINVAL;
vf->hw_features = ifcvf_get_hw_features(vf);
+ vf->config_size = ifcvf_get_config_size(vf);
adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index cf59f7e17c6d..f648f1c54a0f 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -131,25 +131,24 @@ struct mlx5_vdpa_virtqueue {
struct mlx5_vq_restore_info ri;
};
-/* We will remove this limitation once mlx5_vdpa_alloc_resources()
- * provides for driver space allocation
- */
-#define MLX5_MAX_SUPPORTED_VQS 16
-
static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
{
- if (unlikely(idx > mvdev->max_idx))
- return false;
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ return idx < 2;
+ else
+ return idx < 3;
+ }
- return true;
+ return idx <= mvdev->max_idx;
}
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
struct virtio_net_config config;
- struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
- struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
+ struct mlx5_vdpa_virtqueue *vqs;
+ struct vdpa_callback *event_cbs;
/* Serialize vq resources creation and destruction. This is required
* since memory map might change and we need to destroy and create
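
The rewritten is_index_valid() encodes the virtio-net virtqueue layout: data queues occupy indices 0..2N-1 (RX/TX interleaved) and the control VQ, when negotiated, comes right after them, so without VIRTIO_NET_F_MQ the CVQ sits at index 2. The same rule as a standalone sketch (hypothetical helper name):

    /* virtio-net VQ indices: rx0=0, tx0=1, rx1=2, tx1=3, ..., cvq last. */
    static bool index_valid(bool mq, bool ctrl_vq, u16 max_vq_pairs, u16 idx)
    {
            u16 data_vqs = mq ? 2 * max_vq_pairs : 2;

            return idx < data_vqs + (ctrl_vq ? 1 : 0);
    }
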
@@ -876,8 +875,6 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
- if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
- MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
if (err)
@@ -1218,7 +1215,7 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
{
int i;
- for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
+ for (i = 0; i < ndev->mvdev.max_vqs; i++)
suspend_vq(ndev, &ndev->vqs[i]);
}
@@ -1244,8 +1241,14 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
void *in;
int i, j;
int err;
+ int num;
- max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
+ if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+ num = 1;
+ else
+ num = ndev->cur_num_vqs / 2;
+
+ max_rqt = min_t(int, roundup_pow_of_two(num),
1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
if (max_rqt < 1)
return -EOPNOTSUPP;
@@ -1261,17 +1264,10 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < max_rqt; j++) {
- if (!ndev->vqs[j].initialized)
- continue;
-
- if (!vq_is_tx(ndev->vqs[j].index)) {
- list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
- i++;
- }
- }
- MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1292,7 +1288,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
int i, j;
int err;
- max_rqt = min_t(int, ndev->cur_num_vqs / 2,
+ max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
if (max_rqt < 1)
return -EOPNOTSUPP;
@@ -1308,16 +1304,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < num; j++) {
- if (!ndev->vqs[j].initialized)
- continue;
+ for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- if (!vq_is_tx(ndev->vqs[j].index)) {
- list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
- i++;
- }
- }
- MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
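
Both create_rqt() and modify_rqt() now size the table to a power of two and fill it by wrapping over the even-numbered (RX) virtqueues, rather than scanning for initialized non-TX queues. Stripped to its core, the distribution looks like this (a standalone sketch, not the mlx5 code):

    /* Fill a power-of-two RQT by cycling over num_rx RX queue ids; wrapping
     * guarantees every slot maps to a live RX queue even when num_rx is not
     * itself a power of two. */
    static void fill_rqt(u32 *rqt, int rqt_size, const u32 *rx_qids, int num_rx)
    {
            int i;

            for (i = 0; i < rqt_size; i++)
                    rqt[i] = rx_qids[i % num_rx];
    }
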
@@ -1554,9 +1544,11 @@ static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
return 0;
clean_added:
- for (--i; i >= cur_qps; --i)
+ for (--i; i >= 2 * cur_qps; --i)
teardown_vq(ndev, &ndev->vqs[i]);
+ ndev->cur_num_vqs = 2 * cur_qps;
+
return err;
}
@@ -1581,9 +1573,6 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
break;
}
- if (newqps & (newqps - 1))
- break;
-
if (!change_num_qps(mvdev, newqps))
status = VIRTIO_NET_OK;
@@ -1880,21 +1869,29 @@ static u64 mlx_to_vritio_features(u16 dev_features)
return result;
}
-static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
+static u64 get_supported_features(struct mlx5_core_dev *mdev)
{
- struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ u64 mlx_vdpa_features = 0;
u16 dev_features;
- dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
- ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features);
- if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
+ dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mdev, device_features_bits_mask);
+ mlx_vdpa_features |= mlx_to_vritio_features(dev_features);
+ if (MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_version_1_0))
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_F_VERSION_1);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
+
+ return mlx_vdpa_features;
+}
+
+static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
@@ -1972,7 +1969,7 @@ static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
}
}
-static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -1985,6 +1982,11 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
return err;
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+ if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
+ ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+ else
+ ndev->cur_num_vqs = 2;
+
update_cvq_info(mvdev);
return err;
}
@@ -2235,7 +2237,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
- memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+ ndev->cur_num_vqs = 0;
+ memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
++mvdev->generation;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
@@ -2308,6 +2311,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
}
mlx5_vdpa_free_resources(&ndev->mvdev);
mutex_destroy(&ndev->reslock);
+ kfree(ndev->event_cbs);
+ kfree(ndev->vqs);
}
static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
@@ -2339,6 +2344,13 @@ static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
return -EOPNOTSUPP;
}
+static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ return mvdev->actual_features;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2351,8 +2363,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
- .get_features = mlx5_vdpa_get_features,
- .set_features = mlx5_vdpa_set_features,
+ .get_device_features = mlx5_vdpa_get_device_features,
+ .set_driver_features = mlx5_vdpa_set_driver_features,
+ .get_driver_features = mlx5_vdpa_get_driver_features,
.set_config_cb = mlx5_vdpa_set_config_cb,
.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
.get_device_id = mlx5_vdpa_get_device_id,
@@ -2545,18 +2558,39 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
return -EOPNOTSUPP;
}
- /* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
- max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+ if (max_vqs < 2) {
+ dev_warn(mdev->device,
+ "%d virtqueues are supported. At least 2 are required\n",
+ max_vqs);
+ return -EAGAIN;
+ }
+
+ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
+ if (add_config->net.max_vq_pairs > max_vqs / 2)
+ return -EINVAL;
+ max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs);
+ } else {
+ max_vqs = 2;
+ }
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
+ ndev->mvdev.mlx_features = mgtdev->mgtdev.supported_features;
ndev->mvdev.max_vqs = max_vqs;
mvdev = &ndev->mvdev;
mvdev->mdev = mdev;
+
+ ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
+ ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
+ if (!ndev->vqs || !ndev->event_cbs) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
init_mvqs(ndev);
mutex_init(&ndev->reslock);
config = &ndev->config;
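
To make the clamping above concrete: a device advertising max_num_virtio_queues = 16 can back at most 8 data VQ pairs, so a request for max_vq_pairs = 5 yields 10 data virtqueues, a request for 9 fails with -EINVAL, and omitting the attribute falls back to a single pair. A worked sketch with assumed values:

    /* Hypothetical walk-through of the clamp in mlx5_vdpa_dev_add(). */
    u32 device_max_vqs = 16;  /* from the max_num_virtio_queues capability */
    u16 requested_pairs = 5;  /* from VDPA_ATTR_DEV_NET_CFG_MAX_VQP */

    /* requested_pairs (5) <= device_max_vqs / 2 (8), so it is accepted: */
    u32 max_vqs = min_t(u32, device_max_vqs, 2 * requested_pairs); /* = 10 */
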
@@ -2612,9 +2646,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
- ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
mvdev->vdev.mdev = &mgtdev->mgtdev;
- err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
+ err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
if (err)
goto err_reg;
@@ -2634,6 +2667,7 @@ err_mpfs:
mlx5_mpfs_del_mac(pfmdev, config->mac);
err_mtu:
mutex_destroy(&ndev->reslock);
+err_alloc:
put_device(&mvdev->vdev.dev);
return err;
}
@@ -2676,7 +2710,11 @@ static int mlx5v_probe(struct auxiliary_device *adev,
mgtdev->mgtdev.ops = &mdev_ops;
mgtdev->mgtdev.device = mdev->device;
mgtdev->mgtdev.id_table = id_table;
- mgtdev->mgtdev.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+ mgtdev->mgtdev.max_supported_vqs =
+ MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
+ mgtdev->mgtdev.supported_features = get_supported_features(mdev);
mgtdev->madev = madev;
err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 09bbe53c3ac4..9846c9de4bfa 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -21,6 +21,14 @@ static LIST_HEAD(mdev_head);
static DEFINE_MUTEX(vdpa_dev_mutex);
static DEFINE_IDA(vdpa_index_ida);
+void vdpa_set_status(struct vdpa_device *vdev, u8 status)
+{
+ mutex_lock(&vdev->cf_mutex);
+ vdev->config->set_status(vdev, status);
+ mutex_unlock(&vdev->cf_mutex);
+}
+EXPORT_SYMBOL(vdpa_set_status);
+
static struct genl_family vdpa_nl_family;
static int vdpa_dev_probe(struct device *d)
@@ -52,8 +60,81 @@ static void vdpa_dev_remove(struct device *d)
drv->remove(vdev);
}
+static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+
+ /* Check override first, and if set, only use the named driver */
+ if (vdev->driver_override)
+ return strcmp(vdev->driver_override, drv->name) == 0;
+
+ /* Currently devices must be supported by all vDPA bus drivers */
+ return 1;
+}
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+ const char *driver_override, *old;
+ char *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = vdev->driver_override;
+ if (strlen(driver_override)) {
+ vdev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ vdev->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
+ device_unlock(dev);
+
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *vdpa_dev_attrs[] = {
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static const struct attribute_group vdpa_dev_group = {
+ .attrs = vdpa_dev_attrs,
+};
+__ATTRIBUTE_GROUPS(vdpa_dev);
+
static struct bus_type vdpa_bus = {
.name = "vdpa",
+ .dev_groups = vdpa_dev_groups,
+ .match = vdpa_dev_match,
.probe = vdpa_dev_probe,
.remove = vdpa_dev_remove,
};
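
driver_override mirrors the PCI and platform-bus attribute of the same name: writing a driver name pins the device to that vDPA bus driver, and writing an empty string restores the default match-all behavior. From userspace it might be used like this (a sketch; the device name vdpa0 and the vhost_vdpa target are assumptions):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Bind vdpa0 to vhost_vdpa only; an empty write clears the override. */
            int fd = open("/sys/bus/vdpa/devices/vdpa0/driver_override", O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, "vhost_vdpa", strlen("vhost_vdpa")) < 0) {
                    close(fd);
                    return 1;
            }
            return close(fd);
    }
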
@@ -68,6 +149,7 @@ static void vdpa_release_dev(struct device *d)
ida_simple_remove(&vdpa_index_ida, vdev->index);
mutex_destroy(&vdev->cf_mutex);
+ kfree(vdev->driver_override);
kfree(vdev);
}
@@ -300,6 +382,21 @@ void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
+static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ /*
+ * Config accesses aren't supposed to trigger before features are set.
+ * If it does happen we assume a legacy guest.
+ */
+ if (!vdev->features_valid)
+ vdpa_set_features(vdev, 0, true);
+ ops->get_config(vdev, offset, buf, len);
+}
+
/**
* vdpa_get_config - Get one or more device configuration fields.
* @vdev: vdpa device to operate on
@@ -310,16 +407,8 @@ EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len)
{
- const struct vdpa_config_ops *ops = vdev->config;
-
mutex_lock(&vdev->cf_mutex);
- /*
- * Config accesses aren't supposed to trigger before features are set.
- * If it does happen we assume a legacy guest.
- */
- if (!vdev->features_valid)
- vdpa_set_features(vdev, 0);
- ops->get_config(vdev, offset, buf, len);
+ vdpa_get_config_unlocked(vdev, offset, buf, len);
mutex_unlock(&vdev->cf_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
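
Factoring out vdpa_get_config_unlocked() lets netlink paths that already hold cf_mutex (the config-fill code further down now takes it across the status check and the config read) share the same body without self-deadlocking on the non-recursive mutex. The caller-side pattern, as a sketch:

    /* External callers keep using the locked wrapper:
     *     vdpa_get_config(vdev, 0, &cfg, sizeof(cfg));
     * Paths already inside the critical section use the unlocked body: */
    static void example_fill(struct vdpa_device *vdev, struct virtio_net_config *cfg)
    {
            mutex_lock(&vdev->cf_mutex);
            /* ... status/feature checks under the same lock ... */
            vdpa_get_config_unlocked(vdev, 0, cfg, sizeof(*cfg));
            mutex_unlock(&vdev->cf_mutex);
    }
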
@@ -414,6 +503,16 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m
err = -EMSGSIZE;
goto msg_err;
}
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
+ mdev->max_supported_vqs)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
+ mdev->supported_features, VDPA_ATTR_PAD)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
genlmsg_end(msg, hdr);
return 0;
@@ -480,8 +579,9 @@ out:
return msg->len;
}
-#define VDPA_DEV_NET_ATTRS_MASK ((1 << VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
- (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
+#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
@@ -500,12 +600,22 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
- config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
}
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
config.net.mtu =
nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
- config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MTU);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
+ }
+ if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
+ config.net.max_vq_pairs =
+ nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
+ if (!config.net.max_vq_pairs) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "At least one pair of VQs is required");
+ return -EINVAL;
+ }
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
}
/* Skip checking capability if user didn't prefer to configure any
@@ -707,7 +817,7 @@ static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
{
u16 val_u16;
- if ((features & (1ULL << VIRTIO_NET_F_MQ)) == 0)
+ if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
return 0;
val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
@@ -720,7 +830,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
u64 features;
u16 val_u16;
- vdpa_get_config(vdev, 0, &config, sizeof(config));
+ vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
config.mac))
@@ -734,7 +844,10 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
return -EMSGSIZE;
- features = vdev->config->get_features(vdev);
+ features = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
+ VDPA_ATTR_PAD))
+ return -EMSGSIZE;
return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
}
@@ -745,12 +858,23 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
{
u32 device_id;
void *hdr;
+ u8 status;
int err;
+ mutex_lock(&vdev->cf_mutex);
+ status = vdev->config->get_status(vdev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
+ err = -EAGAIN;
+ goto out;
+ }
+
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_CONFIG_GET);
- if (!hdr)
- return -EMSGSIZE;
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto out;
+ }
if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
err = -EMSGSIZE;
@@ -774,11 +898,14 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
if (err)
goto msg_err;
+ mutex_unlock(&vdev->cf_mutex);
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
+out:
+ mutex_unlock(&vdev->cf_mutex);
return err;
}
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 41b0cd17fcba..ddbe142af09a 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -399,14 +399,14 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
return VDPASIM_QUEUE_ALIGN;
}
-static u64 vdpasim_get_features(struct vdpa_device *vdpa)
+static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->dev_attr.supported_features;
}
-static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
+static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -419,6 +419,13 @@ static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->features;
+}
+
static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
struct vdpa_callback *cb)
{
@@ -613,8 +620,9 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
- .get_features = vdpasim_get_features,
- .set_features = vdpasim_set_features,
+ .get_device_features = vdpasim_get_device_features,
+ .set_driver_features = vdpasim_set_driver_features,
+ .get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
.get_device_id = vdpasim_get_device_id,
@@ -642,8 +650,9 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
- .get_features = vdpasim_get_features,
- .set_features = vdpasim_set_features,
+ .get_device_features = vdpasim_get_device_features,
+ .set_driver_features = vdpasim_set_driver_features,
+ .get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
.get_device_id = vdpasim_get_device_id,
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 76dd24abc791..d5324f6fd8c7 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -191,6 +191,8 @@ static struct vdpa_mgmt_dev mgmt_dev = {
.ops = &vdpasim_net_mgmtdev_ops,
.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
1 << VDPA_ATTR_DEV_NET_CFG_MTU),
+ .max_supported_vqs = VDPASIM_NET_VQ_NUM,
+ .supported_features = VDPASIM_NET_FEATURES,
};
static int __init vdpasim_net_init(void)
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index eddcb64a910a..f85d1a08ed87 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -573,14 +573,14 @@ static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
return dev->vq_align;
}
-static u64 vduse_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 vduse_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
return dev->device_features;
}
-static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int vduse_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
@@ -588,6 +588,13 @@ static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vduse_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->driver_features;
+}
+
static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
struct vdpa_callback *cb)
{
@@ -721,8 +728,9 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.set_vq_state = vduse_vdpa_set_vq_state,
.get_vq_state = vduse_vdpa_get_vq_state,
.get_vq_align = vduse_vdpa_get_vq_align,
- .get_features = vduse_vdpa_get_features,
- .set_features = vduse_vdpa_set_features,
+ .get_device_features = vduse_vdpa_get_device_features,
+ .set_driver_features = vduse_vdpa_set_driver_features,
+ .get_driver_features = vduse_vdpa_get_driver_features,
.set_config_cb = vduse_vdpa_set_config_cb,
.get_vq_num_max = vduse_vdpa_get_vq_num_max,
.get_device_id = vduse_vdpa_get_device_id,
@@ -1357,7 +1365,6 @@ err_domain:
err_str:
vduse_dev_destroy(dev);
err:
- kvfree(config_buf);
return ret;
}
@@ -1408,6 +1415,8 @@ static long vduse_ioctl(struct file *file, unsigned int cmd,
}
config.name[VDUSE_NAME_MAX - 1] = '\0';
ret = vduse_create_dev(&config, buf, control->api_version);
+ if (ret)
+ kvfree(buf);
break;
}
case VDUSE_DESTROY_DEV: {
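
The VDUSE change above simplifies buffer ownership: vduse_create_dev() no longer frees the config buffer on its error path; instead the ioctl caller frees it whenever creation fails, so the callee owns the buffer only on success. The convention, annotated (a sketch built from the lines in this diff; the allocation call is an assumption):

    /* buf was allocated by the ioctl handler (e.g. via vmemdup_user()). */
    ret = vduse_create_dev(&config, buf, control->api_version);
    if (ret)
            kvfree(buf);    /* creation failed: the caller still owns buf */
    /* on success the device took ownership and frees buf when destroyed */
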
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index e3ff7875e123..a57e381e830b 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -53,14 +53,14 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
return &vp_vdpa->mdev;
}
-static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_get_features(mdev);
}
-static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -69,6 +69,13 @@ static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_driver_features(mdev);
+}
+
static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -415,8 +422,9 @@ vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
}
static const struct vdpa_config_ops vp_vdpa_ops = {
- .get_features = vp_vdpa_get_features,
- .set_features = vp_vdpa_set_features,
+ .get_device_features = vp_vdpa_get_device_features,
+ .set_driver_features = vp_vdpa_set_driver_features,
+ .get_driver_features = vp_vdpa_get_driver_features,
.get_status = vp_vdpa_get_status,
.set_status = vp_vdpa_set_status,
.reset = vp_vdpa_reset,