author	Dragos Tatulea <dtatulea@nvidia.com>	2024-06-26 12:26:59 +0200
committer	Michael S. Tsirkin <mst@redhat.com>	2024-07-09 14:42:50 +0200
commit	2638134f710364c9e696a155bf16c6847959b1d9 (patch)
tree	bf79c317bdcf8e8ab74e89fd7902f247f87fa69a /drivers/vdpa
parent	vdpa/mlx5: Re-create HW VQs under certain conditions (diff)
vdpa/mlx5: Don't reset VQs more than necessary
The vdpa device can be reset many times in sequence without any
significant state changes in between. Previously this was not a problem:
VQs were torn down only on first reset. But after VQ pre-creation was
introduced, each reset will delete and re-create the hardware VQs and
their associated resources.

To solve this problem, avoid resetting hardware VQs if the VQs are still
in a blank state.

Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Message-Id: <20240626-stage-vdpa-vq-precreate-v2-23-560c491078df@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vdpa')
-rw-r--r--	drivers/vdpa/mlx5/net/mlx5_vnet.c	30
1 file changed, 27 insertions, 3 deletions
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index ea4bfd9afce9..573dc01df8c3 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3134,18 +3134,41 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
mvdev->group2asid[i] = 0;
}
+static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[0];
+
+ if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)
+ return true;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT)
+ return true;
+
+ return mvq->modified_fields & (
+ MLX5_VIRTQ_MODIFY_MASK_STATE |
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS |
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX |
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX
+ );
+}
+
static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ bool vq_reset;
print_status(mvdev, 0, true);
mlx5_vdpa_info(mvdev, "performing device reset\n");
down_write(&ndev->reslock);
unregister_link_notifier(ndev);
- teardown_vq_resources(ndev);
- mvqs_set_defaults(ndev);
+ vq_reset = needs_vqs_reset(mvdev);
+ if (vq_reset) {
+ teardown_vq_resources(ndev);
+ mvqs_set_defaults(ndev);
+ }
if (flags & VDPA_RESET_F_CLEAN_MAP)
mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
@@ -3165,7 +3188,8 @@ static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
if (mlx5_vdpa_create_dma_mr(mvdev))
mlx5_vdpa_warn(mvdev, "create MR failed\n");
}
- setup_vq_resources(ndev, false);
+ if (vq_reset)
+ setup_vq_resources(ndev, false);
up_write(&ndev->reslock);
return 0;
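
The reset-skip behaviour introduced above can be illustrated in isolation with a small userspace mock. This is only a sketch of the idea, not kernel code: struct mock_vdpa_dev, compat_reset(), and the Q_STATE_*/MODIFY_MASK_* values are hypothetical stand-ins for the driver's real structures and flags.

```c
/* Standalone illustration of the "skip VQ reset while blank" logic. */
#include <stdbool.h>
#include <stdio.h>

#define VIRTIO_CONFIG_S_DRIVER_OK	0x4	/* standard virtio status bit */
#define Q_STATE_INIT			0	/* mock: blank, pre-created VQ */
#define Q_STATE_RDY			1	/* mock: VQ in use */

#define MODIFY_MASK_STATE		(1 << 0)	/* mock modify flags */
#define MODIFY_MASK_ADDRS		(1 << 1)

struct mock_vdpa_dev {
	unsigned int status;
	int vq_fw_state;
	unsigned int vq_modified_fields;
};

/* Mirrors the shape of needs_vqs_reset(): only reset when the driver has
 * taken over the device or the VQs are no longer in their blank state. */
static bool needs_vqs_reset(const struct mock_vdpa_dev *mvdev)
{
	if (mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)
		return true;

	if (mvdev->vq_fw_state != Q_STATE_INIT)
		return true;

	return mvdev->vq_modified_fields & (MODIFY_MASK_STATE | MODIFY_MASK_ADDRS);
}

static void compat_reset(struct mock_vdpa_dev *mvdev)
{
	if (needs_vqs_reset(mvdev)) {
		/* In the driver, teardown_vq_resources(), mvqs_set_defaults()
		 * and setup_vq_resources() would run here. */
		printf("reset: tearing down and re-creating HW VQs\n");
		mvdev->status = 0;
		mvdev->vq_fw_state = Q_STATE_INIT;
		mvdev->vq_modified_fields = 0;
	} else {
		printf("reset: VQs still blank, skipping HW VQ re-creation\n");
	}
}

int main(void)
{
	struct mock_vdpa_dev dev = { .status = VIRTIO_CONFIG_S_DRIVER_OK,
				     .vq_fw_state = Q_STATE_RDY };

	compat_reset(&dev);	/* first reset after driver use: full VQ reset */
	compat_reset(&dev);	/* immediate second reset: skipped */
	return 0;
}
```

Running the mock prints a full teardown for the first reset and a skip for the second, which is the back-to-back reset case the commit message describes.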