author	Matthew Rosato <mjrosato@linux.ibm.com>	2022-12-02 14:54:02 +0100
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-12-02 16:50:10 +0100
commit	2a54e347d990574ceb047b71ea0b03979232b85e (patch)
tree	dab5d8b945a2d381f221399f64613315806b779c /drivers/s390/crypto
parent	i915/gvt: Move gvt mapping cache initialization to intel_vgpu_init_dev() (diff)
vfio/ap: Validate iova during dma_unmap and trigger irq disable
Currently, each mapped iova is stashed in its associated vfio_ap_queue; when we get an unmap request, validate that it matches with one or more of these stashed values before attempting unpins.

Each stashed iova represents an IRQ that was enabled for a queue. Therefore, if a match is found, trigger IRQ disable for this queue to ensure that the underlying firmware will no longer try to use the associated pfn after the page is unpinned. IRQ disable will also handle the associated unpin.

Link: https://lore.kernel.org/r/20221202135402.756470-3-yi.l.liu@intel.com
Reviewed-by: Tony Krowiak <akrowiak@linux.ibm.com>
Signed-off-by: Matthew Rosato <mjrosato@linux.ibm.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
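[Editorial illustration, not part of the patch.] The validation described above is a half-open range check: a queue's stashed iova is affected by an unmap of [iova, iova + length) when it falls inside that range. A minimal standalone C sketch of that check is shown below; the helper name in_unmapped_range() is hypothetical, while saved_iova, iova, and length follow the patch.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical standalone helper mirroring the comparison made in the
 * patch's unmap_iova(): a stashed iova is affected when it lies inside
 * the half-open range [iova, iova + length) being unmapped.
 */
static bool in_unmapped_range(uint64_t saved_iova, uint64_t iova, uint64_t length)
{
	return saved_iova >= iova && saved_iova < iova + length;
}

In the patch itself this check gates the call to vfio_ap_irq_disable(), which, per the description above, also performs the unpin for the stashed iova.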
Diffstat (limited to 'drivers/s390/crypto')
-rw-r--r--	drivers/s390/crypto/vfio_ap_ops.c	18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 0b4cc8c597ae..8bf353d46820 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1535,13 +1535,29 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
 	return 0;
 }
 
+static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length)
+{
+	struct ap_queue_table *qtable = &matrix_mdev->qtable;
+	struct vfio_ap_queue *q;
+	int loop_cursor;
+
+	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+		if (q->saved_iova >= iova && q->saved_iova < iova + length)
+			vfio_ap_irq_disable(q);
+	}
+}
+
 static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
 				   u64 length)
 {
 	struct ap_matrix_mdev *matrix_mdev =
 		container_of(vdev, struct ap_matrix_mdev, vdev);
 
-	vfio_unpin_pages(&matrix_mdev->vdev, iova, 1);
+	mutex_lock(&matrix_dev->mdevs_lock);
+
+	unmap_iova(matrix_mdev, iova, length);
+
+	mutex_unlock(&matrix_dev->mdevs_lock);
 }
 
 /**