author	Jean-Philippe Brucker <jean-philippe@linaro.org>	2020-03-26 10:35:58 +0100
committer	Joerg Roedel <jroedel@suse.de>	2020-03-27 11:10:20 +0100
commit	39b3b3c9cac1b4a64a6e546c9faf0c1011d775d4 (patch)
tree	e2790d0c80b65e2c208e83e7ecd7406f53083c05 /drivers/iommu
parent	iommu/virtio: Fix freeing of incomplete domains (diff)
iommu/virtio: Reject IOMMU page granule larger than PAGE_SIZE
We don't currently support IOMMUs with a page granule larger than the
system page size. The IOVA allocator has a BUG_ON() in this case, and
VFIO has a WARN_ON().

Removing these obstacles doesn't seem possible without major changes to
the DMA API and VFIO. Some callers of iommu_map(), for example, want to
map multiple page-aligned regions adjacent to each other for
scatter-gather purposes. Even in simple DMA API uses, a call to
dma_map_page() would let the endpoint access neighbouring memory. And
VFIO users cannot ensure that their virtual address buffer is physically
contiguous at the IOMMU granule.

Rather than triggering the IOVA BUG_ON() on mismatched page sizes, abort
the vdomain finalise() with an error message. We could simply abort the
viommu probe(), but an upcoming extension to virtio-iommu will allow
setting different page masks for each endpoint.

Reported-by: Bharat Bhushan <bbhushan2@marvell.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Bharat Bhushan <bbhushan2@marvell.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20200326093558.2641019-4-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
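To make the check concrete, here is a minimal user-space sketch of the
granule computation and comparison. The helper name min_granule and the
sample bitmap values are hypothetical; the kernel itself computes the
granule as 1UL << __ffs(viommu->pgsize_bitmap), as shown in the diff
below.

	#include <stdio.h>

	/* Hypothetical helper for illustration: isolate the lowest set bit
	 * of the page-size bitmap. For a non-zero bitmap this equals the
	 * kernel's 1UL << __ffs(pgsize_bitmap), i.e. the smallest page
	 * granule the IOMMU supports. */
	static unsigned long min_granule(unsigned long pgsize_bitmap)
	{
		return pgsize_bitmap & -pgsize_bitmap;
	}

	int main(void)
	{
		const unsigned long page_size = 4096;	/* stand-in for PAGE_SIZE */
		unsigned long bitmaps[] = { 0x1000, 0x10000 };	/* 4kB and 64kB granules */

		for (int i = 0; i < 2; i++) {
			unsigned long granule = min_granule(bitmaps[i]);

			if (granule > page_size)	/* same condition the patch adds */
				printf("granule 0x%lx larger than system page size 0x%lx\n",
				       granule, page_size);
			else
				printf("granule 0x%lx accepted\n", granule);
		}
		return 0;
	}

On a 4kB-PAGE_SIZE host, the 0x1000 bitmap passes while the 0x10000 one
takes the rejection path, matching the -EINVAL the patch returns from
viommu_domain_finalise().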
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/virtio-iommu.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 9d5d4c941874..1adc73d1f90a 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -607,12 +607,22 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
 	return &vdomain->domain;
 }
 
-static int viommu_domain_finalise(struct viommu_dev *viommu,
+static int viommu_domain_finalise(struct viommu_endpoint *vdev,
 				  struct iommu_domain *domain)
 {
 	int ret;
+	unsigned long viommu_page_size;
+	struct viommu_dev *viommu = vdev->viommu;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
+	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
+	if (viommu_page_size > PAGE_SIZE) {
+		dev_err(vdev->dev,
+			"granule 0x%lx larger than system page size 0x%lx\n",
+			viommu_page_size, PAGE_SIZE);
+		return -EINVAL;
+	}
+
 	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
 			      viommu->last_domain, GFP_KERNEL);
 	if (ret < 0)
@@ -659,7 +669,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		 * Properly initialize the domain now that we know which viommu
 		 * owns it.
 		 */
-		ret = viommu_domain_finalise(vdev->viommu, domain);
+		ret = viommu_domain_finalise(vdev, domain);
 	} else if (vdomain->viommu != vdev->viommu) {
 		dev_err(dev, "cannot attach to foreign vIOMMU\n");
 		ret = -EXDEV;