author	Eric Auger <eric.auger@linaro.org>	2015-10-29 18:49:42 +0100
committer	Alex Williamson <alex.williamson@redhat.com>	2015-11-03 20:53:53 +0100
commit	4644321fd3c119a819ab24fd2bc2d1f9bca4a695 (patch)
tree	3188e61fff67f7233ca755d1b967ac835c413fc4 /drivers/vfio
parent	VFIO: platform: clear IRQ_NOAUTOEN when de-assigning the IRQ (diff)
vfio/type1: handle case where IOMMU does not support PAGE_SIZE size
Current vfio_pgsize_bitmap code hides the supported IOMMU page sizes smaller than PAGE_SIZE. As a result, in case the IOMMU does not support the PAGE_SIZE page size, the alignment check on map/unmap is done with larger page sizes, if any. This can fail although mapping could be done with pages smaller than PAGE_SIZE.

This patch modifies the vfio_pgsize_bitmap implementation so that, in case the IOMMU supports page sizes smaller than PAGE_SIZE, we pretend PAGE_SIZE is supported and hide the sub-PAGE_SIZE sizes. That way the user will be able to map/unmap buffers whose size/start address is aligned with PAGE_SIZE. Pinning code uses that granularity while the iommu driver can use the sub-PAGE_SIZE sizes to map the buffer.

Signed-off-by: Eric Auger <eric.auger@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
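From user space the change is only visible through the iova_pgsizes bitmap returned by VFIO_IOMMU_GET_INFO: after this patch its lowest set bit is never smaller than PAGE_SIZE, so a PAGE_SIZE-aligned map/unmap is always accepted whenever the IOMMU can map at all. Below is a minimal sketch of that user-space view, not part of the patch: map_one_page() is a hypothetical helper, "container" is assumed to be a VFIO container fd already bound to the type1 IOMMU with VFIO_SET_IOMMU, and error handling is trimmed.

#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/vfio.h>

/* Hypothetical helper: "container" is assumed to be a VFIO container fd
 * already configured with VFIO_SET_IOMMU(VFIO_TYPE1_IOMMU). */
static int map_one_page(int container, uint64_t iova)
{
	struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };
	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
	long page_size = sysconf(_SC_PAGESIZE);
	uint64_t min_pgsize;
	void *buf;

	/* iova_pgsizes is the bitmap built by vfio_pgsize_bitmap(); with
	 * this patch its lowest set bit is never below PAGE_SIZE. */
	if (ioctl(container, VFIO_IOMMU_GET_INFO, &info))
		return -1;
	min_pgsize = info.iova_pgsizes & ~(info.iova_pgsizes - 1);
	printf("smallest mappable granule: 0x%llx\n",
	       (unsigned long long)min_pgsize);

	/* mmap() returns PAGE_SIZE-aligned memory, which is now always an
	 * acceptable mapping granularity. */
	buf = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return -1;

	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	map.vaddr = (uintptr_t)buf;
	map.iova  = iova;	/* caller keeps this PAGE_SIZE aligned */
	map.size  = page_size;
	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}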
Diffstat (limited to 'drivers/vfio')
-rw-r--r--	drivers/vfio/vfio_iommu_type1.c | 15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 57d8c37a002b..59d47cb638d5 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -403,13 +403,26 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
 {
 	struct vfio_domain *domain;
-	unsigned long bitmap = PAGE_MASK;
+	unsigned long bitmap = ULONG_MAX;
 
 	mutex_lock(&iommu->lock);
 	list_for_each_entry(domain, &iommu->domain_list, next)
 		bitmap &= domain->domain->ops->pgsize_bitmap;
 	mutex_unlock(&iommu->lock);
 
+	/*
+	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
+	 * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
+	 * That way the user will be able to map/unmap buffers whose size/
+	 * start address is aligned with PAGE_SIZE. Pinning code uses that
+	 * granularity while iommu driver can use the sub-PAGE_SIZE size
+	 * to map the buffer.
+	 */
+	if (bitmap & ~PAGE_MASK) {
+		bitmap &= PAGE_MASK;
+		bitmap |= PAGE_SIZE;
+	}
+
 	return bitmap;
 }
 
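The clamp itself is plain bit arithmetic on the merged pgsize_bitmap. The standalone sketch below walks through it with illustrative values only (a 64KiB-page kernel whose IOMMU granule is 4KiB, plus 2MiB and 1GiB block sizes); it is not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 1UL << 16;		/* 64KiB CPU pages */
	unsigned long page_mask = ~(page_size - 1);	/* PAGE_MASK */
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
						/* IOMMU: 4K, 2M, 1G */

	/* Same clamp as the patch: if any supported size is smaller than
	 * PAGE_SIZE, hide it and advertise PAGE_SIZE instead. */
	if (bitmap & ~page_mask) {
		bitmap &= page_mask;	/* drop the 4K bit */
		bitmap |= page_size;	/* advertise 64K */
	}

	/* The old code started from PAGE_MASK, so the 4K bit was simply
	 * lost and the smallest advertised size jumped to 2M; now it is
	 * 64K: prints 0x40210000 (64K | 2M | 1G). */
	printf("advertised bitmap: 0x%lx\n", bitmap);
	return 0;
}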