author    Christoph Hellwig <hch@lst.de>    2019-05-20 09:29:34 +0200
committer Joerg Roedel <jroedel@suse.de>    2019-05-27 17:31:11 +0200
commit    21b95aaf5f2212764d37b7569f3829a295e4042c (patch)
tree      90d1afb49a994ee80d5e8077b494215d03fbf194 /drivers/iommu
parent    iommu/dma: Factor out remapped pages lookup (diff)
iommu/dma: Refactor the page array remapping allocator
Move the call to dma_common_pages_remap into __iommu_dma_alloc and rename
it to iommu_dma_alloc_remap. This creates a self-contained helper for
remapped pages allocation and mapping.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
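In caller terms, the refactor collapses the two-step allocate-then-remap
sequence in iommu_dma_alloc() into a single call. A simplified excerpt of
the call sites, as they appear in the hunks below:

    /* before: allocate the page array, then remap it in the caller */
    pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot, handle);
    if (!pages)
            return NULL;
    addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                  __builtin_return_address(0));
    if (!addr)
            __iommu_dma_free(dev, pages, iosize, handle);

    /* after: the helper allocates, maps and remaps internally,
     * and unwinds the IOMMU mapping itself if the remap fails */
    addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);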
Diffstat (limited to 'drivers/iommu')
-rw-r--r--    drivers/iommu/dma-iommu.c    54
1 file changed, 26 insertions, 28 deletions
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 5e81165e6755..0ffb7805de77 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -564,9 +564,9 @@ static struct page **__iommu_dma_get_pages(void *cpu_addr)
}
/**
- * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
+ * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc_remap()
* @dev: Device which owns this buffer
- * @pages: Array of buffer pages as returned by __iommu_dma_alloc()
+ * @pages: Array of buffer pages as returned by __iommu_dma_alloc_remap()
* @size: Size of buffer in bytes
* @handle: DMA address of buffer
*
@@ -582,33 +582,35 @@ static void __iommu_dma_free(struct device *dev, struct page **pages,
}
/**
- * __iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
+ * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
* @dev: Device to allocate memory for. Must be a real device
* attached to an iommu_dma_domain
* @size: Size of buffer in bytes
+ * @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
* @attrs: DMA attributes for this allocation
- * @prot: IOMMU mapping flags
- * @handle: Out argument for allocated DMA handle
*
* If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
* but an IOMMU which supports smaller pages might not map the whole thing.
*
- * Return: Array of struct page pointers describing the buffer,
- * or NULL on failure.
+ * Return: Mapped virtual address, or NULL on failure.
*/
-static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
- gfp_t gfp, unsigned long attrs, int prot, dma_addr_t *handle)
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
+ bool coherent = dev_is_dma_coherent(dev);
+ int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+ pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
dma_addr_t iova;
- unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+ void *vaddr;
- *handle = DMA_MAPPING_ERROR;
+ *dma_handle = DMA_MAPPING_ERROR;
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
@@ -634,7 +636,7 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;
- if (!(prot & IOMMU_CACHE)) {
+ if (!(ioprot & IOMMU_CACHE)) {
struct scatterlist *sg;
int i;
@@ -642,14 +644,21 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
+ if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size)
goto out_free_sg;
- *handle = iova;
+ vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+ __builtin_return_address(0));
+ if (!vaddr)
+ goto out_unmap;
+
+ *dma_handle = iova;
sg_free_table(&sgt);
- return pages;
+ return vaddr;
+out_unmap:
+ __iommu_dma_unmap(dev, iova, size);
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
@@ -1008,18 +1017,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
size >> PAGE_SHIFT);
}
} else {
- pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
- struct page **pages;
-
- pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
- handle);
- if (!pages)
- return NULL;
-
- addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
- __builtin_return_address(0));
- if (!addr)
- __iommu_dma_free(dev, pages, iosize, handle);
+ addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
}
return addr;
}
@@ -1033,7 +1031,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
/*
* @cpu_addr will be one of 4 things depending on how it was allocated:
* - A remapped array of pages for contiguous allocations.
- * - A remapped array of pages from __iommu_dma_alloc(), for all
+ * - A remapped array of pages from iommu_dma_alloc_remap(), for all
* non-atomic allocations.
* - A non-cacheable alias from the atomic pool, for atomic
* allocations by non-coherent devices.