author     Linus Torvalds <torvalds@linux-foundation.org>  2021-05-04 19:52:09 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-05-04 19:52:09 +0200
commit     954b7207059cc4004f2e18f49c335304b1c6d64a
tree       8dc4347cead2d2eb247e198c9c50823ba7c4ea01 /drivers/iommu
parent     Merge tag 'm68knommu-for-v5.13' of git://git.kernel.org/pub/scm/linux/kernel/...
parent     dma-mapping: add unlikely hint to error path in dma_mapping_error
Merge tag 'dma-mapping-5.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- add a new dma_alloc_noncontiguous API (me, Ricardo Ribalda); see the
  usage sketch after the shortlog below
- fix a copyright notice (Hao Fang)
- add an unlikely annotation to dma_mapping_error (Heiner Kallweit)
- remove a pointless empty line (Wang Qing)
- add support for multi-page map/unmap benchmarking (Xiang Chen)
* tag 'dma-mapping-5.13' of git://git.infradead.org/users/hch/dma-mapping:
dma-mapping: add unlikely hint to error path in dma_mapping_error
dma-mapping: benchmark: Add support for multi-pages map/unmap
dma-mapping: benchmark: use the correct HiSilicon copyright
dma-mapping: remove a pointless empty line in dma_alloc_coherent
media: uvcvideo: Use dma_alloc_noncontiguous API
dma-iommu: implement ->alloc_noncontiguous
dma-iommu: refactor iommu_dma_alloc_remap
dma-mapping: add a dma_alloc_noncontiguous API
dma-mapping: refactor dma_{alloc,free}_pages
dma-mapping: add a dma_mmap_pages helper
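
For orientation, a driver converted to the new interface (the uvcvideo
change in this series is the in-tree example) allocates a buffer that is
contiguous in IOVA space but may be physically scattered, optionally vmaps
it for CPU access, and frees it through the same API. The sketch below is
illustrative only: struct my_buf and the my_buf_*() functions are
hypothetical, while the dma_*_noncontiguous() calls are the entry points
this series adds.

/* Hypothetical driver-side sketch of the dma_alloc_noncontiguous() API. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

struct my_buf {				/* hypothetical per-buffer state */
	struct sg_table *sgt;
	void *vaddr;
	size_t size;
};

static int my_buf_alloc(struct device *dev, struct my_buf *buf, size_t size)
{
	/*
	 * Allocate DMA memory that is contiguous in device (IOVA) address
	 * space but may be scattered in physical memory; the returned
	 * sg_table describes the backing pages.
	 */
	buf->sgt = dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE,
					   GFP_KERNEL, 0);
	if (!buf->sgt)
		return -ENOMEM;

	/* Optional: create a kernel mapping when the CPU must touch it. */
	buf->vaddr = dma_vmap_noncontiguous(dev, size, buf->sgt);
	if (!buf->vaddr) {
		dma_free_noncontiguous(dev, size, buf->sgt, DMA_FROM_DEVICE);
		return -ENOMEM;
	}
	buf->size = size;
	return 0;
}

static void my_buf_free(struct device *dev, struct my_buf *buf)
{
	dma_vunmap_noncontiguous(dev, buf->vaddr);
	dma_free_noncontiguous(dev, buf->size, buf->sgt, DMA_FROM_DEVICE);
}

CPU accesses between device transfers are bracketed with
dma_sync_sgtable_for_cpu()/dma_sync_sgtable_for_device(), and the same
buffer can be exposed to user space with dma_mmap_noncontiguous().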
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/dma-iommu.c	103
1 file changed, 71 insertions(+), 32 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index efaf5eab40a1..4dadac3135b0 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -647,23 +647,12 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 	return pages;
 }
 
-/**
- * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
- * @dev: Device to allocate memory for. Must be a real device
- *	 attached to an iommu_dma_domain
- * @size: Size of buffer in bytes
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- * @prot: pgprot_t to use for the remapped mapping
- * @attrs: DMA attributes for this allocation
- *
- * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
+/*
+ * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
  * but an IOMMU which supports smaller pages might not map the whole thing.
- *
- * Return: Mapped virtual address, or NULL on failure.
  */
-static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
+		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
 		unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -673,11 +662,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
-	struct sg_table sgt;
 	dma_addr_t iova;
-	void *vaddr;
-
-	*dma_handle = DMA_MAPPING_ERROR;
 
 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
 	    iommu_deferred_attach(dev, domain))
@@ -704,41 +689,91 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	if (!iova)
 		goto out_free_pages;
 
-	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
+	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
 		goto out_free_iova;
 
 	if (!(ioprot & IOMMU_CACHE)) {
 		struct scatterlist *sg;
 		int i;
 
-		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
 			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
-	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+	if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
 			< size)
 		goto out_free_sg;
 
+	sgt->sgl->dma_address = iova;
+	sgt->sgl->dma_length = size;
+	return pages;
+
+out_free_sg:
+	sg_free_table(sgt);
+out_free_iova:
+	iommu_dma_free_iova(cookie, iova, size, NULL);
+out_free_pages:
+	__iommu_dma_free_pages(pages, count);
+	return NULL;
+}
+
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+		unsigned long attrs)
+{
+	struct page **pages;
+	struct sg_table sgt;
+	void *vaddr;
+
+	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
+						attrs);
+	if (!pages)
+		return NULL;
+	*dma_handle = sgt.sgl->dma_address;
+	sg_free_table(&sgt);
 	vaddr = dma_common_pages_remap(pages, size, prot,
 			__builtin_return_address(0));
 	if (!vaddr)
 		goto out_unmap;
-
-	*dma_handle = iova;
-	sg_free_table(&sgt);
 	return vaddr;
 
 out_unmap:
-	__iommu_dma_unmap(dev, iova, size);
-out_free_sg:
-	sg_free_table(&sgt);
-out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size, NULL);
-out_free_pages:
-	__iommu_dma_free_pages(pages, count);
+	__iommu_dma_unmap(dev, *dma_handle, size);
+	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 	return NULL;
 }
+#ifdef CONFIG_DMA_REMAP
+static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
+		size_t size, enum dma_data_direction dir, gfp_t gfp,
+		unsigned long attrs)
+{
+	struct dma_sgt_handle *sh;
+
+	sh = kmalloc(sizeof(*sh), gfp);
+	if (!sh)
+		return NULL;
+
+	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
+						    PAGE_KERNEL, attrs);
+	if (!sh->pages) {
+		kfree(sh);
+		return NULL;
+	}
+	return &sh->sgt;
+}
+
+static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+		struct sg_table *sgt, enum dma_data_direction dir)
+{
+	struct dma_sgt_handle *sh = sgt_handle(sgt);
+
+	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
+	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
+	sg_free_table(&sh->sgt);
+}
+#endif /* CONFIG_DMA_REMAP */
+
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
@@ -1255,6 +1290,10 @@ static const struct dma_map_ops iommu_dma_ops = {
 	.free			= iommu_dma_free,
 	.alloc_pages		= dma_common_alloc_pages,
 	.free_pages		= dma_common_free_pages,
+#ifdef CONFIG_DMA_REMAP
+	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
+	.free_noncontiguous	= iommu_dma_free_noncontiguous,
+#endif
 	.mmap			= iommu_dma_mmap,
 	.get_sgtable		= iommu_dma_get_sgtable,
 	.map_page		= iommu_dma_map_page,
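
For reference, the ->alloc_noncontiguous hook wired up above is optional:
when a dma_map_ops instance does not provide it, the dma-mapping core is
expected to fall back to a single physically contiguous allocation wrapped
in a one-entry sg_table. The following is a rough sketch of that dispatch,
paraphrasing kernel/dma/mapping.c as of this series rather than quoting it;
alloc_single_sgt() stands in for the core's internal fallback helper.

/* Sketch of the core-side dispatch for dma_alloc_noncontiguous(). */
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp); /* contiguous fallback */
	if (sgt)
		sgt->nents = 1;	/* mapped as one contiguous DMA range */
	return sgt;
}

Callers therefore always see a buffer that is contiguous from the device's
point of view, whether or not the backend can remap scattered pages.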