Diffstat (limited to 'drivers/media/pci/intel/ipu6/ipu6-dma.c')
-rw-r--r--  drivers/media/pci/intel/ipu6/ipu6-dma.c  208
1 file changed, 99 insertions(+), 109 deletions(-)
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
index 92530a1cc90f..b34022bad83b 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
@@ -39,8 +39,7 @@ static struct vm_info *get_vm_info(struct ipu6_mmu *mmu, dma_addr_t iova)
 	return NULL;
 }
 
-static void __dma_clear_buffer(struct page *page, size_t size,
-			       unsigned long attrs)
+static void __clear_buffer(struct page *page, size_t size, unsigned long attrs)
 {
 	void *ptr;
 
@@ -56,8 +55,7 @@ static void __dma_clear_buffer(struct page *page, size_t size,
 	clflush_cache_range(ptr, size);
 }
 
-static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
-					gfp_t gfp, unsigned long attrs)
+static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs)
 {
 	int count = PHYS_PFN(size);
 	int array_size = count * sizeof(struct page *);
@@ -86,7 +84,7 @@ static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
 				pages[i + j] = pages[i] + j;
 		}
 
-		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+		__clear_buffer(pages[i], PAGE_SIZE << order, attrs);
 		i += 1 << order;
 		count -= 1 << order;
 	}
@@ -100,29 +98,26 @@ error:
 	return NULL;
 }
 
-static void __dma_free_buffer(struct device *dev, struct page **pages,
-			      size_t size, unsigned long attrs)
+static void __free_buffer(struct page **pages, size_t size, unsigned long attrs)
 {
 	int count = PHYS_PFN(size);
 	unsigned int i;
 
 	for (i = 0; i < count && pages[i]; i++) {
-		__dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
+		__clear_buffer(pages[i], PAGE_SIZE, attrs);
 		__free_pages(pages[i], 0);
 	}
 
 	kvfree(pages);
 }
 
-static void ipu6_dma_sync_single_for_cpu(struct device *dev,
-					 dma_addr_t dma_handle,
-					 size_t size,
-					 enum dma_data_direction dir)
+void ipu6_dma_sync_single(struct ipu6_bus_device *sys, dma_addr_t dma_handle,
+			  size_t size)
 {
 	void *vaddr;
 	u32 offset;
 	struct vm_info *info;
-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	struct ipu6_mmu *mmu = sys->mmu;
 
 	info = get_vm_info(mmu, dma_handle);
 	if (WARN_ON(!info))
@@ -135,25 +130,33 @@ static void ipu6_dma_sync_single_for_cpu(struct device *dev,
 	vaddr = info->vaddr + offset;
 	clflush_cache_range(vaddr, size);
 }
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_single, "INTEL_IPU6");
 
-static void ipu6_dma_sync_sg_for_cpu(struct device *dev,
-				     struct scatterlist *sglist,
-				     int nents, enum dma_data_direction dir)
+void ipu6_dma_sync_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
+		      int nents)
 {
 	struct scatterlist *sg;
 	int i;
 
 	for_each_sg(sglist, sg, nents, i)
-		clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
+		clflush_cache_range(sg_virt(sg), sg->length);
 }
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sg, "INTEL_IPU6");
 
-static void *ipu6_dma_alloc(struct device *dev, size_t size,
-			    dma_addr_t *dma_handle, gfp_t gfp,
-			    unsigned long attrs)
+void ipu6_dma_sync_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt)
 {
-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
-	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+	ipu6_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_sync_sgtable, "INTEL_IPU6");
+
+void *ipu6_dma_alloc(struct ipu6_bus_device *sys, size_t size,
+		     dma_addr_t *dma_handle, gfp_t gfp,
+		     unsigned long attrs)
+{
+	struct device *dev = &sys->auxdev.dev;
+	struct pci_dev *pdev = sys->isp->pdev;
 	dma_addr_t pci_dma_addr, ipu6_iova;
+	struct ipu6_mmu *mmu = sys->mmu;
 	struct vm_info *info;
 	unsigned long count;
 	struct page **pages;
@@ -173,7 +176,7 @@ static void *ipu6_dma_alloc(struct device *dev, size_t size,
 	if (!iova)
 		goto out_kfree;
 
-	pages = __dma_alloc_buffer(dev, size, gfp, attrs);
+	pages = __alloc_buffer(size, gfp, attrs);
 	if (!pages)
 		goto out_free_iova;
 
@@ -227,7 +230,7 @@ out_unmap:
 		ipu6_mmu_unmap(mmu->dmap->mmu_info, ipu6_iova, PAGE_SIZE);
 	}
 
-	__dma_free_buffer(dev, pages, size, attrs);
+	__free_buffer(pages, size, attrs);
 
 out_free_iova:
 	__free_iova(&mmu->dmap->iovad, iova);
@@ -236,13 +239,13 @@ out_kfree:
 
 	return NULL;
 }
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_alloc, "INTEL_IPU6");
 
-static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
-			  dma_addr_t dma_handle,
-			  unsigned long attrs)
+void ipu6_dma_free(struct ipu6_bus_device *sys, size_t size, void *vaddr,
+		   dma_addr_t dma_handle, unsigned long attrs)
 {
-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
-	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+	struct ipu6_mmu *mmu = sys->mmu;
+	struct pci_dev *pdev = sys->isp->pdev;
 	struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle));
 	dma_addr_t pci_dma_addr, ipu6_iova;
 	struct vm_info *info;
@@ -281,7 +284,7 @@ static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
 	ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
 		       PFN_PHYS(iova_size(iova)));
 
-	__dma_free_buffer(dev, pages, size, attrs);
+	__free_buffer(pages, size, attrs);
 
 	mmu->tlb_invalidate(mmu);
 
@@ -289,13 +292,14 @@ static void ipu6_dma_free(struct device *dev, size_t size, void *vaddr,
 
 	kfree(info);
 }
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_free, "INTEL_IPU6");
 
-static int ipu6_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-			 void *addr, dma_addr_t iova, size_t size,
-			 unsigned long attrs)
+int ipu6_dma_mmap(struct ipu6_bus_device *sys, struct vm_area_struct *vma,
+		  void *addr, dma_addr_t iova, size_t size,
+		  unsigned long attrs)
 {
-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
-	size_t count = PHYS_PFN(PAGE_ALIGN(size));
+	struct ipu6_mmu *mmu = sys->mmu;
+	size_t count = PFN_UP(size);
 	struct vm_info *info;
 	size_t i;
 	int ret;
@@ -323,18 +327,17 @@ static int ipu6_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	return 0;
 }
 
-static void ipu6_dma_unmap_sg(struct device *dev,
-			      struct scatterlist *sglist,
-			      int nents, enum dma_data_direction dir,
-			      unsigned long attrs)
+void ipu6_dma_unmap_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
+		       int nents, enum dma_data_direction dir,
+		       unsigned long attrs)
 {
-	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	struct device *dev = &sys->auxdev.dev;
+	struct ipu6_mmu *mmu = sys->mmu;
 	struct iova *iova = find_iova(&mmu->dmap->iovad,
 				      PHYS_PFN(sg_dma_address(sglist)));
-	int i, npages, count;
 	struct scatterlist *sg;
 	dma_addr_t pci_dma_addr;
+	unsigned int i;
 
 	if (!nents)
 		return;
@@ -342,31 +345,15 @@ static void ipu6_dma_unmap_sg(struct device *dev,
 	if (WARN_ON(!iova))
 		return;
 
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-		ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
-
-	/* get the nents as orig_nents given by caller */
-	count = 0;
-	npages = iova_size(iova);
-	for_each_sg(sglist, sg, nents, i) {
-		if (sg_dma_len(sg) == 0 ||
-		    sg_dma_address(sg) == DMA_MAPPING_ERROR)
-			break;
-
-		npages -= PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
-		count++;
-		if (npages <= 0)
-			break;
-	}
-
 	/*
 	 * Before IPU6 mmu unmap, return the pci dma address back to sg
 	 * assume the nents is less than orig_nents as the least granule
 	 * is 1 SZ_4K page
 	 */
-	dev_dbg(dev, "trying to unmap concatenated %u ents\n", count);
-	for_each_sg(sglist, sg, count, i) {
-		dev_dbg(dev, "ipu unmap sg[%d] %pad\n", i, &sg_dma_address(sg));
+	dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents);
+	for_each_sg(sglist, sg, nents, i) {
+		dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i,
+			&sg_dma_address(sg), sg_dma_len(sg));
 		pci_dma_addr = ipu6_mmu_iova_to_phys(mmu->dmap->mmu_info,
 						     sg_dma_address(sg));
 		dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n",
@@ -380,23 +367,21 @@ static void ipu6_dma_unmap_sg(struct device *dev,
 		       PFN_PHYS(iova_size(iova)));
 
 	mmu->tlb_invalidate(mmu);
-
-	dma_unmap_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
-
 	__free_iova(&mmu->dmap->iovad, iova);
 }
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sg, "INTEL_IPU6");
 
-static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-			   int nents, enum dma_data_direction dir,
-			   unsigned long attrs)
+int ipu6_dma_map_sg(struct ipu6_bus_device *sys, struct scatterlist *sglist,
+		    int nents, enum dma_data_direction dir,
+		    unsigned long attrs)
 {
-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
-	struct pci_dev *pdev = to_ipu6_bus_device(dev)->isp->pdev;
+	struct device *dev = &sys->auxdev.dev;
+	struct ipu6_mmu *mmu = sys->mmu;
 	struct scatterlist *sg;
 	struct iova *iova;
 	size_t npages = 0;
 	unsigned long iova_addr;
-	int i, count;
+	int i;
 
 	for_each_sg(sglist, sg, nents, i) {
 		if (sg->offset) {
@@ -406,17 +391,11 @@ static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		}
 	}
 
-	dev_dbg(dev, "pci_dma_map_sg trying to map %d ents\n", nents);
-	count = dma_map_sg_attrs(&pdev->dev, sglist, nents, dir, attrs);
-	if (count <= 0) {
-		dev_err(dev, "pci_dma_map_sg %d ents failed\n", nents);
-		return 0;
-	}
-
-	dev_dbg(dev, "pci_dma_map_sg %d ents mapped\n", count);
+	for_each_sg(sglist, sg, nents, i)
+		npages += PFN_UP(sg_dma_len(sg));
 
-	for_each_sg(sglist, sg, count, i)
-		npages += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+	dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n",
+		nents, npages);
 
 	iova = alloc_iova(&mmu->dmap->iovad, npages,
 			  PHYS_PFN(dma_get_mask(dev)), 0);
@@ -427,12 +406,13 @@ static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		iova->pfn_hi);
 
 	iova_addr = iova->pfn_lo;
-	for_each_sg(sglist, sg, count, i) {
+	for_each_sg(sglist, sg, nents, i) {
+		phys_addr_t iova_pa;
 		int ret;
 
-		dev_dbg(dev, "mapping entry %d: iova 0x%llx phy %pad size %d\n",
-			i, PFN_PHYS(iova_addr), &sg_dma_address(sg),
-			sg_dma_len(sg));
+		iova_pa = PFN_PHYS(iova_addr);
+		dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n",
+			i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg));
 
 		ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
 				   sg_dma_address(sg),
@@ -442,28 +422,51 @@ static int ipu6_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 		sg_dma_address(sg) = PFN_PHYS(iova_addr);
 
-		iova_addr += PHYS_PFN(PAGE_ALIGN(sg_dma_len(sg)));
+		iova_addr += PFN_UP(sg_dma_len(sg));
 	}
 
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-		ipu6_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+	dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages);
 
-	return count;
+	return nents;
 
 out_fail:
-	ipu6_dma_unmap_sg(dev, sglist, i, dir, attrs);
+	ipu6_dma_unmap_sg(sys, sglist, i, dir, attrs);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sg, "INTEL_IPU6");
+
+int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
+			 enum dma_data_direction dir, unsigned long attrs)
+{
+	int nents;
+
+	nents = ipu6_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
+	if (nents < 0)
+		return nents;
+
+	sgt->nents = nents;
 
 	return 0;
 }
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_map_sgtable, "INTEL_IPU6");
+
+void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
+			    enum dma_data_direction dir, unsigned long attrs)
+{
+	ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
+}
+EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, "INTEL_IPU6");
 
 /*
  * Create scatter-list for the already allocated DMA buffer
 */
-static int ipu6_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-				void *cpu_addr, dma_addr_t handle, size_t size,
-				unsigned long attrs)
+int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
+			 void *cpu_addr, dma_addr_t handle, size_t size,
+			 unsigned long attrs)
 {
-	struct ipu6_mmu *mmu = to_ipu6_bus_device(dev)->mmu;
+	struct device *dev = &sys->auxdev.dev;
+	struct ipu6_mmu *mmu = sys->mmu;
 	struct vm_info *info;
 	int n_pages;
 	int ret = 0;
@@ -478,25 +481,12 @@ static int ipu6_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	if (WARN_ON(!info->pages))
 		return -ENOMEM;
 
-	n_pages = PHYS_PFN(PAGE_ALIGN(size));
+	n_pages = PFN_UP(size);
 
 	ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
 					GFP_KERNEL);
 	if (ret)
-		dev_warn(dev, "IPU6 get sgt table failed\n");
+		dev_warn(dev, "get sgt table failed\n");
 
 	return ret;
 }
-
-const struct dma_map_ops ipu6_dma_ops = {
-	.alloc = ipu6_dma_alloc,
-	.free = ipu6_dma_free,
-	.mmap = ipu6_dma_mmap,
-	.map_sg = ipu6_dma_map_sg,
-	.unmap_sg = ipu6_dma_unmap_sg,
-	.sync_single_for_cpu = ipu6_dma_sync_single_for_cpu,
-	.sync_single_for_device = ipu6_dma_sync_single_for_cpu,
-	.sync_sg_for_cpu = ipu6_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = ipu6_dma_sync_sg_for_cpu,
-	.get_sgtable = ipu6_dma_get_sgtable,
-};
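For context, a minimal sketch of how code elsewhere in the IPU6 driver would call the helpers exported by this change, now that the driver no longer installs a private dma_map_ops table. The wrapper function example_alloc_fw_buffer and its buffer are hypothetical illustrations, not part of this commit; the ipu6_dma_* signatures and the "ipu6-bus.h"/"ipu6-dma.h" headers are taken from the declarations visible in the diff above, assuming they are declared there.

/* Hypothetical caller inside the IPU6 driver; not part of this commit. */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include "ipu6-bus.h"	/* struct ipu6_bus_device */
#include "ipu6-dma.h"	/* ipu6_dma_alloc() and friends (assumed) */

static int example_alloc_fw_buffer(struct ipu6_bus_device *sys, size_t size)
{
	dma_addr_t dma_addr;
	void *cpu_addr;

	/* Allocate and map through the IPU6 MMU instead of dev->dma_ops. */
	cpu_addr = ipu6_dma_alloc(sys, size, &dma_addr, GFP_KERNEL, 0);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... fill the buffer with firmware or parameter data here ... */

	/* Flush CPU caches (clflush based) so the IPU sees the data. */
	ipu6_dma_sync_single(sys, dma_addr, size);

	ipu6_dma_free(sys, size, cpu_addr, dma_addr, 0);
	return 0;
}

Scatter-gather users follow the same pattern with ipu6_dma_map_sgtable(), ipu6_dma_sync_sgtable() and ipu6_dma_unmap_sgtable() in place of the single-buffer calls.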