author    | Christoph Hellwig <hch@lst.de>    | 2019-04-30 12:51:50 +0200
committer | Will Deacon <will.deacon@arm.com> | 2019-05-07 11:06:16 +0200
commit    | a98d9ae937d256ed679a935fc82d9deaa710d98e (patch)
tree      | 3982eb41c186c4f306f2187a7adf74d2ac274d84 /arch
parent    | Merge branch 'for-next/perf' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable
DMA allocations that can't sleep may return non-remapped addresses, but
we do not properly handle them in the mmap and get_sgtable methods.
Resolve non-vmalloc addresses using virt_to_page to handle this corner
case.
Cc: <stable@vger.kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Will Deacon <will.deacon@arm.com>
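Background sketch (not part of the commit message): the fix hinges on where cpu_addr points. Remapped DMA allocations live in vmalloc space and need a vmalloc-aware lookup, while non-remapped allocations (e.g. from the atomic pool used when the caller cannot sleep) are plain linear-map addresses, where virt_to_page() applies directly. A minimal illustration, assuming kernel context; the wrapper name dma_cpu_addr_to_page() is hypothetical, while is_vmalloc_addr(), vmalloc_to_page() and virt_to_page() are existing helpers:

/* Hypothetical helper for illustration only, not taken from the patch. */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *dma_cpu_addr_to_page(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))
		/* Remapped allocation: look the page up via the vmalloc mapping. */
		return vmalloc_to_page(cpu_addr);

	/* Non-remapped allocation: a linear-map address, the corner case this patch adds. */
	return virt_to_page(cpu_addr);
}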
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm64/mm/dma-mapping.c | 10
1 file changed, 10 insertions, 0 deletions
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 78c0a72f822c..674860e3e478 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -249,6 +249,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
+	if (!is_vmalloc_addr(cpu_addr)) {
+		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+		return __swiotlb_mmap_pfn(vma, pfn, size);
+	}
+
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		/*
 		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
@@ -272,6 +277,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
+	if (!is_vmalloc_addr(cpu_addr)) {
+		struct page *page = virt_to_page(cpu_addr);
+		return __swiotlb_get_sgtable_page(sgt, page, size);
+	}
+
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		/*
 		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
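For reference, a simplified sketch of what the two existing helpers the patch reuses roughly do (an approximation of the arm64 code of that era, not a verbatim copy): the mmap path maps the physically contiguous buffer into the user VMA with remap_pfn_range(), and the get_sgtable path describes it as a single scatterlist entry.

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Approximate shape of the helper called by the new mmap branch. */
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	/* Map the contiguous physical range straight into the user VMA. */
	if (off < nr_pages && vma_pages(vma) <= (nr_pages - off))
		ret = remap_pfn_range(vma, vma->vm_start, pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	return ret;
}

/* Approximate shape of the helper called by the new get_sgtable branch. */
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	/* One physically contiguous buffer -> one scatterlist entry. */
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}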