Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 39
1 file changed, 15 insertions, 24 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 78de138aa66d..f1e2922e447c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -179,11 +179,6 @@ static void arm_dma_sync_single_for_device(struct device *dev,
         __dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
-static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        return dma_addr == ARM_MAPPING_ERROR;
-}
-
 const struct dma_map_ops arm_dma_ops = {
         .alloc = arm_dma_alloc,
         .free = arm_dma_free,
@@ -197,7 +192,6 @@ const struct dma_map_ops arm_dma_ops = {
         .sync_single_for_device = arm_dma_sync_single_for_device,
         .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
         .sync_sg_for_device = arm_dma_sync_sg_for_device,
-        .mapping_error = arm_dma_mapping_error,
         .dma_supported = arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_dma_ops);
@@ -217,7 +211,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
         .get_sgtable = arm_dma_get_sgtable,
         .map_page = arm_coherent_dma_map_page,
         .map_sg = arm_dma_map_sg,
-        .mapping_error = arm_dma_mapping_error,
         .dma_supported = arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -774,7 +767,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
         gfp &= ~(__GFP_COMP);
         args.gfp = gfp;
 
-        *handle = ARM_MAPPING_ERROR;
+        *handle = DMA_MAPPING_ERROR;
         allowblock = gfpflags_allow_blocking(gfp);
         cma = allowblock ? dev_get_cma_area(dev) : false;
 
@@ -1217,7 +1210,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
         if (i == mapping->nr_bitmaps) {
                 if (extend_iommu_mapping(mapping)) {
                         spin_unlock_irqrestore(&mapping->lock, flags);
-                        return ARM_MAPPING_ERROR;
+                        return DMA_MAPPING_ERROR;
                 }
 
                 start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1225,7 +1218,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 
                 if (start > mapping->bits) {
                         spin_unlock_irqrestore(&mapping->lock, flags);
-                        return ARM_MAPPING_ERROR;
+                        return DMA_MAPPING_ERROR;
                 }
 
                 bitmap_set(mapping->bitmaps[i], start, count);
@@ -1409,7 +1402,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
         int i;
 
         dma_addr = __alloc_iova(mapping, size);
-        if (dma_addr == ARM_MAPPING_ERROR)
+        if (dma_addr == DMA_MAPPING_ERROR)
                 return dma_addr;
 
         iova = dma_addr;
@@ -1436,7 +1429,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 fail:
         iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
         __free_iova(mapping, dma_addr, size);
-        return ARM_MAPPING_ERROR;
+        return DMA_MAPPING_ERROR;
 }
 
 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1497,7 +1490,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
                 return NULL;
 
         *handle = __iommu_create_mapping(dev, &page, size, attrs);
-        if (*handle == ARM_MAPPING_ERROR)
+        if (*handle == DMA_MAPPING_ERROR)
                 goto err_mapping;
 
         return addr;
@@ -1525,7 +1518,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
         struct page **pages;
         void *addr = NULL;
 
-        *handle = ARM_MAPPING_ERROR;
+        *handle = DMA_MAPPING_ERROR;
         size = PAGE_ALIGN(size);
 
         if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1546,7 +1539,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
                 return NULL;
 
         *handle = __iommu_create_mapping(dev, pages, size, attrs);
-        if (*handle == ARM_MAPPING_ERROR)
+        if (*handle == DMA_MAPPING_ERROR)
                 goto err_buffer;
 
         if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
@@ -1696,10 +1689,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
         int prot;
 
         size = PAGE_ALIGN(size);
-        *handle = ARM_MAPPING_ERROR;
+        *handle = DMA_MAPPING_ERROR;
 
         iova_base = iova = __alloc_iova(mapping, size);
-        if (iova == ARM_MAPPING_ERROR)
+        if (iova == DMA_MAPPING_ERROR)
                 return -ENOMEM;
 
         for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1739,7 +1732,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
         for (i = 1; i < nents; i++) {
                 s = sg_next(s);
 
-                s->dma_address = ARM_MAPPING_ERROR;
+                s->dma_address = DMA_MAPPING_ERROR;
                 s->dma_length = 0;
 
                 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1914,7 +1907,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
         int ret, prot, len = PAGE_ALIGN(size + offset);
 
         dma_addr = __alloc_iova(mapping, len);
-        if (dma_addr == ARM_MAPPING_ERROR)
+        if (dma_addr == DMA_MAPPING_ERROR)
                 return dma_addr;
 
         prot = __dma_info_to_prot(dir, attrs);
@@ -1926,7 +1919,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
         return dma_addr + offset;
 fail:
         __free_iova(mapping, dma_addr, len);
-        return ARM_MAPPING_ERROR;
+        return DMA_MAPPING_ERROR;
 }
 
 /**
@@ -2020,7 +2013,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
         size_t len = PAGE_ALIGN(size + offset);
 
         dma_addr = __alloc_iova(mapping, len);
-        if (dma_addr == ARM_MAPPING_ERROR)
+        if (dma_addr == DMA_MAPPING_ERROR)
                 return dma_addr;
 
         prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2032,7 +2025,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
         return dma_addr + offset;
 fail:
         __free_iova(mapping, dma_addr, len);
-        return ARM_MAPPING_ERROR;
+        return DMA_MAPPING_ERROR;
 }
 
 /**
@@ -2105,7 +2098,6 @@ const struct dma_map_ops iommu_ops = {
         .map_resource = arm_iommu_map_resource,
         .unmap_resource = arm_iommu_unmap_resource,
 
-        .mapping_error = arm_dma_mapping_error,
         .dma_supported = arm_dma_supported,
 };
 
@@ -2124,7 +2116,6 @@ const struct dma_map_ops iommu_coherent_ops = {
         .map_resource = arm_iommu_map_resource,
         .unmap_resource = arm_iommu_unmap_resource,
 
-        .mapping_error = arm_dma_mapping_error,
         .dma_supported = arm_dma_supported,
 };
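
The net effect of the diff above is that arm no longer defines a private ARM_MAPPING_ERROR cookie or a ->mapping_error callback in its dma_map_ops; failed mappings are reported through the generic DMA_MAPPING_ERROR value, and callers keep testing handles with dma_mapping_error(). The snippet below is not part of the commit: it is a minimal caller-side sketch (the example_map() function and its dev/buf/len/out parameters are made up for illustration) of the convention this change relies on.

#include <linux/dma-mapping.h>

/* Illustrative sketch only -- not part of this commit. */
static int example_map(struct device *dev, void *buf, size_t len,
                       dma_addr_t *out)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /*
         * A failed mapping now comes back as the generic DMA_MAPPING_ERROR
         * cookie; dma_mapping_error() checks for it directly instead of
         * calling the removed ->mapping_error method.
         */
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        *out = handle;
        return 0;
}

Because drivers go through dma_mapping_error() rather than comparing against an architecture-specific constant, caller code like this did not need to change when ARM_MAPPING_ERROR was removed.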