Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/dma-mapping.c	54
-rw-r--r--	arch/arm64/mm/init.c	16
2 files changed, 15 insertions, 55 deletions
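
In short, the patch below retires arm64's open-coded coherent DMA allocator in favour of the generic swiotlb_alloc()/swiotlb_free() helpers, renames the local dma_map_ops table to arm64_swiotlb_dma_ops, and switches the architecture from ZONE_DMA to ZONE_DMA32 for 32-bit-capable devices; both zones cover the same first 4GB (DMA_BIT_MASK(32)) here.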
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b45c5bcaeccb..a96ec0181818 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -24,7 +24,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
@@ -91,46 +91,6 @@ static int __free_from_pool(void *start, size_t size)
 	return 1;
 }
 
-static void *__dma_alloc_coherent(struct device *dev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flags,
-				  unsigned long attrs)
-{
-	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-		flags |= GFP_DMA;
-	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
-		struct page *page;
-		void *addr;
-
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size), flags);
-		if (!page)
-			return NULL;
-
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		addr = page_address(page);
-		memset(addr, 0, size);
-		return addr;
-	} else {
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	}
-}
-
-static void __dma_free_coherent(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle,
-				unsigned long attrs)
-{
-	bool freed;
-	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-
-	freed = dma_release_from_contiguous(dev,
-					phys_to_page(paddr),
-					size >> PAGE_SHIFT);
-	if (!freed)
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static void *__dma_alloc(struct device *dev, size_t size,
 			 dma_addr_t *dma_handle, gfp_t flags,
 			 unsigned long attrs)
@@ -152,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 		return addr;
 	}
 
-	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
 
@@ -173,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 	return coherent_ptr;
 
 no_map:
-	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
 no_mem:
 	return NULL;
 }
@@ -191,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
 			return;
 		vunmap(vaddr);
 	}
-	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
@@ -368,7 +328,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 	return 0;
 }
 
-static const struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops arm64_swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
@@ -397,7 +357,7 @@ static int __init atomic_pool_init(void)
 		page = dma_alloc_from_contiguous(NULL, nr_pages,
 						 pool_size_order, GFP_KERNEL);
 	else
-		page = alloc_pages(GFP_DMA, pool_size_order);
+		page = alloc_pages(GFP_DMA32, pool_size_order);
 
 	if (page) {
 		int ret;
@@ -923,7 +883,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
 	if (!dev->dma_ops)
-		dev->dma_ops = &swiotlb_dma_ops;
+		dev->dma_ops = &arm64_swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
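
Note on the dma-mapping.c changes above: the open-coded __dma_alloc_coherent()/__dma_free_coherent() pair is replaced by the generic swiotlb_alloc()/swiotlb_free() helpers, and the ops table is renamed to arm64_swiotlb_dma_ops, presumably to keep the plain swiotlb_dma_ops name free for a generic implementation. The hypothetical driver snippet below (example_probe is ours, not part of the patch) sketches the path these ops serve: dma_alloc_coherent() dispatches through dev->dma_ops to __dma_alloc().

/* Hypothetical sketch, not part of the patch: exercising the ops above. */
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;
	int ret;

	/* A 32-bit-only device: with this patch its buffer comes from ZONE_DMA32. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Dispatches via dev->dma_ops (arm64_swiotlb_dma_ops) to __dma_alloc(). */
	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, use cpu_addr from the CPU ... */

	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}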
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index c903f7ccbdd2..9f3c47acf8ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -217,7 +217,7 @@ static void __init reserve_elfcorehdr(void)
 }
 #endif /* CONFIG_CRASH_DUMP */
 /*
- * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
  * currently assumes that for memory starting above 4G, 32-bit devices will
  * use a DMA offset.
  */
@@ -233,8 +233,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
 	max_zone_pfns[ZONE_NORMAL] = max;
 
 	free_area_init_nodes(max_zone_pfns);
@@ -251,9 +251,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	max_dma = PFN_DOWN(arm64_dma_phys_limit);
-	zone_size[ZONE_DMA] = max_dma - min;
+	zone_size[ZONE_DMA32] = max_dma - min;
 #endif
 	zone_size[ZONE_NORMAL] = max - max_dma;
 
@@ -266,10 +266,10 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		if (start >= max)
 			continue;
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 		if (start < max_dma) {
 			unsigned long dma_end = min(end, max_dma);
-			zhole_size[ZONE_DMA] -= dma_end - start;
+			zhole_size[ZONE_DMA32] -= dma_end - start;
 		}
 #endif
 		if (end > max_dma) {
@@ -470,7 +470,7 @@ void __init arm64_memblock_init(void)
 	early_init_fdt_scan_reserved_mem();
 
 	/* 4GB maximum for 32-bit only capable devices */
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
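
For context on the comment changed above: max_zone_dma_phys() itself is untouched by this patch, and the limit it computes serves the new ZONE_DMA32 exactly as it served the old ZONE_DMA. The sketch below shows the helper roughly as it reads in arch/arm64/mm/init.c around this version of the tree; consult the actual file for the authoritative definition.

/* Reference sketch of the helper named above (not modified by this diff). */
static phys_addr_t __init max_zone_dma_phys(void)
{
	/* Offset the 4GB window by the start of DRAM when RAM begins above 4G. */
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);

	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}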