author    Christoph Hellwig <hch@lst.de>    2019-02-13 08:01:28 +0100
committer Michael Ellerman <mpe@ellerman.id.au>    2019-02-18 12:41:04 +0100
commit    31f940afda6add7a7bb182adde97e615e5355c6d (patch)
tree      0133b5ca9bc0889985dc3c4021fa3268e97ae6ea
parent    swiotlb: remove swiotlb_dma_supported (diff)
powerpc/dma: use the dma-direct allocator for coherent platforms
The generic code allows a few nice things such as node local allocations and dipping into the CMA area. The lookup of the right zone for a given dma mask works a little differently, but the results should be the same.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
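[Editor's note on the zone lookup mentioned above: the generic dma-direct allocator derives a GFP zone restriction from the device's DMA mask instead of walking max_zone_pfns the way the removed dma_pfn_limit_to_zone() does below. A minimal sketch of that idea follows; dma_mask_to_gfp() is a made-up illustrative helper, not the actual kernel/dma/direct.c code, and the zone bit widths are architecture-specific.]

/*
 * Illustrative sketch only: approximate how a dma-direct style
 * allocator turns a device's coherent DMA mask into a GFP zone
 * restriction. The 24-bit ZONE_DMA bound shown here is the classic
 * ISA value; the real bound varies by architecture.
 */
static gfp_t dma_mask_to_gfp(u64 coherent_mask)
{
	/* Mask too small for ZONE_NORMAL or ZONE_DMA32: force ZONE_DMA. */
	if (coherent_mask <= DMA_BIT_MASK(24))
		return GFP_DMA;
	/* Mask covers at most 4GB: ZONE_DMA32 is sufficient. */
	if (coherent_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	/* Wider mask: no zone restriction needed. */
	return 0;
}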
-rw-r--r--  arch/powerpc/include/asm/pgtable.h   1
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c      5
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c    4
-rw-r--r--  arch/powerpc/kernel/dma.c           69
-rw-r--r--  arch/powerpc/mm/mem.c               22
5 files changed, 9 insertions(+), 92 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dad1d27e196d..505550fb2935 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -66,7 +66,6 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
-int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);
/*
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 67fbfaa4e3b2..c75ba4e3a50c 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -40,8 +40,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
if (dma_iommu_alloc_bypass(dev))
- return __dma_nommu_alloc_coherent(dev, size, dma_handle, flag,
- attrs);
+ return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
dev_to_node(dev));
@@ -52,7 +51,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
if (dma_iommu_alloc_bypass(dev))
- __dma_nommu_free_coherent(dev, size, vaddr, dma_handle, attrs);
+ dma_direct_free(dev, size, vaddr, dma_handle, attrs);
else
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
dma_handle);
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 6d2677b2daa6..3a15a7d945e9 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -32,8 +32,8 @@ unsigned int ppc_swiotlb_enable;
* for everything else.
*/
const struct dma_map_ops powerpc_swiotlb_dma_ops = {
- .alloc = __dma_nommu_alloc_coherent,
- .free = __dma_nommu_free_coherent,
+ .alloc = dma_direct_alloc,
+ .free = dma_direct_free,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_supported,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index a3546a82f6d7..f983f8d435a6 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -27,70 +27,6 @@
* default the offset is PCI_DRAM_OFFSET.
*/
-static u64 __maybe_unused get_pfn_limit(struct device *dev)
-{
- u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
-
-#ifdef CONFIG_SWIOTLB
- if (dev->bus_dma_mask && dev->dma_ops == &powerpc_swiotlb_dma_ops)
- pfn = min_t(u64, pfn, dev->bus_dma_mask >> PAGE_SHIFT);
-#endif
-
- return pfn;
-}
-
-#ifndef CONFIG_NOT_COHERENT_CACHE
-void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
-{
- void *ret;
- struct page *page;
- int node = dev_to_node(dev);
-#ifdef CONFIG_FSL_SOC
- u64 pfn = get_pfn_limit(dev);
- int zone;
-
- /*
- * This code should be OK on other platforms, but we have drivers that
- * don't set coherent_dma_mask. As a workaround we just ifdef it. This
- * whole routine needs some serious cleanup.
- */
-
- zone = dma_pfn_limit_to_zone(pfn);
- if (zone < 0) {
- dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
- __func__, pfn);
- return NULL;
- }
-
- switch (zone) {
-#ifdef CONFIG_ZONE_DMA
- case ZONE_DMA:
- flag |= GFP_DMA;
- break;
-#endif
- };
-#endif /* CONFIG_FSL_SOC */
-
- page = alloc_pages_node(node, flag, get_order(size));
- if (page == NULL)
- return NULL;
- ret = page_address(page);
- memset(ret, 0, size);
- *dma_handle = phys_to_dma(dev,__pa(ret));
-
- return ret;
-}
-
-void __dma_nommu_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-#endif /* !CONFIG_NOT_COHERENT_CACHE */
-
int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
@@ -163,8 +99,13 @@ static inline void dma_nommu_sync_single(struct device *dev,
#endif
const struct dma_map_ops dma_nommu_ops = {
+#ifdef CONFIG_NOT_COHERENT_CACHE
.alloc = __dma_nommu_alloc_coherent,
.free = __dma_nommu_free_coherent,
+#else
+ .alloc = dma_direct_alloc,
+ .free = dma_direct_free,
+#endif
.map_sg = dma_nommu_map_sg,
.unmap_sg = dma_nommu_unmap_sg,
.dma_supported = dma_direct_supported,
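[Editor's note: the net effect of the ops-table hunks above is that, on cache-coherent platforms, an ordinary dma_alloc_coherent() call from a driver now reaches the generic dma_direct_alloc()/dma_direct_free() with no change at the call site. A hypothetical driver fragment to illustrate; example_probe() is made up, while dma_alloc_coherent()/dma_free_coherent() are the standard linux/dma-mapping.h API.]

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical probe function: nothing here changes with this patch;
 * the same calls now land in dma_direct_alloc()/dma_direct_free()
 * via the dma_map_ops tables patched above. */
static int example_probe(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... program the device with 'handle', use 'buf' ... */
	dma_free_coherent(dev, SZ_4K, buf, handle);
	return 0;
}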
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 33cc6f676fa6..a10ee3645a6c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -69,15 +69,12 @@ pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
-#define TOP_ZONE ZONE_HIGHMEM
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
vaddr), vaddr), vaddr);
}
-#else
-#define TOP_ZONE ZONE_NORMAL
#endif
int page_is_ram(unsigned long pfn)
@@ -262,25 +259,6 @@ static int __init mark_nonram_nosave(void)
static unsigned long max_zone_pfns[MAX_NR_ZONES];
/*
- * Find the least restrictive zone that is entirely below the
- * specified pfn limit. Returns < 0 if no suitable zone is found.
- *
- * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
- * systems -- the DMA limit can be higher than any possible real pfn.
- */
-int dma_pfn_limit_to_zone(u64 pfn_limit)
-{
- int i;
-
- for (i = TOP_ZONE; i >= 0; i--) {
- if (max_zone_pfns[i] <= pfn_limit)
- return i;
- }
-
- return -EPERM;
-}
-
-/*
* paging_init() sets up the page tables - in fact we've already done this.
*/
void __init paging_init(void)