author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-28 23:12:21 +0100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-28 23:12:21 +0100
commit | af7ddd8a627c62a835524b3f5b471edbbbcce025 (patch)
tree | af9777ddef6d394c7cc01fca599328b584ca2bc1 /arch
parent | Merge tag 'for-4.21/libata-20181221' of git://git.kernel.dk/linux-block (diff)
parent | dma-mapping: fix inverted logic in dma_supported (diff)
Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping
Pull DMA mapping updates from Christoph Hellwig:
"A huge update this time, but a lot of that is just consolidating or
removing code:
- provide a common DMA_MAPPING_ERROR definition and avoid indirect
calls for dma_map_* error checking (a short driver-side sketch
follows this message)
- use direct calls for the DMA direct mapping case, avoiding huge
retpoline overhead for high performance workloads (see the dispatch
sketch after the commit list below)
- merge the swiotlb dma_map_ops into dma-direct
- provide a generic remapping DMA coherent allocator for
architectures whose devices perform DMA that is not cache
coherent. It is based on the existing arm64 implementation and is
now also used by csky (a schematic of the per-arch glue follows
the csky hunks in the diff below)
- improve the dma-debug infrastructure, including dynamic allocation
of entries (Robin Murphy)
- default to providing chained scatterlists everywhere, with opt-outs
for the few architectures (alpha, parisc, most arm32 variants) that
can't cope with them
- misc sparc32 dma-related cleanups
- remove the dma_mark_clean arch hook used by swiotlb on ia64 and
replace it with the generic noncoherent infrastructure
- fix the return type of dma_set_max_seg_size (Niklas Söderlund)
- move the dummy dma ops for devices that are not DMA capable from
arm64 to common code (Robin Murphy)
- ensure dma_alloc_coherent returns zeroed memory to avoid kernel
data leaks through userspace. We already did this for most common
architectures, but this ensures we do it everywhere.
dma_zalloc_coherent has been deprecated and can hopefully be
removed after -rc1 with a coccinelle script (the recurring per-arch
zeroing change is sketched after the diffstat below)"
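As an illustration of the consolidated error handling, here is a minimal driver-side sketch (the function and buffer names are hypothetical, not part of this series). After this merge every dma_map_* implementation returns the shared DMA_MAPPING_ERROR sentinel (~(dma_addr_t)0) on failure, so dma_mapping_error() becomes a plain comparison rather than an indirect ->mapping_error call:

```c
#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: map a buffer for device access and
 * check the result.  dma_mapping_error() now just compares the handle
 * against the common DMA_MAPPING_ERROR sentinel instead of calling
 * through dma_map_ops->mapping_error.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* nothing was mapped */

	/* ... hand addr to the hardware; once the transfer is done: */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
```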
* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
dma-mapping: fix inverted logic in dma_supported
dma-mapping: deprecate dma_zalloc_coherent
dma-mapping: zero memory returned from dma_alloc_*
sparc/iommu: fix ->map_sg return value
sparc/io-unit: fix ->map_sg return value
arm64: default to the direct mapping in get_arch_dma_ops
PCI: Remove unused attr variable in pci_dma_configure
ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
dma-mapping: bypass indirect calls for dma-direct
vmd: use the proper dma_* APIs instead of direct methods calls
dma-direct: merge swiotlb_dma_ops into the dma_direct code
dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
dma-direct: improve addressability error reporting
swiotlb: remove dma_mark_clean
swiotlb: remove SWIOTLB_MAP_ERROR
ACPI / scan: Refactor _CCA enforcement
dma-mapping: factor out dummy DMA ops
dma-mapping: always build the direct mapping code
dma-mapping: move dma_cache_sync out of line
dma-mapping: move various slow path functions out of line
...
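The "bypass indirect calls for dma-direct" commit above hinges on one convention: a device with no dma_map_ops now means "use the direct mapping". A simplified sketch of the resulting dispatch follows; the in-tree inline helpers in include/linux/dma-mapping.h are more elaborate, so treat this as an approximation rather than the verbatim kernel code:

```c
/* Sketch of the post-merge fast path: a NULL ops pointer selects
 * dma-direct via a direct (retpoline-free) call; only devices behind
 * an IOMMU or other custom ops pay for an indirect call.
 */
static inline dma_addr_t sketch_dma_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)	/* direct mapping: no indirect call */
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}
```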
Diffstat (limited to 'arch')
78 files changed, 440 insertions, 1237 deletions
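Many of the per-architecture hunks below implement the zeroing guarantee with the same one-line change to the arch allocator. Schematically (a sketch distilled from the arc, m68k, and microblaze hunks, not a complete arch_dma_alloc):

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Distilled form of the recurring change: OR __GFP_ZERO into the
 * allocation flags so dma_alloc_coherent() memory is always zeroed
 * and cannot leak stale kernel data to userspace, which makes
 * dma_zalloc_coherent() redundant.
 */
static void *sketch_alloc_coherent_pages(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp | __GFP_ZERO, order);

	return page ? page_address(page) : NULL;
}
```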
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5b4f88363453..5da6ff54b3e7 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -5,6 +5,7 @@ config ALPHA select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_NO_PREEMPT + select ARCH_NO_SG_CHAIN select ARCH_USE_CMPXCHG_LOCKREF select HAVE_AOUT select HAVE_IDE
@@ -202,7 +203,6 @@ config ALPHA_EIGER config ALPHA_JENSEN bool "Jensen" depends on BROKEN - select DMA_DIRECT_OPS help DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one of the first-generation Alpha systems. A number of these systems

diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 8beeafd4f68e..0ee6a5c99b16 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -7,7 +7,7 @@ extern const struct dma_map_ops alpha_pci_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_ALPHA_JENSEN - return &dma_direct_ops; + return NULL; #else return &alpha_pci_ops; #endif

diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 46e08e0d9181..aa0f50d0f823 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -291,7 +291,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, use direct_map above, it now must be considered an error. */ if (! alpha_mv.mv_pci_tbi) { printk_once(KERN_WARNING "pci_map_single: no HW sg\n"); - return 0; + return DMA_MAPPING_ERROR; } arena = hose->sg_pci;
@@ -307,7 +307,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, if (dma_ofs < 0) { printk(KERN_WARNING "pci_map_single failed: " "could not allocate dma page tables\n"); - return 0; + return DMA_MAPPING_ERROR; } paddr &= PAGE_MASK;
@@ -443,7 +443,7 @@ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, gfp &= ~GFP_DMA; try_again: - cpu_addr = (void *)__get_free_pages(gfp, order); + cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order); if (! cpu_addr) { printk(KERN_INFO "pci_alloc_consistent: " "get_free_pages failed from %pf\n",
@@ -455,7 +455,7 @@ try_again: memset(cpu_addr, 0, size); *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0); - if (*dma_addrp == 0) { + if (*dma_addrp == DMA_MAPPING_ERROR) { free_pages((unsigned long)cpu_addr, order); if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA)) return NULL;
@@ -671,7 +671,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, sg->dma_address = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, dac_allowed); - return sg->dma_address != 0; + return sg->dma_address != DMA_MAPPING_ERROR; } start = sg;
@@ -935,11 +935,6 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) return 0; } -static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == 0; -} - const struct dma_map_ops alpha_pci_ops = { .alloc = alpha_pci_alloc_coherent, .free = alpha_pci_free_coherent,
@@ -947,7 +942,6 @@ const struct dma_map_ops alpha_pci_ops = { .unmap_page = alpha_pci_unmap_page, .map_sg = alpha_pci_map_sg, .unmap_sg = alpha_pci_unmap_sg, - .mapping_error = alpha_pci_mapping_error, .dma_supported = alpha_pci_supported, }; EXPORT_SYMBOL(alpha_pci_ops);

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index dadb494d83fd..ca8e26d045a9 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -13,12 +13,10 @@ config ARC select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_SG_CHAIN select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) select GENERIC_CLOCKEVENTS select GENERIC_FIND_FIRST_BIT

diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index cf9619d4efb4..4135abec3fb0 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1294,7 +1294,7 @@ void __init arc_cache_init_master(void) /* * In case of IOC (say IOC+SLC case), pointers above could still be set * but end up not being relevant as the first function in chain is not - * called at all for @dma_direct_ops + * called at all for devices using coherent DMA. * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() */ }

diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index db203ff69ccf..1525ac00fd02 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -33,7 +33,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, */ BUG_ON(gfp & __GFP_HIGHMEM); - page = alloc_pages(gfp, order); + page = alloc_pages(gfp | __GFP_ZERO, order); if (!page) return NULL;

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5c0305585a0a..2196aac0e45c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -19,6 +19,7 @@ config ARM select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 select ARCH_SUPPORTS_ATOMIC_RMW
@@ -29,7 +30,7 @@ config ARM select CLONE_BACKWARDS select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS - select DMA_DIRECT_OPS if !MMU + select DMA_REMAP if MMU select EDAC_SUPPORT select EDAC_ATOMIC_SCRUB select GENERIC_ALLOCATOR
@@ -118,7 +119,6 @@ config ARM <http://www.arm.linux.org.uk/>. config ARM_HAS_SG_CHAIN - select ARCH_HAS_SG_CHAIN bool config ARM_DMA_USE_IOMMU

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9a92de63426f..5ba4622030ca 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -257,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, if (buf == NULL) { dev_err(dev, "%s: unable to map unsafe buffer %p!\n", __func__, ptr); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -327,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, ret = needs_bounce(dev, dma_addr, size); if (ret < 0) - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; if (ret == 0) { arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
@@ -336,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, if (PageHighMem(page)) { dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } return map_single(dev, page_address(page) + offset, size, dir, attrs);
@@ -453,11 +453,6 @@ static int dmabounce_dma_supported(struct device *dev, u64 dma_mask) return arm_dma_ops.dma_supported(dev, dma_mask); } -static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return arm_dma_ops.mapping_error(dev, dma_addr); -} - static const struct dma_map_ops dmabounce_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free,
@@ -472,7 +467,6 @@ static const struct dma_map_ops dmabounce_ops = { .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, .dma_supported = dmabounce_dma_supported, - .mapping_error = dmabounce_mapping_error, }; static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,

diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 6821f1249300..772f48ef84b7 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -9,8 +9,6 @@ #include <linux/dma-debug.h> #include <linux/kref.h> -#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0) - struct dma_iommu_mapping { /* iommu specific data */ struct iommu_domain *domain;

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 965b7c846ecb..31d3b96f0f4b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops; + return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL; } #ifdef __arch_page_to_dma

diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 712416ecd8e6..f304b10e23a4 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -22,7 +22,7 @@ #include "dma.h" /* - * dma_direct_ops is used if + * The generic direct mapping code is used if * - MMU/MPU is off * - cpu is v7m w/o cache support * - device is coherent
@@ -209,16 +209,9 @@ const struct dma_map_ops arm_nommu_dma_ops = { }; EXPORT_SYMBOL(arm_nommu_dma_ops); -static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent) -{ - return coherent ? &dma_direct_ops : &arm_nommu_dma_ops; -} - void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - const struct dma_map_ops *dma_ops; - if (IS_ENABLED(CONFIG_CPU_V7M)) { /* * Cache support for v7m is optional, so can be treated as
@@ -234,7 +227,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true; } - dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent); - - set_dma_ops(dev, dma_ops); + if (!dev->archdata.dma_coherent) + set_dma_ops(dev, &arm_nommu_dma_ops); }

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 78de138aa66d..f1e2922e447c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -179,11 +179,6 @@ static void arm_dma_sync_single_for_device(struct device *dev, __dma_page_cpu_to_dev(page, offset, size, dir); } -static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == ARM_MAPPING_ERROR; -} - const struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free,
@@ -197,7 +192,6 @@ const struct dma_map_ops arm_dma_ops = { .sync_single_for_device = arm_dma_sync_single_for_device, .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_dma_ops);
@@ -217,7 +211,6 @@ const struct dma_map_ops arm_coherent_dma_ops = { .get_sgtable = arm_dma_get_sgtable, .map_page = arm_coherent_dma_map_page, .map_sg = arm_dma_map_sg, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -774,7 +767,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp &= ~(__GFP_COMP); args.gfp = gfp; - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; allowblock = gfpflags_allow_blocking(gfp); cma = allowblock ? dev_get_cma_area(dev) : false;
@@ -1217,7 +1210,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, if (i == mapping->nr_bitmaps) { if (extend_iommu_mapping(mapping)) { spin_unlock_irqrestore(&mapping->lock, flags); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1225,7 +1218,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, if (start > mapping->bits) { spin_unlock_irqrestore(&mapping->lock, flags); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } bitmap_set(mapping->bitmaps[i], start, count);
@@ -1409,7 +1402,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, int i; dma_addr = __alloc_iova(mapping, size); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == DMA_MAPPING_ERROR) return dma_addr; iova = dma_addr;
@@ -1436,7 +1429,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, fail: iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); __free_iova(mapping, dma_addr, size); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1497,7 +1490,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, return NULL; *handle = __iommu_create_mapping(dev, &page, size, attrs); - if (*handle == ARM_MAPPING_ERROR) + if (*handle == DMA_MAPPING_ERROR) goto err_mapping; return addr;
@@ -1525,7 +1518,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, struct page **pages; void *addr = NULL; - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; size = PAGE_ALIGN(size); if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1546,7 +1539,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, return NULL; *handle = __iommu_create_mapping(dev, pages, size, attrs); - if (*handle == ARM_MAPPING_ERROR) + if (*handle == DMA_MAPPING_ERROR) goto err_buffer; if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
@@ -1696,10 +1689,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, int prot; size = PAGE_ALIGN(size); - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; iova_base = iova = __alloc_iova(mapping, size); - if (iova == ARM_MAPPING_ERROR) + if (iova == DMA_MAPPING_ERROR) return -ENOMEM; for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1739,7 +1732,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, for (i = 1; i < nents; i++) { s = sg_next(s); - s->dma_address = ARM_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1914,7 +1907,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p int ret, prot, len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == DMA_MAPPING_ERROR) return dma_addr; prot = __dma_info_to_prot(dir, attrs);
@@ -1926,7 +1919,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p return dma_addr + offset; fail: __free_iova(mapping, dma_addr, len); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /**
@@ -2020,7 +2013,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, size_t len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == DMA_MAPPING_ERROR) return dma_addr; prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2032,7 +2025,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, return dma_addr + offset; fail: __free_iova(mapping, dma_addr, len); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /**
@@ -2105,7 +2098,6 @@ const struct dma_map_ops iommu_ops = { .map_resource = arm_iommu_map_resource, .unmap_resource = arm_iommu_unmap_resource, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, };
@@ -2124,7 +2116,6 @@ const struct dma_map_ops iommu_coherent_ops = { .map_resource = arm_iommu_map_resource, .unmap_resource = arm_iommu_unmap_resource, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, };

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6b7bf0fc190d..ff9291872372 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -23,7 +23,6 @@ config ARM64 select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY - select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -81,7 +80,7 @@ config ARM64 select CPU_PM if (SUSPEND || CPU_IDLE) select CRC32 select DCACHE_WORD_ACCESS - select DMA_DIRECT_OPS + select DMA_DIRECT_REMAP select EDAC_SUPPORT select FRAME_POINTER select GENERIC_ALLOCATOR

diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index c41f3fb1446c..95dbf3ef735a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -24,15 +24,9 @@ #include <xen/xen.h> #include <asm/xen/hypervisor.h> -extern const struct dma_map_ops dummy_dma_ops; - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - /* - * We expect no ISA devices, and all other DMA masters are expected to - * have someone call arch_setup_dma_ops at device creation time. - */ - return &dummy_dma_ops; + return NULL; } void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a53704406099..fb0908456a1f 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -33,113 +33,6 @@ #include <asm/cacheflush.h> -static struct gen_pool *atomic_pool __ro_after_init; - -#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K -static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE; - -static int __init early_coherent_pool(char *p) -{ - atomic_pool_size = memparse(p, &p); - return 0; -} -early_param("coherent_pool", early_coherent_pool); - -static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) -{ - unsigned long val; - void *ptr = NULL; - - if (!atomic_pool) { - WARN(1, "coherent pool not initialised!\n"); - return NULL; - } - - val = gen_pool_alloc(atomic_pool, size); - if (val) { - phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); - - *ret_page = phys_to_page(phys); - ptr = (void *)val; - memset(ptr, 0, size); - } - - return ptr; -} - -static bool __in_atomic_pool(void *start, size_t size) -{ - return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); -} - -static int __free_from_pool(void *start, size_t size) -{ - if (!__in_atomic_pool(start, size)) - return 0; - - gen_pool_free(atomic_pool, (unsigned long)start, size); - - return 1; -} - -void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t flags, unsigned long attrs) -{ - struct page *page; - void *ptr, *coherent_ptr; - pgprot_t prot = pgprot_writecombine(PAGE_KERNEL); - - size = PAGE_ALIGN(size); - - if (!gfpflags_allow_blocking(flags)) { - struct page *page = NULL; - void *addr = __alloc_from_pool(size, &page, flags); - - if (addr) - *dma_handle = phys_to_dma(dev, page_to_phys(page)); - - return addr; - } - - ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs); - if (!ptr) - goto no_mem; - - /* remove any dirty cache lines on the kernel alias */ - __dma_flush_area(ptr, size); - - /* create a coherent mapping */ - page = virt_to_page(ptr); - coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, - prot, __builtin_return_address(0)); - if (!coherent_ptr) - goto no_map; - - return coherent_ptr; - -no_map: - dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs); -no_mem: - return NULL; -} - -void arch_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) { - void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle)); - - vunmap(vaddr); - dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs); - } -} - -long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, - dma_addr_t dma_addr) -{ - return __phys_to_pfn(dma_to_phys(dev, dma_addr)); -} - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) {
@@ -160,6 +53,11 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, __dma_unmap_area(phys_to_virt(paddr), size, dir); } +void arch_dma_prep_coherent(struct page *page, size_t size) +{ + __dma_flush_area(page_address(page), size); +} + #ifdef CONFIG_IOMMU_DMA static int __swiotlb_get_sgtable_page(struct sg_table *sgt, struct page *page, size_t size)
@@ -191,167 +89,13 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma, } #endif /* CONFIG_IOMMU_DMA */ -static int __init atomic_pool_init(void) -{ - pgprot_t prot = __pgprot(PROT_NORMAL_NC); - unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT; - struct page *page; - void *addr; - unsigned int pool_size_order = get_order(atomic_pool_size); - - if (dev_get_cma_area(NULL)) - page = dma_alloc_from_contiguous(NULL, nr_pages, - pool_size_order, false); - else - page = alloc_pages(GFP_DMA32, pool_size_order); - - if (page) { - int ret; - void *page_addr = page_address(page); - - memset(page_addr, 0, atomic_pool_size); - __dma_flush_area(page_addr, atomic_pool_size); - - atomic_pool = gen_pool_create(PAGE_SHIFT, -1); - if (!atomic_pool) - goto free_page; - - addr = dma_common_contiguous_remap(page, atomic_pool_size, - VM_USERMAP, prot, atomic_pool_init); - - if (!addr) - goto destroy_genpool; - - ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr, - page_to_phys(page), - atomic_pool_size, -1); - if (ret) - goto remove_mapping; - - gen_pool_set_algo(atomic_pool, - gen_pool_first_fit_order_align, - NULL); - - pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n", - atomic_pool_size / 1024); - return 0; - } - goto out; - -remove_mapping: - dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP); -destroy_genpool: - gen_pool_destroy(atomic_pool); - atomic_pool = NULL; -free_page: - if (!dma_release_from_contiguous(NULL, page, nr_pages)) - __free_pages(page, pool_size_order); -out: - pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", - atomic_pool_size / 1024); - return -ENOMEM; -} - -/******************************************** - * The following APIs are for dummy DMA ops * - ********************************************/ - -static void *__dummy_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs) -{ - return NULL; -} - -static void __dummy_free(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) -{ -} - -static int __dummy_mmap(struct device *dev, - struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - return -ENXIO; -} - -static dma_addr_t __dummy_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - return 0; -} - -static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ -} - -static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - unsigned long attrs) -{ - return 0; -} - -static void __dummy_unmap_sg(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir, - unsigned long attrs) -{ -} - -static void __dummy_sync_single(struct device *dev, - dma_addr_t dev_addr, size_t size, - enum dma_data_direction dir) -{ -} - -static void __dummy_sync_sg(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir) -{ -} - -static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr) -{ - return 1; -} - -static int __dummy_dma_supported(struct device *hwdev, u64 mask) -{ - return 0; -} - -const struct dma_map_ops dummy_dma_ops = { - .alloc = __dummy_alloc, - .free = __dummy_free, - .mmap = __dummy_mmap, - .map_page = __dummy_map_page, - .unmap_page = __dummy_unmap_page, - .map_sg = __dummy_map_sg, - .unmap_sg = __dummy_unmap_sg, - .sync_single_for_cpu = __dummy_sync_single, - .sync_single_for_device = __dummy_sync_single, - .sync_sg_for_cpu = __dummy_sync_sg, - .sync_sg_for_device = __dummy_sync_sg, - .mapping_error = __dummy_mapping_error, - .dma_supported = __dummy_dma_supported, -}; -EXPORT_SYMBOL(dummy_dma_ops); - static int __init arm64_dma_init(void) { WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(), TAINT_CPU_OUT_OF_SPEC, "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)", ARCH_DMA_MINALIGN, cache_line_size()); - - return atomic_pool_init(); + return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC)); } arch_initcall(arm64_dma_init);
@@ -397,17 +141,17 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, page = alloc_pages(gfp, get_order(size)); addr = page ? page_address(page) : NULL; } else { - addr = __alloc_from_pool(size, &page, gfp); + addr = dma_alloc_from_pool(size, &page, gfp); } if (!addr) return NULL; *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); - if (iommu_dma_mapping_error(dev, *handle)) { + if (*handle == DMA_MAPPING_ERROR) { if (coherent) __free_pages(page, get_order(size)); else - __free_from_pool(addr, size); + dma_free_from_pool(addr, size); addr = NULL; } } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
@@ -420,7 +164,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, return NULL; *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot); - if (iommu_dma_mapping_error(dev, *handle)) { + if (*handle == DMA_MAPPING_ERROR) { dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); return NULL;
@@ -471,9 +215,9 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, * coherent devices. * Hence how dodgy the below logic looks... */ - if (__in_atomic_pool(cpu_addr, size)) { + if (dma_in_atomic_pool(cpu_addr, size)) { iommu_dma_unmap_page(dev, handle, iosize, 0, 0); - __free_from_pool(cpu_addr, size); + dma_free_from_pool(cpu_addr, size); } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { struct page *page = vmalloc_to_page(cpu_addr);
@@ -580,7 +324,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - !iommu_dma_mapping_error(dev, dev_addr)) + dev_addr != DMA_MAPPING_ERROR) __dma_map_area(page_address(page) + offset, size, dir); return dev_addr;
@@ -663,7 +407,6 @@ static const struct dma_map_ops iommu_dma_ops = { .sync_sg_for_device = __iommu_sync_sg_for_device, .map_resource = iommu_dma_map_resource, .unmap_resource = iommu_dma_unmap_resource, - .mapping_error = iommu_dma_mapping_error, }; static int __init __iommu_dma_init(void)
@@ -719,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - if (!dev->dma_ops) - dev->dma_ops = &swiotlb_dma_ops; - dev->dma_coherent = coherent; __iommu_setup_dma_ops(dev, dma_base, size, iommu);

diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 84420109113d..456e154674d1 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -9,7 +9,6 @@ config C6X select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select CLKDEV_LOOKUP - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK

diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 01305c787201..75b79571732c 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -78,6 +78,7 @@ static void __free_dma_pages(u32 addr, int order) void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { + void *ret; u32 paddr; int order;
@@ -94,7 +95,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, if (!paddr) return NULL; - return phys_to_virt(paddr); + ret = phys_to_virt(paddr); + memset(ret, 0, 1 << order); + return ret; } /*

diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index cb64f8dacd08..37bed8aadf95 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -7,8 +7,7 @@ config CSKY select COMMON_CLK select CLKSRC_MMIO select CLKSRC_OF - select DMA_DIRECT_OPS - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_REMAP select IRQ_DOMAIN select HANDLE_DOMAIN_IRQ select DW_APB_TIMER_OF

diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index 85437b21e045..80783bb71c5c 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -14,73 +14,13 @@ #include <linux/version.h> #include <asm/cache.h> -static struct gen_pool *atomic_pool; -static size_t atomic_pool_size __initdata = SZ_256K; - -static int __init early_coherent_pool(char *p) -{ - atomic_pool_size = memparse(p, &p); - return 0; -} -early_param("coherent_pool", early_coherent_pool); - static int __init atomic_pool_init(void) { - struct page *page; - size_t size = atomic_pool_size; - void *ptr; - int ret; - - atomic_pool = gen_pool_create(PAGE_SHIFT, -1); - if (!atomic_pool) - BUG(); - - page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size)); - if (!page) - BUG(); - - ptr = dma_common_contiguous_remap(page, size, VM_ALLOC, - pgprot_noncached(PAGE_KERNEL), - __builtin_return_address(0)); - if (!ptr) - BUG(); - - ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr, - page_to_phys(page), atomic_pool_size, -1); - if (ret) - BUG(); - - gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL); - - pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n", - atomic_pool_size / 1024); - - pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr, - page_to_phys(page)); - - return 0; + return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL)); } postcore_initcall(atomic_pool_init); -static void *csky_dma_alloc_atomic(struct device *dev, size_t size, - dma_addr_t *dma_handle) -{ - unsigned long addr; - - addr = gen_pool_alloc(atomic_pool, size); - if (addr) - *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr); - - return (void *)addr; -} - -static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - gen_pool_free(atomic_pool, (unsigned long)vaddr, size); -} - -static void __dma_clear_buffer(struct page *page, size_t size) +void arch_dma_prep_coherent(struct page *page, size_t size) { if (PageHighMem(page)) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size) } } -static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs) -{ - void *vaddr; - struct page *page; - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - - if (DMA_ATTR_NON_CONSISTENT & attrs) { - pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__); - return NULL; - } - - if (IS_ENABLED(CONFIG_DMA_CMA)) - page = dma_alloc_from_contiguous(dev, count, get_order(size), - gfp); - else - page = alloc_pages(gfp, get_order(size)); - - if (!page) { - pr_err("csky %s no more free pages.\n", __func__); - return NULL; - } - - *dma_handle = page_to_phys(page); - - __dma_clear_buffer(page, size); - - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) - return page; - - vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP, - pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0)); - if (!vaddr) - BUG(); - - return vaddr; -} - -static void csky_dma_free_nonatomic( - struct device *dev, - size_t size, - void *vaddr, - dma_addr_t dma_handle, - unsigned long attrs - ) -{ - struct page *page = phys_to_page(dma_handle); - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - - if ((unsigned int)vaddr >= VMALLOC_START) - dma_common_free_remap(vaddr, size, VM_USERMAP); - - if (IS_ENABLED(CONFIG_DMA_CMA)) - dma_release_from_contiguous(dev, page, count); - else - __free_pages(page, get_order(size)); -} - -void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - if (gfpflags_allow_blocking(gfp)) - return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp, - attrs); - else - return csky_dma_alloc_atomic(dev, size, dma_handle); -} - -void arch_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size)) - csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs); - else - csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs); -} - static inline void cache_op(phys_addr_t paddr, size_t size, void (*fn)(unsigned long start, unsigned long end)) {

diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index d19c6b16cd5d..6472a0685470 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -22,7 +22,6 @@ config H8300 select HAVE_ARCH_KGDB select HAVE_ARCH_HASH select CPU_NO_EFFICIENT_FFS - select DMA_DIRECT_OPS config CPU_BIG_ENDIAN def_bool y

diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 2b688af379e6..d71036c598de 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -31,7 +31,6 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES - select DMA_DIRECT_OPS ---help--- Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications.
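For architectures converted to the generic remapping allocator (csky above, arm64 further up), the per-arch glue shrinks to an atomic-pool init call plus a hook that makes freshly allocated pages cache-clean. A schematic of that pattern follows; the cache-flush helper name is hypothetical, since the actual primitive differs per architecture (csky flushes via its cache_op() helpers, arm64 via __dma_flush_area()):

```c
/* Schematic per-arch glue for the generic remapping DMA allocator.
 * dma_atomic_pool_init() sets up the pool used for non-blocking
 * allocations; arch_dma_prep_coherent() is called on each newly
 * allocated buffer before it is remapped uncached.
 */
static int __init sketch_atomic_pool_init(void)
{
	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(sketch_atomic_pool_init);

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/* arch_flush_dcache_area() stands in for the per-arch flush
	 * primitive; it must write back and invalidate the kernel alias. */
	arch_flush_dcache_area(page_address(page), size);
}
```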
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 36773def6920..cbf6c67c7166 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -28,8 +28,8 @@ config IA64 select HAVE_ARCH_TRACEHOOK select HAVE_MEMBLOCK_NODE_MAP select HAVE_VIRT_CPU_ACCOUNTING - select ARCH_HAS_DMA_MARK_CLEAN - select ARCH_HAS_SG_CHAIN + select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB + select ARCH_HAS_SYNC_DMA_FOR_CPU select VIRT_TO_BUS select ARCH_DISCARD_MEMBLOCK select GENERIC_IRQ_PROBE

diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 58969039bed2..8840ed97712f 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -38,7 +38,7 @@ static inline int use_swiotlb(struct device *dev) const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) { if (use_swiotlb(dev)) - return &swiotlb_dma_ops; + return NULL; return &sba_dma_ops; } EXPORT_SYMBOL(hwsw_dma_get_ops);

diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e8a93b07283e..5a361e51cb1e 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -907,11 +907,12 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) } /** - * sba_map_single_attrs - map one buffer and return IOVA for DMA + * sba_map_page - map one buffer and return IOVA for DMA * @dev: instance of PCI owned by the driver that's asking. - * @addr: driver buffer to map. - * @size: number of bytes to map in driver buffer. - * @dir: R/W or both. + * @page: page to map + * @poff: offset into page + * @size: number of bytes to map + * @dir: dma direction * @attrs: optional dma attributes * * See Documentation/DMA-API-HOWTO.txt
@@ -944,7 +945,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, ** Device is bit capable of DMA'ing to the buffer... ** just return the PCI address of ptr */ - DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: " + DBG_BYPASS("sba_map_page() bypass mask/addr: " "0x%lx/0x%lx\n", to_pci_dev(dev)->dma_mask, pci_addr); return pci_addr;
@@ -966,14 +967,14 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); - if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()")) + if (sba_check_pdir(ioc,"Check before sba_map_page()")) panic("Sanity check failed"); spin_unlock_irqrestore(&ioc->res_lock, flags); #endif pide = sba_alloc_range(ioc, dev, size); if (pide < 0) - return 0; + return DMA_MAPPING_ERROR; iovp = (dma_addr_t) pide << iovp_shift;
@@ -997,20 +998,12 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, /* form complete address */ #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); - sba_check_pdir(ioc,"Check after sba_map_single_attrs()"); + sba_check_pdir(ioc,"Check after sba_map_page()"); spin_unlock_irqrestore(&ioc->res_lock, flags); #endif return SBA_IOVA(ioc, iovp, offset); } -static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - return sba_map_page(dev, virt_to_page(addr), - (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); -} - #ifdef ENABLE_MARK_CLEAN static SBA_INLINE void sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
@@ -1036,7 +1029,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) #endif /** - * sba_unmap_single_attrs - unmap one IOVA and free resources + * sba_unmap_page - unmap one IOVA and free resources * @dev: instance of PCI owned by the driver that's asking. * @iova: IOVA of driver buffer previously mapped. * @size: number of bytes mapped in driver buffer.
@@ -1063,7 +1056,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, /* ** Address does not fall w/in IOVA, must be bypassing */ - DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n", + DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n", iova); #ifdef ENABLE_MARK_CLEAN
@@ -1114,12 +1107,6 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, #endif /* DELAYED_RESOURCE_CNT == 0 */ } -void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - sba_unmap_page(dev, iova, size, dir, attrs); -} - /** * sba_alloc_coherent - allocate/map shared mem for DMA * @dev: instance of PCI owned by the driver that's asking.
@@ -1132,30 +1119,24 @@ static void * sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { + struct page *page; struct ioc *ioc; + int node = -1; void *addr; ioc = GET_IOC(dev); ASSERT(ioc); - #ifdef CONFIG_NUMA - { - struct page *page; - - page = alloc_pages_node(ioc->node, flags, get_order(size)); - if (unlikely(!page)) - return NULL; - - addr = page_address(page); - } -#else - addr = (void *) __get_free_pages(flags, get_order(size)); + node = ioc->node; #endif - if (unlikely(!addr)) + + page = alloc_pages_node(node, flags, get_order(size)); + if (unlikely(!page)) return NULL; + addr = page_address(page); memset(addr, 0, size); - *dma_handle = virt_to_phys(addr); + *dma_handle = page_to_phys(page); #ifdef ALLOW_IOV_BYPASS ASSERT(dev->coherent_dma_mask);
@@ -1174,9 +1155,10 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, * If device can't bypass or bypass is disabled, pass the 32bit fake * device to map single to get an iova mapping. */ - *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, - size, 0, 0); - + *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size, + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dev, *dma_handle)) + return NULL; return addr; }
@@ -1193,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { - sba_unmap_single_attrs(dev, dma_handle, size, 0, 0); + sba_unmap_page(dev, dma_handle, size, 0, 0); free_pages((unsigned long) vaddr, get_order(size)); }
@@ -1483,7 +1465,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, /* Fast path single entry scatterlists. */ if (nents == 1) { sglist->dma_length = sglist->length; - sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs); + sglist->dma_address = sba_map_page(dev, sg_page(sglist), + sglist->offset, sglist->length, dir, attrs); + if (dma_mapping_error(dev, sglist->dma_address)) + return 0; return 1; }
@@ -1572,8 +1557,8 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, while (nents && sglist->dma_length) { - sba_unmap_single_attrs(dev, sglist->dma_address, - sglist->dma_length, dir, attrs); + sba_unmap_page(dev, sglist->dma_address, sglist->dma_length, + dir, attrs); sglist = sg_next(sglist); nents--; }
@@ -2080,8 +2065,6 @@ static int __init acpi_sba_ioc_init_acpi(void) /* This has to run before acpi_scan_init(). */ arch_initcall(acpi_sba_ioc_init_acpi); -extern const struct dma_map_ops swiotlb_dma_ops; - static int __init sba_init(void) {
@@ -2095,7 +2078,7 @@ sba_init(void) * a successful kdump kernel boot is to use the swiotlb. */ if (is_kdump_kernel()) { - dma_ops = &swiotlb_dma_ops; + dma_ops = NULL; if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) panic("Unable to initialize software I/O TLB:" " Try machvec=dig boot option");
@@ -2117,7 +2100,7 @@ sba_init(void) * If we didn't find something sba_iommu can claim, we * need to setup the swiotlb and switch to the dig machvec. */ - dma_ops = &swiotlb_dma_ops; + dma_ops = NULL; if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) panic("Unable to find SBA IOMMU or initialize " "software I/O TLB: Try machvec=dig boot option");
@@ -2170,11 +2153,6 @@ static int sba_dma_supported (struct device *dev, u64 mask) return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); } -static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - __setup("nosbagart", nosbagart); static int __init
@@ -2208,7 +2186,6 @@ const struct dma_map_ops sba_dma_ops = { .map_sg = sba_map_sg_attrs, .unmap_sg = sba_unmap_sg_attrs, .dma_supported = sba_dma_supported, - .mapping_error = sba_dma_mapping_error, }; void sba_dma_init(void)

diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 7a471d8d67d4..ad7d9963de34 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/dma-mapping.h> +#include <linux/dma-direct.h> #include <linux/swiotlb.h> #include <linux/export.h>
@@ -16,9 +16,26 @@ const struct dma_map_ops *dma_get_ops(struct device *dev) EXPORT_SYMBOL(dma_get_ops); #ifdef CONFIG_SWIOTLB +void *arch_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +{ + return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); +} + +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs) +{ + dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); +} + +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr) +{ + return page_to_pfn(virt_to_page(cpu_addr)); +} + void __init swiotlb_dma_init(void) { - dma_ops = &swiotlb_dma_ops; swiotlb_init(1); } #endif

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d5e12ff1d73c..0cf43bb13d6e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/init.h> +#include <linux/dma-noncoherent.h> #include <linux/efi.h> #include <linux/elf.h> #include <linux/memblock.h>
@@ -71,18 +72,14 @@ __ia64_sync_icache_dcache (pte_t pte) * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to * flush them when they get mapped into an executable vm-area. */ -void -dma_mark_clean(void *addr, size_t size) +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - unsigned long pg_addr, end; - - pg_addr = PAGE_ALIGN((unsigned long) addr); - end = (unsigned long) addr + size; - while (pg_addr + PAGE_SIZE <= end) { - struct page *page = virt_to_page(pg_addr); - set_bit(PG_arch_1, &page->flags); - pg_addr += PAGE_SIZE; - } + unsigned long pfn = PHYS_PFN(paddr); + + do { + set_bit(PG_arch_1, &pfn_to_page(pfn)->flags); + } while (++pfn <= PHYS_PFN(paddr + size - 1)); } inline void

diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 4ce4ee4ef9f2..b7d42e4edc1f 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -196,7 +196,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, if (!dma_addr) { printk(KERN_ERR "%s: out of ATEs\n", __func__); - return 0; + return DMA_MAPPING_ERROR; } return dma_addr; }
@@ -314,11 +314,6 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, return nhwentries; } -static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - static u64 sn_dma_get_required_mask(struct device *dev) { return DMA_BIT_MASK(64);
@@ -441,7 +436,6 @@ static struct dma_map_ops sn_dma_ops = { .unmap_page = sn_dma_unmap_page, .map_sg = sn_dma_map_sg, .unmap_sg = sn_dma_unmap_sg, - .mapping_error = sn_dma_mapping_error, .dma_supported = sn_dma_supported, .get_required_mask = sn_dma_get_required_mask, };

diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 1bc9f1ba759a..8a5868e9a3a0 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -26,7 +26,6 @@ config M68K select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 select OLD_SIGACTION - select DMA_DIRECT_OPS if HAS_DMA select ARCH_DISCARD_MEMBLOCK config CPU_BIG_ENDIAN

diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index e99993c57d6b..b4aa853051bd 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -32,7 +32,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, size = PAGE_ALIGN(size); order = get_order(size); - page = alloc_pages(flag, order); + page = alloc_pages(flag | __GFP_ZERO, order); if (!page) return NULL;

diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index effed2efd306..eda9e2315ef5 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -12,7 +12,6 @@ config MICROBLAZE select TIMER_OF select CLONE_BACKWARDS3 select COMMON_CLK - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES

diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 45e0a1aa9357..3002cbca3059 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -81,7 +81,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, size = PAGE_ALIGN(size); order = get_order(size); - vaddr = __get_free_pages(gfp, order); + vaddr = __get_free_pages(gfp | __GFP_ZERO, order); if (!vaddr) return NULL;

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e49b5a0c8585..63183a8454d6 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -18,7 +18,6 @@ config MIPS select CLONE_BACKWARDS select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1) select CPU_PM if CPU_IDLE - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 if !64BIT select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE

diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index b4c477eb46ce..20dfaad3a55d 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -10,10 +10,8 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #if defined(CONFIG_MACH_JAZZ) return &jazz_dma_ops; -#elif defined(CONFIG_SWIOTLB) - return &swiotlb_dma_ops; #else - return &dma_direct_ops; + return NULL; #endif }

diff --git a/arch/mips/include/asm/jazzdma.h b/arch/mips/include/asm/jazzdma.h
index d913439c738c..d13f940022d5 100644
--- a/arch/mips/include/asm/jazzdma.h
+++ b/arch/mips/include/asm/jazzdma.h
@@ -40,12 +40,6 @@ extern int vdma_get_enable(int channel); #define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1)) /* - * error code returned by vdma_alloc() - * (See also arch/mips/kernel/jazzdma.c) - */ -#define VDMA_ERROR 0xffffffff - -/* * VDMA pagetable entry description */ typedef volatile struct VDMA_PGTBL_ENTRY {

diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 4c41ed0a637e..6256d35dbf4d 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -104,12 +104,12 @@ unsigned long vdma_alloc(unsigned long paddr, unsigned long size) if (vdma_debug) printk("vdma_alloc: Invalid physical address: %08lx\n", paddr); - return VDMA_ERROR; /* invalid physical address */ + return DMA_MAPPING_ERROR; /* invalid physical address */ } if (size > 0x400000 || size == 0) { if (vdma_debug) printk("vdma_alloc: Invalid size: %08lx\n", size); - return VDMA_ERROR; /* invalid physical address */ + return DMA_MAPPING_ERROR; /* invalid physical address */ } spin_lock_irqsave(&vdma_lock, flags);
@@ -123,7 +123,7 @@ unsigned long vdma_alloc(unsigned long paddr, unsigned long size) first < VDMA_PGTBL_ENTRIES) first++; if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */ spin_unlock_irqrestore(&vdma_lock, flags); - return VDMA_ERROR; + return DMA_MAPPING_ERROR; } last = first + 1;
@@ -569,7 +569,7 @@ static void *jazz_dma_alloc(struct device *dev, size_t size, return NULL; *dma_handle = vdma_alloc(virt_to_phys(ret), size); - if (*dma_handle == VDMA_ERROR) { + if (*dma_handle == DMA_MAPPING_ERROR) { dma_direct_free_pages(dev, size, ret, *dma_handle, attrs); return NULL; }
@@ -620,7 +620,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist, arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); sg->dma_address = vdma_alloc(sg_phys(sg), sg->length); - if (sg->dma_address == VDMA_ERROR) + if (sg->dma_address == DMA_MAPPING_ERROR) return 0; sg_dma_len(sg) = sg->length; }
@@ -674,11 +674,6 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev, arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); } -static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == VDMA_ERROR; -} - const struct dma_map_ops jazz_dma_ops = { .alloc = jazz_dma_alloc, .free = jazz_dma_free,
@@ -692,6 +687,5 @@ const struct dma_map_ops jazz_dma_ops = { .sync_sg_for_device = jazz_dma_sync_sg_for_device, .dma_supported = dma_direct_supported, .cache_sync = arch_dma_cache_sync, - .mapping_error = jazz_dma_mapping_error, }; EXPORT_SYMBOL(jazz_dma_ops);

diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 7a04adacb2f0..1af6bbae7220 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -11,7 +11,6 @@ config NDS32 select CLKSRC_MMIO select CLONE_BACKWARDS select COMMON_CLK - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS

diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 7e95506e957a..f6c4b0f49997 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -4,7 +4,6 @@ config NIOS2 select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_SWAP - select DMA_DIRECT_OPS select TIMER_OF select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS

diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 285f7d05c8ed..d0feebad5a8f 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -7,7 +7,6 @@ config OPENRISC def_bool y select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_DIRECT_OPS select OF select OF_EARLY_FLATTREE select IRQ_DOMAIN

diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 159336adfa2f..f79457cb3741 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -89,7 +89,7 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, .mm = &init_mm }; - page = alloc_pages_exact(size, gfp); + page = alloc_pages_exact(size, gfp | __GFP_ZERO); if (!page) return NULL;

diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 92a339ee28b3..6e1b71da0e71 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,6 +11,7 @@ config PARISC select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_MEMORY_FAILURE select RTC_CLASS select RTC_DRV_GENERIC
@@ -184,7 +185,6 @@ config PA11 depends on PA7000 || PA7100LC || PA7200 || PA7300LC select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_DIRECT_OPS select DMA_NONCOHERENT_CACHE_SYNC config PREFETCH

diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 04c48f1ef3fb..239162355b58 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -404,7 +404,7 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size, order = get_order(size); size = 1 << (order + PAGE_SHIFT); vaddr = pcxl_alloc_range(size); - paddr = __get_free_pages(flag, order); + paddr = __get_free_pages(flag | __GFP_ZERO, order); flush_kernel_dcache_range(paddr, size); paddr = __pa(paddr); map_uncached_pages(vaddr, size, paddr);
@@ -429,7 +429,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size, if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) return NULL; - addr = (void *)__get_free_pages(flag, get_order(size)); + addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size)); if (addr) *dma_handle = (dma_addr_t)virt_to_phys(addr);

diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 2b108ee3b217..f2cf86ac279b 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -99,10 +99,6 @@ void __init dma_ops_init(void) case pcxl2: pa7300lc_init(); - case pcxl: /* falls through */ - case pcxs: - case pcxt: - hppa_dma_ops = &dma_direct_ops; break; default: break; }

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a1e858e42ada..50f27a656051 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -139,7 +139,6 @@ config PPC select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_MEMBARRIER_CALLBACKS select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64 - select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE if PPC64

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index dacd0f93f2b2..ebf66809f2d3 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -103,7 +103,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) } #define HAVE_ARCH_DMA_SET_MASK 1 -extern int dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev);

diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index e847ff69cb2b..17524d222a7b 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -143,8 +143,6 @@ struct scatterlist; #ifdef CONFIG_PPC64 -#define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0) - static inline void set_iommu_table_base(struct device *dev, struct iommu_table *base) {
@@ -239,8 +237,6 @@ static inline void iommu_del_device(struct device *dev) } #endif /* !CONFIG_IOMMU_API */ -int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr); - #else static inline void *get_iommu_table_base(struct device *dev)

diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 2ca6cfaebf65..9c9bcaae2f75 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -105,11 +105,6 @@ static u64 dma_iommu_get_required_mask(struct device *dev) return mask; } -int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == IOMMU_MAPPING_ERROR; -} - struct dma_map_ops dma_iommu_ops = { .alloc = dma_iommu_alloc_coherent, .free = dma_iommu_free_coherent,
@@ -120,5 +115,4 @@ struct dma_map_ops dma_iommu_ops = { .map_page = dma_iommu_map_page, .unmap_page = dma_iommu_unmap_page, .get_required_mask = dma_iommu_get_required_mask, - .mapping_error = dma_iommu_mapping_error, };

diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 678811abccfc..7d5fc9751622 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -50,16 +50,15 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = { .alloc = __dma_nommu_alloc_coherent, .free = __dma_nommu_free_coherent, .mmap = dma_nommu_mmap_coherent, - .map_sg = swiotlb_map_sg_attrs, - .unmap_sg = swiotlb_unmap_sg_attrs, + .map_sg = dma_direct_map_sg, + .unmap_sg = dma_direct_unmap_sg, .dma_supported = swiotlb_dma_supported, - .map_page = swiotlb_map_page, - .unmap_page = swiotlb_unmap_page, - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, - .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = swiotlb_sync_sg_for_device, - .mapping_error = dma_direct_mapping_error, + .map_page = dma_direct_map_page, + .unmap_page = dma_direct_unmap_page, + .sync_single_for_cpu = dma_direct_sync_single_for_cpu, + .sync_single_for_device = dma_direct_sync_single_for_device, + .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, + .sync_sg_for_device = dma_direct_sync_sg_for_device, .get_required_mask = swiotlb_powerpc_get_required, };

diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 9d5d109f15c0..d0625480b59e 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev, if (unlikely(npages == 0)) { if (printk_ratelimit()) WARN_ON(1); - return IOMMU_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } if (should_fail_iommu(dev)) - return IOMMU_MAPPING_ERROR; + return DMA_MAPPING_ERROR; /* * We don't need to disable preemption here because any CPU can
@@ -278,7 +278,7 @@ again: } else { /* Give up */ spin_unlock_irqrestore(&(pool->lock), flags); - return IOMMU_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } }
@@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, unsigned long attrs) { unsigned long entry; - dma_addr_t ret = IOMMU_MAPPING_ERROR; + dma_addr_t ret = DMA_MAPPING_ERROR; int build_fail; entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); - if (unlikely(entry == IOMMU_MAPPING_ERROR)) - return IOMMU_MAPPING_ERROR; + if (unlikely(entry == DMA_MAPPING_ERROR)) - return DMA_MAPPING_ERROR; entry += tbl->it_offset; /* Offset into real TCE table */ ret = entry << tbl->it_page_shift; /* Set the return dma address */
@@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, /* tbl->it_ops->set() only returns non-zero for transient errors. * Clean up the table bitmap in this case and return - * IOMMU_MAPPING_ERROR. For all other errors the functionality is + * DMA_MAPPING_ERROR. For all other errors the functionality is * not altered. */ if (unlikely(build_fail)) { __iommu_free(tbl, ret, npages); - return IOMMU_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /* Flush/invalidate TLB caches if necessary */
@@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); /* Handle failure */ - if (unlikely(entry == IOMMU_MAPPING_ERROR)) { + if (unlikely(entry == DMA_MAPPING_ERROR)) { if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, */ if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = IOMMU_MAPPING_ERROR; + outs->dma_address = DMA_MAPPING_ERROR; outs->dma_length = 0; }
@@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, npages = iommu_num_pages(s->dma_address, s->dma_length, IOMMU_PAGE_SIZE(tbl)); __iommu_free(tbl, vaddr, npages); - s->dma_address = IOMMU_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; } if (s == outs)
@@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, unsigned long mask, enum dma_data_direction direction, unsigned long attrs) { - dma_addr_t dma_handle = IOMMU_MAPPING_ERROR; + dma_addr_t dma_handle = DMA_MAPPING_ERROR; void *vaddr; unsigned long uaddr; unsigned int npages, align;
@@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, mask >> tbl->it_page_shift, align, attrs); - if (dma_handle == IOMMU_MAPPING_ERROR) { + if (dma_handle == DMA_MAPPING_ERROR) { if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, io_order = get_iommu_order(size, tbl); mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, mask >> tbl->it_page_shift, io_order, 0); - if (mapping == IOMMU_MAPPING_ERROR) { + if (mapping == DMA_MAPPING_ERROR) { free_pages((unsigned long)ret, order); return NULL; }

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 12352a58072a..af2a3c15e0ec 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -654,7 +654,6 @@ static const struct dma_map_ops dma_iommu_fixed_ops = { .dma_supported = dma_suported_and_switch, .map_page
= dma_fixed_map_page, .unmap_page = dma_fixed_unmap_page, - .mapping_error = dma_iommu_mapping_error, }; static void cell_dma_dev_setup(struct device *dev) diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 93cc9eec6601..1fad4649735b 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, { struct vio_dev *viodev = to_vio_dev(dev); struct iommu_table *tbl; - dma_addr_t ret = IOMMU_MAPPING_ERROR; + dma_addr_t ret = DMA_MAPPING_ERROR; tbl = get_iommu_table_base(dev); if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { @@ -625,7 +625,6 @@ static const struct dma_map_ops vio_dma_mapping_ops = { .unmap_page = vio_dma_iommu_unmap_page, .dma_supported = vio_dma_iommu_dma_supported, .get_required_mask = vio_dma_get_required_mask, - .mapping_error = dma_iommu_mapping_error, }; /** diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index ee833e6f5ccb..106539bb914e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -19,7 +19,6 @@ config RISCV select ARCH_WANT_FRAME_POINTERS select CLONE_BACKWARDS select COMMON_CLK - select DMA_DIRECT_OPS select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES select GENERIC_IRQ_SHOW diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h deleted file mode 100644 index 8facc1c8fa05..000000000000 --- a/arch/riscv/include/asm/dma-mapping.h +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#ifndef _RISCV_ASM_DMA_MAPPING_H -#define _RISCV_ASM_DMA_MAPPING_H 1 - -#ifdef CONFIG_SWIOTLB -#include <linux/swiotlb.h> -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ - return &swiotlb_dma_ops; -} -#else -#include <asm-generic/dma-mapping.h> -#endif /* CONFIG_SWIOTLB */ - -#endif /* _RISCV_ASM_DMA_MAPPING_H */ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 5173366af8f3..21d271d04ca6 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -73,7 +73,6 @@ config S390 select ARCH_HAS_KCOV select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_MEMORY - select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL @@ -140,7 +139,6 @@ config S390 select HAVE_COPY_THREAD_TLS select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS - select DMA_DIRECT_OPS select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index d387a0fbdd7e..9e52d1527f71 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -15,8 +15,6 @@ #include <linux/pci.h> #include <asm/pci_dma.h> -#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0) - static struct kmem_cache *dma_region_table_cache; static struct kmem_cache *dma_page_table_cache; static int s390_iommu_strict; @@ -301,7 +299,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size) out_error: spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); - return S390_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size) @@ -349,7 +347,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, /* This rounds up number of pages based on size and offset */ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); dma_addr = dma_alloc_address(dev, nr_pages); - if (dma_addr == S390_MAPPING_ERROR) { + if 
(dma_addr == DMA_MAPPING_ERROR) { ret = -ENOSPC; goto out_err; } @@ -372,7 +370,7 @@ out_free: out_err: zpci_err("map error:\n"); zpci_err_dma(ret, pa); - return S390_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, @@ -406,7 +404,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, dma_addr_t map; size = PAGE_ALIGN(size); - page = alloc_pages(flag, get_order(size)); + page = alloc_pages(flag | __GFP_ZERO, get_order(size)); if (!page) return NULL; @@ -449,7 +447,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg, int ret; dma_addr_base = dma_alloc_address(dev, nr_pages); - if (dma_addr_base == S390_MAPPING_ERROR) + if (dma_addr_base == DMA_MAPPING_ERROR) return -ENOMEM; dma_addr = dma_addr_base; @@ -496,7 +494,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, for (i = 1; i < nr_elements; i++) { s = sg_next(s); - s->dma_address = S390_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; if (s->offset || (size & ~PAGE_MASK) || @@ -546,11 +544,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, } } -static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == S390_MAPPING_ERROR; -} - int zpci_dma_init_device(struct zpci_dev *zdev) { int rc; @@ -675,7 +668,6 @@ const struct dma_map_ops s390_pci_dma_ops = { .unmap_sg = s390_dma_unmap_sg, .map_page = s390_dma_map_pages, .unmap_page = s390_dma_unmap_pages, - .mapping_error = s390_mapping_error, /* dma_supported is unconditionally true without a callback */ }; EXPORT_SYMBOL_GPL(s390_pci_dma_ops); diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f82a4da7adf3..10fd4e9c454b 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -7,7 +7,6 @@ config SUPERH select ARCH_NO_COHERENT_DMA_MMAP if !MMU select HAVE_PATA_PLATFORM select CLKDEV_LOOKUP - select DMA_DIRECT_OPS select HAVE_IDE if HAS_IOPORT_MAP select HAVE_MEMBLOCK_NODE_MAP select ARCH_DISCARD_MEMBLOCK diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 490b2c95c212..f5bb9ded1d18 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -40,7 +40,6 @@ config SPARC select MODULES_USE_ELF_RELA select ODD_RT_SIGACTION select OLD_SIGSUSPEND - select ARCH_HAS_SG_CHAIN select CPU_NO_EFFICIENT_FFS select LOCKDEP_SMALL if LOCKDEP select NEED_DMA_MAP_STATE @@ -49,7 +48,6 @@ config SPARC config SPARC32 def_bool !64BIT select ARCH_HAS_SYNC_DMA_FOR_CPU - select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select CLZ_TAB select HAVE_UID16 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index b0bb2fcaf1c9..ed32845bd2d2 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -2,9 +2,7 @@ #ifndef ___ASM_SPARC_DMA_MAPPING_H #define ___ASM_SPARC_DMA_MAPPING_H -#include <linux/scatterlist.h> -#include <linux/mm.h> -#include <linux/dma-debug.h> +#include <asm/cpu_type.h> extern const struct dma_map_ops *dma_ops; @@ -14,11 +12,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) - return &dma_direct_ops; + return NULL; #endif #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) if (bus == &pci_bus_type) - return &dma_direct_ops; + return NULL; #endif return dma_ops; } diff --git a/arch/sparc/include/asm/dma.h b/arch/sparc/include/asm/dma.h index a1d7c86917c6..462e7c794a09 100644 --- a/arch/sparc/include/asm/dma.h +++ 
b/arch/sparc/include/asm/dma.h @@ -91,54 +91,10 @@ extern int isa_dma_bridge_buggy; #endif #ifdef CONFIG_SPARC32 - -/* Routines for data transfer buffers. */ struct device; -struct scatterlist; - -struct sparc32_dma_ops { - __u32 (*get_scsi_one)(struct device *, char *, unsigned long); - void (*get_scsi_sgl)(struct device *, struct scatterlist *, int); - void (*release_scsi_one)(struct device *, __u32, unsigned long); - void (*release_scsi_sgl)(struct device *, struct scatterlist *,int); -#ifdef CONFIG_SBUS - int (*map_dma_area)(struct device *, dma_addr_t *, unsigned long, unsigned long, int); - void (*unmap_dma_area)(struct device *, unsigned long, int); -#endif -}; -extern const struct sparc32_dma_ops *sparc32_dma_ops; - -#define mmu_get_scsi_one(dev,vaddr,len) \ - sparc32_dma_ops->get_scsi_one(dev, vaddr, len) -#define mmu_get_scsi_sgl(dev,sg,sz) \ - sparc32_dma_ops->get_scsi_sgl(dev, sg, sz) -#define mmu_release_scsi_one(dev,vaddr,len) \ - sparc32_dma_ops->release_scsi_one(dev, vaddr,len) -#define mmu_release_scsi_sgl(dev,sg,sz) \ - sparc32_dma_ops->release_scsi_sgl(dev, sg, sz) - -#ifdef CONFIG_SBUS -/* - * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep. - * - * The mmu_map_dma_area establishes two mappings in one go. - * These mappings point to pages normally mapped at 'va' (linear address). - * First mapping is for CPU visible address at 'a', uncached. - * This is an alias, but it works because it is an uncached mapping. - * Second mapping is for device visible address, or "bus" address. - * The bus address is returned at '*pba'. - * - * These functions seem distinct, but are hard to split. - * On sun4m, page attributes depend on the CPU type, so we have to - * know if we are mapping RAM or I/O, so it has to be an additional argument - * to a separate mapping function for CPU visible mappings. - */ -#define sbus_map_dma_area(dev,pba,va,a,len) \ - sparc32_dma_ops->map_dma_area(dev, pba, va, a, len) -#define sbus_unmap_dma_area(dev,ba,len) \ - sparc32_dma_ops->unmap_dma_area(dev, ba, len) -#endif /* CONFIG_SBUS */ +unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len); +bool sparc_dma_free_resource(void *cpu_addr, size_t size); #endif #endif /* !(_ASM_SPARC_DMA_H) */ diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index 8c01f0f6b1ed..c1e05e4ab9e3 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h @@ -254,4 +254,13 @@ extern int leon_ipi_irq; #define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base))) #define _SRMMU_PTE_PMASK_LEON 0xffffffff +/* + * On LEON PCI Memory space is mapped 1:1 with physical address space. + * + * I/O space is located at low 64Kbytes in PCI I/O space. The I/O addresses + * are converted into CPU addresses to virtual addresses that are mapped with + * MMU to the PCI Host PCI I/O space window which are translated to the low + * 64Kbytes by the Host controller. + */ + #endif diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index cad79a6ce0e4..cfec79bb1831 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h @@ -1,9 +1,54 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ___ASM_SPARC_PCI_H #define ___ASM_SPARC_PCI_H -#if defined(__sparc__) && defined(__arch64__) -#include <asm/pci_64.h> + + +/* Can be used to override the logic in pci_scan_bus for skipping + * already-configured bus numbers - to be used for buggy BIOSes + * or architectures with incomplete PCI setup by the loader. 
+ */ +#define pcibios_assign_all_busses() 0 + +#define PCIBIOS_MIN_IO 0UL +#define PCIBIOS_MIN_MEM 0UL + +#define PCI_IRQ_NONE 0xffffffff + + +#ifdef CONFIG_SPARC64 + +/* PCI IOMMU mapping bypass support. */ + +/* PCI 64-bit addressing works for all slots on all controller + * types on sparc64. However, it requires that the device + * can drive enough of the 64 bits. + */ +#define PCI64_REQUIRED_MASK (~(u64)0) +#define PCI64_ADDR_BASE 0xfffc000000000000UL + +/* Return the index of the PCI controller for device PDEV. */ +int pci_domain_nr(struct pci_bus *bus); +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return 1; +} + +/* Platform support for /proc/bus/pci/X/Y mmap()s. */ +#define HAVE_PCI_MMAP +#define arch_can_pci_mmap_io() 1 +#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA +#define get_pci_unmapped_area get_fb_unmapped_area + +#define HAVE_ARCH_PCI_RESOURCE_TO_USER +#endif /* CONFIG_SPARC64 */ + +#if defined(CONFIG_SPARC64) || defined(CONFIG_LEON_PCI) +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return PCI_IRQ_NONE; +} #else -#include <asm/pci_32.h> -#endif +#include <asm-generic/pci.h> #endif + +#endif /* ___ASM_SPARC_PCI_H */ diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h deleted file mode 100644 index cfc0ee9476c6..000000000000 --- a/arch/sparc/include/asm/pci_32.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __SPARC_PCI_H -#define __SPARC_PCI_H - -#ifdef __KERNEL__ - -#include <linux/dma-mapping.h> - -/* Can be used to override the logic in pci_scan_bus for skipping - * already-configured bus numbers - to be used for buggy BIOSes - * or architectures with incomplete PCI setup by the loader. - */ -#define pcibios_assign_all_busses() 0 - -#define PCIBIOS_MIN_IO 0UL -#define PCIBIOS_MIN_MEM 0UL - -#define PCI_IRQ_NONE 0xffffffff - -#endif /* __KERNEL__ */ - -#ifndef CONFIG_LEON_PCI -/* generic pci stuff */ -#include <asm-generic/pci.h> -#else -/* - * On LEON PCI Memory space is mapped 1:1 with physical address space. - * - * I/O space is located at low 64Kbytes in PCI I/O space. The I/O addresses - * are converted into CPU addresses to virtual addresses that are mapped with - * MMU to the PCI Host PCI I/O space window which are translated to the low - * 64Kbytes by the Host controller. - */ - -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - return PCI_IRQ_NONE; -} -#endif - -#endif /* __SPARC_PCI_H */ diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h deleted file mode 100644 index fac77813402c..000000000000 --- a/arch/sparc/include/asm/pci_64.h +++ /dev/null @@ -1,52 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __SPARC64_PCI_H -#define __SPARC64_PCI_H - -#ifdef __KERNEL__ - -#include <linux/dma-mapping.h> - -/* Can be used to override the logic in pci_scan_bus for skipping - * already-configured bus numbers - to be used for buggy BIOSes - * or architectures with incomplete PCI setup by the loader. - */ -#define pcibios_assign_all_busses() 0 - -#define PCIBIOS_MIN_IO 0UL -#define PCIBIOS_MIN_MEM 0UL - -#define PCI_IRQ_NONE 0xffffffff - -/* PCI IOMMU mapping bypass support. */ - -/* PCI 64-bit addressing works for all slots on all controller - * types on sparc64. However, it requires that the device - * can drive enough of the 64 bits. - */ -#define PCI64_REQUIRED_MASK (~(u64)0) -#define PCI64_ADDR_BASE 0xfffc000000000000UL - -/* Return the index of the PCI controller for device PDEV. 
*/ - -int pci_domain_nr(struct pci_bus *bus); -static inline int pci_proc_domain(struct pci_bus *bus) -{ - return 1; -} - -/* Platform support for /proc/bus/pci/X/Y mmap()s. */ - -#define HAVE_PCI_MMAP -#define arch_can_pci_mmap_io() 1 -#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA -#define get_pci_unmapped_area get_fb_unmapped_area - -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - return PCI_IRQ_NONE; -} - -#define HAVE_ARCH_PCI_RESOURCE_TO_USER -#endif /* __KERNEL__ */ - -#endif /* __SPARC64_PCI_H */ diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 05eb016fc41b..b1a09080e8da 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -314,7 +314,7 @@ bad: bad_no_ctx: if (printk_ratelimit()) WARN_ON(1); - return SPARC_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, @@ -547,7 +547,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = SPARC_MAPPING_ERROR; + outs->dma_address = DMA_MAPPING_ERROR; outs->dma_length = 0; } @@ -573,7 +573,7 @@ iommu_map_failed: iommu_tbl_range_free(&iommu->tbl, vaddr, npages, IOMMU_ERROR_CODE); - s->dma_address = SPARC_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; } if (s == outs) @@ -741,11 +741,6 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } -static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == SPARC_MAPPING_ERROR; -} - static int dma_4u_supported(struct device *dev, u64 device_mask) { struct iommu *iommu = dev->archdata.iommu; @@ -771,7 +766,6 @@ static const struct dma_map_ops sun4u_dma_ops = { .sync_single_for_cpu = dma_4u_sync_single_for_cpu, .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, .dma_supported = dma_4u_supported, - .mapping_error = dma_4u_mapping_error, }; const struct dma_map_ops *dma_ops = &sun4u_dma_ops; diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h index e3c02ba32500..d62ed9c5682d 100644 --- a/arch/sparc/kernel/iommu_common.h +++ b/arch/sparc/kernel/iommu_common.h @@ -48,6 +48,4 @@ static inline int is_span_boundary(unsigned long entry, return iommu_is_span_boundary(entry, nr, shift, boundary_size); } -#define SPARC_MAPPING_ERROR (~(dma_addr_t)0x0) - #endif /* _IOMMU_COMMON_H */ diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index aeaad04fdd14..f89603855f1e 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -52,8 +52,6 @@ #include <asm/io-unit.h> #include <asm/leon.h> -const struct sparc32_dma_ops *sparc32_dma_ops; - /* This function must make sure that caches and memory are coherent after DMA * On LEON systems without cache snooping it flushes the entire D-CACHE. */ @@ -247,178 +245,60 @@ static void _sparc_free_io(struct resource *res) release_resource(res); } -#ifdef CONFIG_SBUS - -void sbus_set_sbus64(struct device *dev, int x) -{ - printk("sbus_set_sbus64: unsupported\n"); -} -EXPORT_SYMBOL(sbus_set_sbus64); - -/* - * Allocate a chunk of memory suitable for DMA. - * Typically devices use them for control blocks. - * CPU may access them without any explicit flushing. 
- */ -static void *sbus_alloc_coherent(struct device *dev, size_t len, - dma_addr_t *dma_addrp, gfp_t gfp, - unsigned long attrs) +unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len) { - struct platform_device *op = to_platform_device(dev); - unsigned long len_total = PAGE_ALIGN(len); - unsigned long va; struct resource *res; - int order; - - /* XXX why are some lengths signed, others unsigned? */ - if (len <= 0) { - return NULL; - } - /* XXX So what is maxphys for us and how do drivers know it? */ - if (len > 256*1024) { /* __get_free_pages() limit */ - return NULL; - } - - order = get_order(len_total); - va = __get_free_pages(gfp, order); - if (va == 0) - goto err_nopages; - if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) - goto err_nomem; + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return 0; + res->name = dev->of_node->full_name; - if (allocate_resource(&_sparc_dvma, res, len_total, - _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { - printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total); - goto err_nova; + if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start, + _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { + printk("%s: cannot occupy 0x%zx", __func__, len); + kfree(res); + return 0; } - // XXX The sbus_map_dma_area does this for us below, see comments. - // srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total); - /* - * XXX That's where sdev would be used. Currently we load - * all iommu tables with the same translations. - */ - if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) - goto err_noiommu; - - res->name = op->dev.of_node->full_name; - - return (void *)(unsigned long)res->start; - -err_noiommu: - release_resource(res); -err_nova: - kfree(res); -err_nomem: - free_pages(va, order); -err_nopages: - return NULL; + return res->start; } -static void sbus_free_coherent(struct device *dev, size_t n, void *p, - dma_addr_t ba, unsigned long attrs) +bool sparc_dma_free_resource(void *cpu_addr, size_t size) { + unsigned long addr = (unsigned long)cpu_addr; struct resource *res; - struct page *pgv; - if ((res = lookup_resource(&_sparc_dvma, - (unsigned long)p)) == NULL) { - printk("sbus_free_consistent: cannot free %p\n", p); - return; + res = lookup_resource(&_sparc_dvma, addr); + if (!res) { + printk("%s: cannot free %p\n", __func__, cpu_addr); + return false; } - if (((unsigned long)p & (PAGE_SIZE-1)) != 0) { - printk("sbus_free_consistent: unaligned va %p\n", p); - return; + if ((addr & (PAGE_SIZE - 1)) != 0) { + printk("%s: unaligned va %p\n", __func__, cpu_addr); + return false; } - n = PAGE_ALIGN(n); - if (resource_size(res) != n) { - printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", - (long)resource_size(res), n); - return; + size = PAGE_ALIGN(size); + if (resource_size(res) != size) { + printk("%s: region 0x%lx asked 0x%zx\n", + __func__, (long)resource_size(res), size); + return false; } release_resource(res); kfree(res); - - pgv = virt_to_page(p); - sbus_unmap_dma_area(dev, ba, n); - - __free_pages(pgv, get_order(n)); -} - -/* - * Map a chunk of memory so that devices can see it. - * CPU view of this memory may be inconsistent with - * a device view and explicit flushing is necessary. - */ -static dma_addr_t sbus_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t len, - enum dma_data_direction dir, - unsigned long attrs) -{ - void *va = page_address(page) + offset; - - /* XXX why are some lengths signed, others unsigned? 
*/ - if (len <= 0) { - return 0; - } - /* XXX So what is maxphys for us and how do drivers know it? */ - if (len > 256*1024) { /* __get_free_pages() limit */ - return 0; - } - return mmu_get_scsi_one(dev, va, len); -} - -static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, - enum dma_data_direction dir, unsigned long attrs) -{ - mmu_release_scsi_one(dev, ba, n); -} - -static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, - enum dma_data_direction dir, unsigned long attrs) -{ - mmu_get_scsi_sgl(dev, sg, n); - return n; -} - -static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, - enum dma_data_direction dir, unsigned long attrs) -{ - mmu_release_scsi_sgl(dev, sg, n); -} - -static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int n, enum dma_data_direction dir) -{ - BUG(); + return true; } -static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int n, enum dma_data_direction dir) -{ - BUG(); -} +#ifdef CONFIG_SBUS -static int sbus_dma_supported(struct device *dev, u64 mask) +void sbus_set_sbus64(struct device *dev, int x) { - return 0; + printk("sbus_set_sbus64: unsupported\n"); } - -static const struct dma_map_ops sbus_dma_ops = { - .alloc = sbus_alloc_coherent, - .free = sbus_free_coherent, - .map_page = sbus_map_page, - .unmap_page = sbus_unmap_page, - .map_sg = sbus_map_sg, - .unmap_sg = sbus_unmap_sg, - .sync_sg_for_cpu = sbus_sync_sg_for_cpu, - .sync_sg_for_device = sbus_sync_sg_for_device, - .dma_supported = sbus_dma_supported, -}; +EXPORT_SYMBOL(sbus_set_sbus64); static int __init sparc_register_ioport(void) { @@ -438,45 +318,30 @@ arch_initcall(sparc_register_ioport); void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { - unsigned long len_total = PAGE_ALIGN(size); + unsigned long addr; void *va; - struct resource *res; - int order; - if (size == 0) { + if (!size || size > 256 * 1024) /* __get_free_pages() limit */ return NULL; - } - if (size > 256*1024) { /* __get_free_pages() limit */ - return NULL; - } - order = get_order(len_total); - va = (void *) __get_free_pages(gfp, order); - if (va == NULL) { - printk("%s: no %ld pages\n", __func__, len_total>>PAGE_SHIFT); - goto err_nopages; + size = PAGE_ALIGN(size); + va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size)); + if (!va) { + printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT); + return NULL; } - if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { - printk("%s: no core\n", __func__); + addr = sparc_dma_alloc_resource(dev, size); + if (!addr) goto err_nomem; - } - if (allocate_resource(&_sparc_dvma, res, len_total, - _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { - printk("%s: cannot occupy 0x%lx", __func__, len_total); - goto err_nova; - } - srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total); + srmmu_mapiorange(0, virt_to_phys(va), addr, size); *dma_handle = virt_to_phys(va); - return (void *) res->start; + return (void *)addr; -err_nova: - kfree(res); err_nomem: - free_pages((unsigned long)va, order); -err_nopages: + free_pages((unsigned long)va, get_order(size)); return NULL; } @@ -491,31 +356,11 @@ err_nopages: void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - struct resource *res; - - if ((res = lookup_resource(&_sparc_dvma, - (unsigned long)cpu_addr)) == NULL) { - printk("%s: cannot free %p\n", __func__, cpu_addr); + if 
(!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size))) return; - } - - if (((unsigned long)cpu_addr & (PAGE_SIZE-1)) != 0) { - printk("%s: unaligned va %p\n", __func__, cpu_addr); - return; - } - - size = PAGE_ALIGN(size); - if (resource_size(res) != size) { - printk("%s: region 0x%lx asked 0x%zx\n", __func__, - (long)resource_size(res), size); - return; - } dma_make_coherent(dma_addr, size); srmmu_unmapiorange((unsigned long)cpu_addr, size); - - release_resource(res); - kfree(res); free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size)); } @@ -528,7 +373,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, dma_make_coherent(paddr, PAGE_ALIGN(size)); } -const struct dma_map_ops *dma_ops = &sbus_dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); #ifdef CONFIG_PROC_FS diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 565d9ac883d0..fa0e42b4cbfb 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -414,12 +414,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, bad: if (printk_ratelimit()) WARN_ON(1); - return SPARC_MAPPING_ERROR; + return DMA_MAPPING_ERROR; iommu_map_fail: local_irq_restore(flags); iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); - return SPARC_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, @@ -592,7 +592,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, if (outcount < incount) { outs = sg_next(outs); - outs->dma_address = SPARC_MAPPING_ERROR; + outs->dma_address = DMA_MAPPING_ERROR; outs->dma_length = 0; } @@ -609,7 +609,7 @@ iommu_map_failed: iommu_tbl_range_free(tbl, vaddr, npages, IOMMU_ERROR_CODE); /* XXX demap? XXX */ - s->dma_address = SPARC_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; } if (s == outs) @@ -688,11 +688,6 @@ static int dma_4v_supported(struct device *dev, u64 device_mask) return pci64_dma_supported(to_pci_dev(dev), device_mask); } -static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == SPARC_MAPPING_ERROR; -} - static const struct dma_map_ops sun4v_dma_ops = { .alloc = dma_4v_alloc_coherent, .free = dma_4v_free_coherent, @@ -701,7 +696,6 @@ static const struct dma_map_ops sun4v_dma_ops = { .map_sg = dma_4v_map_sg, .unmap_sg = dma_4v_unmap_sg, .dma_supported = dma_4v_supported, - .mapping_error = dma_4v_mapping_error, }; static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent) diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index c8cb27d3ea75..f770ee7229d8 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -12,7 +12,7 @@ #include <linux/mm.h> #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */ #include <linux/bitops.h> -#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> @@ -140,34 +140,44 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan); return vaddr; } -static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len) +static dma_addr_t iounit_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t len, enum dma_data_direction dir, + unsigned long attrs) { + void *vaddr = page_address(page) + offset; struct iounit_struct *iounit = dev->archdata.iommu; unsigned long ret, flags; + /* XXX So what is maxphys for us and how do drivers know it? 
*/ + if (!len || len > 256 * 1024) + return DMA_MAPPING_ERROR; + spin_lock_irqsave(&iounit->lock, flags); ret = iounit_get_area(iounit, (unsigned long)vaddr, len); spin_unlock_irqrestore(&iounit->lock, flags); return ret; } -static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) +static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; + struct scatterlist *sg; unsigned long flags; + int i; /* FIXME: Cache some resolved pages - often several sg entries are to the same page */ spin_lock_irqsave(&iounit->lock, flags); - while (sz != 0) { - --sz; + for_each_sg(sgl, sg, nents, i) { sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length); sg->dma_length = sg->length; - sg = sg_next(sg); } spin_unlock_irqrestore(&iounit->lock, flags); + return nents; } -static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len) +static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len, + enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long flags; @@ -181,34 +191,47 @@ static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned lo spin_unlock_irqrestore(&iounit->lock, flags); } -static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) +static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; - unsigned long flags; - unsigned long vaddr, len; + unsigned long flags, vaddr, len; + struct scatterlist *sg; + int i; spin_lock_irqsave(&iounit->lock, flags); - while (sz != 0) { - --sz; + for_each_sg(sgl, sg, nents, i) { len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT; vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); for (len += vaddr; vaddr < len; vaddr++) clear_bit(vaddr, iounit->bmap); - sg = sg_next(sg); } spin_unlock_irqrestore(&iounit->lock, flags); } #ifdef CONFIG_SBUS -static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len) +static void *iounit_alloc(struct device *dev, size_t len, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; - unsigned long page, end; + unsigned long va, addr, page, end, ret; pgprot_t dvma_prot; iopte_t __iomem *iopte; - *pba = addr; + /* XXX So what is maxphys for us and how do drivers know it? 
*/ + if (!len || len > 256 * 1024) + return NULL; + + len = PAGE_ALIGN(len); + va = __get_free_pages(gfp | __GFP_ZERO, get_order(len)); + if (!va) + return NULL; + + addr = ret = sparc_dma_alloc_resource(dev, len); + if (!addr) + goto out_free_pages; + *dma_handle = addr; dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); end = PAGE_ALIGN((addr + len)); @@ -237,27 +260,32 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon flush_cache_all(); flush_tlb_all(); - return 0; + return (void *)ret; + +out_free_pages: + free_pages(va, get_order(len)); + return NULL; } -static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len) +static void iounit_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs) { /* XXX Somebody please fill this in */ } #endif -static const struct sparc32_dma_ops iounit_dma_ops = { - .get_scsi_one = iounit_get_scsi_one, - .get_scsi_sgl = iounit_get_scsi_sgl, - .release_scsi_one = iounit_release_scsi_one, - .release_scsi_sgl = iounit_release_scsi_sgl, +static const struct dma_map_ops iounit_dma_ops = { #ifdef CONFIG_SBUS - .map_dma_area = iounit_map_dma_area, - .unmap_dma_area = iounit_unmap_dma_area, + .alloc = iounit_alloc, + .free = iounit_free, #endif + .map_page = iounit_map_page, + .unmap_page = iounit_unmap_page, + .map_sg = iounit_map_sg, + .unmap_sg = iounit_unmap_sg, }; void __init ld_mmu_iounit(void) { - sparc32_dma_ops = &iounit_dma_ops; + dma_ops = &iounit_dma_ops; } diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 2c5f8a648f8c..e8d5d73ca40d 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -13,7 +13,7 @@ #include <linux/mm.h> #include <linux/slab.h> #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */ -#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> @@ -205,59 +205,67 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages) return busa0; } -static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len) +static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t len) { - unsigned long off; - int npages; - struct page *page; - u32 busa; - - off = (unsigned long)vaddr & ~PAGE_MASK; - npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; - page = virt_to_page((unsigned long)vaddr & PAGE_MASK); - busa = iommu_get_one(dev, page, npages); - return busa + off; + void *vaddr = page_address(page) + offset; + unsigned long off = (unsigned long)vaddr & ~PAGE_MASK; + unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + + /* XXX So what is maxphys for us and how do drivers know it? 
*/ + if (!len || len > 256 * 1024) + return DMA_MAPPING_ERROR; + return iommu_get_one(dev, virt_to_page(vaddr), npages) + off; } -static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len) +static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev, + struct page *page, unsigned long offset, size_t len, + enum dma_data_direction dir, unsigned long attrs) { flush_page_for_dma(0); - return iommu_get_scsi_one(dev, vaddr, len); + return __sbus_iommu_map_page(dev, page, offset, len); } -static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len) +static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev, + struct page *page, unsigned long offset, size_t len, + enum dma_data_direction dir, unsigned long attrs) { - unsigned long page = ((unsigned long) vaddr) & PAGE_MASK; + void *vaddr = page_address(page) + offset; + unsigned long p = ((unsigned long)vaddr) & PAGE_MASK; - while(page < ((unsigned long)(vaddr + len))) { - flush_page_for_dma(page); - page += PAGE_SIZE; + while (p < (unsigned long)vaddr + len) { + flush_page_for_dma(p); + p += PAGE_SIZE; } - return iommu_get_scsi_one(dev, vaddr, len); + + return __sbus_iommu_map_page(dev, page, offset, len); } -static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz) +static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { - int n; + struct scatterlist *sg; + int i, n; flush_page_for_dma(0); - while (sz != 0) { - --sz; + + for_each_sg(sgl, sg, nents, i) { n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset; sg->dma_length = sg->length; - sg = sg_next(sg); } + + return nents; } -static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz) +static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { unsigned long page, oldpage = 0; - int n, i; - - while(sz != 0) { - --sz; + struct scatterlist *sg; + int i, j, n; + for_each_sg(sgl, sg, nents, j) { n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; /* @@ -277,8 +285,9 @@ static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset; sg->dma_length = sg->length; - sg = sg_next(sg); } + + return nents; } static void iommu_release_one(struct device *dev, u32 busa, int npages) @@ -297,40 +306,52 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages) bit_map_clear(&iommu->usemap, ioptex, npages); } -static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len) +static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr, + size_t len, enum dma_data_direction dir, unsigned long attrs) { - unsigned long off; + unsigned long off = dma_addr & ~PAGE_MASK; int npages; - off = vaddr & ~PAGE_MASK; npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; - iommu_release_one(dev, vaddr & PAGE_MASK, npages); + iommu_release_one(dev, dma_addr & PAGE_MASK, npages); } -static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) +static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { - int n; - - while(sz != 0) { - --sz; + struct scatterlist *sg; + int i, n; + for_each_sg(sgl, sg, nents, i) 
{ n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_release_one(dev, sg->dma_address & PAGE_MASK, n); sg->dma_address = 0x21212121; - sg = sg_next(sg); } } #ifdef CONFIG_SBUS -static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, - unsigned long addr, int len) +static void *sbus_iommu_alloc(struct device *dev, size_t len, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct iommu_struct *iommu = dev->archdata.iommu; - unsigned long page, end; + unsigned long va, addr, page, end, ret; iopte_t *iopte = iommu->page_table; iopte_t *first; int ioptex; + /* XXX So what is maxphys for us and how do drivers know it? */ + if (!len || len > 256 * 1024) + return NULL; + + len = PAGE_ALIGN(len); + va = __get_free_pages(gfp | __GFP_ZERO, get_order(len)); + if (va == 0) + return NULL; + + addr = ret = sparc_dma_alloc_resource(dev, len); + if (!addr) + goto out_free_pages; + BUG_ON((va & ~PAGE_MASK) != 0); BUG_ON((addr & ~PAGE_MASK) != 0); BUG_ON((len & ~PAGE_MASK) != 0); @@ -385,16 +406,25 @@ static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long flush_tlb_all(); iommu_invalidate(iommu->regs); - *pba = iommu->start + (ioptex << PAGE_SHIFT); - return 0; + *dma_handle = iommu->start + (ioptex << PAGE_SHIFT); + return (void *)ret; + +out_free_pages: + free_pages(va, get_order(len)); + return NULL; } -static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len) +static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr, + dma_addr_t busa, unsigned long attrs) { struct iommu_struct *iommu = dev->archdata.iommu; iopte_t *iopte = iommu->page_table; - unsigned long end; + struct page *page = virt_to_page(cpu_addr); int ioptex = (busa - iommu->start) >> PAGE_SHIFT; + unsigned long end; + + if (!sparc_dma_free_resource(cpu_addr, len)) + return; BUG_ON((busa & ~PAGE_MASK) != 0); BUG_ON((len & ~PAGE_MASK) != 0); @@ -408,38 +438,40 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len flush_tlb_all(); iommu_invalidate(iommu->regs); bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT); + + __free_pages(page, get_order(len)); } #endif -static const struct sparc32_dma_ops iommu_dma_gflush_ops = { - .get_scsi_one = iommu_get_scsi_one_gflush, - .get_scsi_sgl = iommu_get_scsi_sgl_gflush, - .release_scsi_one = iommu_release_scsi_one, - .release_scsi_sgl = iommu_release_scsi_sgl, +static const struct dma_map_ops sbus_iommu_dma_gflush_ops = { #ifdef CONFIG_SBUS - .map_dma_area = iommu_map_dma_area, - .unmap_dma_area = iommu_unmap_dma_area, + .alloc = sbus_iommu_alloc, + .free = sbus_iommu_free, #endif + .map_page = sbus_iommu_map_page_gflush, + .unmap_page = sbus_iommu_unmap_page, + .map_sg = sbus_iommu_map_sg_gflush, + .unmap_sg = sbus_iommu_unmap_sg, }; -static const struct sparc32_dma_ops iommu_dma_pflush_ops = { - .get_scsi_one = iommu_get_scsi_one_pflush, - .get_scsi_sgl = iommu_get_scsi_sgl_pflush, - .release_scsi_one = iommu_release_scsi_one, - .release_scsi_sgl = iommu_release_scsi_sgl, +static const struct dma_map_ops sbus_iommu_dma_pflush_ops = { #ifdef CONFIG_SBUS - .map_dma_area = iommu_map_dma_area, - .unmap_dma_area = iommu_unmap_dma_area, + .alloc = sbus_iommu_alloc, + .free = sbus_iommu_free, #endif + .map_page = sbus_iommu_map_page_pflush, + .unmap_page = sbus_iommu_unmap_page, + .map_sg = sbus_iommu_map_sg_pflush, + .unmap_sg = sbus_iommu_unmap_sg, }; void __init ld_mmu_iommu(void) { if (flush_page_for_dma_global) { /* flush_page_for_dma flushes 
everything, no matter of what page is it */ - sparc32_dma_ops = &iommu_dma_gflush_ops; + dma_ops = &sbus_iommu_dma_gflush_ops; } else { - sparc32_dma_ops = &iommu_dma_pflush_ops; + dma_ops = &sbus_iommu_dma_pflush_ops; } if (viking_mxcc_present || srmmu_modtype == HyperSparc) { diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index a4c05159dca5..2681027d7bff 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -4,7 +4,6 @@ config UNICORE32 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO - select DMA_DIRECT_OPS select HAVE_GENERIC_DMA_COHERENT select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c7094f813183..57552f2b37eb 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -66,7 +66,6 @@ config X86 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE select ARCH_HAS_SET_MEMORY - select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE @@ -90,7 +89,6 @@ config X86 select CLOCKSOURCE_VALIDATE_LAST_CYCLE select CLOCKSOURCE_WATCHDOG select DCACHE_WORD_ACCESS - select DMA_DIRECT_OPS select EDAC_ATOMIC_SCRUB select EDAC_SUPPORT select GENERIC_CLOCKEVENTS diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 3f9d1b4019bb..e0ff3ac8c127 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -50,8 +50,6 @@ static unsigned long iommu_pages; /* .. and in pages */ static u32 *iommu_gatt_base; /* Remapping table */ -static dma_addr_t bad_dma_addr; - /* * If this is disabled the IOMMU will use an optimized flushing strategy * of only flushing when an mapping is reused. 
With it true the GART is @@ -74,8 +72,6 @@ static u32 gart_unmapped_entry; (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) -#define EMERGENCY_PAGES 32 /* = 128KB */ - #ifdef CONFIG_AGP #define AGPEXTERN extern #else @@ -155,9 +151,6 @@ static void flush_gart(void) #ifdef CONFIG_IOMMU_LEAK /* Debugging aid for drivers that don't free their IOMMU tables */ -static int leak_trace; -static int iommu_leak_pages = 20; - static void dump_leak(void) { static int dump; @@ -184,14 +177,6 @@ static void iommu_full(struct device *dev, size_t size, int dir) */ dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size); - - if (size > PAGE_SIZE*EMERGENCY_PAGES) { - if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL) - panic("PCI-DMA: Memory would be corrupted\n"); - if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL) - panic(KERN_ERR - "PCI-DMA: Random memory would be DMAed\n"); - } #ifdef CONFIG_IOMMU_LEAK dump_leak(); #endif @@ -220,7 +205,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, int i; if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR)) - return bad_dma_addr; + return DMA_MAPPING_ERROR; iommu_page = alloc_iommu(dev, npages, align_mask); if (iommu_page == -1) { @@ -229,7 +214,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, if (panic_on_overflow) panic("dma_map_area overflow %lu bytes\n", size); iommu_full(dev, size, dir); - return bad_dma_addr; + return DMA_MAPPING_ERROR; } for (i = 0; i < npages; i++) { @@ -271,7 +256,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, int npages; int i; - if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE || + if (dma_addr == DMA_MAPPING_ERROR || dma_addr >= iommu_bus_base + iommu_size) return; @@ -315,7 +300,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, if (nonforced_iommu(dev, addr, s->length)) { addr = dma_map_area(dev, addr, s->length, dir, 0); - if (addr == bad_dma_addr) { + if (addr == DMA_MAPPING_ERROR) { if (i > 0) gart_unmap_sg(dev, sg, i, dir, 0); nents = 0; @@ -471,7 +456,7 @@ error: iommu_full(dev, pages << PAGE_SHIFT, dir); for_each_sg(sg, s, nents, i) - s->dma_address = bad_dma_addr; + s->dma_address = DMA_MAPPING_ERROR; return 0; } @@ -490,7 +475,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size, DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1); flush_gart(); - if (unlikely(*dma_addr == bad_dma_addr)) + if (unlikely(*dma_addr == DMA_MAPPING_ERROR)) goto out_free; return vaddr; out_free: @@ -507,11 +492,6 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs); } -static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return (dma_addr == bad_dma_addr); -} - static int no_agp; static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) @@ -695,7 +675,6 @@ static const struct dma_map_ops gart_dma_ops = { .unmap_page = gart_unmap_page, .alloc = gart_alloc_coherent, .free = gart_free_coherent, - .mapping_error = gart_mapping_error, .dma_supported = dma_direct_supported, }; @@ -730,7 +709,6 @@ int __init gart_iommu_init(void) unsigned long aper_base, aper_size; unsigned long start_pfn, end_pfn; unsigned long scratch; - long i; if (!amd_nb_has_feature(AMD_NB_GART)) return 0; @@ -774,29 +752,12 @@ int __init 
gart_iommu_init(void) if (!iommu_gart_bitmap) panic("Cannot allocate iommu bitmap\n"); -#ifdef CONFIG_IOMMU_LEAK - if (leak_trace) { - int ret; - - ret = dma_debug_resize_entries(iommu_pages); - if (ret) - pr_debug("PCI-DMA: Cannot trace all the entries\n"); - } -#endif - - /* - * Out of IOMMU space handling. - * Reserve some invalid pages at the beginning of the GART. - */ - bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES); - pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n", iommu_size >> 20); agp_memory_reserved = iommu_size; iommu_start = aper_size - iommu_size; iommu_bus_base = info.aper_base + iommu_start; - bad_dma_addr = iommu_bus_base; iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); /* @@ -838,8 +799,6 @@ int __init gart_iommu_init(void) if (!scratch) panic("Cannot allocate iommu scratch page"); gart_unmapped_entry = GPTE_ENCODE(__pa(scratch)); - for (i = EMERGENCY_PAGES; i < iommu_pages; i++) - iommu_gatt_base[i] = gart_unmapped_entry; flush_gart(); dma_ops = &gart_dma_ops; @@ -853,16 +812,6 @@ void __init gart_parse_options(char *p) { int arg; -#ifdef CONFIG_IOMMU_LEAK - if (!strncmp(p, "leak", 4)) { - leak_trace = 1; - p += 4; - if (*p == '=') - ++p; - if (isdigit(*p) && get_option(&p, &arg)) - iommu_leak_pages = arg; - } -#endif if (isdigit(*p) && get_option(&p, &arg)) iommu_size = arg; if (!strncmp(p, "fullflush", 9)) diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index bbfc8b1e9104..c70720f61a34 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -51,8 +51,6 @@ #include <asm/x86_init.h> #include <asm/iommu_table.h> -#define CALGARY_MAPPING_ERROR 0 - #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT int use_calgary __read_mostly = 1; #else @@ -157,8 +155,6 @@ static const unsigned long phb_debug_offsets[] = { #define PHB_DEBUG_STUFF_OFFSET 0x0020 -#define EMERGENCY_PAGES 32 /* = 128KB */ - unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED; static int translate_empty_slots __read_mostly = 0; static int calgary_detected __read_mostly = 0; @@ -255,7 +251,7 @@ static unsigned long iommu_range_alloc(struct device *dev, if (panic_on_overflow) panic("Calgary: fix the allocator.\n"); else - return CALGARY_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } } @@ -274,11 +270,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, dma_addr_t ret; entry = iommu_range_alloc(dev, tbl, npages); - - if (unlikely(entry == CALGARY_MAPPING_ERROR)) { + if (unlikely(entry == DMA_MAPPING_ERROR)) { pr_warn("failed to allocate %u pages in iommu %p\n", npages, tbl); - return CALGARY_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /* set the return dma address */ @@ -294,12 +289,10 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) { unsigned long entry; - unsigned long badend; unsigned long flags; /* were we called with bad_dma_address? 
*/ - badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE); - if (unlikely(dma_addr < badend)) { + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) { WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " "address 0x%Lx\n", dma_addr); return; @@ -383,7 +376,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE); entry = iommu_range_alloc(dev, tbl, npages); - if (entry == CALGARY_MAPPING_ERROR) { + if (entry == DMA_MAPPING_ERROR) { /* makes sure unmap knows to stop */ s->dma_length = 0; goto error; @@ -401,7 +394,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, error: calgary_unmap_sg(dev, sg, nelems, dir, 0); for_each_sg(sg, s, nelems, i) { - sg->dma_address = CALGARY_MAPPING_ERROR; + sg->dma_address = DMA_MAPPING_ERROR; sg->dma_length = 0; } return 0; @@ -454,7 +447,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size, /* set up tces to cover the allocated range */ mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); - if (mapping == CALGARY_MAPPING_ERROR) + if (mapping == DMA_MAPPING_ERROR) goto free; *dma_handle = mapping; return ret; @@ -479,11 +472,6 @@ static void calgary_free_coherent(struct device *dev, size_t size, free_pages((unsigned long)vaddr, get_order(size)); } -static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == CALGARY_MAPPING_ERROR; -} - static const struct dma_map_ops calgary_dma_ops = { .alloc = calgary_alloc_coherent, .free = calgary_free_coherent, @@ -491,7 +479,6 @@ static const struct dma_map_ops calgary_dma_ops = { .unmap_sg = calgary_unmap_sg, .map_page = calgary_map_page, .unmap_page = calgary_unmap_page, - .mapping_error = calgary_mapping_error, .dma_supported = dma_direct_supported, }; @@ -739,9 +726,6 @@ static void __init calgary_reserve_regions(struct pci_dev *dev) u64 start; struct iommu_table *tbl = pci_iommu(dev->bus); - /* reserve EMERGENCY_PAGES from bad_dma_address and up */ - iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES); - /* avoid the BIOS/VGA first 640KB-1MB region */ /* for CalIOC2 - avoid the entire first MB */ if (is_calgary(dev->device)) { diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index f4562fcec681..d460998ae828 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -17,7 +17,7 @@ static bool disable_dac_quirk __read_mostly; -const struct dma_map_ops *dma_ops = &dma_direct_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); #ifdef CONFIG_IOMMU_DEBUG diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index bd08b9e1c9e2..5f5302028a9a 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -62,10 +62,8 @@ IOMMU_INIT(pci_swiotlb_detect_4gb, void __init pci_swiotlb_init(void) { - if (swiotlb) { + if (swiotlb) swiotlb_init(0); - dma_ops = &swiotlb_dma_ops; - } } void __init pci_swiotlb_late_init(void) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 006f373f54ab..385afa2b9e17 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -381,13 +381,6 @@ void __init mem_encrypt_init(void) swiotlb_update_mem_attributes(); /* - * With SEV, DMA operations cannot use encryption, we need to use - * SWIOTLB to bounce buffer DMA operation. - */ - if (sev_active()) - dma_ops = &swiotlb_dma_ops; - - /* * With SEV, we need to unroll the rep string I/O instructions. 
*/ if (sev_active()) diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 7a5bafb76d77..3cdafea55ab6 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -168,7 +168,6 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev) return; pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); - pdev->dev.dma_ops = &swiotlb_dma_ops; pdev->dev.archdata.is_sta2x11 = true; /* We must enable all devices as master, for audio DMA to work */ diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index d29b7365da8d..36338e7564a3 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 config XTENSA def_bool y - select ARCH_HAS_SG_CHAIN select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_COHERENT_DMA_MMAP if !MMU @@ -10,7 +9,7 @@ config XTENSA select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select DMA_DIRECT_OPS + select DMA_REMAP if MMU select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_IRQ_SHOW diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 1fc138b6bc0a..9171bff76fc4 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -160,7 +160,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, flag & __GFP_NOWARN); if (!page) - page = alloc_pages(flag, get_order(size)); + page = alloc_pages(flag | __GFP_ZERO, get_order(size)); if (!page) return NULL; |
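A few driver-visible notes on the hunks above, with illustrative sketches that are not part of the series itself. First, the error-sentinel consolidation: IOMMU_MAPPING_ERROR, S390_MAPPING_ERROR, SPARC_MAPPING_ERROR, CALGARY_MAPPING_ERROR and the GART's bad_dma_addr all collapse into the common DMA_MAPPING_ERROR (~(dma_addr_t)0), so dma_mapping_error() becomes a plain comparison rather than an indirect ->mapping_error call. The driver-side pattern is unchanged; a minimal sketch, where mydev_map_buf and its arguments are made-up names for illustration:

    #include <linux/dma-mapping.h>

    static int mydev_map_buf(struct device *dev, struct page *page,
                             size_t len, dma_addr_t *handle)
    {
            dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

            /* after this series: equivalent to addr == DMA_MAPPING_ERROR */
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;

            *handle = addr;
            return 0;
    }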
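Second, the get_arch_dma_ops() hunks that now return NULL (alpha Jensen, sparc LEON, sparc32 PCI) rely on the new convention that a NULL ops pointer selects dma-direct, which is how the core avoids retpoline-heavy indirect calls on the direct-mapping fast path. A simplified sketch of that dispatch, paraphrasing the kernel/dma core logic rather than quoting it (sketch_dma_map_page is a made-up name):

    #include <linux/dma-mapping.h>
    #include <linux/dma-direct.h>

    static dma_addr_t sketch_dma_map_page(struct device *dev, struct page *page,
                    unsigned long offset, size_t size,
                    enum dma_data_direction dir, unsigned long attrs)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            if (!ops)       /* NULL now means "use the direct mapping" */
                    return dma_direct_map_page(dev, page, offset, size,
                                               dir, attrs);
            return ops->map_page(dev, page, offset, size, dir, attrs);
    }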
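Third, the "| __GFP_ZERO" hunks (alpha, s390, sparc32, xtensa above) make dma_alloc_coherent() return zeroed memory on every architecture, closing kernel-data leaks through device buffers and rendering dma_zalloc_coherent() redundant. A minimal sketch of the resulting driver idiom, with mydev_alloc_ring a made-up name:

    #include <linux/dma-mapping.h>

    static void *mydev_alloc_ring(struct device *dev, size_t size,
                                  dma_addr_t *handle)
    {
            /*
             * No dma_zalloc_coherent() needed: dma_alloc_coherent() now
             * returns zeroed memory on all architectures.
             */
            return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
    }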
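Finally, the sparc32 io-unit/iommu conversions replace open-coded "while (sz != 0) { ...; sg = sg_next(sg); }" walks with for_each_sg(), which also traverses chained scatterlists correctly (cf. the ARCH_HAS_SG_CHAIN removals and the new opt-out ARCH_NO_SG_CHAIN). A minimal sketch of the idiom; the identity address assignment is only a stand-in for a real mapping step, and sketch_map_sg is a made-up name:

    #include <linux/scatterlist.h>

    static int sketch_map_sg(struct scatterlist *sgl, int nents)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sgl, sg, nents, i) {
                    sg->dma_address = sg_phys(sg);  /* stand-in: 1:1 map */
                    sg_dma_len(sg) = sg->length;
            }
            return nents;
    }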