path: root/kernel/dma/swiotlb.c
author     Christoph Hellwig <hch@lst.de>  2018-10-19 08:51:53 +0200
committer  Christoph Hellwig <hch@lst.de>  2018-10-19 08:53:05 +0200
commit     a4a4330db46a17289cf2ca5f9fb153d536267b97 (patch)
tree       d3e807bd0fe1d8fb38c800f0696711b8423d57ff /kernel/dma/swiotlb.c
parent     swiotlb: don't dip into swiotlb pool for coherent allocations (diff)
download   linux-a4a4330db46a17289cf2ca5f9fb153d536267b97.tar.xz
           linux-a4a4330db46a17289cf2ca5f9fb153d536267b97.zip
swiotlb: add support for non-coherent DMA
Handle architectures that are not cache coherent directly in the main swiotlb code by calling arch_sync_dma_for_{device,cpu} in all the right places from the various dma_map/unmap/sync methods when the device is non-coherent.

Because swiotlb now uses dma_direct_alloc for the coherent allocation, that side is already taken care of by the dma-direct code calling into arch_dma_{alloc,free} for devices that are non-coherent.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
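For illustration only, and not part of the patch: drivers do not have to change. On a non-coherent architecture whose DMA ops route to this swiotlb code, an ordinary streaming mapping now picks up the required cache maintenance inside swiotlb, whether or not the buffer is bounced. The snippet below is a hypothetical driver-side sketch; example_xmit, dev, buf and len are placeholder names.

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver code: map a buffer for a device-bound transfer.
 * With this patch, the swiotlb map path calls arch_sync_dma_for_device()
 * for a non-coherent device (after bouncing if the address is not
 * reachable), and the unmap path calls arch_sync_dma_for_cpu(), so no
 * architecture-specific cache maintenance is needed here.
 */
static int example_xmit(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand addr to the hardware and wait for the transfer ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}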
Diffstat (limited to 'kernel/dma/swiotlb.c')
-rw-r--r--  kernel/dma/swiotlb.c | 33
1 files changed, 23 insertions, 10 deletions
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 1a01b0ac0a5e..ebecaf255ea2 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -21,6 +21,7 @@
 #include <linux/cache.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
@@ -671,11 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
-		return dev_addr;
+	if (!dma_capable(dev, dev_addr, size) ||
+	    swiotlb_force == SWIOTLB_FORCE) {
+		trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+		dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+	}
+
+	if (!dev_is_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		arch_sync_dma_for_device(dev, phys, size, dir);
-	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+	return dev_addr;
 }
/*
@@ -694,6 +701,10 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);
+	if (!dev_is_dma_coherent(hwdev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
 	if (is_swiotlb_buffer(paddr)) {
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
@@ -730,15 +741,17 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);
-	if (is_swiotlb_buffer(paddr)) {
+	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
+		arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
+	if (is_swiotlb_buffer(paddr))
 		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-		return;
-	}
-	if (dir != DMA_FROM_DEVICE)
-		return;
+	if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
+		arch_sync_dma_for_device(hwdev, paddr, size, dir);
-	dma_mark_clean(phys_to_virt(paddr), size);
+	if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
+		dma_mark_clean(phys_to_virt(paddr), size);
 }
void