author    | Christoph Hellwig <hch@lst.de>    | 2018-07-19 15:02:31 +0200
committer | Stafford Horne <shorne@gmail.com> | 2018-07-21 06:49:48 +0200
commit    | 2c1de929aef31afbff0dad81de9cc26056d3d01b (patch)
tree      | e33babd08d443e5332f366a74eb605bab4c14ae1 /arch/openrisc/kernel/dma.c
parent    | openrisc: remove the no-op unmap_page and unmap_sg DMA operations (diff)
openrisc: fix cache maintenance in the sync_single_for_device DMA operation
The cache maintenance in the sync_single_for_device operation should be
equivalent to the map_page operation to facilitate reusing buffers. Fix the
openrisc implementation by moving the cache maintenance performed in map_page
into the sync_single method, and calling that from map_page.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Stafford Horne <shorne@gmail.com>
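For context, a minimal sketch of the buffer-reuse pattern the commit message refers to (not part of the patch; the device, page, and length are hypothetical placeholders): a driver maps a page once with dma_map_page(), later hands the same buffer back to the device, and relies on dma_sync_single_for_device() to redo the cache maintenance that the map operation performed on the first pass.

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: transmit the same page to the device twice. */
static int example_tx_twice(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t dma;

	/* First pass: map_page performs the dcache maintenance. */
	dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand the buffer to the device and wait for completion ... */

	/* Give the buffer back to the CPU so it can write a new payload. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);
	/* ... CPU fills the page with new data ... */

	/*
	 * Second pass: sync_single_for_device must perform the same
	 * maintenance as map_page did; this patch makes both paths share
	 * the direction-aware flush/invalidate code on openrisc.
	 */
	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
	/* ... hand the buffer to the device again ... */

	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}

Before this change, the direction-aware switch lived only in or1k_map_page(), while or1k_sync_single_for_device() did a bare dcache flush; the diff below moves that switch into the sync path and has map_page call it.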
Diffstat (limited to 'arch/openrisc/kernel/dma.c')
-rw-r--r-- | arch/openrisc/kernel/dma.c | 42
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 7cadff93d179..d6a0bf1fa713 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -133,19 +133,15 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr,
 	free_pages_exact(vaddr, size);
 }
 
-static dma_addr_t
-or1k_map_page(struct device *dev, struct page *page,
-	      unsigned long offset, size_t size,
-	      enum dma_data_direction dir,
-	      unsigned long attrs)
+static void
+or1k_sync_single_for_device(struct device *dev,
+			    dma_addr_t dma_handle, size_t size,
+			    enum dma_data_direction dir)
 {
 	unsigned long cl;
-	dma_addr_t addr = page_to_phys(page) + offset;
+	dma_addr_t addr = dma_handle;
 	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
 
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return addr;
-
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		/* Flush the dcache for the requested range */
@@ -168,6 +164,20 @@ or1k_map_page(struct device *dev, struct page *page,
 		break;
 	}
 
+}
+
+static dma_addr_t
+or1k_map_page(struct device *dev, struct page *page,
+	      unsigned long offset, size_t size,
+	      enum dma_data_direction dir,
+	      unsigned long attrs)
+{
+	unsigned long cl;
+	dma_addr_t addr = page_to_phys(page) + offset;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		or1k_sync_single_for_device(dev, addr, size, dir);
 	return addr;
 }
 
@@ -187,20 +197,6 @@ or1k_map_sg(struct device *dev, struct scatterlist *sg,
 	return nents;
 }
 
-static void
-or1k_sync_single_for_device(struct device *dev,
-			    dma_addr_t dma_handle, size_t size,
-			    enum dma_data_direction dir)
-{
-	unsigned long cl;
-	dma_addr_t addr = dma_handle;
-	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
-
-	/* Flush the dcache for the requested range */
-	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
-		mtspr(SPR_DCBFR, cl);
-}
-
 const struct dma_map_ops or1k_dma_map_ops = {
 	.alloc = or1k_dma_alloc,
 	.free = or1k_dma_free,