author		Milton Miller <miltonm@bga.com>	2011-06-24 11:05:24 +0200
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-09-20 01:19:35 +0200
commit		d24f9c6999eacd3a7bc2b289e49fcb2bf2fafef2
tree		66276ee7149e5eab4b7ec9785bad7bdf0564ea3d /arch/powerpc/kernel/dma.c
parent		dma-mapping: Add get_required_mask if arch overrides default
powerpc: Use the newly added get_required_mask dma_map_ops hook
Now that the generic code sets dma_map_ops, push the computation into
the dma ops themselves instead of keeping a messy ifdef & if block in
the base dma_get_required_mask hook.
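
(Editor's aside, not part of the patch: the shape of this change is the
classic ops-table dispatch. A minimal standalone C sketch follows; the
struct and function names deliberately mirror the kernel's, but every
definition here is an illustrative stand-in, not the real kernel type.)

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; not the kernel's actual definitions. */
struct device;
struct dma_map_ops {
	uint64_t (*get_required_mask)(struct device *dev);	/* optional hook */
};

static uint64_t direct_get_required_mask(struct device *dev)
{
	(void)dev;
	return 0x1ffffffffULL;	/* e.g. DRAM ending at 4GB needs 33 bits */
}

/* The base hook: dispatch to the per-ops hook when one is provided,
 * otherwise fall back to the full width of the address type. */
static uint64_t get_required_mask(const struct dma_map_ops *ops,
				  struct device *dev)
{
	if (ops && ops->get_required_mask)
		return ops->get_required_mask(dev);
	return ~0ULL;		/* width of a 64-bit dma_addr_t */
}

int main(void)
{
	const struct dma_map_ops direct = {
		.get_required_mask = direct_get_required_mask,
	};
	const struct dma_map_ops bare = { 0 };

	printf("direct ops: %#llx\n",
	       (unsigned long long)get_required_mask(&direct, NULL));
	printf("no hook:    %#llx\n",
	       (unsigned long long)get_required_mask(&bare, NULL));
	return 0;
}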
If the ops do not set the get_required_mask hook, default to the
width of dma_addr_t.
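
(For reference: the fallback evaluates DMA_BIT_MASK(8 * sizeof(dma_addr_t)).
The kernel macro, as defined in that era's include/linux/dma-mapping.h,
special-cases n == 64 because 1ULL << 64 would be undefined:)

/* Kernel's DMA_BIT_MASK, quoted for reference; with a 64-bit dma_addr_t
 * the fallback is therefore ~0ULL. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))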
This also corrects ibmebus's ibmebus_dma_supported to require a
64-bit mask. I doubt anything is checking or setting the dma mask on
that bus.
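
(The ibmebus hunk is outside this file-limited view; the diffstat below
is restricted to arch/powerpc/kernel/dma.c. As a hedged sketch only,
"require a 64-bit mask" amounts to a dma_supported check of roughly
this shape:)

/* Sketch only: the real hunk lives in arch/powerpc/kernel/ibmebus.c
 * and is not shown in this filtered diff. Requiring a 64-bit mask
 * means accepting exactly DMA_BIT_MASK(64) and nothing narrower. */
static int ibmebus_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}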
Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Cc: benh@kernel.crashing.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/dma.c')
 arch/powerpc/kernel/dma.c | 41 ++++++++++++++++-------------------------
 1 file changed, 16 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 503093efa202..10b136afbf50 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -96,6 +96,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 #endif
 }
 
+static u64 dma_direct_get_required_mask(struct device *dev)
+{
+	u64 end, mask;
+
+	end = memblock_end_of_DRAM() + get_dma_offset(dev);
+
+	mask = 1ULL << (fls64(end) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     struct page *page,
 					     unsigned long offset,
@@ -144,6 +156,7 @@ struct dma_map_ops dma_direct_ops = {
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
 	.unmap_page	= dma_direct_unmap_page,
+	.get_required_mask	= dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	.sync_single_for_cpu	= dma_direct_sync_single,
 	.sync_single_for_device	= dma_direct_sync_single,
@@ -173,7 +186,6 @@ EXPORT_SYMBOL(dma_set_mask);
 u64 dma_get_required_mask(struct device *dev)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-	u64 mask, end = 0;
 
 	if (ppc_md.dma_get_required_mask)
 		return ppc_md.dma_get_required_mask(dev);
@@ -181,31 +193,10 @@ u64 dma_get_required_mask(struct device *dev)
 	if (unlikely(dma_ops == NULL))
 		return 0;
-#ifdef CONFIG_PPC64
-	else if (dma_ops == &dma_iommu_ops)
-		return dma_iommu_get_required_mask(dev);
-#endif
-#ifdef CONFIG_SWIOTLB
-	else if (dma_ops == &swiotlb_dma_ops) {
-		u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
-
-		end = memblock_end_of_DRAM();
-		if (max_direct_dma_addr && end > max_direct_dma_addr)
-			end = max_direct_dma_addr;
-		end += get_dma_offset(dev);
-	}
-#endif
-	else if (dma_ops == &dma_direct_ops)
-		end = memblock_end_of_DRAM() + get_dma_offset(dev);
-	else {
-		WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
-		end = memblock_end_of_DRAM();
-	}
+
+	if (dma_ops->get_required_mask)
+		return dma_ops->get_required_mask(dev);
 
-	mask = 1ULL << (fls64(end) - 1);
-	mask += mask - 1;
-
-	return mask;
+	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
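
(To see what dma_direct_get_required_mask computes, here is the same
two-step mask construction as a standalone program; __builtin_clzll is
used as a userspace stand-in for the kernel's fls64, and the sample
end address is an assumption for illustration:)

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's fls64(): 1-based index of the
 * highest set bit, so fls64(1) == 1 and fls64(0x100000000) == 33. */
static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	/* Pretend memblock_end_of_DRAM() + get_dma_offset() == 4GB. */
	uint64_t end = 0x100000000ULL;
	uint64_t mask;

	mask = 1ULL << (fls64(end) - 1);	/* highest bit of end: 0x100000000 */
	mask += mask - 1;			/* fill all lower bits: 0x1ffffffff */

	printf("end  = %#llx\nmask = %#llx\n",
	       (unsigned long long)end, (unsigned long long)mask);
	return 0;	/* mask = 0x1ffffffff: the device must address 33 bits */
}

(The result is the smallest all-ones mask covering the highest DMA
address the device could be handed; drivers typically compare
dma_get_required_mask() against DMA_BIT_MASK(32) to decide whether
enabling 64-bit addressing is worthwhile.)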