author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-09-09 18:06:49 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-09-10 11:33:45 +0200
commit	2797982ed93c10d5585ee1842ab298cb11326ff5 (patch)
tree	4215ac50f4ac4ed6349be469b76ab45e4f839d78 /lib
parent	x86: convert pci-nommu to use is_buffer_dma_capable helper function (diff)
swiotlb: convert swiotlb to use is_buffer_dma_capable helper function
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
-rw-r--r--	lib/swiotlb.c	15
1 file changed, 8 insertions(+), 7 deletions(-)
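The patch below replaces swiotlb's open-coded DMA mask check with the is_buffer_dma_capable() helper introduced earlier in this series. For reference, a minimal sketch of that helper as it appeared in include/linux/dma-mapping.h around this series (recalled from the surrounding commits, so treat the exact form as an assumption):

	static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr,
						size_t size)
	{
		/* The entire buffer, not just its start, must fit under the mask. */
		return addr + size - 1 <= mask;
	}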
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index b5f5d1133042..240a67c2c979 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,13 @@ cleanup1:
}
static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
dma_addr_t mask = 0xffffffff;
/* If the device has a mask, use it, otherwise default to 32 bits */
if (hwdev && hwdev->dma_mask)
mask = *hwdev->dma_mask;
- return (addr & ~mask) != 0;
+ return !is_buffer_dma_capable(mask, addr, size);
}
static int is_swiotlb_buffer(char *addr)
@@ -473,7 +473,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
int order = get_order(size);
ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+ if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -497,7 +497,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
- if (address_needs_mapping(hwdev, dev_addr)) {
+ if (address_needs_mapping(hwdev, dev_addr, size)) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)*hwdev->dma_mask,
(unsigned long long)dev_addr);
@@ -561,7 +561,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+ if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
return dev_addr;
/*
@@ -578,7 +578,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
/*
* Ensure that the address returned is DMA'ble
*/
- if (address_needs_mapping(hwdev, dev_addr))
+ if (address_needs_mapping(hwdev, dev_addr, size))
panic("map_single: bounce buffer is not DMA'ble");
return dev_addr;
@@ -721,7 +721,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
for_each_sg(sgl, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
dev_addr = virt_to_bus(addr);
- if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ if (swiotlb_force ||
+ address_needs_mapping(hwdev, dev_addr, sg->length)) {
void *map = map_single(hwdev, addr, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
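Why the new size argument matters: the old test, (addr & ~mask) != 0, inspects only the buffer's start address, so a buffer that begins below the DMA mask limit but extends past it was wrongly treated as device-reachable. A standalone userspace sketch contrasting the two checks (hypothetical demo code, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Size-aware check mirroring is_buffer_dma_capable(). */
	static int buffer_dma_capable(uint64_t mask, uint64_t addr, uint64_t size)
	{
		return addr + size - 1 <= mask;
	}

	int main(void)
	{
		uint64_t mask = 0xffffffffULL;  /* 32-bit DMA mask */
		uint64_t addr = 0xfffff000ULL;  /* starts below 4 GiB ... */
		uint64_t size = 0x2000;         /* ... but crosses the boundary */

		/* Old check: start address only -> prints 0, buffer looks fine. */
		printf("old check needs mapping: %d\n", (addr & ~mask) != 0);
		/* New check: start + size -> prints 1, bounce buffering required. */
		printf("new check needs mapping: %d\n",
		       !buffer_dma_capable(mask, addr, size));
		return 0;
	}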