author     Linus Torvalds <torvalds@linux-foundation.org>   2019-11-28 20:16:43 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-11-28 20:16:43 +0100
commit     81b6b96475ac7a4ebfceae9f16fb3758327adbfe (patch)
tree       8288f9aacb9ee6dbcba8ee146c168a87bdc445a7 /drivers
parent     Merge tag 'ioremap-5.5' of git://git.infradead.org/users/hch/ioremap (diff)
parent     dma-mapping: treat dev->bus_dma_mask as a DMA limit (diff)
Merge tag 'dma-mapping-5.5' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - improve dma-debug scalability (Eric Dumazet)
 - tiny dma-debug cleanup (Dan Carpenter)
 - check for vmap memory in dma_map_single (Kees Cook)
 - check for dma_addr_t overflows in dma-direct when using DMA offsets
   (Nicolas Saenz Julienne)
 - switch the x86 sta2x11 SOC to use more generic DMA code (Nicolas
   Saenz Julienne)
 - fix arm-nommu dma-ranges handling (Vladimir Murzin)
 - use __initdata in CMA (Shyam Saini)
 - replace the bus dma mask with a limit (Nicolas Saenz Julienne)
 - merge the remapping helpers into the main dma-direct flow (me)
 - switch xtensa to the generic dma remap handling (me)
 - various cleanups around dma_capable (me)
 - remove unused dev arguments to various dma-noncoherent helpers (me)

* tag 'dma-mapping-5.5' of git://git.infradead.org/users/hch/dma-mapping: (22 commits)
  dma-mapping: treat dev->bus_dma_mask as a DMA limit
  dma-direct: exclude dma_direct_map_resource from the min_low_pfn check
  dma-direct: don't check swiotlb=force in dma_direct_map_resource
  dma-debug: clean up put_hash_bucket()
  powerpc: remove support for NULL dev in __phys_to_dma / __dma_to_phys
  dma-direct: avoid a forward declaration for phys_to_dma
  dma-direct: unify the dma_capable definitions
  dma-mapping: drop the dev argument to arch_sync_dma_for_*
  x86/PCI: sta2x11: use default DMA address translation
  dma-direct: check for overflows on 32 bit DMA addresses
  dma-debug: increase HASH_SIZE
  dma-debug: reorder struct dma_debug_entry fields
  xtensa: use the generic uncached segment support
  dma-mapping: merge the generic remapping helpers into dma-direct
  dma-direct: provide mmap and get_sgtable method overrides
  dma-direct: remove the dma_handle argument to __dma_direct_alloc_pages
  dma-direct: remove __dma_direct_free_pages
  usb: core: Remove redundant vmap checks
  kernel: dma-contiguous: mark CMA parameters __initdata/__initconst
  dma-debug: add a schedule point in debug_dma_dump_mappings()
  ...
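The recurring theme of this pull is replacing dev->bus_dma_mask with dev->bus_dma_limit. A mask can only describe a power-of-two window starting at zero, while a limit records the exact highest bus address the interconnect can reach, which matters for windows such as 3 GiB. A minimal standalone C sketch of the difference (the struct and helper names below are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

/* Illustrative device carrying both representations side by side. */
struct fake_dev {
	uint64_t   dma_mask;      /* 2^n - 1, e.g. DMA_BIT_MASK(32) */
	dma_addr_t bus_dma_limit; /* highest reachable bus address */
};

/* Mask-based check: only power-of-two windows are representable, so a
 * 3 GiB window (last valid address 0xBFFFFFFF) has to be rounded up to
 * a 32-bit mask that wrongly admits [0xC0000000, 0xFFFFFFFF]. */
static bool capable_mask(const struct fake_dev *d, dma_addr_t addr, size_t size)
{
	return addr + size - 1 <= d->dma_mask;
}

/* Limit-based check: any window ending at bus_dma_limit works,
 * power of two or not. */
static bool capable_limit(const struct fake_dev *d, dma_addr_t addr, size_t size)
{
	return addr + size - 1 <= d->bus_dma_limit;
}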
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/arm64/iort.c   20
-rw-r--r--  drivers/ata/ahci.c           2
-rw-r--r--  drivers/iommu/dma-iommu.c   13
-rw-r--r--  drivers/of/device.c          9
-rw-r--r--  drivers/usb/core/hcd.c       8
-rw-r--r--  drivers/xen/swiotlb-xen.c   12
6 files changed, 26 insertions, 38 deletions
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 5a7551d060f2..33f71983e001 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1057,8 +1057,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
*/
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
- u64 mask, dmaaddr = 0, size = 0, offset = 0;
- int ret, msb;
+ u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
+ int ret;
/*
* If @dev is expected to be DMA-capable then the bus code that created
@@ -1085,19 +1085,13 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
}
if (!ret) {
- msb = fls64(dmaaddr + size - 1);
/*
- * Round-up to the power-of-two mask or set
- * the mask to the whole 64-bit address space
- * in case the DMA region covers the full
- * memory window.
+ * Limit coherent and dma mask based on size retrieved from
+ * firmware.
*/
- mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
- /*
- * Limit coherent and dma mask based on size
- * retrieved from firmware.
- */
- dev->bus_dma_mask = mask;
+ end = dmaaddr + size - 1;
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
+ dev->bus_dma_limit = end;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
}
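This rewrite also folds away the old msb == 64 special case, since DMA_BIT_MASK(64) already yields all-ones. A worked example as standalone C (DMA_BIT_MASK and ilog2 are re-implemented here to mirror the kernel helpers; assumes the GCC/Clang __builtin_clzll builtin):

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* floor(log2(v)) for v > 0, like the kernel's ilog2() on a u64 */
static unsigned int ilog2_u64(uint64_t v)
{
	return 63 - __builtin_clzll(v);
}

int main(void)
{
	uint64_t dmaaddr = 0, size = 0xC0000000ULL;	/* 3 GiB window */
	uint64_t end  = dmaaddr + size - 1;		/* 0xBFFFFFFF   */
	uint64_t mask = DMA_BIT_MASK(ilog2_u64(end) + 1);

	/* The mask rounds up to 0xFFFFFFFF (32 bits), but bus_dma_limit
	 * keeps the exact end, so [0xC0000000, 0xFFFFFFFF] is still
	 * correctly rejected by the limit check. */
	printf("end=%#llx mask=%#llx\n",
	       (unsigned long long)end, (unsigned long long)mask);
	return 0;
}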
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ec6c64fce74a..4bfd1b14b390 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -910,7 +910,7 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
* value, don't extend it here. This happens on STA2X11, for example.
*
* XXX: manipulating the DMA mask from platform code is completely
- * bogus, platform code should use dev->bus_dma_mask instead..
+ * bogus, platform code should use dev->bus_dma_limit instead..
*/
if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
return 0;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f321279baf9e..646332fbf3d7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -405,8 +405,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
iova_len = roundup_pow_of_two(iova_len);
- if (dev->bus_dma_mask)
- dma_limit &= dev->bus_dma_mask;
+ dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
@@ -659,7 +658,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_cpu(dev, phys, size, dir);
+ arch_sync_dma_for_cpu(phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -671,7 +670,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -685,7 +684,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -699,7 +698,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -714,7 +713,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
dma_handle = __iommu_dma_map(dev, phys, size, prot);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return dma_handle;
}
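The min_not_zero() form replaces the explicit "only if set" test: a zero bus_dma_limit means the bus imposes no restriction and must not clamp the limit to nothing. A standalone sketch of that semantic with a worked clamp (the helper below mirrors the kernel macro of the same name):

#include <stdint.h>

/* Smaller of two values, treating zero as "unset". */
static uint64_t min_not_zero_u64(uint64_t a, uint64_t b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

/*
 * iommu_dma_alloc_iova() clamping, step by step:
 *   dma_limit     = 0xFFFFFFFF  (device's 32-bit DMA mask)
 *   bus_dma_limit = 0xBFFFFFFF  (3 GiB interconnect window)
 *   result        = 0xBFFFFFFF  (the tighter bound wins)
 * With no bus restriction, bus_dma_limit == 0 is ignored and the
 * device mask alone bounds the IOVA allocation.
 */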
diff --git a/drivers/of/device.c b/drivers/of/device.c
index da8158392010..e9127db7b067 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -93,7 +93,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
bool coherent;
unsigned long offset;
const struct iommu_ops *iommu;
- u64 mask;
+ u64 mask, end;
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
@@ -148,12 +148,13 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
+ end = dma_addr + size - 1;
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
- /* ...but only set bus mask if we found valid dma-ranges earlier */
+ /* ...but only set bus limit if we found valid dma-ranges earlier */
if (!ret)
- dev->bus_dma_mask = mask;
+ dev->bus_dma_limit = end;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
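For of_dma_configure() the same end/mask split applies, with one subtlety: the driver-visible masks are still rounded up to a power of two and narrowed with &=, never widened, while the exact end goes into bus_dma_limit, and only when a dma-ranges property was actually found (ret == 0). A worked example, assuming a hypothetical dma-ranges translating to 2 GiB at offset 0:

/*
 *   dma_addr = 0x0, size = 0x80000000
 *   end  = 0x7FFFFFFF
 *   mask = DMA_BIT_MASK(ilog2(0x7FFFFFFF) + 1) = DMA_BIT_MASK(31)
 *
 * With a driver default of DMA_BIT_MASK(32):
 *   coherent_dma_mask: 0xFFFFFFFF & 0x7FFFFFFF -> 0x7FFFFFFF
 *   *dev->dma_mask:    0xFFFFFFFF & 0x7FFFFFFF -> 0x7FFFFFFF
 *   bus_dma_limit:     0x7FFFFFFF (set only because ret == 0)
 */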
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index f225eaa98ff8..281568d464f9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1410,10 +1410,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (hcd->self.uses_pio_for_control)
return ret;
if (hcd_uses_dma(hcd)) {
- if (is_vmalloc_addr(urb->setup_packet)) {
- WARN_ONCE(1, "setup packet is not dma capable\n");
- return -EAGAIN;
- } else if (object_is_on_stack(urb->setup_packet)) {
+ if (object_is_on_stack(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is on stack\n");
return -EAGAIN;
}
@@ -1479,9 +1476,6 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_PAGE;
- } else if (is_vmalloc_addr(urb->transfer_buffer)) {
- WARN_ONCE(1, "transfer buffer not dma capable\n");
- ret = -EAGAIN;
} else if (object_is_on_stack(urb->transfer_buffer)) {
WARN_ONCE(1, "transfer buffer is on stack\n");
ret = -EAGAIN;
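These vmalloc checks became redundant with this series: the generic dma_map_single() path now rejects vmap memory itself (the "check for vmap memory in dma_map_single" item from the summary above). Roughly, per Kees Cook's patch in this pull (a sketch, not a verbatim quote):

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;

	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}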
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index bd3a10dfac15..b6d27762c6f8 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size) &&
+ if (dma_capable(dev, dev_addr, size, true) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
swiotlb_force != SWIOTLB_FORCE)
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (unlikely(!dma_capable(dev, dev_addr, size))) {
+ if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
swiotlb_tbl_unmap_single(dev, map, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
@@ -405,7 +405,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
done:
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+ xen_dma_sync_for_device(dev_addr, phys, size, dir);
return dev_addr;
}
@@ -425,7 +425,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+ xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
@@ -439,7 +439,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
phys_addr_t paddr = xen_bus_to_phys(dma_addr);
if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+ xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -455,7 +455,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+ xen_dma_sync_for_device(dma_addr, paddr, size, dir);
}
/*
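The new boolean passed to dma_capable() comes from unifying its definitions in this series: it tells the helper whether the mapping targets RAM, so the low-PFN sanity check can be skipped for MMIO resource mappings ("dma-direct: exclude dma_direct_map_resource from the min_low_pfn check" in the shortlog). Xen maps pages, hence true. A sketch of the unified helper as the series shapes it (modulo the exact kernel spelling):

static inline bool dma_capable(struct device *dev, dma_addr_t addr,
		size_t size, bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (addr == DMA_MAPPING_ERROR)
		return false;

	/* Only RAM-backed mappings must sit above the first usable PFN;
	 * dma_direct_map_resource() targets MMIO and skips this check. */
	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	/* Both the device mask and the bus limit bound the range; a zero
	 * bus_dma_limit means no bus-imposed restriction. */
	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}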