author     Russell King <rmk+kernel@arm.linux.org.uk>    2013-07-09 13:14:49 +0200
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2013-10-31 15:49:21 +0100
commit     4dcfa60071b3d23f0181f27d8519f12e37cefbb9 (patch)
tree       a3c568b34226152a8f825c7ce1cff8a8bf1a20c2
parent     ARM: 7857/1: dma: imx-sdma: setup dma mask (diff)
ARM: DMA-API: better handling of DMA masks for coherent allocations
We need to start treating DMA masks as something which is specific to the bus that the device resides on, otherwise we're going to hit all sorts of nasty issues with LPAE and 32-bit DMA controllers in >32-bit systems, where memory is offset from PFN 0.

In order to start doing this, we convert the DMA mask to a PFN using the device specific dma_to_pfn() macro. This is the reverse of the pfn_to_dma() macro which is used to get the DMA address for the device.

This gives us a PFN mask, which we can then check against the PFN limit of the DMA zone.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--  arch/arm/mm/dma-mapping.c  51
-rw-r--r--  arch/arm/mm/init.c          2
-rw-r--r--  arch/arm/mm/mm.h            2
3 files changed, 49 insertions(+), 6 deletions(-)
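
In essence, the new checks translate the device's coherent DMA mask into a PFN with dma_to_pfn() and compare that PFN against the DMA zone limit (arm_dma_pfn_limit). A minimal sketch of that logic follows; the helper name coherent_mask_covers_dma_zone() is hypothetical, not part of the patch, and only assumes the dma_to_pfn()/arm_dma_pfn_limit semantics introduced here (kernel context, arch/arm/mm/mm.h):

/*
 * Hypothetical sketch, not part of the patch: does this coherent DMA
 * mask reach at least as far as the DMA zone?
 */
static bool coherent_mask_covers_dma_zone(struct device *dev, u64 mask)
{
	/*
	 * A mask wider than dma_addr_t only matters if the machine
	 * really has memory beyond what dma_addr_t can express.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
		return false;

	/* The mask, converted to a PFN, must cover the whole DMA zone. */
	return dma_to_pfn(dev, mask) >= arm_dma_pfn_limit;
}
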
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..7b3f2426a1fb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
static u64 get_coherent_dma_mask(struct device *dev)
{
- u64 mask = (u64)arm_dma_limit;
+ u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- if ((~mask) & (u64)arm_dma_limit) {
- dev_warn(dev, "coherent DMA mask %#llx is smaller "
- "than system GFP_DMA mask %#llx\n",
- mask, (u64)arm_dma_limit);
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then fail the
+ * allocation.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+ mask);
+ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+ return 0;
+ }
+
+ /*
+ * Now check that the mask, when translated to a PFN,
+ * fits within the allowable addresses which we can
+ * allocate.
+ */
+ if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+ mask,
+ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+ arm_dma_pfn_limit + 1);
return 0;
}
}
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
- if (mask < (u64)arm_dma_limit)
+ unsigned long limit;
+
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then we must
+ * indicate that DMA to this device is not supported.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
+ return 0;
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ limit = dma_to_pfn(dev, mask);
+
+ if (limit < arm_dma_pfn_limit)
return 0;
+
return 1;
}
EXPORT_SYMBOL(dma_supported);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index febaee7ca57b..8aab24f35a5e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -218,6 +218,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
* so a successful GFP_DMA allocation will always satisfy this.
*/
phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
@@ -240,6 +241,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
+ arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
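
For a sense of the values involved: on a machine without a ZONE_DMA size restriction and with 4K pages, the new variable works out as below (illustrative numbers only, assuming PAGE_SHIFT == 12):

	arm_dma_limit     = 0xffffffff;        /* whole 32-bit DMA range */
	arm_dma_pfn_limit = 0xffffffff >> 12;  /* = 0xfffff, last PFN below 4GiB */
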
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0f..d5a982d15a88 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
#else
#define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif
extern phys_addr_t arm_lowmem_limit;
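
Finally, one of the new warnings ("Driver did not use or check the return value from dma_set_coherent_mask()?") is aimed at drivers that set a coherent mask blindly. A hedged sketch of the pattern it asks for is shown here; the foo_probe() function and its device are illustrative and not part of this patch, while dma_set_coherent_mask() and DMA_BIT_MASK() are the standard DMA-API calls:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Illustrative driver fragment: always act on the return value. */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "unable to set coherent DMA mask: %d\n", ret);
		return ret;
	}

	/* ... the rest of probe can rely on coherent allocations ... */
	return 0;
}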