author     Robin Murphy <robin.murphy@arm.com>   2018-05-22 13:50:09 +0200
committer  Joerg Roedel <jroedel@suse.de>        2018-05-29 16:57:58 +0200
commit     4b123757eeaab1d522605b4469ee1adc18a80c90 (patch)
tree       0307f0cad26e227a1881598a6e633bfeefd84cc4 /drivers/iommu/io-pgtable-arm.c
parent     iommu/io-pgtable-arm: Use for_each_set_bit to simplify code (diff)
iommu/io-pgtable-arm: Make allocations NUMA-aware
We would generally expect pagetables to be read by the IOMMU more than
written by the CPU, so in NUMA systems it makes sense to locate them
close to the former and avoid cross-node pagetable walks if at all
possible. As it turns out, we already have a handle on the IOMMU device
for the sake of coherency management, so it's trivial to grab the
appropriate NUMA node when allocating new pagetable pages.

Note that we drop the semantics of alloc_pages_exact(), but that's fine
since they have never been necessary: the only time we're allocating
more than one page is for stage 2 top-level concatenation, but since
that is based on the number of IPA bits, the size is always some exact
power of two anyway.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
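A minimal sketch of the pattern in isolation (the helper name
alloc_near_device is made up for illustration, not a kernel API):
dev_to_node() reports the NUMA node a device is attached to, returning
NUMA_NO_NODE when unknown, in which case alloc_pages_node() simply
falls back to the calling CPU's local node.

#include <linux/device.h>	/* dev_to_node() */
#include <linux/gfp.h>		/* alloc_pages_node(), __GFP_ZERO */

/* Illustrative only: allocate 2^order zeroed pages on the node
 * closest to @dev, so a device that mostly reads this memory (here,
 * the IOMMU pagetable walker) avoids cross-node traffic. */
static struct page *alloc_near_device(struct device *dev, gfp_t gfp,
				      unsigned int order)
{
	return alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
}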
Diffstat (limited to 'drivers/iommu/io-pgtable-arm.c')
 drivers/iommu/io-pgtable-arm.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4ffdd88b1566..010a254305dd 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -231,12 +231,17 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 				    struct io_pgtable_cfg *cfg)
 {
 	struct device *dev = cfg->iommu_dev;
+	int order = get_order(size);
+	struct page *p;
 	dma_addr_t dma;
-	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+	void *pages;
 
-	if (!pages)
+	VM_BUG_ON((gfp & __GFP_HIGHMEM));
+	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
+	if (!p)
 		return NULL;
 
+	pages = page_address(p);
 	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
 		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, dma))
@@ -256,7 +261,7 @@ out_unmap:
 	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
 	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 out_free:
-	free_pages_exact(pages, size);
+	__free_pages(p, order);
 	return NULL;
 }
@@ -266,7 +271,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
 	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
 		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 				 size, DMA_TO_DEVICE);
-	free_pages_exact(pages, size);
+	free_pages((unsigned long)pages, get_order(size));
 }
 
 static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
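Taken together, the hunks above replace the alloc_pages_exact()/
free_pages_exact() pair with an order-based pair. A hypothetical
stand-alone sketch of that pairing (pgtable_alloc()/pgtable_free() are
made-up names, not from the patch): get_order(size) yields the
smallest order n with (PAGE_SIZE << n) >= size, so for the
power-of-two sizes used here it allocates exactly the requested
amount, and the same order must be recomputed when freeing.

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* page_address(), get_order() */

static void *pgtable_alloc(struct device *dev, size_t size, gfp_t gfp)
{
	/* Round size up to a whole power-of-two number of pages and
	 * allocate them on the device's NUMA node. */
	struct page *p = alloc_pages_node(dev_to_node(dev),
					  gfp | __GFP_ZERO, get_order(size));

	return p ? page_address(p) : NULL;
}

static void pgtable_free(void *pages, size_t size)
{
	/* free_pages() must be given the same order the allocation used. */
	free_pages((unsigned long)pages, get_order(size));
}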