author		Joerg Roedel <jroedel@suse.de>	2017-06-02 15:37:26 +0200
committer	Joerg Roedel <jroedel@suse.de>	2017-06-08 14:39:10 +0200
commit		fd62190a67d6bdf9b93dea056adfcd7fd29b0f92 (patch)
tree		ff153aaa532e8ba75087e85e503895536a67bef6	/drivers/iommu/amd_iommu.c
parent		iommu/amd: Add per-domain flush-queue data structures (diff)
iommu/amd: Make use of the per-domain flush queue
Fill the flush-queue on unmap and only flush the IOMMU and device TLBs when a per-cpu queue gets full.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
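The code below relies on the per-domain flush-queue structures introduced by the parent commit ("iommu/amd: Add per-domain flush-queue data structures"), which are not visible in this diff. For orientation, here is a rough sketch of those definitions, reconstructed from how the fields are used in this patch; the exact queue size and field layout are assumptions, not copied from the parent patch:

	#define FLUSH_QUEUE_SIZE	256	/* assumed ring size */

	struct flush_queue_entry {
		unsigned long iova_pfn;		/* start of the deferred IOVA range (in pages) */
		unsigned long pages;		/* number of pages to free after the flush */
	};

	struct flush_queue {
		struct flush_queue_entry *entries;	/* array of FLUSH_QUEUE_SIZE slots */
		unsigned head, tail;			/* consumer / producer ring indices */
	};

	/* struct dma_ops_domain is assumed to carry the per-cpu queues as
	 *	struct flush_queue __percpu *flush_queue;
	 * which is what get_cpu_ptr(dom->flush_queue) below dereferences.
	 */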
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--	drivers/iommu/amd_iommu.c | 60
1 file changed, 56 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2418fcc28fbe..9fafc3026865 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1806,6 +1806,61 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
return 0;
}
+static inline bool queue_ring_full(struct flush_queue *queue)
+{
+ return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
+}
+
+#define queue_ring_for_each(i, q) \
+ for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
+
+static void queue_release(struct dma_ops_domain *dom,
+ struct flush_queue *queue)
+{
+ unsigned i;
+
+ queue_ring_for_each(i, queue)
+ free_iova_fast(&dom->iovad,
+ queue->entries[i].iova_pfn,
+ queue->entries[i].pages);
+
+ queue->head = queue->tail = 0;
+}
+
+static inline unsigned queue_ring_add(struct flush_queue *queue)
+{
+ unsigned idx = queue->tail;
+
+ queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
+
+ return idx;
+}
+
+static void queue_add(struct dma_ops_domain *dom,
+ unsigned long address, unsigned long pages)
+{
+ struct flush_queue *queue;
+ int idx;
+
+ pages = __roundup_pow_of_two(pages);
+ address >>= PAGE_SHIFT;
+
+ queue = get_cpu_ptr(dom->flush_queue);
+
+ if (queue_ring_full(queue)) {
+ domain_flush_tlb(&dom->domain);
+ domain_flush_complete(&dom->domain);
+ queue_release(dom, queue);
+ }
+
+ idx = queue_ring_add(queue);
+
+ queue->entries[idx].iova_pfn = address;
+ queue->entries[idx].pages = pages;
+
+ put_cpu_ptr(dom->flush_queue);
+}
+
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
@@ -2454,10 +2509,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
domain_flush_tlb(&dma_dom->domain);
domain_flush_complete(&dma_dom->domain);
} else {
- /* Keep the if() around, we need it later again */
- dma_ops_free_iova(dma_dom, dma_addr, pages);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
+ queue_add(dma_dom, dma_addr, pages);
}
}
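The ring arithmetic above (full check, wrap-around add, drain from head to tail) can be exercised in isolation. Below is a minimal userspace sketch; the names, the ring size, and the printf stand-in for the TLB flush are illustrative only and do not come from the kernel sources:

	#include <stdio.h>

	#define RING_SIZE 8	/* illustrative; the driver uses FLUSH_QUEUE_SIZE */

	struct ring {
		unsigned long entries[RING_SIZE];
		unsigned head, tail;
	};

	/* Full when advancing tail would land on head; one slot stays unused. */
	static int ring_full(const struct ring *r)
	{
		return ((r->tail + 1) % RING_SIZE) == r->head;
	}

	/* Walk head..tail, mirroring queue_ring_for_each()/queue_release(). */
	static void ring_drain(struct ring *r)
	{
		unsigned i;

		for (i = r->head; i != r->tail; i = (i + 1) % RING_SIZE)
			printf("flush + free entry %lu\n", r->entries[i]);

		r->head = r->tail = 0;
	}

	/* Mirrors queue_add(): drain when full, then append at tail. */
	static void ring_add(struct ring *r, unsigned long val)
	{
		if (ring_full(r))
			ring_drain(r);	/* stands in for domain_flush_tlb() + queue_release() */

		r->entries[r->tail] = val;
		r->tail = (r->tail + 1) % RING_SIZE;
	}

	int main(void)
	{
		struct ring r = { .head = 0, .tail = 0 };
		unsigned long i;

		for (i = 0; i < 20; i++)
			ring_add(&r, i);

		ring_drain(&r);	/* final drain */
		return 0;
	}

Because one slot is kept free, at most RING_SIZE - 1 unmaps are batched before a flush is forced, which is the same property the queue_ring_full() check gives each per-cpu queue in the patch.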