author     Joerg Roedel <jroedel@suse.de>    2017-06-02 15:44:57 +0200
committer  Joerg Roedel <jroedel@suse.de>    2017-06-08 14:39:16 +0200
commit     e241f8e76c152e000d481fc8334d41d22c013fe8 (patch)
tree       1be05851bc3efe2e7b7171b90b623627fc4c7f78 /drivers/iommu/amd_iommu.c
parent     iommu/amd: Make use of the per-domain flush queue (diff)
iommu/amd: Add locking to per-domain flush-queue

With locking we can safely access the flush-queues of other cpus.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
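The pattern the patch introduces is a per-CPU ring buffer whose head/tail manipulation is only valid while the queue's spinlock is held: the ring helpers assert the lock, and queue_add() takes it with spin_lock_irqsave() around the whole enqueue. As a rough illustration of that discipline (not the driver implementation), the userspace sketch below uses a pthread spinlock in place of the kernel's spinlock_t, and a fixed-size array in place of the real flush-queue entries; the struct layout, helper names, and queue size are illustrative only.

    /*
     * Minimal userspace sketch of the locking discipline added by this patch:
     * every ring helper assumes the queue lock is already held, and the
     * producer takes the lock around the whole enqueue operation.  Names and
     * sizes only loosely mirror the kernel code.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FLUSH_QUEUE_SIZE 8   /* illustrative; the real size differs */

    struct flush_entry {
            unsigned long iova_pfn;
            unsigned long pages;
    };

    struct flush_queue {
            struct flush_entry entries[FLUSH_QUEUE_SIZE];
            unsigned head, tail;
            pthread_spinlock_t lock;
    };

    /* Caller must hold queue->lock -- mirrors assert_spin_locked() in the patch. */
    static bool queue_ring_full(struct flush_queue *queue)
    {
            return ((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head;
    }

    /* Caller must hold queue->lock. */
    static unsigned queue_ring_add(struct flush_queue *queue)
    {
            unsigned idx = queue->tail;

            queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
            return idx;
    }

    static void queue_add(struct flush_queue *queue,
                          unsigned long iova_pfn, unsigned long pages)
    {
            unsigned idx;

            pthread_spin_lock(&queue->lock);

            if (queue_ring_full(queue)) {
                    /* Stand-in for "flush the TLB and release all queued IOVAs". */
                    queue->head = queue->tail;
            }

            idx = queue_ring_add(queue);
            queue->entries[idx].iova_pfn = iova_pfn;
            queue->entries[idx].pages    = pages;

            pthread_spin_unlock(&queue->lock);
    }

    int main(void)
    {
            struct flush_queue queue = { .head = 0, .tail = 0 };
            unsigned long pfn;

            pthread_spin_init(&queue.lock, PTHREAD_PROCESS_PRIVATE);

            for (pfn = 0; pfn < 20; pfn++)
                    queue_add(&queue, pfn, 1);

            printf("head=%u tail=%u\n", queue.head, queue.tail);
            pthread_spin_destroy(&queue.lock);

            return 0;
    }

Build with cc -pthread. Unlike the kernel code, this sketch does not disable interrupts; spin_lock_irqsave() in the patch additionally saves and restores the interrupt state around the critical section.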
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--   drivers/iommu/amd_iommu.c   11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 9fafc3026865..9a06acc8cc9d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -146,6 +146,7 @@ struct flush_queue_entry {
 struct flush_queue {
 	struct flush_queue_entry *entries;
 	unsigned head, tail;
+	spinlock_t lock;
 };
 
 /*
@@ -1801,6 +1802,8 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
 			dma_ops_domain_free_flush_queue(dom);
 			return -ENOMEM;
 		}
+
+		spin_lock_init(&queue->lock);
 	}
 
 	return 0;
@@ -1808,6 +1811,8 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
 
 static inline bool queue_ring_full(struct flush_queue *queue)
 {
+	assert_spin_locked(&queue->lock);
+
 	return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
 }
 
@@ -1819,6 +1824,8 @@ static void queue_release(struct dma_ops_domain *dom,
 {
 	unsigned i;
 
+	assert_spin_locked(&queue->lock);
+
 	queue_ring_for_each(i, queue)
 		free_iova_fast(&dom->iovad,
 			       queue->entries[i].iova_pfn,
@@ -1831,6 +1838,7 @@ static inline unsigned queue_ring_add(struct flush_queue *queue)
 {
 	unsigned idx = queue->tail;
 
+	assert_spin_locked(&queue->lock);
 	queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
 
 	return idx;
@@ -1840,12 +1848,14 @@ static void queue_add(struct dma_ops_domain *dom,
 		      unsigned long address, unsigned long pages)
 {
 	struct flush_queue *queue;
+	unsigned long flags;
 	int idx;
 
 	pages     = __roundup_pow_of_two(pages);
 	address >>= PAGE_SHIFT;
 
 	queue = get_cpu_ptr(dom->flush_queue);
+	spin_lock_irqsave(&queue->lock, flags);
 
 	if (queue_ring_full(queue)) {
 		domain_flush_tlb(&dom->domain);
@@ -1858,6 +1868,7 @@ static void queue_add(struct dma_ops_domain *dom,
 	queue->entries[idx].iova_pfn = address;
 	queue->entries[idx].pages    = pages;
 
+	spin_unlock_irqrestore(&queue->lock, flags);
 	put_cpu_ptr(dom->flush_queue);
 }