author    Joerg Roedel <jroedel@suse.de>  2017-08-10 16:31:17 +0200
committer Joerg Roedel <jroedel@suse.de>  2017-08-15 18:23:52 +0200
commit    8109c2a2f8463852dddd6a1c3fcf262047c0c124 (patch)
tree      f7f822e45c2a0849ce240c486c58bfb0fe0f6ced /drivers/iommu/iova.c
parent    iommu/iova: Add flush counters to Flush-Queue implementation (diff)
iommu/iova: Add locking to Flush-Queues
The lock is taken from the same CPU most of the time, but having it
allows the queue to be flushed from another CPU as well. This will be
used by a timer to regularly flush any pending IOVAs from the
Flush-Queues.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
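The pattern is easy to model outside the kernel. Below is a minimal
userspace sketch of a lock-protected flush ring (the names flush_queue,
fq_add, fq_flush and the size FQ_SIZE are illustrative assumptions, not
kernel code): the owning thread normally fills the ring, but taking the
lock on every access lets any other thread drain it, which is exactly
what a periodic flush timer needs. Build with: cc -o fq fq.c -lpthread

/* Minimal userspace model of a lock-protected flush ring; all names
 * here are illustrative, not taken from the kernel sources. */
#include <pthread.h>
#include <stdio.h>

#define FQ_SIZE 8	/* ring size; the kernel ring is larger */

struct flush_queue {
	unsigned head;			/* next entry to drain */
	unsigned tail;			/* next free slot */
	unsigned long entries[FQ_SIZE];
	pthread_spinlock_t lock;	/* protects head, tail, entries */
};

/* one slot stays unused so that full and empty are distinguishable */
static int fq_full(struct flush_queue *fq)
{
	return ((fq->tail + 1) % FQ_SIZE) == fq->head;
}

/* called by the queue's owner; drops the entry if the ring is full */
static void fq_add(struct flush_queue *fq, unsigned long data)
{
	pthread_spin_lock(&fq->lock);
	if (!fq_full(fq)) {
		fq->entries[fq->tail] = data;
		fq->tail = (fq->tail + 1) % FQ_SIZE;
	}
	pthread_spin_unlock(&fq->lock);
}

/* may run on any thread, e.g. from a periodic timer */
static void fq_flush(struct flush_queue *fq)
{
	pthread_spin_lock(&fq->lock);
	while (fq->head != fq->tail) {
		printf("flushing entry 0x%lx\n", fq->entries[fq->head]);
		fq->head = (fq->head + 1) % FQ_SIZE;
	}
	pthread_spin_unlock(&fq->lock);
}

int main(void)
{
	struct flush_queue fq = { .head = 0, .tail = 0 };

	pthread_spin_init(&fq.lock, PTHREAD_PROCESS_PRIVATE);
	fq_add(&fq, 0x1000);
	fq_add(&fq, 0x2000);
	fq_flush(&fq);	/* drains both entries */
	pthread_spin_destroy(&fq.lock);
	return 0;
}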
Diffstat (limited to 'drivers/iommu/iova.c')
-rw-r--r--	drivers/iommu/iova.c	11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 47b144e417ad..749d39533e0b 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -91,6 +91,8 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 		fq = per_cpu_ptr(iovad->fq, cpu);
 		fq->head = 0;
 		fq->tail = 0;
+
+		spin_lock_init(&fq->lock);
 	}
 
 	return 0;
@@ -471,6 +473,7 @@ EXPORT_SYMBOL_GPL(free_iova_fast);
 
 static inline bool fq_full(struct iova_fq *fq)
 {
+	assert_spin_locked(&fq->lock);
 	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
 }
 
@@ -478,6 +481,8 @@ static inline unsigned fq_ring_add(struct iova_fq *fq)
 {
 	unsigned idx = fq->tail;
 
+	assert_spin_locked(&fq->lock);
+
 	fq->tail = (idx + 1) % IOVA_FQ_SIZE;
 
 	return idx;
@@ -488,6 +493,8 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
 	unsigned idx;
 
+	assert_spin_locked(&fq->lock);
+
 	fq_ring_for_each(idx, fq) {
 
 		if (fq->entries[idx].counter >= counter)
@@ -537,8 +544,11 @@ void queue_iova(struct iova_domain *iovad,
 		unsigned long data)
 {
 	struct iova_fq *fq = get_cpu_ptr(iovad->fq);
+	unsigned long flags;
 	unsigned idx;
 
+	spin_lock_irqsave(&fq->lock, flags);
+
 	/*
 	 * First remove all entries from the flush queue that have already been
 	 * flushed out on another CPU. This makes the fq_full() check below less
@@ -558,6 +568,7 @@ void queue_iova(struct iova_domain *iovad,
 	fq->entries[idx].data = data;
 	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
 
+	spin_unlock_irqrestore(&fq->lock, flags);
 	put_cpu_ptr(iovad->fq);
 }
 EXPORT_SYMBOL_GPL(queue_iova);
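The cross-CPU user that the commit message refers to, a flush timer,
arrives in a later patch of this series. Roughly, its callback walks
every per-CPU queue and drains it under the lock added here; the sketch
below uses assumed names (fq_flush_timeout, the pre-4.15 timer callback
signature) and is not part of this diff:

/* Sketch of the timer-driven flush the commit message refers to; the
 * function name and setup are assumptions, not taken from this patch. */
static void fq_flush_timeout(unsigned long data)
{
	struct iova_domain *iovad = (struct iova_domain *)data;
	int cpu;

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);

		/* the lock added by this patch makes cross-CPU draining safe */
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}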