Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r-- | drivers/iommu/dma-iommu.c | 171
1 file changed, 94 insertions, 77 deletions
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 96057bec4164..d85d54f2b549 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -9,9 +9,12 @@
  */
 #include <linux/acpi_iort.h>
+#include <linux/atomic.h>
+#include <linux/crash_dump.h>
 #include <linux/device.h>
-#include <linux/dma-map-ops.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-iommu.h>
+#include <linux/dma-map-ops.h>
 #include <linux/gfp.h>
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
@@ -20,11 +23,10 @@
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
-#include <linux/swiotlb.h>
 #include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/swiotlb.h>
 #include <linux/vmalloc.h>
-#include <linux/crash_dump.h>
-#include <linux/dma-direct.h>
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -41,7 +43,19 @@ struct iommu_dma_cookie {
 	enum iommu_dma_cookie_type	type;
 	union {
 		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
-		struct iova_domain	iovad;
+		struct {
+			struct iova_domain	iovad;
+
+			struct iova_fq __percpu	*fq;	/* Flush queue */
+			/* Number of TLB flushes that have been started */
+			atomic64_t		fq_flush_start_cnt;
+			/* Number of TLB flushes that have been finished */
+			atomic64_t		fq_flush_finish_cnt;
+			/* Timer to regularily empty the flush queues */
+			struct timer_list	fq_timer;
+			/* 1 when timer is active, 0 when not */
+			atomic_t		fq_timer_on;
+		};
 		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
 		dma_addr_t		msi_iova;
 	};
@@ -64,6 +78,27 @@ static int __init iommu_dma_forcedac_setup(char *str)
 }
 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
+/* Number of entries per flush queue */
+#define IOVA_FQ_SIZE	256
+
+/* Timeout (in ms) after which entries are flushed from the queue */
+#define IOVA_FQ_TIMEOUT	10
+
+/* Flush queue entry for deferred flushing */
+struct iova_fq_entry {
+	unsigned long iova_pfn;
+	unsigned long pages;
+	struct list_head freelist;
+	u64 counter; /* Flush counter when this entry was added */
+};
+
+/* Per-CPU flush queue structure */
+struct iova_fq {
+	struct iova_fq_entry entries[IOVA_FQ_SIZE];
+	unsigned int head, tail;
+	spinlock_t lock;
+};
+
 #define fq_ring_for_each(i, fq) \
 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
 
@@ -73,9 +108,9 @@ static inline bool fq_full(struct iova_fq *fq)
 	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
 }
 
-static inline unsigned fq_ring_add(struct iova_fq *fq)
+static inline unsigned int fq_ring_add(struct iova_fq *fq)
 {
-	unsigned idx = fq->tail;
+	unsigned int idx = fq->tail;
 
 	assert_spin_locked(&fq->lock);
 
@@ -84,10 +119,10 @@ static inline unsigned fq_ring_add(struct iova_fq *fq)
 	return idx;
 }
 
-static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
+static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
 {
-	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
-	unsigned idx;
+	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
+	unsigned int idx;
 
 	assert_spin_locked(&fq->lock);
 
@@ -97,7 +132,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 			break;
 
 		put_pages_list(&fq->entries[idx].freelist);
-		free_iova_fast(iovad,
+		free_iova_fast(&cookie->iovad,
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
 
@@ -105,50 +140,50 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 	}
 }
 
-static void iova_domain_flush(struct iova_domain *iovad)
+static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
 {
-	atomic64_inc(&iovad->fq_flush_start_cnt);
-	iovad->fq_domain->ops->flush_iotlb_all(iovad->fq_domain);
-	atomic64_inc(&iovad->fq_flush_finish_cnt);
+	atomic64_inc(&cookie->fq_flush_start_cnt);
+	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
+	atomic64_inc(&cookie->fq_flush_finish_cnt);
 }
 
 static void fq_flush_timeout(struct timer_list *t)
 {
-	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
+	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
 	int cpu;
 
-	atomic_set(&iovad->fq_timer_on, 0);
-	iova_domain_flush(iovad);
+	atomic_set(&cookie->fq_timer_on, 0);
+	fq_flush_iotlb(cookie);
 
 	for_each_possible_cpu(cpu) {
 		unsigned long flags;
 		struct iova_fq *fq;
 
-		fq = per_cpu_ptr(iovad->fq, cpu);
+		fq = per_cpu_ptr(cookie->fq, cpu);
 		spin_lock_irqsave(&fq->lock, flags);
-		fq_ring_free(iovad, fq);
+		fq_ring_free(cookie, fq);
 		spin_unlock_irqrestore(&fq->lock, flags);
 	}
 }
 
-void queue_iova(struct iova_domain *iovad,
+static void queue_iova(struct iommu_dma_cookie *cookie,
 		unsigned long pfn, unsigned long pages,
 		struct list_head *freelist)
 {
 	struct iova_fq *fq;
 	unsigned long flags;
-	unsigned idx;
+	unsigned int idx;
 
 	/*
 	 * Order against the IOMMU driver's pagetable update from unmapping
-	 * @pte, to guarantee that iova_domain_flush() observes that if called
+	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
 	 * from a different CPU before we release the lock below. Full barrier
 	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
 	 * written fq state here.
 	 */
 	smp_mb();
 
-	fq = raw_cpu_ptr(iovad->fq);
+	fq = raw_cpu_ptr(cookie->fq);
 	spin_lock_irqsave(&fq->lock, flags);
 
 	/*
@@ -156,65 +191,66 @@ void queue_iova(struct iova_domain *iovad,
 	 * flushed out on another CPU. This makes the fq_full() check below less
 	 * likely to be true.
 	 */
-	fq_ring_free(iovad, fq);
+	fq_ring_free(cookie, fq);
 
 	if (fq_full(fq)) {
-		iova_domain_flush(iovad);
-		fq_ring_free(iovad, fq);
+		fq_flush_iotlb(cookie);
+		fq_ring_free(cookie, fq);
 	}
 
 	idx = fq_ring_add(fq);
 
 	fq->entries[idx].iova_pfn = pfn;
 	fq->entries[idx].pages = pages;
-	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
+	fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
 	list_splice(freelist, &fq->entries[idx].freelist);
 
 	spin_unlock_irqrestore(&fq->lock, flags);
 
 	/* Avoid false sharing as much as possible. */
-	if (!atomic_read(&iovad->fq_timer_on) &&
-	    !atomic_xchg(&iovad->fq_timer_on, 1))
-		mod_timer(&iovad->fq_timer,
+	if (!atomic_read(&cookie->fq_timer_on) &&
+	    !atomic_xchg(&cookie->fq_timer_on, 1))
+		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
 
-static void free_iova_flush_queue(struct iova_domain *iovad)
+static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
 {
 	int cpu, idx;
 
-	if (!iovad->fq)
+	if (!cookie->fq)
 		return;
 
-	del_timer_sync(&iovad->fq_timer);
-	/*
-	 * This code runs when the iova_domain is being detroyed, so don't
-	 * bother to free iovas, just free any remaining pagetable pages.
-	 */
+	del_timer_sync(&cookie->fq_timer);
+	/* The IOVAs will be torn down separately, so just free our queued pages */
 	for_each_possible_cpu(cpu) {
-		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
 
+		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
 
 		fq_ring_for_each(idx, fq)
 			put_pages_list(&fq->entries[idx].freelist);
 	}
 
-	free_percpu(iovad->fq);
-
-	iovad->fq = NULL;
-	iovad->fq_domain = NULL;
+	free_percpu(cookie->fq);
 }
 
-int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
+/* sysfs updates are serialised by the mutex of the group owning @domain */
+int iommu_dma_init_fq(struct iommu_domain *domain)
 {
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_fq __percpu *queue;
 	int i, cpu;
 
-	atomic64_set(&iovad->fq_flush_start_cnt, 0);
-	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+	if (cookie->fq_domain)
+		return 0;
+
+	atomic64_set(&cookie->fq_flush_start_cnt, 0);
+	atomic64_set(&cookie->fq_flush_finish_cnt, 0);
 
 	queue = alloc_percpu(struct iova_fq);
-	if (!queue)
+	if (!queue) {
+		pr_warn("iova flush queue initialization failed\n");
 		return -ENOMEM;
+	}
 
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq = per_cpu_ptr(queue, cpu);
@@ -228,12 +264,16 @@ int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_dom
 			INIT_LIST_HEAD(&fq->entries[i].freelist);
 	}
 
-	iovad->fq_domain = fq_domain;
-	iovad->fq = queue;
-
-	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
-	atomic_set(&iovad->fq_timer_on, 0);
+	cookie->fq = queue;
+	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
+	atomic_set(&cookie->fq_timer_on, 0);
+	/*
+	 * Prevent incomplete fq state being observable. Pairs with path from
+	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+	 */
+	smp_wmb();
+	WRITE_ONCE(cookie->fq_domain, domain);
 
 	return 0;
 }
 
@@ -318,7 +358,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 		return;
 
 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
-		free_iova_flush_queue(&cookie->iovad);
+		iommu_dma_free_fq(cookie);
 		put_iova_domain(&cookie->iovad);
 	}
 
@@ -467,29 +507,6 @@ static bool dev_use_swiotlb(struct device *dev)
 	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
 }
 
-/* sysfs updates are serialised by the mutex of the group owning @domain */
-int iommu_dma_init_fq(struct iommu_domain *domain)
-{
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	int ret;
-
-	if (cookie->fq_domain)
-		return 0;
-
-	ret = init_iova_flush_queue(&cookie->iovad, domain);
-	if (ret) {
-		pr_warn("iova flush queue initialization failed\n");
-		return ret;
-	}
-	/*
-	 * Prevent incomplete iovad->fq being observable. Pairs with path from
-	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
-	 */
-	smp_wmb();
-	WRITE_ONCE(cookie->fq_domain, domain);
-	return 0;
-}
-
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -620,7 +637,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 		cookie->msi_iova -= size;
 	else if (gather && gather->queued)
-		queue_iova(iovad, iova_pfn(iovad, iova),
+		queue_iova(cookie, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				&gather->freelist);
 	else
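The patch above only moves the flush-queue data from struct iova_domain into the DMA cookie; the deferred-free mechanism itself (a fixed-size per-CPU ring indexed by head/tail, drained either when full or by a timer) is unchanged. The following is a minimal, self-contained userspace sketch of just that ring arithmetic, for readers skimming the diff. FQ_SIZE, struct fq, fq_ring_drain() and the main() driver are illustrative stand-ins rather than kernel code, and the spinlock, per-CPU placement, flush counters and timer are deliberately omitted.

#include <stdbool.h>
#include <stdio.h>

/* Ring size; stands in for IOVA_FQ_SIZE (256 in the patch). */
#define FQ_SIZE 8

/* Simplified stand-in for struct iova_fq: one slot per deferred free. */
struct fq {
	unsigned long entries[FQ_SIZE];	/* would be struct iova_fq_entry in the kernel */
	unsigned int head, tail;
};

/* Mirrors fq_full(): the ring is full when advancing tail would meet head. */
static bool fq_full(const struct fq *fq)
{
	return ((fq->tail + 1) % FQ_SIZE) == fq->head;
}

/* Mirrors fq_ring_add(): claim the slot at tail and advance tail. */
static unsigned int fq_ring_add(struct fq *fq)
{
	unsigned int idx = fq->tail;

	fq->tail = (fq->tail + 1) % FQ_SIZE;
	return idx;
}

/*
 * Drains from head, like fq_ring_free(); here everything is drained, whereas
 * the kernel version stops at entries whose TLB flush has not finished yet.
 */
static void fq_ring_drain(struct fq *fq)
{
	while (fq->head != fq->tail) {
		printf("freeing deferred entry %lu\n", fq->entries[fq->head]);
		fq->head = (fq->head + 1) % FQ_SIZE;
	}
}

int main(void)
{
	struct fq fq = { .head = 0, .tail = 0 };

	for (unsigned long pfn = 0; pfn < 20; pfn++) {
		if (fq_full(&fq))	/* queue_iova() would flush the IOTLB before draining */
			fq_ring_drain(&fq);
		fq.entries[fq_ring_add(&fq)] = pfn;
	}
	fq_ring_drain(&fq);
	return 0;
}

The point of the ring is that freed IOVA ranges are only recycled in batches, so one IOTLB flush can cover many unmaps; moving the ring and its counters into struct iommu_dma_cookie keeps that state private to the DMA-IOMMU layer instead of exposing it through struct iova_domain.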