author     Robin Murphy <robin.murphy@arm.com>  2021-12-17 16:30:56 +0100
committer  Joerg Roedel <jroedel@suse.de>       2021-12-20 09:03:05 +0100
commit     d5c383f2c98ac58c210b266cdaf7b86bc32d1ad1
tree       ab362c5ed43a1248ccbaa13a465ba74327400f24 /drivers/iommu
parent     iommu/iova: Fix race between FQ timeout and teardown
iommu/iova: Squash entry_dtor abstraction
All flush queues are driven by iommu-dma now, so there is no need to
abstract entry_dtor or its data any more. Squash the now-canonical
implementation directly into the IOVA code to get it out of the way.
Reviewed-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/2260f8de00ab5e0f9d2a1cf8978e6ae7cd4f182c.1639753638.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
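
[Editor's note] The destructor being squashed is just a walk of a page list chained through page->freelist, freeing each page in turn (see fq_entry_dtor() in the diff below). As a plain-C illustration of that pattern, here is a minimal userspace sketch; struct fake_page and the function names are stand-ins, with malloc()/free() in place of the kernel's page allocator:

#include <stdlib.h>

/* Stand-in for struct page: only the freelist chaining matters here. */
struct fake_page {
	struct fake_page *freelist;	/* next page awaiting free */
};

/* Mirrors the shape of fq_entry_dtor(): pop and free until the chain ends. */
static void entry_dtor_sketch(struct fake_page *freelist)
{
	while (freelist) {
		struct fake_page *next = freelist->freelist;

		free(freelist);		/* the kernel code does free_page() */
		freelist = next;
	}
}

int main(void)
{
	struct fake_page *head = NULL;

	/* Chain up three pages, then tear the whole list down. */
	for (int i = 0; i < 3; i++) {
		struct fake_page *p = calloc(1, sizeof(*p));

		if (!p)
			return 1;
		p->freelist = head;
		head = p;
	}
	entry_dtor_sketch(head);
	return 0;
}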
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/dma-iommu.c | 17
-rw-r--r--  drivers/iommu/iova.c      | 28
2 files changed, 17 insertions(+), 28 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 84dee53fe892..6691f3cd768f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -64,18 +64,6 @@ static int __init iommu_dma_forcedac_setup(char *str)
 }
 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
-static void iommu_dma_entry_dtor(unsigned long data)
-{
-	struct page *freelist = (struct page *)data;
-
-	while (freelist) {
-		unsigned long p = (unsigned long)page_address(freelist);
-
-		freelist = freelist->freelist;
-		free_page(p);
-	}
-}
-
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
 {
 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
@@ -324,8 +312,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
 	if (cookie->fq_domain)
 		return 0;
 
-	ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
-				    iommu_dma_entry_dtor);
+	ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all);
 	if (ret) {
 		pr_warn("iova flush queue initialization failed\n");
 		return ret;
@@ -471,7 +458,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 	else if (gather && gather->queued)
 		queue_iova(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad),
-				(unsigned long)gather->freelist);
+				gather->freelist);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 670211e41771..541857ca4fd5 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -91,11 +91,9 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 
 	iovad->fq = NULL;
 	iovad->flush_cb = NULL;
-	iovad->entry_dtor = NULL;
 }
 
-int init_iova_flush_queue(struct iova_domain *iovad,
-			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
+int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
 {
 	struct iova_fq __percpu *queue;
 	int cpu;
@@ -108,7 +106,6 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 		return -ENOMEM;
 
 	iovad->flush_cb = flush_cb;
-	iovad->entry_dtor = entry_dtor;
 
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq;
@@ -547,6 +544,16 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(free_iova_fast);
 
+static void fq_entry_dtor(struct page *freelist)
+{
+	while (freelist) {
+		unsigned long p = (unsigned long)page_address(freelist);
+
+		freelist = freelist->freelist;
+		free_page(p);
+	}
+}
+
 #define fq_ring_for_each(i, fq) \
 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
 
@@ -579,9 +586,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 		if (fq->entries[idx].counter >= counter)
 			break;
 
-		if (iovad->entry_dtor)
-			iovad->entry_dtor(fq->entries[idx].data);
-
+		fq_entry_dtor(fq->entries[idx].freelist);
 		free_iova_fast(iovad,
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
@@ -606,15 +611,12 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
 	 * bother to free iovas, just call the entry_dtor on all remaining
 	 * entries.
 	 */
-	if (!iovad->entry_dtor)
-		return;
-
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
 		int idx;
 
 		fq_ring_for_each(idx, fq)
-			iovad->entry_dtor(fq->entries[idx].data);
+			fq_entry_dtor(fq->entries[idx].freelist);
 	}
 }
 
@@ -639,7 +641,7 @@ static void fq_flush_timeout(struct timer_list *t)
 
 void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
-		unsigned long data)
+		struct page *freelist)
 {
 	struct iova_fq *fq;
 	unsigned long flags;
@@ -673,7 +675,7 @@ void queue_iova(struct iova_domain *iovad,
 
 	fq->entries[idx].iova_pfn = pfn;
 	fq->entries[idx].pages = pages;
-	fq->entries[idx].data = data;
+	fq->entries[idx].freelist = freelist;
 	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
 
 	spin_unlock_irqrestore(&fq->lock, flags);
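
[Editor's note] The diffstat above is limited to drivers/iommu, so the companion change to the flush-queue declarations (in include/linux/iova.h) is not shown. The following is a sketch of what those declarations would look like after this patch, inferred only from the new definitions visible in the diff, not quoted from the header itself:

/* Inferred from the definitions above; the exact header layout may differ. */
typedef void (* iova_flush_cb)(struct iova_domain *domain);

struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct page *freelist;	/* replaces: unsigned long data */
	u64 counter;
};

/* entry_dtor argument dropped; freelist now passed as a typed pointer. */
int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb);
void queue_iova(struct iova_domain *iovad, unsigned long pfn,
		unsigned long pages, struct page *freelist);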