author		Matthew Wilcox (Oracle) <willy@infradead.org>	2021-12-17 16:31:00 +0100
committer	Joerg Roedel <jroedel@suse.de>	2021-12-20 09:03:05 +0100
commit	87f60cc65d24939353b40aa1d9297fea080cdf8d (patch)
tree	0c538476e267e82e5b93d8f07f7b931a8d198fdf /drivers/iommu/iova.c
parent	iommu/amd: Use put_pages_list (diff)
iommu/vt-d: Use put_pages_list
page->freelist is for the use of slab. We already have the ability to
free a list of pages in the core mm, but it requires the use of a
list_head and for the pages to be chained together through page->lru.
Switch the Intel IOMMU and IOVA code over to using put_pages_list().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
[rm: split from original patch, cosmetic tweaks, fix fq entries]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/2115b560d9a0ce7cd4b948bd51a2b7bde8fdfd59.1639753638.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
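[Editor's note: in concrete terms, the conversion swaps a hand-rolled singly linked chain threaded through page->freelist for a standard list_head threaded through page->lru, so a whole batch of pagetable pages can be handed to the core mm in one call. A minimal sketch of the two patterns; the function names free_pagetable_chain()/free_pagetable_list() are illustrative, not from the patch, while put_pages_list() is the core-mm helper this series relies on:]

	#include <linux/list.h>
	#include <linux/mm.h>

	/* Old pattern: pages chained through page->freelist, freed one at a time. */
	static void free_pagetable_chain(struct page *freelist)
	{
		while (freelist) {
			struct page *next = freelist->freelist;

			free_page((unsigned long)page_address(freelist));
			freelist = next;
		}
	}

	/*
	 * New pattern: pages collected on a list_head via page->lru and
	 * handed to the core mm as a single batch.
	 */
	static void free_pagetable_list(struct list_head *freelist)
	{
		put_pages_list(freelist);
	}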
Diffstat (limited to 'drivers/iommu/iova.c')
-rw-r--r--	drivers/iommu/iova.c	26	+++++++++-----------------
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index bbf642940988..962614c7afbb 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -96,7 +96,7 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
 {
 	struct iova_fq __percpu *queue;
-	int cpu;
+	int i, cpu;
 
 	atomic64_set(&iovad->fq_flush_start_cnt, 0);
 	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
@@ -113,6 +113,9 @@ int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_dom
 		fq->tail = 0;
 
 		spin_lock_init(&fq->lock);
+
+		for (i = 0; i < IOVA_FQ_SIZE; i++)
+			INIT_LIST_HEAD(&fq->entries[i].freelist);
 	}
 
 	iovad->fq_domain = fq_domain;
@@ -543,16 +546,6 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(free_iova_fast);
 
-static void fq_entry_dtor(struct page *freelist)
-{
-	while (freelist) {
-		unsigned long p = (unsigned long)page_address(freelist);
-
-		freelist = freelist->freelist;
-		free_page(p);
-	}
-}
-
 #define fq_ring_for_each(i, fq) \
 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
 
@@ -585,7 +578,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 		if (fq->entries[idx].counter >= counter)
 			break;
 
-		fq_entry_dtor(fq->entries[idx].freelist);
+		put_pages_list(&fq->entries[idx].freelist);
 		free_iova_fast(iovad,
 			       fq->entries[idx].iova_pfn,
 			       fq->entries[idx].pages);
@@ -607,15 +600,14 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
 
 	/*
 	 * This code runs when the iova_domain is being destroyed, so don't
-	 * bother to free iovas, just call the entry_dtor on all remaining
-	 * entries.
+	 * bother to free iovas, just free any remaining pagetable pages.
 	 */
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
 		int idx;
 
 		fq_ring_for_each(idx, fq)
-			fq_entry_dtor(fq->entries[idx].freelist);
+			put_pages_list(&fq->entries[idx].freelist);
 	}
 }
 
@@ -640,7 +632,7 @@ static void fq_flush_timeout(struct timer_list *t)
 
 void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
-		struct page *freelist)
+		struct list_head *freelist)
 {
 	struct iova_fq *fq;
 	unsigned long flags;
@@ -674,8 +666,8 @@ void queue_iova(struct iova_domain *iovad,
 
 	fq->entries[idx].iova_pfn = pfn;
 	fq->entries[idx].pages    = pages;
-	fq->entries[idx].freelist = freelist;
 	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);
+	list_splice(freelist, &fq->entries[idx].freelist);
 
 	spin_unlock_irqrestore(&fq->lock, flags);
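
[Editor's note: a caller-side sketch of the new queue_iova() contract. The function example_unmap_and_queue() is invented for illustration; only queue_iova()'s signature comes from this patch. Because queue_iova() splices the list with list_splice(), which does not re-initialise the source list head, a caller must INIT_LIST_HEAD() its list before reusing it:]

	#include <linux/iova.h>
	#include <linux/list.h>

	/*
	 * Hypothetical caller: collect the pagetable pages freed by an
	 * unmap on a local list, then queue the IOVA range for a
	 * deferred flush.
	 */
	static void example_unmap_and_queue(struct iova_domain *iovad,
					    unsigned long pfn, unsigned long pages)
	{
		LIST_HEAD(freelist);

		/* an unmap path would add pages via list_add(&page->lru, &freelist) */

		queue_iova(iovad, pfn, pages, &freelist);

		/*
		 * The splice leaves &freelist stale; INIT_LIST_HEAD(&freelist)
		 * before adding to it again.
		 */
	}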