author     Matthew Wilcox (Oracle) <willy@infradead.org>    2022-09-06 21:48:53 +0200
committer  Andrew Morton <akpm@linux-foundation.org>        2022-09-27 04:46:19 +0200
commit     70fa203165d96ae03abb83cf60d30c44e6b81a12 (patch)
tree       44928d0ce7b0f9b394063e06068b6d57f7ec522d /arch/parisc
parent     arm64: Change elfcore for_each_mte_vma() to use VMA iterator (diff)
parisc: remove mmap linked list from cache handling
Use the VMA iterator instead.

Link: https://lkml.kernel.org/r/20220906194824.2110408-33-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
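For readers following the series, the conversion here matches the general maple-tree VMA iterator pattern: declare a VMA_ITERATOR() and walk it with for_each_vma() instead of chasing mm->mmap / vm_next. Below is a minimal sketch of that pattern, not code from this commit; sum_vma_sizes() is a hypothetical helper, and the caller is assumed to hold the mmap lock.

/*
 * Minimal sketch (assumption: not part of this patch). sum_vma_sizes()
 * is a hypothetical helper showing the old-vs-new VMA walk.
 */
#include <linux/mm.h>

static unsigned long sum_vma_sizes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long total = 0;
	VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */

	/* Old style: for (vma = mm->mmap; vma; vma = vma->vm_next) */
	for_each_vma(vmi, vma)		/* new style: iterate the maple tree */
		total += vma->vm_end - vma->vm_start;

	return total;
}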
Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/kernel/cache.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 3feb7694e0ca..1d3b8bc8a623 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -657,15 +657,20 @@ static inline unsigned long mm_total_size(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
 	unsigned long usize = 0;
+	VMA_ITERATOR(vmi, mm, 0);
 
-	for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
+	for_each_vma(vmi, vma) {
+		if (usize >= parisc_cache_flush_threshold)
+			break;
 		usize += vma->vm_end - vma->vm_start;
+	}
 	return usize;
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, 0);
 
 	/*
 	 * Flushing the whole cache on each cpu takes forever on
@@ -685,7 +690,7 @@ void flush_cache_mm(struct mm_struct *mm)
 	}
 
 	/* Flush mm */
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
+	for_each_vma(vmi, vma)
 		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
 }
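Usage note: walks like the two above are only safe while the VMA set cannot change underneath the iterator, so callers are expected to hold the mmap lock. A hedged sketch of such a caller follows; flush_all_user_mappings() is a hypothetical helper and uses the generic flush_cache_range() API rather than the file-local flush_cache_pages().

#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <asm/cacheflush.h>

/* Hypothetical caller; not part of this commit. */
static void flush_all_user_mappings(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);	/* keep the VMA tree stable for the walk */
	for_each_vma(vmi, vma)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);
}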