author	Matthew Wilcox (Oracle) <willy@infradead.org>	2023-08-02 17:14:06 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-25 01:20:27 +0200
commit	5003a2bdf6880dc9c301f555bece1154081158fe (patch)
tree	b77f937af523084b475ce91a0a4ce94acb5542f7 /mm
parent	filemap: batch PTE mappings (diff)
mm: call update_mmu_cache_range() in more page fault handling paths
Pass the vm_fault to the architecture to help it make smarter decisions
about which PTEs to insert into the TLB.

Link: https://lkml.kernel.org/r/20230802151406.3735276-39-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
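[Editor's note, not part of the patch: a minimal sketch of the arch-side hook this
commit assumes. The signature is inferred from the call sites in the diff below;
the no-op body and the wrapper macro mirror what an architecture without software
TLB preloading would typically provide, but per-arch details vary.]

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * Sketch only: an arch that preloads translations in software can
	 * now consult vmf (e.g. the faulting address and flags) to decide
	 * which of the 'nr' PTEs starting at 'addr' are worth inserting.
	 * A no-op is valid where hardware walks the page tables itself.
	 */
}

/* The old single-PTE hook becomes a one-entry call with no vm_fault. */
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)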
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 12b385eaf353..9d7fb721a680 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2862,7 +2862,7 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src,
entry = pte_mkyoung(vmf->orig_pte);
if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
- update_mmu_cache(vma, addr, vmf->pte);
+ update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
}
/*
@@ -3039,7 +3039,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
entry = pte_mkyoung(vmf->orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
- update_mmu_cache(vma, vmf->address, vmf->pte);
+ update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
pte_unmap_unlock(vmf->pte, vmf->ptl);
count_vm_event(PGREUSE);
}
@@ -3163,7 +3163,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
*/
BUG_ON(unshare && pte_write(entry));
set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
- update_mmu_cache(vma, vmf->address, vmf->pte);
+ update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
if (old_folio) {
/*
* Only after switching the pte to the new page may
@@ -4046,7 +4046,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
}
/* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, vmf->address, vmf->pte);
+ update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4170,7 +4170,7 @@ setpte:
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
/* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, vmf->address, vmf->pte);
+ update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4859,7 +4859,7 @@ out_map:
if (writable)
pte = pte_mkwrite(pte);
ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
- update_mmu_cache(vma, vmf->address, vmf->pte);
+ update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
}
@@ -5030,7 +5030,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
vmf->flags & FAULT_FLAG_WRITE)) {
- update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
+ update_mmu_cache_range(vmf, vmf->vma, vmf->address,
+ vmf->pte, 1);
} else {
/* Skip spurious TLB flush for retried page fault */
if (vmf->flags & FAULT_FLAG_TRIED)
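
[Editor's note: the payoff comes from callers that pass nr > 1. Below is a
hypothetical batched caller, modelled on the parent commit ("filemap: batch
PTE mappings") rather than copied from it; set_ptes() is the range-insertion
primitive introduced earlier in the same series, and the helper name is
illustrative.]

static void map_ptes_batched(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long addr, pte_t entry, unsigned int nr)
{
	/*
	 * Insert 'nr' consecutive PTEs (set_ptes() advances the PFN for
	 * each successive entry), then give the architecture one chance
	 * to preload all of the new translations, instead of invoking
	 * update_mmu_cache() once per page.
	 */
	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
}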