author	Matthew Wilcox (Oracle) <willy@infradead.org>	2023-07-24 20:54:03 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-18 19:12:51 +0200
commit	4ec31152a80d83d74d231d964703a721236244ef (patch)
tree	c038eef92d6b55069f2a1bd34c8129fae68278e4	/mm/memory.c
parent	mm: allow per-VMA locks on file-backed VMAs (diff)
mm: move FAULT_FLAG_VMA_LOCK check from handle_mm_fault()
Handle a little more of the page fault path outside the mmap sem.  The
hugetlb path doesn't need to check whether the VMA is anonymous; the
VM_HUGETLB flag is only set on hugetlbfs VMAs.  There should be no
performance change from the previous commit; this is simply a step to
ease bisection of any problems.

Link: https://lkml.kernel.org/r/20230724185410.1124082-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
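For readers following the series: the VM_FAULT_RETRY that the relocated check now returns from __handle_mm_fault() is consumed by the architecture fault handlers that try the per-VMA-lock path before taking the mmap_lock. The sketch below is a simplified, hypothetical illustration of that caller-side pattern, not the code of any particular architecture; the helper name fault_with_vma_lock() is invented for illustration, while lock_vma_under_rcu(), vma_end_read() and handle_mm_fault() are the real interfaces this series builds on.

/*
 * Hypothetical, condensed sketch of the caller-side pattern
 * (assumption: loosely modelled on the arch fault handlers of this
 * kernel generation; not the exact code of any architecture).
 */
static vm_fault_t fault_with_vma_lock(struct mm_struct *mm,
				      unsigned long address,
				      unsigned int flags,
				      struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	/* Try the per-VMA-lock path first, without taking the mmap_lock. */
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return VM_FAULT_RETRY;	/* caller retries under mmap_read_lock() */

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);

	/*
	 * With this patch, a non-anonymous VMA makes __handle_mm_fault()
	 * drop the VMA lock itself and return VM_FAULT_RETRY, so the VMA
	 * lock is only released here when neither RETRY nor COMPLETED is set.
	 */
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	return fault;	/* VM_FAULT_RETRY => fall back to the mmap_lock path */
}

On VM_FAULT_RETRY the caller then falls back to the traditional mmap_read_lock()-based path and repeats the fault without FAULT_FLAG_VMA_LOCK.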
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b2b17c66f87a..2a5f4883d9a5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4984,10 +4984,10 @@ unlock:
}
/*
- * By the time we get here, we already hold the mm semaphore
- *
- * The mmap_lock may have been released depending on flags and our
- * return value. See filemap_fault() and __folio_lock_or_retry().
+ * On entry, we hold either the VMA lock or the mmap_lock
+ * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
+ * the result, the mmap_lock is not held on exit. See filemap_fault()
+ * and __folio_lock_or_retry().
*/
static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
@@ -5006,6 +5006,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
p4d_t *p4d;
vm_fault_t ret;
+ if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) {
+ vma_end_read(vma);
+ return VM_FAULT_RETRY;
+ }
+
pgd = pgd_offset(mm, address);
p4d = p4d_alloc(mm, pgd, address);
if (!p4d)
@@ -5222,11 +5227,6 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
goto out;
}
- if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) {
- vma_end_read(vma);
- return VM_FAULT_RETRY;
- }
-
/*
* Enable the memcg OOM handling for faults triggered in user
* space. Kernel faults are handled more gracefully.