summaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2016-12-15 00:07:21 +0100
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-15 01:04:09 +0100
commit9118c0cbd44262d0015568266f314e645ed6b9ce (patch)
tree8f457a1fca7e87522c3d8e0332ee61a49bb105df /mm/memory.c
parentmm: allow full handling of COW faults in ->fault handlers (diff)
downloadlinux-9118c0cbd44262d0015568266f314e645ed6b9ce.tar.xz
linux-9118c0cbd44262d0015568266f314e645ed6b9ce.zip
mm: factor out functionality to finish page faults
Introduce finish_fault() as a helper function for finishing page faults. It is rather thin wrapper around alloc_set_pte() but since we'd want to call this from DAX code or filesystems, it is still useful to avoid some boilerplate code. Link: http://lkml.kernel.org/r/1479460644-25076-10-git-send-email-jack@suse.cz Signed-off-by: Jan Kara <jack@suse.cz> Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c44
1 file changed, 35 insertions, 9 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 02504cd4ca0e..22f7f6e38515 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3074,6 +3074,38 @@ fault_handled:
return ret;
}
+
+/**
+ * finish_fault - finish page fault once we have prepared the page to fault
+ *
+ * @vmf: structure describing the fault
+ *
+ * This function handles all that is needed to finish a page fault once the
+ * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
+ * given page, adds reverse page mapping, handles memcg charges and LRU
+ * addition. The function returns 0 on success, VM_FAULT_ code in case of
+ * error.
+ *
+ * The function expects the page to be locked and on success it consumes a
+ * reference of a page being mapped (for the PTE which maps it).
+ */
+int finish_fault(struct vm_fault *vmf)
+{
+ struct page *page;
+ int ret;
+
+ /* Did we COW the page? */
+ if ((vmf->flags & FAULT_FLAG_WRITE) &&
+ !(vmf->vma->vm_flags & VM_SHARED))
+ page = vmf->cow_page;
+ else
+ page = vmf->page;
+ ret = alloc_set_pte(vmf, vmf->memcg, page);
+ if (vmf->pte)
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ return ret;
+}
+
static unsigned long fault_around_bytes __read_mostly =
rounddown_pow_of_two(65536);
@@ -3213,9 +3245,7 @@ static int do_read_fault(struct vm_fault *vmf)
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
- ret |= alloc_set_pte(vmf, NULL, vmf->page);
- if (vmf->pte)
- pte_unmap_unlock(vmf->pte, vmf->ptl);
+ ret |= finish_fault(vmf);
unlock_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
put_page(vmf->page);
@@ -3250,9 +3280,7 @@ static int do_cow_fault(struct vm_fault *vmf)
copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
__SetPageUptodate(vmf->cow_page);
- ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page);
- if (vmf->pte)
- pte_unmap_unlock(vmf->pte, vmf->ptl);
+ ret |= finish_fault(vmf);
if (!(ret & VM_FAULT_DAX_LOCKED)) {
unlock_page(vmf->page);
put_page(vmf->page);
@@ -3293,9 +3321,7 @@ static int do_shared_fault(struct vm_fault *vmf)
}
}
- ret |= alloc_set_pte(vmf, NULL, vmf->page);
- if (vmf->pte)
- pte_unmap_unlock(vmf->pte, vmf->ptl);
+ ret |= finish_fault(vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
VM_FAULT_RETRY))) {
unlock_page(vmf->page);