Diffstat (limited to 'mm/huge_memory.c')
 mm/huge_memory.c | 37 +++++++++++++++++++++++++++++--------
 1 file changed, 29 insertions(+), 8 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86975dec0ba1..90731e3b7e58 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -32,6 +32,7 @@
 #include <linux/userfaultfd_k.h>
 #include <linux/page_idle.h>
 #include <linux/shmem_fs.h>
+#include <linux/oom.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -550,6 +551,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+	int ret = 0;
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
@@ -561,9 +563,8 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
 	pgtable = pte_alloc_one(vma->vm_mm, haddr);
 	if (unlikely(!pgtable)) {
-		mem_cgroup_cancel_charge(page, memcg, true);
-		put_page(page);
-		return VM_FAULT_OOM;
+		ret = VM_FAULT_OOM;
+		goto release;
 	}
 
 	clear_huge_page(page, haddr, HPAGE_PMD_NR);
@@ -576,13 +577,14 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	if (unlikely(!pmd_none(*vmf->pmd))) {
-		spin_unlock(vmf->ptl);
-		mem_cgroup_cancel_charge(page, memcg, true);
-		put_page(page);
-		pte_free(vma->vm_mm, pgtable);
+		goto unlock_release;
 	} else {
 		pmd_t entry;
 
+		ret = check_stable_address_space(vma->vm_mm);
+		if (ret)
+			goto unlock_release;
+
 		/* Deliver the page fault to userland */
 		if (userfaultfd_missing(vma)) {
 			int ret;
@@ -610,6 +612,15 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 	}
 
 	return 0;
+unlock_release:
+	spin_unlock(vmf->ptl);
+release:
+	if (pgtable)
+		pte_free(vma->vm_mm, pgtable);
+	mem_cgroup_cancel_charge(page, memcg, true);
+	put_page(page);
+	return ret;
+
 }
 
 /*
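The churn in __do_huge_pmd_anonymous_page() above replaces three duplicated cleanup sequences with one unwind path: unlock_release drops the page table lock first, then release frees the preallocated page table (guarded, because the pte_alloc_one() failure path arrives with pgtable == NULL), cancels the memcg charge, and drops the page reference. A minimal userspace sketch of the same idiom, with malloc()/pthread stand-ins rather than kernel API (all names illustrative):

/* Sketch of the centralized goto-unwind pattern from the hunks above. */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* stands in for vmf->ptl */

static int fault_path(int busy)
{
	int ret = 0;
	char *page = malloc(4096);		/* resource held on entry */
	char *pgtable = NULL;

	if (!page)
		return -1;

	pgtable = malloc(64);			/* like pte_alloc_one() */
	if (!pgtable) {
		ret = -1;
		goto release;			/* no lock held, pgtable == NULL */
	}

	pthread_mutex_lock(&ptl);
	if (busy) {				/* like !pmd_none() or an unstable mm */
		ret = -1;
		goto unlock_release;
	}
	/* success: the real code transfers ownership here */
	pthread_mutex_unlock(&ptl);
	free(pgtable);
	free(page);
	return 0;

unlock_release:
	pthread_mutex_unlock(&ptl);
release:
	if (pgtable)				/* may never have been allocated */
		free(pgtable);
	free(page);
	return ret;
}

int main(void)
{
	fault_path(0);		/* success path, nothing to unwind */
	fault_path(1);		/* contended path takes unlock_release */
	return 0;
}

The payoff in the patch is that a new failure case, like the stable-address-space check, can bail out in two lines instead of repeating the four-step cleanup.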
@@ -688,7 +699,10 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		ret = 0;
 		set = false;
 		if (pmd_none(*vmf->pmd)) {
-			if (userfaultfd_missing(vma)) {
+			ret = check_stable_address_space(vma->vm_mm);
+			if (ret) {
+				spin_unlock(vmf->ptl);
+			} else if (userfaultfd_missing(vma)) {
 				spin_unlock(vmf->ptl);
 				ret = handle_userfault(vmf, VM_UFFD_MISSING);
 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
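Both THP fault paths now call check_stable_address_space() before committing a new page, so a task whose address space has already been torn down by the OOM reaper gets SIGBUS instead of a fresh page mapped over data the reaper may have discarded. The helper itself is not part of this diff; it lives in <linux/oom.h> and tests MMF_UNSTABLE, which the reaper sets on an mm it has reaped. As a rough userspace model (struct mm_model is a stand-in for struct mm_struct):

/* Model of check_stable_address_space(); real helper is in <linux/oom.h>. */
#include <stdbool.h>

#define VM_FAULT_SIGBUS	0x0002		/* matches <linux/mm.h> */

struct mm_model {
	bool unstable;			/* stands in for MMF_UNSTABLE */
};

static int check_stable_address_space_model(const struct mm_model *mm)
{
	if (mm->unstable)
		return VM_FAULT_SIGBUS;	/* fault must not fabricate data */
	return 0;
}

int main(void)
{
	struct mm_model mm = { .unstable = true };
	return check_stable_address_space_model(&mm) == VM_FAULT_SIGBUS ? 0 : 1;
}

Note the ordering in the do_huge_pmd_anonymous_page() hunk: the check runs under the pmd lock and before the userfaultfd_missing() branch, so an unstable mm fails with SIGBUS even when a userfaultfd is registered.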
@@ -1496,6 +1510,13 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	}
 
 	/*
+	 * The page_table_lock above provides a memory barrier
+	 * with change_protection_range.
+	 */
+	if (mm_tlb_flush_pending(vma->vm_mm))
+		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
+	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and access rights restored.
 	 */
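The last hunk closes a window in NUMA hinting faults: change_protection_range() can still have a TLB flush outstanding when the fault handler decides to migrate the THP, which would let another thread keep writing through a stale, still-writable TLB entry during migration. Taking the page table lock orders the mm_tlb_flush_pending() read against the protection change, and the explicit flush_tlb_range() makes the huge-page range safe before migration proceeds. A hedged pthread/stdatomic model of the handshake (every name here is a stand-in for the kernel primitive of similar name, not the real API):

/* Model of the tlb_flush_pending handshake used in the hunk above. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int tlb_flush_pending;	/* models mm->tlb_flush_pending */

static void flush_tlb_range_model(const char *who)
{
	printf("%s: flushing TLB for the faulting range\n", who);
}

/* models change_protection_range() */
static void change_protection(void)
{
	atomic_fetch_add(&tlb_flush_pending, 1);	/* raise before PTE writes */
	pthread_mutex_lock(&page_table_lock);
	/* ... clear access/write bits in the page tables ... */
	pthread_mutex_unlock(&page_table_lock);
	flush_tlb_range_model("writer");
	atomic_fetch_sub(&tlb_flush_pending, 1);	/* lower only after the flush */
}

/* models the added code in do_huge_pmd_numa_page() */
static void numa_fault(void)
{
	pthread_mutex_lock(&page_table_lock);
	/* ... re-check the pmd under the lock ... */
	if (atomic_load(&tlb_flush_pending))
		flush_tlb_range_model("fault");	/* don't trust a stale TLB */
	pthread_mutex_unlock(&page_table_lock);
}

int main(void)
{
	change_protection();	/* writer completes, pending drops back to 0 */
	numa_fault();		/* a later fault sees no pending flush */
	return 0;
}

The design point the comment records is that no extra barrier is needed on the fault side: acquiring the page table lock that the protection changer also takes is what makes the pending counter's value trustworthy.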