Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig   3
-rw-r--r--  mm/gup.c    51
-rw-r--r--  mm/mmap.c   15
3 files changed, 51 insertions(+), 18 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 0abc6c71dd89..09130434e30d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1224,9 +1224,8 @@ config ARCH_SUPPORTS_PER_VMA_LOCK
 	def_bool n
 
 config PER_VMA_LOCK
-	bool "Enable per-vma locking during page fault handling."
+	def_bool y
 	depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
-	depends on BROKEN
 	help
 	  Allow per-vma locking during page fault handling.
 
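With "depends on BROKEN" removed and the user-visible prompt replaced by "def_bool y", CONFIG_PER_VMA_LOCK is no longer a question asked at configuration time: it is enabled automatically on any SMP system with an MMU whose architecture selects ARCH_SUPPORTS_PER_VMA_LOCK. After the patch the entry resolves to:

config PER_VMA_LOCK
	def_bool y
	depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
	help
	  Allow per-vma locking during page fault handling.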
diff --git a/mm/gup.c b/mm/gup.c
index ef29641671c7..76d222ccc3ff 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1091,6 +1091,45 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 	return 0;
 }
 
+/*
+ * This is "vma_lookup()", but with a warning if we would have
+ * historically expanded the stack in the GUP code.
+ */
+static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
+	 unsigned long addr)
+{
+#ifdef CONFIG_STACK_GROWSUP
+	return vma_lookup(mm, addr);
+#else
+	static volatile unsigned long next_warn;
+	struct vm_area_struct *vma;
+	unsigned long now, next;
+
+	vma = find_vma(mm, addr);
+	if (!vma || (addr >= vma->vm_start))
+		return vma;
+
+	/* Only warn for half-way relevant accesses */
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		return NULL;
+	if (vma->vm_start - addr > 65536)
+		return NULL;
+
+	/* Let's not warn more than once an hour.. */
+	now = jiffies; next = next_warn;
+	if (next && time_before(now, next))
+		return NULL;
+	next_warn = now + 60*60*HZ;
+
+	/* Let people know things may have changed. */
+	pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
+		current->comm, task_pid_nr(current),
+		vma->vm_start, vma->vm_end, addr);
+	dump_stack();
+	return NULL;
+#endif
+}
+
 /**
  * __get_user_pages() - pin user pages in memory
  * @mm:		mm_struct of target mm
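The rate limit in gup_vma_lookup() is the standard jiffies idiom: time_before() handles counter wraparound, and next_warn doubles as a "warned at least once" flag. A minimal stand-alone sketch of the same pattern (warn_once_per_hour() is a hypothetical name, not part of this patch):

static bool warn_once_per_hour(void)
{
	static volatile unsigned long next_warn;	/* 0 until the first warning */
	unsigned long now = jiffies, next = next_warn;

	if (next && time_before(now, next))
		return false;			/* still inside the quiet period */
	next_warn = now + 60*60*HZ;		/* earliest time for the next one */
	return true;
}

As in the patch, next_warn is read and updated without locking; two CPUs racing here may each warn once, which is harmless for a diagnostic message.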
@@ -1168,11 +1207,7 @@ static long __get_user_pages(struct mm_struct *mm,
 
 		/* first iteration or cross vma bound */
 		if (!vma || start >= vma->vm_end) {
-			vma = find_vma(mm, start);
-			if (vma && (start < vma->vm_start)) {
-				WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN);
-				vma = NULL;
-			}
+			vma = gup_vma_lookup(mm, start);
 			if (!vma && in_gate_area(mm, start)) {
 				ret = get_gate_page(mm, start & PAGE_MASK,
 						gup_flags, &vma,
@@ -1337,13 +1372,9 @@ int fixup_user_fault(struct mm_struct *mm,
 	fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 retry:
-	vma = find_vma(mm, address);
+	vma = gup_vma_lookup(mm, address);
 	if (!vma)
 		return -EFAULT;
-	if (address < vma->vm_start) {
-		WARN_ON_ONCE(vma->vm_flags & VM_GROWSDOWN);
-		return -EFAULT;
-	}
 
 	if (!vma_permits_fault(vma, fault_flags))
 		return -EFAULT;
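Callers of fixup_user_fault() see no interface change; a fault address just below a VM_GROWSDOWN vma now fails with -EFAULT (after the one-time warning) instead of expanding the stack. A hedged sketch of a typical call site, using the real signature int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked):

	bool unlocked = false;
	int ret;

	mmap_read_lock(mm);	/* fixup_user_fault() expects mmap_lock held */
	ret = fixup_user_fault(mm, address, FAULT_FLAG_WRITE, &unlocked);
	/* if 'unlocked' is set, the lock was dropped and re-taken internally */
	mmap_read_unlock(mm);
	if (ret)
		return ret;	/* -EFAULT when gup_vma_lookup() finds no vma */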
diff --git a/mm/mmap.c b/mm/mmap.c
index 51e70fa98450..84c71431a527 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1977,6 +1977,8 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 			return -ENOMEM;
 	}
 
+	/* Lock the VMA before expanding to prevent concurrent page faults */
+	vma_start_write(vma);
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_lock in read mode. We need the
@@ -2064,6 +2066,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 			return -ENOMEM;
 	}
 
+	/* Lock the VMA before expanding to prevent concurrent page faults */
+	vma_start_write(vma);
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_lock in read mode. We need the
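Both vma_start_write() calls pair with the lockless fault path that CONFIG_PER_VMA_LOCK enables: a page fault first tries to take only the per-VMA read lock, so write-locking the vma before resizing it forces any concurrent fault to fall back to the mmap_lock slow path. Condensed from the arch fault handlers of this series (access checks and error handling omitted, so treat as illustrative):

	vma = lock_vma_under_rcu(mm, address);	/* per-VMA read lock, no mmap_lock */
	if (!vma)
		goto lock_mmap;			/* writer active, or no vma found */
	fault = handle_mm_fault(vma, address,
				flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);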
@@ -2554,11 +2558,10 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 
 	mas_set(&mas_detach, start);
 	remove_mt(mm, &mas_detach);
 	__mt_destroy(&mt_detach);
+	validate_mm(mm);
 	if (unlock)
 		mmap_read_unlock(mm);
-
-	validate_mm(mm);
 	return 0;
 
 clear_tree_failed:
@@ -2572,6 +2575,7 @@ end_split_failed:
 	__mt_destroy(&mt_detach);
 start_split_failed:
 map_count_exceeded:
+	validate_mm(mm);
 	return error;
 }
 
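Both hunks keep validate_mm(), a debug-only consistency walk that compiles away on non-debug builds, inside the region covered by the lock: running it after mmap_read_unlock() would race with concurrent writers and could report false corruption, and the error path now performs the same check before returning.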
@@ -2808,6 +2812,8 @@ cannot_expand:
 	if (vma->vm_file)
 		i_mmap_lock_write(vma->vm_file->f_mapping);
 
+	/* Lock the VMA since it is modified after insertion into VMA tree */
+	vma_start_write(vma);
 	vma_iter_store(&vmi, vma);
 	mm->map_count++;
 	if (vma->vm_file) {
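The ordering of the two added lines is the point: the vma must already be write-locked when vma_iter_store() publishes it in the maple tree, because from that moment a concurrent fault can reach it through lock_vma_under_rcu() without taking mmap_lock. Schematically:

	vma_start_write(vma);		/* 1: lock the vma before it is visible */
	vma_iter_store(&vmi, vma);	/* 2: publish it in the VMA tree */
	/* subsequent setup of the still-write-locked vma is safe from readers */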
@@ -3020,12 +3026,9 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		  bool unlock)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int ret;
 
 	arch_unmap(mm, start, end);
-	ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
-	validate_mm(mm);
-	return ret;
+	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
 }
 
 /*