author     Linus Torvalds <torvalds@linux-foundation.org>  2021-06-30 02:29:11 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-06-30 02:29:11 +0200
commit     65090f30ab791810a3dc840317e57df05018559c (patch)
tree       f417526656da37109777e89613e140ffc59228bc /mm/mmap.c
parent     Merge tag 'devprop-5.14-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent     mm,hwpoison: make get_hwpoison_page() call get_any_page() (diff)
download   linux-65090f30ab791810a3dc840317e57df05018559c.tar.xz
           linux-65090f30ab791810a3dc840317e57df05018559c.zip
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
"191 patches.
Subsystems affected by this patch series: kthread, ia64, scripts,
ntfs, squashfs, ocfs2, kernel/watchdog, and mm (gup, pagealloc, slab,
slub, kmemleak, dax, debug, pagecache, gup, swap, memcg, pagemap,
mprotect, bootmem, dma, tracing, vmalloc, kasan, initialization,
pagealloc, and memory-failure)"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (191 commits)
mm,hwpoison: make get_hwpoison_page() call get_any_page()
mm,hwpoison: send SIGBUS with error virtual address
mm/page_alloc: split pcp->high across all online CPUs for cpuless nodes
mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
mm: replace CONFIG_FLAT_NODE_MEM_MAP with CONFIG_FLATMEM
mm: replace CONFIG_NEED_MULTIPLE_NODES with CONFIG_NUMA
docs: remove description of DISCONTIGMEM
arch, mm: remove stale mentions of DISCONTIGMEM
mm: remove CONFIG_DISCONTIGMEM
m68k: remove support for DISCONTIGMEM
arc: remove support for DISCONTIGMEM
arc: update comment about HIGHMEM implementation
alpha: remove DISCONTIGMEM and NUMA
mm/page_alloc: move free_the_page
mm/page_alloc: fix counting of managed_pages
mm/page_alloc: improve memmap_pages dbg msg
mm: drop SECTION_SHIFT in code comments
mm/page_alloc: introduce vm.percpu_pagelist_high_fraction
mm/page_alloc: limit the number of pages on PCP lists when reclaim is active
mm/page_alloc: scale the number of pages that are batch freed
...
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	54
1 file changed, 24 insertions, 30 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index bc88d1674364..aa9de981b659 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1457,9 +1457,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 		return addr;
 
 	if (flags & MAP_FIXED_NOREPLACE) {
-		struct vm_area_struct *vma = find_vma(mm, addr);
-
-		if (vma && vma->vm_start < addr + len)
+		if (find_vma_intersection(mm, addr, addr + len))
 			return -EEXIST;
 	}
 
@@ -1633,7 +1631,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
 			return PTR_ERR(file);
 	}
 
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	flags &= ~MAP_DENYWRITE;
 
 	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 out_fput:
@@ -2802,6 +2800,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+	struct mm_struct *mm = start->vm_mm;
+	struct vm_area_struct *tmp = start;
+
+	while (tmp && tmp->vm_start < limit) {
+		if (tmp->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(tmp);
+			munlock_vma_pages_all(tmp);
+		}
+
+		tmp = tmp->vm_next;
+	}
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -2828,16 +2842,11 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	 */
 	arch_unmap(mm, start, end);
 
-	/* Find the first overlapping VMA */
-	vma = find_vma(mm, start);
+	/* Find the first overlapping VMA where start < vma->vm_end */
+	vma = find_vma_intersection(mm, start, end);
 	if (!vma)
 		return 0;
 	prev = vma->vm_prev;
-	/* we have start < vma->vm_end */
-
-	/* if it doesn't overlap, we have nothing.. */
-	if (vma->vm_start >= end)
-		return 0;
 
 	/*
 	 * If we need to split any vma, do it now to save pain later.
@@ -2890,17 +2899,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/*
 	 * unlock any mlock()ed ranges before detaching vmas
 	 */
-	if (mm->locked_vm) {
-		struct vm_area_struct *tmp = vma;
-		while (tmp && tmp->vm_start < end) {
-			if (tmp->vm_flags & VM_LOCKED) {
-				mm->locked_vm -= vma_pages(tmp);
-				munlock_vma_pages_all(tmp);
-			}
-
-			tmp = tmp->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(vma, end);
 
 	/* Detach vmas from rbtree */
 	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3185,14 +3185,8 @@ void exit_mmap(struct mm_struct *mm)
 		mmap_write_unlock(mm);
 	}
 
-	if (mm->locked_vm) {
-		vma = mm->mmap;
-		while (vma) {
-			if (vma->vm_flags & VM_LOCKED)
-				munlock_vma_pages_all(vma);
-			vma = vma->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(mm->mmap, ULONG_MAX);
 
 	arch_exit_mmap(mm);
 
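For reading the do_mmap() and __do_munmap() hunks above: find_vma_intersection() was, around this release, a small inline wrapper in include/linux/mm.h. The sketch below is a reconstruction of that era's helper for reference, not part of this diff:

	/*
	 * Sketch of find_vma_intersection() semantics circa v5.14:
	 * return the first VMA overlapping [start_addr, end_addr),
	 * or NULL when no mapping intersects the range.
	 */
	static inline struct vm_area_struct *
	find_vma_intersection(struct mm_struct *mm,
			      unsigned long start_addr, unsigned long end_addr)
	{
		/* find_vma() returns the first VMA with vm_end > start_addr */
		struct vm_area_struct *vma = find_vma(mm, start_addr);

		/* no overlap if that VMA begins at or beyond end_addr */
		if (vma && end_addr <= vma->vm_start)
			vma = NULL;
		return vma;
	}

This is why the MAP_FIXED_NOREPLACE branch can drop its open-coded vma->vm_start < addr + len test, and why __do_munmap() no longer needs the separate "if it doesn't overlap" bail-out: the helper folds the lookup and the overlap check into one call. The new unlock_range() follows the same consolidation pattern, replacing the munlock walk duplicated in __do_munmap() and exit_mmap(); exit_mmap() passes ULONG_MAX as the limit so the walk covers every VMA in the address space.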
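The first hunk preserves the userspace-visible behavior of MAP_FIXED_NOREPLACE: mmap() fails with EEXIST when the requested fixed range overlaps an existing mapping. A hypothetical standalone demonstration (the length and addresses are illustrative; assumes a kernel with MAP_FIXED_NOREPLACE support, i.e. v4.17+):

	#define _GNU_SOURCE	/* exposes MAP_FIXED_NOREPLACE in glibc */
	#include <errno.h>
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4096 * 4;	/* assumes 4 KiB pages; illustration only */

		/* establish a mapping at an address chosen by the kernel */
		void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (base == MAP_FAILED)
			return 1;

		/* try to claim the same range without clobbering it */
		void *again = mmap(base, len, PROT_READ,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
				   -1, 0);
		if (again == MAP_FAILED && errno == EEXIST)
			printf("overlap correctly refused with EEXIST\n");

		munmap(base, len);
		return 0;
	}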