author	Linus Torvalds <torvalds@linux-foundation.org>	2021-01-08 22:13:41 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-01-16 19:46:39 +0100
commit	29a951dfb3c3263c3a0f3bd9f7f2c2cfde4baedb (patch)
tree	04043448c9ada73ea98a6b573520e4b0c9ef8e14	/fs/proc/task_mmu.c
parent	Merge tag 'for-5.11/dm-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
mm: fix clear_refs_write locking
Turning page table entries read-only requires the mmap_sem held for writing. So stop doing the odd games with turning things from read locks to write locks and back. Just get the write lock.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat
-rw-r--r--	fs/proc/task_mmu.c	32
1 file changed, 9 insertions(+), 23 deletions(-)
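For reference, the locking structure of clear_refs_write() after this patch, pieced together from the two hunks below, reads roughly as follows. This is a reading aid reconstructed from the diff, not the verbatim post-patch source; declarations and the page-table walk between the hunks are elided.

	if (mmap_write_lock_killable(mm)) {
		count = -EINTR;
		goto out_mm;
	}
	if (type == CLEAR_REFS_MM_HIWATER_RSS) {
		/* Writing 5 resets the peak RSS to the current rss value. */
		reset_mm_hiwater_rss(mm);
		goto out_unlock;
	}
	tlb_gather_mmu(&tlb, mm, 0, -1);
	if (type == CLEAR_REFS_SOFT_DIRTY) {
		/* VM_SOFTDIRTY is now cleared under the same write lock. */
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!(vma->vm_flags & VM_SOFTDIRTY))
				continue;
			vma->vm_flags &= ~VM_SOFTDIRTY;
			vma_set_page_prot(vma);
		}
		/* ... mmu_notifier range setup/start, not shown in the hunks ... */
	}
	/* ... page-table walk clearing referenced/soft-dirty bits, elided ... */
	if (type == CLEAR_REFS_SOFT_DIRTY)
		mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, 0, -1);
out_unlock:
	mmap_write_unlock(mm);
out_mm:
	mmput(mm);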
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ee5a235b3056..ab7d700b2caa 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1215,41 +1215,26 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 			.type = type,
 		};
 
+		if (mmap_write_lock_killable(mm)) {
+			count = -EINTR;
+			goto out_mm;
+		}
 		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
-			if (mmap_write_lock_killable(mm)) {
-				count = -EINTR;
-				goto out_mm;
-			}
-
 			/*
 			 * Writing 5 to /proc/pid/clear_refs resets the peak
 			 * resident set size to this mm's current rss value.
 			 */
 			reset_mm_hiwater_rss(mm);
-			mmap_write_unlock(mm);
-			goto out_mm;
+			goto out_unlock;
 		}
 
-		if (mmap_read_lock_killable(mm)) {
-			count = -EINTR;
-			goto out_mm;
-		}
 		tlb_gather_mmu(&tlb, mm, 0, -1);
 		if (type == CLEAR_REFS_SOFT_DIRTY) {
 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
 				if (!(vma->vm_flags & VM_SOFTDIRTY))
 					continue;
-				mmap_read_unlock(mm);
-				if (mmap_write_lock_killable(mm)) {
-					count = -EINTR;
-					goto out_mm;
-				}
-				for (vma = mm->mmap; vma; vma = vma->vm_next) {
-					vma->vm_flags &= ~VM_SOFTDIRTY;
-					vma_set_page_prot(vma);
-				}
-				mmap_write_downgrade(mm);
-				break;
+				vma->vm_flags &= ~VM_SOFTDIRTY;
+				vma_set_page_prot(vma);
 			}
 
 			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
@@ -1261,7 +1246,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		if (type == CLEAR_REFS_SOFT_DIRTY)
 			mmu_notifier_invalidate_range_end(&range);
 		tlb_finish_mmu(&tlb, 0, -1);
-		mmap_read_unlock(mm);
+out_unlock:
+		mmap_write_unlock(mm);
 out_mm:
 		mmput(mm);
 	}