author	Andrea Arcangeli <aarcange@redhat.com>	2017-02-23 00:43:52 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-23 01:41:29 +0100
commit	a425d3584e7e69587aa441e91c7ffce7f47004d7 (patch)
tree	521ff37f093bd1bc9fc280fac65f21de0bd155b1 /mm/shmem.c
parent	userfaultfd: shmem: lock the page before adding it to pagecache (diff)
userfaultfd: shmem: avoid a lockup resulting from corrupted page->flags
Use the non-atomic version of __SetPageUptodate while the page is still private and not visible to lookup operations. Using the non-atomic version after the page is already visible to lookups is unsafe, as concurrent lock_page operations could be modifying page->flags while it runs.

This solves a lockup in find_lock_entry with the userfaultfd_shmem selftest:

userfaultfd_shm D14296   691      1 0x00000004
Call Trace:
 schedule+0x3d/0x90
 schedule_timeout+0x228/0x420
 io_schedule_timeout+0xa4/0x110
 __lock_page+0x12d/0x170
 find_lock_entry+0xa4/0x190
 shmem_getpage_gfp+0xb9/0xc30
 shmem_fault+0x70/0x1c0
 __do_fault+0x21/0x150
 handle_mm_fault+0xec9/0x1490
 __do_page_fault+0x20d/0x520
 trace_do_page_fault+0x61/0x270
 do_async_page_fault+0x19/0x80
 async_page_fault+0x25/0x30

Link: http://lkml.kernel.org/r/20170116180408.12184-2-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
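The commit message takes for granted why mixing non-atomic and atomic bit operations on the same word is unsafe. As an illustration, the stand-alone user-space C program below (an analogy only, not kernel code; the flag names, thread names, and iteration count are invented for the demo) models __SetPageUptodate() as a separate load/OR/store racing against a lock_page()-style atomic fetch-or:

/*
 * flags_race.c - user-space analogy (NOT kernel code) of the bug fixed
 * by this patch.  A plain load/OR/store on a shared flags word, like
 * the non-atomic __SetPageUptodate(), can overwrite a bit that another
 * thread sets atomically in between, the way lock_page() manipulates
 * PG_locked in page->flags.
 *
 * Build: cc -std=c11 -pthread flags_race.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_LOCKED   (1UL << 0)	/* stands in for PG_locked   */
#define FLAG_UPTODATE (1UL << 1)	/* stands in for PG_uptodate */

static atomic_ulong flags;

/* Atomic read-modify-write, like lock_page() setting PG_locked. */
static void *atomic_locker(void *arg)
{
	atomic_fetch_or(&flags, FLAG_LOCKED);
	return arg;
}

/*
 * Non-atomic read-modify-write, like __SetPageUptodate(): separate
 * load, OR and store.  If atomic_locker() runs between the load and
 * the store, its FLAG_LOCKED bit is overwritten by the stale value.
 * (Relaxed atomics are used only to keep the data race well defined
 * in C11; the lost-update behavior is the point being shown.)
 */
static void *nonatomic_setter(void *arg)
{
	unsigned long v = atomic_load_explicit(&flags, memory_order_relaxed);
	v |= FLAG_UPTODATE;
	atomic_store_explicit(&flags, v, memory_order_relaxed);
	return arg;
}

int main(void)
{
	int lost = 0;

	for (int i = 0; i < 100000; i++) {
		pthread_t a, b;

		atomic_store(&flags, 0);
		pthread_create(&a, NULL, atomic_locker, NULL);
		pthread_create(&b, NULL, nonatomic_setter, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		if (!(atomic_load(&flags) & FLAG_LOCKED))
			lost++;	/* atomically-set lock bit clobbered */
	}
	printf("FLAG_LOCKED lost in %d of 100000 runs\n", lost);
	return 0;
}

In the kernel the clobbered bits live in page->flags itself, so a stale store can leave the page looking permanently locked; that is one way a waiter in __lock_page(), as in the hung-task trace above, ends up waiting forever.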
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 4e5e7a57e5b4..8d7d80cf8708 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2248,6 +2248,7 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
 	__SetPageLocked(page);
 	__SetPageSwapBacked(page);
+	__SetPageUptodate(page);
 
 	ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false);
 	if (ret)
@@ -2272,8 +2273,6 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	if (!pte_none(*dst_pte))
 		goto out_release_uncharge_unlock;
 
-	__SetPageUptodate(page);
-
 	lru_cache_add_anon(page);
 
 	spin_lock(&info->lock);
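To summarize the fix, below is a condensed paraphrase of the order of operations in shmem_mcopy_atomic_pte() after this patch. It is a sketch stitched together from the two hunks above, with the intervening code elided as "..."; it is not a verbatim excerpt of the function.

	/* Condensed paraphrase; not verbatim kernel code. */
	__SetPageLocked(page);		/* page still private: non-atomic  */
	__SetPageSwapBacked(page);	/* page->flags updates are safe    */
	__SetPageUptodate(page);	/* moved up, before publication    */

	ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false);
	...
	/* the page is inserted into the pagecache here and becomes
	 * visible to lookups such as find_lock_entry() */
	...
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	/* from this point on, only atomic page->flags operations
	 * (lock_page(), SetPageUptodate(), ...) would be safe */
	lru_cache_add_anon(page);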