author     Vishal Moola (Oracle) <vishal.moola@gmail.com>   2024-04-15 23:17:47 +0200
committer  Andrew Morton <akpm@linux-foundation.org>        2024-04-25 04:34:26 +0200
commit     37641efaa3faa4b8292aba4bbd7d71c0b703a239 (patch)
tree       ba4426bd53e97e37951e37cfbe757ddd7998ecfe          /mm/hugetlb.c
parent     mm: zswap: fix shrinker NULL crash with cgroup_disable=memory (diff)
hugetlb: check for anon_vma prior to folio allocation
Commit 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of
anon_vma_prepare()") may bail out after allocating a folio if we do not
hold the mmap lock.  When this occurs, vmf_anon_prepare() will release
the vma lock.  Hugetlb then attempts to call restore_reserve_on_error(),
which depends on the vma lock being held.

We can move vmf_anon_prepare() prior to the folio allocation in order to
avoid calling restore_reserve_on_error() without the vma lock.

Link: https://lkml.kernel.org/r/ZiFqSrSRLhIV91og@fedora
Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+ad1b592fc4483655438b@syzkaller.appspotmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
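[Editor's note] The message above boils down to an ordering rule: the step
that can fail and drop the vma lock (vmf_anon_prepare()) must run before the
folio allocation, so the rollback path (restore_reserve_on_error(), which
requires the vma lock) is never reached with the lock already gone.  Below is
a minimal userspace sketch of that rule only.  It is not kernel code:
vma_lock, prepare_anon(), restore_reserve() and fault_path() are invented
stand-ins for the vma lock, vmf_anon_prepare(), restore_reserve_on_error()
and the hugetlb_no_page() fault path.

/*
 * Toy model of the ordering enforced by this patch. Stand-in names only;
 * build with: cc -pthread ordering_sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t vma_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for vmf_anon_prepare(): on failure the lock is dropped. */
static int prepare_anon(bool fail)
{
	if (fail) {
		pthread_mutex_unlock(&vma_lock);
		return -1;
	}
	return 0;
}

/* Stand-in for restore_reserve_on_error(): only valid with the lock held. */
static void restore_reserve(void)
{
	printf("rolling back reservation while still holding the lock\n");
}

/* Stand-in for the fault path, using the patched ordering. */
static int fault_path(bool fail_prepare, bool fail_insert)
{
	pthread_mutex_lock(&vma_lock);

	/* Patched order: the step that may drop the lock runs first. */
	if (prepare_anon(fail_prepare)) {
		/* Nothing allocated yet, so no rollback is needed. */
		return -1;
	}

	/* Stand-in for alloc_hugetlb_folio(): consumes a "reservation". */
	void *folio = malloc(4096);
	if (!folio) {
		pthread_mutex_unlock(&vma_lock);
		return -1;
	}

	/* Stand-in for a later step (e.g. page cache insertion) failing. */
	if (fail_insert) {
		/* Safe: prepare_anon() succeeded, so the lock is still held. */
		restore_reserve();
		free(folio);
		pthread_mutex_unlock(&vma_lock);
		return -1;
	}

	free(folio);
	pthread_mutex_unlock(&vma_lock);
	return 0;
}

int main(void)
{
	printf("prepare fails early: %d\n", fault_path(true, false));
	printf("later step fails:    %d\n", fault_path(false, true));
	printf("normal path:         %d\n", fault_path(false, false));
	return 0;
}

The sketch only illustrates the ordering; the real function carries
additional state (reservation accounting, page cache insertion, pte
re-checks) that the patch leaves untouched.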
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	11
1 file changed, 7 insertions, 4 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4553241f0fb2..05371bf54f96 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6261,6 +6261,12 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 							VM_UFFD_MISSING);
 		}
 
+		if (!(vma->vm_flags & VM_MAYSHARE)) {
+			ret = vmf_anon_prepare(vmf);
+			if (unlikely(ret))
+				goto out;
+		}
+
 		folio = alloc_hugetlb_folio(vma, haddr, 0);
 		if (IS_ERR(folio)) {
 			/*
@@ -6297,15 +6303,12 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 				 */
 				restore_reserve_on_error(h, vma, haddr, folio);
 				folio_put(folio);
+				ret = VM_FAULT_SIGBUS;
 				goto out;
 			}
 			new_pagecache_folio = true;
 		} else {
 			folio_lock(folio);
-
-			ret = vmf_anon_prepare(vmf);
-			if (unlikely(ret))
-				goto backout_unlocked;
 			anon_rmap = 1;
 		}
 	} else {