author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-05-25 20:23:45 +0200
committer  Matthew Wilcox (Oracle) <willy@infradead.org>  2022-06-09 22:24:25 +0200
commit     dcfa24ba68991ab69a48254a18377b45180ae664
tree       1095030522aad702805d25d23b91fe13320fabb9 /mm/filemap.c
parent     filemap: Don't release a locked folio
filemap: Cache the value of vm_flags
After we have unlocked the mmap_lock for I/O, the file is pinned, but
the VMA is not.  Checking this flag after that can be a use-after-free.
It's not a terribly interesting use-after-free as it can only read one
bit, and it's used to decide whether to read 2MB or 4MB.  But it upsets
the automated tools and it's generally bad practice anyway, so let's
fix it.

Reported-by: syzbot+5b96d55e5b54924c77ad@syzkaller.appspotmail.com
Fixes: 4687fdbb805a ("mm/filemap: Support VM_HUGEPAGE for file mappings")
Cc: stable@vger.kernel.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
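A minimal sketch of the ordering bug and the fix (illustrative only,
condensed from do_sync_mmap_readahead(); not the patched code itself):

	/*
	 * Illustrative sketch: the VMA is only stable while the
	 * mmap_lock is held, so any flags needed after a possible
	 * unlock must be copied out first.
	 */
	static struct file *sketch_sync_mmap_readahead(struct vm_fault *vmf)
	{
		/* Fix: copy the flags while the mmap_lock still protects the VMA. */
		unsigned long vm_flags = vmf->vma->vm_flags;
		struct file *fpin = NULL;

		/* May drop the mmap_lock; the file is pinned, the VMA is not. */
		fpin = maybe_unlock_mmap_for_io(vmf, fpin);

		/* Before the patch: vmf->vma->vm_flags here -- possible UAF. */
		/* After the patch: read the cached copy; the VMA is never
		 * dereferenced again on this path. */
		if (vm_flags & VM_RAND_READ)
			return fpin;

		return fpin;
	}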
Diffstat (limited to 'mm/filemap.c')
 mm/filemap.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 9daeaab36081..ac3775c1ce4c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct address_space *mapping = file->f_mapping;
 	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
+	unsigned long vm_flags = vmf->vma->vm_flags;
 	unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* Use the readahead code, even if readahead is disabled */
-	if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+	if (vm_flags & VM_HUGEPAGE) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
 		ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 		 * Fetch two PMD folios, so we get the chance to actually
 		 * readahead, unless we've been told not to.
 		 */
-		if (!(vmf->vma->vm_flags & VM_RAND_READ))
+		if (!(vm_flags & VM_RAND_READ))
 			ra->size *= 2;
 		ra->async_size = HPAGE_PMD_NR;
 		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vm_flags & VM_RAND_READ)
 		return fpin;
 	if (!ra->ra_pages)
 		return fpin;
 
-	if (vmf->vma->vm_flags & VM_SEQ_READ) {
+	if (vm_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		page_cache_sync_ra(&ractl, ra->ra_pages);
 		return fpin;
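
Why the VMA becomes unsafe at all: maybe_unlock_mmap_for_io() pins the
struct file and drops the mmap_lock.  A simplified sketch of that helper
(the real one in mm/filemap.c also checks the fault-retry flags, omitted
here):

	static struct file *maybe_unlock_mmap_for_io_sketch(struct vm_fault *vmf,
							    struct file *fpin)
	{
		if (fpin)		/* already unlocked earlier in this fault */
			return fpin;

		fpin = get_file(vmf->vma->vm_file);	/* pin the file... */
		mmap_read_unlock(vmf->vma->vm_mm);	/* ...but not the VMA */
		return fpin;
	}

Once the lock is dropped, a concurrent munmap() can free the VMA, so the
patch ensures every vm_flags test on this path reads the local copy taken
before the first unlock.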