From 89154dd5313f774d3a592451360b78442571b1f8 Mon Sep 17 00:00:00 2001
From: Michel Lespinasse
Date: Mon, 8 Jun 2020 21:33:29 -0700
Subject: mmap locking API: convert mmap_sem call sites missed by coccinelle

Convert the last few remaining mmap_sem rwsem calls to use the new mmap
locking API. These were missed by coccinelle for some reason (I think
coccinelle does not support some of the preprocessor constructs in these
files?)

[akpm@linux-foundation.org: convert linux-next leftovers]
[akpm@linux-foundation.org: more linux-next leftovers]
[akpm@linux-foundation.org: more linux-next leftovers]

Signed-off-by: Michel Lespinasse
Signed-off-by: Andrew Morton
Reviewed-by: Daniel Jordan
Reviewed-by: Laurent Dufour
Reviewed-by: Vlastimil Babka
Cc: Davidlohr Bueso
Cc: David Rientjes
Cc: Hugh Dickins
Cc: Jason Gunthorpe
Cc: Jerome Glisse
Cc: John Hubbard
Cc: Liam Howlett
Cc: Matthew Wilcox
Cc: Peter Zijlstra
Cc: Ying Han
Link: http://lkml.kernel.org/r/20200520052908.204642-6-walken@google.com
Signed-off-by: Linus Torvalds
---
 arch/mips/mm/fault.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/mips/mm/fault.c')

diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f8d62cd83b36..9ef2dd39111e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,7 +97,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -190,7 +190,7 @@ good_area:
 		}
 	}

-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;

 	/*
@@ -198,7 +198,7 @@ good_area:
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);

 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -250,14 +250,14 @@ out_of_memory:
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;

 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);

 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
--
cgit v1.2.3


From 3e4e28c5a8f01ee4174d639e36ed155ade489a6f Mon Sep 17 00:00:00 2001
From: Michel Lespinasse
Date: Mon, 8 Jun 2020 21:33:51 -0700
Subject: mmap locking API: convert mmap_sem API comments

Convert comments that reference old mmap_sem APIs to reference
corresponding new mmap locking APIs instead.
Signed-off-by: Michel Lespinasse
Signed-off-by: Andrew Morton
Reviewed-by: Vlastimil Babka
Reviewed-by: Davidlohr Bueso
Reviewed-by: Daniel Jordan
Cc: David Rientjes
Cc: Hugh Dickins
Cc: Jason Gunthorpe
Cc: Jerome Glisse
Cc: John Hubbard
Cc: Laurent Dufour
Cc: Liam Howlett
Cc: Matthew Wilcox
Cc: Peter Zijlstra
Cc: Ying Han
Link: http://lkml.kernel.org/r/20200520052908.204642-12-walken@google.com
Signed-off-by: Linus Torvalds
---
 Documentation/vm/hmm.rst       |  6 +++---
 arch/alpha/mm/fault.c          |  2 +-
 arch/ia64/mm/fault.c           |  2 +-
 arch/m68k/mm/fault.c           |  2 +-
 arch/microblaze/mm/fault.c     |  2 +-
 arch/mips/mm/fault.c           |  2 +-
 arch/nds32/mm/fault.c          |  2 +-
 arch/nios2/mm/fault.c          |  2 +-
 arch/openrisc/mm/fault.c       |  2 +-
 arch/parisc/mm/fault.c         |  2 +-
 arch/riscv/mm/fault.c          |  2 +-
 arch/sh/mm/fault.c             |  2 +-
 arch/sparc/mm/fault_32.c       |  2 +-
 arch/sparc/mm/fault_64.c       |  2 +-
 arch/xtensa/mm/fault.c         |  2 +-
 drivers/android/binder_alloc.c |  4 ++--
 fs/hugetlbfs/inode.c           |  2 +-
 fs/userfaultfd.c               |  2 +-
 mm/filemap.c                   |  2 +-
 mm/gup.c                       | 12 ++++++------
 mm/huge_memory.c               |  4 ++--
 mm/khugepaged.c                |  2 +-
 mm/ksm.c                       |  2 +-
 mm/memory.c                    |  4 ++--
 mm/mempolicy.c                 |  2 +-
 mm/migrate.c                   |  4 ++--
 mm/mmap.c                      |  2 +-
 mm/oom_kill.c                  |  8 ++++----
 net/ipv4/tcp.c                 |  2 +-
 29 files changed, 43 insertions(+), 43 deletions(-)

(limited to 'arch/mips/mm/fault.c')

diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 561969754bc0..6f9e000757fa 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -191,15 +191,15 @@ The usage pattern is::
 
  again:
       range.notifier_seq = mmu_interval_read_begin(&interval_sub);
-      down_read(&mm->mmap_sem);
+      mmap_read_lock(mm);
       ret = hmm_range_fault(&range);
       if (ret) {
-          up_read(&mm->mmap_sem);
+          mmap_read_unlock(mm);
           if (ret == -EBUSY)
                  goto again;
           return ret;
       }
-      up_read(&mm->mmap_sem);
+      mmap_read_unlock(mm);
 
       take_lock(driver->update);
       if (mmu_interval_read_retry(&ni, range.notifier_seq) {
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 36efa778ee1a..c2303a8c2b9f 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -171,7 +171,7 @@ retry:
 	if (fault & VM_FAULT_RETRY) {
 		flags |= FAULT_FLAG_TRIED;

-		 /* No need to up_read(&mm->mmap_sem) as we would
+		 /* No need to mmap_read_unlock(mm) as we would
 		 * have already released it in __lock_page_or_retry
 		 * in mm/filemap.c.
 		 */
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index e9ce969c8b73..6c09f43d9711 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -173,7 +173,7 @@ retry:
 	if (fault & VM_FAULT_RETRY) {
 		flags |= FAULT_FLAG_TRIED;

-		 /* No need to up_read(&mm->mmap_sem) as we would
+		 /* No need to mmap_read_unlock(mm) as we would
 		 * have already released it in __lock_page_or_retry
 		 * in mm/filemap.c.
 		 */
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 650acab0d77d..a94a814ad6ad 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -165,7 +165,7 @@ good_area:
 			flags |= FAULT_FLAG_TRIED;

 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 952ab614d50e..74358902a5db 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -238,7 +238,7 @@ good_area:
 			flags |= FAULT_FLAG_TRIED;

 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 9ef2dd39111e..01b168a90434 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -181,7 +181,7 @@ good_area:
 			flags |= FAULT_FLAG_TRIED;

 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
index b92785588c30..89831b6e1ede 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -247,7 +247,7 @@ good_area:
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;

-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index b8a0b51c6b0f..4112ef0e247e 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -160,7 +160,7 @@ good_area:
 			flags |= FAULT_FLAG_TRIED;

 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0bbb1a76949a..d2224ccca294 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -183,7 +183,7 @@ good_area:
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;

-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index bc840fdb398f..66ac0719bd49 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -329,7 +329,7 @@ good_area:
 				current->min_flt++;
 			if (fault & VM_FAULT_RETRY) {
 				/*
-				 * No need to up_read(&mm->mmap_sem) as we would
+				 * No need to mmap_read_unlock(mm) as we would
 				 * have already released it in __lock_page_or_retry
 				 * in mm/filemap.c.
 				 */
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index cd7f4af95e56..996db5ebbf39 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -147,7 +147,7 @@ good_area:
 			flags |= FAULT_FLAG_TRIED;

 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index d0c0d5351280..3a125aad586d 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -502,7 +502,7 @@ good_area:
 			flags |= FAULT_FLAG_TRIED;

 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 34588c4ab9d9..cfef656eda0f 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -262,7 +262,7 @@ good_area:
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;

-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 417d7f677eb3..73f95a5ba683 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -450,7 +450,7 @@ good_area:
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;

-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 1c8d22a0cf46..c4decc73fd86 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -130,7 +130,7 @@ good_area:
 		if (fault & VM_FAULT_RETRY) {
 			flags |= FAULT_FLAG_TRIED;

-			/* No need to up_read(&mm->mmap_sem) as we would
+			/* No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index cbdc43ed0f9f..42c672f1584e 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -933,7 +933,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	if (!mmget_not_zero(mm))
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
-		goto err_down_read_mmap_sem_failed;
+		goto err_mmap_read_lock_failed;
 	vma = binder_alloc_get_vma(alloc);

 	list_lru_isolate(lru, item);
@@ -960,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	mutex_unlock(&alloc->mutex);
 	return LRU_REMOVED_RETRY;

-err_down_read_mmap_sem_failed:
+err_mmap_read_lock_failed:
 	mmput_async(mm);
 err_mmget:
 err_page_already_freed:
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index f3420a643b4f..ef5313f9c78f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -187,7 +187,7 @@ out:
 }

 /*
- * Called under down_write(mmap_sem).
+ * Called under mmap_write_lock(mm).
 */

 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 12b492409040..3a63d75ed2fd 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1248,7 +1248,7 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
 	/*
 	 * To be sure waitqueue_active() is not reordered by the CPU
 	 * before the pagetable update, use an explicit SMP memory
-	 * barrier here. PT lock release or up_read(mmap_sem) still
+	 * barrier here. PT lock release or mmap_read_unlock(mm) still
 	 * have release semantics that can allow the
 	 * waitqueue_active() to be reordered before the pte update.
 	 */
diff --git a/mm/filemap.c b/mm/filemap.c
index b1442c4b36b7..950cf12a10fc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1373,7 +1373,7 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
- *     mmap_sem has been released (up_read()), unless flags had both
+ *     mmap_lock has been released (mmap_read_unlock(), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
diff --git a/mm/gup.c b/mm/gup.c
index 762fb9f733b2..bbb8851f4656 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1993,19 +1993,19 @@ EXPORT_SYMBOL(get_user_pages);
 /**
  * get_user_pages_locked() is suitable to replace the form:
  *
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      do_something()
  *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
+ *      mmap_read_unlock(mm);
  *
  *  to:
  *
  *      int locked = 1;
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      do_something()
  *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
  *      if (locked)
- *          up_read(&mm->mmap_sem);
+ *          mmap_read_unlock(mm);
 *
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
@@ -2050,9 +2050,9 @@ EXPORT_SYMBOL(get_user_pages_locked);
 /*
  * get_user_pages_unlocked() is suitable to replace the form:
  *
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
+ *      mmap_read_unlock(mm);
 *
 * with:
 *
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d9b2e0e0580a..de201f0b5a4a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1833,9 +1833,9 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		goto unlock;

 	/*
-	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
+	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
 	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
-	 * which is also under down_read(mmap_sem):
+	 * which is also under mmap_read_lock(mm):
 	 *
 	 *	CPU0:				CPU1:
 	 *				change_huge_pmd(prot_numa=1)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 19f3401e568a..2c318ad1db20 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1543,7 +1543,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		/*
 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
 		 * got written to. These VMAs are likely not worth investing
-		 * down_write(mmap_sem) as PMD-mapping is likely to be split
+		 * mmap_write_lock(mm) as PMD-mapping is likely to be split
 		 * later.
 		 *
 		 * Not that vma->anon_vma check is racy: it can be set up after
diff --git a/mm/ksm.c b/mm/ksm.c
index 098b580e7d76..3efe7f28cc3f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2362,7 +2362,7 @@ next_mm:
 	} else {
 		mmap_read_unlock(mm);
 		/*
-		 * up_read(&mm->mmap_sem) first because after
+		 * mmap_read_unlock(mm) first because after
 		 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
 		 * already have been freed under us by __ksm_exit()
 		 * because the "mm_slot" is still hashed and
diff --git a/mm/memory.c b/mm/memory.c
index 823982a8f0b0..4e2e17bb1281 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3323,10 +3323,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	 * pte_offset_map() on pmds where a huge pmd might be created
 	 * from a different thread.
 	 *
-	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
 	 * parallel threads are excluded by other means.
 	 *
-	 * Here we only have down_read(mmap_sem).
+	 * Here we only have mmap_read_lock(mm).
 	 */
 	if (pte_alloc(vma->vm_mm, vmf->pmd))
 		return VM_FAULT_OOM;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4930a9254068..a38cd4cc3206 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2185,7 +2185,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
- * When VMA is not NULL caller must hold down_read on the mmap_sem of the
+ * When VMA is not NULL caller must read-lock the mmap_lock of the
 * mm_struct of the VMA to prevent it from going away. Should be used for
 * all allocations for pages that will be mapped into user space. Returns
 * NULL when no page can be allocated.
diff --git a/mm/migrate.c b/mm/migrate.c
index 0aa8f83789c5..f69b09e0829c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2772,10 +2772,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 	 * pte_offset_map() on pmds where a huge pmd might be created
 	 * from a different thread.
 	 *
-	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
 	 * parallel threads are excluded by other means.
 	 *
-	 * Here we only have down_read(mmap_sem).
+	 * Here we only have mmap_read_lock(mm).
 	 */
 	if (pte_alloc(mm, pmdp))
 		goto abort;
diff --git a/mm/mmap.c b/mm/mmap.c
index a28778da76a3..79005049fbfc 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1361,7 +1361,7 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
 }

 /*
- * The caller must hold down_write(&current->mm->mmap_sem).
+ * The caller must write-lock current->mm->mmap_lock.
 */
 unsigned long do_mmap(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index af3de7a92a9f..3b5d78dfebe9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -577,8 +577,8 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 	/*
 	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
 	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
-	 * under mmap_sem for reading because it serializes against the
-	 * down_write();up_write() cycle in exit_mmap().
+	 * under mmap_lock for reading because it serializes against the
+	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
 	 */
 	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
 		trace_skip_task_reaping(tsk->pid);
@@ -611,7 +611,7 @@ static void oom_reap_task(struct task_struct *tsk)
 	int attempts = 0;
 	struct mm_struct *mm = tsk->signal->oom_mm;

-	/* Retry the down_read_trylock(mmap_sem) a few times */
+	/* Retry the mmap_read_trylock(mm) a few times */
 	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
 		schedule_timeout_idle(HZ/10);

@@ -629,7 +629,7 @@ done:

 	/*
 	 * Hide this mm from OOM killer because it has been either reaped or
-	 * somebody can't call up_write(mmap_sem).
+	 * somebody can't call mmap_write_unlock(mm).
 	 */
 	set_bit(MMF_OOM_SKIP, &mm->flags);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1714fe20ec80..27716e4932bc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1734,7 +1734,7 @@ int tcp_mmap(struct file *file, struct socket *sock,
 		return -EPERM;
 	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);

-	/* Instruct vm_insert_page() to not down_read(mmap_sem) */
+	/* Instruct vm_insert_page() to not mmap_read_lock(mm) */
 	vma->vm_flags |= VM_MIXEDMAP;

 	vma->vm_ops = &tcp_vm_ops;
--
cgit v1.2.3
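For readers following the series: both patches above lean on the wrapper layer
added earlier in the same series (include/linux/mmap_lock.h), where each mmap
locking call maps one-to-one onto the rwsem primitive it replaces. Below is a
minimal sketch of the relevant helpers, abridged and reconstructed here for
illustration rather than quoted verbatim from the upstream header; note that
at this point in the series the underlying field is still named mmap_sem, and
a later patch in the series renames it to mmap_lock:

    #ifndef _LINUX_MMAP_LOCK_H
    #define _LINUX_MMAP_LOCK_H

    /* Includes chosen for this sketch; the real header pulls these
     * in through its usual include chain. */
    #include <linux/mm_types.h>	/* struct mm_struct */
    #include <linux/rwsem.h>	/* down_read(), up_read(), ... */

    /* Shared (read) lock: what the page fault handlers above take. */
    static inline void mmap_read_lock(struct mm_struct *mm)
    {
    	down_read(&mm->mmap_sem);
    }

    /* Non-blocking variant, as used by binder_alloc_free_page(). */
    static inline bool mmap_read_trylock(struct mm_struct *mm)
    {
    	return down_read_trylock(&mm->mmap_sem) != 0;
    }

    static inline void mmap_read_unlock(struct mm_struct *mm)
    {
    	up_read(&mm->mmap_sem);
    }

    /* Exclusive (write) lock: for callers that modify the VMA tree,
     * e.g. do_mmap() per the mm/mmap.c comment above. */
    static inline void mmap_write_lock(struct mm_struct *mm)
    {
    	down_write(&mm->mmap_sem);
    }

    static inline void mmap_write_unlock(struct mm_struct *mm)
    {
    	up_write(&mm->mmap_sem);
    }

    #endif /* _LINUX_MMAP_LOCK_H */

Because the wrappers are this thin, both the mechanical coccinelle conversion
and the manual leftovers above are behavior-preserving: only the spelling of
each lock site changes. The payoff of routing every call site through one API
is that the lock implementation can later be changed in a single place.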