author     Michel Lespinasse <walken@google.com>           2020-06-09 06:33:51 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-09 18:39:14 +0200
commit     3e4e28c5a8f01ee4174d639e36ed155ade489a6f
tree       5d440153b87f0416897f4f1a986248370354f986 /mm
parent     mmap locking API: rename mmap_sem to mmap_lock
mmap locking API: convert mmap_sem API comments
Convert comments that reference the old mmap_sem APIs to reference the
corresponding new mmap locking APIs instead; a sketch of the
correspondence follows the tags below.
Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-12-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
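
A minimal sketch of the resulting correspondence, for orientation (not
part of the patch; walk_user_vmas() is a hypothetical caller, but the
wrappers are the ones added in include/linux/mmap_lock.h earlier in
this series):

	#include <linux/mm_types.h>	/* struct mm_struct */
	#include <linux/mmap_lock.h>	/* mmap_read_lock() and friends */

	static void walk_user_vmas(struct mm_struct *mm)
	{
		/* was: down_read(&mm->mmap_sem); */
		mmap_read_lock(mm);

		/* ... read-only walk of the mm's VMAs ... */

		/* was: up_read(&mm->mmap_sem); */
		mmap_read_unlock(mm);
	}

The write side converts the same way: down_write(&mm->mmap_sem) and
up_write(&mm->mmap_sem) become mmap_write_lock(mm) and
mmap_write_unlock(mm), and down_read_trylock(&mm->mmap_sem) becomes
mmap_read_trylock(mm).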
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c      2
-rw-r--r--  mm/gup.c         12
-rw-r--r--  mm/huge_memory.c  4
-rw-r--r--  mm/khugepaged.c   2
-rw-r--r--  mm/ksm.c          2
-rw-r--r--  mm/memory.c       4
-rw-r--r--  mm/mempolicy.c    2
-rw-r--r--  mm/migrate.c      4
-rw-r--r--  mm/mmap.c         2
-rw-r--r--  mm/oom_kill.c     8
10 files changed, 21 insertions, 21 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index b1442c4b36b7..950cf12a10fc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1373,7 +1373,7 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
  * Return values:
  * 1 - page is locked; mmap_sem is still held.
  * 0 - page is not locked.
- *     mmap_sem has been released (up_read()), unless flags had both
+ *     mmap_lock has been released (mmap_read_unlock(), unless flags had both
  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
  *     which case mmap_sem is still held.
  *
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1993,19 +1993,19 @@ EXPORT_SYMBOL(get_user_pages);
 /**
  * get_user_pages_locked() is suitable to replace the form:
  *
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      do_something()
  *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
+ *      mmap_read_unlock(mm);
  *
  *  to:
  *
  *      int locked = 1;
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      do_something()
  *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
  *      if (locked)
- *          up_read(&mm->mmap_sem);
+ *          mmap_read_unlock(mm);
  *
  * @start: starting user address
  * @nr_pages: number of pages from start to pin
@@ -2050,9 +2050,9 @@ EXPORT_SYMBOL(get_user_pages_locked);
 /*
  * get_user_pages_unlocked() is suitable to replace the form:
  *
- *      down_read(&mm->mmap_sem);
+ *      mmap_read_lock(mm);
  *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
+ *      mmap_read_unlock(mm);
  *
  * with:
  *
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d9b2e0e0580a..de201f0b5a4a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1833,9 +1833,9 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		goto unlock;
 
 	/*
-	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
+	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
 	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
-	 * which is also under down_read(mmap_sem):
+	 * which is also under mmap_read_lock(mm):
 	 *
 	 *	CPU0:				CPU1:
 	 *				change_huge_pmd(prot_numa=1)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 19f3401e568a..2c318ad1db20 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1543,7 +1543,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		/*
 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
 		 * got written to. These VMAs are likely not worth investing
-		 * down_write(mmap_sem) as PMD-mapping is likely to be split
+		 * mmap_write_lock(mm) as PMD-mapping is likely to be split
 		 * later.
 		 *
 		 * Not that vma->anon_vma check is racy: it can be set up after
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2362,7 +2362,7 @@ next_mm:
 	} else {
 		mmap_read_unlock(mm);
 		/*
-		 * up_read(&mm->mmap_sem) first because after
+		 * mmap_read_unlock(mm) first because after
 		 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
 		 * already have been freed under us by __ksm_exit()
 		 * because the "mm_slot" is still hashed and
diff --git a/mm/memory.c b/mm/memory.c
index 823982a8f0b0..4e2e17bb1281 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3323,10 +3323,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	 * pte_offset_map() on pmds where a huge pmd might be created
 	 * from a different thread.
 	 *
-	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
 	 * parallel threads are excluded by other means.
 	 *
-	 * Here we only have down_read(mmap_sem).
+	 * Here we only have mmap_read_lock(mm).
 	 */
 	if (pte_alloc(vma->vm_mm, vmf->pmd))
 		return VM_FAULT_OOM;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4930a9254068..a38cd4cc3206 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2185,7 +2185,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  *
  * This function allocates a page from the kernel page pool and applies
  * a NUMA policy associated with the VMA or the current process.
- * When VMA is not NULL caller must hold down_read on the mmap_sem of the
+ * When VMA is not NULL caller must read-lock the mmap_lock of the
  * mm_struct of the VMA to prevent it from going away. Should be used for
  * all allocations for pages that will be mapped into user space. Returns
  * NULL when no page can be allocated.
diff --git a/mm/migrate.c b/mm/migrate.c
index 0aa8f83789c5..f69b09e0829c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2772,10 +2772,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 	 * pte_offset_map() on pmds where a huge pmd might be created
 	 * from a different thread.
 	 *
-	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
 	 * parallel threads are excluded by other means.
 	 *
-	 * Here we only have down_read(mmap_sem).
+	 * Here we only have mmap_read_lock(mm).
 	 */
 	if (pte_alloc(mm, pmdp))
 		goto abort;
diff --git a/mm/mmap.c b/mm/mmap.c
index a28778da76a3..79005049fbfc 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1361,7 +1361,7 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
 }
 
 /*
- * The caller must hold down_write(&current->mm->mmap_sem).
+ * The caller must write-lock current->mm->mmap_lock.
  */
 unsigned long do_mmap(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index af3de7a92a9f..3b5d78dfebe9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -577,8 +577,8 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 	/*
 	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
 	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
-	 * under mmap_sem for reading because it serializes against the
-	 * down_write();up_write() cycle in exit_mmap().
+	 * under mmap_lock for reading because it serializes against the
+	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
 	 */
 	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
 		trace_skip_task_reaping(tsk->pid);
@@ -611,7 +611,7 @@ static void oom_reap_task(struct task_struct *tsk)
 	int attempts = 0;
 	struct mm_struct *mm = tsk->signal->oom_mm;
 
-	/* Retry the down_read_trylock(mmap_sem) a few times */
+	/* Retry the mmap_read_trylock(mm) a few times */
 	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
 		schedule_timeout_idle(HZ/10);
 
@@ -629,7 +629,7 @@ done:
 
 	/*
 	 * Hide this mm from OOM killer because it has been either reaped or
-	 * somebody can't call up_write(mmap_sem).
+	 * somebody can't call mmap_write_unlock(mm).
 	 */
 	set_bit(MMF_OOM_SKIP, &mm->flags);
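
For reference, the trylock idiom the oom_kill.c comments describe can be
sketched as follows (illustrative only; try_reap_mm() is a hypothetical
helper, and mmap_read_trylock() returns true only when the read lock was
acquired):

	static bool try_reap_mm(struct mm_struct *mm)
	{
		if (!mmap_read_trylock(mm))
			return false;	/* lock contended; caller retries */

		/* ... tear down reclaimable mappings under the read lock ... */

		mmap_read_unlock(mm);
		return true;
	}

oom_reap_task() wraps exactly this kind of attempt in a bounded retry
loop (MAX_OOM_REAP_RETRIES), sleeping briefly between attempts.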