path: root/mm/khugepaged.c
author     Michel Lespinasse <walken@google.com>  2020-06-09 06:33:54 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-09 18:39:14 +0200
commit     c1e8d7c6a7a682e1405e3e242d32fc377fd196ff (patch)
tree       ef02402b77990834fbb5bdb1f146fc0393cc8987 /mm/khugepaged.c
parent     mmap locking API: convert mmap_sem API comments (diff)
mmap locking API: convert mmap_sem comments
Convert comments that reference mmap_sem to reference mmap_lock instead.

[akpm@linux-foundation.org: fix up linux-next leftovers]
[akpm@linux-foundation.org: s/lockaphore/lock/, per Vlastimil]
[akpm@linux-foundation.org: more linux-next fixups, per Michel]

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-13-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--    mm/khugepaged.c    34
1 file changed, 17 insertions(+), 17 deletions(-)
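For context, the mmap locking API that this series converts to wraps the open-coded rwsem calls on the old mmap_sem field. A minimal sketch of the wrappers (from include/linux/mmap_lock.h; the example function name is illustrative only):

#include <linux/mmap_lock.h>

static void mmap_lock_api_example(struct mm_struct *mm)
{
	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	mmap_write_lock(mm);		/* was: down_write(&mm->mmap_sem) */
	mmap_write_unlock(mm);		/* was: up_write(&mm->mmap_sem) */

	if (mmap_read_trylock(mm))	/* was: down_read_trylock(&mm->mmap_sem) */
		mmap_read_unlock(mm);
	if (mmap_write_trylock(mm))	/* was: down_write_trylock(&mm->mmap_sem) */
		mmap_write_unlock(mm);
}

The diff below touches comments only; all lock/unlock call sites in this file were converted in earlier patches of the series.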
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2c318ad1db20..b043c40a21d4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -534,7 +534,7 @@ void __khugepaged_exit(struct mm_struct *mm)
* under mmap sem read mode). Stop here (after we
* return all pagetables will be destroyed) until
* khugepaged has finished working on the pagetables
- * under the mmap_sem.
+ * under the mmap_lock.
*/
mmap_write_lock(mm);
mmap_write_unlock(mm);
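As a side note on the hunk above, a minimal sketch (the helper name is hypothetical, not part of this patch) of the barrier idiom __khugepaged_exit relies on:

static void khugepaged_wait_for_readers_sketch(struct mm_struct *mm)
{
	/* Acquiring the write lock waits out any work still running under the read lock. */
	mmap_write_lock(mm);
	/* Nothing to modify: taking the lock was the synchronization point. */
	mmap_write_unlock(mm);
}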
@@ -933,8 +933,8 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
#endif
/*
- * If mmap_sem temporarily dropped, revalidate vma
- * before taking mmap_sem.
+ * If mmap_lock temporarily dropped, revalidate vma
+ * before taking mmap_lock.
* Return 0 if succeeds, otherwise return none-zero
* value (scan code).
*/
@@ -966,7 +966,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
* Only done if khugepaged_scan_pmd believes it is worthwhile.
*
* Called and returns without pte mapped or spinlocks held,
- * but with mmap_sem held to protect against vma changes.
+ * but with mmap_lock held to protect against vma changes.
*/
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
@@ -993,7 +993,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
swapped_in++;
ret = do_swap_page(&vmf);
- /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
+ /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
if (ret & VM_FAULT_RETRY) {
mmap_read_lock(mm);
if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
@@ -1047,9 +1047,9 @@ static void collapse_huge_page(struct mm_struct *mm,
gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
/*
- * Before allocating the hugepage, release the mmap_sem read lock.
+ * Before allocating the hugepage, release the mmap_lock read lock.
* The allocation can take potentially a long time if it involves
- * sync compaction, and we do not need to hold the mmap_sem during
+ * sync compaction, and we do not need to hold the mmap_lock during
* that. We will recheck the vma after taking it again in write mode.
*/
mmap_read_unlock(mm);
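A condensed sketch of the pattern the comment above describes (the helper and its exact return codes are illustrative; khugepaged_alloc_page and hugepage_vma_revalidate are this file's own functions): drop the read lock for the potentially slow allocation, then re-take it and revalidate the vma before trusting it again:

static int collapse_alloc_sketch(struct mm_struct *mm, unsigned long address,
				 struct page **hpage, gfp_t gfp, int node)
{
	struct vm_area_struct *vma;

	mmap_read_unlock(mm);		/* allocation may do sync compaction; do not hold mmap_lock */
	if (!khugepaged_alloc_page(hpage, gfp, node))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	mmap_read_lock(mm);		/* re-take and recheck: the vma may have changed meanwhile */
	return hugepage_vma_revalidate(mm, address, &vma);
}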
@@ -1080,8 +1080,8 @@ static void collapse_huge_page(struct mm_struct *mm,
}
/*
- * __collapse_huge_page_swapin always returns with mmap_sem locked.
- * If it fails, we release mmap_sem and jump out_nolock.
+ * __collapse_huge_page_swapin always returns with mmap_lock locked.
+ * If it fails, we release mmap_lock and jump out_nolock.
* Continuing to collapse causes inconsistency.
*/
if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
@@ -1345,7 +1345,7 @@ out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret) {
node = khugepaged_find_target_node();
- /* collapse_huge_page will return with the mmap_sem released */
+ /* collapse_huge_page will return with the mmap_lock released */
collapse_huge_page(mm, address, hpage, node,
referenced, unmapped);
}
@@ -1547,7 +1547,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* later.
*
* Not that vma->anon_vma check is racy: it can be set up after
- * the check but before we took mmap_sem by the fault path.
+ * the check but before we took mmap_lock by the fault path.
* But page lock would prevent establishing any new ptes of the
* page, so we are safe.
*
@@ -1567,10 +1567,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
if (!pmd)
continue;
/*
- * We need exclusive mmap_sem to retract page table.
+ * We need exclusive mmap_lock to retract page table.
*
* We use trylock due to lock inversion: we need to acquire
- * mmap_sem while holding page lock. Fault path does it in
+ * mmap_lock while holding page lock. Fault path does it in
* reverse order. Trylock is a way to avoid deadlock.
*/
if (mmap_write_trylock(vma->vm_mm)) {
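To make the lock-ordering point above concrete, a minimal sketch (hypothetical helper, not in this file) of the trylock idiom: the fault path takes mmap_lock and then the page lock, so here, with the page lock already held, blocking on mmap_lock could deadlock:

static void retract_one_sketch(struct vm_area_struct *vma)
{
	/* Opposite lock order to the fault path, so never block here. */
	if (!mmap_write_trylock(vma->vm_mm))
		return;			/* contended: skip; khugepaged revisits later */
	/* Exclusive mmap_lock held: the page table can be retracted safely here. */
	mmap_write_unlock(vma->vm_mm);
}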
@@ -2058,7 +2058,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
*/
vma = NULL;
if (unlikely(!mmap_read_trylock(mm)))
- goto breakouterloop_mmap_sem;
+ goto breakouterloop_mmap_lock;
if (likely(!khugepaged_test_exit(mm)))
vma = find_vma(mm, khugepaged_scan.address);
@@ -2115,15 +2115,15 @@ skip:
khugepaged_scan.address += HPAGE_PMD_SIZE;
progress += HPAGE_PMD_NR;
if (ret)
- /* we released mmap_sem so break loop */
- goto breakouterloop_mmap_sem;
+ /* we released mmap_lock so break loop */
+ goto breakouterloop_mmap_lock;
if (progress >= pages)
goto breakouterloop;
}
}
breakouterloop:
mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
-breakouterloop_mmap_sem:
+breakouterloop_mmap_lock:
spin_lock(&khugepaged_mm_lock);
VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
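Finally, a minimal sketch (hypothetical helper name; khugepaged_test_exit is the file's own function) of the scan-entry idiom behind the breakouterloop_mmap_lock label above: khugepaged only ever trylocks mmap_lock for reading, so a busy or exiting mm is skipped rather than waited on:

static bool khugepaged_enter_mm_sketch(struct mm_struct *mm)
{
	if (unlikely(!mmap_read_trylock(mm)))
		return false;		/* contended: take the breakouterloop_mmap_lock path */
	if (unlikely(khugepaged_test_exit(mm))) {
		mmap_read_unlock(mm);	/* mm is being torn down; drop it */
		return false;
	}
	return true;			/* caller scans vmas, then mmap_read_unlock()s */
}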