From 1c4c3b99c03d3e72ac643b01edcb83c0c0aafd46 Mon Sep 17 00:00:00 2001
From: Jiang Biao
Date: Tue, 21 Aug 2018 21:53:13 -0700
Subject: mm: fix page_freeze_refs and page_unfreeze_refs in comments

page_freeze_refs/page_unfreeze_refs have already been replaced by
page_ref_freeze/page_ref_unfreeze, but they are not modified in the
comments.

Link: http://lkml.kernel.org/r/1532590226-106038-1-git-send-email-jiang.biao2@zte.com.cn
Signed-off-by: Jiang Biao
Acked-by: Michal Hocko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/ksm.c            | 4 ++--
 mm/memory-failure.c | 2 +-
 mm/vmscan.c         | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 2621be57bd95..1bd514c9e5d0 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -703,7 +703,7 @@ again:
 	 * We cannot do anything with the page while its refcount is 0.
 	 * Usually 0 means free, or tail of a higher-order page: in which
 	 * case this node is no longer referenced, and should be freed;
-	 * however, it might mean that the page is under page_freeze_refs().
+	 * however, it might mean that the page is under page_ref_freeze().
 	 * The __remove_mapping() case is easy, again the node is now stale;
 	 * but if page is swapcache in migrate_page_move_mapping(), it might
 	 * still be our page, in which case it's essential to keep the node.
@@ -714,7 +714,7 @@ again:
 	 * work here too. We have chosen the !PageSwapCache test to
 	 * optimize the common case, when the page is or is about to
 	 * be freed: PageSwapCache is cleared (under spin_lock_irq)
-	 * in the freeze_refs section of __remove_mapping(); but Anon
+	 * in the ref_freeze section of __remove_mapping(); but Anon
 	 * page->mapping reset to NULL later, in free_pages_prepare().
 	 */
 	if (!PageSwapCache(page))
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9d142b9b86dc..c83a1746812f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1167,7 +1167,7 @@ int memory_failure(unsigned long pfn, int flags)
 	 * R/W the page; let's pray that the page has been
 	 * used and will be freed some time later.
 	 * In fact it's dangerous to directly bump up page count from 0,
-	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
+	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
 	 */
 	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
 		if (is_free_buddy_page(p)) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3c6e2bfee427..7e7d25504651 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -903,7 +903,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		refcount = 2;
 	if (!page_ref_freeze(page, refcount))
 		goto cannot_free;
-	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
+	/* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
 	if (unlikely(PageDirty(page))) {
 		page_ref_unfreeze(page, refcount);
 		goto cannot_free;
--
cgit v1.2.3
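
For context, the comments touched by this patch describe the refcount-freeze trick in __remove_mapping(): page_ref_freeze() does an atomic cmpxchg of the expected refcount down to 0, so the page can only be torn down if nobody has taken an extra reference in the meantime, and page_ref_unfreeze() restores the count if the free has to be aborted (e.g. the page turns out to be dirty). The sketch below is a minimal userspace illustration of that pattern, not kernel code; the names fake_page, fake_ref_freeze and fake_ref_unfreeze are made up for the example.

/*
 * Userspace sketch of the cmpxchg-based freeze pattern behind
 * page_ref_freeze()/page_ref_unfreeze().  Illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

struct fake_page {
	atomic_int refcount;
};

/* Succeeds only if refcount == count; on success the count becomes 0. */
static int fake_ref_freeze(struct fake_page *page, int count)
{
	int expected = count;
	return atomic_compare_exchange_strong(&page->refcount, &expected, 0);
}

/* Restores the previously frozen count if freeing must be aborted. */
static void fake_ref_unfreeze(struct fake_page *page, int count)
{
	atomic_store(&page->refcount, count);
}

int main(void)
{
	struct fake_page page = { .refcount = 2 };

	if (fake_ref_freeze(&page, 2)) {
		/* Analogue of the PageDirty() recheck in __remove_mapping(). */
		int dirty = 0;
		if (dirty) {
			fake_ref_unfreeze(&page, 2);
			printf("freeze undone, page kept\n");
		} else {
			printf("refcount frozen, page can be freed\n");
		}
	} else {
		printf("concurrent reference taken, cannot free\n");
	}
	return 0;
}

Because the freeze fails whenever the current count differs from the expected value, a racing get_page()-style increment is enough to keep the page alive; this is also why the memory-failure.c comment warns against bumping a refcount up from 0 behind the freeze's back.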