| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2024-04-02 22:12:49 +0200 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2024-04-26 05:56:34 +0200 |
| commit | 6c977f36dc36baec8ad15faccbc6a08315eaa195 (patch) | |
| tree | 9297b7f8c7bd0c7c34e6fac0a10655bfdbba7c0a /fs/proc/task_mmu.c | |
| parent | proc: convert clear_refs_pte_range to use a folio (diff) | |
proc: convert smaps_account() to use a folio
Replace seven calls to compound_head() with one.
Link: https://lkml.kernel.org/r/20240402201252.917342-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
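
The one-line summary above points at the hidden cost being removed: each PageXxx()-style helper used by smaps_account() (PageAnon(), PageSwapBacked(), PageDirty(), PageKsm(), PageReferenced(), page_is_young(), page_count()) first resolves the head page of a compound page via compound_head()/page_folio() before doing its real work, so the old code performed that lookup seven times. The sketch below is a minimal user-space model of that pattern, not kernel code: the struct layout, flag bits, and simplified helper bodies are illustrative assumptions (in the real kernel, PageAnon() for instance inspects page->mapping rather than a flags bit), but the before/after shape matches what the patch does.

```c
/*
 * Simplified user-space model (not kernel code) of why caching the folio
 * helps: each PageXxx()-style test on a tail page must first resolve the
 * head page via compound_head(), whereas folio_test_xxx() operates on an
 * already-resolved folio.  The struct layout, flag bits and helper bodies
 * below are illustrative stand-ins, not the real kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct page {
	unsigned long flags;
	unsigned long compound_head;	/* head page address | 1 if tail page */
};

/* In this model, as in the kernel, a folio is just the head page. */
struct folio { struct page page; };

static struct page *compound_head(struct page *page)
{
	/* Tail pages encode the head pointer with bit 0 set. */
	if (page->compound_head & 1)
		return (struct page *)(page->compound_head - 1);
	return page;
}

static struct folio *page_folio(struct page *page)
{
	return (struct folio *)compound_head(page);
}

#define PG_anon		0x1UL
#define PG_dirty	0x2UL

/* "Before": every test hides a compound_head() call. */
static bool PageAnon(struct page *page)
{
	return compound_head(page)->flags & PG_anon;
}

static bool PageDirty(struct page *page)
{
	return compound_head(page)->flags & PG_dirty;
}

/* "After": the caller resolves the folio once and reuses it. */
static bool folio_test_anon(struct folio *folio)
{
	return folio->page.flags & PG_anon;
}

static bool folio_test_dirty(struct folio *folio)
{
	return folio->page.flags & PG_dirty;
}

int main(void)
{
	struct page head = { .flags = PG_anon | PG_dirty };
	struct page tail = { .compound_head = (unsigned long)&head | 1 };

	/* Old style: two hidden head-page lookups. */
	printf("PageAnon=%d PageDirty=%d\n", PageAnon(&tail), PageDirty(&tail));

	/* New style: one lookup, then direct flag tests on the folio. */
	struct folio *folio = page_folio(&tail);
	printf("folio anon=%d dirty=%d\n",
	       folio_test_anon(folio), folio_test_dirty(folio));
	return 0;
}
```

With the folio resolved once at the top of the function, each subsequent test becomes a direct read on the head page, which is the transformation visible in the diff below.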
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r-- | fs/proc/task_mmu.c | 16 |
1 file changed, 9 insertions, 7 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index b94101cd2706..e8d1008a838d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -444,6 +444,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 		bool compound, bool young, bool dirty, bool locked,
 		bool migration)
 {
+	struct folio *folio = page_folio(page);
 	int i, nr = compound ? compound_nr(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
 
@@ -451,27 +452,28 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 	 * First accumulate quantities that depend only on |size| and the type
 	 * of the compound page.
 	 */
-	if (PageAnon(page)) {
+	if (folio_test_anon(folio)) {
 		mss->anonymous += size;
-		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
+		if (!folio_test_swapbacked(folio) && !dirty &&
+		    !folio_test_dirty(folio))
 			mss->lazyfree += size;
 	}
 
-	if (PageKsm(page))
+	if (folio_test_ksm(folio))
 		mss->ksm += size;
 
 	mss->resident += size;
 	/* Accumulate the size in pages that have been accessed. */
-	if (young || page_is_young(page) || PageReferenced(page))
+	if (young || folio_test_young(folio) || folio_test_referenced(folio))
 		mss->referenced += size;
 
 	/*
 	 * Then accumulate quantities that may depend on sharing, or that may
 	 * differ page-by-page.
 	 *
-	 * page_count(page) == 1 guarantees the page is mapped exactly once.
+	 * refcount == 1 guarantees the page is mapped exactly once.
 	 * If any subpage of the compound page mapped with PTE it would elevate
-	 * page_count().
+	 * the refcount.
 	 *
 	 * The page_mapcount() is called to get a snapshot of the mapcount.
 	 * Without holding the page lock this snapshot can be slightly wrong as
@@ -480,7 +482,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 	 * especially for migration entries. Treat regular migration entries
 	 * as mapcount == 1.
 	 */
-	if ((page_count(page) == 1) || migration) {
+	if ((folio_ref_count(folio) == 1) || migration) {
 		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT,
 			dirty, locked, true);
 		return;