author		Kefeng Wang <wangkefeng.wang@huawei.com>	2022-12-09 03:06:18 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-01-19 02:12:42 +0100
commit		6a6fe9ebd571a4092b7d5c1f11e4e1e15d296fa5 (patch)
tree		76ec781c239ef810ee3ff6a0ce8440ddcf9ffd63
parent		mm: huge_memory: convert madvise_free_huge_pmd to use a folio (diff)
mm: swap: convert mark_page_lazyfree() to folio_mark_lazyfree()
mark_page_lazyfree() and its callers have been converted to use folios, so
rename it to folio_mark_lazyfree() and make it take a folio argument instead
of calling page_folio().
Link: https://lkml.kernel.org/r/20221209020618.190306-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
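In caller terms the change looks like this (illustrative sketch, not part of the patch; "folio" stands for whatever folio the caller already holds):

	/* Before: the folio had to be converted back to a page. */
	mark_page_lazyfree(&folio->page);

	/* After: the folio is passed directly. */
	folio_mark_lazyfree(folio);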
-rw-r--r-- | include/linux/swap.h |  2
-rw-r--r-- | mm/huge_memory.c     |  2
-rw-r--r-- | mm/madvise.c         |  2
-rw-r--r-- | mm/swap.c            | 12
4 files changed, 8 insertions, 10 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2787b84eaf12..93f1cebd8545 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -402,7 +402,7 @@ extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
 extern void deactivate_page(struct page *page);
-extern void mark_page_lazyfree(struct page *page);
+void folio_mark_lazyfree(struct folio *folio);
 extern void swap_setup(void);
 
 extern void lru_cache_add_inactive_or_unevictable(struct page *page,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3de266e0aeb2..266c4b557946 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1660,7 +1660,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 	}
 
-	mark_page_lazyfree(&folio->page);
+	folio_mark_lazyfree(folio);
 	ret = true;
 out:
 	spin_unlock(ptl);
diff --git a/mm/madvise.c b/mm/madvise.c
index b6ea204d4e23..479d9a32e44a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -728,7 +728,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 			set_pte_at(mm, addr, pte, ptent);
 			tlb_remove_tlb_entry(tlb, pte, addr);
 		}
-		mark_page_lazyfree(&folio->page);
+		folio_mark_lazyfree(folio);
 	}
 out:
 	if (nr_swap) {
diff --git a/mm/swap.c b/mm/swap.c
index 70e2063ef43a..5e5eba186930 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -757,16 +757,14 @@ void deactivate_page(struct page *page)
 }
 
 /**
- * mark_page_lazyfree - make an anon page lazyfree
- * @page: page to deactivate
+ * folio_mark_lazyfree - make an anon folio lazyfree
+ * @folio: folio to deactivate
  *
- * mark_page_lazyfree() moves @page to the inactive file list.
- * This is done to accelerate the reclaim of @page.
+ * folio_mark_lazyfree() moves @folio to the inactive file list.
+ * This is done to accelerate the reclaim of @folio.
  */
-void mark_page_lazyfree(struct page *page)
+void folio_mark_lazyfree(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	if (folio_test_lru(folio) && folio_test_anon(folio) &&
 	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
 	    !folio_test_unevictable(folio)) {
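For reference, the converted kernel paths above are reached from userspace via madvise(MADV_FREE). A minimal sketch of exercising them (assumes a glibc that exposes MADV_FREE and a kernel with lazyfree support, Linux 4.5+):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4UL << 20;		/* 4 MiB anonymous mapping */
		char *buf;

		buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		memset(buf, 1, len);		/* fault in anon folios */

		/*
		 * MADV_FREE marks the range lazily freeable; inside the
		 * kernel this reaches madvise_free_pte_range() (and, for
		 * THP, madvise_free_huge_pmd()), which after this patch
		 * call folio_mark_lazyfree().
		 */
		if (madvise(buf, len, MADV_FREE)) {
			perror("madvise(MADV_FREE)");
			return 1;
		}

		munmap(buf, len);
		return 0;
	}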