| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2024-04-05 17:32:27 +0200 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2024-04-26 05:56:44 +0200 |
| commit | 9f100e3b37590828ae23b0210ee634d14b28b8e8 | |
| tree | 18da8c0270f6ef2122375fb821e1a1827fe03353 | |
| parent | mm: combine __folio_put_small, __folio_put_large and __folio_put | |
mm: convert free_zone_device_page to free_zone_device_folio
Both callers already have a folio; pass it in and save a few calls to
compound_head().
Link: https://lkml.kernel.org/r/20240405153228.2563754-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
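
To see where the compound_head() saving comes from: page-based predicates such as PageAnon() must first resolve a possibly-tail struct page to its head page, while the folio-based equivalents can use the pointer as-is, because a folio is never a tail page. Below is a minimal userspace sketch of that pattern, assuming simplified stand-in types; it is not the kernel's actual implementation.

```c
/*
 * Userspace sketch of why passing a folio avoids compound_head() calls.
 * struct page, struct folio and the helpers here are simplified
 * stand-ins for the kernel's real definitions.
 */
#include <stdio.h>

struct page {
	unsigned long flags;
	struct page *head;	/* NULL when this page is itself a head page */
};

/* A folio always wraps a head page, so it never needs head resolution. */
struct folio {
	struct page page;
};

/* Every page-based test has to find the head page first ... */
static struct page *compound_head(struct page *page)
{
	return page->head ? page->head : page;
}

static int PageAnon(struct page *page)
{
	return compound_head(page)->flags & 1;	/* head lookup on every test */
}

/* ... while a folio-based test uses the pointer directly. */
static int folio_test_anon(struct folio *folio)
{
	return folio->page.flags & 1;		/* no head lookup needed */
}

int main(void)
{
	struct folio folio = { .page = { .flags = 1, .head = NULL } };

	/* Same result either way; the folio path just skips the lookup. */
	printf("%d %d\n", PageAnon(&folio.page), folio_test_anon(&folio));
	return 0;
}
```

This is why the converted free_zone_device_folio() in the diff below can use folio_test_anon() and folio_test_large() directly, where the old code's PageAnon() and PageCompound() each re-derived the head page.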
| -rw-r--r-- | mm/internal.h | 2 |
|---|---|---|
| -rw-r--r-- | mm/memremap.c | 30 |
| -rw-r--r-- | mm/swap.c | 4 |

3 files changed, 19 insertions, 17 deletions
```diff
diff --git a/mm/internal.h b/mm/internal.h
index 8fd41f889a95..22152e0c8494 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1165,7 +1165,7 @@ void __vunmap_range_noflush(unsigned long start, unsigned long end);
 int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
 		      unsigned long addr, int page_nid, int *flags);
 
-void free_zone_device_page(struct page *page);
+void free_zone_device_folio(struct folio *folio);
 int migrate_device_coherent_page(struct page *page);
 
 /*
diff --git a/mm/memremap.c b/mm/memremap.c
index 9e9fb1972fff..e1776693e2ea 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -456,21 +456,23 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 }
 EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
-void free_zone_device_page(struct page *page)
+void free_zone_device_folio(struct folio *folio)
 {
-	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
+	if (WARN_ON_ONCE(!folio->page.pgmap->ops ||
+			!folio->page.pgmap->ops->page_free))
 		return;
 
-	mem_cgroup_uncharge(page_folio(page));
+	mem_cgroup_uncharge(folio);
 
 	/*
 	 * Note: we don't expect anonymous compound pages yet. Once supported
 	 * and we could PTE-map them similar to THP, we'd have to clear
 	 * PG_anon_exclusive on all tail pages.
 	 */
-	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
-	if (PageAnon(page))
-		__ClearPageAnonExclusive(page);
+	if (folio_test_anon(folio)) {
+		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+		__ClearPageAnonExclusive(folio_page(folio, 0));
+	}
 
 	/*
 	 * When a device managed page is freed, the folio->mapping field
@@ -481,20 +483,20 @@ void free_zone_device_page(struct page *page)
 	 *
 	 * For other types of ZONE_DEVICE pages, migration is either
 	 * handled differently or not done at all, so there is no need
-	 * to clear page->mapping.
+	 * to clear folio->mapping.
 	 */
-	page->mapping = NULL;
-	page->pgmap->ops->page_free(page);
+	folio->mapping = NULL;
+	folio->page.pgmap->ops->page_free(folio_page(folio, 0));
 
-	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
-	    page->pgmap->type != MEMORY_DEVICE_COHERENT)
+	if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE &&
+	    folio->page.pgmap->type != MEMORY_DEVICE_COHERENT)
 		/*
-		 * Reset the page count to 1 to prepare for handing out the page
+		 * Reset the refcount to 1 to prepare for handing out the page
 		 * again.
 		 */
-		set_page_count(page, 1);
+		folio_set_count(folio, 1);
 	else
-		put_dev_pagemap(page->pgmap);
+		put_dev_pagemap(folio->page.pgmap);
 }
 
 void zone_device_page_init(struct page *page)
diff --git a/mm/swap.c b/mm/swap.c
index 4f3964c983d8..8ae5cd4ed180 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -115,7 +115,7 @@ static void page_cache_release(struct folio *folio)
 void __folio_put(struct folio *folio)
 {
 	if (unlikely(folio_is_zone_device(folio))) {
-		free_zone_device_page(&folio->page);
+		free_zone_device_folio(folio);
 		return;
 	} else if (folio_test_hugetlb(folio)) {
 		free_huge_folio(folio);
@@ -983,7 +983,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			if (put_devmap_managed_page_refs(&folio->page, nr_refs))
 				continue;
 			if (folio_ref_sub_and_test(folio, nr_refs))
-				free_zone_device_page(&folio->page);
+				free_zone_device_folio(folio);
 			continue;
 		}
 
```
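
One detail the conversion keeps: pgmap->ops->page_free() still takes a struct page, which is why the folio code passes folio_page(folio, 0). That callback is supplied by whichever driver created the dev_pagemap. A rough sketch of the provider side, where my_driver_page_free() is a hypothetical callback name but struct dev_pagemap_ops and the page_free() signature are the real API at this commit:

```c
/*
 * Sketch of the provider side of pgmap->ops->page_free().
 * my_driver_page_free() is hypothetical; struct dev_pagemap_ops and
 * the page_free() signature match the kernel API at this commit.
 */
#include <linux/memremap.h>

static void my_driver_page_free(struct page *page)
{
	/* Return the backing device memory to the driver's allocator. */
}

static const struct dev_pagemap_ops my_pagemap_ops = {
	.page_free = my_driver_page_free,
};
```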