diff options
author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2022-01-29 16:46:04 +0100 |
---|---|---|
committer | Matthew Wilcox (Oracle) <willy@infradead.org> | 2022-03-21 18:01:35 +0100 |
commit | 6d42dba3ccf326551f6e413fb497c31e8812b98f (patch) | |
tree | 899ffa2ccc48120e0bc2b3c7c0b5d24f6d1c6eac | |
parent | mm/migrate: Convert remove_migration_ptes() to folios (diff) | |
download | linux-6d42dba3ccf326551f6e413fb497c31e8812b98f.tar.xz linux-6d42dba3ccf326551f6e413fb497c31e8812b98f.zip |
mm/damon: Convert damon_pa_mkold() to use a folio
Ensure that we're passing the entire folio to rmap_walk().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
-rw-r--r-- | mm/damon/paddr.c | 16 |
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index cb45d49c731d..699ebfd70ede 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -33,6 +33,7 @@ static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma, static void damon_pa_mkold(unsigned long paddr) { + struct folio *folio; struct page *page = damon_get_page(PHYS_PFN(paddr)); struct rmap_walk_control rwc = { .rmap_one = __damon_pa_mkold, @@ -42,23 +43,24 @@ static void damon_pa_mkold(unsigned long paddr) if (!page) return; + folio = page_folio(page); - if (!page_mapped(page) || !page_rmapping(page)) { - set_page_idle(page); + if (!folio_mapped(folio) || !folio_raw_mapping(folio)) { + folio_set_idle(folio); goto out; } - need_lock = !PageAnon(page) || PageKsm(page); - if (need_lock && !trylock_page(page)) + need_lock = !folio_test_anon(folio) || folio_test_ksm(folio); + if (need_lock && !folio_trylock(folio)) goto out; - rmap_walk(page, &rwc); + rmap_walk(&folio->page, &rwc); if (need_lock) - unlock_page(page); + folio_unlock(folio); out: - put_page(page); + folio_put(folio); } static void __damon_pa_prepare_access_check(struct damon_ctx *ctx, |