Diffstat (limited to 'mm/migrate_device.c')
-rw-r--r--  mm/migrate_device.c  64
1 file changed, 34 insertions(+), 30 deletions(-)
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 8ac1f79f754a..b6c27c76e1a0 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -107,6 +107,7 @@ again:
for (; addr < end; addr += PAGE_SIZE, ptep++) {
unsigned long mpfn = 0, pfn;
+ struct folio *folio;
struct page *page;
swp_entry_t entry;
pte_t pte;
@@ -168,41 +169,43 @@ again:
}
/*
- * By getting a reference on the page we pin it and that blocks
+ * By getting a reference on the folio we pin it and that blocks
* any kind of migration. Side effect is that it "freezes" the
* pte.
*
- * We drop this reference after isolating the page from the lru
- * for non device page (device page are not on the lru and thus
+ * We drop this reference after isolating the folio from the lru
+ * for non device folio (device folio are not on the lru and thus
* can't be dropped from it).
*/
- get_page(page);
+ folio = page_folio(page);
+ folio_get(folio);
/*
- * We rely on trylock_page() to avoid deadlock between
+ * We rely on folio_trylock() to avoid deadlock between
* concurrent migrations where each is waiting on the others
- * page lock. If we can't immediately lock the page we fail this
+ * folio lock. If we can't immediately lock the folio we fail this
* migration as it is only best effort anyway.
*
- * If we can lock the page it's safe to set up a migration entry
- * now. In the common case where the page is mapped once in a
+ * If we can lock the folio it's safe to set up a migration entry
+ * now. In the common case where the folio is mapped once in a
* single process setting up the migration entry now is an
* optimisation to avoid walking the rmap later with
* try_to_migrate().
*/
- if (trylock_page(page)) {
+ if (folio_trylock(folio)) {
bool anon_exclusive;
pte_t swp_pte;
flush_cache_page(vma, addr, pte_pfn(pte));
- anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
+ anon_exclusive = folio_test_anon(folio) &&
+ PageAnonExclusive(page);
if (anon_exclusive) {
pte = ptep_clear_flush(vma, addr, ptep);
- if (page_try_share_anon_rmap(page)) {
+ if (folio_try_share_anon_rmap_pte(folio, page)) {
set_pte_at(mm, addr, ptep, pte);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
mpfn = 0;
goto next;
}
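
The comment in the hunk above describes a best-effort pinning pattern: take a folio reference so the PTE is effectively frozen, then use folio_trylock() rather than a blocking lock, because two concurrent migrations that each hold one folio lock while waiting on the other's would deadlock. A minimal standalone sketch of that pattern follows; the helper name try_pin_and_lock_folio() is made up for illustration and is not a kernel API.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical helper, sketching the pin-then-trylock pattern above. */
static bool try_pin_and_lock_folio(struct page *page, struct folio **foliop)
{
	struct folio *folio = page_folio(page);

	/* The extra reference pins the folio and blocks other migrations. */
	folio_get(folio);

	/*
	 * Best effort only: never sleep on the lock, or two migrations
	 * waiting on each other's folio lock would deadlock.
	 */
	if (!folio_trylock(folio)) {
		folio_put(folio);
		return false;	/* caller fails this page's migration */
	}

	*foliop = folio;
	return true;
}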
@@ -214,7 +217,7 @@ again:
/* Set the dirty flag on the folio now the pte is gone. */
if (pte_dirty(pte))
- folio_mark_dirty(page_folio(page));
+ folio_mark_dirty(folio);
/* Setup special migration page table entry */
if (mpfn & MIGRATE_PFN_WRITE)
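
The context line above is where the "special migration page table entry" gets built: a swap-format PTE encoding the PFN plus the original access mode, so a thread faulting on it waits for the migration to complete. A simplified, hedged reconstruction of that entry setup (soft-dirty and uffd-wp bookkeeping elided; the helper name make_migration_pte() is invented for illustration):

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/swapops.h>

/* Sketch: build a migration entry preserving writability and the
 * anon-exclusive state checked earlier in the walk. */
static pte_t make_migration_pte(struct page *page, unsigned long mpfn,
				bool anon_exclusive)
{
	swp_entry_t entry;

	if (mpfn & MIGRATE_PFN_WRITE)
		entry = make_writable_migration_entry(page_to_pfn(page));
	else if (anon_exclusive)
		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));

	return swp_entry_to_pte(entry);
}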
@@ -248,16 +251,16 @@ again:
/*
* This is like regular unmap: we remove the rmap and
- * drop page refcount. Page won't be freed, as we took
- * a reference just above.
+ * drop the folio refcount. The folio won't be freed, as
+ * we took a reference just above.
*/
- page_remove_rmap(page, vma, false);
- put_page(page);
+ folio_remove_rmap_pte(folio, page, vma);
+ folio_put(folio);
if (pte_present(pte))
unmapped++;
} else {
- put_page(page);
+ folio_put(folio);
mpfn = 0;
}
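
For reference, the mpfn values this walk stores into the src array (and zeroes on failure, as in the else branch above) use the encoding helpers from <linux/migrate.h>: the PFN sits above MIGRATE_PFN_SHIFT with capability flags below it. A small illustrative sketch; the helper name mpfn_slot_ok() is hypothetical:

#include <linux/migrate.h>
#include <linux/mm.h>

/* Hypothetical helper showing the src-array mpfn encoding round trip. */
static bool mpfn_slot_ok(struct page *page, unsigned long *src_slot)
{
	/* Encode: pfn above MIGRATE_PFN_SHIFT, capability flags below. */
	*src_slot = migrate_pfn(page_to_pfn(page)) |
		    MIGRATE_PFN_MIGRATE | MIGRATE_PFN_WRITE;

	/* Decode: migrate_pfn_to_page() returns NULL for a zeroed slot. */
	if (!migrate_pfn_to_page(*src_slot))
		return false;

	/* 'mpfn = 0' in the walk above marks a slot as not migrating. */
	return *src_slot & MIGRATE_PFN_MIGRATE;
}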
@@ -564,6 +567,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
struct page *page,
unsigned long *src)
{
+ struct folio *folio = page_folio(page);
struct vm_area_struct *vma = migrate->vma;
struct mm_struct *mm = vma->vm_mm;
bool flush = false;
@@ -596,17 +600,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
goto abort;
if (unlikely(anon_vma_prepare(vma)))
goto abort;
- if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+ if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
goto abort;
/*
- * The memory barrier inside __SetPageUptodate makes sure that
- * preceding stores to the page contents become visible before
+ * The memory barrier inside __folio_mark_uptodate makes sure that
+ * preceding stores to the folio contents become visible before
* the set_pte_at() write.
*/
- __SetPageUptodate(page);
+ __folio_mark_uptodate(folio);
- if (is_device_private_page(page)) {
+ if (folio_is_device_private(folio)) {
swp_entry_t swp_entry;
if (vma->vm_flags & VM_WRITE)
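
Two techniques meet in this hunk: the barrier inside __folio_mark_uptodate() publishes the folio contents before any PTE can expose them, and a device-private folio is mapped with a swap-format PTE, since it is never "present" to the CPU. A hedged standalone sketch, with locking and error paths elided and the helper name map_device_private_folio() invented for illustration:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>

/* Sketch: map a freshly written device-private folio at addr. */
static void map_device_private_folio(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep,
				     struct folio *folio)
{
	unsigned long pfn = folio_pfn(folio);
	swp_entry_t swp_entry;
	pte_t entry;

	/* Barrier: folio contents must be visible before the PTE is. */
	__folio_mark_uptodate(folio);

	/* Device-private memory is never present; encode a swap PTE
	 * recording the PFN and the write permission instead. */
	if (vma->vm_flags & VM_WRITE)
		swp_entry = make_writable_device_private_entry(pfn);
	else
		swp_entry = make_readable_device_private_entry(pfn);
	entry = swp_entry_to_pte(swp_entry);

	set_pte_at(vma->vm_mm, addr, ptep, entry);
}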
@@ -617,8 +621,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
page_to_pfn(page));
entry = swp_entry_to_pte(swp_entry);
} else {
- if (is_zone_device_page(page) &&
- !is_device_coherent_page(page)) {
+ if (folio_is_zone_device(folio) &&
+ !folio_is_device_coherent(folio)) {
pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
goto abort;
}
@@ -652,10 +656,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
goto unlock_abort;
inc_mm_counter(mm, MM_ANONPAGES);
- page_add_new_anon_rmap(page, vma, addr);
- if (!is_zone_device_page(page))
- lru_cache_add_inactive_or_unevictable(page, vma);
- get_page(page);
+ folio_add_new_anon_rmap(folio, vma, addr);
+ if (!folio_is_zone_device(folio))
+ folio_add_lru_vma(folio, vma);
+ folio_get(folio);
if (flush) {
flush_cache_page(vma, addr, pte_pfn(orig_pte));
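
The tail of this function is the canonical sequence for publishing a brand-new anonymous folio, which the hunk above converts wholesale to folio calls. Restated as a hedged sketch (PTE construction, locking, and the flush path are elided; insert_new_anon_folio() is an illustrative name, and folio_add_new_anon_rmap() is shown with the three-argument form used in this patch):

#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

/* Sketch: wire a new anonymous folio into a VMA at addr. */
static int insert_new_anon_folio(struct vm_area_struct *vma,
				 unsigned long addr, pte_t *ptep,
				 pte_t entry, struct folio *folio)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mem_cgroup_charge(folio, mm, GFP_KERNEL))
		return -ENOMEM;

	/* Publish contents before the PTE can expose them. */
	__folio_mark_uptodate(folio);

	inc_mm_counter(mm, MM_ANONPAGES);
	/* New folio, mapped exactly once: rmap can take the fast path. */
	folio_add_new_anon_rmap(folio, vma, addr);
	/* Device folios have no LRU; only system memory goes on it. */
	if (!folio_is_zone_device(folio))
		folio_add_lru_vma(folio, vma);
	folio_get(folio);	/* reference held by the new mapping */

	set_pte_at(mm, addr, ptep, entry);
	return 0;
}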