Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  29
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index cf25b00f03c8..7079e6b7dbe7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -291,7 +291,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
{
pte_t pte;
swp_entry_t entry;
- struct page *page;
+ struct folio *folio;
spin_lock(ptl);
pte = *ptep;
@@ -302,18 +302,17 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
if (!is_migration_entry(entry))
goto out;
- page = pfn_swap_entry_to_page(entry);
- page = compound_head(page);
+ folio = page_folio(pfn_swap_entry_to_page(entry));
/*
* Once page cache replacement of page migration started, page_count
- * is zero; but we must not call put_and_wait_on_page_locked() without
- * a ref. Use get_page_unless_zero(), and just fault again if it fails.
+ * is zero; but we must not call folio_put_wait_locked() without
+ * a ref. Use folio_try_get(), and just fault again if it fails.
*/
- if (!get_page_unless_zero(page))
+ if (!folio_try_get(folio))
goto out;
pte_unmap_unlock(ptep, ptl);
- put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+ folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
return;
out:
pte_unmap_unlock(ptep, ptl);
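
The two hunks above collapse the old two-step lookup (pfn_swap_entry_to_page() followed by compound_head()) into a single page_folio() call and swap the page-based refcount helpers for their folio counterparts. A minimal sketch of that pattern, using a hypothetical helper name (migration_entry_get_folio() is not part of this patch) and only functions that appear in the diff:

/*
 * Hypothetical helper, for illustration only: take a reference on the
 * folio backing a migration entry, the way the converted code does.
 */
static struct folio *migration_entry_get_folio(swp_entry_t entry)
{
	/* page_folio() already resolves a tail page to its head folio,
	 * so the old explicit compound_head() step is no longer needed. */
	struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));

	/* folio_try_get() is the folio analogue of get_page_unless_zero():
	 * it only takes a reference while the refcount is non-zero, so it
	 * fails once page cache replacement has dropped the count to zero. */
	if (!folio_try_get(folio))
		return NULL;
	return folio;
}

The caller would then drop the page table lock and call folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE), which releases that reference once the folio lock becomes available, exactly as the PTE path above and the PMD path in the next hunk do.
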
@@ -338,16 +337,16 @@ void migration_entry_wait_huge(struct vm_area_struct *vma,
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
spinlock_t *ptl;
- struct page *page;
+ struct folio *folio;
ptl = pmd_lock(mm, pmd);
if (!is_pmd_migration_entry(*pmd))
goto unlock;
- page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
- if (!get_page_unless_zero(page))
+ folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
+ if (!folio_try_get(folio))
goto unlock;
spin_unlock(ptl);
- put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+ folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
return;
unlock:
spin_unlock(ptl);
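
Read together, the hunk above leaves pmd_migration_entry_wait() looking roughly as follows. This is a sketch assembled from the +/context lines of the hunk; the blank lines, comments, and closing brace are assumptions, not part of the diff:

void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct folio *folio;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	/* Same pattern as the PTE path: resolve the entry to its folio and
	 * only wait if we can still take a reference on it. */
	folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
	if (!folio_try_get(folio))
		goto unlock;
	spin_unlock(ptl);
	/* Drops the reference and sleeps until the folio is unlocked. */
	folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
	return;
unlock:
	spin_unlock(ptl);
}
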
@@ -434,14 +433,6 @@ int folio_migrate_mapping(struct address_space *mapping,
}
xas_store(&xas, newfolio);
- if (nr > 1) {
- int i;
-
- for (i = 1; i < nr; i++) {
- xas_next(&xas);
- xas_store(&xas, newfolio);
- }
- }
/*
* Drop cache reference from old page by unfreezing