author      Linus Torvalds <torvalds@linux-foundation.org>  2022-01-22 10:28:23 +0100
committer   Linus Torvalds <torvalds@linux-foundation.org>  2022-01-22 10:28:23 +0100
commit      1c52283265a462a100ae63ddf58b4e5884acde86 (patch)
tree        c0d3fa7a02fc0111bc7e56aa1be88e9b92795704 /mm/migrate.c
parent      Merge tag '5.17-rc-part2-smb3-fixes' of git://git.samba.org/sfrench/cifs-2.6 (diff)
parent      mm: hide the FRONTSWAP Kconfig symbol (diff)
Merge branch 'akpm' (patches from Andrew)
Merge yet more updates from Andrew Morton:
 "This is the post-linux-next queue. Material which was based on or
  dependent upon material which was in -next.

  69 patches.

  Subsystems affected by this patch series: mm (migration and zsmalloc),
  sysctl, proc, and lib"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (69 commits)
  mm: hide the FRONTSWAP Kconfig symbol
  frontswap: remove support for multiple ops
  mm: mark swap_lock and swap_active_head static
  frontswap: simplify frontswap_register_ops
  frontswap: remove frontswap_test
  mm: simplify try_to_unuse
  frontswap: remove the frontswap exports
  frontswap: simplify frontswap_init
  frontswap: remove frontswap_curr_pages
  frontswap: remove frontswap_shrink
  frontswap: remove frontswap_tmem_exclusive_gets
  frontswap: remove frontswap_writethrough
  mm: remove cleancache
  lib/stackdepot: always do filter_irq_stacks() in stack_depot_save()
  lib/stackdepot: allow optional init and stack_table allocation by kvmalloc()
  proc: remove PDE_DATA() completely
  fs: proc: store PDE()->data into inode->i_private
  zsmalloc: replace get_cpu_var with local_lock
  zsmalloc: replace per zpage lock with pool->migrate_lock
  locking/rwlocks: introduce write_lock_nested
  ...
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  38
1 file changed, 4 insertions(+), 34 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 18ce840914f0..c7da064b4781 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -291,7 +291,6 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
{
pte_t pte;
swp_entry_t entry;
- struct folio *folio;
spin_lock(ptl);
pte = *ptep;
@@ -302,17 +301,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
if (!is_migration_entry(entry))
goto out;
- folio = page_folio(pfn_swap_entry_to_page(entry));
-
- /*
- * Once page cache replacement of page migration started, page_count
- * is zero; but we must not call folio_put_wait_locked() without
- * a ref. Use folio_try_get(), and just fault again if it fails.
- */
- if (!folio_try_get(folio))
- goto out;
- pte_unmap_unlock(ptep, ptl);
- folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
+ migration_entry_wait_on_locked(entry, ptep, ptl);
return;
out:
pte_unmap_unlock(ptep, ptl);
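
This hunk and the PMD hunk below replace the open-coded folio_try_get() / folio_put_wait_locked() sequence with a single call to migration_entry_wait_on_locked(). As the FIXME removed further down spells out, the point of the rework is to wait for the migration to finish without taking a reference on the page. What the two call sites do establish is the caller-side contract: the helper is entered with the page-table lock held and releases it itself before sleeping, which is why the explicit pte_unmap_unlock() / spin_unlock() calls disappear from the success path. A minimal caller-side sketch of that contract, inferred from the call sites only, not from the helper's implementation:

#include <linux/mm.h>
#include <linux/swapops.h>
#include <linux/migrate.h>

/*
 * Illustrative sketch mirroring __migration_entry_wait() after this
 * patch: check for a migration entry under the PTE lock, then let
 * migration_entry_wait_on_locked() drop the lock and sleep until the
 * migration entry has been replaced.
 */
static void wait_on_pte_migration(pte_t *ptep, spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	/* Unlocks ptl (and unmaps ptep) before sleeping. */
	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

For the PMD-level caller there is no PTE to unmap, so ptep is passed as NULL (see the next hunk).
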
@@ -337,16 +326,11 @@ void migration_entry_wait_huge(struct vm_area_struct *vma,
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
spinlock_t *ptl;
- struct folio *folio;
ptl = pmd_lock(mm, pmd);
if (!is_pmd_migration_entry(*pmd))
goto unlock;
- folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
- if (!folio_try_get(folio))
- goto unlock;
- spin_unlock(ptl);
- folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
+ migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
return;
unlock:
spin_unlock(ptl);
@@ -2431,22 +2415,8 @@ static bool migrate_vma_check_page(struct page *page)
return false;
/* Page from ZONE_DEVICE have one extra reference */
- if (is_zone_device_page(page)) {
- /*
- * Private page can never be pin as they have no valid pte and
- * GUP will fail for those. Yet if there is a pending migration
- * a thread might try to wait on the pte migration entry and
- * will bump the page reference count. Sadly there is no way to
- * differentiate a regular pin from migration wait. Hence to
- * avoid 2 racing thread trying to migrate back to CPU to enter
- * infinite loop (one stopping migration because the other is
- * waiting on pte migration entry). We always return true here.
- *
- * FIXME proper solution is to rework migration_entry_wait() so
- * it does not need to take a reference on page.
- */
- return is_device_private_page(page);
- }
+ if (is_zone_device_page(page))
+ extra++;
/* For file back page */
if (page_mapping(page))
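
The comment removed in this last hunk documented the old workaround: a waiter in migration_entry_wait() could transiently bump page_count() on a device-private page, and such a racing waiter was indistinguishable from a real pin, so migrate_vma_check_page() simply trusted device-private pages. With the wait path no longer taking a page reference (resolving the FIXME above), a ZONE_DEVICE page can instead contribute its one known extra reference to the ordinary accounting. The function is cut off above; the sketch below only illustrates the general expected-reference pattern that the new extra++ feeds into, using a hypothetical helper name rather than the kernel function:

#include <linux/mm.h>

/*
 * Hypothetical illustration, not mm/migrate.c code: treat a page as
 * migratable only if every reference can be accounted for -- its
 * page-table mappings plus a fixed number of "expected" extras, such
 * as the single ZONE_DEVICE reference or the page cache's reference
 * for file-backed pages.
 */
static bool page_has_only_expected_refs(struct page *page, int expected_extra)
{
	if (is_zone_device_page(page))
		expected_extra++;	/* the device's own reference */

	if (page_mapping(page))
		expected_extra++;	/* the page cache's reference */

	/* Anything beyond mappings + expected extras is an unknown pin. */
	return page_count(page) - page_mapcount(page) <= expected_extra;
}
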