Diffstat (limited to 'mm')
-rw-r--r--	mm/gup.c          |  7
-rw-r--r--	mm/huge_memory.c  |  3
-rw-r--r--	mm/ksm.c          |  1
-rw-r--r--	mm/rmap.c         | 11
4 files changed, 17 insertions, 5 deletions
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -158,6 +158,13 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
 		else
 			folio_ref_add(folio,
 					refs * (GUP_PIN_COUNTING_BIAS - 1));
+		/*
+		 * Adjust the pincount before re-checking the PTE for changes.
+		 * This is essentially a smp_mb() and is paired with a memory
+		 * barrier in page_try_share_anon_rmap().
+		 */
+		smp_mb__after_atomic();
+
 		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
 
 		return folio;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0405d7375bce..2f18896c8f9a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2148,6 +2148,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	 *
 	 * In case we cannot clear PageAnonExclusive(), split the PMD
 	 * only and let try_to_migrate_one() fail later.
+	 *
+	 * See page_try_share_anon_rmap(): invalidate PMD first.
 	 */
 	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
 	if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
@@ -3181,6 +3183,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 
 	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
+	/* See page_try_share_anon_rmap(): invalidate PMD first. */
 	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
 	if (anon_exclusive && page_try_share_anon_rmap(page)) {
 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1095,6 +1095,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 			goto out_unlock;
 		}
 
+		/* See page_try_share_anon_rmap(): clear PTE first. */
 		if (anon_exclusive && page_try_share_anon_rmap(page)) {
 			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
 			goto out_unlock;
diff --git a/mm/rmap.c b/mm/rmap.c
index af775855e58f..6781f693df50 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1574,11 +1574,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
 		} else {
 			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
-			/*
-			 * Nuke the page table entry. When having to clear
-			 * PageAnonExclusive(), we always have to flush.
-			 */
-			if (should_defer_flush(mm, flags) && !anon_exclusive) {
+			/* Nuke the page table entry. */
+			if (should_defer_flush(mm, flags)) {
 				/*
 				 * We clear the PTE but do not flush so potentially
 				 * a remote CPU could still be writing to the folio.
@@ -1709,6 +1706,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
+
+			/* See page_try_share_anon_rmap(): clear PTE first. */
 			if (anon_exclusive &&
 			    page_try_share_anon_rmap(subpage)) {
 				swap_free(entry);
@@ -2040,6 +2039,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			}
 			VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
 				       !anon_exclusive, subpage);
+
+			/* See page_try_share_anon_rmap(): clear PTE first. */
 			if (anon_exclusive &&
 			    page_try_share_anon_rmap(subpage)) {
 				if (folio_test_hugetlb(folio))
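
The comments added above describe a store-buffering style pairing: GUP-fast bumps the pin count and only then re-checks the PTE, while the unmap/migration side clears the PTE (or invalidates the PMD) and only then checks the pin count before clearing PageAnonExclusive; with a full barrier on each side, at least one of the two backs off. Below is a minimal userspace C11 model of that ordering. It is not kernel code and not part of this commit; all identifiers in it (gup_fast, unmap, pincount, pte and so on) are invented for illustration. Build with something like cc -O2 -pthread model.c.

/*
 * Userspace model of the ordering this patch relies on:
 *   GUP-fast side:   pin (store), full barrier, re-read PTE
 *   unmap side:      clear PTE (store), full barrier, read pin count
 * The barriers forbid the outcome where both sides miss the other's store,
 * so a page can never end up pinned *and* stripped of "anon exclusive".
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PTE_PRESENT	1	/* stand-in for a populated PTE */

static atomic_int pte;			/* models the page table entry */
static atomic_int pincount;		/* models the folio pin counter */
static atomic_bool anon_exclusive;	/* models PageAnonExclusive */

static bool pinned;			/* outcome of the GUP-fast side */
static bool exclusive_cleared;		/* outcome of the unmap side */

/* Models GUP-fast: pin first, then re-check the PTE (cf. try_grab_folio()). */
static void *gup_fast(void *arg)
{
	int old = atomic_load_explicit(&pte, memory_order_relaxed);

	(void)arg;
	if (old != PTE_PRESENT)
		return NULL;

	atomic_fetch_add(&pincount, 1);
	/* models smp_mb__after_atomic(), paired with the fence in unmap(). */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load_explicit(&pte, memory_order_relaxed) != old) {
		/* PTE changed under us: back off and drop the pin. */
		atomic_fetch_sub(&pincount, 1);
		return NULL;
	}
	pinned = true;
	return NULL;
}

/* Models the unmap/migration side: clear the PTE first, then check for pins. */
static void *unmap(void *arg)
{
	(void)arg;
	atomic_store_explicit(&pte, 0, memory_order_relaxed);
	/* models the barrier in page_try_share_anon_rmap(). */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load_explicit(&pincount, memory_order_relaxed) > 0) {
		/* Page may be pinned: restore the PTE, keep it exclusive. */
		atomic_store_explicit(&pte, PTE_PRESENT, memory_order_relaxed);
		return NULL;
	}
	atomic_store(&anon_exclusive, false);
	exclusive_cleared = true;
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 20000; i++) {
		pthread_t a, b;

		atomic_store(&pte, PTE_PRESENT);
		atomic_store(&pincount, 0);
		atomic_store(&anon_exclusive, true);
		pinned = exclusive_cleared = false;

		pthread_create(&a, NULL, gup_fast, NULL);
		pthread_create(&b, NULL, unmap, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);

		/* The paired barriers make this combination impossible. */
		assert(!(pinned && exclusive_cleared));
	}
	puts("ok: never pinned with anon_exclusive cleared");
	return 0;
}

Dropping either fence turns this into the classic store-buffering race: both threads can read the stale value, so the page would be pinned while the exclusive flag is cleared, which is exactly the window the kernel patch closes.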