author    Matthew Wilcox (Oracle) <willy@infradead.org>  2022-09-02 21:46:37 +0200
committer Andrew Morton <akpm@linux-foundation.org>      2022-10-03 23:02:52 +0200
commit    e4a2ed94908cc0104b8826ed8d831661ed1c3ea1 (patch)
tree      c65dbc24771ab1b58322ff29b017c77630469588
parent    swap: convert swap_writepage() to use a folio (diff)
mm: convert do_wp_page() to use a folio
Saves many calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-42-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  mm/memory.c  40
1 file changed, 20 insertions(+), 20 deletions(-)
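
Before the diff, a brief illustration that is not part of the patch: the page-based helpers such as PageAnon() and page_count() resolve compound_head() on every call, while page_folio() resolves the head page once so the folio_*() helpers can skip that lookup. The userspace sketch below models that cost with hypothetical toy_* names; it is a toy model, not kernel code.

/*
 * Toy userspace model of why the folio conversion saves work: each
 * page-based test on a possibly-tail page hops to the head page first,
 * while a folio pointer already refers to the head.  All toy_* names
 * are hypothetical stand-ins for the kernel helpers.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	struct toy_page *head;	/* head page of the compound page */
	bool anon;
	int refcount;
};

/* Stand-in for compound_head(): hop to the head page. */
static struct toy_page *toy_compound_head(struct toy_page *page)
{
	return page->head;
}

/* Page-based helpers resolve the head on every call. */
static bool toy_page_anon(struct toy_page *page)
{
	return toy_compound_head(page)->anon;
}

static int toy_page_count(struct toy_page *page)
{
	return toy_compound_head(page)->refcount;
}

/* Folio-based helpers assume they were handed the head already. */
static bool toy_folio_anon(struct toy_page *folio) { return folio->anon; }
static int toy_folio_ref_count(struct toy_page *folio) { return folio->refcount; }

int main(void)
{
	struct toy_page head = { .head = &head, .anon = true, .refcount = 2 };
	struct toy_page tail = { .head = &head };

	/* Old style: two tests, two head lookups. */
	printf("page:  anon=%d count=%d\n",
	       toy_page_anon(&tail), toy_page_count(&tail));

	/* New style: resolve the head once, then test the folio directly. */
	struct toy_page *folio = toy_compound_head(&tail);
	printf("folio: anon=%d count=%d\n",
	       toy_folio_anon(folio), toy_folio_ref_count(folio));
	return 0;
}

In do_wp_page() the same folio is tested several times (anon, KSM, LRU, swapcache, refcount), which is why hoisting a single page_folio() call saves many compound_head() calls.
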
diff --git a/mm/memory.c b/mm/memory.c
index 0018df3f0cc2..2f1397b7c77d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3368,6 +3368,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
{
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));
@@ -3414,48 +3415,47 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
- if (PageAnon(vmf->page)) {
- struct page *page = vmf->page;
-
+ folio = page_folio(vmf->page);
+ if (folio_test_anon(folio)) {
/*
* If the page is exclusive to this process we must reuse the
* page without further checks.
*/
- if (PageAnonExclusive(page))
+ if (PageAnonExclusive(vmf->page))
goto reuse;
/*
- * We have to verify under page lock: these early checks are
- * just an optimization to avoid locking the page and freeing
+ * We have to verify under folio lock: these early checks are
+ * just an optimization to avoid locking the folio and freeing
* the swapcache if there is little hope that we can reuse.
*
- * PageKsm() doesn't necessarily raise the page refcount.
+ * KSM doesn't necessarily raise the folio refcount.
*/
- if (PageKsm(page) || page_count(page) > 3)
+ if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
goto copy;
- if (!PageLRU(page))
+ if (!folio_test_lru(folio))
/*
* Note: We cannot easily detect+handle references from
- * remote LRU pagevecs or references to PageLRU() pages.
+ * remote LRU pagevecs or references to LRU folios.
*/
lru_add_drain();
- if (page_count(page) > 1 + PageSwapCache(page))
+ if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
goto copy;
- if (!trylock_page(page))
+ if (!folio_trylock(folio))
goto copy;
- if (PageSwapCache(page))
- try_to_free_swap(page);
- if (PageKsm(page) || page_count(page) != 1) {
- unlock_page(page);
+ if (folio_test_swapcache(folio))
+ folio_free_swap(folio);
+ if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
+ folio_unlock(folio);
goto copy;
}
/*
- * Ok, we've got the only page reference from our mapping
- * and the page is locked, it's dark out, and we're wearing
+ * Ok, we've got the only folio reference from our mapping
+ * and the folio is locked, it's dark out, and we're wearing
* sunglasses. Hit it.
*/
- page_move_anon_rmap(page, vma);
- unlock_page(page);
+ page_move_anon_rmap(vmf->page, vma);
+ folio_unlock(folio);
reuse:
if (unlikely(unshare)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);