author    Matthew Wilcox (Oracle) <willy@infradead.org>	2023-01-16 20:18:10 +0100
committer Andrew Morton <akpm@linux-foundation.org>	2023-02-03 07:33:18 +0100
commit    cb3184deef10fdc7658fb366189864c89ad118c9 (patch)
tree      aab51a83e92f59c82890a40726adefd1ba149d8a /mm/memory.c
parent    mm: add vma_alloc_zeroed_movable_folio() (diff)
mm: convert do_anonymous_page() to use a folio
Removes six calls to compound_head(); some inline and some external.

Link: https://lkml.kernel.org/r/20230116191813.2145215-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
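The savings come from the fact that page-based helpers must tolerate being
handed a tail page of a compound page, so each one starts with a
compound_head() lookup; the folio variants take a folio, which is never a
tail page, so the lookup disappears. A minimal sketch of the two idioms
(simplified for illustration, not the kernel's verbatim definitions):

	/*
	 * Illustrative sketch only; the real kernel definitions carry
	 * extra cases (e.g. devmap pages) omitted here.
	 */
	static inline void folio_put(struct folio *folio)
	{
		/* Drop one reference; free the folio when the last one goes. */
		if (folio_put_testzero(folio))
			__folio_put(folio);
	}

	static inline void put_page(struct page *page)
	{
		/* page_folio() hides a compound_head() lookup: the caller
		 * may legitimately pass a tail page of a large allocation. */
		struct folio *folio = page_folio(page);

		folio_put(folio);
	}

A caller that already holds a folio, as do_anonymous_page() does after this
patch, can call folio_put() directly and skip the lookup entirely.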
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b6358ffbccaa..950e5a4f2cf1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3996,7 +3996,6 @@ out_release:
 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *page;
 	struct folio *folio;
 	vm_fault_t ret = 0;
 	pte_t entry;
@@ -4051,19 +4050,18 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (!folio)
 		goto oom;
 
-	page = &folio->page;
 	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto oom_free_page;
-	cgroup_throttle_swaprate(page, GFP_KERNEL);
+	cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * preceding stores to the page contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
-	entry = mk_pte(page, vma->vm_page_prot);
+	entry = mk_pte(&folio->page, vma->vm_page_prot);
 	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
@@ -4082,13 +4080,13 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		put_page(page);
+		folio_put(folio);
 		return handle_userfault(vmf, VM_UFFD_MISSING);
 	}
 
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, vmf->address);
-	lru_cache_add_inactive_or_unevictable(page, vma);
+	folio_add_new_anon_rmap(folio, vma, vmf->address);
+	folio_add_lru_vma(folio, vma);
 setpte:
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -4098,10 +4096,10 @@ unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
 release:
-	put_page(page);
+	folio_put(folio);
 	goto unlock;
 oom_free_page:
-	put_page(page);
+	folio_put(folio);
 oom:
 	return VM_FAULT_OOM;
 }
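Condensed from the hunks above, the resulting allocation path reads roughly
as follows (a sketch only: locking, userfaultfd, and error handling omitted;
the remaining &folio->page arguments mark helpers such as mk_pte() and
cgroup_throttle_swaprate() that still took a struct page at this point):

	struct folio *folio;
	pte_t entry;

	/* Allocation helper introduced by the parent commit. */
	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
	if (!folio)
		return VM_FAULT_OOM;

	__folio_mark_uptodate(folio);	/* direct flag write, no compound_head() */
	entry = mk_pte(&folio->page, vma->vm_page_prot);

	folio_add_new_anon_rmap(folio, vma, vmf->address);
	folio_add_lru_vma(folio, vma);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);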