author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-03-26 21:28:26 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-26 05:56:19 +0200
commit		e28833bc4ace001e77201d7b69ac389233482864 (patch)
tree		ec9a085caab184ec133eba26111c5f4e7217da09 /mm/huge_memory.c
parent		mm: convert huge_zero_page to huge_zero_folio (diff)
mm: convert do_huge_pmd_anonymous_page to huge_zero_folio
Use folios more widely.

Link: https://lkml.kernel.org/r/20240326202833.523759-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
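The conversion follows the same pattern as the rest of the series: callers now carry a struct folio, and only the spot where an older page-based helper (here mk_pmd()) still wants a struct page reaches for the folio's head page. A minimal sketch of that idiom in kernel C, with the helper name invented for illustration:

	/* Sketch only: build_zero_pmd() is a hypothetical helper, not part of this patch. */
	static pmd_t build_zero_pmd(struct folio *zero_folio, pgprot_t prot)
	{
		/* mk_pmd() still takes a struct page, so pass the folio's head page */
		pmd_t entry = mk_pmd(&zero_folio->page, prot);

		return pmd_mkhuge(entry);
	}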
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6d510d635b1d..098b8ad4bcc7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -971,14 +971,14 @@ gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
}

/* Caller must hold page table lock. */
-static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
- struct page *zero_page)
+ struct folio *zero_folio)
{
pmd_t entry;
if (!pmd_none(*pmd))
return;
- entry = mk_pmd(zero_page, vma->vm_page_prot);
+ entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
entry = pmd_mkhuge(entry);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
@@ -1002,13 +1002,14 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
!mm_forbids_zeropage(vma->vm_mm) &&
transparent_hugepage_use_zero_page()) {
pgtable_t pgtable;
- struct page *zero_page;
+ struct folio *zero_folio;
vm_fault_t ret;
+
pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable))
return VM_FAULT_OOM;
- zero_page = mm_get_huge_zero_page(vma->vm_mm);
- if (unlikely(!zero_page)) {
+ zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
+ if (unlikely(!zero_folio)) {
pte_free(vma->vm_mm, pgtable);
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
@@ -1026,8 +1027,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
ret = handle_userfault(vmf, VM_UFFD_MISSING);
VM_BUG_ON(ret & VM_FAULT_FALLBACK);
} else {
- set_huge_zero_page(pgtable, vma->vm_mm, vma,
- haddr, vmf->pmd, zero_page);
+ set_huge_zero_folio(pgtable, vma->vm_mm, vma,
+ haddr, vmf->pmd, zero_folio);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
spin_unlock(vmf->ptl);
}
@@ -1336,9 +1337,9 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
*/
if (is_huge_zero_pmd(pmd)) {
/*
- * get_huge_zero_page() will never allocate a new page here,
- * since we already have a zero page to copy. It just takes a
- * reference.
+ * mm_get_huge_zero_folio() will never allocate a new
+ * folio here, since we already have a zero page to
+ * copy. It just takes a reference.
*/
mm_get_huge_zero_folio(dst_mm);
goto out_zero_page;
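The comment rewritten in the last hunk makes the ownership model explicit: the huge zero folio is a single shared object, so the fork copy path only takes an extra reference and never allocates. A hedged sketch of that behaviour (the refcount name is hypothetical, not the actual mm/huge_memory.c symbol):

	#include <linux/atomic.h>

	/* Sketch only: models "it just takes a reference" for an already-existing zero folio. */
	static atomic_t zero_folio_refs = ATOMIC_INIT(0);

	static bool get_existing_zero_folio(void)
	{
		/* succeeds only when a zero folio already exists; never allocates one */
		return atomic_inc_not_zero(&zero_folio_refs);
	}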