summaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorKefeng Wang <wangkefeng.wang@huawei.com>2024-06-18 11:12:39 +0200
committerAndrew Morton <akpm@linux-foundation.org>2024-07-04 04:30:20 +0200
commit78fefd04c123493bbf28434768fa577b2153c79b (patch)
tree3c53f2c78b94554cbda1394c3ec0f90729491e79 /mm/memory.c
parentmm/page_alloc: reword the comment of buddy_merge_likely() (diff)
downloadlinux-78fefd04c123493bbf28434768fa577b2153c79b.tar.xz
linux-78fefd04c123493bbf28434768fa577b2153c79b.zip
mm: memory: convert clear_huge_page() to folio_zero_user()
Patch series "mm: improve clear and copy user folio", v2. Some folio conversions. An improvement is to move address alignment into the caller as it is only needed if we don't know which address will be accessed when clearing/copying user folios. This patch (of 4): Replace clear_huge_page() with folio_zero_user(), and take a folio instead of a page. Directly get number of pages by folio_nr_pages() to remove pages_per_huge_page argument, furthermore, move the address alignment from folio_zero_user() to the callers since the alignment is only needed when we don't know which address will be accessed. Link: https://lkml.kernel.org/r/20240618091242.2140164-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20240618091242.2140164-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: David Hildenbrand <david@redhat.com> Cc: "Huang, Ying" <ying.huang@intel.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Muchun Song <muchun.song@linux.dev> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c34
1 file changed, 16 insertions, 18 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 97ddba866e43..cb26f8713db4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4488,7 +4488,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
goto next;
}
folio_throttle_swaprate(folio, gfp);
- clear_huge_page(&folio->page, vmf->address, 1 << order);
+ folio_zero_user(folio, vmf->address);
return folio;
}
next:
@@ -6441,41 +6441,39 @@ static inline int process_huge_page(
return 0;
}
-static void clear_gigantic_page(struct page *page,
- unsigned long addr,
+static void clear_gigantic_page(struct folio *folio, unsigned long addr,
unsigned int pages_per_huge_page)
{
int i;
- struct page *p;
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
- p = nth_page(page, i);
cond_resched();
- clear_user_highpage(p, addr + i * PAGE_SIZE);
+ clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
}
}
static int clear_subpage(unsigned long addr, int idx, void *arg)
{
- struct page *page = arg;
+ struct folio *folio = arg;
- clear_user_highpage(nth_page(page, idx), addr);
+ clear_user_highpage(folio_page(folio, idx), addr);
return 0;
}
-void clear_huge_page(struct page *page,
- unsigned long addr_hint, unsigned int pages_per_huge_page)
+/**
+ * folio_zero_user - Zero a folio which will be mapped to userspace.
+ * @folio: The folio to zero.
+ * @addr_hint: The address will be accessed or the base address if unclear.
+ */
+void folio_zero_user(struct folio *folio, unsigned long addr_hint)
{
- unsigned long addr = addr_hint &
- ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+ unsigned int nr_pages = folio_nr_pages(folio);
- if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
- clear_gigantic_page(page, addr, pages_per_huge_page);
- return;
- }
-
- process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+ if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+ clear_gigantic_page(folio, addr_hint, nr_pages);
+ else
+ process_huge_page(addr_hint, nr_pages, clear_subpage, folio);
}
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,