author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2024-08-09 13:48:50 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2024-09-02 05:26:06 +0200
commit		310183de7bb2ed114a80d057690ddb23d0cfe814 (patch)
tree		a76ab4f6e564a935622087fd570c9120e83255a2 /mm/page_alloc.c
parent		mm: accept memory in __alloc_pages_bulk() (diff)
mm: introduce PageUnaccepted() page type
The new page type allows physical memory scanners to detect unaccepted
memory and handle it accordingly.

The page type is serialized with zone lock.

Link: https://lkml.kernel.org/r/20240809114854.3745464-5-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
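As a minimal sketch (not part of this patch), the snippet below shows how a physical memory scanner might consult the new page type: it takes zone->lock because the type is only stable under it, and uses the PageUnaccepted() accessor introduced by this series. The helper name scanner_skip_unaccepted() is hypothetical.

/*
 * Illustrative sketch only, not from this patch: a scanner deciding
 * whether to skip a page because it is still unaccepted.  The
 * PageUnaccepted() check is done under zone->lock, since the page
 * type is serialized with it.
 */
static bool scanner_skip_unaccepted(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	bool unaccepted;

	spin_lock_irqsave(&zone->lock, flags);
	unaccepted = PageUnaccepted(page);
	spin_unlock_irqrestore(&zone->lock, flags);

	return unaccepted;
}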
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9dde1b04a04e..fec7ed5a04f2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6963,6 +6963,7 @@ static bool try_to_accept_memory_one(struct zone *zone)
 	account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
 	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+	__ClearPageUnaccepted(page);
 	spin_unlock_irqrestore(&zone->lock, flags);
 
 	accept_page(page, MAX_PAGE_ORDER);
@@ -7021,6 +7022,7 @@ static bool __free_unaccepted(struct page *page)
 	list_add_tail(&page->lru, &zone->unaccepted_pages);
 	account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
 	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
+	__SetPageUnaccepted(page);
 	spin_unlock_irqrestore(&zone->lock, flags);
 
 	if (first)