Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 84
1 file changed, 38 insertions(+), 46 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 733732e7e0ba..a01baf0454f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -727,7 +727,7 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
unsigned long higher_page_pfn;
struct page *higher_page;
- if (order >= MAX_ORDER - 1)
+ if (order >= MAX_PAGE_ORDER - 1)
return false;
higher_page_pfn = buddy_pfn & pfn;
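For context on the buddy arithmetic in this hunk: the buddy of a 2^order block differs from it only in bit 'order' of the PFN (the kernel computes it via __find_buddy_pfn()), so ANDing a PFN with its buddy's PFN clears that bit and yields the start of the merged order+1 block, which is the higher_page_pfn computed above. A standalone sketch of that identity; the helper below is a local stub for illustration, not the kernel function:

#include <assert.h>
#include <stdio.h>

/* Local stub mirroring the kernel's __find_buddy_pfn(): the buddy of a
 * 2^order block differs from it only in bit 'order' of the PFN. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x1200;	/* illustrative, order-3 aligned */
	unsigned int order = 3;
	unsigned long buddy_pfn = find_buddy_pfn(pfn, order);

	/* ANDing the pair clears bit 'order', i.e. it yields the start of
	 * the merged (order + 1) block. */
	unsigned long higher_page_pfn = buddy_pfn & pfn;

	assert(higher_page_pfn == (pfn & ~(1UL << order)));
	printf("pfn=%#lx buddy=%#lx merged block starts at %#lx\n",
	       pfn, buddy_pfn, higher_page_pfn);
	return 0;
}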
@@ -782,7 +782,7 @@ static inline void __free_one_page(struct page *page,
VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
- while (order < MAX_ORDER) {
+ while (order < MAX_PAGE_ORDER) {
if (compaction_capture(capc, page, order, migratetype)) {
__mod_zone_freepage_state(zone, -(1 << order),
migratetype);
@@ -1059,7 +1059,7 @@ static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return deferred_pages_enabled();
- return page_kasan_tag(page) == 0xff;
+ return page_kasan_tag(page) == KASAN_TAG_KERNEL;
}
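KASAN_TAG_KERNEL is the named form of the 0xff tag that tag-based KASAN assigns to native kernel pointers, so the check is behaviorally unchanged. Its definition in include/linux/kasan-tags.h, quoted for reference:

/* from include/linux/kasan-tags.h */
#define KASAN_TAG_KERNEL	0xFF /* native kernel pointers tag */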
static void kernel_init_pages(struct page *page, int numpages)
@@ -1086,13 +1086,11 @@ static __always_inline bool free_pages_prepare(struct page *page,
trace_mm_page_free(page, order);
kmsan_free_page(page, order);
+ if (memcg_kmem_online() && PageMemcgKmem(page))
+ __memcg_kmem_uncharge_page(page, order);
+
if (unlikely(PageHWPoison(page)) && !order) {
- /*
- * Do not let hwpoison pages hit pcplists/buddy
- * Untie memcg state and reset page's owner
- */
- if (memcg_kmem_online() && PageMemcgKmem(page))
- __memcg_kmem_uncharge_page(page, order);
+ /* Do not let hwpoison pages hit pcplists/buddy */
reset_page_owner(page, order);
page_table_check_free(page, order);
return false;
@@ -1123,8 +1121,6 @@ static __always_inline bool free_pages_prepare(struct page *page,
}
if (PageMappingFlags(page))
page->mapping = NULL;
- if (memcg_kmem_online() && PageMemcgKmem(page))
- __memcg_kmem_uncharge_page(page, order);
if (is_check_pages_enabled()) {
if (free_page_is_bad(page))
bad++;
@@ -1259,7 +1255,6 @@ static void free_one_page(struct zone *zone,
static void __free_pages_ok(struct page *page, unsigned int order,
fpi_t fpi_flags)
{
- unsigned long flags;
int migratetype;
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
@@ -1274,13 +1269,7 @@ static void __free_pages_ok(struct page *page, unsigned int order,
*/
migratetype = get_pfnblock_migratetype(page, pfn);
- spin_lock_irqsave(&zone->lock, flags);
- if (unlikely(has_isolate_pageblock(zone) ||
- is_migrate_isolate(migratetype))) {
- migratetype = get_pfnblock_migratetype(page, pfn);
- }
- __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
- spin_unlock_irqrestore(&zone->lock, flags);
+ free_one_page(zone, page, pfn, order, migratetype, fpi_flags);
__count_vm_events(PGFREE, 1 << order);
}
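The lines dropped here open-coded what free_one_page() already does: take zone->lock, re-read the pageblock migratetype if page isolation may have raced, and hand the page to __free_one_page(). A rough sketch of the callee, reconstructed from the removed lines above rather than quoted from the kernel source:

static void free_one_page(struct zone *zone, struct page *page,
			  unsigned long pfn, unsigned int order,
			  int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		     is_migrate_isolate(migratetype))) {
		/* Isolation can change under us; re-read under the lock. */
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}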
@@ -1308,7 +1297,7 @@ void __free_pages_core(struct page *page, unsigned int order)
atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
if (page_contains_unaccepted(page, order)) {
- if (order == MAX_ORDER && __free_unaccepted(page))
+ if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
return;
accept_page(page, order);
@@ -1338,7 +1327,7 @@ void __free_pages_core(struct page *page, unsigned int order)
*
* Note: the function may return non-NULL struct page even for a page block
* which contains a memory hole (i.e. there is no physical memory for a subset
- * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
+ * of the pfn range). For example, if the pageblock order is MAX_PAGE_ORDER, which
* will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
* even though the start pfn is online and valid. This should be safe most of
* the time because struct pages are still initialized via init_unavailable_range()
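The "2 sub-sections" remark can be sanity-checked with simple arithmetic: with 4 KiB pages an order-MAX_PAGE_ORDER pageblock covers 4 MiB, while a SPARSEMEM_VMEMMAP sub-section covers 2 MiB, so such a pageblock always spans two sub-sections and its tail pfn can land in an unpopulated one. A standalone sketch of that arithmetic; the shift values are the common x86-64 ones and are assumptions here, not taken from this patch:

#include <stdio.h>

#define PAGE_SHIFT		12	/* 4 KiB pages (assumed) */
#define MAX_PAGE_ORDER		10	/* usual default (assumed) */
#define SUBSECTION_SHIFT	21	/* 2 MiB sub-sections (assumed) */

int main(void)
{
	unsigned long pageblock_bytes = 1UL << (MAX_PAGE_ORDER + PAGE_SHIFT);
	unsigned long subsection_bytes = 1UL << SUBSECTION_SHIFT;

	printf("order-%d pageblock: %lu MiB, sub-section: %lu MiB -> spans %lu sub-sections\n",
	       MAX_PAGE_ORDER, pageblock_bytes >> 20, subsection_bytes >> 20,
	       pageblock_bytes / subsection_bytes);
	return 0;
}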
@@ -1571,7 +1560,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
struct page *page;
/* Find a page of the appropriate size in the preferred list */
- for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
+ for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
area = &(zone->free_area[current_order]);
page = get_page_from_free_area(area, migratetype);
if (!page)
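The loop-bound changes in the hunk above and in the similar hunks below rely on NR_PAGE_ORDERS being defined as MAX_PAGE_ORDER + 1 in include/linux/mmzone.h, i.e. the number of free_area lists per zone, so iterating while order < NR_PAGE_ORDERS visits exactly the same orders as the old order <= MAX_ORDER bound. A standalone illustration, with the constants stubbed to the usual x86-64 default of 10:

#include <assert.h>
#include <stdio.h>

/* Stubs for illustration; the kernel defines these in include/linux/mmzone.h. */
#define MAX_PAGE_ORDER	10
#define NR_PAGE_ORDERS	(MAX_PAGE_ORDER + 1)

int main(void)
{
	int old_style = 0, new_style = 0, order;

	for (order = 0; order <= MAX_PAGE_ORDER; order++)	/* old bound */
		old_style++;
	for (order = 0; order < NR_PAGE_ORDERS; order++)	/* new bound */
		new_style++;

	assert(old_style == new_style);
	printf("both loops visit %d orders (0..%d)\n", new_style, MAX_PAGE_ORDER);
	return 0;
}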
@@ -1884,10 +1873,14 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
unsigned long max_managed, flags;
/*
- * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
+ * The number reserved as: minimum is 1 pageblock, maximum is
+ * roughly 1% of a zone. But if 1% of a zone falls below a
+ * pageblock size, then don't reserve any pageblocks.
* Check is race-prone but harmless.
*/
- max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
+ if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
+ return;
+ max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
if (zone->nr_reserved_highatomic >= max_managed)
return;
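The new sizing rule keeps the highatomic reserve at roughly 1% of the zone rounded up to a whole pageblock, and reserves nothing at all when 1% of the zone is smaller than one pageblock; the formula removed above (1% plus one pageblock) could overshoot the intended 1% noticeably on small zones. A standalone sketch of the two computations, assuming 4 KiB pages and a pageblock_nr_pages of 512 (illustrative values, not taken from the patch):

#include <stdio.h>

/* Illustrative stand-in for the kernel's pageblock_nr_pages (512 on x86-64
 * with 2 MiB pageblocks). */
#define PAGEBLOCK_NR_PAGES 512UL

/* Same round-up the kernel's ALIGN() macro performs (power-of-two 'a'). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned long old_max(unsigned long managed)
{
	return managed / 100 + PAGEBLOCK_NR_PAGES;	/* removed formula */
}

static unsigned long new_max(unsigned long managed)
{
	if (managed / 100 < PAGEBLOCK_NR_PAGES)
		return 0;	/* tiny zone: don't reserve anything */
	return ALIGN_UP(managed / 100, PAGEBLOCK_NR_PAGES);
}

int main(void)
{
	/* A small zone (~100 MiB) and a large zone (~8 GiB), in 4 KiB pages. */
	unsigned long zones[] = { 25600UL, 2097152UL };

	for (int i = 0; i < 2; i++)
		printf("managed=%lu old max=%lu new max=%lu\n",
		       zones[i], old_max(zones[i]), new_max(zones[i]));
	return 0;
}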
@@ -1941,7 +1934,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
continue;
spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &(zone->free_area[order]);
page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
@@ -2025,7 +2018,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
* approximates finding the pageblock with the most free pages, which
* would be too costly to do exactly.
*/
- for (current_order = MAX_ORDER; current_order >= min_order;
+ for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
--current_order) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
@@ -2051,8 +2044,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
return false;
find_smallest:
- for (current_order = order; current_order <= MAX_ORDER;
- current_order++) {
+ for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
area = &(zone->free_area[current_order]);
fallback_mt = find_suitable_fallback(area, current_order,
start_migratetype, false, &can_steal);
@@ -2064,7 +2056,7 @@ find_smallest:
* This should not happen - we already found a suitable fallback
* when looking for the largest page.
*/
- VM_BUG_ON(current_order > MAX_ORDER);
+ VM_BUG_ON(current_order > MAX_PAGE_ORDER);
do_steal:
page = get_page_from_free_area(area, fallback_mt);
@@ -3007,7 +2999,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
return true;
/* For a high-order request, check at least one suitable page is free */
- for (o = order; o <= MAX_ORDER; o++) {
+ for (o = order; o < NR_PAGE_ORDERS; o++) {
struct free_area *area = &z->free_area[o];
int mt;
@@ -3951,14 +3943,9 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
else
(*no_progress_loops)++;
- /*
- * Make sure we converge to OOM if we cannot make any progress
- * several times in the row.
- */
- if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
- /* Before OOM, exhaust highatomic_reserve */
- return unreserve_highatomic_pageblock(ac, true);
- }
+ if (*no_progress_loops > MAX_RECLAIM_RETRIES)
+ goto out;
+
/*
* Keep reclaiming pages while there is a chance this will lead
@@ -4001,6 +3988,11 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
schedule_timeout_uninterruptible(1);
else
cond_resched();
+out:
+ /* Before OOM, exhaust highatomic_reserve */
+ if (!ret)
+ return unreserve_highatomic_pageblock(ac, true);
+
return ret;
}
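The net effect of routing the retry-limit case through the new out: label is that the highatomic reserve is now released on every failure path of this function, not only after MAX_RECLAIM_RETRIES: when the per-zone watermark scan also concludes that no further progress is likely, ret stays false and the reserve is drained before the caller moves toward OOM. A heavily condensed skeleton of the resulting control flow, standalone and simplified, with a hypothetical drain_highatomic_reserve() standing in for unreserve_highatomic_pageblock(ac, true):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for unreserve_highatomic_pageblock(ac, true):
 * returns true if it released something worth retrying for. */
static bool drain_highatomic_reserve(void)
{
	return false;
}

static bool should_retry_skeleton(bool hit_retry_limit, bool zone_looks_reclaimable)
{
	bool ret = false;

	if (hit_retry_limit)
		goto out;		/* converge to OOM after MAX_RECLAIM_RETRIES */

	if (zone_looks_reclaimable)
		ret = true;		/* keep reclaiming */
out:
	/* Every failure path now drains the highatomic reserve; previously
	 * only the retry-limit path did. */
	if (!ret)
		return drain_highatomic_reserve();
	return ret;
}

int main(void)
{
	printf("%d %d %d\n",
	       should_retry_skeleton(true, true),	/* retry limit hit -> drain */
	       should_retry_skeleton(false, true),	/* progress plausible -> retry */
	       should_retry_skeleton(false, false));	/* no zone qualifies -> drain */
	return 0;
}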
@@ -4541,7 +4533,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
* There are several places where we assume that the order value is sane
* so bail out early if the request is out of bound.
*/
- if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
+ if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
return NULL;
gfp &= gfp_allowed_mask;
@@ -4823,7 +4815,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
* minimum number of pages to satisfy the request. alloc_pages() can only
* allocate memory in power-of-two pages.
*
- * This function is also limited by MAX_ORDER.
+ * This function is also limited by MAX_PAGE_ORDER.
*
* Memory allocated by this function must be released by free_pages_exact().
*
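As the surrounding comment says, the underlying allocation is a single power-of-two buddy block of at most order MAX_PAGE_ORDER, and alloc_pages_exact() then frees the tail pages beyond the exact need. A userspace sketch of the size math, with get_order() reimplemented locally and 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MAX_PAGE_ORDER	10	/* assumed default */

/* Local reimplementation of get_order(): smallest order whose block
 * covers 'size' bytes. */
static unsigned int get_order_local(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE + 123;	/* non-power-of-two request */
	unsigned int order = get_order_local(size);
	unsigned long exact_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;

	/* The buddy allocator hands out 2^order pages; the tail beyond the
	 * exact need is freed back, and order can never exceed MAX_PAGE_ORDER. */
	printf("request=%lu bytes -> order %u (%lu pages allocated), %lu pages kept, limit order %d\n",
	       size, order, 1UL << order, exact_pages, MAX_PAGE_ORDER);
	return 0;
}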
@@ -6381,7 +6373,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
order = 0;
outer_start = start;
while (!PageBuddy(pfn_to_page(outer_start))) {
- if (++order > MAX_ORDER) {
+ if (++order > MAX_PAGE_ORDER) {
outer_start = start;
break;
}
@@ -6635,7 +6627,7 @@ bool is_free_buddy_page(struct page *page)
unsigned long pfn = page_to_pfn(page);
unsigned int order;
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1));
if (PageBuddy(page_head) &&
@@ -6643,7 +6635,7 @@ bool is_free_buddy_page(struct page *page)
break;
}
- return order <= MAX_ORDER;
+ return order <= MAX_PAGE_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);
@@ -6690,7 +6682,7 @@ bool take_page_off_buddy(struct page *page)
bool ret = false;
spin_lock_irqsave(&zone->lock, flags);
- for (order = 0; order <= MAX_ORDER; order++) {
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
struct page *page_head = page - (pfn & ((1 << order) - 1));
int page_order = buddy_order(page_head);
@@ -6815,9 +6807,9 @@ static bool try_to_accept_memory_one(struct zone *zone)
__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
spin_unlock_irqrestore(&zone->lock, flags);
- accept_page(page, MAX_ORDER);
+ accept_page(page, MAX_PAGE_ORDER);
- __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
+ __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
if (last)
static_branch_dec(&zones_with_unaccepted_pages);