author	Vlastimil Babka <vbabka@suse.cz>	2024-04-26 05:56:04 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-26 05:56:04 +0200
commit	e1f42a577f63647dadf1abe4583053c03d6be045 (patch)
tree	bc50591da1476bc88471a4329398c659da8b625e /mm
parent	mm: page_alloc: consolidate free page accounting (diff)
mm: page_alloc: change move_freepages() to __move_freepages_block()
The function is now supposed to be called only on a single pageblock and checks start_pfn and end_pfn accordingly. Rename it to make this more obvious and drop the end_pfn parameter which can be determined trivially and none of the callers use it for anything else. Also make the (now internal) end_pfn exclusive, which is more common.

Link: https://lkml.kernel.org/r/81b1d642-2ec0-49f5-89fc-19a3828419ff@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
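[Editorial note] The core of the change is the switch from an inclusive to an exclusive pageblock end. Below is a minimal userspace C sketch, not kernel code: PAGEBLOCK_NR_PAGES and the pfn values are made-up stand-ins, and block_end_pfn() only mirrors what pageblock_end_pfn() yields for a block-aligned start pfn. It shows why an exclusive end pairs with a `pfn < end_pfn` loop, where the old inclusive end required `pfn <= end_pfn`.

#include <stdio.h>

/* Hypothetical stand-in for pageblock_nr_pages; the real value is
 * 1 << pageblock_order and depends on the kernel configuration. */
#define PAGEBLOCK_NR_PAGES 512UL

/* For a block-aligned start_pfn, this matches what pageblock_end_pfn()
 * returns after the patch: the first pfn *after* the block (exclusive). */
static unsigned long block_end_pfn(unsigned long start_pfn)
{
	return start_pfn + PAGEBLOCK_NR_PAGES;
}

int main(void)
{
	unsigned long start_pfn = 0x4000;	/* arbitrary, block-aligned */
	unsigned long end_pfn = block_end_pfn(start_pfn);
	unsigned long pfn, count = 0;

	/* Exclusive end: iterate with '<' and visit exactly one pageblock. */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		count++;

	printf("visited %lu pfns (expect %lu)\n", count, PAGEBLOCK_NR_PAGES);
	return 0;
}

With the inclusive convention removed, callers no longer need to carry an end_pfn at all; as the diff below shows, __move_freepages_block() derives it internally from start_pfn.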
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	43
1 file changed, 20 insertions, 23 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b3b98a91d6a7..489dd74d2232 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1560,18 +1560,18 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
* Change the type of a block and move all its free pages to that
* type's freelist.
*/
-static int move_freepages(struct zone *zone, unsigned long start_pfn,
- unsigned long end_pfn, int old_mt, int new_mt)
+static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
+ int old_mt, int new_mt)
{
struct page *page;
- unsigned long pfn;
+ unsigned long pfn, end_pfn;
unsigned int order;
int pages_moved = 0;
VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
- VM_WARN_ON(start_pfn + pageblock_nr_pages - 1 != end_pfn);
+ end_pfn = pageblock_end_pfn(start_pfn);
- for (pfn = start_pfn; pfn <= end_pfn;) {
+ for (pfn = start_pfn; pfn < end_pfn;) {
page = pfn_to_page(pfn);
if (!PageBuddy(page)) {
pfn++;
@@ -1597,14 +1597,13 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
static bool prep_move_freepages_block(struct zone *zone, struct page *page,
unsigned long *start_pfn,
- unsigned long *end_pfn,
int *num_free, int *num_movable)
{
unsigned long pfn, start, end;
pfn = page_to_pfn(page);
start = pageblock_start_pfn(pfn);
- end = pageblock_end_pfn(pfn) - 1;
+ end = pageblock_end_pfn(pfn);
/*
* The caller only has the lock for @zone, don't touch ranges
@@ -1615,16 +1614,15 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
*/
if (!zone_spans_pfn(zone, start))
return false;
- if (!zone_spans_pfn(zone, end))
+ if (!zone_spans_pfn(zone, end - 1))
return false;
*start_pfn = start;
- *end_pfn = end;
if (num_free) {
*num_free = 0;
*num_movable = 0;
- for (pfn = start; pfn <= end;) {
+ for (pfn = start; pfn < end;) {
page = pfn_to_page(pfn);
if (PageBuddy(page)) {
int nr = 1 << buddy_order(page);
@@ -1650,13 +1648,12 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
static int move_freepages_block(struct zone *zone, struct page *page,
int old_mt, int new_mt)
{
- unsigned long start_pfn, end_pfn;
+ unsigned long start_pfn;
- if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
- NULL, NULL))
+ if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return -1;
- return move_freepages(zone, start_pfn, end_pfn, old_mt, new_mt);
+ return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
}
#ifdef CONFIG_MEMORY_ISOLATION
@@ -1727,10 +1724,9 @@ static void split_large_buddy(struct zone *zone, struct page *page,
bool move_freepages_block_isolate(struct zone *zone, struct page *page,
int migratetype)
{
- unsigned long start_pfn, end_pfn, pfn;
+ unsigned long start_pfn, pfn;
- if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
- NULL, NULL))
+ if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return false;
/* No splits needed if buddies can't span multiple blocks */
@@ -1761,8 +1757,9 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
return true;
}
move:
- move_freepages(zone, start_pfn, end_pfn,
- get_pfnblock_migratetype(page, start_pfn), migratetype);
+ __move_freepages_block(zone, start_pfn,
+ get_pfnblock_migratetype(page, start_pfn),
+ migratetype);
return true;
}
#endif /* CONFIG_MEMORY_ISOLATION */
@@ -1862,7 +1859,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
unsigned int alloc_flags, bool whole_block)
{
int free_pages, movable_pages, alike_pages;
- unsigned long start_pfn, end_pfn;
+ unsigned long start_pfn;
int block_type;
block_type = get_pageblock_migratetype(page);
@@ -1895,8 +1892,8 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
goto single_page;
/* moving whole block can fail due to zone boundary conditions */
- if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
- &free_pages, &movable_pages))
+ if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
+ &movable_pages))
goto single_page;
/*
@@ -1926,7 +1923,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
*/
if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
page_group_by_mobility_disabled) {
- move_freepages(zone, start_pfn, end_pfn, block_type, start_type);
+ __move_freepages_block(zone, start_pfn, block_type, start_type);
return __rmqueue_smallest(zone, order, start_type);
}