author		Kefeng Wang <wangkefeng.wang@huawei.com>	2022-09-07 08:08:42 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2022-10-03 23:03:03 +0200
commit		4f9bc69ac5ce34071a9a51343bc81ca76cb2e3f1 (patch)
tree		09f1faa2d36644a3800fed59651a11ea1089e65e /mm
parent		mm/page_owner.c: remove redundant drain_all_pages (diff)
mm: reuse pageblock_start/end_pfn() macro
Move pageblock_start_pfn()/pageblock_end_pfn() into pageblock-flags.h so they can be used elsewhere, not only in compaction. Also use ALIGN_DOWN() instead of round_down() to pair with ALIGN(); the two are equivalent for the power-of-two pageblock size.

Link: https://lkml.kernel.org/r/20220907060844.126891-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
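For illustration, a minimal userspace sketch of the two helpers as they read after the move. The definitions are inferred from the old compaction.c macros with round_down() swapped for ALIGN_DOWN(), per the commit message; pageblock_order is hard-coded to 9 here as an assumption, and ALIGN()/ALIGN_DOWN() are re-implemented with the same power-of-two masking the kernel uses:

	#include <stdio.h>

	#define pageblock_order		9	/* assumption: a typical value */
	#define pageblock_nr_pages	(1UL << pageblock_order)

	/* Userspace stand-ins for the kernel's power-of-two rounding macros */
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

	/* The helpers this patch moves into pageblock-flags.h */
	#define pageblock_start_pfn(pfn)	ALIGN_DOWN(pfn, pageblock_nr_pages)
	#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)

	int main(void)
	{
		unsigned long pfn = 1234;

		/* For order-9 blocks, pfn 1234 lies in the pageblock [1024, 1536) */
		printf("start=%lu end=%lu\n",
		       pageblock_start_pfn(pfn), pageblock_end_pfn(pfn));
		return 0;
	}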
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c		2
-rw-r--r--	mm/memblock.c		2
-rw-r--r--	mm/page_alloc.c		13
-rw-r--r--	mm/page_isolation.c	11
-rw-r--r--	mm/page_owner.c		4
5 files changed, 14 insertions(+), 18 deletions(-)
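And a standalone check, using the same userspace stand-ins, that each open-coded expression replaced in the hunks below matches the corresponding helper:

	#include <assert.h>

	#define pageblock_nr_pages	(1UL << 9)	/* assumption: order-9 blocks */
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define pageblock_start_pfn(pfn)	ALIGN_DOWN(pfn, pageblock_nr_pages)
	#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)

	int main(void)
	{
		for (unsigned long pfn = 0; pfn < 4096; pfn++) {
			/* move_freepages_block(): mask arithmetic vs. helper */
			assert((pfn & ~(pageblock_nr_pages - 1)) == pageblock_start_pfn(pfn));
			/* memblock.c / page_isolation.c: round_down()/ALIGN_DOWN() start */
			assert(ALIGN_DOWN(pfn, pageblock_nr_pages) == pageblock_start_pfn(pfn));
			/* page_owner.c / set_zone_contiguous(): ALIGN()-based end */
			assert(ALIGN(pfn + 1, pageblock_nr_pages) == pageblock_end_pfn(pfn));
		}
		return 0;
	}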
diff --git a/mm/compaction.c b/mm/compaction.c
index 262c4676b32c..9cbe8562b63a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -52,8 +52,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
-#define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
-#define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
/*
* Page order with-respect-to which proactive compaction
diff --git a/mm/memblock.c b/mm/memblock.c
index b5d3026979fc..46fe7575f03c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2000,7 +2000,7 @@ static void __init free_unused_memmap(void)
* presume that there are no holes in the memory map inside
* a pageblock
*/
- start = round_down(start, pageblock_nr_pages);
+ start = pageblock_start_pfn(start);
/*
* If we had a previous bank, and there is a space
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44f3c9364316..1637db90472e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -544,7 +544,7 @@ static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
#ifdef CONFIG_SPARSEMEM
pfn &= (PAGES_PER_SECTION-1);
#else
- pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
+ pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}
@@ -1857,7 +1857,7 @@ void set_zone_contiguous(struct zone *zone)
unsigned long block_start_pfn = zone->zone_start_pfn;
unsigned long block_end_pfn;
- block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
+ block_end_pfn = pageblock_end_pfn(block_start_pfn);
for (; block_start_pfn < zone_end_pfn(zone);
block_start_pfn = block_end_pfn,
block_end_pfn += pageblock_nr_pages) {
@@ -2653,8 +2653,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
*num_movable = 0;
pfn = page_to_pfn(page);
- start_pfn = pfn & ~(pageblock_nr_pages - 1);
- end_pfn = start_pfn + pageblock_nr_pages - 1;
+ start_pfn = pageblock_start_pfn(pfn);
+ end_pfn = pageblock_end_pfn(pfn) - 1;
/* Do not cross zone boundaries */
if (!zone_spans_pfn(zone, start_pfn))
@@ -6934,9 +6934,8 @@ static void __init init_unavailable_range(unsigned long spfn,
u64 pgcnt = 0;
for (pfn = spfn; pfn < epfn; pfn++) {
- if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
- pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
- + pageblock_nr_pages - 1;
+ if (!pfn_valid(pageblock_start_pfn(pfn))) {
+ pfn = pageblock_end_pfn(pfn) - 1;
continue;
}
__init_single_page(pfn_to_page(pfn), pfn, zone, node);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index eb3a68ca92ad..5819cb9c62f3 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -37,8 +37,8 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
struct zone *zone = page_zone(page);
unsigned long pfn;
- VM_BUG_ON(ALIGN_DOWN(start_pfn, pageblock_nr_pages) !=
- ALIGN_DOWN(end_pfn - 1, pageblock_nr_pages));
+ VM_BUG_ON(pageblock_start_pfn(start_pfn) !=
+ pageblock_start_pfn(end_pfn - 1));
if (is_migrate_cma_page(page)) {
/*
@@ -172,7 +172,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
* to avoid redundant checks.
*/
check_unmovable_start = max(page_to_pfn(page), start_pfn);
- check_unmovable_end = min(ALIGN(page_to_pfn(page) + 1, pageblock_nr_pages),
+ check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
end_pfn);
unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
@@ -532,7 +532,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn;
struct page *page;
/* isolation is done at page block granularity */
- unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+ unsigned long isolate_start = pageblock_start_pfn(start_pfn);
unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
int ret;
bool skip_isolation = false;
@@ -579,10 +579,9 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
{
unsigned long pfn;
struct page *page;
- unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+ unsigned long isolate_start = pageblock_start_pfn(start_pfn);
unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
-
for (pfn = isolate_start;
pfn < isolate_end;
pfn += pageblock_nr_pages) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 54f3e039fb48..2d27f532df4c 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -297,7 +297,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
continue;
}
- block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = pageblock_end_pfn(pfn);
block_end_pfn = min(block_end_pfn, end_pfn);
pageblock_mt = get_pageblock_migratetype(page);
@@ -635,7 +635,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
continue;
}
- block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = pageblock_end_pfn(pfn);
block_end_pfn = min(block_end_pfn, end_pfn);
for (; pfn < block_end_pfn; pfn++) {
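The two page_owner.c hunks keep the existing clamp-to-range idiom around the new helper; a sketch of that walk follows (walk_range() is a hypothetical name, and the macros are the same userspace stand-ins as above):

	#include <stdio.h>

	#define pageblock_nr_pages	(1UL << 9)	/* assumption: order-9 blocks */
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
	#define pageblock_end_pfn(pfn)	ALIGN((pfn) + 1, pageblock_nr_pages)

	static void walk_range(unsigned long start_pfn, unsigned long end_pfn)
	{
		unsigned long pfn, block_end_pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn = block_end_pfn) {
			block_end_pfn = pageblock_end_pfn(pfn);
			/* Clamp so a final, partial block does not overrun the range */
			if (block_end_pfn > end_pfn)
				block_end_pfn = end_pfn;

			printf("pageblock [%lu, %lu)\n", pfn, block_end_pfn);
		}
	}

	int main(void)
	{
		walk_range(1000, 2000);	/* spans three order-9 pageblocks */
		return 0;
	}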