Diffstat (limited to 'mm/mm_init.c')
-rw-r--r--   mm/mm_init.c   71
1 file changed, 31 insertions(+), 40 deletions(-)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 077bfe393b5e..89dc29f1e6c6 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -796,6 +796,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
* - physical memory bank size is not necessarily the exact multiple of the
* arbitrary section size
* - early reserved memory may not be listed in memblock.memory
+ * - non-memory regions covered by the contiguous flatmem mapping
* - memory layouts defined with memmap= kernel parameter may not align
* nicely with memmap sections
*
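(A rough sketch of the loop this comment sits above, simplified from the
surrounding function rather than quoted from this patch: every pfn in such a
hole that still has memmap behind it gets initialized and marked reserved, so
the rest of mm never sees uninitialized struct pages.)

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(pageblock_start_pfn(pfn))) {
			pfn = pageblock_end_pfn(pfn) - 1; /* skip invalid block */
			continue;
		}
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}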
@@ -826,7 +827,7 @@ static void __init init_unavailable_range(unsigned long spfn,
}
if (pgcnt)
- pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
+ pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
node, zone_names[zone], pgcnt);
}
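(Why the one-character fix above matters: printk treats a message without a
trailing newline as a candidate for continuation, so later output can end up
glued onto the same log line. Contrived illustration:)

	pr_info("pages in unavailable ranges");  /* no newline */
	pr_cont(" extra");
	/* log line may read: "pages in unavailable ranges extra" */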
@@ -1454,7 +1455,7 @@ static inline void setup_usemap(struct zone *zone) {}
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
- unsigned int order = MAX_ORDER;
+ unsigned int order = MAX_PAGE_ORDER;
/* Check that pageblock_nr_pages has not already been setup */
if (pageblock_order)
@@ -1466,8 +1467,7 @@ void __init set_pageblock_order(void)
/*
* Assume the largest contiguous order of interest is a huge page.
- * This value may be variable depending on boot parameters on IA64 and
- * powerpc.
+ * This value may be variable depending on boot parameters on powerpc.
*/
pageblock_order = order;
}
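(Concrete numbers for the assignment above, using common x86-64 defaults
purely as an illustration; the clamp to HUGETLB_PAGE_ORDER happens in context
elided from this hunk:)

	/*
	 * PAGE_SHIFT         = 12 -> 4 KiB base pages
	 * HUGETLB_PAGE_ORDER = 9  -> 2 MiB huge pages
	 * MAX_PAGE_ORDER     = 10 -> buddy tops out at 4 MiB
	 *
	 * HUGETLB_PAGE_ORDER < MAX_PAGE_ORDER, so order is clamped to 9:
	 * pageblock_nr_pages = 1UL << 9 = 512 pages = 2 MiB per pageblock.
	 */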
@@ -1628,8 +1628,8 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
- unsigned long __maybe_unused start = 0;
- unsigned long __maybe_unused offset = 0;
+ unsigned long start, offset, size, end;
+ struct page *map;
/* Skip empty nodes */
if (!pgdat->node_spanned_pages)
@@ -1637,33 +1637,24 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
offset = pgdat->node_start_pfn - start;
- /* ia64 gets its own node_mem_map, before this, without bootmem */
- if (!pgdat->node_mem_map) {
- unsigned long size, end;
- struct page *map;
-
- /*
- * The zone's endpoints aren't required to be MAX_ORDER
- * aligned but the node_mem_map endpoints must be in order
- * for the buddy allocator to function correctly.
- */
- end = pgdat_end_pfn(pgdat);
- end = ALIGN(end, MAX_ORDER_NR_PAGES);
- size = (end - start) * sizeof(struct page);
- map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
- pgdat->node_id, false);
- if (!map)
- panic("Failed to allocate %ld bytes for node %d memory map\n",
- size, pgdat->node_id);
- pgdat->node_mem_map = map + offset;
- }
- pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
- __func__, pgdat->node_id, (unsigned long)pgdat,
- (unsigned long)pgdat->node_mem_map);
-#ifndef CONFIG_NUMA
/*
- * With no DISCONTIG, the global mem_map is just set as node 0's
+ * The zone's endpoints aren't required to be MAX_PAGE_ORDER
+ * aligned but the node_mem_map endpoints must be in order
+ * for the buddy allocator to function correctly.
*/
+ end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
+ size = (end - start) * sizeof(struct page);
+ map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+ pgdat->node_id, false);
+ if (!map)
+ panic("Failed to allocate %ld bytes for node %d memory map\n",
+ size, pgdat->node_id);
+ pgdat->node_mem_map = map + offset;
+ pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
+ __func__, pgdat->node_id, (unsigned long)pgdat,
+ (unsigned long)pgdat->node_mem_map);
+#ifndef CONFIG_NUMA
+ /* the global mem_map is just set as node 0's */
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
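(Worked example of the rounding in this hunk, with made-up numbers:
node_start_pfn = 0x213, MAX_ORDER_NR_PAGES = 0x400:)

	start  = 0x213 & ~(0x400 - 1)	/* = 0x000, round down          */
	offset = 0x213 - 0x000		/* = 0x213 pages of lead-in     */
	end    = ALIGN(end_pfn, 0x400)	/* round the far end up         */
	size   = (end - start) * sizeof(struct page)

	/* node_mem_map = map + offset: index 0 lands on node_start_pfn,
	 * while the allocation itself spans MAX_PAGE_ORDER-aligned
	 * endpoints, which is what the buddy allocator requires. */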
@@ -1973,11 +1964,11 @@ static void __init deferred_free_range(unsigned long pfn,
if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
for (i = 0; i < nr_pages; i += pageblock_nr_pages)
set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
- __free_pages_core(page, MAX_ORDER);
+ __free_pages_core(page, MAX_PAGE_ORDER);
return;
}
- /* Accept chunks smaller than MAX_ORDER upfront */
+ /* Accept chunks smaller than MAX_PAGE_ORDER upfront */
accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
for (i = 0; i < nr_pages; i++, page++, pfn++) {
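(The overall shape of deferred_free_range(), paraphrased: one aligned
MAX_PAGE_ORDER-sized run is handed to the buddy allocator in a single call,
anything irregular falls back to order-0 frees; migratetype setup omitted:)

	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
		__free_pages_core(page, MAX_PAGE_ORDER); /* one big free */
	} else {
		for (i = 0; i < nr_pages; i++, page++)
			__free_pages_core(page, 0);	 /* page by page */
	}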
@@ -2000,8 +1991,8 @@ static inline void __init pgdat_init_report_one_done(void)
/*
* Returns true if page needs to be initialized or freed to buddy allocator.
*
- * We check if a current MAX_ORDER block is valid by only checking the validity
- * of the head pfn.
+ * We check if a current MAX_PAGE_ORDER block is valid by only checking the
+ * validity of the head pfn.
*/
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
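(The body that follows in the tree, for reference: pfn_valid() is not free,
so it is only re-checked when pfn crosses a MAX_PAGE_ORDER boundary, and the
head pfn's answer is trusted for the rest of the block:)

	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
		return false;
	return true;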
@@ -2158,8 +2149,8 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
/*
- * Initialize and free pages in MAX_ORDER sized increments so that we
- * can avoid introducing any issues with the buddy allocator.
+ * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
+ * we can avoid introducing any issues with the buddy allocator.
*/
while (spfn < end_pfn) {
deferred_init_maxorder(&i, zone, &spfn, &epfn);
@@ -2300,7 +2291,7 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
}
/*
- * Initialize and free pages in MAX_ORDER sized increments so
+ * Initialize and free pages in MAX_PAGE_ORDER sized increments so
* that we can avoid introducing any issues with the buddy
* allocator.
*/
@@ -2518,7 +2509,7 @@ void *__init alloc_large_system_hash(const char *tablename,
else
table = memblock_alloc_raw(size,
SMP_CACHE_BYTES);
- } else if (get_order(size) > MAX_ORDER || hashdist) {
+ } else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
table = vmalloc_huge(size, gfp_flags);
virt = true;
if (table)
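(Why the comparison above gates on MAX_PAGE_ORDER: the buddy allocator cannot
hand out more than 1 << MAX_PAGE_ORDER contiguous pages. With 4 KiB pages,
illustratively:)

	/*
	 * get_order(4 << 20) == 10 -> still within the buddy allocator
	 * get_order(8 << 20) == 11 -> > MAX_PAGE_ORDER, take the
	 *    vmalloc_huge() path, which builds the table out of smaller,
	 *    physically non-contiguous chunks
	 */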
@@ -2765,7 +2756,7 @@ void __init mm_core_init(void)
/*
* page_ext requires contiguous pages,
- * bigger than MAX_ORDER unless SPARSEMEM.
+ * bigger than MAX_PAGE_ORDER unless SPARSEMEM.
*/
page_ext_init_flatmem();
mem_debugging_and_hardening_init();
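(On the comment fixed above: with FLATMEM, page_ext is a single array with
one entry per struct page, which easily outgrows the buddy allocator's
1 << MAX_PAGE_ORDER limit, hence the early memblock-based init. Rough,
hypothetical sizing:)

	/*
	 * 16 GiB of RAM / 4 KiB pages = 4M pages; at ~8 bytes of extension
	 * data per page that is ~32 MiB in one piece, versus the 4 MiB
	 * (1 << MAX_PAGE_ORDER pages) the buddy allocator can provide.
	 * SPARSEMEM avoids this by allocating page_ext per memory section.
	 */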