author    Christoph Hellwig <hch@lst.de>  2017-12-29 08:53:57 +0100
committer Dan Williams <dan.j.williams@intel.com>  2018-01-08 20:46:23 +0100
commit    a99583e780c751003ac9c0105eec9a3b23ec3bc4 (patch)
tree      084c7a60723639f7e9d61509f843b6eea0f70fb0 /mm
parent    mm: pass the vmem_altmap to vmemmap_free (diff)
mm: pass the vmem_altmap to memmap_init_zone
Pass the vmem_altmap two levels down instead of needing a lookup.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
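The refactoring pattern here is plain parameter threading: the top-level caller already knows which vmem_altmap (if any) covers the pfn range, so it hands the pointer down through move_pfn_range_to_zone() into memmap_init_zone(), and the leaf function no longer re-derives it via to_vmem_altmap(__pfn_to_phys(start_pfn)). Below is a minimal, self-contained userspace C sketch of that shape; everything named *_stub, plus altmap_lookup(), is a hypothetical stand-in for illustration, not kernel code:

#include <stdio.h>

/* Hypothetical stand-in for struct vmem_altmap: device memory that
 * reserves its leading pages to back its own struct page array. */
struct altmap_stub {
	unsigned long reserve;		/* pages to skip at range start */
};

/* Old shape: the leaf re-derived the altmap from the pfn, the way
 * memmap_init_zone() called to_vmem_altmap(__pfn_to_phys(start_pfn)). */
static struct altmap_stub *altmap_lookup(unsigned long pfn)
{
	static struct altmap_stub dev_map = { .reserve = 64 };

	/* pretend pfns at or above 0x80000 belong to device memory */
	return pfn >= 0x80000 ? &dev_map : NULL;
}

/* New shape: the leaf takes the altmap as an explicit argument. */
static void init_zone_stub(unsigned long start_pfn, unsigned long nr_pages,
			   struct altmap_stub *altmap)
{
	if (altmap) {
		start_pfn += altmap->reserve;
		nr_pages -= altmap->reserve;
	}
	printf("init %lu pages from pfn %#lx\n", nr_pages, start_pfn);
}

/* The mid-level function only threads the pointer through, mirroring
 * move_pfn_range_to_zone() in this commit. */
static void move_range_stub(unsigned long start_pfn, unsigned long nr_pages,
			    struct altmap_stub *altmap)
{
	init_zone_stub(start_pfn, nr_pages, altmap);
}

int main(void)
{
	/* Boot-time style path: no device memory, NULL is threaded down,
	 * as the memmap_init() macro and mm/hmm.c now do explicitly. */
	move_range_stub(0x1000, 512, NULL);

	/* Device path: the owner resolves the altmap once and passes it,
	 * instead of every level repeating the lookup. */
	move_range_stub(0x80000, 512, altmap_lookup(0x80000));
	return 0;
}

Besides saving a lookup on every call, threading the pointer makes the altmap an explicit part of the interface: callers that have no device altmap (the boot-time memmap_init() macro, mm/hmm.c) now document that with a literal NULL.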
Diffstat (limited to 'mm')
-rw-r--r--  mm/hmm.c            | 2
-rw-r--r--  mm/memory_hotplug.c | 9
-rw-r--r--  mm/page_alloc.c     | 6
3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 5d17ba89062f..2f2e13c61040 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -942,7 +942,7 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
 	}
 	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
 				align_start >> PAGE_SHIFT,
-				align_size >> PAGE_SHIFT);
+				align_size >> PAGE_SHIFT, NULL);
 	mem_hotplug_done();
 
 	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a8dde9734120..12df8a5fadcc 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -798,8 +798,8 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
 	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
 }
 
-void __ref move_pfn_range_to_zone(struct zone *zone,
-		unsigned long start_pfn, unsigned long nr_pages)
+void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nid = pgdat->node_id;
@@ -824,7 +824,8 @@ void __ref move_pfn_range_to_zone(struct zone *zone,
 	 * expects the zone spans the pfn range. All the pages in the range
 	 * are reserved so nobody should be touching them so we should be safe
 	 */
-	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);
+	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
+			MEMMAP_HOTPLUG, altmap);
 
 	set_zone_contiguous(zone);
 }
@@ -896,7 +897,7 @@ static struct zone * __meminit move_pfn_range(int online_type, int nid,
 	struct zone *zone;
 
 	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
-	move_pfn_range_to_zone(zone, start_pfn, nr_pages);
+	move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
 	return zone;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7e5e775e97f4..1748dd4a4b1b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5314,9 +5314,9 @@ void __ref build_all_zonelists(pg_data_t *pgdat)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn, enum memmap_context context)
+		unsigned long start_pfn, enum memmap_context context,
+		struct vmem_altmap *altmap)
 {
-	struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
 	unsigned long end_pfn = start_pfn + size;
 	pg_data_t *pgdat = NODE_DATA(nid);
 	unsigned long pfn;
@@ -5417,7 +5417,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
+	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
 #endif
 
 static int zone_batchsize(struct zone *zone)