author    Tejun Heo <tj@kernel.org>  2011-05-02 14:18:54 +0200
committer Tejun Heo <tj@kernel.org>  2011-05-02 14:18:54 +0200
commit    99cca492ea8ced305bfd687521ed69fb9e0147aa
tree      807f087f54f799051114925d50643ca18a950f45  /arch/x86/mm/numa_32.c
parent    x86, NUMA: Remove long 64bit assumption from numa.c
x86-32, NUMA: Add @start and @end to init_alloc_remap()
Instead of dereferencing node_start/end_pfn[] directly, make
init_alloc_remap() take @start and @end and let the caller be
responsible for making sure the range is sane.

This is to prepare for use from unified NUMA init code.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
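As a side note on the new calling contract: the sketch below is a minimal,
standalone C model (not kernel code) of the range computation the patch moves
into the caller. It shows how per-node pfn bounds are converted to byte
addresses, clamped to max_pfn, and skipped when empty, mirroring the
initmem_init() hunk further down; the node tables and values are hypothetical.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86-32 */

/* Hypothetical per-node pfn bounds; node 1 extends past max_pfn. */
static unsigned long node_start_pfn[] = { 0x00000, 0x40000 };
static unsigned long node_end_pfn[]   = { 0x40000, 0xc0000 };
static unsigned long max_pfn = 0x80000;

int main(void)
{
	for (int nid = 0; nid < 2; nid++) {
		/* Convert pfns to byte addresses up front. */
		uint64_t start = (uint64_t)node_start_pfn[nid] << PAGE_SHIFT;
		uint64_t end   = (uint64_t)node_end_pfn[nid] << PAGE_SHIFT;
		uint64_t limit = (uint64_t)max_pfn << PAGE_SHIFT;

		/* Clamp to present memory, as min() does in the patch. */
		if (end > limit)
			end = limit;

		/* An empty range means the caller skips the node; the
		 * callee no longer has to validate its arguments. */
		if (start < end)
			printf("node %d: init_alloc_remap(%d, %#llx, %#llx)\n",
			       nid, nid, (unsigned long long)start,
			       (unsigned long long)end);
		else
			printf("node %d: skipped (empty range)\n", nid);
	}
	return 0;
}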
Diffstat (limited to 'arch/x86/mm/numa_32.c')
-rw-r--r--  arch/x86/mm/numa_32.c | 29 ++++++++++++++---------------
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 975a76f622ba..900863204be2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -265,8 +265,10 @@ void resume_map_numa_kva(pgd_t *pgd_base)
  * opportunistically and the callers will fall back to other memory
  * allocation mechanisms on failure.
  */
-static __init void init_alloc_remap(int nid)
+static __init void init_alloc_remap(int nid, u64 start, u64 end)
 {
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = end >> PAGE_SHIFT;
 	unsigned long size, pfn;
 	u64 node_pa, remap_pa;
 	void *remap_va;
@@ -276,24 +278,15 @@ static __init void init_alloc_remap(int nid)
 	 * memory could be added but not currently present.
 	 */
 	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
-	       nid, node_start_pfn[nid], node_end_pfn[nid]);
-	if (node_start_pfn[nid] > max_pfn)
-		return;
-	if (!node_end_pfn[nid])
-		return;
-	if (node_end_pfn[nid] > max_pfn)
-		node_end_pfn[nid] = max_pfn;
+	       nid, start_pfn, end_pfn);
 
 	/* calculate the necessary space aligned to large page size */
-	size = node_memmap_size_bytes(nid, node_start_pfn[nid],
-				      min(node_end_pfn[nid], max_pfn));
+	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
 	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
 	size = ALIGN(size, LARGE_PAGE_BYTES);
 
 	/* allocate node memory and the lowmem remap area */
-	node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
-					 (u64)node_end_pfn[nid] << PAGE_SHIFT,
-					 size, LARGE_PAGE_BYTES);
+	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
 	if (node_pa == MEMBLOCK_ERROR) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
 			   size, nid);
@@ -391,8 +384,14 @@ void __init initmem_init(void)
 	get_memcfg_numa();
 	numa_init_array();
 
-	for_each_online_node(nid)
-		init_alloc_remap(nid);
+	for_each_online_node(nid) {
+		u64 start = (u64)node_start_pfn[nid] << PAGE_SHIFT;
+		u64 end = min((u64)node_end_pfn[nid] << PAGE_SHIFT,
+			      (u64)max_pfn << PAGE_SHIFT);
+
+		if (start < end)
+			init_alloc_remap(nid, start, end);
+	}
 
 #ifdef CONFIG_HIGHMEM
 	highstart_pfn = highend_pfn = max_pfn;