author     Magnus Damm <magnus@valinux.co.jp>     2005-11-05 17:25:54 +0100
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-15 04:55:17 +0100
commit     ffd10a2b77bca50dd05ba26acd5a6e68bcc8f61f (patch)
tree       92bd5c702cde0c6582950ff66f648e59bf5fb2cd
parent     [PATCH] x86_64: Log machine checks from boot on Intel systems (diff)
download   linux-ffd10a2b77bca50dd05ba26acd5a6e68bcc8f61f.tar.xz
           linux-ffd10a2b77bca50dd05ba26acd5a6e68bcc8f61f.zip
[PATCH] x86_64: Make node boundaries consistent
The current x86_64 NUMA memory code is inconsistent when it comes to node
memory ranges. The exact behaviour varies depending on which config option
is used.
setup_node_bootmem() takes start and end as arguments and uses them to
calculate the size of the node as (end - start). This is fine as long as end
points to the first non-available byte. The problem is that the current
x86_64 code sometimes treats end as the last present byte and sometimes as
the first non-available byte, so some configurations can lose a page at the
end of the range.
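A minimal stand-alone C sketch of that exclusive-end convention (node_start,
node_end and the 128 MB figure are illustrative values, not kernel
identifiers):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long node_start = 0x00000000UL;	/* first byte of the node   */
	unsigned long node_end   = 0x08000000UL;	/* first non-available byte */

	unsigned long size  = node_end - node_start;	/* exactly 128 MB  */
	unsigned long pages = size / PAGE_SIZE;		/* 32768 pages     */

	/*
	 * If node_end instead held the last present byte (0x07ffffff),
	 * end - start would come up one byte short and the final page
	 * of the range could be lost when rounding to page frames.
	 */
	printf("size=%lu bytes, pages=%lu\n", size, pages);
	return 0;
}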
This patch tries to fix CONFIG_ACPI_NUMA, CONFIG_K8_NUMA and CONFIG_NUMA_EMU
so that they all treat the end variable as the first non-available byte, the
same way the single-node code does.
The patch is boot-tested on dual x86_64 hardware with the above
configurations, but maybe the removed code was needed as a workaround for
something?
Signed-off-by: Magnus Damm <magnus@valinux.co.jp>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/x86_64/mm/k8topology.c | 1
-rw-r--r--  arch/x86_64/mm/numa.c       | 2
-rw-r--r--  arch/x86_64/mm/srat.c       | 4
3 files changed, 1 insertion, 6 deletions
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index 65417b040c1b..a5663e0bb01c 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -108,6 +108,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 		limit >>= 16;
 		limit <<= 24;
 		limit |= (1<<24)-1;
+		limit++;
 
 		if (limit > end_pfn << PAGE_SHIFT)
 			limit = end_pfn << PAGE_SHIFT;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index edd5559380d3..629ff0621b3d 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -209,8 +209,6 @@ static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 		if (i == numa_fake-1)
 			sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
 		nodes[i].end = nodes[i].start + sz;
-		if (i != numa_fake-1)
-			nodes[i].end--;
 		printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
 		       i,
 		       nodes[i].start, nodes[i].end,
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index c7aa08a58041..33340bd1e328 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -71,8 +71,6 @@ static __init void cutoff_node(int i, unsigned long start, unsigned long end)
 			nd->start = nd->end;
 	}
 	if (nd->end > end) {
-		if (!(end & 0xfff))
-			end--;
 		nd->end = end;
 		if (nd->start > nd->end)
 			nd->start = nd->end;
@@ -166,8 +164,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 		if (nd->end < end)
 			nd->end = end;
 	}
-	if (!(nd->end & 0xfff))
-		nd->end--;
 	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
 		nd->start, nd->end);
 }
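For illustration, a stand-alone sketch of what the k8topology.c hunk above
changes (the raw register value is made up, and the bit layout is only
inferred from the shifts in the hunk itself): OR-ing in (1<<24)-1 yields the
address of the last byte of the node, and the added limit++ turns that into
the exclusive end that setup_node_bootmem() expects.

#include <stdio.h>

int main(void)
{
	/* Hypothetical raw limit register value; per the shifts below,
	 * bits [31:16] carry address bits [39:24] of the top of the node. */
	unsigned long limit = 0x00080000UL;

	limit >>= 16;			/* 0x8                              */
	limit <<= 24;			/* 0x08000000                       */
	limit |= (1UL << 24) - 1;	/* 0x08ffffff = last present byte   */
	limit++;			/* 0x09000000 = first byte past it, */
					/* i.e. the exclusive end           */

	printf("node end (exclusive) = %#lx\n", limit);
	return 0;
}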