summaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-10-03 10:14:11 +0200
committerLinus Torvalds <torvalds@g5.osdl.org>2006-10-03 17:04:07 +0200
commitce164428c4cabfd284ca81913415cacd889aac33 (patch)
treeae4df00bc80285b03e755d615f41b4f96e5520ba /kernel
parent[PATCH] sched: fixing wrong comment for find_idlest_cpu() (diff)
downloadlinux-ce164428c4cabfd284ca81913415cacd889aac33.tar.xz
linux-ce164428c4cabfd284ca81913415cacd889aac33.zip
[PATCH] scheduler: NUMA aware placement of sched_group_allnodes
When the per-cpu sched domains are built, they also need to be placed on the node where the cpu resides; otherwise we will have frequent off-node accesses which will slow down the system. Signed-off-by: Christoph Lameter <clameter@sgi.com> Acked-by: Ingo Molnar <mingo@elte.hu> Acked-by: Nick Piggin <nickpiggin@yahoo.com.au> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c7
1 file changed, 4 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index fec97e4e196d..53608a59d6e3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6349,9 +6349,10 @@ static int build_sched_domains(const cpumask_t *cpu_map)
> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
if (!sched_group_allnodes) {
sched_group_allnodes
- = kmalloc(sizeof(struct sched_group)
- * MAX_NUMNODES,
- GFP_KERNEL);
+ = kmalloc_node(sizeof(struct sched_group)
+ * MAX_NUMNODES,
+ GFP_KERNEL,
+ cpu_to_node(i));
if (!sched_group_allnodes) {
printk(KERN_WARNING
"Can not alloc allnodes sched group\n");