author		Siddha, Suresh B <suresh.b.siddha@intel.com>	2006-03-27 11:15:23 +0200
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-27 18:44:44 +0200
commit		0806903316d516a3b3851c51cea5c71724d9051d (patch)
tree		05b453747a68a32bfd1d668a53963a4bb0bc36d1	/kernel/sched.c
parent		[PATCH] sched: new sched domain for representing multi-core (diff)
[PATCH] sched: fix group power for allnodes_domains
The current sched groups power calculation for allnodes_domains is wrong. We should really be using the cumulative power of the physical packages in that group (similar to the calculation in node_domains).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
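To make the change concrete: before this patch the allnodes group power was derived from the group's CPU count alone (SCHED_LOAD_SCALE plus 10% of it per additional CPU), while the fix sums the cpu_power of each physical package in the group, as the node_domains path already does. Below is a minimal user-space sketch of the two calculations; the package powers, the CPU count and the 128 value used for SCHED_LOAD_SCALE are made-up illustration values, not taken from the patch.

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL	/* stand-in value for illustration */

int main(void)
{
	/* Hypothetical node: four CPUs in two physical packages, each package with cpu_power 192. */
	unsigned long pkg_power[] = { 192, 192 };
	int npkgs = 2, ncpus = 4;

	/* Old allnodes calculation: scale by CPU count only. */
	unsigned long old_power =
		SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * (ncpus - 1) / 10;

	/* Fixed calculation: cumulative power of the physical packages. */
	unsigned long new_power = 0;
	for (int i = 0; i < npkgs; i++)
		new_power += pkg_power[i];

	printf("old = %lu, new = %lu\n", old_power, new_power);	/* old = 166, new = 384 */
	return 0;
}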
Diffstat (limited to '')
-rw-r--r--	kernel/sched.c	62
1 file changed, 29 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8a8b71b5751b..7854ee516b92 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5621,6 +5621,32 @@ static int cpu_to_allnodes_group(int cpu)
 {
 	return cpu_to_node(cpu);
 }
+static void init_numa_sched_groups_power(struct sched_group *group_head)
+{
+	struct sched_group *sg = group_head;
+	int j;
+
+	if (!sg)
+		return;
+next_sg:
+	for_each_cpu_mask(j, sg->cpumask) {
+		struct sched_domain *sd;
+
+		sd = &per_cpu(phys_domains, j);
+		if (j != first_cpu(sd->groups->cpumask)) {
+			/*
+			 * Only add "power" once for each
+			 * physical package.
+			 */
+			continue;
+		}
+
+		sg->cpu_power += sd->groups->cpu_power;
+	}
+	sg = sg->next;
+	if (sg != group_head)
+		goto next_sg;
+}
 #endif
 
 /*
@@ -5866,43 +5892,13 @@ void build_sched_domains(const cpumask_t *cpu_map)
 				(cpus_weight(sd->groups->cpumask)-1) / 10;
 		sd->groups->cpu_power = power;
 #endif
-
-#ifdef CONFIG_NUMA
-		sd = &per_cpu(allnodes_domains, i);
-		if (sd->groups) {
-			power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
-				(cpus_weight(sd->groups->cpumask)-1) / 10;
-			sd->groups->cpu_power = power;
-		}
-#endif
 	}
 
 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		struct sched_group *sg = sched_group_nodes[i];
-		int j;
-
-		if (sg == NULL)
-			continue;
-next_sg:
-		for_each_cpu_mask(j, sg->cpumask) {
-			struct sched_domain *sd;
+	for (i = 0; i < MAX_NUMNODES; i++)
+		init_numa_sched_groups_power(sched_group_nodes[i]);
 
-			sd = &per_cpu(phys_domains, j);
-			if (j != first_cpu(sd->groups->cpumask)) {
-				/*
-				 * Only add "power" once for each
-				 * physical package.
-				 */
-				continue;
-			}
-
-			sg->cpu_power += sd->groups->cpu_power;
-		}
-		sg = sg->next;
-		if (sg != sched_group_nodes[i])
-			goto next_sg;
-	}
+	init_numa_sched_groups_power(sched_group_allnodes);
 #endif
 
 	/* Attach the domains */
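As a side note on the new helper: init_numa_sched_groups_power() walks the circular sched_group list (the next_sg goto) and, for each CPU in a group, adds the owning physical package's power only when that CPU is the first CPU of the package, so each package is counted exactly once. Below is a rough stand-alone model of that walk, with simplified stand-in types, a made-up two-package topology, and a do/while in place of the goto; it is not kernel code.

#include <stdio.h>

#define NCPUS 4

/* Simplified stand-ins for struct sched_group and the phys_domains data. */
struct group {
	unsigned long cpu_power;
	int cpus[NCPUS];	/* 1 if the CPU belongs to this group */
	struct group *next;	/* groups form a circular list */
};

/* First CPU of the physical package each CPU sits in, and that package's power. */
static const int phys_first_cpu[NCPUS] = { 0, 0, 2, 2 };
static const unsigned long phys_power[NCPUS] = { 192, 192, 192, 192 };

static void init_groups_power(struct group *group_head)
{
	struct group *sg = group_head;

	if (!sg)
		return;
	do {
		for (int cpu = 0; cpu < NCPUS; cpu++) {
			if (!sg->cpus[cpu])
				continue;
			/* Only add "power" once for each physical package. */
			if (cpu != phys_first_cpu[cpu])
				continue;
			sg->cpu_power += phys_power[cpu];
		}
		sg = sg->next;
	} while (sg != group_head);
}

int main(void)
{
	struct group g = { 0, { 1, 1, 1, 1 }, &g };	/* one group covering all CPUs */

	init_groups_power(&g);
	printf("group power = %lu\n", g.cpu_power);	/* 192 + 192 = 384 */
	return 0;
}

In the kernel code the package power comes from per_cpu(phys_domains, j)->groups->cpu_power rather than a static table, and the same helper is now applied both to sched_group_nodes[i] and to sched_group_allnodes.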