author      Peter Zijlstra <a.p.zijlstra@chello.nl>    2012-04-25 00:30:36 +0200
committer   Ingo Molnar <mingo@kernel.org>             2012-05-09 15:00:53 +0200
commit      0ce90475dcdbe90affc218e9688c8401e468e84d (patch)
tree        30771f6a6791af0214c5e7d57958f6395f3b97f5 /kernel
parent      sched/fair: Let minimally loaded cpu balance the group (diff)
download    linux-0ce90475dcdbe90affc218e9688c8401e468e84d.tar.xz
            linux-0ce90475dcdbe90affc218e9688c8401e468e84d.zip
sched/fair: Add some serialization to the sched_domain load-balance walk
Since the sched_domain walk is completely unserialized (!SD_SERIALIZE) it is
possible that multiple cpus in the group get elected to do the next level.
Avoid this by adding some serialization.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-vqh9ai6s0ewmeakjz80w4qz6@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
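[Annotation] The serialization in this patch is a simple claim/release handshake on the new
per-group balance_cpu field: the balance cpu of the local group claims the group by
atomically flipping the field from -1 to its own id, any other cpu that loses that race
backs off (*balance = 0), and rebalance_domains() later walks back down the hierarchy it
visited (via the new 'last' pointer and sd->child), resetting every claim it holds to -1.
Below is a minimal, self-contained userspace sketch of that pattern; it uses C11 atomics
in place of the kernel's cmpxchg(), and the names (struct group, try_claim, release) are
illustrative only, not the kernel's.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for struct sched_group's balance_cpu field. */
struct group {
        atomic_int balance_cpu;         /* -1: no cpu currently owns this group */
};

/* Try to claim the group: only the first caller to swap -1 -> cpu succeeds. */
static int try_claim(struct group *g, int cpu)
{
        int expected = -1;

        return atomic_compare_exchange_strong(&g->balance_cpu, &expected, cpu);
}

/* Release the group, but only if this cpu is the one that claimed it. */
static void release(struct group *g, int cpu)
{
        int expected = cpu;

        atomic_compare_exchange_strong(&g->balance_cpu, &expected, -1);
}

int main(void)
{
        struct group g = { .balance_cpu = -1 };

        printf("cpu0 claim: %d\n", try_claim(&g, 0));   /* 1: won the race  */
        printf("cpu1 claim: %d\n", try_claim(&g, 1));   /* 0: lost the race */

        release(&g, 1);                                 /* not the owner: no-op */
        release(&g, 0);                                 /* owner resets to -1   */
        printf("cpu1 claim again: %d\n", try_claim(&g, 1));    /* 1 */
        return 0;
}

The (void)cmpxchg() in the release loop of the patch mirrors release() above: it only
clears the field when it still holds this cpu's id, so a claim taken by another cpu in
the meantime is left untouched.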
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/core.c   2
-rw-r--r--   kernel/sched/fair.c   9
2 files changed, 9 insertions, 2 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0533a688ce22..6001e5c3b4e4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6060,6 +6060,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
                sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
                atomic_inc(&sg->sgp->ref);
+               sg->balance_cpu = -1;
 
                if (cpumask_test_cpu(cpu, sg_span))
                        groups = sg;
@@ -6135,6 +6136,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
                cpumask_clear(sched_group_cpus(sg));
                sg->sgp->power = 0;
+               sg->balance_cpu = -1;
 
                for_each_cpu(j, span) {
                        if (get_group(j, sdd, NULL) != group)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 968ffee24721..cf86f74bcac2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3828,7 +3828,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         */
        if (local_group) {
                if (idle != CPU_NEWLY_IDLE) {
-                       if (balance_cpu != this_cpu) {
+                       if (balance_cpu != this_cpu ||
+                           cmpxchg(&group->balance_cpu, -1, balance_cpu) != -1) {
                                *balance = 0;
                                return;
                        }
@@ -4929,7 +4930,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
        int balance = 1;
        struct rq *rq = cpu_rq(cpu);
        unsigned long interval;
-       struct sched_domain *sd;
+       struct sched_domain *sd, *last = NULL;
        /* Earliest time when we have to do rebalance again */
        unsigned long next_balance = jiffies + 60*HZ;
        int update_next_balance = 0;
@@ -4939,6 +4940,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
        rcu_read_lock();
        for_each_domain(cpu, sd) {
+               last = sd;
                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;
 
@@ -4983,6 +4985,9 @@ out:
                if (!balance)
                        break;
        }
+       for (sd = last; sd; sd = sd->child)
+               (void)cmpxchg(&sd->groups->balance_cpu, cpu, -1);
+
        rcu_read_unlock();
 
        /*