author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-09-10 18:18:47 +0200
committer  Ingo Molnar <mingo@elte.hu>                2009-09-15 16:01:06 +0200
commit     83f54960c11a14942ab00b54c51e91906b9d8235 (patch)
tree       6c7df778777568bb0c7375b4e7ec84ed532809df
parent     sched: Weaken SD_POWERSAVINGS_BALANCE (diff)
sched: for_each_domain() vs RCU
for_each_domain() uses RCU to serialize the sched_domains, except it
doesn't actually use rcu_read_lock() and instead relies on disabling
preemption -> FAIL.

XXX: audit other sched_domain code.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
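For illustration only (not part of the commit), below is a minimal sketch of the locking pattern the patch establishes: a for_each_domain() walk over the RCU-published sched_domain hierarchy wrapped in an explicit rcu_read_lock()/rcu_read_unlock() pair, rather than relying on preemption being disabled. The helper name pick_cpu_sketch() and the SD_LOAD_BALANCE check are hypothetical placeholders; for_each_domain() is a kernel-internal macro (defined in kernel/sched.c at the time), so this is a sketch of kernel code, not a standalone program.

/* Sketch only: assumes kernel/sched.c context, not buildable on its own. */
static int pick_cpu_sketch(int cpu)
{
	struct sched_domain *tmp;
	int new_cpu = cpu;

	rcu_read_lock();		/* enter RCU read-side critical section */
	for_each_domain(cpu, tmp) {	/* tmp walks the RCU-protected domain tree */
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;
		/* ... inspect tmp and possibly pick a different new_cpu ... */
	}
	rcu_read_unlock();		/* domains may be freed by updaters after this */

	return new_cpu;
}

The goto out path added in the diff below serves the same purpose: the early "return cpu" in the wake-affine branch would otherwise leave the function without dropping the read-side lock.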
-rw-r--r--  kernel/sched_fair.c  9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index eaa00014b499..43dc6d1d9e88 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1331,6 +1331,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		new_cpu = prev_cpu;
 	}
 
+	rcu_read_lock();
 	for_each_domain(cpu, tmp) {
 		/*
 		 * If power savings logic is enabled for a domain, see if we
@@ -1369,8 +1370,10 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
 
-			if (wake_affine(tmp, p, sync))
-				return cpu;
+			if (wake_affine(tmp, p, sync)) {
+				new_cpu = cpu;
+				goto out;
+			}
 
 			want_affine = 0;
 		}
@@ -1416,6 +1419,8 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		/* while loop will break here if sd == NULL */
 	}
 
+out:
+	rcu_read_unlock();
 	return new_cpu;
 }
 #endif /* CONFIG_SMP */