| Field | Value | Date |
|---|---|---|
| author | Peter Zijlstra <peterz@infradead.org> | 2010-04-21 22:02:07 +0200 |
| committer | Ingo Molnar <mingo@elte.hu> | 2010-04-30 12:03:17 +0200 |
| commit | 8b08ca52f5942c21564bbb90ccfb61053f2c26a1 | |
| tree | 3fbba8114fdad7d077390c206ae17c38fbc85cb8 /kernel | |
| parent | mutex: Don't spin when the owner CPU is offline or other weird cases | |
rcu: Fix RCU lockdep splat in set_task_cpu on fork path
Add an RCU read-side critical section to set_task_rq() to suppress the false
positive that CONFIG_PROVE_RCU reports when set_task_cpu() is called on the
fork path. (A userspace sketch of the pattern follows the tags below.)
Located-by: Eric Paris <eparis@parisplace.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
Cc: eric.dumazet@gmail.com
LKML-Reference: <1271880131-3951-1-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
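The pattern the patch applies, wrapping an rcu_dereference() user in rcu_read_lock()/rcu_read_unlock() purely so that CONFIG_PROVE_RCU stops warning even though the object cannot actually go away, can be shown with a small userspace model. This is only an illustrative sketch: the model_* helpers and the thread-local depth counter are invented here and mimic the checker's bookkeeping, not real RCU semantics.

```c
#include <assert.h>
#include <stdio.h>

/* Models lockdep's rcu_read_lock_held() bookkeeping, nothing more. */
static __thread int rcu_read_depth;

static void model_rcu_read_lock(void)   { rcu_read_depth++; }
static void model_rcu_read_unlock(void) { rcu_read_depth--; }

/* The "checker": complain if a dereference happens outside a read section. */
static void *model_rcu_dereference(void *p)
{
	assert(rcu_read_depth > 0 && "suspicious rcu_dereference() usage");
	return p;
}

struct task_group { int id; };
static struct task_group tg = { .id = 1 };

int main(void)
{
	/*
	 * Without this lock/unlock pair the assertion above fires even
	 * though tg can never go away, which is the analogue of the
	 * lockdep splat this patch suppresses.
	 */
	model_rcu_read_lock();
	struct task_group *g = model_rcu_dereference(&tg);
	printf("task_group id = %d\n", g->id);
	model_rcu_read_unlock();
	return 0;
}
```

Removing the model_rcu_read_lock()/model_rcu_read_unlock() pair makes the assertion fire; that is the userspace analogue of the lockdep splat the commit title refers to.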
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 10 |
1 file changed, 10 insertions, 0 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index de0bd26e520a..3c2a54f70ffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
```
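For readability, here is set_task_rq() as it reads with this patch applied, reassembled from the two hunks above. The three lines the diff elides between the hunks are assumed here to close the CONFIG_FAIR_GROUP_SCHED block and open the matching CONFIG_RT_GROUP_SCHED block; treat those two preprocessor lines as a sketch, not the verbatim file.

```c
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
	/*
	 * Strictly speaking this rcu_read_lock() is not needed since the
	 * task_group is tied to the cgroup, which in turn can never go away
	 * as long as there are tasks attached to it.
	 *
	 * However since task_group() uses task_subsys_state() which is an
	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
	 */
	rcu_read_lock();
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	p->se.parent = task_group(p)->se[cpu];
#endif /* assumed: elided between the hunks */

#ifdef CONFIG_RT_GROUP_SCHED /* assumed: elided between the hunks */
	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
	p->rt.parent = task_group(p)->rt_se[cpu];
#endif
	rcu_read_unlock();
}
```

The read-side section now brackets every task_group() call in the function, which is the condition CONFIG_PROVE_RCU verifies for rcu_dereference() users.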