author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-08-11 09:30:22 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-08-11 09:30:22 +0200
commit		1b12bbc747560ea68bcc132c3d05699e52271da0 (patch)
tree		0e0fe5b7fe07d411251eebdd053e9e7793820248 /kernel
parent		lockdep: lock_set_subclass - reset a held lock's subclass (diff)
lockdep: re-annotate scheduler runqueues
Instead of using a per-rq lock class, use the regular nesting operations.
However, take extra care with double_lock_balance() as it can release the
already held rq->lock (and therefore change its nesting class).
So what can happen is:
	spin_lock(rq->lock);			// this rq, subclass 0
	double_lock_balance(rq, other_rq);
		// releases rq->lock
		// acquires other_rq->lock, subclass 0
		// re-acquires rq->lock, subclass 1
	spin_unlock(other_rq->lock);

leaving you with rq->lock held in subclass 1.
So a subsequent double_lock_balance() call can try to nest a subclass 1
lock while already holding a subclass 1 lock.
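
For context, the subclass-1 re-acquire comes from the way double_lock_balance() orders the two runqueue locks. The sketch below is only an approximation of that pre-existing helper (it is not part of this patch); the relevant detail is that, once this rq's lock has been dropped, it is re-taken with spin_lock_nested() in subclass SINGLE_DEPTH_NESTING (1):

	static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
	{
		int ret = 0;

		if (unlikely(!spin_trylock(&busiest->lock))) {
			if (busiest < this_rq) {
				/* enforce lock ordering by rq address: drop ours,
				 * take busiest first, then re-take ours nested */
				spin_unlock(&this_rq->lock);
				spin_lock(&busiest->lock);
				spin_lock_nested(&this_rq->lock,
						 SINGLE_DEPTH_NESTING);
				ret = 1;
			} else
				spin_lock_nested(&busiest->lock,
						 SINGLE_DEPTH_NESTING);
		}
		return ret;
	}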
Fix this by introducing double_unlock_balance(), which releases the other
rq's lock and also resets the subclass of this rq's lock back to 0.
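
The resulting caller pattern then stays balanced with respect to lockdep subclasses. A minimal, illustrative sketch (names follow the diff below; the real callers add their own checks and error handling):

	spin_lock(&this_rq->lock);		/* this_rq->lock, subclass 0 */

	if (double_lock_balance(this_rq, busiest)) {
		/* this_rq->lock was dropped and re-acquired above,
		 * so it is currently held in subclass 1 */
	}

	/* ... pull/push tasks between the two runqueues ... */

	/* drops busiest->lock and resets this_rq->lock to subclass 0 */
	double_unlock_balance(this_rq, busiest);

	spin_unlock(&this_rq->lock);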
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	11
-rw-r--r--	kernel/sched_rt.c	8
2 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 655f1db26b12..9b2b6a85577d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2812,6 +2812,13 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3636,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3751,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f9dad0..6163e4cf885b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
 
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 		}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;