author		Peter Zijlstra <peterz@infradead.org>	2015-09-28 18:02:03 +0200
committer	Ingo Molnar <mingo@kernel.org>	2015-10-06 17:08:13 +0200
commit		b99def8b961448f5b9a550dddeeb718e3975e7a6 (patch)
tree		28a9f70f27b02dcbecbdd1e386744f4332e927f9 /kernel/sched
parent		sched/core: Simplify INIT_PREEMPT_COUNT (diff)
sched/core: Rework TASK_DEAD preemption exception
TASK_DEAD is special in that the final schedule call from do_exit() must be
done with preemption disabled. This means we end up scheduling with a
preempt_count() higher than usual (3 instead of the 'expected' 2).

Since future patches will want to rely on an invariant preempt_count() value
during schedule, fix this up.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
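For context, the exception being reworked originates at the tail of do_exit():
the exiting task disables preemption before its final call into schedule(), so
that it cannot be preempted after marking itself TASK_DEAD. A minimal sketch of
that path (simplified, not the verbatim kernel/exit.c source; the extra
preempt_disable() is what raises preempt_count() above the usual value):

	/* Sketch: tail of do_exit(), simplified from kernel/exit.c. */
	preempt_disable();		/* the extra preempt_count() level */
	tsk->state = TASK_DEAD;		/* finish_task_switch() on the next
					 * task does the final put_task_struct() */
	schedule();			/* never returns for a TASK_DEAD task */
	BUG();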
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
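The first hunk below removes the TASK_DEAD exemption from schedule_debug(), so
in_atomic_preempt_off() is now applied unconditionally. Conceptually, that
check asserts that __schedule() runs with exactly the one preempt_count()
level taken on the way in; a sketch of the idea (the exact definition and
constant names in <linux/preempt.h> have varied across kernel versions):

	/* Whine if we schedule with any unexpected preempt_count() level. */
	#define in_atomic_preempt_off()	(preempt_count() != PREEMPT_DISABLE_OFFSET)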
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 88a425443ff4..530fe8baa645 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2949,12 +2949,8 @@ static inline void schedule_debug(struct task_struct *prev)
 #ifdef CONFIG_SCHED_STACK_END_CHECK
 	BUG_ON(unlikely(task_stack_end_corrupted(prev)));
 #endif
-	/*
-	 * Test if we are atomic. Since do_exit() needs to call into
-	 * schedule() atomically, we ignore that path. Otherwise whine
-	 * if we are scheduling when we should not.
-	 */
-	if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
+
+	if (unlikely(in_atomic_preempt_off()))
 		__schedule_bug(prev);
 	rcu_sleep_check();
 
@@ -3053,6 +3049,17 @@ static void __sched __schedule(void)
 	rcu_note_context_switch();
 	prev = rq->curr;
 
+	/*
+	 * do_exit() calls schedule() with preemption disabled as an exception;
+	 * however we must fix that up, otherwise the next task will see an
+	 * inconsistent (higher) preempt count.
+	 *
+	 * It also avoids the below schedule_debug() test from complaining
+	 * about this.
+	 */
+	if (unlikely(prev->state == TASK_DEAD))
+		preempt_enable_no_resched_notrace();
+
 	schedule_debug(prev);
 
 	if (sched_feat(HRTICK))
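The fix-up uses preempt_enable_no_resched_notrace(), which simply drops one
preempt_count() level: no reschedule check is folded in (we are already on our
way into the scheduler) and no preempt tracing is emitted. Roughly, close to
the <linux/preempt.h> definition of this era:

	#define preempt_enable_no_resched_notrace() \
	do { \
		barrier(); \
		__preempt_count_dec(); \
	} while (0)

With TASK_DEAD tasks shedding their extra level here, every caller reaches
schedule_debug() with the same preempt_count(), which is the invariant the
rest of this series builds on.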