From fe0e01c77dd9f7a60916aec2149d8a1182baf63c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 8 Oct 2014 18:51:10 +0200
Subject: tracing: Robustify wait loop

The pending nested sleep debugging triggered on the potential stale
TASK_INTERRUPTIBLE in this code.

While there, fix the loop such that we won't revert to a while(1)
yield() 'spin' loop if we ever get a spurious wakeup.

And fix the actual issue by properly terminating the 'wait' loop by
setting TASK_RUNNING.

Link: http://lkml.kernel.org/p/20141008165110.GA14547@worktop.programming.kicks-ass.net

Reported-by: Fengguang Wu
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_events.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'kernel/trace/trace_events.c')

diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index ef06ce7e9cf8..0cc51edde3a8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2513,8 +2513,11 @@ static __init int event_test_thread(void *unused)
 	kfree(test_malloc);
 
 	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop())
+	while (!kthread_should_stop()) {
 		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	__set_current_state(TASK_RUNNING);
 
 	return 0;
 }
--
cgit v1.2.3
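
Editorial note (not part of the patch): with this change applied, the tail of
event_test_thread() follows the usual kthread wait idiom. The sketch below is
the resulting loop as implied by the diff, with comments added to spell out why
each step matters; the comments are illustrative and do not appear in the
kernel source.

	/* Wait here until kthread_stop() is called for this thread. */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		/*
		 * Re-arm the sleeping state after every wakeup; without this,
		 * a spurious wakeup would leave the task RUNNING and the loop
		 * would degenerate into a busy spin around schedule().
		 */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	/* Terminate the wait loop properly: leave no stale TASK_INTERRUPTIBLE. */
	__set_current_state(TASK_RUNNING);

	return 0;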