author     Steven Rostedt (VMware) <rostedt@goodmis.org>    2017-04-12 00:25:08 +0200
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>    2017-04-17 21:21:19 +0200
commit     b980b117c9ff17226937128a15692a18c9a28ed6
tree       f4f1d2911131998ea103c58846b516ce4c039bbf /kernel/trace/trace_benchmark.c
parent     ftrace: Fix indexing of t_hash_start() from t_next()
tracing: Have the trace_event benchmark thread call cond_resched_rcu_qs()
The trace_event benchmark thread runs in kernel space in an infinite loop while also calling cond_resched() in case anything else wants to schedule in. Unfortunately, on a PREEMPT kernel cond_resched() is a nop, in which case the thread never voluntarily schedules. That causes synchronize_rcu_tasks() to block forever while the benchmark thread is running.

This is exactly what cond_resched_rcu_qs() is for. Use that instead.

Acked-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
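For readers outside the tracing code, a minimal sketch of the pattern this commit fixes follows. It is not taken from the kernel tree: busy_kthread() and busy_work() are hypothetical names, with busy_work() standing in for trace_do_benchmark(). On a CONFIG_PREEMPT kernel cond_resched() compiles to a nop, so a loop like this never passes through a voluntary context switch; cond_resched_rcu_qs() additionally reports an rcu_tasks quiescent state on each iteration, which is what synchronize_rcu_tasks() waits for.

#include <linux/kthread.h>
#include <linux/rcupdate.h>

/* Hypothetical kthread body mirroring the benchmark loop. */
static int busy_kthread(void *arg)
{
	while (!kthread_should_stop()) {
		busy_work();	/* hypothetical; stands in for trace_do_benchmark() */

		/*
		 * cond_resched() alone would be a nop under CONFIG_PREEMPT,
		 * leaving synchronize_rcu_tasks() blocked on this thread.
		 * cond_resched_rcu_qs() also notes an rcu_tasks quiescent
		 * state, so rcu_tasks grace periods can complete.
		 */
		cond_resched_rcu_qs();
	}
	return 0;
}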
Diffstat (limited to 'kernel/trace/trace_benchmark.c')
 -rw-r--r--  kernel/trace/trace_benchmark.c  |  14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index e49fbe901cfc..16a8cf02eee9 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -153,10 +153,18 @@ static int benchmark_event_kthread(void *arg)
 		trace_do_benchmark();
 
 		/*
-		 * We don't go to sleep, but let others
-		 * run as well.
+		 * We don't go to sleep, but let others run as well.
+		 * This is basically a "yield()" to let any task that
+		 * wants to run, schedule in, but if the CPU is idle,
+		 * we'll keep burning cycles.
+		 *
+		 * Note the _rcu_qs() version of cond_resched() will
+		 * notify synchronize_rcu_tasks() that this thread has
+		 * passed a quiescent state for rcu_tasks. Otherwise
+		 * this thread will never voluntarily schedule, which would
+		 * block synchronize_rcu_tasks() indefinitely.
 		 */
-		cond_resched();
+		cond_resched_rcu_qs();
 	}
 
 	return 0;