path: root/kernel/rcu/tasks.h
author	Paul E. McKenney <paulmck@kernel.org>	2020-03-22 21:34:34 +0100
committer	Paul E. McKenney <paulmck@kernel.org>	2020-04-27 20:03:52 +0200
commit	40471509be3cb8c9c02aec1c316614cb96e6fe85 (patch)
tree	dd440382688dc19e677a1b46a20f8620ce53d908 /kernel/rcu/tasks.h
parent	rcu-tasks: Make RCU tasks trace also wait for idle tasks (diff)
rcu-tasks: Add rcu_dynticks_zero_in_eqs() effectiveness statistics
This commit adds counts of the number of calls and number of successful calls to rcu_dynticks_zero_in_eqs(), which are printed at the end of rcutorture runs and at stall time. This allows evaluation of the effectiveness of rcu_dynticks_zero_in_eqs().

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Diffstat (limited to 'kernel/rcu/tasks.h')
-rw-r--r--	kernel/rcu/tasks.h	13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index f272e8f16b81..ce658831c759 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -725,6 +725,11 @@ DECLARE_WAIT_QUEUE_HEAD(trc_wait); // List of holdout tasks.
 // Record outstanding IPIs to each CPU. No point in sending two...
 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
+// The number of detections of task quiescent state relying on
+// heavyweight readers executing explicit memory barriers.
+unsigned long n_heavy_reader_attempts;
+unsigned long n_heavy_reader_updates;
+
 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
 		 "RCU Tasks Trace");
@@ -830,9 +835,11 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 		// If heavyweight readers are enabled on the remote task,
 		// we can inspect its state despite its currently running.
 		// However, we cannot safely change its state.
+		n_heavy_reader_attempts++;
 		if (!ofl && // Check for "running" idle tasks on offline CPUs.
 		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
 			return false; // No quiescent state, do it the hard way.
+		n_heavy_reader_updates++;
 		in_qs = true;
 	} else {
 		in_qs = likely(!t->trc_reader_nesting);
@@ -1147,9 +1154,11 @@ core_initcall(rcu_spawn_tasks_trace_kthread);
 static void show_rcu_tasks_trace_gp_kthread(void)
 {
-	char buf[32];
+	char buf[64];
 
-	sprintf(buf, "N%d", atomic_read(&trc_n_readers_need_end));
+	sprintf(buf, "N%d h:%lu/%lu", atomic_read(&trc_n_readers_need_end),
+		data_race(n_heavy_reader_updates),
+		data_race(n_heavy_reader_attempts));
 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
 }
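
For intuition, below is a minimal standalone sketch of how the two counters added by this patch relate. It is plain userspace C, not kernel code: sketch_zero_in_eqs() and the driver loop are hypothetical stand-ins, and only the counter names and the "h:" output format are taken from the patch. Every inspection of a heavyweight reader bumps n_heavy_reader_attempts; only inspections that actually observe a quiescent state bump n_heavy_reader_updates, so the printed updates/attempts pair is the hit rate of rcu_dynticks_zero_in_eqs().

/*
 * Standalone sketch only.  sketch_zero_in_eqs() is a hypothetical
 * stand-in for rcu_dynticks_zero_in_eqs(); counter names mirror the patch.
 */
#include <stdio.h>

static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;

/* Pretend roughly one inspection in four finds a quiescent state. */
static int sketch_zero_in_eqs(int i)
{
	return i % 4 == 0;
}

int main(void)
{
	for (int i = 0; i < 100; i++) {
		n_heavy_reader_attempts++;	/* every inspection is counted */
		if (!sketch_zero_in_eqs(i))
			continue;		/* no quiescent state observed */
		n_heavy_reader_updates++;	/* successful detection */
	}
	/* Mirrors the "h:updates/attempts" portion of the buffer built in
	 * show_rcu_tasks_trace_gp_kthread(). */
	printf("h:%lu/%lu\n", n_heavy_reader_updates, n_heavy_reader_attempts);
	return 0;
}

Compiled and run, this prints h:25/100; in the kernel, the corresponding ratio is what shows up at the end of rcutorture runs and at stall time, as described in the commit message above.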