author		Paul E. McKenney <paulmck@kernel.org>	2024-08-23 23:15:12 +0200
committer	Neeraj Upadhyay <neeraj.upadhyay@kernel.org>	2024-09-08 20:36:44 +0200
commit		1ecd9d68eb44e4b7972aee2840eb4fdf29b9de2b (patch)
tree		b532b33a6df684d32d27975a7bc09fa42497cfe8 /kernel/rcu
parent		rcu: Let dump_cpu_task() be used without preemption disabled (diff)
rcu: Defer printing stall-warning backtrace when holding rcu_node lock
The rcu_dump_cpu_stacks() function holds the leaf rcu_node structure's ->lock when dumping the stacks of any CPUs stalling the current grace period. This lock is held to prevent confusion that would otherwise occur when the stalled CPU reported its quiescent state (and then went on to do unrelated things) just as the backtrace NMI was heading towards it.

This has worked well, but on larger systems it has recently been observed to cause severe lock contention resulting in CSD-lock stalls and other general unhappiness.

This commit therefore invokes printk_deferred_enter() before acquiring the lock and printk_deferred_exit() after releasing it, thus deferring the overhead of actually outputting the stack trace out of that lock's critical section.

Reported-by: Rik van Riel <riel@surriel.com>
Suggested-by: Rik van Riel <riel@surriel.com>
Signed-off-by: "Paul E. McKenney" <paulmck@kernel.org>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
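As background, the following is a minimal, hypothetical sketch of the printk_deferred_enter()/printk_deferred_exit() usage pattern that this commit applies. The function example_dump_stalled_cpu() and the lock example_lock are made-up illustrative names, not part of the kernel tree; printk_deferred_enter(), printk_deferred_exit(), pr_err(), dump_cpu_task(), and the raw-spinlock primitives are the actual kernel interfaces involved.

	#include <linux/printk.h>
	#include <linux/spinlock.h>
	#include <linux/sched/debug.h>	/* dump_cpu_task() */

	/* Hypothetical lock standing in for the leaf rcu_node structure's ->lock. */
	static DEFINE_RAW_SPINLOCK(example_lock);

	static void example_dump_stalled_cpu(int cpu)
	{
		unsigned long flags;

		/* From here on, printk() output is buffered rather than pushed to consoles. */
		printk_deferred_enter();
		raw_spin_lock_irqsave(&example_lock, flags);

		pr_err("CPU %d appears to be stalling the current grace period.\n", cpu);
		dump_cpu_task(cpu);	/* request the stack dump; the slow console output is deferred */

		raw_spin_unlock_irqrestore(&example_lock, flags);
		/* Re-enable normal printing; the buffered messages reach the consoles later,
		 * outside the lock's critical section. */
		printk_deferred_exit();
	}

The point of the ordering is that the expensive console output no longer happens while the raw spinlock is held, which is what was causing the CSD-lock stalls and lock contention described above.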
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree_stall.h	2
1 file changed, 2 insertions, 0 deletions
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index b497d4c6dabd..9772e1ffcf6e 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -371,6 +371,7 @@ static void rcu_dump_cpu_stacks(void)
 	struct rcu_node *rnp;
 
 	rcu_for_each_leaf_node(rnp) {
+		printk_deferred_enter();
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		for_each_leaf_node_possible_cpu(rnp, cpu)
 			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
@@ -380,6 +381,7 @@ static void rcu_dump_cpu_stacks(void)
 					dump_cpu_task(cpu);
 			}
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+		printk_deferred_exit();
 	}
 }