author		Ingo Molnar <mingo@kernel.org>	2020-06-01 11:47:29 +0200
committer	Ingo Molnar <mingo@kernel.org>	2020-06-01 11:47:29 +0200
commit		cb3cb6733fbd8fd8d2c716095fdca42dadba2063 (patch)
tree		10d8f584a9850d0dc594ca3452af44da3d3b19c0 /kernel
parent		rcu: Allow for smp_call_function() running callbacks from idle (diff)
parent		rcu: Provide rcu_irq_exit_check_preempt() (diff)
Merge branch 'WIP.core/rcu' into core/rcu, to pick up two x86/entry dependencies
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tree.c	100
1 file changed, 80 insertions(+), 20 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f51385b86ea3..c716eadc7617 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -778,6 +778,24 @@ void rcu_irq_exit_preempt(void)
"RCU in extended quiescent state!");
}
+#ifdef CONFIG_PROVE_RCU
+/**
+ * rcu_irq_exit_check_preempt - Validate that scheduling is possible
+ */
+void rcu_irq_exit_check_preempt(void)
+{
+	lockdep_assert_irqs_disabled();
+
+	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
+			 "RCU dynticks_nesting counter underflow/zero!");
+	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
+			 DYNTICK_IRQ_NONIDLE,
+			 "Bad RCU dynticks_nmi_nesting counter\n");
+	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+			 "RCU in extended quiescent state!");
+}
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 /*
  * Wrapper for rcu_irq_exit() where interrupts are enabled.
  *
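
The assertion helper added in the hunk above is aimed at the x86/entry work named in the merge message: an irq-exit path that is about to preempt can call it to verify that preemption is safe from RCU's point of view. A minimal caller sketch, with irqentry_exit_sketch() and its control flow made up for illustration:

	/*
	 * Illustrative only, not from this patch: before an irq-exit path
	 * preempts, assert that RCU is still watching and that the
	 * dynticks nesting counters are sane.  With CONFIG_PROVE_RCU=n,
	 * rcu_irq_exit_check_preempt() would be expected to stub out.
	 */
	static void irqentry_exit_sketch(void)
	{
		if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched()) {
			rcu_irq_exit_check_preempt();	/* IRQs must be off here */
			preempt_schedule_irq();
		}
	}
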
@@ -861,6 +879,67 @@ void noinstr rcu_user_exit(void)
 {
 	rcu_eqs_exit(1);
 }
+
+/**
+ * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
+ *
+ * The scheduler tick is not normally enabled when CPUs enter the kernel
+ * from nohz_full userspace execution. After all, nohz_full userspace
+ * execution is an RCU quiescent state and the time executing in the kernel
+ * is quite short. Except of course when it isn't. And it is not hard to
+ * cause a large system to spend tens of seconds or even minutes looping
+ * in the kernel, which can cause a number of problems, including RCU CPU
+ * stall warnings.
+ *
+ * Therefore, if a nohz_full CPU fails to report a quiescent state
+ * in a timely manner, the RCU grace-period kthread sets that CPU's
+ * ->rcu_urgent_qs flag with the expectation that the next interrupt or
+ * exception will invoke this function, which will turn on the scheduler
+ * tick, which will enable RCU to detect that CPU's quiescent states,
+ * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
+ * The tick will be disabled once a quiescent state is reported for
+ * this CPU.
+ *
+ * Of course, in carefully tuned systems, there might never be an
+ * interrupt or exception. In that case, the RCU grace-period kthread
+ * will eventually cause one to happen. However, in less carefully
+ * controlled environments, this function allows RCU to get what it
+ * needs without creating otherwise useless interruptions.
+ */
+void __rcu_irq_enter_check_tick(void)
+{
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+
+	// Enabling the tick is unsafe in NMI handlers.
+	if (WARN_ON_ONCE(in_nmi()))
+		return;
+
+	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
+
+	if (!tick_nohz_full_cpu(rdp->cpu) ||
+	    !READ_ONCE(rdp->rcu_urgent_qs) ||
+	    READ_ONCE(rdp->rcu_forced_tick)) {
+		// RCU doesn't need nohz_full help from this CPU, or it is
+		// already getting that help.
+		return;
+	}
+
+	// We get here only when not in an extended quiescent state and
+	// from interrupts (as opposed to NMIs). Therefore, (1) RCU is
+	// already watching and (2) The fact that we are in an interrupt
+	// handler and that the rcu_node lock is an irq-disabled lock
+	// prevents self-deadlock. So we can safely recheck under the lock.
+	// Note that the nohz_full state currently cannot change.
+	raw_spin_lock_rcu_node(rdp->mynode);
+	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+		// A nohz_full CPU is in the kernel and RCU needs a
+		// quiescent state. Turn on the tick!
+		WRITE_ONCE(rdp->rcu_forced_tick, true);
+		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
+	}
+	raw_spin_unlock_rcu_node(rdp->mynode);
+}
 #endif /* CONFIG_NO_HZ_FULL */
 
 /**
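
The lockless test followed by a recheck under the irq-disabled rcu_node lock in __rcu_irq_enter_check_tick() above is a double-checked pattern that is easier to see in isolation. A generic sketch, with every identifier made up:

	/*
	 * Generic sketch of the check/lock/recheck pattern used above;
	 * none of these names exist in the kernel.  The cheap READ_ONCE()
	 * test skips the lock in the common case, and the recheck under
	 * the lock ensures the flag and its side effect happen only once,
	 * even if several contexts race here.  As in the function above,
	 * interrupts must already be disabled at the raw_spin_lock(), or
	 * raw_spin_lock_irqsave() must be used instead.
	 */
	struct sketch_state {
		raw_spinlock_t	lock;
		bool		flag;
	};

	static void sketch_set_flag_once(struct sketch_state *sp)
	{
		if (READ_ONCE(sp->flag))	/* fast path: already set */
			return;
		raw_spin_lock(&sp->lock);
		if (!sp->flag) {		/* recheck under the lock */
			WRITE_ONCE(sp->flag, true);
			/* side effect (e.g. tick_dep_set_cpu()) goes here */
		}
		raw_spin_unlock(&sp->lock);
	}
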
@@ -907,26 +986,7 @@ noinstr void rcu_nmi_enter(void)
 		incby = 1;
 	} else if (!in_nmi()) {
 		instrumentation_begin();
-		if (tick_nohz_full_cpu(rdp->cpu) &&
-		    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
-		    READ_ONCE(rdp->rcu_urgent_qs) &&
-		    !READ_ONCE(rdp->rcu_forced_tick)) {
-			// We get here only if we had already exited the
-			// extended quiescent state and this was an
-			// interrupt (not an NMI). Therefore, (1) RCU is
-			// already watching and (2) The fact that we are in
-			// an interrupt handler and that the rcu_node lock
-			// is an irq-disabled lock prevents self-deadlock.
-			// So we can safely recheck under the lock.
-			raw_spin_lock_rcu_node(rdp->mynode);
-			if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
-				// A nohz_full CPU is in the kernel and RCU
-				// needs a quiescent state. Turn on the tick!
-				WRITE_ONCE(rdp->rcu_forced_tick, true);
-				tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
-			}
-			raw_spin_unlock_rcu_node(rdp->mynode);
-		}
+		rcu_irq_enter_check_tick();
 		instrumentation_end();
 	}
 	instrumentation_begin();
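
The single rcu_irq_enter_check_tick() call that replaces the open-coded block in rcu_nmi_enter() goes through an inline wrapper added elsewhere in this series, in include/linux/hardirq.h; it is not part of this diff, so the version below is a reconstruction offered as a sketch:

	/*
	 * Sketch of the wrapper the new call resolves to (the real
	 * definition lands in include/linux/hardirq.h, not in this diff):
	 * take the slow path only when context tracking, and therefore
	 * nohz_full, can be active; otherwise this compiles to nothing.
	 */
	static __always_inline void rcu_irq_enter_check_tick(void)
	{
		if (context_tracking_enabled())
			__rcu_irq_enter_check_tick();
	}
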