author	Frederic Weisbecker <frederic@kernel.org>	2020-11-13 13:13:27 +0100
committer	Paul E. McKenney <paulmck@kernel.org>	2021-01-07 01:24:59 +0100
commit	32aa2f4170d22f0b9fcb75ab05679ab122fae373 (patch)
tree	668df1c7f89bdfb995a220f390dc17f8468aa893 /kernel/rcu/tree.c
parent	rcu/nocb: Only cond_resched() from actual offloaded batch processing (diff)
rcu/nocb: Process batch locally as long as offloading isn't complete
This commit makes sure to process the callbacks locally (via either RCU_SOFTIRQ or the rcuc kthread) whenever the segcblist isn't entirely offloaded. This ensures that callbacks are invoked one way or another while a CPU is in the middle of a toggle operation.

Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Inspired-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
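For illustration only, the short userspace C sketch below (not kernel code; the enum, the toy_* helper names, and the three-state model are hypothetical stand-ins for the segcblist offload state machine) contrasts the pre-patch and post-patch gates: the old check stops local callback invocation as soon as offloading is flagged, while the new do_batch check keeps the local path running until offloading has fully completed, which is the mid-toggle window the commit message describes.

/*
 * Toy userspace model, not kernel code: a hypothetical three-state
 * stand-in for the segcblist offload state machine, used only to
 * contrast the old and new gating conditions in rcu_core().
 */
#include <stdbool.h>
#include <stdio.h>

enum toy_offload_state {
	TOY_NOT_OFFLOADED,	/* callbacks invoked locally (softirq/rcuc) */
	TOY_TOGGLE_IN_PROGRESS,	/* offload being switched on or off */
	TOY_FULLY_OFFLOADED,	/* rcuo kthreads own callback invocation */
};

/* Rough analogue of rcu_segcblist_is_offloaded(): offloading has begun. */
static bool toy_is_offloaded(enum toy_offload_state s)
{
	return s != TOY_NOT_OFFLOADED;
}

/* Rough analogue of rcu_segcblist_completely_offloaded(): toggle done. */
static bool toy_completely_offloaded(enum toy_offload_state s)
{
	return s == TOY_FULLY_OFFLOADED;
}

int main(void)
{
	static const char *const names[] = {
		"not offloaded", "toggle in progress", "fully offloaded",
	};

	for (int s = TOY_NOT_OFFLOADED; s <= TOY_FULLY_OFFLOADED; s++) {
		bool old_gate = !toy_is_offloaded(s);		/* pre-patch  */
		bool do_batch = !toy_completely_offloaded(s);	/* post-patch */

		printf("%-20s old gate: %d  new gate (do_batch): %d\n",
		       names[s], old_gate, do_batch);
	}
	return 0;
}

Running the sketch shows the two gates differ only in the in-between state: under the old check a CPU in the middle of an offload toggle would invoke ready callbacks on neither path, whereas with the new check the local RCU_SOFTIRQ/rcuc path keeps processing them until the rcuo kthreads have fully taken over.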
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	3
1 file changed, 2 insertions, 1 deletion
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4ef59a5416a3..ec14c017c0e3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2700,6 +2700,7 @@ static __latent_entropy void rcu_core(void)
 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 	const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
+	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
 
 	if (cpu_is_offline(smp_processor_id()))
 		return;
@@ -2729,7 +2730,7 @@ static __latent_entropy void rcu_core(void)
 	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
 
 	/* If there are callbacks ready, invoke them. */
-	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
+	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
 		rcu_do_batch(rdp);