author    Paul E. McKenney <paulmck@kernel.org>  2020-12-21 20:17:16 +0100
committer Paul E. McKenney <paulmck@kernel.org>  2021-01-07 01:47:55 +0100
commit    f759081e8f5ac640df1c7125540759bbcb4eb0e2 (patch)
tree      5c526e4459a4267d47965586718a8f7be53f36be /kernel/rcu
parent    rcu/nocb: Add nocb CB kthread list to show_rcu_nocb_state() output (diff)
rcu/nocb: Code-style nits in callback-offloading toggling
This commit addresses a few code-style nits in callback-offloading toggling, including one that predates this toggling.

Cc: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
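Most of the hunks below collapse a nested "if (A) { if (B) return true; } return false;" into a single combined test, and drop braces or else branches that follow an early return. As a minimal standalone illustration of that pattern (plain userspace C; FLAG_ENABLED, FLAG_SOFTIRQ, and the two function names are hypothetical, not the kernel code itself):

/*
 * Sketch of the style cleanup this patch applies: same behavior,
 * one level less nesting. Names are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_ENABLED	0x1
#define FLAG_SOFTIRQ	0x2

/* Before: nested conditionals, one exit per level. */
static bool is_offloaded_before(unsigned int flags)
{
	if (flags & FLAG_ENABLED) {
		if (!(flags & FLAG_SOFTIRQ))
			return true;
	}
	return false;
}

/* After: one combined condition, identical result. */
static bool is_offloaded_after(unsigned int flags)
{
	if ((flags & FLAG_ENABLED) && !(flags & FLAG_SOFTIRQ))
		return true;
	return false;
}

int main(void)
{
	for (unsigned int f = 0; f < 4; f++)
		printf("flags=%u before=%d after=%d\n",
		       f, is_offloaded_before(f), is_offloaded_after(f));
	return 0;
}

The two forms are equivalent; the rcu_segcblist.h and tree_plugin.h hunks below apply the same transformation to the real flag tests.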
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/rcu_segcblist.h  19
-rw-r--r--  kernel/rcu/rcutorture.c      2
-rw-r--r--  kernel/rcu/tree_plugin.h    45
3 files changed, 29 insertions(+), 37 deletions(-)
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 311060279f1a..9a19328ff251 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -80,17 +80,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
}
-/* Is the specified rcu_segcblist offloaded? */
+/* Is the specified rcu_segcblist offloaded, or is SEGCBLIST_SOFTIRQ_ONLY set? */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
- if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) {
- /*
- * Complete de-offloading happens only when SEGCBLIST_SOFTIRQ_ONLY
- * is set.
- */
- if (!rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY))
- return true;
- }
+ if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
+ !rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY))
+ return true;
return false;
}
@@ -99,10 +94,8 @@ static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
{
int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;
- if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) {
- if ((rsclp->flags & flags) == flags)
- return true;
- }
+ if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags)
+ return true;
return false;
}
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 22735bc3eacc..b9dd63c166b9 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1606,7 +1606,7 @@ rcu_torture_stats_print(void)
data_race(n_barrier_successes),
data_race(n_barrier_attempts),
data_race(n_rcu_torture_barrier_error));
- pr_cont("read-exits: %ld ", data_race(n_read_exits));
+ pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
pr_cont("nocb-toggles: %ld:%ld\n",
atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index bc63a6b9d532..6f56f9e51e67 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1962,17 +1962,17 @@ static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_state)
*needwake_state = true;
}
return true;
- } else {
- /*
- * De-offloading. Clear our flag and notify the de-offload worker.
- * We will ignore this rdp until it ever gets re-offloaded.
- */
- WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
- rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
- if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
- *needwake_state = true;
- return false;
}
+
+ /*
+ * De-offloading. Clear our flag and notify the de-offload worker.
+ * We will ignore this rdp until it ever gets re-offloaded.
+ */
+ WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+ rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
+ if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
+ *needwake_state = true;
+ return false;
}
@@ -2005,6 +2005,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
bool needwake_state = false;
+
if (!nocb_gp_enabled_cb(rdp))
continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
@@ -2160,11 +2161,11 @@ static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
static void nocb_cb_wait(struct rcu_data *rdp)
{
struct rcu_segcblist *cblist = &rdp->cblist;
- struct rcu_node *rnp = rdp->mynode;
- bool needwake_state = false;
- bool needwake_gp = false;
unsigned long cur_gp_seq;
unsigned long flags;
+ bool needwake_state = false;
+ bool needwake_gp = false;
+ struct rcu_node *rnp = rdp->mynode;
local_irq_save(flags);
rcu_momentary_dyntick_idle();
@@ -2217,8 +2218,8 @@ static void nocb_cb_wait(struct rcu_data *rdp)
swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
nocb_cb_wait_cond(rdp));
- /* ^^^ Ensure CB invocation follows _sleep test. */
- if (smp_load_acquire(&rdp->nocb_cb_sleep)) {
+ // VVV Ensure CB invocation follows _sleep test.
+ if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
WARN_ON(signal_pending(current));
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
}
@@ -2323,7 +2324,7 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
unsigned long flags;
int ret;
- printk("De-offloading %d\n", rdp->cpu);
+ pr_info("De-offloading %d\n", rdp->cpu);
rcu_nocb_lock_irqsave(rdp, flags);
/*
@@ -2384,11 +2385,10 @@ int rcu_nocb_cpu_deoffload(int cpu)
mutex_lock(&rcu_state.barrier_mutex);
cpus_read_lock();
if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
- if (cpu_online(cpu)) {
+ if (cpu_online(cpu))
ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
- } else {
+ else
ret = __rcu_nocb_rdp_deoffload(rdp);
- }
if (!ret)
cpumask_clear_cpu(cpu, rcu_nocb_mask);
}
@@ -2412,7 +2412,7 @@ static int __rcu_nocb_rdp_offload(struct rcu_data *rdp)
if (!rdp->nocb_gp_rdp)
return -EINVAL;
- printk("Offloading %d\n", rdp->cpu);
+ pr_info("Offloading %d\n", rdp->cpu);
/*
* Can't use rcu_nocb_lock_irqsave() while we are in
* SEGCBLIST_SOFTIRQ_ONLY mode.
@@ -2460,11 +2460,10 @@ int rcu_nocb_cpu_offload(int cpu)
mutex_lock(&rcu_state.barrier_mutex);
cpus_read_lock();
if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
- if (cpu_online(cpu)) {
+ if (cpu_online(cpu))
ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
- } else {
+ else
ret = __rcu_nocb_rdp_offload(rdp);
- }
if (!ret)
cpumask_set_cpu(cpu, rcu_nocb_mask);
}
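A side note on the two printk() -> pr_info() conversions above: pr_info() is the kernel's wrapper that emits the message with an explicit KERN_INFO log level rather than the console default. A simplified userspace stand-in (an assumption for illustration, not the kernel's actual definition, which encodes the level with a SOH byte):

#include <stdio.h>

#define KERN_INFO "<6>"		/* loglevel prefix, simplified */
#define pr_info(fmt, ...) printf(KERN_INFO fmt, ##__VA_ARGS__)

int main(void)
{
	int cpu = 3;

	pr_info("De-offloading %d\n", cpu);	/* carries an explicit INFO level */
	return 0;
}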