Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--  kernel/rcu/tree.c  55
1 file changed, 32 insertions(+), 23 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 42b317e13776..f07343b54fe5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -97,7 +97,7 @@ struct rcu_state sname##_state = { \
.level = { &sname##_state.node[0] }, \
.rda = &sname##_data, \
.call = cr, \
- .fqs_state = RCU_GP_IDLE, \
+ .gp_state = RCU_GP_IDLE, \
.gpnum = 0UL - 300UL, \
.completed = 0UL - 300UL, \
.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
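For orientation, once ->fqs_state is folded into ->gp_state, RCU_GP_IDLE becomes one of the grace-period phase values. A hedged sketch of that table, paraphrased from kernel/rcu/tree.h around this change (the authoritative list lives there, not in this file):

	/* Values for rcu_state structure's gp_state field (sketch). */
	#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
	#define RCU_GP_WAIT_GPS	 1	/* Wait for grace-period start. */
	#define RCU_GP_DONE_GPS	 2	/* Wait done for grace-period start. */
	#define RCU_GP_WAIT_FQS	 3	/* Wait for force-quiescent-state time. */
	#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
	#define RCU_GP_CLEANUP	 5	/* Grace-period cleanup started. */
	#define RCU_GP_CLEANED	 6	/* Grace-period cleanup complete. */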
@@ -350,12 +350,14 @@ static void rcu_momentary_dyntick_idle(void)
*/
void rcu_note_context_switch(void)
{
+ barrier(); /* Avoid RCU read-side critical sections leaking down. */
trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs();
rcu_preempt_note_context_switch();
if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
rcu_momentary_dyntick_idle();
trace_rcu_utilization(TPS("End context switch"));
+ barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
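The barrier() calls added above are compiler barriers only. A minimal sketch of what barrier() expands to (see include/linux/compiler.h):

	/* Empty asm with a "memory" clobber: the compiler may not reorder
	 * memory accesses across it, so accesses belonging to an RCU
	 * read-side critical section cannot drift past the quiescent-state
	 * report.  It emits no instructions and imposes no CPU ordering. */
	#define barrier() __asm__ __volatile__("" : : : "memory")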
@@ -366,12 +368,19 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
* RCU flavors in desperate need of a quiescent state, which will normally
* be none of them). Either way, do a lightweight quiescent state for
* all RCU flavors.
+ *
+ * The barrier() calls are redundant in the common case when this is
+ * called externally, but are included in case this is called from
+ * within this file.
+ *
*/
void rcu_all_qs(void)
{
+ barrier(); /* Avoid RCU read-side critical sections leaking down. */
if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
rcu_momentary_dyntick_idle();
this_cpu_inc(rcu_qs_ctr);
+ barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_all_qs);
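What those barriers rule out, as a hypothetical caller sketch (struct my_data, process(), and gp are invented names for illustration, not kernel APIs):

	static void poll_once(struct my_data __rcu **gp)
	{
		struct my_data *p;

		rcu_read_lock();
		p = rcu_dereference(*gp);
		process(p);		/* must stay within the critical section */
		rcu_read_unlock();

		rcu_all_qs();		/* quiescent state: without the barrier()s,
					 * the compiler could let the loads above
					 * drift past this report */
	}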
@@ -1940,16 +1949,15 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
/*
* Do one round of quiescent-state forcing.
*/
-static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
{
- int fqs_state = fqs_state_in;
bool isidle = false;
unsigned long maxj;
struct rcu_node *rnp = rcu_get_root(rsp);
WRITE_ONCE(rsp->gp_activity, jiffies);
rsp->n_force_qs++;
- if (fqs_state == RCU_SAVE_DYNTICK) {
+ if (first_time) {
/* Collect dyntick-idle snapshots. */
if (is_sysidle_rcu_state(rsp)) {
isidle = true;
@@ -1958,7 +1966,6 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
force_qs_rnp(rsp, dyntick_save_progress_counter,
&isidle, &maxj);
rcu_sysidle_report_gp(rsp, isidle, maxj);
- fqs_state = RCU_FORCE_QS;
} else {
/* Handle dyntick-idle and offline CPUs. */
isidle = true;
@@ -1972,7 +1979,6 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
raw_spin_unlock_irq(&rnp->lock);
}
- return fqs_state;
}
/*
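In miniature, this refactor trades the RCU_SAVE_DYNTICK/RCU_FORCE_QS state values for a boolean: only the first forcing pass of a grace period snapshots the per-CPU dyntick counters, and every later pass rechecks them. A hedged reduction, where the two helpers are invented stand-ins for the force_qs_rnp() calls above:

	static void fqs_pass(struct rcu_state *rsp, bool first_time)
	{
		if (first_time)
			snapshot_dynticks(rsp);	/* dyntick_save_progress_counter */
		else
			recheck_dynticks(rsp);	/* rcu_implicit_dynticks_qs */
	}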
@@ -2036,7 +2042,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
/* Declare grace period done. */
WRITE_ONCE(rsp->completed, rsp->gpnum);
trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
- rsp->fqs_state = RCU_GP_IDLE;
+ rsp->gp_state = RCU_GP_IDLE;
rdp = this_cpu_ptr(rsp->rda);
/* Advance CBs to reduce false positives below. */
needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
@@ -2054,7 +2060,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
*/
static int __noreturn rcu_gp_kthread(void *arg)
{
- int fqs_state;
+ bool first_gp_fqs;
int gf;
unsigned long j;
int ret;
@@ -2086,7 +2092,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
}
/* Handle quiescent-state forcing. */
- fqs_state = RCU_SAVE_DYNTICK;
+ first_gp_fqs = true;
j = jiffies_till_first_fqs;
if (j > HZ) {
j = HZ;
@@ -2114,7 +2120,8 @@ static int __noreturn rcu_gp_kthread(void *arg)
trace_rcu_grace_period(rsp->name,
READ_ONCE(rsp->gpnum),
TPS("fqsstart"));
- fqs_state = rcu_gp_fqs(rsp, fqs_state);
+ rcu_gp_fqs(rsp, first_gp_fqs);
+ first_gp_fqs = false;
trace_rcu_grace_period(rsp->name,
READ_ONCE(rsp->gpnum),
TPS("fqsend"));
@@ -3030,7 +3037,7 @@ static void rcu_leak_callback(struct rcu_head *rhp)
* is expected to specify a CPU.
*/
static void
-__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+__call_rcu(struct rcu_head *head, rcu_callback_t func,
struct rcu_state *rsp, int cpu, bool lazy)
{
unsigned long flags;
@@ -3101,7 +3108,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
/*
* Queue an RCU-sched callback for invocation after a grace period.
*/
-void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
__call_rcu(head, func, &rcu_sched_state, -1, 0);
}
@@ -3110,7 +3117,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
-void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
@@ -3124,7 +3131,7 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
* function may only be called from __kfree_rcu().
*/
void kfree_call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu))
+ rcu_callback_t func)
{
__call_rcu(head, func, rcu_state_p, -1, 1);
}
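rcu_callback_t is the typedef these signatures switch to; it is declared in include/linux/types.h as:

	typedef void (*rcu_callback_t)(struct rcu_head *head);

A hypothetical usage sketch (struct foo and its helpers are invented for illustration):

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static void foo_reclaim(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct foo, rcu));
	}

	static void foo_remove(struct foo *p)
	{
		/* ...first unlink p from every RCU-visible structure... */
		call_rcu_sched(&p->rcu, foo_reclaim);	/* matches rcu_callback_t */
	}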
@@ -4492,13 +4499,12 @@ static void __init rcu_init_geometry(void)
rcu_fanout_leaf, nr_cpu_ids);
/*
- * The boot-time rcu_fanout_leaf parameter is only permitted
- * to increase the leaf-level fanout, not decrease it. Of course,
- * the leaf-level fanout cannot exceed the number of bits in
- * the rcu_node masks. Complain and fall back to the compile-
- * time values if these limits are exceeded.
+ * The boot-time rcu_fanout_leaf parameter must be at least two
+ * and cannot exceed the number of bits in the rcu_node masks.
+ * Complain and fall back to the compile-time values if this
+ * limit is exceeded.
*/
- if (rcu_fanout_leaf < RCU_FANOUT_LEAF ||
+ if (rcu_fanout_leaf < 2 ||
rcu_fanout_leaf > sizeof(unsigned long) * 8) {
rcu_fanout_leaf = RCU_FANOUT_LEAF;
WARN_ON(1);
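Restated as a predicate (illustration only, not kernel code), the new check accepts leaf fanouts from 2 up to the width of an rcu_node bitmask, which is 64 bits on 64-bit kernels and 32 on 32-bit:

	static bool leaf_fanout_in_range(int leaf)
	{
		return leaf >= 2 && leaf <= sizeof(unsigned long) * 8;
	}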
@@ -4515,10 +4521,13 @@ static void __init rcu_init_geometry(void)
/*
* The tree must be able to accommodate the configured number of CPUs.
- * If this limit is exceeded than we have a serious problem elsewhere.
+ * If this limit is exceeded, fall back to the compile-time values.
*/
- if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1])
- panic("rcu_init_geometry: rcu_capacity[] is too small");
+ if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
+ rcu_fanout_leaf = RCU_FANOUT_LEAF;
+ WARN_ON(1);
+ return;
+ }
/* Calculate the number of levels in the tree. */
for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
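For context, rcu_capacity[] is filled in earlier in rcu_init_geometry(); a hedged sketch of the arithmetic both checks rely on, assuming the 64-bit defaults RCU_FANOUT=64 and RCU_FANOUT_LEAF=16:

	/* rcu_capacity[i] = CPUs coverable by a tree of i+1 levels:
	 *   rcu_capacity[0] = rcu_fanout_leaf;               e.g.    16
	 *   rcu_capacity[1] = rcu_capacity[0] * RCU_FANOUT;  e.g.  1024
	 *   rcu_capacity[2] = rcu_capacity[1] * RCU_FANOUT;  e.g. 65536
	 * nr_cpu_ids above rcu_capacity[RCU_NUM_LVLS - 1] now falls back
	 * to the compile-time geometry instead of panicking. */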