author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-08-02 22:53:17 +0200
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-09-21 06:16:19 +0200
commit	8203d6d0ee784cfb2ebf89053f7fe399abc867d7 (patch)
tree	cb0598f473834f117a06b1eeab32576fcddd0e52 /kernel/rcu/tree_plugin.h
parent	rcu: Consolidate tree setup for synchronize_rcu_expedited() (diff)
rcu: Use single-stage IPI algorithm for RCU expedited grace period
The current preemptible-RCU expedited grace-period algorithm invokes synchronize_sched_expedited() to enqueue all tasks currently running in a preemptible-RCU read-side critical section, then waits for all the ->blkd_tasks lists to drain. This works, but results in both an IPI and a double context switch even on CPUs that do not happen to be running in a preemptible-RCU read-side critical section.

This commit implements a new algorithm that causes less OS jitter. This new algorithm IPIs all online CPUs that are not idle (from an RCU perspective), but refrains from self-IPIs. If a CPU receiving this IPI is not in a preemptible-RCU read-side critical section (or is just now exiting one), it pushes quiescence up the rcu_node tree; otherwise, it sets a flag that will be handled by the upcoming outermost rcu_read_unlock(), which will then push quiescence up the tree.

The expedited grace period must of course wait on any pre-existing blocked readers, and newly blocked readers must be queued carefully based on the state of both the normal and the expedited grace periods. This new queueing approach also avoids the need to update boost state, courtesy of the fact that blocked tasks are no longer ever migrated to the root rcu_node structure.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
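As a rough illustration of the queueing rules this patch introduces, the following standalone user-space C program models the sixteen-entry decision table from rcu_preempt_ctxt_queue(). The state bits and the case groupings mirror the switch statement in the patch below; the queue_position() helper, the printed labels, and main() are invented purely for illustration and are not part of the patch.

/*
 * Standalone model (not kernel code) of the ->blkd_tasks queueing
 * decision table added by this patch.  The four state bits and the
 * head/tail/after-gp_tasks/after-exp_tasks outcomes mirror the switch
 * statement in rcu_preempt_ctxt_queue(); the printing and the helper
 * name are illustrative only.
 */
#include <stdio.h>

#define RCU_GP_TASKS	0x8	/* ->gp_tasks set: normal GP already waiting on list. */
#define RCU_EXP_TASKS	0x4	/* ->exp_tasks set: expedited GP already waiting on list. */
#define RCU_GP_BLKD	0x2	/* This task blocks the current normal GP. */
#define RCU_EXP_BLKD	0x1	/* This task blocks the current expedited GP. */

static const char *queue_position(int blkd_state)
{
	switch (blkd_state) {
	case 0:
	case RCU_EXP_TASKS:
	case RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:
		/* Blocks no already-waiting GP: queue at the head. */
		return "head of ->blkd_tasks";
	case RCU_EXP_BLKD:
	case RCU_GP_BLKD:
	case RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
		/* First task blocking a GP not yet waiting on the list: tail. */
		return "tail of ->blkd_tasks";
	case RCU_EXP_TASKS + RCU_EXP_BLKD:
	case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:
		/* Another task blocking the expedited GP: just after ->exp_tasks. */
		return "immediately after ->exp_tasks";
	case RCU_GP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
		/* Another task blocking only the normal GP: just after ->gp_tasks. */
		return "immediately after ->gp_tasks";
	default:
		return "impossible state";
	}
}

int main(void)
{
	int s;

	for (s = 0; s <= 0xf; s++)
		printf("state %#x -> queue at %s\n", s, queue_position(s));
	return 0;
}

Building and running this prints the queueing decision for each of the sixteen possible combinations of the four state bits, which can be checked against the switch statement in the diff.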
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	296
1 file changed, 250 insertions(+), 46 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 62d05413b7ba..6f7500f9387c 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -101,7 +101,6 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
-static int rcu_preempted_readers_exp(struct rcu_node *rnp);
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
bool wake);
@@ -114,6 +113,147 @@ static void __init rcu_bootup_announce(void)
rcu_bootup_announce_oddness();
}
+/* Flags for rcu_preempt_ctxt_queue() decision table. */
+#define RCU_GP_TASKS 0x8
+#define RCU_EXP_TASKS 0x4
+#define RCU_GP_BLKD 0x2
+#define RCU_EXP_BLKD 0x1
+
+/*
+ * Queues a task preempted within an RCU-preempt read-side critical
+ * section into the appropriate location within the ->blkd_tasks list,
+ * depending on the states of any ongoing normal and expedited grace
+ * periods. The ->gp_tasks pointer indicates which element the normal
+ * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
+ * indicates which element the expedited grace period is waiting on (again,
+ * NULL if none). If a grace period is waiting on a given element in the
+ * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
+ * adding a task to the tail of the list blocks any grace period that is
+ * already waiting on one of the elements. In contrast, adding a task
+ * to the head of the list won't block any grace period that is already
+ * waiting on one of the elements.
+ *
+ * This queuing is imprecise, and can sometimes make an ongoing grace
+ * period wait for a task that is not strictly speaking blocking it.
+ * Given the choice, we needlessly block a normal grace period rather than
+ * blocking an expedited grace period.
+ *
+ * Note that an endless sequence of expedited grace periods still cannot
+ * indefinitely postpone a normal grace period. Eventually, all of the
+ * fixed number of preempted tasks blocking the normal grace period that are
+ * not also blocking the expedited grace period will resume and complete
+ * their RCU read-side critical sections. At that point, the ->gp_tasks
+ * pointer will equal the ->exp_tasks pointer, at which point the end of
+ * the corresponding expedited grace period will also be the end of the
+ * normal grace period.
+ */
+static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
+ unsigned long flags) __releases(rnp->lock)
+{
+ int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
+ (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
+ (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
+ (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
+ struct task_struct *t = current;
+
+ /*
+ * Decide where to queue the newly blocked task. In theory,
+ * this could be an if-statement. In practice, when I tried
+ * that, it was quite messy.
+ */
+ switch (blkd_state) {
+ case 0:
+ case RCU_EXP_TASKS:
+ case RCU_EXP_TASKS + RCU_GP_BLKD:
+ case RCU_GP_TASKS:
+ case RCU_GP_TASKS + RCU_EXP_TASKS:
+
+ /*
+ * Blocking neither GP, or first task blocking the normal
+ * GP but not blocking the already-waiting expedited GP.
+ * Queue at the head of the list to avoid unnecessarily
+ * blocking the already-waiting GPs.
+ */
+ list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
+ break;
+
+ case RCU_EXP_BLKD:
+ case RCU_GP_BLKD:
+ case RCU_GP_BLKD + RCU_EXP_BLKD:
+ case RCU_GP_TASKS + RCU_EXP_BLKD:
+ case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
+ case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
+
+ /*
+ * First task arriving that blocks either GP, or first task
+ * arriving that blocks the expedited GP (with the normal
+ * GP already waiting), or a task arriving that blocks
+ * both GPs with both GPs already waiting. Queue at the
+ * tail of the list to avoid any GP waiting on any of the
+ * already queued tasks that are not blocking it.
+ */
+ list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
+ break;
+
+ case RCU_EXP_TASKS + RCU_EXP_BLKD:
+ case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
+ case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD:
+
+ /*
+ * Second or subsequent task blocking the expedited GP.
+ * The task either does not block the normal GP, or is the
+ * first task blocking the normal GP. Queue just after
+ * the first task blocking the expedited GP.
+ */
+ list_add(&t->rcu_node_entry, rnp->exp_tasks);
+ break;
+
+ case RCU_GP_TASKS + RCU_GP_BLKD:
+ case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
+
+ /*
+ * Second or subsequent task blocking the normal GP.
+ * The task does not block the expedited GP. Queue just
+ * after the first task blocking the normal GP.
+ */
+ list_add(&t->rcu_node_entry, rnp->gp_tasks);
+ break;
+
+ default:
+
+ /* Yet another exercise in excessive paranoia. */
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ /*
+ * We have now queued the task. If it was the first one to
+ * block either grace period, update the ->gp_tasks and/or
+ * ->exp_tasks pointers, respectively, to reference the newly
+ * blocked tasks.
+ */
+ if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
+ rnp->gp_tasks = &t->rcu_node_entry;
+ if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
+ rnp->exp_tasks = &t->rcu_node_entry;
+ raw_spin_unlock(&rnp->lock);
+
+ /*
+ * Report the quiescent state for the expedited GP. This expedited
+ * GP should not be able to end until we report, so there should be
+ * no need to check for a subsequent expedited GP. (Though we are
+ * still in a quiescent state in any case.)
+ */
+ if (blkd_state & RCU_EXP_BLKD &&
+ t->rcu_read_unlock_special.b.exp_need_qs) {
+ t->rcu_read_unlock_special.b.exp_need_qs = false;
+ rcu_report_exp_rdp(rdp->rsp, rdp, true);
+ } else {
+ WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
+ }
+ local_irq_restore(flags);
+}
+
/*
* Record a preemptible-RCU quiescent state for the specified CPU. Note
* that this just means that the task currently running on the CPU is
@@ -167,42 +307,18 @@ static void rcu_preempt_note_context_switch(void)
t->rcu_blocked_node = rnp;
/*
- * If this CPU has already checked in, then this task
- * will hold up the next grace period rather than the
- * current grace period. Queue the task accordingly.
- * If the task is queued for the current grace period
- * (i.e., this CPU has not yet passed through a quiescent
- * state for the current grace period), then as long
- * as that task remains queued, the current grace period
- * cannot end. Note that there is some uncertainty as
- * to exactly when the current grace period started.
- * We take a conservative approach, which can result
- * in unnecessarily waiting on tasks that started very
- * slightly after the current grace period began. C'est
- * la vie!!!
- *
- * But first, note that the current CPU must still be
- * on line!
+ * Verify the CPU's sanity, trace the preemption, and
+ * then queue the task as required based on the states
+ * of any ongoing and expedited grace periods.
*/
WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
- if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
- list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
- rnp->gp_tasks = &t->rcu_node_entry;
- if (IS_ENABLED(CONFIG_RCU_BOOST) &&
- rnp->boost_tasks != NULL)
- rnp->boost_tasks = rnp->gp_tasks;
- } else {
- list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
- if (rnp->qsmask & rdp->grpmask)
- rnp->gp_tasks = &t->rcu_node_entry;
- }
trace_rcu_preempt_task(rdp->rsp->name,
t->pid,
(rnp->qsmask & rdp->grpmask)
? rnp->gpnum
: rnp->gpnum + 1);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ rcu_preempt_ctxt_queue(rnp, rdp, flags);
} else if (t->rcu_read_lock_nesting < 0 &&
t->rcu_read_unlock_special.s) {
@@ -272,6 +388,7 @@ void rcu_read_unlock_special(struct task_struct *t)
unsigned long flags;
struct list_head *np;
bool drop_boost_mutex = false;
+ struct rcu_data *rdp;
struct rcu_node *rnp;
union rcu_special special;
@@ -282,8 +399,8 @@ void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags);
/*
- * If RCU core is waiting for this CPU to exit critical section,
- * let it know that we have done so. Because irqs are disabled,
+ * If RCU core is waiting for this CPU to exit its critical section,
+ * report the fact that it has exited. Because irqs are disabled,
* t->rcu_read_unlock_special cannot change.
*/
special = t->rcu_read_unlock_special;
@@ -296,13 +413,32 @@ void rcu_read_unlock_special(struct task_struct *t)
}
}
+ /*
+ * Respond to a request for an expedited grace period, but only if
+ * we were not preempted, meaning that we were running on the same
+ * CPU throughout. If we were preempted, the exp_need_qs flag
+ * would have been cleared at the time of the first preemption,
+ * and the quiescent state would be reported when we were dequeued.
+ */
+ if (special.b.exp_need_qs) {
+ WARN_ON_ONCE(special.b.blocked);
+ t->rcu_read_unlock_special.b.exp_need_qs = false;
+ rdp = this_cpu_ptr(rcu_state_p->rda);
+ rcu_report_exp_rdp(rcu_state_p, rdp, true);
+ if (!t->rcu_read_unlock_special.s) {
+ local_irq_restore(flags);
+ return;
+ }
+ }
+
/* Hardware IRQ handlers cannot block, complain if they get here. */
if (in_irq() || in_serving_softirq()) {
lockdep_rcu_suspicious(__FILE__, __LINE__,
"rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
- pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
+ pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
t->rcu_read_unlock_special.s,
t->rcu_read_unlock_special.b.blocked,
+ t->rcu_read_unlock_special.b.exp_need_qs,
t->rcu_read_unlock_special.b.need_qs);
local_irq_restore(flags);
return;
@@ -329,7 +465,7 @@ void rcu_read_unlock_special(struct task_struct *t)
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
}
empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
- empty_exp = !rcu_preempted_readers_exp(rnp);
+ empty_exp = sync_rcu_preempt_exp_done(rnp);
smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
np = rcu_next_node_entry(t, rnp);
list_del_init(&t->rcu_node_entry);
@@ -353,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
* Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
* so we must take a snapshot of the expedited state.
*/
- empty_exp_now = !rcu_preempted_readers_exp(rnp);
+ empty_exp_now = sync_rcu_preempt_exp_done(rnp);
if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
rnp->gpnum,
@@ -536,27 +672,98 @@ void synchronize_rcu(void)
EXPORT_SYMBOL_GPL(synchronize_rcu);
/*
+ * Remote handler for smp_call_function_single(). If there is an
+ * RCU read-side critical section in effect, request that the
+ * next rcu_read_unlock() record the quiescent state up the
+ * ->expmask fields in the rcu_node tree. Otherwise, immediately
+ * report the quiescent state.
+ */
+static void sync_rcu_exp_handler(void *info)
+{
+ struct rcu_data *rdp;
+ struct rcu_state *rsp = info;
+ struct task_struct *t = current;
+
+ /*
+ * Within an RCU read-side critical section, request that the next
+ * rcu_read_unlock() report. Unless this RCU read-side critical
+ * section has already blocked, in which case it is already set
+ * up for the expedited grace period to wait on it.
+ */
+ if (t->rcu_read_lock_nesting > 0 &&
+ !t->rcu_read_unlock_special.b.blocked) {
+ t->rcu_read_unlock_special.b.exp_need_qs = true;
+ return;
+ }
+
+ /*
+ * We are either exiting an RCU read-side critical section (negative
+ * values of t->rcu_read_lock_nesting) or are not in one at all
+ * (zero value of t->rcu_read_lock_nesting). Or we are in an RCU
+ * read-side critical section that blocked before this expedited
+ * grace period started. Either way, we can immediately report
+ * the quiescent state.
+ */
+ rdp = this_cpu_ptr(rsp->rda);
+ rcu_report_exp_rdp(rsp, rdp, true);
+}
+
+/*
* Select the nodes that the upcoming expedited grace period needs
* to wait for.
*/
-static void sync_rcu_exp_select_nodes(struct rcu_state *rsp)
+static void sync_rcu_exp_select_cpus(struct rcu_state *rsp)
{
+ int cpu;
unsigned long flags;
+ unsigned long mask;
+ unsigned long mask_ofl_test;
+ unsigned long mask_ofl_ipi;
+ int ret;
struct rcu_node *rnp;
sync_exp_reset_tree(rsp);
rcu_for_each_leaf_node(rsp, rnp) {
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
- rnp->expmask = 0; /* No per-CPU component yet. */
- if (!rcu_preempt_has_tasks(rnp)) {
- /* FIXME: Want __rcu_report_exp_rnp() here. */
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- } else {
+
+ /* Each pass checks a CPU for identity, offline, and idle. */
+ mask_ofl_test = 0;
+ for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+ if (raw_smp_processor_id() == cpu ||
+ cpu_is_offline(cpu) ||
+ !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+ mask_ofl_test |= rdp->grpmask;
+ }
+ mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
+
+ /*
+ * Need to wait for any blocked tasks as well. Note that
+ * additional blocking tasks will also block the expedited
+ * GP until such time as the ->expmask bits are cleared.
+ */
+ if (rcu_preempt_has_tasks(rnp))
rnp->exp_tasks = rnp->blkd_tasks.next;
- rcu_initiate_boost(rnp, flags);
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+
+ /* IPI the remaining CPUs for expedited quiescent state. */
+ mask = 1;
+ for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
+ if (!(mask_ofl_ipi & mask))
+ continue;
+ ret = smp_call_function_single(cpu,
+ sync_rcu_exp_handler,
+ rsp, 0);
+ if (!ret)
+ mask_ofl_ipi &= ~mask;
}
- rcu_report_exp_rnp(rsp, rnp, false);
+ /* Report quiescent states for those that went offline. */
+ mask_ofl_test |= mask_ofl_ipi;
+ if (mask_ofl_test)
+ rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
}
}
@@ -587,11 +794,8 @@ void synchronize_rcu_expedited(void)
rcu_exp_gp_seq_start(rsp);
- /* force all RCU readers onto ->blkd_tasks lists. */
- synchronize_sched_expedited();
-
/* Initialize the rcu_node tree in preparation for the wait. */
- sync_rcu_exp_select_nodes(rsp);
+ sync_rcu_exp_select_cpus(rsp);
/* Wait for snapshotted ->blkd_tasks lists to drain. */
rnp = rcu_get_root(rsp);