path: root/kernel/rcu/tree_plugin.h
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-09-25 00:04:06 +0200
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-12-03 19:10:18 +0100
commit    78e4bc34e5d966cfd95f1238565afc399d56225c (patch)
tree      e430291c48ec41b22e31865bf5aa13c4db87a3cb /kernel/rcu/tree_plugin.h
parent    rcu: Kick CPU halfway to RCU CPU stall warning (diff)
download  linux-78e4bc34e5d966cfd95f1238565afc399d56225c.tar.xz
          linux-78e4bc34e5d966cfd95f1238565afc399d56225c.zip
rcu: Fix and comment ordering around wait_event()
It is all too easy to forget that wait_event() does not necessarily imply a full memory barrier. The case where it does not is where the condition transitions to true just as wait_event() starts execution. This is actually a feature: The standard use of wait_event() involves locking, in which case the locks provide the needed ordering (you hold a lock across the wake_up() and acquire that same lock after wait_event() returns).

Given that I did forget that wait_event() does not necessarily imply a full memory barrier in one case, this commit fixes that case. This commit also adds comments calling out the placement of existing memory barriers relied on by wait_event() calls.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
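For context, here is a minimal, hypothetical sketch (not taken from the patch; the demo_* names and values are made up) of the ordering the commit message describes: a waker that publishes data before setting a condition and calling wake_up(), and a lock-free waiter that cannot assume wait_event() itself supplied a barrier:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;
static int demo_data;

/* Waker: publish the payload, then set the flag and wake the waiter. */
static void demo_waker(void)
{
	demo_data = 42;		/* Publish the payload... */
	smp_mb();		/* ...before the flag store and the wake_up(). */
	demo_ready = 1;
	wake_up(&demo_wq);
}

/* Waiter: wait for the flag, then consume the payload. */
static int demo_waiter(void)
{
	wait_event(demo_wq, demo_ready);
	/*
	 * If demo_ready was already 1 when wait_event() started, the
	 * macro can return without executing any barrier on this CPU,
	 * so order the flag read before the payload read explicitly.
	 */
	smp_mb();
	return demo_data;
}

In the common pattern where both sides hold the same lock around the flag update and again after wait_event() returns, the waiter-side smp_mb() above would be unnecessary; the hunks below add a barrier, or document an existing one, at the lock-free wait_event()/wake_up() sites in tree_plugin.h.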
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h  |  6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c0..b023e5407111 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -779,8 +779,10 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 		}
 		if (rnp->parent == NULL) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-			if (wake)
+			if (wake) {
+				smp_mb(); /* EGP done before wake_up(). */
 				wake_up(&sync_rcu_preempt_exp_wq);
+			}
 			break;
 		}
 		mask = rnp->grpmask;
@@ -1852,6 +1854,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 
 	/* Wait for callbacks from earlier instance to complete. */
 	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
 
 	/*
 	 * Prevent premature wakeup: ensure that all increments happen
@@ -2250,6 +2253,7 @@ static int rcu_nocb_kthread(void *arg)
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    TPS("Sleep"));
 			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+			/* Memory barrier provided by xchg() below. */
 		} else if (firsttime) {
 			firsttime = 0;
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,