path: root/kernel/rcu/tree.h
author     Ingo Molnar <mingo@kernel.org>  2016-03-15 09:00:12 +0100
committer  Ingo Molnar <mingo@kernel.org>  2016-03-15 09:01:06 +0100
commit     8bc6782fe20bd2584c73a35c47329c9fd0a8d34c (patch)
tree       c7fc6f467ee212e4ef442e70843c48fcf3c67c17 /kernel/rcu/tree.h
parent     Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/ke... (diff)
parent     rcu: Remove rcu_user_hooks_switch (diff)
Merge commit 'fixes.2015.02.23a' into core/rcu
Conflicts:
	kernel/rcu/tree.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r--  kernel/rcu/tree.h  42
1 file changed, 31 insertions(+), 11 deletions(-)
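Note before the diff: the patch below switches accesses to rcu_node's ->lock over to the __private annotation and the ACCESS_PRIVATE() accessor from include/linux/compiler.h. As a rough sketch of how those were defined around this kernel version (paraphrased for context, not part of this patch), they look like:

#ifdef __CHECKER__				/* sparse static checker builds */
# define __private	__attribute__((noderef))
# define ACCESS_PRIVATE(p, member) \
	(*((typeof((p)->member) __force *)&(p)->member))
#else
# define __private				/* no effect in normal builds */
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif

So for normal builds the wrappers compile down to plain lock operations on ->lock, while sparse flags any direct dereference of the __private field outside ACCESS_PRIVATE(), which is what enforces use of the wrappers added below.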
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bbd235d0e71f..df668c0f9e64 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -150,8 +150,9 @@ struct rcu_dynticks {
* Definition for node within the RCU grace-period-detection hierarchy.
*/
struct rcu_node {
- raw_spinlock_t lock; /* Root rcu_node's lock protects some */
- /* rcu_state fields as well as following. */
+ raw_spinlock_t __private lock; /* Root rcu_node's lock protects */
+ /* some rcu_state fields as well as */
+ /* following. */
unsigned long gpnum; /* Current grace period for this node. */
/* This will either be equal to or one */
/* behind the root rcu_node's gpnum. */
@@ -682,7 +683,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
#endif /* #else #ifdef CONFIG_PPC */
/*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
*
* Because the rcu_nodes form a tree, the tree traversal locking will observe
* different lock values; this in turn means that an UNLOCK of one level
@@ -691,29 +692,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
*
* In order to restore full ordering between tree levels, augment the regular
* lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * Because ->lock of struct rcu_node is a __private field, one should use
+ * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
*/
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
- raw_spin_lock(&rnp->lock);
+ raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
smp_mb__after_unlock_lock();
}
+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+ raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
- raw_spin_lock_irq(&rnp->lock);
+ raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
smp_mb__after_unlock_lock();
}
-#define raw_spin_lock_irqsave_rcu_node(rnp, flags) \
-do { \
- typecheck(unsigned long, flags); \
- raw_spin_lock_irqsave(&(rnp)->lock, flags); \
- smp_mb__after_unlock_lock(); \
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+ raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags) \
+do { \
+ typecheck(unsigned long, flags); \
+ raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags) \
+do { \
+ typecheck(unsigned long, flags); \
+ raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags); \
} while (0)
static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
- bool locked = raw_spin_trylock(&rnp->lock);
+ bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
if (locked)
smp_mb__after_unlock_lock();
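
A minimal caller-side sketch of the new wrappers (hypothetical function, not part of this patch): with ->lock now private, call sites take and release an rcu_node's lock only through these helpers, picking up the smp_mb__after_unlock_lock() ordering on each acquisition.

/* Hypothetical illustration only; not in the kernel tree. */
static void example_update_node(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);	/* lock + full ordering */
	/* ... update per-node grace-period state under the lock ... */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}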