author    Peter Zijlstra <peterz@infradead.org>  2016-05-24 15:00:38 +0200
committer Ingo Molnar <mingo@kernel.org>         2016-06-14 11:55:16 +0200
commit    b316ff783d17bd6217804e2e4385ce9347d7dad9 (patch)
tree      3a9fad86e590ef5c74e4e6cc2948265d1a22dea5 /net
parent    locking/spinlock: Update spin_unlock_wait() users (diff)
locking/spinlock, netfilter: Fix nf_conntrack_lock() barriers

Even with spin_unlock_wait() fixed, nf_conntrack_lock{,_all}() is
broken as it misses a bunch of memory barriers to order the whole
global vs. local locks scheme.

Even x86 (and other TSO archs) are affected.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[ Updated the comments. ]
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
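
The central hazard is a store-buffering shape: nf_conntrack_all_lock()
stores nf_conntrack_locks_all and then loads the per-bucket lock states
via spin_unlock_wait(), while nf_conntrack_lock() stores to its bucket
lock and then loads the flag. Even TSO architectures like x86 allow a
load to complete before an earlier store has drained from the store
buffer, so without a full barrier between the store and the loads both
sides can miss each other's lock and enter their critical sections
concurrently; the smp_rmb() covers the companion load->load leg inside
nf_conntrack_lock()'s retry loop. Below is a minimal userspace C11
sketch of the store->load leg, not the kernel code: the names (sb.c,
global_locker, local_locker, flag, lock) are invented for the demo, and
seq_cst fences stand in for the kernel's smp_mb():

    /* sb.c -- userspace sketch of the store->load reordering this
     * patch guards against (hypothetical demo, not kernel code).
     * 'flag' stands in for nf_conntrack_locks_all, 'lock' for one
     * nf_conntrack_locks[] entry.  Build: cc -O2 -pthread sb.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int flag, lock;
    static int r0, r1;

    static void *global_locker(void *arg) /* models nf_conntrack_all_lock() */
    {
            (void)arg;
            atomic_store_explicit(&flag, 1, memory_order_relaxed);
            /* Drop this fence and r0 == 0 && r1 == 0 becomes a legal
             * outcome even under TSO: the load can be satisfied while
             * the store still sits in the store buffer. */
            atomic_thread_fence(memory_order_seq_cst);  /* smp_mb() */
            r0 = atomic_load_explicit(&lock, memory_order_relaxed);
            return NULL;
    }

    static void *local_locker(void *arg)  /* models nf_conntrack_lock() */
    {
            (void)arg;
            atomic_store_explicit(&lock, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);
            r1 = atomic_load_explicit(&flag, memory_order_relaxed);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, global_locker, NULL);
            pthread_create(&b, NULL, local_locker, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            /* With both fences, at least one thread observes the
             * other's store, so r0 and r1 cannot both be 0. */
            printf("r0=%d r1=%d\n", r0, r1);
            return 0;
    }

With both fences present the outcome r0 == 0 && r1 == 0 is forbidden;
remove either fence and it becomes reachable, which is exactly the
window the patch closes. A single run rarely exhibits the reordering,
so loop the program many times to observe it.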
Diffstat (limited to 'net')
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index db2312eeb2a4..b8c5501d3872 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -83,6 +83,13 @@ void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
 	spin_lock(lock);
 	while (unlikely(nf_conntrack_locks_all)) {
 		spin_unlock(lock);
+
+		/*
+		 * Order the 'nf_conntrack_locks_all' load vs. the
+		 * spin_unlock_wait() loads below, to ensure
+		 * that 'nf_conntrack_locks_all_lock' is indeed held:
+		 */
+		smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
 		spin_unlock_wait(&nf_conntrack_locks_all_lock);
 		spin_lock(lock);
 	}
@@ -128,6 +135,14 @@ static void nf_conntrack_all_lock(void)
 	spin_lock(&nf_conntrack_locks_all_lock);
 	nf_conntrack_locks_all = true;
 
+	/*
+	 * Order the above store of 'nf_conntrack_locks_all' against
+	 * the spin_unlock_wait() loads below, such that if
+	 * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
+	 * we must observe nf_conntrack_locks[] held:
+	 */
+	smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+
 	for (i = 0; i < CONNTRACK_LOCKS; i++) {
 		spin_unlock_wait(&nf_conntrack_locks[i]);
 	}
@@ -135,7 +150,13 @@ static void nf_conntrack_all_lock(void)
 
 static void nf_conntrack_all_unlock(void)
 {
-	nf_conntrack_locks_all = false;
+	/*
+	 * All prior stores must be complete before we clear
+	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+	 * might observe the false value but not the entire
+	 * critical section:
+	 */
+	smp_store_release(&nf_conntrack_locks_all, false);
 	spin_unlock(&nf_conntrack_locks_all_lock);
 }
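
The third hunk's smp_store_release() orders every store made inside the
global critical section before the clearing of nf_conntrack_locks_all,
so a CPU that observes the flag as false cannot observe a half-finished
critical section. A minimal userspace sketch of that release/acquire
pairing follows, with invented names (locks_all, protected_data,
all_unlock, reader) and a C11 acquire load standing in for the ordering
the kernel gets from taking the bucket spinlock and re-checking the flag:

    /* release.c -- sketch of the release publication in
     * nf_conntrack_all_unlock() (hypothetical names, not kernel code). */
    #include <stdatomic.h>

    static atomic_int locks_all = 1;  /* 1 = global lock held at start */
    static int protected_data;        /* state guarded by the scheme   */

    void all_unlock(void)        /* models nf_conntrack_all_unlock() */
    {
            protected_data = 42; /* store inside the critical section */
            /* Release: the store above is ordered before the flag
             * clear, mirroring
             * smp_store_release(&nf_conntrack_locks_all, false). */
            atomic_store_explicit(&locks_all, 0, memory_order_release);
    }

    int reader(void)             /* models nf_conntrack_lock()'s recheck */
    {
            /* Acquire pairs with the release: observing locks_all == 0
             * implies observing protected_data == 42 as well. */
            if (atomic_load_explicit(&locks_all, memory_order_acquire) == 0)
                    return protected_data;
            return -1;           /* flag still set: keep waiting */
    }

Seeing locks_all == 0 through the acquire load synchronizes with the
release store, so the read of protected_data returns 42 rather than a
stale value; without the release, the flag clear could become visible
before the critical-section stores, which is the failure mode the
patch's comment describes.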