author     Linus Torvalds <torvalds@linux-foundation.org>  2014-06-03 21:57:53 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-03 21:57:53 +0200
commit     776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree       f6a6136374642323cfefd7d6399ea429f9018ade /include
parent     Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kerne... (diff)
parent     rwsem: Add comments to explain the meaning of the rwsem's count field (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more usecases
     and makes the existing ones less buggy, especially in rarer
     architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
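The rename is mechanical at call sites: the four atomic-op barrier names and the two clear_bit barrier names all collapse into one before/after pair. A minimal sketch of the conversion pattern (obj, refcnt, flags and BUSY_BIT are illustrative names, not taken from this diff):

  /* Before this series: one barrier name per operation family. */
  smp_mb__before_atomic_dec();
  atomic_dec(&obj->refcnt);
  smp_mb__before_clear_bit();
  clear_bit(BUSY_BIT, &obj->flags);

  /* After: a single pair covers atomic_*() RMW ops and bitops alike. */
  smp_mb__before_atomic();
  atomic_dec(&obj->refcnt);
  smp_mb__before_atomic();
  clear_bit(BUSY_BIT, &obj->flags);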
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/atomic.h          7
-rw-r--r--  include/asm-generic/barrier.h         8
-rw-r--r--  include/asm-generic/bitops.h          9
-rw-r--r--  include/asm-generic/bitops/atomic.h   2
-rw-r--r--  include/asm-generic/bitops/lock.h     2
-rw-r--r--  include/linux/atomic.h               36
-rw-r--r--  include/linux/bitops.h               20
-rw-r--r--  include/linux/buffer_head.h           2
-rw-r--r--  include/linux/genhd.h                 2
-rw-r--r--  include/linux/interrupt.h             8
-rw-r--r--  include/linux/netdevice.h             2
-rw-r--r--  include/linux/sched.h                 6
-rw-r--r--  include/linux/sunrpc/sched.h          8
-rw-r--r--  include/linux/sunrpc/xprt.h           8
-rw-r--r--  include/linux/tracehook.h             2
-rw-r--r--  include/net/ip_vs.h                   4
16 files changed, 88 insertions(+), 38 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 33bd2de3bc1e..9c79e7603459 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -16,6 +16,7 @@
#define __ASM_GENERIC_ATOMIC_H
#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
#ifdef CONFIG_SMP
/* Force people to define core atomics */
@@ -182,11 +183,5 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
}
#endif
-/* Assume that atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */
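The deleted block hard-coded the assumption that atomic RMW ops already serialize, turning the old barriers into pure compiler barriers for every user of asm-generic/atomic.h. After this series the safe default lives in asm-generic/barrier.h (next file), and an architecture whose atomics really are fully ordered opts back into the cheap version by overriding the pair in its own barrier header. A sketch of such an override, roughly what x86 does (placement and wording here are an assumption, not part of this diff):

  /* In an architecture's asm/barrier.h -- only correct when every
   * atomic RMW op already implies a full memory barrier, so a
   * compiler-only barrier suffices. */
  #define smp_mb__before_atomic()	barrier()
  #define smp_mb__after_atomic()	barrier()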
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 6f692f8ac664..1402fa855388 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -62,6 +62,14 @@
#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#endif
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic() smp_mb()
+#endif
+
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index 280ca7a96f75..dcdcacf2fd2b 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -11,14 +11,7 @@
#include <linux/irqflags.h>
#include <linux/compiler.h>
-
-/*
- * clear_bit may not imply a memory barrier
- */
-#ifndef smp_mb__before_clear_bit
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-#endif
+#include <asm/barrier.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 9ae6c34dc191..49673510b484 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 308a9e22c802..c30266e94806 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -20,7 +20,7 @@
*/
#define clear_bit_unlock(nr, addr) \
do { \
- smp_mb__before_clear_bit(); \
+ smp_mb__before_atomic(); \
clear_bit(nr, addr); \
} while (0)
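clear_bit_unlock() is the release half; the acquire half it pairs with is test_and_set_bit_lock() from the same header. A hedged usage sketch of the full pairing ('word' and bit 0 are made up for the example, not from this diff):

  /* Illustrative bit spinlock built on these primitives. */
  unsigned long word = 0;

  while (test_and_set_bit_lock(0, &word))	/* acquire semantics */
  	cpu_relax();
  /* ... critical section: stores made here ... */
  clear_bit_unlock(0, &word);			/* release: barrier, then clear */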
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a8540ecf..fef3a809e7cf 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -3,6 +3,42 @@
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
+/*
+ * Provide __deprecated wrappers for the new interface, avoid flag day changes.
+ * We need the ugly external functions to break header recursion hell.
+ */
+#ifndef smp_mb__before_atomic_inc
+static inline void __deprecated smp_mb__before_atomic_inc(void)
+{
+ extern void __smp_mb__before_atomic(void);
+ __smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_atomic_inc
+static inline void __deprecated smp_mb__after_atomic_inc(void)
+{
+ extern void __smp_mb__after_atomic(void);
+ __smp_mb__after_atomic();
+}
+#endif
+
+#ifndef smp_mb__before_atomic_dec
+static inline void __deprecated smp_mb__before_atomic_dec(void)
+{
+ extern void __smp_mb__before_atomic(void);
+ __smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_atomic_dec
+static inline void __deprecated smp_mb__after_atomic_dec(void)
+{
+ extern void __smp_mb__after_atomic(void);
+ __smp_mb__after_atomic();
+}
+#endif
+
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
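The extern declaration inside each wrapper body is what breaks the recursion the comment mentions: linux/atomic.h cannot include the header that defines smp_mb__before_atomic() without looping back on itself, so the real barrier hides behind an out-of-line function compiled in a .c file. A sketch of what those definitions look like (the exact file is an assumption; it is not part of this diff):

  /* In a .c file that can safely include the full barrier machinery. */
  #include <linux/atomic.h>

  void __smp_mb__before_atomic(void)
  {
  	smp_mb__before_atomic();
  }
  EXPORT_SYMBOL(__smp_mb__before_atomic);

  void __smp_mb__after_atomic(void)
  {
  	smp_mb__after_atomic();
  }
  EXPORT_SYMBOL(__smp_mb__after_atomic);

The same two out-of-line functions also back the deprecated smp_mb__{before,after}_clear_bit() wrappers added to linux/bitops.h below.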
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index be5fd38bd5a0..cbc5833fb221 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -32,6 +32,26 @@ extern unsigned long __sw_hweight64(__u64 w);
*/
#include <asm/bitops.h>
+/*
+ * Provide __deprecated wrappers for the new interface, avoid flag day changes.
+ * We need the ugly external functions to break header recursion hell.
+ */
+#ifndef smp_mb__before_clear_bit
+static inline void __deprecated smp_mb__before_clear_bit(void)
+{
+ extern void __smp_mb__before_atomic(void);
+ __smp_mb__before_atomic();
+}
+#endif
+
+#ifndef smp_mb__after_clear_bit
+static inline void __deprecated smp_mb__after_clear_bit(void)
+{
+ extern void __smp_mb__after_atomic(void);
+ __smp_mb__after_atomic();
+}
+#endif
+
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
(bit) < (size); \
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c40302f909ce..7cbf837a279c 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -278,7 +278,7 @@ static inline void get_bh(struct buffer_head *bh)
static inline void put_bh(struct buffer_head *bh)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&bh->b_count);
}
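put_bh() shows the put-side idiom that recurs through the rest of this diff (genhd, tasklets, ip_vs): a full barrier before the reference drop, so the caller's earlier stores are visible to whichever CPU eventually sees the count reach its final value. The same idiom with illustrative names ('obj', 'refcnt' and obj_free() are invented for the sketch):

  static inline void obj_put(struct obj *o)
  {
  	smp_mb__before_atomic();	/* publish prior stores before the dec */
  	if (atomic_dec_and_test(&o->refcnt))
  		obj_free(o);
  }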
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 9f3c275e053e..ec274e0f4ed2 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -649,7 +649,7 @@ static inline void hd_ref_init(struct hd_struct *part)
static inline void hd_struct_get(struct hd_struct *part)
{
atomic_inc(&part->ref);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
static inline int hd_struct_try_get(struct hd_struct *part)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 051c85032f48..cb19f09d7e3e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -491,7 +491,7 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
static inline void tasklet_unlock(struct tasklet_struct *t)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
@@ -539,7 +539,7 @@ static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
atomic_inc(&t->count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
static inline void tasklet_disable(struct tasklet_struct *t)
@@ -551,13 +551,13 @@ static inline void tasklet_disable(struct tasklet_struct *t)
static inline void tasklet_enable(struct tasklet_struct *t)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&t->count);
}
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&t->count);
}
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b42d07b0390b..6c1ae9fd9505 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,7 +493,7 @@ static inline void napi_disable(struct napi_struct *n)
static inline void napi_enable(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 221b2bde3723..4dce5d844b74 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2785,10 +2785,8 @@ static inline bool __must_check current_set_polling_and_test(void)
/*
* Polling state must be visible before we test NEED_RESCHED,
* paired by resched_task()
- *
- * XXX: assumes set/clear bit are identical barrier wise.
*/
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
@@ -2806,7 +2804,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
* Polling state must be visible before we test NEED_RESCHED,
* paired by resched_task()
*/
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
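The pairing the comment refers to is a classic store-buffer (Dekker-style) pattern between the polling idle path and resched_task(): with a full barrier on each side, at least one of the two tests must observe the other side's store, so a wakeup cannot be lost. Sketched as a comment, not verbatim kernel code:

  /*
   * Poller (this function):             Waker (resched_task()):
   *
   *   set_bit(TIF_POLLING_NRFLAG)         set_bit(TIF_NEED_RESCHED)
   *   smp_mb__after_atomic();             smp_mb();
   *   test TIF_NEED_RESCHED               test TIF_POLLING_NRFLAG
   *
   * If the waker observes POLLING it may skip the resched IPI, because
   * the poller is then guaranteed to observe NEED_RESCHED; if it does
   * not observe POLLING, it sends the IPI.
   */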
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 3a847de83fab..ad7dbe2cfecd 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -142,18 +142,18 @@ struct rpc_task_setup {
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_clear_running(t) \
do { \
- smp_mb__before_clear_bit(); \
+ smp_mb__before_atomic(); \
clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
- smp_mb__after_clear_bit(); \
+ smp_mb__after_atomic(); \
} while (0)
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t) \
do { \
- smp_mb__before_clear_bit(); \
+ smp_mb__before_atomic(); \
clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
- smp_mb__after_clear_bit(); \
+ smp_mb__after_atomic(); \
} while (0)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 3e5efb2b236e..3876f0f1dfd3 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -379,9 +379,9 @@ static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_CONNECTING, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static inline int xprt_connecting(struct rpc_xprt *xprt)
@@ -411,9 +411,9 @@ static inline void xprt_clear_bound(struct rpc_xprt *xprt)
static inline void xprt_clear_binding(struct rpc_xprt *xprt)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_BINDING, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 1e98b5530425..6f8ab7da27c4 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -191,7 +191,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
* pairs with task_work_add()->set_notify_resume() after
* hlist_add_head(task->task_works);
*/
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (unlikely(current->task_works))
task_work_run();
}
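The pairing here is the flag-then-queue idiom: task_work_add() publishes the work item and then sets the notify flag, while the resume path clears the flag and then, after the barrier, re-reads the list, so work added concurrently with the clear is never missed. Sketched (not verbatim kernel code):

  /*
   * Producer (task_work_add):         Consumer (resume path):
   *
   *   hlist_add_head(work, ...);        clear TIF_NOTIFY_RESUME
   *   set_notify_resume(task);          smp_mb__after_atomic();
   *                                     if (current->task_works)
   *                                             task_work_run();
   *
   * Either set_notify_resume() lands after the clear, re-arming the
   * flag for another pass, or the consumer's list read lands after the
   * insertion and picks the work up immediately.
   */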
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 5679d927562b..624a8a54806d 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1204,7 +1204,7 @@ static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&cp->refcnt);
}
void ip_vs_conn_put(struct ip_vs_conn *cp);
@@ -1408,7 +1408,7 @@ static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&dest->refcnt);
}