author | Peter Zijlstra <peterz@infradead.org> | 2014-04-23 16:12:30 +0200
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2014-08-14 12:48:14 +0200
commit | 560cb12a4080a48b84da8b96878cafbd193c4d64 (patch)
tree | e0b28be89d66e8a01b164b7c6123e918cafcc79c /include
parent | locking,arch,xtensa: Fold atomic_ops (diff)
download | linux-560cb12a4080a48b84da8b96878cafbd193c4d64.tar.xz linux-560cb12a4080a48b84da8b96878cafbd193c4d64.zip
locking,arch: Rewrite generic atomic support
Rewrite the generic atomic support to require only cmpxchg(), and generate
all other primitives from it.
Furthermore, reduce the endless repetition across these primitives to
a few CPP macros. This way we get more for fewer lines.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140508135852.940119622@infradead.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
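The commit's central trick generalizes beyond the kernel: any read-modify-write primitive can be synthesized from a compare-and-swap retry loop. Below is a minimal user-space sketch of that idea, with C11 atomics standing in for the kernel's cmpxchg(); the function name add_return_via_cas() is illustrative, not from the patch.

```c
#include <stdatomic.h>
#include <stdio.h>

/*
 * User-space sketch of the patch's idea: build a read-modify-write
 * primitive out of nothing but a compare-and-swap retry loop.
 */
static int add_return_via_cas(atomic_int *v, int i)
{
	int c = atomic_load(v);

	/*
	 * On failure, compare_exchange_weak reloads the current value
	 * into c, so each retry operates on fresh data until we win.
	 */
	while (!atomic_compare_exchange_weak(v, &c, c + i))
		;

	return c + i;	/* like atomic_add_return(): the new value */
}

int main(void)
{
	atomic_int v = 5;

	printf("%d\n", add_return_via_cas(&v, 3));	/* prints 8 */
	return 0;
}
```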
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-generic/atomic.h | 192
-rw-r--r-- | include/asm-generic/atomic64.h | 20
2 files changed, 112 insertions, 100 deletions
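To make the macro machinery concrete before the diff: hand-expanding `ATOMIC_OP_RETURN(add, +)` from the SMP branch of the patch (op = add, c_op = +) yields the function below; `atomic_t` and `cmpxchg()` are the kernel's own, supplied by the surrounding headers.

```c
/* Hand-expansion of ATOMIC_OP_RETURN(add, +), SMP variant, for reference */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	while ((old = cmpxchg(&v->counter, c, c + i)) != c)
		c = old;

	return c + i;
}
```

The UP branch expands the same invocation into a raw_local_irq_save()/raw_local_irq_restore() critical section instead, since excluding interrupts is sufficient on a uniprocessor.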
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 9c79e7603459..56d4d36e1531 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -18,14 +18,100 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
+/*
+ * atomic_$op() - $op integer to atomic variable
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
+ * smp_mb__{before,after}_atomic().
+ */
+
+/*
+ * atomic_$op_return() - $op interer to atomic variable and returns the result
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does imply a full memory barrier.
+ */
+
 #ifdef CONFIG_SMP
-/* Force people to define core atomics */
-# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
-     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
-#  error "SMP requires a little arch-specific magic"
-# endif
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	int c, old;							\
+									\
+	c = v->counter;							\
+	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+		c = old;						\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	int c, old;							\
+									\
+	c = v->counter;							\
+	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+		c = old;						\
+									\
+	return c c_op i;						\
+}
+
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	raw_local_irq_save(flags);					\
+	v->counter = v->counter c_op i;					\
+	raw_local_irq_restore(flags);					\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	raw_local_irq_save(flags);					\
+	ret = (v->counter = v->counter c_op i);				\
+	raw_local_irq_restore(flags);					\
+									\
+	return ret;							\
+}
+
+#endif /* CONFIG_SMP */
+
+#ifndef atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#endif
+
+#ifndef atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+#endif
+
+#ifndef atomic_clear_mask
+ATOMIC_OP(and, &)
+#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
 #endif
 
+#ifndef atomic_set_mask
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+ATOMIC_OP(or, |)
+#define atomic_set_mask(i, v)	atomic_or((i), (v))
+#endif
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
@@ -33,8 +119,6 @@
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#ifdef __KERNEL__
-
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
@@ -56,52 +140,6 @@
 
 #include <linux/irqflags.h>
 
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- */
-#ifndef atomic_add_return
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int temp;
-
-	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
-	temp = v->counter;
-	temp += i;
-	v->counter = temp;
-	raw_local_irq_restore(flags);
-
-	return temp;
-}
-#endif
-
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- */
-#ifndef atomic_sub_return
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int temp;
-
-	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
-	temp = v->counter;
-	temp -= i;
-	v->counter = temp;
-	raw_local_irq_restore(flags);
-
-	return temp;
-}
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v)
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-  int c, old;
-  c = atomic_read(v);
-  while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
-    c = old;
-  return c;
-}
-
-/**
- * atomic_clear_mask - Atomically clear bits in atomic variable
- * @mask: Mask of the bits to be cleared
- * @v: pointer of type atomic_t
- *
- * Atomically clears the bits set in @mask from @v
- */
-#ifndef atomic_clear_mask
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	mask = ~mask;
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-	v->counter &= mask;
-	raw_local_irq_restore(flags);
+	int c, old;
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c;
 }
-#endif
-
-/**
- * atomic_set_mask - Atomically set bits in atomic variable
- * @mask: Mask of the bits to be set
- * @v: pointer of type atomic_t
- *
- * Atomically sets the bits set in @mask in @v
- */
-#ifndef atomic_set_mask
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-	v->counter |= mask;
-	raw_local_irq_restore(flags);
-}
-#endif
-#endif /* __KERNEL__ */
 
 #endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f9ee3d..30ad9c86cebb 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -20,10 +20,22 @@ typedef struct {
 
 extern long long atomic64_read(const atomic64_t *v);
 extern void atomic64_set(atomic64_t *v, long long i);
-extern void atomic64_add(long long a, atomic64_t *v);
-extern long long atomic64_add_return(long long a, atomic64_t *v);
-extern void atomic64_sub(long long a, atomic64_t *v);
-extern long long atomic64_sub_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP(op)						\
+extern void	 atomic64_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP_RETURN(op)					\
+extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
 extern long long atomic64_dec_if_positive(atomic64_t *v);
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
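For reference, each `ATOMIC64_OPS(op)` invocation in the atomic64.h hunk above is shorthand for the declaration pair it replaced; `ATOMIC64_OPS(add)` expands to exactly the two externs the patch removes:

```c
/* Expansion of ATOMIC64_OPS(add): the same two declarations as before */
extern void      atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v);
```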