author	Peter Zijlstra <peterz@infradead.org>	2014-03-26 18:04:44 +0100
committer	Ingo Molnar <mingo@kernel.org>	2014-08-14 12:48:11 +0200
commit	15e3f6d782fc6ff7e004b40642ad895b91ae78bf (patch)
tree	894a8f52a270f1b2efdea955b346e75d2eb891a9 /arch
parent	locking,arch,mn10300: Fold atomic_ops (diff)
locking,arch,parisc: Fold atomic_ops
OK, no LoC saved in this case because sub was defined in terms of add.
Still do it because this also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Helge Deller <deller@gmx.de>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-parisc@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.659342353@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
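
For reference, this is a sketch of what the generator macros below produce for ATOMIC_OPS(add, +=) after preprocessing, assuming the _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore() helpers already defined in this header:

    static __inline__ void atomic_add(int i, atomic_t *v)
    {
    	unsigned long flags;

    	_atomic_spin_lock_irqsave(v, flags);
    	v->counter += i;
    	_atomic_spin_unlock_irqrestore(v, flags);
    }

    static __inline__ int atomic_add_return(int i, atomic_t *v)
    {
    	unsigned long flags;
    	int ret;

    	_atomic_spin_lock_irqsave(v, flags);
    	ret = (v->counter += i);
    	_atomic_spin_unlock_irqrestore(v, flags);

    	return ret;
    }

ATOMIC_OPS(sub, -=) generates atomic_sub() and atomic_sub_return() the same way; a new op then costs exactly one ATOMIC_OPS() line.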
Diffstat (limited to 'arch')
-rw-r--r--	arch/parisc/include/asm/atomic.h	113
1 file changed, 69 insertions(+), 44 deletions(-)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 0be2db2c7d44..219750bb4ae7 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -55,24 +55,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values.
*/
-/* It's possible to reduce all atomic operations to either
- * __atomic_add_return, atomic_set and atomic_read (the latter
- * is there only for consistency).
- */
-
-static __inline__ int __atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- unsigned long flags;
- _atomic_spin_lock_irqsave(v, flags);
-
- ret = (v->counter += i);
-
- _atomic_spin_unlock_irqrestore(v, flags);
- return ret;
-}
-
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void atomic_set(atomic_t *v, int i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -115,16 +98,43 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+#define ATOMIC_OP(op, c_op) \
+static __inline__ void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ v->counter c_op i; \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ ret = (v->counter c_op i); \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
-#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
-#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int) (i)),(v))))
-#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
-#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
+#define atomic_inc(v) (atomic_add( 1,(v)))
+#define atomic_dec(v) (atomic_add( -1,(v)))
-#define atomic_add_return(i,v) (__atomic_add_return( (i),(v)))
-#define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v)))
-#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
-#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
+#define atomic_inc_return(v) (atomic_add_return( 1,(v)))
+#define atomic_dec_return(v) (atomic_add_return( -1,(v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
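
With this hunk the derived helpers bottom out in the generated functions: atomic_inc_return(v) now expands to atomic_add_return(1, (v)) instead of the removed __atomic_add_return(). Note also that the old atomic_sub()/atomic_sub_return() negated their argument before adding, while the generated versions apply -= directly under the lock, sidestepping the negation of i entirely (including the INT_MIN corner case). A hypothetical caller, purely for illustration:

    static int example(atomic_t *v)
    {
    	atomic_sub(16, v);		/* generated by ATOMIC_OPS(sub, -=) */
    	atomic_inc(v);			/* expands to atomic_add(1, v) */
    	return atomic_dec_return(v);	/* atomic_add_return(-1, v) */
    }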
@@ -148,18 +158,37 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) { (i) }
-static __inline__ s64
-__atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- unsigned long flags;
- _atomic_spin_lock_irqsave(v, flags);
+#define ATOMIC64_OP(op, c_op) \
+static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ v->counter c_op i; \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, c_op) \
+static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ s64 ret; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ ret = (v->counter c_op i); \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+ \
+ return ret; \
+}
- ret = (v->counter += i);
+#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
- _atomic_spin_unlock_irqrestore(v, flags);
- return ret;
-}
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
@@ -178,15 +207,11 @@ atomic64_read(const atomic64_t *v)
return (*(volatile long *)&(v)->counter);
}
-#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v))))
-#define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)(i)),(v))))
-#define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v))))
-#define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v))))
+#define atomic64_inc(v) (atomic64_add( 1,(v)))
+#define atomic64_dec(v) (atomic64_add( -1,(v)))
-#define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)(i)),(v)))
-#define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)(i)),(v)))
-#define atomic64_inc_return(v) (__atomic64_add_return( 1,(v)))
-#define atomic64_dec_return(v) (__atomic64_add_return( -1,(v)))
+#define atomic64_inc_return(v) (atomic64_add_return( 1,(v)))
+#define atomic64_dec_return(v) (atomic64_add_return( -1,(v)))
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
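
All of the above rests on the spinlock-hash fallback defined earlier in atomic.h: parisc has no atomic read-modify-write instruction beyond ldcw, so each atomic_t is protected by one of a small array of hashed spinlocks, selected by the variable's address (see the __atomic_hash declaration in the first hunk's context). A simplified sketch of that scheme, paraphrased from the SMP side of this header and not part of this diff:

    /* One arch_spinlock_t per hash bucket, declared earlier in atomic.h:
     *   extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
     * The lock protecting a given atomic_t is picked by hashing its address.
     */
    #define ATOMIC_HASH(a)						\
    	(&(__atomic_hash[(((unsigned long)(a)) / L1_CACHE_BYTES)	\
    			 & (ATOMIC_HASH_SIZE - 1)]))

    #define _atomic_spin_lock_irqsave(l, f) do {	\
    	arch_spinlock_t *s = ATOMIC_HASH(l);	\
    	local_irq_save(f);			\
    	arch_spin_lock(s);			\
    } while (0)

    #define _atomic_spin_unlock_irqrestore(l, f) do {	\
    	arch_spinlock_t *s = ATOMIC_HASH(l);		\
    	arch_spin_unlock(s);				\
    	local_irq_restore(f);				\
    } while (0)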