author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 15:48:00 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 15:48:00 +0200
commit	dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree	9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/hexagon
parent	Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kerne... (diff)
parent	locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read() (diff)
download	linux-dbb885fecc1b1b35e93416bedd24d21bd20f60ed.tar.xz
download	linux-dbb885fecc1b1b35e93416bedd24d21bd20f60ed.zip
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
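The last bullet is the structural change: once an architecture supplies an atomic compare-and-exchange, every other read-modify-write op can be generated as a cmpxchg() retry loop. Below is a minimal sketch of that pattern in plain C; the my_* names are hypothetical stand-ins for illustration, not the kernel's actual asm-generic/atomic.h code:

/*
 * Minimal sketch of the "generate everything from cmpxchg()" idea.
 * The my_* names are illustrative, not the kernel's.
 */
typedef struct { int counter; } my_atomic_t;

/*
 * Assumed architecture primitive: atomically store @new to *@p if
 * *@p == @old; returns the value actually observed in *@p.
 */
extern int my_cmpxchg(int *p, int old, int new);

#define MY_ATOMIC_OP_RETURN(op, c_op)					\
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	/* retry until no other CPU changed the value under us */	\
	while ((old = my_cmpxchg(&v->counter, c, c c_op i)) != c)	\
		c = old;						\
									\
	return c c_op i;						\
}

MY_ATOMIC_OP_RETURN(add, +)	/* generates my_atomic_add_return() */
MY_ATOMIC_OP_RETURN(sub, -)	/* generates my_atomic_sub_return() */

The per-architecture "Fold atomic_ops" commits in the list are the complementary cleanup: each architecture's hand-written add/sub loops are folded into op-generating macros, as the hexagon diff below shows.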
Diffstat (limited to 'arch/hexagon')
-rw-r--r--	arch/hexagon/include/asm/atomic.h	68
1 file changed, 37 insertions(+), 31 deletions(-)
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index de916b11bff5..93d07025f183 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -94,41 +94,47 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return __oldval;
}
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int output;
-
- __asm__ __volatile__ (
- "1: %0 = memw_locked(%1);\n"
- " %0 = add(%0,%2);\n"
- " memw_locked(%1,P3)=%0;\n"
- " if !P3 jump 1b;\n"
- : "=&r" (output)
- : "r" (&v->counter), "r" (i)
- : "memory", "p3"
- );
- return output;
-
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int output; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+ " if !P3 jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int output; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+ " if !P3 jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+ return output; \
}
-#define atomic_add(i, v) atomic_add_return(i, (v))
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int output;
- __asm__ __volatile__ (
- "1: %0 = memw_locked(%1);\n"
- " %0 = sub(%0,%2);\n"
- " memw_locked(%1,P3)=%0\n"
- " if !P3 jump 1b;\n"
- : "=&r" (output)
- : "r" (&v->counter), "r" (i)
- : "memory", "p3"
- );
- return output;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-#define atomic_sub(i, v) atomic_sub_return(i, (v))
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/**
* __atomic_add_unless - add unless the number is a given value
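For reference, here is what ATOMIC_OP(add) in the hunk above expands to after preprocessing; the #op stringification splices "add" into the instruction template. The expansion is mechanical and is shown only to make the macro easier to read:

/* Expansion of ATOMIC_OP(add) from the hunk above. */
static inline void atomic_add(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		/* LL/SC loop: locked load, add, conditional store;
		 * predicate P3 reports whether the store succeeded. */
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = add(%0,%2);\n"
		"	memw_locked(%1,P3)=%0;\n"
		"	if !P3 jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
}

ATOMIC_OP_RETURN(add) expands to the same body plus a trailing "return output;". That near-duplication between the void and value-returning variants, and between add and sub, is exactly the linecount the fold removes.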