summaryrefslogtreecommitdiffstats
path: root/arch/arm64/include/asm/atomic.h
diff options
context:
space:
mode:
authorWill Deacon <will@kernel.org>2019-08-29 15:33:23 +0200
committerWill Deacon <will@kernel.org>2019-08-30 12:18:37 +0200
commit5aad6cdabbf91fd330bd216fe3c93d90f78bc7e7 (patch)
tree059fe136b852abbacb2121935c91c1f70033c77d /arch/arm64/include/asm/atomic.h
parentarm64: lse: Make ARM64_LSE_ATOMICS depend on JUMP_LABEL (diff)
downloadlinux-5aad6cdabbf91fd330bd216fe3c93d90f78bc7e7.tar.xz
linux-5aad6cdabbf91fd330bd216fe3c93d90f78bc7e7.zip
arm64: atomics: Undefine internal macros after use
We use a bunch of internal macros when constructing our atomic and cmpxchg routines in order to save on boilerplate. Avoid exposing these directly to users of the header files. Reviewed-by: Andrew Murray <andrew.murray@arm.com> Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64/include/asm/atomic.h')
-rw-r--r--arch/arm64/include/asm/atomic.h7
1 file changed, 7 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7c334337674d..916e5a6d5454 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -32,6 +32,7 @@ ATOMIC_OP(atomic_add)
ATOMIC_OP(atomic_and)
ATOMIC_OP(atomic_sub)
+#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, op) \
static inline int arch_##op##name(int i, atomic_t *v) \
@@ -54,6 +55,8 @@ ATOMIC_FETCH_OPS(atomic_fetch_sub)
ATOMIC_FETCH_OPS(atomic_add_return)
ATOMIC_FETCH_OPS(atomic_sub_return)
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
#define ATOMIC64_OP(op) \
static inline void arch_##op(long i, atomic64_t *v) \
@@ -68,6 +71,7 @@ ATOMIC64_OP(atomic64_add)
ATOMIC64_OP(atomic64_and)
ATOMIC64_OP(atomic64_sub)
+#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, op) \
static inline long arch_##op##name(long i, atomic64_t *v) \
@@ -90,6 +94,9 @@ ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
ATOMIC64_FETCH_OPS(atomic64_add_return)
ATOMIC64_FETCH_OPS(atomic64_sub_return)
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
+
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
return __lse_ll_sc_body(atomic64_dec_if_positive, v);