author     Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 20:45:29 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 20:45:29 +0200
commit     a15286c63d113d4296c58867994cd266a28f5d6d (patch)
tree       aaebbc86918c0c1943c26115d5bb1dd6c2fc2d9b /arch/ia64
parent     Merge tags 'objtool-urgent-2021-06-28' and 'objtool-core-2021-06-28' of git:/... (diff)
parent     locking/lockdep: Correct the description error for check_redundant() (diff)
Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 - Core locking & atomics:

    - Convert all architectures to ARCH_ATOMIC: move every architecture
      to ARCH_ATOMIC, then get rid of ARCH_ATOMIC and all the
      transitory facilities and #ifdefs.

      Much reduction in complexity from that series:

          63 files changed, 756 insertions(+), 4094 deletions(-)

      (A wrapper sketch follows this summary.)

    - Self-test enhancements

 - Futexes:

    - Add the new FUTEX_LOCK_PI2 ABI, which is a variant that doesn't
      set FLAGS_CLOCKRT (i.e. uses CLOCK_MONOTONIC).

      [ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
        FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
        introduce a new variant was resisted successfully. ]

      (A userspace call sketch follows this summary.)

    - Enhance futex self-tests

 - Lockdep:

    - Fix dependency path printouts

    - Optimize trace saving

    - Broaden & fix wait-context checks

 - Misc cleanups and fixes.
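As rough orientation for the ARCH_ATOMIC item above, a minimal sketch of how
the arch_-prefixed ops (the rename visible in the ia64 diff below) sit
underneath the regular atomic_*() API. This assumes the generic instrumented
wrappers keep their current shape; it is illustrative, not the generated
kernel source:

    /*
     * Illustrative only: with ARCH_ATOMIC, each architecture supplies
     * arch_atomic_*() and the generic layer wraps it with the sanitizer
     * instrumentation hooks to form the public atomic_*() API.
     */
    static __always_inline int
    atomic_read(const atomic_t *v)
    {
    	instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN hook */
    	return arch_atomic_read(v);		/* arch op, e.g. the ia64 one below */
    }

With every architecture providing arch_atomic_*(), the ARCH_ATOMIC opt-in
machinery and its #ifdefs could be deleted, which is where the ~4000 removed
lines in the series come from.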
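For the FUTEX_LOCK_PI2 item above, a minimal userspace sketch, assuming a raw
syscall(2) invocation (glibc has no futex wrapper) and a fallback define for
FUTEX_LOCK_PI2 when the installed uapi headers predate it; the helper name
futex_lock_pi2() is purely illustrative. Unlike FUTEX_LOCK_PI, the absolute
timeout is measured against CLOCK_MONOTONIC unless FUTEX_CLOCK_REALTIME is
or'ed into the op:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    #ifndef FUTEX_LOCK_PI2
    #define FUTEX_LOCK_PI2	13	/* value added by this series */
    #endif

    /* illustrative helper: acquire a PI futex with an absolute
       CLOCK_MONOTONIC timeout via the new FUTEX_LOCK_PI2 op */
    static long futex_lock_pi2(uint32_t *uaddr, const struct timespec *abs_timeout)
    {
    	/* val, uaddr2 and val3 are unused for FUTEX_LOCK_PI2 */
    	return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0, abs_timeout, NULL, 0);
    }

    int main(void)
    {
    	static uint32_t futex_word;	/* 0 == unowned */
    	struct timespec ts;

    	clock_gettime(CLOCK_MONOTONIC, &ts);
    	ts.tv_sec += 1;			/* give up after one second */

    	return futex_lock_pi2(&futex_word, &ts) ? 1 : 0;
    }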
* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
locking/lockdep: Correct the description error for check_redundant()
futex: Provide FUTEX_LOCK_PI2 to support clock selection
futex: Prepare futex_lock_pi() for runtime clock selection
lockdep/selftest: Remove wait-type RCU_CALLBACK tests
lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
lockdep: Fix wait-type for empty stack
locking/selftests: Add a selftest for check_irq_usage()
lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
locking/lockdep: Remove the unnecessary trace saving
locking/lockdep: Fix the dep path printing for backwards BFS
selftests: futex: Add futex compare requeue test
selftests: futex: Add futex wait test
seqlock: Remove trailing semicolon in macros
locking/lockdep: Reduce LOCKDEP dependency list
locking/lockdep,doc: Improve readability of the block matrix
locking/atomics: atomic-instrumented: simplify ifdeffery
locking/atomic: delete !ARCH_ATOMIC remnants
locking/atomic: xtensa: move to ARCH_ATOMIC
locking/atomic: sparc: move to ARCH_ATOMIC
locking/atomic: sh: move to ARCH_ATOMIC
...
Diffstat (limited to 'arch/ia64')
 arch/ia64/include/asm/atomic.h       | 74
 arch/ia64/include/asm/cmpxchg.h      | 16
 arch/ia64/include/uapi/asm/cmpxchg.h | 10
 3 files changed, 60 insertions(+), 40 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index f267d956458f..266c429b9137 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,11 +21,11 @@
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		READ_ONCE((v)->counter)
-#define atomic64_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
 
-#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
 
 #define ATOMIC_OP(op, c_op)						\
 static __inline__ int							\
@@ -36,7 +36,7 @@ ia64_atomic_##op (int i, atomic_t *v)				\
 									\
 	do {								\
 		CMPXCHG_BUGCHECK(v);					\
-		old = atomic_read(v);					\
+		old = arch_atomic_read(v);				\
 		new = old c_op i;					\
 	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
 	return new;							\
@@ -51,7 +51,7 @@ ia64_atomic_fetch_##op (int i, atomic_t *v)			\
 									\
 	do {								\
 		CMPXCHG_BUGCHECK(v);					\
-		old = atomic_read(v);					\
+		old = arch_atomic_read(v);				\
 		new = old c_op i;					\
 	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
 	return old;							\
@@ -74,7 +74,7 @@ ATOMIC_OPS(sub, -)
 #define __ia64_atomic_const(i)	0
 #endif
 
-#define atomic_add_return(i,v)						\
+#define arch_atomic_add_return(i,v)					\
 ({									\
 	int __ia64_aar_i = (i);						\
 	__ia64_atomic_const(i)						\
@@ -82,7 +82,7 @@ ATOMIC_OPS(sub, -)
 		: ia64_atomic_add(__ia64_aar_i, v);			\
 })
 
-#define atomic_sub_return(i,v)						\
+#define arch_atomic_sub_return(i,v)					\
 ({									\
 	int __ia64_asr_i = (i);						\
 	__ia64_atomic_const(i)						\
@@ -90,7 +90,7 @@ ATOMIC_OPS(sub, -)
 		: ia64_atomic_sub(__ia64_asr_i, v);			\
 })
 
-#define atomic_fetch_add(i,v)						\
+#define arch_atomic_fetch_add(i,v)					\
 ({									\
 	int __ia64_aar_i = (i);						\
 	__ia64_atomic_const(i)						\
@@ -98,7 +98,7 @@ ATOMIC_OPS(sub, -)
 		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
 })
 
-#define atomic_fetch_sub(i,v)						\
+#define arch_atomic_fetch_sub(i,v)					\
 ({									\
 	int __ia64_asr_i = (i);						\
 	__ia64_atomic_const(i)						\
@@ -110,13 +110,13 @@ ATOMIC_FETCH_OP(and, &)
 ATOMIC_FETCH_OP(or, |)
 ATOMIC_FETCH_OP(xor, ^)
 
-#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
-#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
-#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
+#define arch_atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
+#define arch_atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)
 
-#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
-#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
-#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
+#define arch_atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
+#define arch_atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
@@ -131,7 +131,7 @@ ia64_atomic64_##op (s64 i, atomic64_t *v)			\
 									\
 	do {								\
 		CMPXCHG_BUGCHECK(v);					\
-		old = atomic64_read(v);					\
+		old = arch_atomic64_read(v);				\
 		new = old c_op i;					\
 	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
 	return new;							\
@@ -146,7 +146,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)		\
 									\
 	do {								\
 		CMPXCHG_BUGCHECK(v);					\
-		old = atomic64_read(v);					\
+		old = arch_atomic64_read(v);				\
 		new = old c_op i;					\
 	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
 	return old;							\
@@ -159,7 +159,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)		\
 ATOMIC64_OPS(add, +)
 ATOMIC64_OPS(sub, -)
 
-#define atomic64_add_return(i,v)					\
+#define arch_atomic64_add_return(i,v)					\
 ({									\
 	s64 __ia64_aar_i = (i);						\
 	__ia64_atomic_const(i)						\
@@ -167,7 +167,7 @@ ATOMIC64_OPS(sub, -)
 		: ia64_atomic64_add(__ia64_aar_i, v);			\
 })
 
-#define atomic64_sub_return(i,v)					\
+#define arch_atomic64_sub_return(i,v)					\
 ({									\
 	s64 __ia64_asr_i = (i);						\
 	__ia64_atomic_const(i)						\
@@ -175,7 +175,7 @@ ATOMIC64_OPS(sub, -)
 		: ia64_atomic64_sub(__ia64_asr_i, v);			\
 })
 
-#define atomic64_fetch_add(i,v)						\
+#define arch_atomic64_fetch_add(i,v)					\
 ({									\
 	s64 __ia64_aar_i = (i);						\
 	__ia64_atomic_const(i)						\
@@ -183,7 +183,7 @@ ATOMIC64_OPS(sub, -)
 		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
 })
 
-#define atomic64_fetch_sub(i,v)						\
+#define arch_atomic64_fetch_sub(i,v)					\
 ({									\
 	s64 __ia64_asr_i = (i);						\
 	__ia64_atomic_const(i)						\
@@ -195,29 +195,29 @@ ATOMIC64_FETCH_OP(and, &)
 ATOMIC64_FETCH_OP(or, |)
 ATOMIC64_FETCH_OP(xor, ^)
 
-#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
-#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
-#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)
 
-#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
-#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
-#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-#define atomic64_cmpxchg(v, old, new) \
-	(cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
-#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
+#define arch_atomic_add(i,v)		(void)arch_atomic_add_return((i), (v))
+#define arch_atomic_sub(i,v)		(void)arch_atomic_sub_return((i), (v))
 
-#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
+#define arch_atomic64_add(i,v)		(void)arch_atomic64_add_return((i), (v))
+#define arch_atomic64_sub(i,v)		(void)arch_atomic64_sub_return((i), (v))
 
 #endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..94ef84429843
--- /dev/null
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+#include <uapi/asm/cmpxchg.h>
+
+#define arch_xchg(ptr, x)	\
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+
+#define arch_cmpxchg(ptr, o, n)		cmpxchg_acq((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
+
+#define arch_cmpxchg_local		arch_cmpxchg
+#define arch_cmpxchg64_local		arch_cmpxchg64
+
+#endif /* _ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 5d90307fd6e0..926c6cb1e029 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_CMPXCHG_H
-#define _ASM_IA64_CMPXCHG_H
+#ifndef _UAPI_ASM_IA64_CMPXCHG_H
+#define _UAPI_ASM_IA64_CMPXCHG_H
 
 /*
  * Compare/Exchange, forked from asm/intrinsics.h
@@ -53,8 +53,10 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 	__xchg_result;							\
 })
 
+#ifndef __KERNEL__
 #define xchg(ptr, x)							\
 ({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+#endif
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -126,12 +128,14 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
  * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
  */
 
+#ifndef __KERNEL__
 /* for compatibility with other platforms: */
 #define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
 
 #define cmpxchg_local		cmpxchg
 #define cmpxchg64_local		cmpxchg64
+#endif
 
 #ifdef CONFIG_IA64_DEBUG_CMPXCHG
 # define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
@@ -152,4 +156,4 @@ do {									\
 
 #endif /* !__ASSEMBLY__ */
 
-#endif /* _ASM_IA64_CMPXCHG_H */
+#endif /* _UAPI_ASM_IA64_CMPXCHG_H */