author | Mark Rutland <mark.rutland@arm.com> | 2021-07-13 12:52:53 +0200
committer | Peter Zijlstra <peterz@infradead.org> | 2021-07-16 18:46:45 +0200
commit | cf3ee3c8c29dc349b2cf52e5e72e8cb805ff5e57 (patch)
tree | 3a341d32735ffce29fa67bc5bd0f84eb7fe2423d /include/asm-generic/bitops/lock.h
parent | locking/atomic: add arch_atomic_long*() (diff)
download | linux-cf3ee3c8c29dc349b2cf52e5e72e8cb805ff5e57.tar.xz linux-cf3ee3c8c29dc349b2cf52e5e72e8cb805ff5e57.zip
locking/atomic: add generic arch_*() bitops
Now that all architectures provide arch_atomic_long_*(), we can build
the generic arch_*() bitops atop these rather than atop atomic_long_*(),
making them safe to use in noinstr code. The regular bitop wrappers are
built atop these arch_*() forms.
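For illustration only (not part of this patch), a minimal sketch of what "safe to use in noinstr code" means for a caller: noinstr functions must not call into instrumented helpers, so they use the arch_*() form directly. The function and parameter names below are hypothetical.

#include <linux/bitops.h>
#include <linux/compiler_types.h>

/*
 * Hypothetical noinstr caller: KASAN/KCSAN calls must not be emitted
 * here, so it uses the arch_*() bitop rather than the instrumented
 * clear_bit_unlock() wrapper.
 */
noinstr void example_release_slot(unsigned int slot, volatile unsigned long *word)
{
        arch_clear_bit_unlock(slot, word);
}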
As the generic non-atomic bitops use plain accesses, these will be
implicitly instrumented unless they are inlined into noinstr functions
(which is similar to arch_atomic*_read() when based on READ_ONCE()).
The wrappers are modified so that where the underlying arch_*() function
uses a plain access, no explicit instrumentation is added, as this is
redundant and could result in confusing reports.
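As a sketch of the two wrapper shapes this describes (simplified; the real wrappers live in the asm-generic instrumented bitops headers and differ in detail, so treat the bodies below as assumptions rather than the actual definitions):

#include <linux/instrumented.h>
#include <linux/bits.h>

/* Atomic bitop: instrument the access explicitly, then call the arch_*() form. */
static __always_inline int test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
        instrument_atomic_read_write(p + BIT_WORD(nr), sizeof(long));
        return arch_test_and_set_bit_lock(nr, p);
}

/*
 * Plain-access bitop: the generic arch___set_bit() is an ordinary
 * read-modify-write, which KASAN/KCSAN instrument implicitly, so the
 * wrapper adds no explicit instrumentation.
 */
#define __set_bit arch___set_bit

The point of the second form is that layering an explicit instrument_*() call on top of an already compiler-instrumented plain access would report the same access twice.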
Since function prototypes get excessively long with both an `arch_`
prefix and `__always_inline` attribute, the return type and function
attributes have been split onto a separate line, matching the style of
the generated atomic headers.
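Concretely, the definitions in the diff below now open with the attributes and return type on their own line:

static __always_inline int
arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)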
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-6-mark.rutland@arm.com
Diffstat (limited to 'include/asm-generic/bitops/lock.h')
-rw-r--r-- | include/asm-generic/bitops/lock.h | 39 |
1 file changed, 21 insertions, 18 deletions
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 3ae021368f48..630f2f6b9595 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,7 @@
 #include <asm/barrier.h>
 
 /**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
  * @nr: Bit to set
  * @addr: Address to count from
  *
@@ -15,8 +15,8 @@
  * the returned value is 0.
  * It can be used to implement bit locks.
  */
-static inline int test_and_set_bit_lock(unsigned int nr,
-                                        volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
 {
         long old;
         unsigned long mask = BIT_MASK(nr);
@@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr,
         if (READ_ONCE(*p) & mask)
                 return 1;
 
-        old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+        old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
         return !!(old & mask);
 }
 
 
 /**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
  * This operation is atomic and provides release barrier semantics.
  */
-static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
         p += BIT_WORD(nr);
-        atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+        arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 /**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
@@ -54,38 +55,40 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
  *
  * See for example x86's implementation.
  */
-static inline void __clear_bit_unlock(unsigned int nr,
-                                      volatile unsigned long *p)
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
         unsigned long old;
 
         p += BIT_WORD(nr);
         old = READ_ONCE(*p);
         old &= ~BIT_MASK(nr);
-        atomic_long_set_release((atomic_long_t *)p, old);
+        arch_atomic_long_set_release((atomic_long_t *)p, old);
 }
 
 /**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- *                                     byte is negative, for unlock.
+ * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ *                                          byte is negative, for unlock.
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
  * This is a bit of a one-trick-pony for the filemap code, which clears
  * PG_locked and tests PG_waiters,
  */
-#ifndef clear_bit_unlock_is_negative_byte
-static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
-                                                     volatile unsigned long *p)
+#ifndef arch_clear_bit_unlock_is_negative_byte
+static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
+                                                          volatile unsigned long *p)
 {
         long old;
         unsigned long mask = BIT_MASK(nr);
 
         p += BIT_WORD(nr);
-        old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+        old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
         return !!(old & BIT(7));
 }
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
 #endif
 
+#include <asm-generic/bitops/instrumented-lock.h>
+
 #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */