| author | Alexander Lobakin <alexandr.lobakin@intel.com> | 2022-06-24 14:13:07 +0200 |
|---|---|---|
| committer | Yury Norov <yury.norov@gmail.com> | 2022-07-01 04:52:41 +0200 |
| commit | 0e862838f290147ea9c16db852d8d494b552d38d (patch) | |
| tree | 31f1523f9af44667208d04d1cd514aac4ee41fd6 /include/asm-generic/bitops/generic-non-atomic.h | |
| parent | bitops: always define asm-generic non-atomic bitops (diff) | |
| download | linux-0e862838f290147ea9c16db852d8d494b552d38d.tar.xz linux-0e862838f290147ea9c16db852d8d494b552d38d.zip | |
bitops: unify non-atomic bitops prototypes across architectures
Currently, there is a mess with the prototypes of the non-atomic
bitops across the different architectures:
| field | types currently in use |
|---|---|
| ret | bool, int, unsigned long |
| nr | int, long, unsigned int, unsigned long |
| addr | volatile unsigned long *, volatile void * |
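For illustration, a minimal sketch of what this divergence can look like; the prototypes below are hypothetical stand-ins, not taken from any particular architecture's header:

```c
#include <stdbool.h>

/*
 * Hypothetical stand-ins for three architectures' headers -- the
 * "archA_"/"archB_"/"archC_" names and exact signatures are
 * illustrative only, not copied from the tree.
 */
int           archA___test_and_set_bit(int nr, volatile unsigned long *addr);
bool          archB___test_and_set_bit(long nr, volatile void *addr);
unsigned long archC___test_and_set_bit(unsigned int nr, volatile unsigned long *addr);
```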
Thankfully, it doesn't provoke any bugs, but it can make the compiler
complain in places where that's not wanted at all.
Adjust all the prototypes to the following standard:
| field | type | rationale |
|---|---|---|
| ret | bool | retval can be only 0 or 1 |
| nr | unsigned long | native; signed makes no sense |
| addr | volatile unsigned long * | bitmaps are arrays of ulongs |
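In header form, the standardized shape is simply (a prototype-only sketch, with the kernel's static/__always_inline qualifiers dropped so it stands alone; the names match those used in the diff below):

```c
#include <stdbool.h>

/* value-returning ops: bool result, unsigned long bit number, ulong bitmap */
bool generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr);

/* plain ops: same argument shape, void result */
void generic___set_bit(unsigned long nr, volatile unsigned long *addr);
```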
Next, some architectures don't define 'arch_' versions because they don't
support instrumentation, while others do. To make sure there is always the
same set of callables present and to ease any potential future
changes, make them all follow the rule:
* architecture-specific files define only 'arch_' versions;
* non-prefixed versions can be defined only in asm-generic files;
and place the non-prefixed definitions into a new file in
asm-generic to be included by non-instrumented architectures.
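A sketch of what such an asm-generic mapping file can look like for non-instrumented architectures; the file and macro names here are illustrative and may differ from the ones in the tree:

```c
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H

/*
 * Sketch: without instrumentation, the non-prefixed bitops can simply
 * alias the arch_ implementations (names illustrative).
 */
#define __set_bit		arch___set_bit
#define __clear_bit		arch___clear_bit
#define __change_bit		arch___change_bit

#define __test_and_set_bit	arch___test_and_set_bit
#define __test_and_clear_bit	arch___test_and_clear_bit
#define __test_and_change_bit	arch___test_and_change_bit

#define test_bit		arch_test_bit

#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
```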
Finally, add some static assertions in order to prevent people from
making a mess in this room again.
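One way such a check can be expressed is with static_assert() on the function types, using the kernel's __same_type() helper (built on __builtin_types_compatible_p()); the exact macro in the tree may differ in detail:

```c
/*
 * Sketch of a prototype-sanity check: assert that the arch_ and
 * non-prefixed variants have exactly the same type as the generic_
 * reference implementations.
 */
#define __check_bitop_pr(name)						\
	static_assert(__same_type(arch_##name, generic_##name) &&	\
		      __same_type(name, generic_##name))

__check_bitop_pr(__set_bit);
__check_bitop_pr(__test_and_set_bit);
/* ...and so on for the remaining non-atomic bitops */

#undef __check_bitop_pr
```

With a check like this in place, an architecture that declares e.g. arch___set_bit() with a mismatching return or argument type fails to build instead of silently diverging.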
I also used the %__always_inline attribute consistently, so that these
helpers always get resolved to the actual operations.
Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Yury Norov <yury.norov@gmail.com>
Diffstat (limited to 'include/asm-generic/bitops/generic-non-atomic.h')
-rw-r--r-- | include/asm-generic/bitops/generic-non-atomic.h | 24 |
1 file changed, 12 insertions, 12 deletions
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
index 7226488810e5..b85b8a2ac239 100644
--- a/include/asm-generic/bitops/generic-non-atomic.h
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -24,7 +24,7 @@
  * may be that only one operation succeeds.
  */
 static __always_inline void
-generic___set_bit(unsigned int nr, volatile unsigned long *addr)
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -33,7 +33,7 @@ generic___set_bit(unsigned int nr, volatile unsigned long *addr)
 }
 
 static __always_inline void
-generic___clear_bit(unsigned int nr, volatile unsigned long *addr)
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -50,8 +50,8 @@ generic___clear_bit(unsigned int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __always_inline
-void generic___change_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -68,8 +68,8 @@ void generic___change_bit(unsigned int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __always_inline int
-generic___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -88,8 +88,8 @@ generic___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static __always_inline int
-generic___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -100,8 +100,8 @@ generic___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static __always_inline int
-generic___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -116,8 +116,8 @@ generic___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static __always_inline int
-generic_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	/*
 	 * Unlike the bitops with the '__' prefix above, this one *is* atomic,
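To see the standardized semantics in isolation, here is a small userspace sketch that mirrors the generic___set_bit() body from the hunks above plus a test-and-clear counterpart, with local stand-ins equivalent to the kernel's BIT_MASK()/BIT_WORD() helpers; the demo_ names are made up for this example:

```c
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* local equivalents of the kernel's BIT_MASK()/BIT_WORD() helpers */
#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

/* mirrors the generic___set_bit() body shown in the diff */
static inline void demo___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

/* test-and-clear counterpart: returns the old bit value as bool */
static inline bool demo___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

int main(void)
{
	unsigned long bitmap[2] = { 0 };	/* 2 * BITS_PER_LONG bits */
	unsigned long nr = BITS_PER_LONG + 5;	/* lands in bitmap[1] */

	demo___set_bit(nr, bitmap);
	printf("old value: %d\n", demo___test_and_clear_bit(nr, bitmap));	/* 1 */
	printf("new value: %d\n", demo___test_and_clear_bit(nr, bitmap));	/* 0 */
	return 0;
}
```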