author     Arnd Bergmann <arnd@arndb.de>   2023-03-02 09:53:31 +0100
committer  Arnd Bergmann <arnd@arndb.de>   2023-04-04 17:58:11 +0200
commit     656e9007ef5862746cdf7ac16267c8e06e7b0989 (patch)
tree       ef0e41da56c4b51f697f168e83a2c4c062020fe3 /include/asm-generic
parent     asm-generic/io.h: suppress endianness warnings for relaxed accessors (diff)
download   linux-656e9007ef5862746cdf7ac16267c8e06e7b0989.tar.xz
           linux-656e9007ef5862746cdf7ac16267c8e06e7b0989.zip
asm-generic: avoid __generic_cmpxchg_local warnings
Code that passes a 32-bit constant into cmpxchg() produces harmless
sparse warnings because of the truncation in the branches that are not taken:
fs/erofs/zdata.c: note: in included file (through /home/arnd/arm-soc/arch/arm/include/asm/cmpxchg.h, /home/arnd/arm-soc/arch/arm/include/asm/atomic.h, /home/arnd/arm-soc/include/linux/atomic.h, ...):
include/asm-generic/cmpxchg-local.h:29:33: warning: cast truncates bits from constant value (5f0ecafe becomes fe)
include/asm-generic/cmpxchg-local.h:33:34: warning: cast truncates bits from constant value (5f0ecafe becomes cafe)
include/asm-generic/cmpxchg-local.h:29:33: warning: cast truncates bits from constant value (5f0ecafe becomes fe)
include/asm-generic/cmpxchg-local.h:30:42: warning: cast truncates bits from constant value (5f0edead becomes ad)
include/asm-generic/cmpxchg-local.h:33:34: warning: cast truncates bits from constant value (5f0ecafe becomes cafe)
include/asm-generic/cmpxchg-local.h:34:44: warning: cast truncates bits from constant value (5f0edead becomes dead)
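For illustration, here is a compact, self-contained sketch of the pattern sparse
complains about (this is not the kernel header itself; the function and variable
names sketch_cmpxchg_local, word and demo are invented for the example). The size
switch is resolved per call, but sparse still type-checks the casts in the untaken
branches and sees the 32-bit constant being truncated there:

#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;

static inline unsigned long
sketch_cmpxchg_local(volatile void *ptr, unsigned long old,
		     unsigned long new, int size)
{
	unsigned long prev = 0;

	switch (size) {
	case 1:
		prev = *(u8 *)ptr;
		if (prev == (u8)old)		/* sparse: 5f0ecafe becomes fe */
			*(u8 *)ptr = (u8)new;
		break;
	case 2:
		prev = *(u16 *)ptr;
		if (prev == (u16)old)		/* sparse: 5f0ecafe becomes cafe */
			*(u16 *)ptr = (u16)new;
		break;
	case 4:
		prev = *(u32 *)ptr;
		if (prev == (u32)old)		/* no bits lost for a 4-byte access */
			*(u32 *)ptr = (u32)new;
		break;
	}
	return prev;
}

/* A caller like the erofs code effectively does this: only the size-4
 * branch can ever run, but every branch is analysed with the constant. */
static u32 word;

unsigned long demo(void)
{
	return sketch_cmpxchg_local(&word, 0x5f0ecafe, 0x5f0edead, sizeof(word));
}

The truncating casts sit in branches that are never executed for a 4-byte
access, which is why the warning is harmless.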
This was reported as a regression against Matt's recent
__generic_cmpxchg_local patch, though that patch only added more warnings
on top of the ones that were already there.
Rewriting the truncation to use an explicit bitmask instead of a cast
to a smaller type avoids the warning but otherwise leaves the behavior
unchanged.
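As a side-by-side illustration of that idea (the helper names store_byte_cast
and store_byte_mask are made up; the real change is in the diff below), the cast
and the bitmask narrow the value to the same byte, but only the cast makes
sparse complain about a truncated constant:

#include <stdint.h>

typedef uint8_t u8;

/* Cast form: sparse flags the truncation when 'new' is a wide constant. */
static inline void store_byte_cast(volatile u8 *p, unsigned long new)
{
	*p = (u8)new;
}

/* Bitmask form: the value is narrowed by an AND instead of a truncating
 * cast, so the stored byte is the same but the warning goes away. */
static inline void store_byte_mask(volatile u8 *p, unsigned long new)
{
	*p = (new & 0xffu);
}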
I had another look at why the cast is even needed for atomic_cmpxchg(),
and, as Matt describes, the problem is that atomic_t contains a signed
'int' while cmpxchg() takes an 'unsigned long' argument, so converting
between the two sign-extends negative 32-bit atomic values to 64 bits.
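A short user-space sketch of the effect (hypothetical demo code, assuming a
64-bit 'unsigned long' as on arm64 or x86-64; on a 32-bit machine both
conversions behave the same): the value read back from memory is zero-extended,
so a sign-extended 'old' can never compare equal for negative values, while
going through a 32-bit type first makes the comparison work:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t old = -2;		/* e.g. a negative atomic_t counter */
	uint32_t mem = (uint32_t)-2;	/* the same value as stored in memory */

	/* __generic_cmpxchg_local() reads prev as a zero-extended 32-bit load */
	unsigned long prev = mem;		/* 0x00000000fffffffe */

	/* implicit int -> unsigned long conversion sign-extends negative values */
	unsigned long sign_extended = old;	/* 0xfffffffffffffffe */

	printf("without cast: %d\n", prev == sign_extended);	/* 0: never matches */
	printf("with cast:    %d\n", prev == (uint32_t)old);	/* 1: matches */
	return 0;
}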
I checked the other implementations of arch_cmpxchg() and did not find
any that run into the same problem as __generic_cmpxchg_local(), but it
is easy to stay on the safe side here and always convert the signed int
into an unsigned int when calling arch_cmpxchg(); that way the code keeps
working even if another arch_cmpxchg() implementation turns out to have
the same problem.
Fixes: 624654152284 ("locking/atomic: cmpxchg: Make __generic_cmpxchg_local compare against zero-extended 'old' value")
Reviewed-by: Matt Evans <mev@rivosinc.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic.h        |  4 ++--
-rw-r--r--  include/asm-generic/cmpxchg-local.h | 12 ++++++------
-rw-r--r--  include/asm-generic/cmpxchg.h       |  6 +++---
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 04b8be9f1a77..e271d6708c87 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -130,7 +130,7 @@ ATOMIC_OP(xor, ^)
 #define arch_atomic_read(v)			READ_ONCE((v)->counter)
 #define arch_atomic_set(v, i)			WRITE_ONCE(((v)->counter), (i))
 
-#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (v)))
-#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (u32)(v)))
+#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new)))
 
 #endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index c3e7315b7c1d..3df9f59a544e 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -26,16 +26,16 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
 	raw_local_irq_save(flags);
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
-		if (prev == (u8)old)
-			*(u8 *)ptr = (u8)new;
+		if (prev == (old & 0xffu))
+			*(u8 *)ptr = (new & 0xffu);
 		break;
 	case 2: prev = *(u16 *)ptr;
-		if (prev == (u16)old)
-			*(u16 *)ptr = (u16)new;
+		if (prev == (old & 0xffffu))
+			*(u16 *)ptr = (new & 0xffffu);
 		break;
 	case 4: prev = *(u32 *)ptr;
-		if (prev == (u32)old)
-			*(u32 *)ptr = (u32)new;
+		if (prev == (old & 0xffffffffu))
+			*(u32 *)ptr = (new & 0xffffffffu);
 		break;
 	case 8: prev = *(u64 *)ptr;
 		if (prev == old)
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index dca4419922a9..848de25fc4bf 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -32,7 +32,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u8 *)ptr;
-		*(volatile u8 *)ptr = x;
+		*(volatile u8 *)ptr = (x & 0xffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u8 */
@@ -43,7 +43,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u16 *)ptr;
-		*(volatile u16 *)ptr = x;
+		*(volatile u16 *)ptr = (x & 0xffffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u16 */
@@ -54,7 +54,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
 		local_irq_save(flags);
 		ret = *(volatile u32 *)ptr;
-		*(volatile u32 *)ptr = x;
+		*(volatile u32 *)ptr = (x & 0xffffffffu);
 		local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u32 */