summaryrefslogtreecommitdiffstats
path: root/arch/arm64/include/asm/cmpxchg.h
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2015-07-30 20:19:43 +0200
committerWill Deacon <will.deacon@arm.com>2015-07-30 21:16:53 +0200
commita14949e09a228dcd4cc5088c90c375429c7d102c (patch)
treeab11f9350f85e2becd350780b4e70dbc9c61e15b /arch/arm64/include/asm/cmpxchg.h
parentarm64: alternative: put secondary CPUs into polling loop during patch (diff)
downloadlinux-a14949e09a228dcd4cc5088c90c375429c7d102c.tar.xz
linux-a14949e09a228dcd4cc5088c90c375429c7d102c.zip
arm64: cmpxchg: truncate sub-word signed types before comparison
When performing a cmpxchg operation on a signed sub-word type (e.g. s8), we need to ensure that the upper register bits of the "old" value used for comparison are zeroed, otherwise we may erroneously fail the cmpxchg which may even be interpreted as success by the caller (if the compiler performs the truncation as part of its check). This has been observed in mod_state, where negative values were causing problems with this_cpu_cmpxchg. This patch fixes the issue by explicitly casting 8-bit and 16-bit "old" values using unsigned types in our cmpxchg wrappers. 32-bit types can be left alone, since the underlying asm makes use of W registers in this case. Reported-by: Mark Rutland <mark.rutland@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/include/asm/cmpxchg.h')
-rw-r--r--arch/arm64/include/asm/cmpxchg.h8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 7bfda0944c9b..899e9f1d19e4 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -122,9 +122,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
{
switch (size) {
case 1:
- return __cmpxchg_case_1(ptr, old, new);
+ return __cmpxchg_case_1(ptr, (u8)old, new);
case 2:
- return __cmpxchg_case_2(ptr, old, new);
+ return __cmpxchg_case_2(ptr, (u16)old, new);
case 4:
return __cmpxchg_case_4(ptr, old, new);
case 8:
@@ -141,9 +141,9 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
{
switch (size) {
case 1:
- return __cmpxchg_case_mb_1(ptr, old, new);
+ return __cmpxchg_case_mb_1(ptr, (u8)old, new);
case 2:
- return __cmpxchg_case_mb_2(ptr, old, new);
+ return __cmpxchg_case_mb_2(ptr, (u16)old, new);
case 4:
return __cmpxchg_case_mb_4(ptr, old, new);
case 8: