author    | Paul Burton <paul.burton@mips.com> | 2019-02-06 23:38:56 +0100
committer | Paul Burton <paul.burton@mips.com> | 2019-02-06 23:59:24 +0100
commit    | c7e2d71dda7ac4731bae54a25c49cd81fe7b9046 (patch)
tree      | d38e29f77b16bdf2684f7c3838f0ef2afb506935 /arch/mips/include/asm/cmpxchg.h
parent    | MIPS: Export mm switching functions used by KVM (diff)
MIPS: Fix set_pte() for Netlogic XLR using cmpxchg64()
Commit 46011e6ea392 ("MIPS: Make set_pte() SMP safe.") introduced an
open-coded version of cmpxchg() within set_pte() that always operated
on a value the size of an unsigned long. That is, it used ll/sc
instructions when CONFIG_32BIT=y or lld/scd instructions when
CONFIG_64BIT=y.
This was broken for configurations in which pte_t is larger than an
unsigned long (with the exception of XPA configurations, which have a
different implementation of set_pte()), because we no longer update the
whole PTE. Indeed commit 46011e6ea392 ("MIPS: Make set_pte() SMP safe.")
notes:
> The case of CONFIG_64BIT_PHYS_ADDR && CONFIG_CPU_MIPS32 is *not*
> handled.
In practice this affects Netlogic XLR/XLS systems, including
nlm_xlr_defconfig.
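To make the failure mode concrete, here is a minimal user-space sketch
of the problem (broken_set_pte() and sketch_pte_t are hypothetical names
for illustration, assuming a little-endian host; the real set_pte() lives
in arch/mips/include/asm/pgtable.h):

#include <stdint.h>
#include <stdio.h>

/* Sketch: a 64-bit PTE on a 32-bit kernel, as on Netlogic XLR/XLS. */
typedef uint64_t sketch_pte_t;

static void broken_set_pte(sketch_pte_t *ptep, sketch_pte_t pteval)
{
	/*
	 * Stand-in for the old open-coded ll/sc loop: it operated on an
	 * unsigned long, i.e. only 32 of the PTE's 64 bits; the other
	 * half keeps whatever value it held before.
	 */
	*(uint32_t *)ptep = (uint32_t)pteval;
}

int main(void)
{
	sketch_pte_t pte = 0xffffffff00000000ull;

	broken_set_pte(&pte, 0x123456789abcdef0ull);
	/* Prints ffffffff9abcdef0 (little-endian), not 123456789abcdef0. */
	printf("%llx\n", (unsigned long long)pte);
	return 0;
}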
Commit 82f4f66ddf11 ("MIPS: Remove open-coded cmpxchg() in set_pte()")
then replaced this open-coded version of cmpxchg() with an actual call
to cmpxchg(). Unfortunately the configurations mentioned above then fail
to build because cmpxchg() can only operate on values 32 bits or smaller
in size, resulting in:
arch/mips/include/asm/cmpxchg.h:166:11: error:
call to '__cmpxchg_called_with_bad_pointer' declared with
attribute error: Bad argument size for cmpxchg
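This error comes from the kernel's compile-time size dispatch: cmpxchg()
switches on sizeof(*ptr) and routes unsupported sizes to
__cmpxchg_called_with_bad_pointer(), whose __compiletime_error()
attribute (visible in the diff below) turns any call that survives
optimization into the build failure quoted above. A condensed,
hypothetical sketch of the idiom (names simplified; requires GCC or
clang with -O1 or higher so dead branches are eliminated):

extern unsigned long __bad_cmpxchg_size(void)
	__attribute__((error("Bad argument size for cmpxchg")));

#define sketch_cmpxchg(ptr, old, new) ({				\
	unsigned long __ret;						\
	switch (sizeof(*(ptr))) {					\
	case 4:								\
		/* real code: an ll/sc compare-exchange loop */		\
		__ret = (unsigned long)(new);				\
		break;							\
	default:							\
		/* a non-eliminated call => the build error above */	\
		__ret = __bad_cmpxchg_size();				\
		break;							\
	}								\
	__ret;								\
})

int main(void)
{
	unsigned int word = 1;

	/* 4-byte pointee: the default branch folds away and this builds. */
	return (int)sketch_cmpxchg(&word, 1, 2);
}

Swap word for an 8-byte unsigned long long and the default branch
survives, reproducing the quoted failure.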
One option that would fix the build failure & restore the previous
behaviour would be to cast the pte pointer to a pointer to unsigned
long, so that cmpxchg() would operate on just 32 bits of the PTE as it
has been since commit 46011e6ea392 ("MIPS: Make set_pte() SMP safe.").
That feels like an ugly hack though, and since only half of the PTE
would ever be updated atomically, the behaviour of set_pte() would
likely remain broken.
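For concreteness, the rejected workaround would have looked roughly like
this (illustrative only, never committed):

	/*
	 * Cast the PTE pointer down so cmpxchg() accepts it, silently
	 * compare-exchanging only 32 of the PTE's 64 bits.
	 */
	cmpxchg((unsigned long *)ptep, (unsigned long)old, (unsigned long)new);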
Instead we take advantage of the fact that the affected configurations
already know at compile time that the CPU will support 64 bits (ie. have
hardcoded cpu_has_64bits in cpu-feature-overrides.h) in order to allow
cmpxchg64() to be used in these configurations. set_pte() then makes use
of cmpxchg64() when necessary.
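The override that makes this safe is a compile-time constant rather than
a runtime probe. A sketch of the relevant line, assuming the platform
header (for Netlogic that is
arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h; the exact
line shown here is illustrative):

/* Platform header pins the feature instead of probing CPU options. */
#define cpu_has_64bits	1

With that constant in place, the if (cpu_has_64bits && kernel_uses_llsc)
test inside the new cmpxchg64() macro (see the diff below) folds at
compile time, the compiler discards the call to
__cmpxchg64_unsupported(), and the __compiletime_error() attached to it
never fires. A platform that left cpu_has_64bits as a runtime probe
would keep the call alive and fail the build, which is exactly the guard
this commit wants.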
Signed-off-by: Paul Burton <paul.burton@mips.com>
Fixes: 46011e6ea392 ("MIPS: Make set_pte() SMP safe.")
Fixes: 82f4f66ddf11 ("MIPS: Remove open-coded cmpxchg() in set_pte()")
Diffstat (limited to 'arch/mips/include/asm/cmpxchg.h')
-rw-r--r-- | arch/mips/include/asm/cmpxchg.h | 104 |
1 file changed, 98 insertions(+), 6 deletions(-)
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 638de0c25249..f345a873742d 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -36,6 +36,8 @@
  */
 extern unsigned long __cmpxchg_called_with_bad_pointer(void)
 	__compiletime_error("Bad argument size for cmpxchg");
+extern unsigned long __cmpxchg64_unsupported(void)
+	__compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false");
 extern unsigned long __xchg_called_with_bad_pointer(void)
 	__compiletime_error("Bad argument size for xchg");
 
@@ -204,12 +206,102 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	cmpxchg((ptr), (o), (n));					\
 })
 #else
-#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#ifndef CONFIG_SMP
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
-#endif
-#endif
+
+# include <asm-generic/cmpxchg-local.h>
+# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+# ifdef CONFIG_SMP
+
+static inline unsigned long __cmpxchg64(volatile void *ptr,
+					unsigned long long old,
+					unsigned long long new)
+{
+	unsigned long long tmp, ret;
+	unsigned long flags;
+
+	/*
+	 * The assembly below has to combine 32 bit values into a 64 bit
+	 * register, and split 64 bit values from one register into two. If we
+	 * were to take an interrupt in the middle of this we'd only save the
+	 * least significant 32 bits of each register & probably clobber the
+	 * most significant 32 bits of the 64 bit values we're using. In order
+	 * to avoid this we must disable interrupts.
+	 */
+	local_irq_save(flags);
+
+	asm volatile(
+	"	.set	push				\n"
+	"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
+	/* Load 64 bits from ptr */
+	"1:	lld	%L0, %3		# __cmpxchg64	\n"
+	/*
+	 * Split the 64 bit value we loaded into the 2 registers that hold the
+	 * ret variable.
+	 */
+	"	dsra	%M0, %L0, 32			\n"
+	"	sll	%L0, %L0, 0			\n"
+	/*
+	 * Compare ret against old, breaking out of the loop if they don't
+	 * match.
+	 */
+	"	bne	%M0, %M4, 2f			\n"
+	"	bne	%L0, %L4, 2f			\n"
+	/*
+	 * Combine the 32 bit halves from the 2 registers that hold the new
+	 * variable into a single 64 bit register.
+	 */
+#  if MIPS_ISA_REV >= 2
+	"	move	%L1, %L5			\n"
+	"	dins	%L1, %M5, 32, 32		\n"
+#  else
+	"	dsll	%L1, %L5, 32			\n"
+	"	dsrl	%L1, %L1, 32			\n"
+	"	.set	noat				\n"
+	"	dsll	$at, %M5, 32			\n"
+	"	or	%L1, %L1, $at			\n"
+	"	.set	at				\n"
+#  endif
+	/* Attempt to store new at ptr */
+	"	scd	%L1, %2				\n"
+	/* If we failed, loop! */
+	"\t" __scbeqz "	%L1, 1b				\n"
+	"	.set	pop				\n"
+	"2:						\n"
+	: "=&r"(ret),
+	  "=&r"(tmp),
+	  "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
+	: GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr),
+	  "r" (old),
+	  "r" (new)
+	: "memory");
+
+	local_irq_restore(flags);
+	return ret;
+}
+
+# define cmpxchg64(ptr, o, n) ({					\
+	unsigned long long __old = (__typeof__(*(ptr)))(o);		\
+	unsigned long long __new = (__typeof__(*(ptr)))(n);		\
+	__typeof__(*(ptr)) __res;					\
+									\
+	/*								\
+	 * We can only use cmpxchg64 if we know that the CPU supports	\
+	 * 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported	\
+	 * will cause a build error unless cpu_has_64bits is a		\
+	 * compile-time constant 1.					\
+	 */								\
+	if (cpu_has_64bits && kernel_uses_llsc)				\
+		__res = __cmpxchg64((ptr), __old, __new);		\
+	else								\
+		__res = __cmpxchg64_unsupported();		 	\
+									\
+	__res;								\
+})
+
+# else /* !CONFIG_SMP */
+# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# endif /* !CONFIG_SMP */
+#endif /* !CONFIG_64BIT */
 
 #undef __scbeqz