author     Paul Burton <paul.burton@imgtec.com>    2017-06-10 02:26:40 +0200
committer  Ralf Baechle <ralf@linux-mips.org>      2017-06-29 02:42:25 +0200
commit     3ba7f44d2b19166b34031db48ce613d1bddbd384 (patch)
tree       973693babaf2dd5eee8646c084e155e7b30f1adb /arch/mips
parent     MIPS: cmpxchg: Implement 1 byte & 2 byte xchg() (diff)
MIPS: cmpxchg: Implement 1 byte & 2 byte cmpxchg()
Implement support for 1 & 2 byte cmpxchg() using read-modify-write atop a
4 byte cmpxchg(). This allows us to support these atomic operations despite
the MIPS ISA only providing 4 & 8 byte atomic operations.

This is required in order to support queued rwlocks (qrwlock) in a later
patch, since these make use of a 1 byte cmpxchg() in their slow path.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16355/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
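As a rough illustration of the read-modify-write approach described above,
here is a minimal user-space sketch of the same idea, built on the GCC/Clang
__atomic builtins rather than the kernel's cmpxchg(), and assuming a
little-endian layout (the patch itself also handles big-endian). The name
cmpxchg8_emulated is hypothetical, not something from the kernel:

    /*
     * Illustrative sketch only: emulate a 1 byte compare-and-exchange
     * on top of a 4 byte one. Little-endian is assumed here; the
     * kernel patch below also handles big-endian.
     */
    #include <stdbool.h>
    #include <stdint.h>

    static uint8_t cmpxchg8_emulated(volatile uint8_t *ptr, uint8_t old,
                                     uint8_t new)
    {
            /* The naturally aligned 4 byte word containing *ptr. */
            volatile uint32_t *ptr32 =
                    (volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
            /* Bit offset of our byte within that word (little-endian). */
            unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
            uint32_t mask = 0xffu << shift;
            uint32_t load32 = *ptr32;

            while (true) {
                    uint8_t cur = (load32 & mask) >> shift;
                    if (cur != old)
                            return cur;     /* comparison failed: bail out */

                    uint32_t old32 = (load32 & ~mask) | ((uint32_t)old << shift);
                    uint32_t new32 = (load32 & ~mask) | ((uint32_t)new << shift);

                    if (__atomic_compare_exchange_n(ptr32, &old32, new32, false,
                                                    __ATOMIC_SEQ_CST,
                                                    __ATOMIC_SEQ_CST))
                            return old;

                    /* CAS failed; old32 was updated with the current word. */
                    load32 = old32;
            }
    }

The kernel implementation in the patch below generalises this to 2 byte
values and to big-endian byte ordering.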
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/include/asm/cmpxchg.h |  7
-rw-r--r--  arch/mips/kernel/cmpxchg.c      | 57
2 files changed, 64 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index a633bf845689..a633f61c5545 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -142,10 +142,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
__ret; \
})
+extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size);
+
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
switch (size) {
+ case 1:
+ case 2:
+ return __cmpxchg_small(ptr, old, new, size);
+
case 4:
return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
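For context, callers reach the switch above through the generic cmpxchg()
wrapper macro, which dispatches on sizeof(*(ptr)). A simplified sketch of
that pattern (the real macro in this header additionally handles types and
memory barriers; my_cmpxchg is an illustrative name):

    /* Simplified sketch of size-based dispatch; not the kernel's macro. */
    #define my_cmpxchg(ptr, old, new)                                  \
            ((__typeof__(*(ptr)))__cmpxchg((ptr),                      \
                                           (unsigned long)(old),       \
                                           (unsigned long)(new),       \
                                           sizeof(*(ptr))))

With sizeof(*(ptr)) equal to 1 or 2, __cmpxchg() now lands in the new
__cmpxchg_small() helper added below.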
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 5acfbf9fb2c5..7730f1d3434f 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -50,3 +50,60 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
return (load32 & mask) >> shift;
}
+
+unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ u32 mask, old32, new32, load32;
+ volatile u32 *ptr32;
+ unsigned int shift;
+ u8 load;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask inputs to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ old &= mask;
+ new &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * compare & exchange within the naturally aligned 4 byte integer
+ * that includes it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ while (true) {
+ /*
+ * Ensure the byte we want to exchange matches the expected
+ * old value, and if not then bail.
+ */
+ load = (load32 & mask) >> shift;
+ if (load != old)
+ return load;
+
+ /*
+ * Calculate the old & new values of the naturally aligned
+ * 4 byte integer that include the byte we want to exchange.
+ * Attempt to exchange the old value for the new value, and
+ * return if we succeed.
+ */
+ old32 = (load32 & ~mask) | (old << shift);
+ new32 = (load32 & ~mask) | (new << shift);
+ load32 = cmpxchg(ptr32, old32, new32);
+ if (load32 == old32)
+ return old;
+ }
+}
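The one subtle step in the function above is the big-endian adjustment,
shift ^= sizeof(u32) - size: on big-endian the lowest-addressed byte is the
most significant, so the byte offset within the word must be mirrored. A
small stand-alone illustration of where a 2 byte value at word offset 2
lands under each byte order (small_shift is a hypothetical helper, not
kernel code):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int small_shift(uintptr_t ptr, unsigned int size,
                                    int big_endian)
    {
            unsigned int shift = ptr & 0x3; /* byte offset within the word */

            if (big_endian)
                    shift ^= sizeof(uint32_t) - size; /* mirror the offset */

            return shift * 8;               /* convert bytes to bits */
    }

    int main(void)
    {
            /* A 2 byte value at address ...2 occupies different bits: */
            printf("little-endian: shift = %u (bits 16-31)\n",
                   small_shift(0x1002, 2, 0));
            printf("big-endian:    shift = %u (bits 0-15)\n",
                   small_shift(0x1002, 2, 1));
            return 0;
    }

On little-endian the halfword at offset 2 sits in the upper 16 bits of the
containing word (shift 16); on big-endian the same bytes form the lower 16
bits (shift 0), which is exactly what the XOR adjustment produces.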