summaryrefslogtreecommitdiffstats
path: root/arch/powerpc/include/asm
diff options
context:
space:
mode:
authorNicholas Piggin <npiggin@gmail.com>2022-11-26 10:59:17 +0100
committerMichael Ellerman <mpe@ellerman.id.au>2022-12-02 07:48:49 +0100
commit4c93c2e4b9e8988511c06b9c042f23d4b8f593ad (patch)
tree7e8f54d808a17fd46eb79d5f43d228b6af175af0 /arch/powerpc/include/asm
parentpowerpc/qspinlock: add mcs queueing for contended waiters (diff)
downloadlinux-4c93c2e4b9e8988511c06b9c042f23d4b8f593ad.tar.xz
linux-4c93c2e4b9e8988511c06b9c042f23d4b8f593ad.zip
powerpc/qspinlock: use a half-word store to unlock to avoid larx/stcx.
The first 16 bits of the lock are only modified by the owner, and other modifications always use atomic operations on the entire 32 bits, so unlocks can use plain stores on the 16 bits. This is the same kind of optimisation done by core qspinlock code.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-3-npiggin@gmail.com
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--arch/powerpc/include/asm/qspinlock.h6
-rw-r--r--arch/powerpc/include/asm/qspinlock_types.h19
2 files changed, 18 insertions, 7 deletions
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 6946dba5d087..713f6629f6fb 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -37,11 +37,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
static inline void queued_spin_unlock(struct qspinlock *lock)
{
- for (;;) {
- int val = atomic_read(&lock->val);
- if (atomic_cmpxchg_release(&lock->val, val, val & ~_Q_LOCKED_VAL) == val)
- return;
- }
+ smp_store_release(&lock->locked, 0);
}
#define arch_spin_is_locked(l) queued_spin_is_locked(l)
diff --git a/arch/powerpc/include/asm/qspinlock_types.h b/arch/powerpc/include/asm/qspinlock_types.h
index 20a36dfb14e2..fe87181c59e5 100644
--- a/arch/powerpc/include/asm/qspinlock_types.h
+++ b/arch/powerpc/include/asm/qspinlock_types.h
@@ -3,12 +3,27 @@
#define _ASM_POWERPC_QSPINLOCK_TYPES_H
#include <linux/types.h>
+#include <asm/byteorder.h>
typedef struct qspinlock {
- atomic_t val;
+ union {
+ atomic_t val;
+
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u16 locked;
+ u8 reserved[2];
+ };
+#else
+ struct {
+ u8 reserved[2];
+ u16 locked;
+ };
+#endif
+ };
} arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
/*
* Bitfields in the lock word: