author	Anton Blanchard <anton@samba.org>	2010-02-10 01:57:28 +0100
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-02-17 04:03:14 +0100
commit	4e14a4d17a8cd66ccab180d32c977091922cfbed (patch)
tree	5d6c9c97853ada47c4748d6965fca54692dbf665 /arch/powerpc/include/asm/spinlock.h
parent	powerpc: Convert global "BAD" interrupt to per cpu spurious (diff)
download	linux-4e14a4d17a8cd66ccab180d32c977091922cfbed.tar.xz
	linux-4e14a4d17a8cd66ccab180d32c977091922cfbed.zip
powerpc: Use lwarx hint in spinlocks
Recent versions of the PowerPC architecture added a hint bit to the larx
instructions to differentiate between an atomic operation and a lock operation:

> 0 Other programs might attempt to modify the word in storage addressed by EA
> even if the subsequent Store Conditional succeeds.
>
> 1 Other programs will not attempt to modify the word in storage addressed by
> EA until the program that has acquired the lock performs a subsequent store
> releasing the lock.

To avoid a binutils dependency this patch creates macros for the extended
lwarx format and uses them in the spinlock code.

To test this change I used a simple test case that acquires and releases a
global pthread mutex:

	pthread_mutex_lock(&mutex);
	pthread_mutex_unlock(&mutex);

On a 32 core POWER6, running 32 test threads, we spend almost all our time in
the futex spinlock code:

    94.37%  perf  [kernel]  [k] ._raw_spin_lock
            |
            |--99.95%-- ._raw_spin_lock
            |          |
            |          |--63.29%-- .futex_wake
            |          |
            |          |--36.64%-- .futex_wait_setup

which makes it a good test for this patch. The results (in lock/unlock
operations per second) are:

    before: 1538203 ops/sec
    after:  2189219 ops/sec

an improvement of 42%.

A 32 core POWER7 improves even more:

    before: 1279529 ops/sec
    after:  2282076 ops/sec

an improvement of 78%.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
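The test case is quoted above only as a bare lock/unlock pair. A minimal
sketch of that kind of benchmark is shown below; the thread count, iteration
count and timing code are assumptions for illustration, not the original test
program. With many threads contending on a single mutex, most of the time ends
up in the kernel's futex spinlock path, as in the profile above.

	/* build: gcc -O2 -pthread mutex_bench.c -o mutex_bench */
	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>

	#define NR_THREADS	32
	#define ITERATIONS	1000000

	static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

	static void *worker(void *arg)
	{
		int i;

		(void)arg;

		/* Acquire and release the shared mutex in a tight loop. */
		for (i = 0; i < ITERATIONS; i++) {
			pthread_mutex_lock(&mutex);
			pthread_mutex_unlock(&mutex);
		}

		return NULL;
	}

	int main(void)
	{
		pthread_t threads[NR_THREADS];
		struct timespec start, end;
		double elapsed;
		int i;

		clock_gettime(CLOCK_MONOTONIC, &start);

		for (i = 0; i < NR_THREADS; i++)
			pthread_create(&threads[i], NULL, worker, NULL);

		for (i = 0; i < NR_THREADS; i++)
			pthread_join(threads[i], NULL);

		clock_gettime(CLOCK_MONOTONIC, &end);

		elapsed = (end.tv_sec - start.tv_sec) +
			  (end.tv_nsec - start.tv_nsec) / 1e9;

		/* Report lock/unlock operations per second across all threads. */
		printf("%.0f ops/sec\n", (double)NR_THREADS * ITERATIONS / elapsed);

		return 0;
	}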
Diffstat (limited to 'arch/powerpc/include/asm/spinlock.h')
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	7
1 files changed, 4 insertions, 3 deletions
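The hunks below pull in <asm/ppc-opcode.h> and replace the plain lwarx with
the PPC_LWARX() macro so that the EH hint can be emitted even when the
assembler does not accept the 4-operand "lwarx RT,RA,RB,EH" form. As a rough
illustration of the idea only (the helper below is hypothetical and is not
the kernel's macro), the instruction word can be built by hand from the Power
ISA X-form encoding of lwarx:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Hypothetical helper: assemble the 32-bit lwarx instruction word so
	 * the EH hint can be encoded without assembler support.
	 */
	static uint32_t lwarx_word(unsigned rt, unsigned ra, unsigned rb,
				   unsigned eh)
	{
		return (31u << 26) |		/* primary opcode */
		       ((rt & 0x1f) << 21) |	/* RT: destination register */
		       ((ra & 0x1f) << 16) |	/* RA: base register */
		       ((rb & 0x1f) << 11) |	/* RB: index register */
		       (20u << 1) |		/* extended opcode for lwarx */
		       (eh & 1);		/* EH hint: 1 = lock acquisition */
	}

	int main(void)
	{
		/* e.g. lwarx r3,0,r4 with the lock-acquisition hint set */
		printf("0x%08x\n", lwarx_word(3, 0, 4, 1));
		return 0;
	}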
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 764094cff681..1c35b59f6f30 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -27,6 +27,7 @@
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
#define arch_spin_is_locked(x) ((x)->slock != 0)
@@ -60,7 +61,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
token = LOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2\n\
+"1: " PPC_LWARX(%0,0,%2,1) "\n\
cmpwi 0,%0,0\n\
bne- 2f\n\
stwcx. %1,0,%2\n\
@@ -186,7 +187,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
long tmp;
__asm__ __volatile__(
-"1: lwarx %0,0,%1\n"
+"1: " PPC_LWARX(%0,0,%1,1) "\n"
__DO_SIGN_EXTEND
" addic. %0,%0,1\n\
ble- 2f\n"
@@ -211,7 +212,7 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw)
token = WRLOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2\n\
+"1: " PPC_LWARX(%0,0,%2,1) "\n\
cmpwi 0,%0,0\n\
bne- 2f\n"
PPC405_ERR77(0,%1)
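For readers less familiar with the lwarx/stwcx. idiom, the __arch_spin_trylock
sequence in the first hunk amounts to an atomic "install LOCK_TOKEN if the
lock word is 0, and return the old value". A rough C-level sketch using a GCC
builtin is below, for illustration only; the kernel keeps the hand-written asm
precisely so it can set the EH hint on the lwarx.

	/* Sketch: types and memory ordering are simplified. */
	static inline unsigned int spin_trylock_sketch(unsigned int *slock,
						       unsigned int token)
	{
		unsigned int expected = 0;

		/* Install 'token' only if the lock word is currently 0. */
		if (__atomic_compare_exchange_n(slock, &expected, token, 0,
						__ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return 0;	/* old value was 0: lock acquired */

		return expected;	/* non-zero old value: lock was held */
	}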