author		Michael Ellerman <mpe@ellerman.id.au>	2014-01-15 08:14:29 +0100
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-01-28 04:45:44 +0100
commit		7179ba52889bef7e5e23f72908270e1ab2b7fc6f (patch)
tree		4904e3a0268095e0242b29adc782708a7729ccc8 /arch/powerpc
parent		powerpc: Add support for the optimised lockref implementation (diff)
download	linux-7179ba52889bef7e5e23f72908270e1ab2b7fc6f.tar.xz
		linux-7179ba52889bef7e5e23f72908270e1ab2b7fc6f.zip
powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked()
At a glance these are just the inverse of each other. The one subtlety is
that arch_spin_value_unlocked() takes the lock by value, rather than as a
pointer, which is important for the lockref code.

On the other hand arch_spin_is_locked() doesn't really care, so implement
it in terms of arch_spin_value_unlocked().

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
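The by-value form matters because the lockref fast path tests a snapshot of the combined lock+count word rather than the live lock. Below is a minimal user-space sketch of that pattern; the struct layout, field names and the helper lockref_get_fast() are illustrative stand-ins, not the actual lib/lockref.c code.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for arch_spinlock_t and struct lockref. */
typedef struct {
	uint32_t slock;			/* 0 means unlocked */
} arch_spinlock_t;

struct lockref {
	union {
		uint64_t lock_count;	/* lock and count as one 64-bit word */
		struct {
			arch_spinlock_t lock;
			uint32_t count;
		};
	};
};

/* Takes the lock by value, so it can be applied to a copied snapshot. */
static inline bool arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

/*
 * Lockref-style fast path: atomically snapshot the whole word, test the
 * *copy* for being unlocked, and only try to cmpxchg the incremented
 * count back in if the snapshot was unlocked.
 */
static bool lockref_get_fast(struct lockref *lr)
{
	struct lockref old, new;

	old.lock_count = __atomic_load_n(&lr->lock_count, __ATOMIC_RELAXED);
	if (!arch_spin_value_unlocked(old.lock))
		return false;		/* caller falls back to taking the lock */

	new = old;
	new.count++;
	return __atomic_compare_exchange_n(&lr->lock_count, &old.lock_count,
					   new.lock_count, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

The key point is that arch_spin_value_unlocked(old.lock) inspects the copied snapshot; a pointer-based arch_spin_is_locked() could not be used here without re-reading the live lock.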
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/spinlock.h	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 5162f8cd18c0..a30ef6999d66 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,8 +28,6 @@
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 
-#define arch_spin_is_locked(x)		((x)->slock != 0)
-
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
 #ifdef __BIG_ENDIAN__
@@ -59,6 +57,11 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 	return lock.slock == 0;
 }
 
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	return !arch_spin_value_unlocked(*lock);
+}
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
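For reference, a stand-alone check of the inverse relationship the patch relies on; the types are reduced to the slock field only, and the 0x800000yy value mirrors the "locked by CPU yy" encoding noted in the first hunk.

#include <assert.h>
#include <stdint.h>

typedef struct {
	uint32_t slock;
} arch_spinlock_t;

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

/* The patch's new definition: the logical inverse, taking a pointer. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(*lock);
}

int main(void)
{
	arch_spinlock_t unlocked = { .slock = 0 };
	arch_spinlock_t locked   = { .slock = 0x80000003 };	/* held by CPU 3 */

	assert(!arch_spin_is_locked(&unlocked));
	assert(arch_spin_is_locked(&locked));
	return 0;
}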