| author | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2010-07-13 23:07:45 +0200 |
|---|---|---|
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2011-08-29 22:46:07 +0200 |
| commit | 2994488fe5bb721de1ded53af1a2fc41f47f6ddc (patch) | |
| tree | 9629e17795bbe0e6e12b31ebf3b433fcea51668a /arch/x86/include/asm/spinlock.h | |
| parent | x86, ticketlock: Convert spin loop to C (diff) | |
x86, ticketlock: Convert __ticket_spin_lock to use xadd()
Convert the two variants of __ticket_spin_lock() to use xadd(), which
has the effect of making them identical, so remove the duplicate function.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
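
For context, xadd() is a helper added earlier in this series (in the x86 cmpxchg header) that wraps the LOCK XADD instruction: it atomically adds its argument to the target and returns the value the target held before the addition, with the operand size taken from the pointer type. Below is a minimal user-space sketch of those semantics; it assumes GCC/Clang __atomic builtins and a hypothetical xadd_u16() name, and stands in for, rather than reproduces, the kernel macro.

```c
#include <stdint.h>

/* Sketch of what xadd() computes: atomically old = *ptr, *ptr += inc,
 * return old.  The kernel macro emits LOCK XADD via inline asm; here a
 * compiler builtin performs the equivalent fetch-and-add. */
static inline uint16_t xadd_u16(uint16_t *ptr, uint16_t inc)
{
	return __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
}
```

Because xadd() sizes itself from its operand, the same call site works for both the 8-bit and 16-bit ticket layouts, which is what lets the NR_CPUS < 256 and larger variants of __ticket_spin_lock() collapse into one function.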
Diffstat (limited to 'arch/x86/include/asm/spinlock.h')
-rw-r--r-- | arch/x86/include/asm/spinlock.h | 35
1 file changed, 5 insertions, 30 deletions
```diff
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 5240cdefa683..b69e0b473de6 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -54,26 +54,22 @@
  * save some instructions and make the code more elegant. There really isn't
  * much between them in performance though, especially as locks are out of line.
  */
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	register union {
-		struct __raw_tickets tickets;
-		unsigned short slock;
-	} inc = { .slock = 1 << TICKET_SHIFT };
+	register struct __raw_tickets inc = { .tail = 1 };
 
-	asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
-		      : "+Q" (inc), "+m" (lock->slock) : : "memory", "cc");
+	inc = xadd(&lock->tickets, inc);
 
 	for (;;) {
-		if (inc.tickets.head == inc.tickets.tail)
+		if (inc.head == inc.tail)
 			break;
 		cpu_relax();
-		inc.tickets.head = ACCESS_ONCE(lock->tickets.head);
+		inc.head = ACCESS_ONCE(lock->tickets.head);
 	}
 	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
+#if (NR_CPUS < 256)
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int tmp, new;
@@ -101,27 +97,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 		     : "memory", "cc");
 }
 #else
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned inc = 1 << TICKET_SHIFT;
-	__ticket_t tmp;
-
-	asm volatile(LOCK_PREFIX "xaddl %0, %1\n\t"
-		     : "+r" (inc), "+m" (lock->slock)
-		     : : "memory", "cc");
-
-	tmp = inc;
-	inc >>= TICKET_SHIFT;
-
-	for (;;) {
-		if ((__ticket_t)inc == tmp)
-			break;
-		cpu_relax();
-		tmp = ACCESS_ONCE(lock->tickets.head);
-	}
-	barrier();		/* make sure nothing creeps before the lock is taken */
-}
-
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned tmp;
```
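
The converted function reads as a classic fetch-and-add ticket lock. Below is a hypothetical user-space analogue using C11 atomics in place of xadd(), ACCESS_ONCE(), and cpu_relax(); the ticket_lock type and function names are illustrative, not from the patch, and unlike the kernel it keeps head and tail in separate atomics rather than packing them into one word:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <immintrin.h>		/* _mm_pause(), standing in for cpu_relax() */

struct ticket_lock {
	_Atomic uint16_t head;	/* ticket currently being served */
	_Atomic uint16_t tail;	/* next ticket to hand out */
};				/* zero-initialize: lock starts free */

static void ticket_lock_acquire(struct ticket_lock *lock)
{
	/* Take a ticket: fetch-and-add returns the old tail, our number. */
	uint16_t me = atomic_fetch_add_explicit(&lock->tail, 1,
						memory_order_relaxed);

	/* Spin until our number comes up. */
	while (atomic_load_explicit(&lock->head, memory_order_acquire) != me)
		_mm_pause();
}

static void ticket_lock_release(struct ticket_lock *lock)
{
	/* Serve the next waiter. */
	atomic_fetch_add_explicit(&lock->head, 1, memory_order_release);
}
```

The kernel version improves on this sketch by packing head and tail into a single word, so the one LOCK XADD that claims a ticket also brings back the current head: in the uncontended case the lock is taken without any additional load before the first head/tail comparison.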