author		Babu Moger <babu.moger@oracle.com>	2017-05-25 01:55:13 +0200
committer	David S. Miller <davem@davemloft.net>	2017-05-25 21:06:51 +0200
commit		a37594f198363fd9321ece54440336fd4b2a9c8e (patch)
tree		591a2cc525ac40ca81840cd1b93e615dc5dd9679 /arch
parent		arch/sparc: Introduce cmpxchg_u8 SPARC (diff)
download	linux-a37594f198363fd9321ece54440336fd4b2a9c8e.tar.xz
		linux-a37594f198363fd9321ece54440336fd4b2a9c8e.zip
arch/sparc: Enable queued rwlocks for SPARC
Enable queued rwlocks for SPARC. Here are the discussions on this feature
when it was introduced:
https://lwn.net/Articles/572765/
https://lwn.net/Articles/582200/
Cleaned up the arch_read_xxx and arch_write_xxx definitions in spinlock_64.h.
These routines are replaced by the generic functions in include/asm-generic/qrwlock.h.
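For context, the generic implementation referenced above keeps readers and the writer in a single atomic word: readers add a bias to the count and back off if a writer is present, while a writer claims a low "locked" byte once the word is clear. The following is a minimal user-space C sketch of those fast paths, not the kernel code; the constant names, the struct, and the naive retry loops standing in for the queued slow paths are illustrative assumptions only.

/*
 * Minimal user-space sketch of a queued-rwlock style lock word (C11
 * atomics).  Illustrative only: constants, names and the naive retry
 * loops below are placeholders for the kernel's queued slow paths.
 */
#include <stdatomic.h>

#define QW_LOCKED 0x000000ffu	/* low byte set: a writer holds the lock */
#define QR_BIAS   0x00000100u	/* each active reader adds this to the word */

struct rwlock_sketch {
	atomic_uint cnts;	/* 0 means unlocked; readers counted above the writer byte */
};

static void sketch_read_lock(struct rwlock_sketch *l)
{
	for (;;) {
		unsigned int cnts = atomic_fetch_add(&l->cnts, QR_BIAS) + QR_BIAS;

		if (!(cnts & QW_LOCKED))
			return;	/* fast path: no writer, read lock held */

		/* Writer present: back out and retry (the kernel queues here). */
		atomic_fetch_sub(&l->cnts, QR_BIAS);
	}
}

static void sketch_read_unlock(struct rwlock_sketch *l)
{
	atomic_fetch_sub(&l->cnts, QR_BIAS);
}

static void sketch_write_lock(struct rwlock_sketch *l)
{
	unsigned int expected = 0;

	/* Fast path: grab the writer byte only when no reader or writer is active. */
	while (!atomic_compare_exchange_weak(&l->cnts, &expected, QW_LOCKED))
		expected = 0;	/* naive spin instead of the queued slow path */
}

static void sketch_write_unlock(struct rwlock_sketch *l)
{
	atomic_fetch_and(&l->cnts, ~QW_LOCKED);	/* clear the writer byte */
}

The real generic qrwlock replaces the naive retry loops with a queued slow path, so contending CPUs wait in order instead of all spinning on the lock word.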
Signed-off-by: Babu Moger <babu.moger@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc/Kconfig				1
-rw-r--r--	arch/sparc/include/asm/qrwlock.h		7
-rw-r--r--	arch/sparc/include/asm/spinlock_64.h		124
-rw-r--r--	arch/sparc/include/asm/spinlock_types.h		5
4 files changed, 13 insertions, 124 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index eb213b5000c8..0e8065207596 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -83,6 +83,7 @@ config SPARC64
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select HAVE_NMI
 	select HAVE_REGS_AND_STACK_ACCESS_API
+	select ARCH_USE_QUEUED_RWLOCKS
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/sparc/include/asm/qrwlock.h b/arch/sparc/include/asm/qrwlock.h
new file mode 100644
index 000000000000..d68a4b102100
--- /dev/null
+++ b/arch/sparc/include/asm/qrwlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QRWLOCK_H
+#define _ASM_SPARC_QRWLOCK_H
+
+#include <asm-generic/qrwlock_types.h>
+#include <asm-generic/qrwlock.h>
+
+#endif /* _ASM_SPARC_QRWLOCK_H */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 07c9f2e9bf57..8901c2d4ada9 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -10,6 +10,7 @@
 
 #include <asm/processor.h>
 #include <asm/barrier.h>
+#include <asm/qrwlock.h>
 
 /* To get debugging spinlocks which detect and catch
  * deadlock situations, set CONFIG_DEBUG_SPINLOCK
@@ -94,132 +95,9 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
 		     : "memory");
 }
 
-/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__ (
-"1:	ldsw		[%2], %0\n"
-"	brlz,pn		%0, 2f\n"
-"4:	 add		%0, 1, %1\n"
-"	cas		[%2], %0, %1\n"
-"	cmp		%0, %1\n"
-"	bne,pn		%%icc, 1b\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	ldsw		[%2], %0\n"
-"	brlz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 4b\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
-	int tmp1, tmp2;
-
-	__asm__ __volatile__ (
-"1:	ldsw		[%2], %0\n"
-"	brlz,a,pn	%0, 2f\n"
-"	 mov		0, %0\n"
-"	add		%0, 1, %1\n"
-"	cas		[%2], %0, %1\n"
-"	cmp		%0, %1\n"
-"	bne,pn		%%icc, 1b\n"
-"	 mov		1, %0\n"
-"2:"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-
-	return tmp1;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-"1:	lduw	[%2], %0\n"
-"	sub	%0, 1, %1\n"
-"	cas	[%2], %0, %1\n"
-"	cmp	%0, %1\n"
-"	bne,pn	%%xcc, 1b\n"
-"	 nop"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock)
-	: "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
-	unsigned long mask, tmp1, tmp2;
-
-	mask = 0x80000000UL;
-
-	__asm__ __volatile__(
-"1:	lduw		[%2], %0\n"
-"	brnz,pn		%0, 2f\n"
-"4:	 or		%0, %3, %1\n"
-"	cas		[%2], %0, %1\n"
-"	cmp		%0, %1\n"
-"	bne,pn		%%icc, 1b\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	lduw		[%2], %0\n"
-"	brnz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 4b\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r" (lock), "r" (mask)
-	: "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
-	__asm__ __volatile__(
-"	stw		%%g0, [%0]"
-	: /* no outputs */
-	: "r" (lock)
-	: "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
-	unsigned long mask, tmp1, tmp2, result;
-
-	mask = 0x80000000UL;
-
-	__asm__ __volatile__(
-"	mov	0, %2\n"
-"1:	lduw	[%3], %0\n"
-"	brnz,pn	%0, 2f\n"
-"	 or	%0, %4, %1\n"
-"	cas	[%3], %0, %1\n"
-"	cmp	%0, %1\n"
-"	bne,pn	%%icc, 1b\n"
-"	 nop\n"
-"	mov	1, %2\n"
-"2:"
-	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
-	: "r" (lock), "r" (mask)
-	: "memory");
-
-	return result;
-}
-
 #define arch_read_lock_flags(p, f) arch_read_lock(p)
 #define arch_write_lock_flags(p, f) arch_write_lock(p)
 
-#define arch_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
-#define arch_write_can_lock(rw)	(!(rw)->lock)
-
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 019c0855bf8b..64fce21b23df 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,10 +7,13 @@ typedef struct {
 
 #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
 
+#ifdef CONFIG_QUEUED_RWLOCKS
+#include <asm-generic/qrwlock_types.h>
+#else
 typedef struct {
 	volatile unsigned int lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED	{ 0 }
-
+#endif /* CONFIG_QUEUED_RWLOCKS */
 #endif
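One practical note on the diff above: nothing outside the arch code changes. Kernel code keeps taking rwlocks through the usual API, and only the arch_rwlock_t backing it is switched to the generic queued implementation. The snippet below is an illustrative example, not part of this patch; example_lock, example_value and the function names are made-up.

/* Illustrative only: callers of the rwlock API are unaffected by this patch. */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);	/* hypothetical lock */
static int example_value;		/* hypothetical shared data */

static int example_read(void)
{
	int v;

	read_lock(&example_lock);	/* backed by the generic queued rwlock when CONFIG_QUEUED_RWLOCKS is set */
	v = example_value;
	read_unlock(&example_lock);
	return v;
}

static void example_set(int v)
{
	write_lock(&example_lock);
	example_value = v;
	write_unlock(&example_lock);
}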