author     Will Deacon <will.deacon@arm.com>  2016-09-06 17:42:58 +0200
committer  Will Deacon <will.deacon@arm.com>  2016-09-09 19:12:34 +0200
commit     05492f2fd87d0a2e6a626bf8fe002c9a11941950 (patch)
tree       021fd9f449e9be0a2876f290f10276bd999dc928 /arch/arm64/include/asm/spinlock.h
parent     arm64: barriers: introduce nops and __nops macros for NOP sequences (diff)
arm64: lse: convert lse alternatives NOP padding to use __nops
The LSE atomics are implemented using alternative code sequences of different lengths, and explicit NOP padding is used to ensure the patching works correctly. This patch converts the bulk of the LSE code over to using the __nops macro, which makes it slightly clearer as to what is going on and also consolidates all of the padding at the end of the various sequences.

Signed-off-by: Will Deacon <will.deacon@arm.com>
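For context, the parent commit adds the __nops macro so that a run of n NOPs can be emitted with a single assembler .rept loop instead of n hand-written "nop\n" lines. A minimal sketch of the two definitions (per the parent commit, in arch/arm64/include/asm/barrier.h; shown for illustration rather than quoted verbatim):

	/* String form, for pasting into inline-asm alternative sequences:
	 * expands to ".rept n \n nop \n .endr \n", i.e. n NOP instructions. */
	#define __nops(n)	".rept	" #n "\nnop\n.endr\n"

	/* Statement form, emitting the NOPs directly in C code. */
	#define nops(n)		asm volatile(__nops(n))

The padding itself is required because ARM64_LSE_ATOMIC_INSN is built on the arm64 ALTERNATIVE() patching machinery, which expects the default LL/SC sequence and the LSE replacement to assemble to the same length; __nops(n) keeps the two sides balanced while gathering all of the padding at the end of each sequence, as the hunks below show.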
Diffstat (limited to 'arch/arm64/include/asm/spinlock.h')
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 27
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index e875a5a551d7..28f8c2174f8a 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -66,8 +66,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" stxr %w1, %w0, %2\n"
-" nop\n"
-" nop\n",
+ __nops(2),
/* LSE atomics */
" mov %w1, %w0\n"
" cas %w0, %w0, %2\n"
@@ -99,9 +98,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
/* LSE atomics */
" mov %w2, %w5\n"
" ldadda %w2, %w0, %3\n"
-" nop\n"
-" nop\n"
-" nop\n"
+ __nops(3)
)
/* Did we get the lock? */
@@ -165,8 +162,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
" stlrh %w1, %0",
/* LSE atomics */
" mov %w1, #1\n"
- " nop\n"
- " staddlh %w1, %0")
+ " staddlh %w1, %0\n"
+ __nops(1))
: "=Q" (lock->owner), "=&r" (tmp)
:
: "memory");
@@ -212,7 +209,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" cbnz %w0, 1b\n"
" stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n"
- " nop",
+ __nops(1),
/* LSE atomics */
"1: mov %w0, wzr\n"
"2: casa %w0, %w2, %1\n"
@@ -241,8 +238,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
/* LSE atomics */
" mov %w0, wzr\n"
" casa %w0, %w2, %1\n"
- " nop\n"
- " nop")
+ __nops(2))
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
: "memory");
@@ -290,8 +286,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" add %w0, %w0, #1\n"
" tbnz %w0, #31, 1b\n"
" stxr %w1, %w0, %2\n"
- " nop\n"
- " cbnz %w1, 2b",
+ " cbnz %w1, 2b\n"
+ __nops(1),
/* LSE atomics */
"1: wfe\n"
"2: ldxr %w0, %2\n"
@@ -317,9 +313,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
" cbnz %w1, 1b",
/* LSE atomics */
" movn %w0, #0\n"
- " nop\n"
- " nop\n"
- " staddl %w0, %2")
+ " staddl %w0, %2\n"
+ __nops(2))
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
: "memory");
@@ -344,7 +339,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
" tbnz %w1, #31, 1f\n"
" casa %w0, %w1, %2\n"
" sbc %w1, %w1, %w0\n"
- " nop\n"
+ __nops(1)
"1:")
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
: