author     John David Anglin <dave.anglin@bell.net>    2018-11-06 12:00:01 +0100
committer  Helge Deller <deller@gmx.de>                2018-11-06 12:03:22 +0100
commit     86d4d068df573a8c2105554624796c086d6bec3d (patch)
tree       5968f43762711e2aa98d92e8775600ffe9b03a4f
parent     Linux 4.20-rc1 (diff)
parisc: Revert "Release spinlocks using ordered store"
This reverts commit d27dfa13b9f77ae7e6ed09d70a0426ed26c1a8f9.

Unfortunately, this patch needs to be reverted. We need the full sync barrier and not the limited barrier provided by using an ordered store. The sync ensures that all accesses and cache purge instructions that follow the sync are performed after all such instructions prior to the sync instruction have completed executing.

The patch breaks the rwlock implementation in glibc. This caused the test-lock application in the libprelude testsuite to hang. With the change reverted, the test runs correctly and the libprelude package builds successfully.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
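For illustration, the two release sequences this revert swaps can be sketched in C as below. This is a minimal sketch modeled on the spinlock.h hunk further down, not kernel source: the function names are hypothetical, 'a' stands for the aligned lock word normally obtained via __ldcw_align(), and it is assumed that mb() on parisc expands to a full "sync" barrier.

/* Sketch only: contrasts the two unlock sequences discussed above. */

/* Reverted variant: release via an ordered store (stw,ma).  Per the
 * commit message, this gives only a limited barrier; it does not
 * guarantee that earlier accesses and cache purge instructions have
 * completed before the lock appears free. */
static inline void unlock_with_ordered_store(volatile unsigned int *a)
{
	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}

/* Restored variant: a full sync barrier followed by a plain store.
 * The sync ensures every access and cache purge instruction issued
 * before it has completed before the lock word is rewritten. */
static inline void unlock_with_full_barrier(volatile unsigned int *a)
{
	__asm__ __volatile__("sync" : : : "memory");	/* assumed mb() expansion */
	*a = 1;
}

In the kernel itself this corresponds to the mb(); *a = 1; sequence restored in arch_spin_unlock(), as the first hunk below shows.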
-rw-r--r--  arch/parisc/include/asm/spinlock.h |  4 ++--
-rw-r--r--  arch/parisc/kernel/syscall.S       | 12 ++++++++----
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 16aec9ba2580..8a63515f03bf 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-	/* Release with ordered store. */
-	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 9505c317818d..a9bc90dc4ae7 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -640,7 +640,8 @@ cas_action:
 	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%r26)
 	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
@@ -654,7 +655,8 @@ cas_action:
 3:
 	/* Error occurred on load or store */
 	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
 #endif
@@ -855,7 +857,8 @@ cas2_action:
 
 cas2_end:
 	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
 	/* Enable interrupts */
 	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */
@@ -865,7 +868,8 @@ cas2_end:
 22:
 	/* Error occurred on load or store */
 	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
 	ssm	PSW_SM_I, %r0
 	ldo	1(%r0),%r28
 	b	lws_exit