author | Vineet Gupta <vgupta@synopsys.com> | 2014-11-20 11:12:09 +0100 |
---|---|---|
committer | Vineet Gupta <vgupta@synopsys.com> | 2015-06-25 02:30:16 +0200 |
commit | 2576c28e3f623ed401db7e6197241865328620ef (patch) | |
tree | 6bff094ce931e43c18d5887417e07c9defa0af4d /arch/arc/include/asm/bitops.h | |
parent | ARC: add compiler barrier to LLSC based cmpxchg (diff) | |
download | linux-2576c28e3f623ed401db7e6197241865328620ef.tar.xz linux-2576c28e3f623ed401db7e6197241865328620ef.zip |
ARC: add smp barriers around atomics per Documentation/atomic_ops.txt
- arch_spin_lock/unlock were lacking the ACQUIRE/RELEASE barriers.
  Since ARCv2 only provides load/load, store/store and all/all barriers,
  we need the full barrier.
- LLOCK/SCOND based atomics, bitops and cmpxchg, which return modified
  values, were lacking the explicit smp barriers (see the sketch below
  for the general pattern).
- Non LLOCK/SCOND variants don't need the explicit barriers since that
  is implicitly provided by the spin locks used to implement the
  critical section (the spin lock barriers in turn are also fixed in
  this commit, as explained above).
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: stable@vger.kernel.org
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
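
For context, here is a minimal sketch of the barrier placement the second bullet describes, written as a userspace analogy using the GCC/Clang __atomic builtins; it is not code from this commit. The function and variable names are invented, the relaxed fetch_or stands in for the bare LLOCK/SCOND retry loop, and the two fences stand in for smp_mb():

```c
/*
 * Userspace analogy (illustrative only, not kernel code): a value-returning
 * atomic RMW must act as a full memory barrier before and after it.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long bitmap;	/* made-up example bitmap */

static bool test_and_set_bit_sketch(unsigned int nr, unsigned long *m)
{
	unsigned long old;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* plays the role of smp_mb() before */
	old = __atomic_fetch_or(m, 1UL << nr, __ATOMIC_RELAXED);
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* plays the role of smp_mb() after  */

	return (old >> nr) & 1;
}

int main(void)
{
	printf("%d\n", test_and_set_bit_sketch(3, &bitmap));	/* 0: bit was clear   */
	printf("%d\n", test_and_set_bit_sketch(3, &bitmap));	/* 1: bit already set */
	return 0;
}
```

The hunks below apply exactly this bracketing to the test_and_*_bit() helpers in bitops.h.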
Diffstat (limited to 'arch/arc/include/asm/bitops.h')
-rw-r--r-- | arch/arc/include/asm/bitops.h | 19 |
1 files changed, 19 insertions, 0 deletions
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 829a8a2e9704..dd03fd931bb7 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -117,6 +117,12 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
 	if (__builtin_constant_p(nr))
 		nr &= 0x1f;
 
+	/*
+	 * Explicit full memory barrier needed before/after as
+	 * LLOCK/SCOND themselves don't provide any such semantics
+	 */
+	smp_mb();
+
 	__asm__ __volatile__(
 	"1:	llock	%0, [%2]	\n"
 	"	bset	%1, %0, %3	\n"
@@ -126,6 +132,8 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
 	: "r"(m), "ir"(nr)
 	: "cc");
 
+	smp_mb();
+
 	return (old & (1 << nr)) != 0;
 }
 
@@ -139,6 +147,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
 	if (__builtin_constant_p(nr))
 		nr &= 0x1f;
 
+	smp_mb();
+
 	__asm__ __volatile__(
 	"1:	llock	%0, [%2]	\n"
 	"	bclr	%1, %0, %3	\n"
@@ -148,6 +158,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
 	: "r"(m), "ir"(nr)
 	: "cc");
 
+	smp_mb();
+
 	return (old & (1 << nr)) != 0;
 }
 
@@ -161,6 +173,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
 	if (__builtin_constant_p(nr))
 		nr &= 0x1f;
 
+	smp_mb();
+
 	__asm__ __volatile__(
 	"1:	llock	%0, [%2]	\n"
 	"	bxor	%1, %0, %3	\n"
@@ -170,6 +184,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
 	: "r"(m), "ir"(nr)
 	: "cc");
 
+	smp_mb();
+
 	return (old & (1 << nr)) != 0;
 }
 
@@ -249,6 +265,9 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
 	if (__builtin_constant_p(nr))
 		nr &= 0x1f;
 
+	/*
+	 * spin lock/unlock provide the needed smp_mb() before/after
+	 */
 	bitops_lock(flags);
 
 	old = *m;
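
For illustration only, here is a hypothetical caller (not from the kernel tree) that relies on the full-barrier semantics added above by treating a flag bit as a try-lock; struct widget, WIDGET_BUSY and widget_update() are invented names:

```c
#include <linux/bitops.h>
#include <linux/errno.h>

#define WIDGET_BUSY	0	/* bit number in widget->flags (illustrative) */

struct widget {
	unsigned long flags;
	int data;
};

/* Treats the busy bit as a try-lock around the update of w->data. */
static int widget_update(struct widget *w, int val)
{
	if (test_and_set_bit(WIDGET_BUSY, &w->flags))
		return -EBUSY;	/* another CPU owns the widget */

	/* The smp_mb() after LLOCK/SCOND keeps this store after the RMW. */
	w->data = val;

	/* The smp_mb() before LLOCK/SCOND publishes w->data before the clear. */
	test_and_clear_bit(WIDGET_BUSY, &w->flags);

	return 0;
}
```

Without the smp_mb() pairs added in this commit, ARCv2 would be free to reorder the w->data access across the bit operations, breaking this kind of usage.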