path: root/include/asm-generic/rwsem.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-04 01:10:43 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-04 01:10:43 +0100
commit	d63a9788650fcd999b34584316afee6bd4378f19 (patch)
tree	32135409a61fab9365621ad01adb58a8dc7c536d /include/asm-generic/rwsem.h
parent	Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kerne... (diff)
parent	ARM, locking/atomics: Implement _relaxed variants of atomic[64]_{inc,dec} (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking changes from Ingo Molnar:
 "The main changes in this cycle were:

   - More gradual enhancements to atomic ops: new atomic*_read_ctrl()
     ops, synchronize atomic_{read,set}() ordering requirements between
     architectures, add atomic_long_t bitops. (Peter Zijlstra)

   - Add _{relaxed|acquire|release}() variants for inc/dec atomics and
     use them in various locking primitives: mutex, rtmutex, mcs, rwsem.
     This enables weakly ordered architectures (such as arm64) to make
     use of more locking related optimizations. (Davidlohr Bueso)

   - Implement atomic[64]_{inc,dec}_relaxed() on ARM. (Will Deacon)

   - Futex kernel data cache footprint micro-optimization. (Rasmus Villemoes)

   - pvqspinlock runtime overhead micro-optimization. (Waiman Long)

   - misc smaller fixlets"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ARM, locking/atomics: Implement _relaxed variants of atomic[64]_{inc,dec}
  locking/rwsem: Use acquire/release semantics
  locking/mcs: Use acquire/release semantics
  locking/rtmutex: Use acquire/release semantics
  locking/mutex: Use acquire/release semantics
  locking/asm-generic: Add _{relaxed|acquire|release}() variants for inc/dec atomics
  atomic: Implement atomic_read_ctrl()
  atomic, arch: Audit atomic_{read,set}()
  atomic: Add atomic_long_t bitops
  futex: Force hot variables into a single cache line
  locking/pvqspinlock: Kick the PV CPU unconditionally when _Q_SLOW_VAL
  locking/osq: Relax atomic semantics
  locking/qrwlock: Rename ->lock to ->wait_lock
  locking/Documentation/lockstat: Fix typo - lokcing -> locking
  locking/atomics, cmpxchg: Privatize the inclusion of asm/cmpxchg.h
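[Editor's note: the terminology behind this series, in brief. An ACQUIRE operation forbids later memory accesses from being reordered before it; a RELEASE operation forbids earlier accesses from sinking below it; a fully ordered atomic implies both and more, which is wasted work on weakly ordered CPUs such as arm64. A minimal C11 sketch of the idea follows; it is not kernel code, and the toy_lock/toy_unlock names and the spin flag are hypothetical.]

        /* Toy spinlock: lock paths need only ACQUIRE, unlock paths only RELEASE. */
        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_bool locked;      /* zero-initialized: unlocked */

        static void toy_lock(void)
        {
                bool expected;
                do {
                        expected = false;
                        /* ACQUIRE on success: nothing in the critical
                         * section may be hoisted above this exchange. */
                } while (!atomic_compare_exchange_weak_explicit(
                                &locked, &expected, true,
                                memory_order_acquire, memory_order_relaxed));
        }

        static void toy_unlock(void)
        {
                /* RELEASE: all stores made inside the critical section
                 * are visible to the next thread that acquires the lock. */
                atomic_store_explicit(&locked, false, memory_order_release);
        }

[This is exactly the pattern the rwsem diff below applies: down/trylock paths move to _acquire variants, up paths to _release variants.]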
Diffstat (limited to 'include/asm-generic/rwsem.h')
-rw-r--r--	include/asm-generic/rwsem.h	21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d48bf5a95cc1..d6d5dc98d7da 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -33,7 +33,7 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-        if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+        if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
                 rwsem_down_read_failed(sem);
 }

@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
         long tmp;

         while ((tmp = sem->count) >= 0) {
-                if (tmp == cmpxchg(&sem->count, tmp,
+                if (tmp == cmpxchg_acquire(&sem->count, tmp,
                            tmp + RWSEM_ACTIVE_READ_BIAS)) {
                         return 1;
                 }
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
         long tmp;

-        tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
                                      (atomic_long_t *)&sem->count);
         if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                 rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
         long tmp;

-        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+        tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
                       RWSEM_ACTIVE_WRITE_BIAS);
         return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
         long tmp;

-        tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+        tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
         if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                 rwsem_wake(sem);
 }
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-        if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+        if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
                                  (atomic_long_t *)&sem->count) < 0))
                 rwsem_wake(sem);
 }
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
         long tmp;

-        tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+        /*
+         * When downgrading from exclusive to shared ownership,
+         * anything inside the write-locked region cannot leak
+         * into the read side. In contrast, anything in the
+         * read-locked region is ok to be re-ordered into the
+         * write side. As such, rely on RELEASE semantics.
+         */
+        tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
                                      (atomic_long_t *)&sem->count);
         if (tmp < 0)
                 rwsem_downgrade_wake(sem);
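
[Editor's note: the comment added in the final hunk is the subtle case. A hypothetical C11 illustration of why RELEASE suffices for __downgrade_write follows; it is not kernel code, the names guarded_data/writer_then_reader are invented, and -1 stands in for -RWSEM_WAITING_BIAS.]

        #include <stdatomic.h>

        static atomic_long count;       /* stand-in for sem->count         */
        static long guarded_data;       /* protected by the write lock     */

        static void writer_then_reader(void)
        {
                guarded_data = 42;      /* inside the write-locked region  */

                /*
                 * RELEASE: the store to guarded_data cannot sink below
                 * this update, so a new reader whose lock acquisition
                 * (an ACQUIRE on count) observes the downgraded count
                 * also observes the data written under the write lock.
                 */
                atomic_fetch_add_explicit(&count, -1, memory_order_release);

                /* Read-locked region: this load may be hoisted above the
                 * downgrade point, which is harmless, since we hold the
                 * lock in one form or the other across the downgrade.
                 * That is why no ACQUIRE half is needed here. */
                long v = guarded_data;
                (void)v;
        }

[In short: downgrade is a publish operation from the writer's point of view, so the write side pays for RELEASE ordering and nothing more.]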