author     Linus Torvalds <torvalds@linux-foundation.org>  2018-06-05 01:40:11 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-05 01:40:11 +0200
commit     92400b8c8b42e53abb0fcb4ac75cb85d4177a891 (patch)
tree       b6c7ef758d1c2b5e32e2483a0dbde7cd23a6d8a0 /include/asm-generic
parent     Merge branch 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kerne... (diff)
parent     locking/rwsem: Simplify the is-owner-spinnable checks (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Lots of tidying up changes all across the map for Linux's formal
memory/locking-model tooling, by Alan Stern, Akira Yokosawa, Andrea
Parri, Paul E. McKenney and SeongJae Park.
The notable change beyond an overall update of the tooling itself is
the tidying up of spin_is_locked() semantics, which spills over into
the kernel proper as well.
- qspinlock improvements: the locking algorithm now guarantees forward
progress, whereas the previous mainline implementation could starve
threads indefinitely in cmpxchg() loops (a minimal sketch of such a
loop follows this list); plus other related cleanups to the qspinlock
code (Will Deacon)
- misc smaller improvements, cleanups and fixes all across the locking
subsystem
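
To make the forward-progress point concrete, here is a minimal userspace
C11 sketch of an unbounded compare-and-swap retry loop. This is not the
kernel's qspinlock code; the names cas_lock()/cas_unlock() and the use of
<stdatomic.h> are illustrative. Nothing bounds how often a particular
caller can lose the race, which is exactly the starvation that a queued,
FIFO-handoff design rules out:

#include <stdatomic.h>

static atomic_uint lock_val;

static void cas_lock(void)
{
	unsigned int old = 0;

	/*
	 * Every failed compare-exchange reloads the current value and
	 * retries; under contention a caller can lose this race forever.
	 */
	while (!atomic_compare_exchange_weak_explicit(&lock_val, &old, 1,
						      memory_order_acquire,
						      memory_order_relaxed))
		old = 0;
}

static void cas_unlock(void)
{
	atomic_store_explicit(&lock_val, 0, memory_order_release);
}
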
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (51 commits)
locking/rwsem: Simplify the is-owner-spinnable checks
tools/memory-model: Add reference for 'Simplifying ARM concurrency'
tools/memory-model: Update ASPLOS information
MAINTAINERS, tools/memory-model: Update e-mail address for Andrea Parri
tools/memory-model: Fix coding style in 'lock.cat'
tools/memory-model: Remove out-of-date comments and code from lock.cat
tools/memory-model: Improve mixed-access checking in lock.cat
tools/memory-model: Improve comments in lock.cat
tools/memory-model: Remove duplicated code from lock.cat
tools/memory-model: Flag "cumulativity" and "propagation" tests
tools/memory-model: Add model support for spin_is_locked()
tools/memory-model: Add scripts to test memory model
tools/memory-model: Fix coding style in 'linux-kernel.def'
tools/memory-model: Model 'smp_store_mb()'
tools/memory-order: Update the cheat-sheet to show that smp_mb__after_atomic() orders later RMW operations
tools/memory-order: Improve key for SELF and SV
tools/memory-model: Fix cheat sheet typo
tools/memory-model: Update required version of herdtools7
tools/memory-model: Redefine rb in terms of rcu-fence
tools/memory-model: Rename link and rcu-path to rcu-link and rb
...
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic-long.h      19
-rw-r--r--  include/asm-generic/barrier.h          27
-rw-r--r--  include/asm-generic/qspinlock.h         4
-rw-r--r--  include/asm-generic/qspinlock_types.h  32
4 files changed, 71 insertions(+), 11 deletions(-)
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 34a028a7bcc5..87d14476edc2 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -25,6 +25,7 @@ typedef atomic64_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
 #define ATOMIC_LONG_PFX(x)	atomic64 ## x
+#define ATOMIC_LONG_TYPE	s64
 
 #else
 
@@ -32,6 +33,7 @@ typedef atomic_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
 #define ATOMIC_LONG_PFX(x)	atomic ## x
+#define ATOMIC_LONG_TYPE	int
 
 #endif
 
@@ -90,6 +92,21 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release)
 #define atomic_long_cmpxchg(l, old, new) \
 	(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
 
+
+#define atomic_long_try_cmpxchg_relaxed(l, old, new) \
+	(ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+					(ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_acquire(l, old, new) \
+	(ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+					(ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_release(l, old, new) \
+	(ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+					(ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg(l, old, new) \
+	(ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \
+					(ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+
+
 #define atomic_long_xchg_relaxed(v, new) \
 	(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 #define atomic_long_xchg_acquire(v, new) \
@@ -244,6 +261,8 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 #define atomic_long_inc_not_zero(l) \
 	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
+#define atomic_long_cond_read_relaxed(v, c) \
+	ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c))
 #define atomic_long_cond_read_acquire(v, c) \
 	ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
 
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 29458bbb2fa0..2cafdbb9ae4c 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -221,18 +221,17 @@ do {									\
 #endif
 
 /**
- * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
  * @ptr: pointer to the variable to wait on
  * @cond: boolean expression to wait for
  *
- * Equivalent to using smp_load_acquire() on the condition variable but employs
- * the control dependency of the wait to reduce the barrier on many platforms.
+ * Equivalent to using READ_ONCE() on the condition variable.
  *
  * Due to C lacking lambda expressions we load the value of *ptr into a
  * pre-named variable @VAL to be used in @cond.
  */
-#ifndef smp_cond_load_acquire
-#define smp_cond_load_acquire(ptr, cond_expr) ({		\
+#ifndef smp_cond_load_relaxed
+#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
 	typeof(ptr) __PTR = (ptr);				\
 	typeof(*ptr) VAL;					\
 	for (;;) {						\
@@ -241,10 +240,26 @@ do {									\
 			break;					\
 		cpu_relax();					\
 	}							\
-	smp_acquire__after_ctrl_dep();				\
 	VAL;							\
 })
 #endif
 
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({		\
+	typeof(*ptr) _val;					\
+	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
+	smp_acquire__after_ctrl_dep();				\
+	_val;							\
+})
+#endif
+
 #endif	/* !__ASSEMBLY__ */
 #endif	/* __ASM_GENERIC_BARRIER_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index b37b4ad7eb94..9cc457597ddf 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -26,7 +26,6 @@
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
-#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
@@ -35,7 +34,6 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 	 */
 	return atomic_read(&lock->val);
 }
-#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -100,7 +98,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 	/*
 	 * unlock() needs release semantics:
 	 */
-	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
+	smp_store_release(&lock->locked, 0);
 }
 #endif
 
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 034acd0c4956..0763f065b975 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -29,13 +29,41 @@
 #endif
 
 typedef struct qspinlock {
-	atomic_t	val;
+	union {
+		atomic_t val;
+
+		/*
+		 * By using the whole 2nd least significant byte for the
+		 * pending bit, we can allow better optimization of the lock
+		 * acquisition for the pending bit holder.
+		 */
+#ifdef __LITTLE_ENDIAN
+		struct {
+			u8	locked;
+			u8	pending;
+		};
+		struct {
+			u16	locked_pending;
+			u16	tail;
+		};
+#else
+		struct {
+			u16	tail;
+			u16	locked_pending;
+		};
+		struct {
+			u8	reserved[2];
+			u8	pending;
+			u8	locked;
+		};
+#endif
+	};
 } arch_spinlock_t;
 
 /*
  * Initializier
  */
-#define	__ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }
+#define	__ARCH_SPIN_LOCK_UNLOCKED	{ .val = ATOMIC_INIT(0) }
 
 /*
  * Bitfields in the atomic value:
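
Two of the changes above are worth a short illustration.

First, the qspinlock rework: queued_spin_unlock() can now release the lock
with a byte-sized smp_store_release(&lock->locked, 0) because, on a
little-endian machine, the locked byte is the least significant byte of the
32-bit lock word, so clearing it leaves the tail encoding of queued waiters
intact. A minimal userspace sketch of that layout idea (assuming a
little-endian host; this is an illustrative stand-in, not the kernel
structure):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union qspinlock_layout {		/* illustrative little-endian layout */
	uint32_t val;
	struct {
		uint8_t  locked;	/* byte 0: owner holds the lock */
		uint8_t  pending;	/* byte 1: pending bit */
		uint16_t tail;		/* bytes 2-3: queued-waiter encoding */
	};
};

int main(void)
{
	union qspinlock_layout lock = { .val = 0 };

	lock.locked = 1;		/* lock taken */
	lock.tail   = 0xbeef;		/* pretend waiters are queued */

	lock.locked = 0;		/* byte-sized "unlock" store */

	/* Only the locked byte is cleared; the tail survives. */
	assert(lock.val == (uint32_t)0xbeef << 16);
	printf("lock word after unlock: 0x%08x\n", lock.val);
	return 0;
}

Second, the barrier.h refactoring layers smp_cond_load_acquire() on top of
the new smp_cond_load_relaxed(): the wait loop itself uses plain loads, and
a single acquire barrier (smp_acquire__after_ctrl_dep()) is issued once the
condition is observed. A rough userspace analogue of that structure (again
a sketch, not the kernel macros):

#include <stdatomic.h>

/* Spin with relaxed loads until *ptr becomes non-zero. */
static inline unsigned int cond_load_relaxed(atomic_uint *ptr)
{
	unsigned int val;

	while ((val = atomic_load_explicit(ptr, memory_order_relaxed)) == 0)
		;
	return val;
}

/* Same wait, but pay for ACQUIRE ordering only once, after success. */
static inline unsigned int cond_load_acquire(atomic_uint *ptr)
{
	unsigned int val = cond_load_relaxed(ptr);

	atomic_thread_fence(memory_order_acquire);
	return val;
}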