author:    Davidlohr Bueso <dave@stgolabs.net>    2019-07-29 06:47:35 +0200
committer: Peter Zijlstra <peterz@infradead.org>  2019-08-06 12:49:15 +0200
commit: fce45cd41101f1a9620267146b21f09b3454d8db
tree:   ff8142e67c6ec7cb5d6ed8aa0a7e662bfe57ce1a /kernel/locking
parent: locking/rwsem: Make handoff writer optimistically spin on owner
locking/rwsem: Check for operations on an uninitialized rwsem
Currently the rwsem is the only locking primitive that lacks this debug
feature. Add it under CONFIG_DEBUG_RWSEMS and do the magic checking in
the locking fastpath (trylock) operation such that we cover all cases.
The unlocking part is pretty straightforward.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: mingo@kernel.org
Cc: Davidlohr Bueso <dave@stgolabs.net>
Link: https://lkml.kernel.org/r/20190729044735.9632-1-dave@stgolabs.net
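For reference, the debug technique is the same self-pointer "magic" that
mutex and spinlock debugging already use: initialization stores the
object's own address in a ->magic field, and every subsequent operation
checks that the field still matches. A minimal user-space sketch of the
idea (the fake_* names are illustrative, not kernel API):

	#include <stdio.h>

	/* Illustrative stand-in for struct rw_semaphore's debug field. */
	struct fake_rwsem {
		long count;
		void *magic;		/* object's own address, set at init */
	};

	static void fake_init_rwsem(struct fake_rwsem *sem)
	{
		sem->count = 0;
		sem->magic = sem;	/* valid only after initialization */
	}

	static int fake_down_trylock(struct fake_rwsem *sem)
	{
		/* Mirrors DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem). */
		if (sem->magic != sem) {
			fprintf(stderr, "uninitialized rwsem %p\n", (void *)sem);
			return 0;
		}
		if (sem->count != 0)
			return 0;
		sem->count = 1;
		return 1;
	}

	int main(void)
	{
		struct fake_rwsem good, bad = { 0, NULL };	/* bad skips init */

		fake_init_rwsem(&good);
		if (!fake_down_trylock(&good))	/* passes the magic check */
			return 1;
		if (fake_down_trylock(&bad))	/* magic mismatch: refused */
			return 1;
		return 0;
	}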
Diffstat (limited to 'kernel/locking')
-rw-r--r--    kernel/locking/rwsem.c    22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 354238a08b7a..eef04551eae7 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -105,8 +105,9 @@
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
if (!debug_locks_silent && \
- WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
+ WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
#c, atomic_long_read(&(sem)->count), \
+ (unsigned long) sem->magic, \
atomic_long_read(&(sem)->owner), (long)current, \
list_empty(&(sem)->wait_list) ? "" : "not ")) \
debug_locks_off(); \
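Note the WARN_ONCE() in the macro: each check site reports at most once
per boot, and a triggered report also calls debug_locks_off(), which
shuts off further lock debugging so the first splat is not buried under
follow-on noise. A rough user-space analogue of the fire-once behaviour
(MY_WARN_ONCE is a made-up name; like the kernel's WARN_ONCE() it leans
on the GCC statement-expression and ##__VA_ARGS__ extensions):

	#include <stdbool.h>
	#include <stdio.h>

	/* Fire-once warning, loosely modelled on the kernel's WARN_ONCE().
	 * The static flag lives per expansion site, so each call site
	 * warns independently, once. */
	#define MY_WARN_ONCE(cond, fmt, ...)			\
	({							\
		static bool warned;				\
		bool ret = (cond);				\
		if (ret && !warned) {				\
			warned = true;				\
			fprintf(stderr, fmt, ##__VA_ARGS__);	\
		}						\
		ret;						\
	})

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			MY_WARN_ONCE(i >= 0, "warns once, on iteration %d\n", i);
		return 0;
	}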
@@ -330,6 +331,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
+#ifdef CONFIG_DEBUG_RWSEMS
+ sem->magic = sem;
+#endif
atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
raw_spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
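A side benefit of storing the object's own address rather than a fixed
magic constant: the check also trips when a structure is copied
byte-for-byte to a different address, since the stale self-pointer no
longer matches the new location. A tiny illustration (struct obj is
hypothetical, not the kernel structure):

	#include <stdio.h>
	#include <string.h>

	struct obj {
		void *magic;	/* self-pointer set at init, as above */
	};

	static void obj_init(struct obj *o)
	{
		o->magic = o;
	}

	int main(void)
	{
		struct obj a, b;

		obj_init(&a);
		printf("a ok: %d\n", a.magic == (void *)&a);	/* 1: initialized */

		memcpy(&b, &a, sizeof(b));	/* byte copy to a new address */
		printf("b ok: %d\n", b.magic == (void *)&b);	/* 0: stale self-pointer */
		return 0;
	}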
@@ -1358,11 +1362,14 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
+ long tmp;
+
+ DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
/*
* Optimize for the case when the rwsem is not locked at all.
*/
- long tmp = RWSEM_UNLOCKED_VALUE;
-
+ tmp = RWSEM_UNLOCKED_VALUE;
do {
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
tmp + RWSEM_READER_BIAS)) {
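To see what the check buys in practice, consider a hypothetical (and
buggy) caller; broken_user() below is invented for illustration and is
not part of the patch. With CONFIG_DEBUG_RWSEMS=y, the trylock fastpath
now warns instead of silently operating on a semaphore whose magic was
never set:

	#include <linux/errno.h>
	#include <linux/rwsem.h>

	static struct rw_semaphore sem;	/* bug: never passed to init_rwsem() */

	static int broken_user(void)
	{
		/*
		 * sem.magic is zero-initialized, so sem->magic != sem and
		 * __down_read_trylock()'s DEBUG_RWSEMS_WARN_ON() fires.
		 */
		if (down_read_trylock(&sem)) {
			up_read(&sem);
			return 0;
		}
		return -EBUSY;
	}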
@@ -1403,8 +1410,11 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- long tmp = RWSEM_UNLOCKED_VALUE;
+ long tmp;

+ DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
+ tmp = RWSEM_UNLOCKED_VALUE;
if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
RWSEM_WRITER_LOCKED)) {
rwsem_set_owner(sem);
@@ -1420,7 +1430,9 @@ inline void __up_read(struct rw_semaphore *sem)
{
long tmp;

+ DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+
rwsem_clear_reader_owned(sem);
tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@@ -1438,12 +1450,14 @@ static inline void __up_write(struct rw_semaphore *sem)
{
long tmp;

+ DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
/*
* sem->owner may differ from current if the ownership is transferred
* to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
*/
DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
!rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
+
rwsem_clear_owner(sem);
tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
if (unlikely(tmp & RWSEM_FLAG_WAITERS))