From a68e2c4c637918da47b3aa270051545cff7d8245 Mon Sep 17 00:00:00 2001
From: Waiman Long
Date: Thu, 4 Apr 2019 13:43:14 -0400
Subject: locking/rwsem: Add debug check for __down_read*()

When rwsem_down_read_failed*() return, the read lock is acquired
indirectly by others. So debug checks are added in __down_read() and
__down_read_killable() to make sure the rwsem is really reader-owned.

The other debug check calls in kernel/locking/rwsem.c except the one in
up_read_non_owner() are also moved over to rwsem-xadd.h.

Signed-off-by: Waiman Long
Acked-by: Peter Zijlstra
Acked-by: Davidlohr Bueso
Cc: Andrew Morton
Cc: Arnd Bergmann
Cc: Borislav Petkov
Cc: Davidlohr Bueso
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Tim Chen
Cc: Will Deacon
Link: http://lkml.kernel.org/r/20190404174320.22416-6-longman@redhat.com
Signed-off-by: Ingo Molnar
---
 kernel/locking/rwsem.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index 19997c82270b..1d8f722a6761 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -165,10 +165,13 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
+	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
 		rwsem_down_read_failed(sem);
-	else
+		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+					RWSEM_READER_OWNED));
+	} else {
 		rwsem_set_reader_owned(sem);
+	}
 }
 
 static inline int __down_read_killable(struct rw_semaphore *sem)
@@ -176,6 +179,8 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
 		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
 			return -EINTR;
+		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+					RWSEM_READER_OWNED));
 	} else {
 		rwsem_set_reader_owned(sem);
 	}
@@ -246,6 +251,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	long tmp;
 
+	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
 	rwsem_clear_reader_owned(sem);
 	tmp = atomic_long_dec_return_release(&sem->count);
 	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
@@ -257,6 +263,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
+	DEBUG_RWSEMS_WARN_ON(sem->owner != current);
 	rwsem_clear_owner(sem);
 	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
 						    &sem->count) < 0))
@@ -277,6 +284,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	 * read-locked region is ok to be re-ordered into the
 	 * write side. As such, rely on RELEASE semantics.
 	 */
+	DEBUG_RWSEMS_WARN_ON(sem->owner != current);
 	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
 	rwsem_set_reader_owned(sem);
 	if (tmp < 0)
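
The checks above rely on rwsem's owner-tagging scheme of this era: sem->owner
normally holds the owning task pointer, and while the lock is reader-owned that
pointer carries a low RWSEM_READER_OWNED tag bit, so reader ownership can be
verified with a plain bitmask test. The following standalone C sketch
illustrates the idea under those assumptions; the names mirror the kernel's,
but the structures, the assert()-based macro, and set_reader_owned() are
simplified stand-ins, not the kernel implementation.

/*
 * Standalone illustration (not kernel code) of the reader-owned check.
 * A reader-owned rwsem stores the last reader's task pointer in ->owner
 * with the low RWSEM_READER_OWNED bit set; the debug check tests that bit.
 */
#include <assert.h>
#include <stdio.h>

#define RWSEM_READER_OWNED	(1UL << 0)	/* low tag bit in the owner word */

/* The 'long' member keeps the struct word-aligned, so the low bit is free. */
struct task { long tid; char name[16]; };

struct rwsem { unsigned long owner; };		/* stand-in for sem->owner */

/* The kernel's macro warns when the condition is true; here we assert it is not. */
#define DEBUG_RWSEMS_WARN_ON(c)	assert(!(c))

/* Simplified analogue of the kernel's rwsem_set_reader_owned(). */
static void set_reader_owned(struct rwsem *sem, struct task *t)
{
	/* Tag the task pointer with the reader-owned bit. */
	sem->owner = (unsigned long)t | RWSEM_READER_OWNED;
}

int main(void)
{
	static struct task reader = { 1, "reader" };
	struct rwsem sem = { 0 };

	set_reader_owned(&sem, &reader);

	/* Mirrors the check the patch adds to __down_read() and __up_read(). */
	DEBUG_RWSEMS_WARN_ON(!(sem.owner & RWSEM_READER_OWNED));
	printf("rwsem is reader-owned, as the debug check expects\n");
	return 0;
}

Storing the flag in the pointer's low bits works because task structures are
word-aligned, leaving those bits always zero and free to carry state.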