author     Peter Zijlstra <peterz@infradead.org>   2019-10-30 20:12:37 +0100
committer  Ingo Molnar <mingo@kernel.org>          2020-02-11 13:10:54 +0100
commit     206c98ffbeda588dbbd9d272505c42acbc364a30
tree       f75b1f38d75f4aaf1d1ce969549582ea982d2e8d
parent     locking/percpu-rwsem, lockdep: Make percpu-rwsem use its own lockdep_map
locking/percpu-rwsem: Convert to bool
Use bool where possible.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200131151539.984626569@infradead.org
 include/linux/percpu-rwsem.h  | 6 +++---
 kernel/locking/percpu-rwsem.c | 8 ++++----
 2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index f2c36fb5e661..4ceaa1921951 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -41,7 +41,7 @@ is_static struct percpu_rw_semaphore name = {				\
 #define DEFINE_STATIC_PERCPU_RWSEM(name)	\
 	__DEFINE_PERCPU_RWSEM(name, static)
 
-extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
 static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
@@ -69,9 +69,9 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 	preempt_enable();
 }
 
-static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
-	int ret = 1;
+	bool ret = true;
 
 	preempt_disable();
 	/*
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index aa2b118d2f88..969389df6eee 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -45,7 +45,7 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 }
 EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
-int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
+bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
 {
 	/*
 	 * Due to having preemption disabled the decrement happens on
@@ -69,7 +69,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	 * release in percpu_up_write().
 	 */
 	if (likely(!smp_load_acquire(&sem->readers_block)))
-		return 1;
+		return true;
 
 	/*
 	 * Per the above comment; we still have preemption disabled and
@@ -78,7 +78,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	__percpu_up_read(sem);
 
 	if (try)
-		return 0;
+		return false;
 
 	/*
 	 * We either call schedule() in the wait, or we'll fall through
@@ -94,7 +94,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	__up_read(&sem->rw_sem);
 	preempt_disable();
 
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL_GPL(__percpu_down_read);
 
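For context, a minimal caller sketch, not part of this patch, showing how the trylock return value reads once it is a bool; the lock and helper names (demo_sem, demo_try_read) are made up for illustration:

#include <linux/percpu-rwsem.h>

/* Hypothetical lock, for illustration only. */
static DEFINE_STATIC_PERCPU_RWSEM(demo_sem);

static bool demo_try_read(void)
{
	/* percpu_down_read_trylock() now returns bool instead of int. */
	if (!percpu_down_read_trylock(&demo_sem))
		return false;

	/* ... read-side critical section ... */

	percpu_up_read(&demo_sem);
	return true;
}

The semantics are unchanged: the old non-zero/zero returns map directly to true/false, so existing callers keep working; bool only makes the success/failure intent explicit in the prototypes.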