| author | Thomas Gleixner <tglx@linutronix.de> | 2009-12-03 20:08:46 +0100 |
| --- | --- | --- |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-12-14 23:55:32 +0100 |
| commit | e5931943d02bf751b1ec849c0d2ade23d76a8d41 (patch) | |
| tree | 119fe3bc583d0d043d97cb9edd98bad52692a546 /arch/parisc | |
| parent | locking: Convert raw_rwlock to arch_rwlock (diff) | |
| download | linux-e5931943d02bf751b1ec849c0d2ade23d76a8d41.tar.xz, linux-e5931943d02bf751b1ec849c0d2ade23d76a8d41.zip | |
locking: Convert raw_rwlock functions to arch_rwlock
Name space cleanup for rwlock functions. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
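For orientation, the per-architecture rwlock hook set that this rename leaves behind is summarised below. This is an illustrative sketch assembled from the parisc diff further down, not a header copied from the tree: `arch_rwlock_t` is declared opaquely here as a stand-in for the real per-architecture type, the trylock/can_lock comments reflect the usual nonzero-on-success convention, and the `inline` spelling (`__inline__` on parisc) varies by architecture.

```c
/* Stand-in for the per-architecture rwlock type (on parisc it carries a
 * spinlock plus a reader counter); the real definition lives elsewhere. */
typedef struct arch_rwlock arch_rwlock_t;

/* Reader side */
static inline void arch_read_lock(arch_rwlock_t *rw);
static inline void arch_read_unlock(arch_rwlock_t *rw);
static inline int  arch_read_trylock(arch_rwlock_t *rw);   /* nonzero on success */
static inline int  arch_read_can_lock(arch_rwlock_t *rw);  /* would read_trylock() succeed? */

/* Writer side */
static inline void arch_write_lock(arch_rwlock_t *rw);
static inline void arch_write_unlock(arch_rwlock_t *rw);
static inline int  arch_write_trylock(arch_rwlock_t *rw);  /* nonzero on success */
static inline int  arch_write_can_lock(arch_rwlock_t *rw); /* would write_trylock() succeed? */

/* parisc simply ignores the irq-flags argument in the _flags variants. */
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
```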
Diffstat (limited to 'arch/parisc')
| -rw-r--r-- | arch/parisc/include/asm/spinlock.h | 20 |
| --- | --- | --- |

1 file changed, 10 insertions(+), 10 deletions(-)
```diff
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 1ff3a0a94a43..74036f436a3b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
@@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
@@ -93,7 +93,7 @@ static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 retry:
@@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 retry:
@@ -141,7 +141,7 @@ retry:
 	local_irq_restore(flags);
 }
 
-static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
 {
 	rw->counter = 0;
 	arch_spin_unlock(&rw->lock);
@@ -149,7 +149,7 @@ static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	int result = 0;
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
 {
 	return rw->counter >= 0;
 }
@@ -182,13 +182,13 @@ static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 {
 	return !rw->counter;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock) cpu_relax()
 #define arch_read_relax(lock) cpu_relax()
```
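The renamed functions implement a simple scheme that is visible in the hunks above: an internal spinlock (`rw->lock`) serialises access to a reader counter (`rw->counter`); readers hold the spinlock only long enough to adjust the counter, while a writer keeps it held for the whole write-side critical section. Below is a minimal user-space analogue of that structure, assuming C11 atomics. The names (`toy_rwlock`, `toy_read_lock`, ...) are invented for illustration, the kernel's `local_irq_save()`/`local_irq_restore()` step has no user-space equivalent and is omitted, and the real parisc code additionally tags the write-locked state in `rw->counter` (cleared in `arch_write_unlock()` above) so the `*_can_lock()` predicates work.

```c
#include <stdatomic.h>
#include <sched.h>

struct toy_rwlock {
	atomic_flag lock;	/* plays the role of rw->lock */
	int counter;		/* plays the role of rw->counter */
};

static void toy_spin_lock(struct toy_rwlock *rw)
{
	while (atomic_flag_test_and_set_explicit(&rw->lock, memory_order_acquire))
		sched_yield();	/* stand-in for cpu_relax() */
}

static void toy_spin_unlock(struct toy_rwlock *rw)
{
	atomic_flag_clear_explicit(&rw->lock, memory_order_release);
}

static void toy_read_lock(struct toy_rwlock *rw)
{
	toy_spin_lock(rw);
	rw->counter++;		/* record one more active reader */
	toy_spin_unlock(rw);
}

static void toy_read_unlock(struct toy_rwlock *rw)
{
	toy_spin_lock(rw);
	rw->counter--;		/* reader done */
	toy_spin_unlock(rw);
}

static void toy_write_lock(struct toy_rwlock *rw)
{
	for (;;) {
		toy_spin_lock(rw);
		if (rw->counter == 0)
			return;		/* no readers: keep the spinlock held */
		toy_spin_unlock(rw);	/* readers active: back off and retry */
		sched_yield();
	}
}

static void toy_write_unlock(struct toy_rwlock *rw)
{
	toy_spin_unlock(rw);		/* releasing the spinlock lets readers in again */
}
```

A lock would be initialised as `struct toy_rwlock lk = { .lock = ATOMIC_FLAG_INIT };`, and callers pair `toy_read_lock(&lk)`/`toy_read_unlock(&lk)` or `toy_write_lock(&lk)`/`toy_write_unlock(&lk)` around their critical sections, mirroring how the kernel's generic rwlock layer pairs the corresponding lock and unlock operations.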