Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/include/asm/atomic.h   | 13
-rw-r--r--  arch/alpha/include/asm/rwsem.h    | 21
-rw-r--r--  arch/alpha/include/asm/spinlock.h | 14
3 files changed, 31 insertions(+), 17 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 85867d3cea64..767bfdd42992 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,6 +14,15 @@
* than regular operations.
*/
+/*
+ * To ensure dependency ordering is preserved for the _relaxed and
+ * _release atomics, an smp_read_barrier_depends() is unconditionally
+ * inserted into the _relaxed variants, which are used to build the
+ * barriered versions. To avoid redundant back-to-back fences, we can
+ * define the _acquire and _fence versions explicitly.
+ */
+#define __atomic_op_acquire(op, args...) op##_relaxed(args)
+#define __atomic_op_fence __atomic_op_release
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -61,6 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -78,6 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -112,6 +123,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -129,6 +141,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
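
The two overrides above are best read against the generic wrappers they short-circuit. A minimal sketch follows, assuming the defaults in include/linux/atomic.h have roughly this shape (illustrative, not part of this patch): every Alpha _relaxed variant now ends with smp_read_barrier_depends(), which expands to a full "mb" on Alpha, so the trailing smp_mb__after_atomic() that the generic _acquire wrapper would add is redundant, and a fully fenced operation needs nothing beyond what the _release wrapper already emits.

/* Generic wrappers, sketched from include/linux/atomic.h: */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();	/* redundant once _relaxed ends in mb */\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);	/* on Alpha this now ends with an mb */	\
})

/*
 * With the overrides from this patch, for example:
 *	atomic_add_return_acquire(i, v)
 *		-> __atomic_op_acquire(atomic_add_return, i, v)
 *		-> atomic_add_return_relaxed(i, v)	(trailing mb already present)
 */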
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 3925f06afd6b..cf8fc8f9a2ed 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -22,7 +22,7 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int ___down_read(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -42,10 +42,24 @@ static inline void __down_read(struct rw_semaphore *sem)
:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
-	if (unlikely(oldcount < 0))
+	return (oldcount < 0);
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_read(sem)))
 		rwsem_down_read_failed(sem);
 }
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+	if (unlikely(___down_read(sem)))
+		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
+}
+
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
@@ -95,9 +109,10 @@ static inline void __down_write(struct rw_semaphore *sem)
static inline int __down_write_killable(struct rw_semaphore *sem)
{
-	if (unlikely(___down_write(sem)))
+	if (unlikely(___down_write(sem))) {
 		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
 			return -EINTR;
+	}
return 0;
}
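
For callers, the new __down_read_killable() hook is what backs down_read_killable() in the core rwsem code, which returns 0 on success and -EINTR if a fatal signal interrupts the sleep. A minimal, hypothetical usage sketch (example_read_locked() is made up for illustration):

#include <linux/rwsem.h>

static int example_read_locked(struct rw_semaphore *sem)
{
	/* Sleeps for the lock, but bails out with -EINTR on a fatal signal. */
	if (down_read_killable(sem))
		return -EINTR;

	/* ... read-side critical section ... */

	up_read(sem);
	return 0;
}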
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aa4304afbea6..1221cbb86a6f 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -14,7 +14,6 @@
* We make no fairness assumptions. They have a cost.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -55,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
/***********************************************************/
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
- return (lock->lock & 1) == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
- return lock->lock == 0;
-}
-
static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -171,7 +160,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
lock->lock = 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ALPHA_SPINLOCK_H */
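
The dummy arch_spin_lock_flags()/arch_read_lock_flags()/arch_write_lock_flags() macros and the arch_*_can_lock() helpers can be dropped here because the remaining users are expected to be covered by core-code defaults; a sketch of the fallback shape this assumes (illustrative, paths and exact form not taken from this patch):

/* e.g. in include/linux/spinlock.h and include/linux/rwlock.h */
#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

#ifndef arch_read_lock_flags
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#endif

#ifndef arch_write_lock_flags
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
#endif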