diff options
Diffstat (limited to 'arch/arc/include/asm/atomic-spinlock.h')
-rw-r--r-- | arch/arc/include/asm/atomic-spinlock.h | 111 |
1 file changed, 111 insertions, 0 deletions
diff --git a/arch/arc/include/asm/atomic-spinlock.h b/arch/arc/include/asm/atomic-spinlock.h
new file mode 100644
index 000000000000..bdf87610b2d7
--- /dev/null
+++ b/arch/arc/include/asm/atomic-spinlock.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
+#define _ASM_ARC_ATOMIC_SPLOCK_H
+
+#ifndef CONFIG_SMP
+
+ /* violating atomic_xxx API locking protocol in UP for optimization sake */
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+#else
+
+static inline void arch_atomic_set(atomic_t *v, int i)
+{
+        /*
+         * Independent of hardware support, all of the atomic_xxx() APIs need
+         * to follow the same locking rules to make sure that a "hardware"
+         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+         * sequence
+         *
+         * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+         * requires the locking.
+         */
+        unsigned long flags;
+
+        atomic_ops_lock(flags);
+        WRITE_ONCE(v->counter, i);
+        atomic_ops_unlock(flags);
+}
+
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
+
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
+{ \
+        unsigned long flags; \
+ \
+        atomic_ops_lock(flags); \
+        v->counter c_op i; \
+        atomic_ops_unlock(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
+{ \
+        unsigned long flags; \
+        unsigned long temp; \
+ \
+        /* \
+         * spin lock/unlock provides the needed smp_mb() before/after \
+         */ \
+        atomic_ops_lock(flags); \
+        temp = v->counter; \
+        temp c_op i; \
+        v->counter = temp; \
+        atomic_ops_unlock(flags); \
+ \
+        return temp; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+        unsigned long flags; \
+        unsigned long orig; \
+ \
+        /* \
+         * spin lock/unlock provides the needed smp_mb() before/after \
+         */ \
+        atomic_ops_lock(flags); \
+        orig = v->counter; \
+        v->counter c_op i; \
+        atomic_ops_unlock(flags); \
+ \
+        return orig; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+        ATOMIC_OP(op, c_op, asm_op) \
+        ATOMIC_OP_RETURN(op, c_op, asm_op) \
+        ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+        ATOMIC_OP(op, c_op, asm_op) \
+        ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#endif
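
For reference, the ATOMIC_OP()/ATOMIC_OP_RETURN()/ATOMIC_FETCH_OP() macros above each stamp out a plain C function whose read-modify-write runs entirely between atomic_ops_lock() and atomic_ops_unlock(). Below is a minimal user-space sketch of the same pattern, with a pthread spinlock standing in for the arch's lock/irq-save helpers; the names emu_atomic_t, ops_lock and EMU_ATOMIC_* are illustrative only and are not kernel interfaces.

/* Illustrative sketch only: emulating atomic RMW ops with a lock,
 * mirroring the ATOMIC_OP/ATOMIC_FETCH_OP macro-generation pattern. */
#include <pthread.h>
#include <stdio.h>

typedef struct { int counter; } emu_atomic_t;

static pthread_spinlock_t ops_lock;   /* stands in for atomic_ops_lock()/unlock() */

#define EMU_ATOMIC_OP(op, c_op) \
static void emu_atomic_##op(int i, emu_atomic_t *v) \
{ \
        pthread_spin_lock(&ops_lock); \
        v->counter c_op i;      /* whole RMW happens inside the lock */ \
        pthread_spin_unlock(&ops_lock); \
}

#define EMU_ATOMIC_FETCH_OP(op, c_op) \
static int emu_atomic_fetch_##op(int i, emu_atomic_t *v) \
{ \
        int orig; \
 \
        pthread_spin_lock(&ops_lock); \
        orig = v->counter;      /* fetch_* variants return the pre-op value */ \
        v->counter c_op i; \
        pthread_spin_unlock(&ops_lock); \
        return orig; \
}

EMU_ATOMIC_OP(add, +=)
EMU_ATOMIC_FETCH_OP(or, |=)

int main(void)
{
        emu_atomic_t v = { .counter = 0 };
        int old;

        pthread_spin_init(&ops_lock, PTHREAD_PROCESS_PRIVATE);
        emu_atomic_add(5, &v);
        old = emu_atomic_fetch_or(2, &v);
        printf("fetch_or returned %d, counter is now %d\n", old, v.counter); /* 5, 7 */
        pthread_spin_destroy(&ops_lock);
        return 0;
}

Build with e.g. cc -pthread. In the kernel header, the flags argument passed to atomic_ops_lock()/atomic_ops_unlock() suggests interrupt state is saved and restored around the critical section as well, which is what lets these lock-emulated sequences follow the single locking protocol described in the header's comments alongside arch_atomic_set().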