author     Noam Camus <noamc@ezchip.com>          2015-05-16 16:49:35 +0200
committer  Vineet Gupta <vgupta@synopsys.com>     2016-05-09 06:02:33 +0200
commit     a5a10d99a946602cf4ae50eadc65c2480dbd2e56 (patch)
tree       613088aec1988af74826a53277e0dde3812bccc2 /arch/arc/include/asm/bitops.h
parent     ARC: [plat-eznps] Use dedicated user stack top (diff)
ARC: [plat-eznps] Use dedicated atomic/bitops/cmpxchg
We need our own implementations since we lack LLSC support. Our extended ISA provides optimized solutions for all the 32-bit operations used in these three headers.

Signed-off-by: Noam Camus <noamc@ezchip.com>
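For reference, this is roughly what the new EZNPS BIT_OP() macro below expands to for set_bit() once BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3) is instantiated. This is a hand-expanded sketch for illustration only, not part of the patch; CTOP_INST_AOR_DI_R2_R2_R3 is assumed to be the atomic-OR encoding provided by the eznps platform headers:

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	m += nr >> 5;			/* select the 32-bit word holding the bit */

	nr = (1UL << (nr & 0x1f));	/* mask for the bit within that word */
	/* (clear_bit() uses the AAND encoding instead and inverts the mask first) */

	__asm__ __volatile__(
	"	mov r2, %0\n"		/* r2 <- bit mask */
	"	mov r3, %1\n"		/* r3 <- word address */
	"	.word %2\n"		/* emit the CTOP atomic-OR instruction word */
	:
	: "r"(nr), "r"(m), "i"(CTOP_INST_AOR_DI_R2_R2_R3)
	: "r2", "r3", "memory");
}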
Diffstat (limited to 'arch/arc/include/asm/bitops.h')
-rw-r--r--  arch/arc/include/asm/bitops.h | 60
1 file changed, 57 insertions(+), 3 deletions(-)
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 0352fb8d21b9..8da87feec59a 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -22,7 +22,7 @@
#include <asm/smp.h>
#endif
-#if defined(CONFIG_ARC_HAS_LLSC)
+#ifdef CONFIG_ARC_HAS_LLSC
/*
* Hardware assisted Atomic-R-M-W
@@ -88,7 +88,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
return (old & (1 << nr)) != 0; \
}
-#else /* !CONFIG_ARC_HAS_LLSC */
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)
/*
* Non hardware assisted Atomic-R-M-W
@@ -139,7 +139,55 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
return (old & (1UL << (nr & 0x1f))) != 0; \
}
-#endif /* CONFIG_ARC_HAS_LLSC */
+#else /* CONFIG_ARC_PLAT_EZNPS */
+
+#define BIT_OP(op, c_op, asm_op) \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ m += nr >> 5; \
+ \
+ nr = (1UL << (nr & 0x1f)); \
+ if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \
+ nr = ~nr; \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ : \
+ : "r"(nr), "r"(m), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+}
+
+#define TEST_N_BIT_OP(op, c_op, asm_op) \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ unsigned long old; \
+ \
+ m += nr >> 5; \
+ \
+ nr = old = (1UL << (nr & 0x1f)); \
+ if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3) \
+ old = ~old; \
+ \
+ /* Explicit full memory barrier needed before/after */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ " mov r2, %0\n" \
+ " mov r3, %1\n" \
+ " .word %2\n" \
+ " mov %0, r2" \
+ : "+r"(old) \
+ : "r"(m), "i"(asm_op) \
+ : "r2", "r3", "memory"); \
+ \
+ smp_mb(); \
+ \
+ return (old & nr) != 0; \
+}
+
+#endif /* CONFIG_ARC_PLAT_EZNPS */
/***************************************
* Non atomic variants
@@ -181,9 +229,15 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
__TEST_N_BIT_OP(op, c_op, asm_op)
+#ifndef CONFIG_ARC_PLAT_EZNPS
BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)
+#else
+BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
+BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
+BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
+#endif
/*
* This routine doesn't need to be atomic.