author      Linus Torvalds <torvalds@linux-foundation.org>    2012-12-19 22:05:22 +0100
committer   Linus Torvalds <torvalds@linux-foundation.org>    2012-12-19 22:05:22 +0100
commit      2f0bf92513be58d2d65c0a4cc05c5779a7cd81e1 (patch)
tree        969737816b237a6b920253e92cdac5dec99f04b5 /arch/xtensa/include/asm/bitops.h
parent      Merge branch 'x86/nuke386' of git://git.kernel.org/pub/scm/linux/kernel/git/t... (diff)
parent      xtensa: don't try to build DTB when OF is disabled (diff)
Merge tag 'xtensa-20121218' of git://github.com/czankel/xtensa-linux
Pull Xtensa patchset from Chris Zankel:
"This contains support of device trees, many fixes, and code clean-ups"
* tag 'xtensa-20121218' of git://github.com/czankel/xtensa-linux: (33 commits)
xtensa: don't try to build DTB when OF is disabled
xtensa: set the correct ethernet address for xtfpga
xtensa: clean up files to make them code-style compliant
xtensa: provide endianness macro for sparse
xtensa: fix RASID SR initialization
xtensa: initialize CPENABLE SR when core has one
xtensa: reset all timers on initialization
Use for_each_compatible_node() macro.
xtensa: add XTFPGA DTS
xtensa: add support for the XTFPGA boards
xtensa: add device trees support
xtensa: add IRQ domains support
xtensa: add U-Boot image support (uImage).
xtensa: clean up boot make rules
xtensa: fix mb and wmb definitions
xtensa: add s32c1i-based spinlock implementations
xtensa: add s32c1i-based bitops implementations
xtensa: add s32c1i-based atomic ops implementations
xtensa: add s32c1i sanity check
xtensa: add trap_set_handler function
...
Diffstat (limited to 'arch/xtensa/include/asm/bitops.h')
-rw-r--r--   arch/xtensa/include/asm/bitops.h   127
1 file changed, 126 insertions(+), 1 deletion(-)
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 5270197ddd36..84afe58d5d37 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -29,7 +29,6 @@
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
-#include <asm-generic/bitops/atomic.h>
 #include <asm-generic/bitops/non-atomic.h>
 
 #if XCHAL_HAVE_NSA
@@ -104,6 +103,132 @@ static inline unsigned long __fls(unsigned long word)
 #endif
 
 #include <asm-generic/bitops/fls64.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32i    %1, %3, 0\n"
+			"       wsr     %1, scompare1\n"
+			"       or      %0, %1, %2\n"
+			"       s32c1i  %0, %3, 0\n"
+			"       bne     %0, %1, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (mask), "a" (p)
+			: "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32i    %1, %3, 0\n"
+			"       wsr     %1, scompare1\n"
+			"       and     %0, %1, %2\n"
+			"       s32c1i  %0, %3, 0\n"
+			"       bne     %0, %1, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (~mask), "a" (p)
+			: "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32i    %1, %3, 0\n"
+			"       wsr     %1, scompare1\n"
+			"       xor     %0, %1, %2\n"
+			"       s32c1i  %0, %3, 0\n"
+			"       bne     %0, %1, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (mask), "a" (p)
+			: "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32i    %1, %3, 0\n"
+			"       wsr     %1, scompare1\n"
+			"       or      %0, %1, %2\n"
+			"       s32c1i  %0, %3, 0\n"
+			"       bne     %0, %1, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (mask), "a" (p)
+			: "memory");
+
+	return tmp & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32i    %1, %3, 0\n"
+			"       wsr     %1, scompare1\n"
+			"       and     %0, %1, %2\n"
+			"       s32c1i  %0, %3, 0\n"
+			"       bne     %0, %1, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (~mask), "a" (p)
+			: "memory");
+
+	return tmp & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32i    %1, %3, 0\n"
+			"       wsr     %1, scompare1\n"
+			"       xor     %0, %1, %2\n"
+			"       s32c1i  %0, %3, 0\n"
+			"       bne     %0, %1, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (mask), "a" (p)
+			: "memory");
+
+	return tmp & mask;
+}
+
+#else
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* XCHAL_HAVE_S32C1I */
+
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/le.h>
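Each routine added by this patch is a compare-and-swap retry loop built on the Xtensa S32C1I instruction: l32i loads the current word, wsr latches that value into the SCOMPARE1 special register as the expected value, the or/and/xor computes the updated word, s32c1i stores it only if the word in memory still equals SCOMPARE1 (and returns the value it actually observed), and bne retries the whole sequence if another CPU modified the word in between. The portable C sketch below is not part of the patch; the function name is made up for illustration, the GCC/Clang __atomic_compare_exchange_n builtin stands in for s32c1i, and a 32-bit unsigned long is assumed, as the bit arithmetic in the patch itself assumes.

/*
 * Illustrative sketch only -- not from the patch. Shows the same
 * load / modify / conditional-store / retry pattern that the s32c1i
 * assembly implements, using a generic compare-and-swap builtin.
 */
#include <stdio.h>

static int sketch_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long mask = 1UL << (bit & 31);	/* bit within the word (32-bit long assumed) */
	unsigned long old, val;

	p += bit >> 5;				/* word containing the bit */

	do {
		old = *p;			/* l32i: read the current word           */
		val = old | mask;		/* or:   compute the updated word        */
		/* s32c1i + bne: store val only if the word still equals old,
		 * otherwise another CPU raced us and we go around again. */
	} while (!__atomic_compare_exchange_n((unsigned long *)p, &old, val, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return (old & mask) != 0;		/* previous state of the bit */
}

int main(void)
{
	unsigned long word = 0;
	int was;

	was = sketch_test_and_set_bit(3, &word);
	printf("first call returned %d, word is now %#lx\n", was, word);

	was = sketch_test_and_set_bit(3, &word);
	printf("second call returned %d, word is now %#lx\n", was, word);
	return 0;
}

The same skeleton covers all six functions in the diff; only the modify step (or, and with ~mask, xor) and whether the old bit value is returned differ. When the core lacks S32C1I, the patch falls back to the generic asm-generic/bitops/atomic.h implementation, which is why that include moves into the #else branch.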