author		Peter Zijlstra <peterz@infradead.org>	2014-03-13 19:00:36 +0100
committer	Ingo Molnar <mingo@kernel.org>	2014-04-18 14:20:35 +0200
commit		0cd64efb61f1e68be26bd5121ccff3c779dc488b (patch)
tree		4f5b7c1e0fd57f9b7f1652003264a996bb79429e /arch/ia64/include/asm/bitops.h
parent		arch,hexagon: Convert smp_mb__*() (diff)
download	linux-0cd64efb61f1e68be26bd5121ccff3c779dc488b.tar.xz
		linux-0cd64efb61f1e68be26bd5121ccff3c779dc488b.zip
arch,ia64: Convert smp_mb__*()
ia64 atomic ops are full barriers; implement the new
smp_mb__{before,after}_atomic().
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-hyp7yj68cmqz1nqbfpr541ca@git.kernel.org
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
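Since ia64 atomic operations already act as full memory barriers, the new
smp_mb__{before,after}_atomic() helpers only need to stop compiler reordering.
A minimal sketch of that definition, assuming it lives in
arch/ia64/include/asm/barrier.h (the diffstat below covers only bitops.h, so
this hunk is not shown in it):

/*
 * Sketch only: on an architecture whose atomic ops are full barriers,
 * the generic helpers reduce to compiler barriers.  The barrier.h
 * placement is assumed here, not taken from the diff below.
 */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()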
Diffstat (limited to 'arch/ia64/include/asm/bitops.h')
-rw-r--r--	arch/ia64/include/asm/bitops.h	6
1 file changed, 2 insertions, 4 deletions
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index feb8117ed06a..71e8145243ee 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -65,9 +66,6 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-#define smp_mb__before_clear_bit()	barrier();
-#define smp_mb__after_clear_bit()	barrier();
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -75,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
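For illustration only, a hedged caller-side sketch of the pattern the renamed
helpers serve: clear_bit() is atomic but implies no memory barrier, so a
writer that clears a flag and then wakes waiters places smp_mb__after_atomic()
in between.  The flag bit, wait queue, and function names below are
hypothetical and not taken from this commit.

/*
 * Hedged example (not part of this commit): typical caller-side use of
 * the renamed helpers.  clear_bit() is atomic but implies no memory
 * barrier, so one is added before waking waiters.
 */
#include <linux/bitops.h>
#include <linux/wait.h>

#define MY_FLAG_BUSY	0		/* hypothetical flag bit */

static unsigned long my_flags;
static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void my_release(void)
{
	clear_bit(MY_FLAG_BUSY, &my_flags);
	smp_mb__after_atomic();		/* was: smp_mb__after_clear_bit() */
	wake_up(&my_wq);
}

static void my_wait(void)
{
	wait_event(my_wq, !test_bit(MY_FLAG_BUSY, &my_flags));
}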