author     Linus Torvalds <torvalds@linux-foundation.org>  2016-01-19 01:44:24 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-01-19 01:44:24 +0100
commit     a200dcb34693084e56496960d855afdeaaf9578f (patch)
tree       bf65e4350460b7f98247278469f7600d1808c3fc /arch
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile (diff)
parent     checkpatch: add virt barriers (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio barrier rework+fixes from Michael Tsirkin:
 "This adds a new kind of barrier, and reworks virtio and xen to use it.
  Plus some fixes here and there"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (44 commits)
  checkpatch: add virt barriers
  checkpatch: check for __smp outside barrier.h
  checkpatch.pl: add missing memory barriers
  virtio: make find_vqs() checkpatch.pl-friendly
  virtio_balloon: fix race between migration and ballooning
  virtio_balloon: fix race by fill and leak
  s390: more efficient smp barriers
  s390: use generic memory barriers
  xen/events: use virt_xxx barriers
  xen/io: use virt_xxx barriers
  xenbus: use virt_xxx barriers
  virtio_ring: use virt_store_mb
  sh: move xchg_cmpxchg to a header by itself
  sh: support 1 and 2 byte xchg
  virtio_ring: update weak barriers to use virt_xxx
  Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb"
  asm-generic: implement virt_xxx memory barriers
  x86: define __smp_xxx
  xtensa: define __smp_xxx
  tile: define __smp_xxx
  ...
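For context, the core of the rework visible in the diff below is that each architecture now supplies only raw __smp_xxx() primitives and then includes asm-generic/barrier.h, which derives both the CONFIG_SMP-conditional smp_xxx() macros and the new, always-enforced virt_xxx() macros from them. A simplified sketch of that generic layer (condensed for illustration, not the verbatim asm-generic/barrier.h):

  /* Simplified sketch of the asm-generic pattern introduced by this series. */
  #ifndef __smp_mb
  #define __smp_mb()	mb()		/* fallback if the arch defines nothing */
  #endif

  #ifdef CONFIG_SMP
  #define smp_mb()	__smp_mb()	/* real ordering needed between CPUs */
  #else
  #define smp_mb()	barrier()	/* UP kernel: a compiler barrier suffices */
  #endif

  /*
   * virt_mb() is for guests talking to a hypervisor or another VM: the other
   * side runs on a different CPU even when this kernel is !CONFIG_SMP, so it
   * always expands to the arch __smp_mb() rather than the conditional smp_mb().
   */
  #define virt_mb()	__smp_mb()

The same scheme covers __smp_rmb/__smp_wmb, __smp_store_mb, and the __smp_store_release/__smp_load_acquire helpers seen in the per-arch hunks below.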
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/barrier.h       35
-rw-r--r--  arch/arm64/include/asm/barrier.h     19
-rw-r--r--  arch/blackfin/include/asm/barrier.h   4
-rw-r--r--  arch/ia64/include/asm/barrier.h      24
-rw-r--r--  arch/ia64/kernel/iosapic.c            6
-rw-r--r--  arch/metag/include/asm/barrier.h     55
-rw-r--r--  arch/mips/include/asm/barrier.h      51
-rw-r--r--  arch/powerpc/include/asm/barrier.h   33
-rw-r--r--  arch/s390/include/asm/barrier.h      23
-rw-r--r--  arch/sh/include/asm/barrier.h         3
-rw-r--r--  arch/sh/include/asm/cmpxchg-grb.h    22
-rw-r--r--  arch/sh/include/asm/cmpxchg-irq.h    11
-rw-r--r--  arch/sh/include/asm/cmpxchg-llsc.h   25
-rw-r--r--  arch/sh/include/asm/cmpxchg-xchg.h   51
-rw-r--r--  arch/sh/include/asm/cmpxchg.h         3
-rw-r--r--  arch/sparc/include/asm/barrier_32.h   1
-rw-r--r--  arch/sparc/include/asm/barrier_64.h  29
-rw-r--r--  arch/sparc/include/asm/processor.h    3
-rw-r--r--  arch/tile/include/asm/barrier.h       9
-rw-r--r--  arch/x86/include/asm/barrier.h       36
-rw-r--r--  arch/x86/um/asm/barrier.h             9
-rw-r--r--  arch/xtensa/include/asm/barrier.h     4
22 files changed, 194 insertions, 262 deletions
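On the consumer side, the point of the virt_xxx family is that driver code synchronizing with a host keeps real ordering even on a UP guest. A hypothetical ring-publish fragment (illustrative only; the struct, field, and function names are invented and not taken from virtio_ring.c):

  struct demo_ring {
  	unsigned int head;		/* producer index, polled by the host */
  	unsigned int mask;
  	struct demo_desc *desc;
  };

  static void demo_publish(struct demo_ring *r, const struct demo_desc *d)
  {
  	r->desc[r->head & r->mask] = *d;	/* fill the slot first */
  	virt_wmb();		/* slot contents visible before the index bump */
  	WRITE_ONCE(r->head, r->head + 1);	/* host may consume from here on */
  }

Where a store must additionally be ordered against a later load (event-index suppression, for example), virt_store_mb() combines the store with a full barrier, which is what the "virtio_ring: use virt_store_mb" commit in the list above switches to.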
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 3ff5642d9788..112cc1a5d47f 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -60,38 +60,11 @@ extern void arm_heavy_mb(void);
#define dma_wmb() barrier()
#endif
-#ifndef CONFIG_SMP
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#else
-#define smp_mb() dmb(ish)
-#define smp_rmb() smp_mb()
-#define smp_wmb() dmb(ishst)
-#endif
-
-#define smp_store_release(p, v) \
-do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
- WRITE_ONCE(*p, v); \
-} while (0)
-
-#define smp_load_acquire(p) \
-({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
- ___p1; \
-})
-
-#define read_barrier_depends() do { } while(0)
-#define smp_read_barrier_depends() do { } while(0)
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define __smp_mb() dmb(ish)
+#define __smp_rmb() __smp_mb()
+#define __smp_wmb() dmb(ishst)
-#define smp_mb__before_atomic() smp_mb()
-#define smp_mb__after_atomic() smp_mb()
+#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_BARRIER_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 9622eb48f894..dae5c49618db 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,11 +35,11 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
-#define smp_mb() dmb(ish)
-#define smp_rmb() dmb(ishld)
-#define smp_wmb() dmb(ishst)
+#define __smp_mb() dmb(ish)
+#define __smp_rmb() dmb(ishld)
+#define __smp_wmb() dmb(ishst)
-#define smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
@@ -62,7 +62,7 @@ do { \
} \
} while (0)
-#define smp_load_acquire(p) \
+#define __smp_load_acquire(p) \
({ \
union { typeof(*p) __val; char __c[1]; } __u; \
compiletime_assert_atomic_type(*p); \
@@ -91,14 +91,7 @@ do { \
__u.__val; \
})
-#define read_barrier_depends() do { } while(0)
-#define smp_read_barrier_depends() do { } while(0)
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-#define nop() asm volatile("nop");
-
-#define smp_mb__before_atomic() smp_mb()
-#define smp_mb__after_atomic() smp_mb()
+#include <asm-generic/barrier.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
index dfb66fe88b34..7cca51cae5ff 100644
--- a/arch/blackfin/include/asm/barrier.h
+++ b/arch/blackfin/include/asm/barrier.h
@@ -78,8 +78,8 @@
#endif /* !CONFIG_SMP */
-#define smp_mb__before_atomic() barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
#include <asm-generic/barrier.h>
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 209c4b817c95..588f1614cafc 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -42,34 +42,24 @@
#define dma_rmb() mb()
#define dma_wmb() mb()
-#ifdef CONFIG_SMP
-# define smp_mb() mb()
-#else
-# define smp_mb() barrier()
-#endif
+# define __smp_mb() mb()
-#define smp_rmb() smp_mb()
-#define smp_wmb() smp_mb()
-
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
-
-#define smp_mb__before_atomic() barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
/*
* IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
* need for asm trickery!
*/
-#define smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
WRITE_ONCE(*p, v); \
} while (0)
-#define smp_load_acquire(p) \
+#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
@@ -77,12 +67,12 @@ do { \
___p1; \
})
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
/*
* The group barrier in front of the rsm & ssm are necessary to ensure
* that none of the previous instructions in the same group are
* affected by the rsm/ssm.
*/
+#include <asm-generic/barrier.h>
+
#endif /* _ASM_IA64_BARRIER_H */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index d2fae054d988..90fde5b8669d 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -256,7 +256,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
}
static void
-nop (struct irq_data *data)
+iosapic_nop (struct irq_data *data)
{
/* do nothing... */
}
@@ -415,7 +415,7 @@ iosapic_unmask_level_irq (struct irq_data *data)
#define iosapic_shutdown_level_irq mask_irq
#define iosapic_enable_level_irq unmask_irq
#define iosapic_disable_level_irq mask_irq
-#define iosapic_ack_level_irq nop
+#define iosapic_ack_level_irq iosapic_nop
static struct irq_chip irq_type_iosapic_level = {
.name = "IO-SAPIC-level",
@@ -453,7 +453,7 @@ iosapic_ack_edge_irq (struct irq_data *data)
}
#define iosapic_enable_edge_irq unmask_irq
-#define iosapic_disable_edge_irq nop
+#define iosapic_disable_edge_irq iosapic_nop
static struct irq_chip irq_type_iosapic_edge = {
.name = "IO-SAPIC-edge",
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 172b7e5efc53..5418517aa5eb 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -44,16 +44,6 @@ static inline void wr_fence(void)
#define rmb() barrier()
#define wmb() mb()
-#define dma_rmb() rmb()
-#define dma_wmb() wmb()
-
-#ifndef CONFIG_SMP
-#define fence() do { } while (0)
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#else
-
#ifdef CONFIG_METAG_SMP_WRITE_REORDERING
/*
* Write to the atomic memory unlock system event register (command 0). This is
@@ -63,45 +53,32 @@ static inline void wr_fence(void)
* incoherence). It is therefore ineffective if used after and on the same
* thread as a write.
*/
-static inline void fence(void)
+static inline void metag_fence(void)
{
volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
barrier();
*flushptr = 0;
barrier();
}
-#define smp_mb() fence()
-#define smp_rmb() fence()
-#define smp_wmb() barrier()
+#define __smp_mb() metag_fence()
+#define __smp_rmb() metag_fence()
+#define __smp_wmb() barrier()
#else
-#define fence() do { } while (0)
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif
+#define metag_fence() do { } while (0)
+#define __smp_mb() barrier()
+#define __smp_rmb() barrier()
+#define __smp_wmb() barrier()
#endif
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v) \
-do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
- WRITE_ONCE(*p, v); \
-} while (0)
+#ifdef CONFIG_SMP
+#define fence() metag_fence()
+#else
+#define fence() do { } while (0)
+#endif
-#define smp_load_acquire(p) \
-({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
- ___p1; \
-})
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
-#define smp_mb__before_atomic() barrier()
-#define smp_mb__after_atomic() barrier()
+#include <asm-generic/barrier.h>
#endif /* _ASM_METAG_BARRIER_H */
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 752e0b86c171..d296633d890e 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -10,9 +10,6 @@
#include <asm/addrspace.h>
-#define read_barrier_depends() do { } while(0)
-#define smp_read_barrier_depends() do { } while(0)
-
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
__asm__ __volatile__( \
@@ -87,23 +84,21 @@
#define wmb() fast_wmb()
#define rmb() fast_rmb()
-#define dma_wmb() fast_wmb()
-#define dma_rmb() fast_rmb()
-#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#if defined(CONFIG_WEAK_ORDERING)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
-# define smp_mb() __sync()
-# define smp_rmb() barrier()
-# define smp_wmb() __syncw()
+# define __smp_mb() __sync()
+# define __smp_rmb() barrier()
+# define __smp_wmb() __syncw()
# else
-# define smp_mb() __asm__ __volatile__("sync" : : :"memory")
-# define smp_rmb() __asm__ __volatile__("sync" : : :"memory")
-# define smp_wmb() __asm__ __volatile__("sync" : : :"memory")
+# define __smp_mb() __asm__ __volatile__("sync" : : :"memory")
+# define __smp_rmb() __asm__ __volatile__("sync" : : :"memory")
+# define __smp_wmb() __asm__ __volatile__("sync" : : :"memory")
# endif
#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
+#define __smp_mb() barrier()
+#define __smp_rmb() barrier()
+#define __smp_wmb() barrier()
#endif
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
@@ -112,13 +107,11 @@
#define __WEAK_LLSC_MB " \n"
#endif
-#define smp_store_mb(var, value) \
- do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
+#define __smp_mb__before_llsc() __smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t" \
".set arch=octeon\n\t" \
@@ -126,25 +119,13 @@
".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
+#define __smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif
-#define smp_store_release(p, v) \
-do { \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
- WRITE_ONCE(*p, v); \
-} while (0)
-
-#define smp_load_acquire(p) \
-({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
- compiletime_assert_atomic_type(*p); \
- smp_mb(); \
- ___p1; \
-})
-
-#define smp_mb__before_atomic() smp_mb__before_llsc()
-#define smp_mb__after_atomic() smp_llsc_mb()
+#define __smp_mb__before_atomic() __smp_mb__before_llsc()
+#define __smp_mb__after_atomic() smp_llsc_mb()
+
+#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index a7af5fb7b914..c0deafc212b8 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,8 +34,6 @@
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
#ifdef __SUBARCH_HAS_LWSYNC
# define SMPWMB LWSYNC
#else
@@ -46,22 +44,11 @@
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#ifdef CONFIG_SMP
-#define smp_lwsync() __lwsync()
-
-#define smp_mb() mb()
-#define smp_rmb() __lwsync()
-#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#else
-#define smp_lwsync() barrier()
+#define __smp_lwsync() __lwsync()
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif /* CONFIG_SMP */
-
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
+#define __smp_mb() mb()
+#define __smp_rmb() __lwsync()
+#define __smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
/*
* This is a barrier which prevents following instructions from being
@@ -72,23 +59,23 @@
#define data_barrier(x) \
asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
-#define smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
- smp_lwsync(); \
+ __smp_lwsync(); \
WRITE_ONCE(*p, v); \
} while (0)
-#define smp_load_acquire(p) \
+#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
- smp_lwsync(); \
+ __smp_lwsync(); \
___p1; \
})
-#define smp_mb__before_atomic() smp_mb()
-#define smp_mb__after_atomic() smp_mb()
#define smp_mb__before_spinlock() smp_mb()
+#include <asm-generic/barrier.h>
+
#endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 7ffd0b19135c..5c8db3ce61c8 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -26,26 +26,18 @@
#define wmb() barrier()
#define dma_rmb() mb()
#define dma_wmb() mb()
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
+#define __smp_mb() mb()
+#define __smp_rmb() rmb()
+#define __smp_wmb() wmb()
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
-
-#define smp_mb__before_atomic() smp_mb()
-#define smp_mb__after_atomic() smp_mb()
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
WRITE_ONCE(*p, v); \
} while (0)
-#define smp_load_acquire(p) \
+#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
@@ -53,4 +45,9 @@ do { \
___p1; \
})
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
+#include <asm-generic/barrier.h>
+
#endif /* __ASM_BARRIER_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index bf91037db4e0..f887c6465a82 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -32,7 +32,8 @@
#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#endif
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) __smp_store_mb(var, value)
#include <asm-generic/barrier.h>
diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h
index f848dec9e483..2ed557b31bd9 100644
--- a/arch/sh/include/asm/cmpxchg-grb.h
+++ b/arch/sh/include/asm/cmpxchg-grb.h
@@ -23,6 +23,28 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
return retval;
}
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+ unsigned long retval;
+
+ __asm__ __volatile__ (
+ " .align 2 \n\t"
+ " mova 1f, r0 \n\t" /* r0 = end point */
+ " mov r15, r1 \n\t" /* r1 = saved sp */
+ " mov #-6, r15 \n\t" /* LOGIN */
+ " mov.w @%1, %0 \n\t" /* load old value */
+ " extu.w %0, %0 \n\t" /* extend as unsigned */
+ " mov.w %2, @%1 \n\t" /* store new value */
+ "1: mov r1, r15 \n\t" /* LOGOUT */
+ : "=&r" (retval),
+ "+r" (m),
+ "+r" (val) /* inhibit r15 overloading */
+ :
+ : "memory" , "r0", "r1");
+
+ return retval;
+}
+
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long retval;
diff --git a/arch/sh/include/asm/cmpxchg-irq.h b/arch/sh/include/asm/cmpxchg-irq.h
index bd11f630414a..f88877257171 100644
--- a/arch/sh/include/asm/cmpxchg-irq.h
+++ b/arch/sh/include/asm/cmpxchg-irq.h
@@ -14,6 +14,17 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
return retval;
}
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+ unsigned long flags, retval;
+
+ local_irq_save(flags);
+ retval = *m;
+ *m = val;
+ local_irq_restore(flags);
+ return retval;
+}
+
static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
unsigned long flags, retval;
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index 47136661a203..fcfd32271bff 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -22,29 +22,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
return retval;
}
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
- unsigned long retval;
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "movli.l @%2, %0 ! xchg_u8 \n\t"
- "mov %0, %1 \n\t"
- "mov %3, %0 \n\t"
- "movco.l %0, @%2 \n\t"
- "bf 1b \n\t"
- "synco \n\t"
- : "=&z"(tmp), "=&r" (retval)
- : "r" (m), "r" (val & 0xff)
- : "t", "memory"
- );
-
- return retval;
-}
-
static inline unsigned long
-__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
{
unsigned long retval;
unsigned long tmp;
@@ -68,4 +47,6 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
return retval;
}
+#include <asm/cmpxchg-xchg.h>
+
#endif /* __ASM_SH_CMPXCHG_LLSC_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
new file mode 100644
index 000000000000..7219719c23a3
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_CMPXCHG_XCHG_H
+#define __ASM_SH_CMPXCHG_XCHG_H
+
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * file "COPYING" in the main directory of this archive for more details.
+ */
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg.
+ * Note: this header isn't self-contained: before including it, __cmpxchg_u32
+ * must be defined first.
+ */
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+ int off = (unsigned long)ptr % sizeof(u32);
+ volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+ int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+ int bitoff = off * BITS_PER_BYTE;
+#endif
+ u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+ u32 oldv, newv;
+ u32 ret;
+
+ do {
+ oldv = READ_ONCE(*p);
+ ret = (oldv & bitmask) >> bitoff;
+ newv = (oldv & ~bitmask) | (x << bitoff);
+ } while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+ return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+ return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+ return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+#endif /* __ASM_SH_CMPXCHG_XCHG_H */
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index 85c97b188d71..5225916c1057 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -27,6 +27,9 @@ extern void __xchg_called_with_bad_pointer(void);
case 4: \
__xchg__res = xchg_u32(__xchg_ptr, x); \
break; \
+ case 2: \
+ __xchg__res = xchg_u16(__xchg_ptr, x); \
+ break; \
case 1: \
__xchg__res = xchg_u8(__xchg_ptr, x); \
break; \
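With xchg_u16() now provided on each SH configuration (natively in the grb/irq variants, and via the 4-byte cmpxchg fallback from cmpxchg-xchg.h in the llsc variant) plus the new 2-byte case above, a plain xchg() on a 16-bit object works on all SH builds. A hypothetical usage snippet (names invented for illustration):

  /* Atomically grab-and-clear a 16-bit flag word; relies on the new 2-byte xchg(). */
  static inline u16 demo_take_flags(u16 *flags)
  {
  	return xchg(flags, 0);	/* __xchg() now dispatches size 2 to xchg_u16() */
  }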
diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
index ae69eda288f4..8059130a6cee 100644
--- a/arch/sparc/include/asm/barrier_32.h
+++ b/arch/sparc/include/asm/barrier_32.h
@@ -1,7 +1,6 @@
#ifndef __SPARC_BARRIER_H
#define __SPARC_BARRIER_H
-#include <asm/processor.h> /* for nop() */
#include <asm-generic/barrier.h>
#endif /* !(__SPARC_BARRIER_H) */
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 14a928601657..c9f6ee64f41d 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -37,33 +37,14 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")
-#define dma_rmb() rmb()
-#define dma_wmb() wmb()
-
-#define smp_store_mb(__var, __value) \
- do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#else
-#define smp_mb() __asm__ __volatile__("":::"memory")
-#define smp_rmb() __asm__ __volatile__("":::"memory")
-#define smp_wmb() __asm__ __volatile__("":::"memory")
-#endif
-
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
-
-#define smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
WRITE_ONCE(*p, v); \
} while (0)
-#define smp_load_acquire(p) \
+#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
@@ -71,7 +52,9 @@ do { \
___p1; \
})
-#define smp_mb__before_atomic() barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
+#include <asm-generic/barrier.h>
#endif /* !(__SPARC64_BARRIER_H) */
diff --git a/arch/sparc/include/asm/processor.h b/arch/sparc/include/asm/processor.h
index 2fe99e66e760..9da9646bf6c6 100644
--- a/arch/sparc/include/asm/processor.h
+++ b/arch/sparc/include/asm/processor.h
@@ -5,7 +5,4 @@
#else
#include <asm/processor_32.h>
#endif
-
-#define nop() __asm__ __volatile__ ("nop")
-
#endif
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index 96a42ae79f4d..d55222806c2f 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -79,11 +79,12 @@ mb_incoherent(void)
* But after the word is updated, the routine issues an "mf" before returning,
* and since it's a function call, we don't even need a compiler barrier.
*/
-#define smp_mb__before_atomic() smp_mb()
-#define smp_mb__after_atomic() do { } while (0)
+#define __smp_mb__before_atomic() __smp_mb()
+#define __smp_mb__after_atomic() do { } while (0)
+#define smp_mb__after_atomic() __smp_mb__after_atomic()
#else /* 64 bit */
-#define smp_mb__before_atomic() smp_mb()
-#define smp_mb__after_atomic() smp_mb()
+#define __smp_mb__before_atomic() __smp_mb()
+#define __smp_mb__after_atomic() __smp_mb()
#endif
#include <asm-generic/barrier.h>
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 0681d2532527..a584e1c50918 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -31,20 +31,10 @@
#endif
#define dma_wmb() barrier()
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() dma_rmb()
-#define smp_wmb() barrier()
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else /* !SMP */
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-#endif /* SMP */
-
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
+#define __smp_mb() mb()
+#define __smp_rmb() dma_rmb()
+#define __smp_wmb() barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#if defined(CONFIG_X86_PPRO_FENCE)
@@ -53,31 +43,31 @@
* model and we should fall back to full barriers.
*/
-#define smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
- smp_mb(); \
+ __smp_mb(); \
WRITE_ONCE(*p, v); \
} while (0)
-#define smp_load_acquire(p) \
+#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
- smp_mb(); \
+ __smp_mb(); \
___p1; \
})
#else /* regular x86 TSO memory ordering */
-#define smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
WRITE_ONCE(*p, v); \
} while (0)
-#define smp_load_acquire(p) \
+#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
@@ -88,7 +78,9 @@ do { \
#endif
/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic() barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
+
+#include <asm-generic/barrier.h>
#endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 755481f14d90..174781a404ff 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -36,13 +36,6 @@
#endif /* CONFIG_X86_PPRO_FENCE */
#define dma_wmb() barrier()
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-
-#define read_barrier_depends() do { } while (0)
-#define smp_read_barrier_depends() do { } while (0)
+#include <asm-generic/barrier.h>
#endif
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 5b88774c75ab..956596e4d437 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -13,8 +13,8 @@
#define rmb() barrier()
#define wmb() mb()
-#define smp_mb__before_atomic() barrier()
-#define smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic() barrier()
+#define __smp_mb__after_atomic() barrier()
#include <asm-generic/barrier.h>