author     Peter Zijlstra <peterz@infradead.org>    2019-02-27 11:09:56 +0100
committer  Ingo Molnar <mingo@kernel.org>           2019-06-17 12:43:44 +0200
commit     2234a6d3a28a971182cc91d5679e444516421de0 (patch)
tree       bea8d20799cc102fe565c3c1c5ccc69802c7bbf6 /arch/x86/include/asm/percpu.h
parent     x86/percpu, sched/fair: Avoid local_clock() (diff)
x86/percpu: Optimize raw_cpu_xchg()
Since raw_cpu_xchg() doesn't need to be IRQ-safe the way this_cpu_xchg() does,
we can use a simple load-store instead of the cmpxchg loop.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
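To make the trade-off concrete, below is a minimal user-space sketch (not the
kernel code itself) contrasting the two shapes: the IRQ-safe exchange that the
old percpu_xchg_op() implemented with a cmpxchg loop, and the plain load-store
that raw_cpu_xchg() can get away with because its callers already guarantee
that nothing else touches the variable in between. The variable fake_pcpu_var
and both helper functions are hypothetical illustrations, not part of
percpu.h; the sketch models the cmpxchg with GCC's __atomic builtins.

#include <stdio.h>

/*
 * Hypothetical stand-in for a per-CPU integer; in the kernel this would be a
 * real per-CPU variable accessed through a segment-prefixed mov.
 */
static int fake_pcpu_var;

/*
 * Shape of the old percpu_xchg_op(): a compare-and-exchange loop that stays
 * correct even if an interrupt modifies the variable between the read and
 * the write.
 */
static int xchg_cmpxchg_loop(int *var, int nval)
{
	int old = __atomic_load_n(var, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(var, &old, nval, 0,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;	/* retry until nothing raced with us */
	return old;
}

/*
 * Shape of the new raw_percpu_xchg_op(): a plain load followed by a plain
 * store. Correct only because raw_cpu_*() callers guarantee nothing else can
 * update the variable in between.
 */
static int xchg_load_store(int *var, int nval)
{
	int old = *var;

	*var = nval;
	return old;
}

int main(void)
{
	fake_pcpu_var = 1;
	printf("old=%d\n", xchg_cmpxchg_loop(&fake_pcpu_var, 2));	/* old=1 */
	printf("old=%d\n", xchg_load_store(&fake_pcpu_var, 3));	/* old=2 */
	return 0;
}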
Diffstat (limited to 'arch/x86/include/asm/percpu.h')
-rw-r--r--  arch/x86/include/asm/percpu.h | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index f75ccccd71aa..2278797c769d 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -407,9 +407,21 @@ do { \
#define raw_cpu_or_1(pcp, val) percpu_to_op(, "or", (pcp), val)
#define raw_cpu_or_2(pcp, val) percpu_to_op(, "or", (pcp), val)
#define raw_cpu_or_4(pcp, val) percpu_to_op(, "or", (pcp), val)
-#define raw_cpu_xchg_1(pcp, val) percpu_xchg_op(, pcp, val)
-#define raw_cpu_xchg_2(pcp, val) percpu_xchg_op(, pcp, val)
-#define raw_cpu_xchg_4(pcp, val) percpu_xchg_op(, pcp, val)
+
+/*
+ * raw_cpu_xchg() can use a load-store since it is not required to be
+ * IRQ-safe.
+ */
+#define raw_percpu_xchg_op(var, nval) \
+({ \
+ typeof(var) pxo_ret__ = raw_cpu_read(var); \
+ raw_cpu_write(var, (nval)); \
+ pxo_ret__; \
+})
+
+#define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val)
+#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val)
+#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp) percpu_from_op(volatile, "mov", pcp)
#define this_cpu_read_2(pcp) percpu_from_op(volatile, "mov", pcp)
@@ -472,7 +484,7 @@ do { \
#define raw_cpu_and_8(pcp, val) percpu_to_op(, "and", (pcp), val)
#define raw_cpu_or_8(pcp, val) percpu_to_op(, "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(, pcp, val)
-#define raw_cpu_xchg_8(pcp, nval) percpu_xchg_op(, pcp, nval)
+#define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval)

#define this_cpu_read_8(pcp) percpu_from_op(volatile, "mov", pcp)
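As a usage note, here is a hedged kernel-style sketch of what a call site
looks like after this change. The per-CPU variable my_count and the function
drain_my_count() are hypothetical, and the caller is assumed to guarantee that
nothing else (neither preemption nor an interrupt handler) updates my_count
concurrently, which is exactly the contract that lets raw_cpu_xchg() skip the
IRQ-safe cmpxchg loop:

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, my_count);

static unsigned long drain_my_count(void)
{
	unsigned long old;

	/*
	 * my_count is assumed never to be modified from IRQ context;
	 * otherwise this_cpu_xchg() would be required instead.
	 */
	preempt_disable();
	/*
	 * With this patch, the line below expands (roughly) to
	 *
	 *	old = raw_cpu_read(my_count);
	 *	raw_cpu_write(my_count, 0);
	 *
	 * i.e. two mov instructions instead of a cmpxchg loop.
	 */
	old = raw_cpu_xchg(my_count, 0);
	preempt_enable();

	return old;
}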