author     Björn Töpel <bjorn.topel@intel.com>    2021-01-18 10:17:53 +0100
committer  Alexei Starovoitov <ast@kernel.org>    2021-01-20 23:10:35 +0100
commit     da9d35e2f2e60bb1256691bb9014a69084ea62d0 (patch)
tree       1a2ad74291acc369ecd2ed4cf34447a191b9bc27 /samples
parent     net, xdp: Introduce xdp_build_skb_from_frame utility routine (diff)
samples/bpf: Add BPF_ATOMIC_OP macro for BPF samples
Brendan Jackman extended the atomic operations in the BPF instruction set in commit 7064a7341a0d ("Merge branch 'Atomics for eBPF'"), which introduced the BPF_ATOMIC_OP macro. However, that macro was missing from the BPF samples. Fix that by adding it to bpf_insn.h.

Fixes: 91c960b00566 ("bpf: Rename BPF_XADD and prepare to encode other atomics in .imm")
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Brendan Jackman <jackmanb@google.com>
Link: https://lore.kernel.org/bpf/20210118091753.107572-1-bjorn.topel@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
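For context, a minimal sketch of how a sample could use the new macro to emit an atomic fetch-and-add on a stack slot; the register choices, the -8 offset, and the surrounding instructions are illustrative assumptions, not part of this patch (assumes <linux/bpf.h> and samples/bpf/bpf_insn.h are included):

    struct bpf_insn prog[] = {
        /* Initialize the stack slot so it holds defined data before the atomic op. */
        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
        BPF_MOV64_IMM(BPF_REG_1, 1),
        /* r1 = atomic_fetch_add((u64 *)(r10 - 8), r1) */
        BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };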
Diffstat (limited to 'samples')
-rw-r--r--  samples/bpf/bpf_insn.h | 24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/samples/bpf/bpf_insn.h b/samples/bpf/bpf_insn.h
index db67a2847395..aee04534483a 100644
--- a/samples/bpf/bpf_insn.h
+++ b/samples/bpf/bpf_insn.h
@@ -134,15 +134,31 @@ struct bpf_insn;
.off = OFF, \
.imm = 0 })
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
-
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
- .imm = BPF_ADD })
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
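The same macro also covers the compare-and-exchange form listed in the comment block above; a hedged sketch of instructions inside a program array (the register numbers, the value 42, and the -8 offset are assumptions, not taken from the patch; assumes <linux/bpf.h> and samples/bpf/bpf_insn.h are included):

    BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),   /* *(u64 *)(r10 - 8) = 0 */
    BPF_MOV64_IMM(BPF_REG_0, 0),             /* r0 = expected value   */
    BPF_MOV64_IMM(BPF_REG_2, 42),            /* r2 = new value        */
    /* r0 = atomic_cmpxchg((u64 *)(r10 - 8), r0, r2); the old value lands in r0 */
    BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),

Note that the legacy BPF_STX_XADD(SIZE, DST, SRC, OFF) now simply expands to BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF), so existing samples keep building unchanged.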