author    Daniel Borkmann <daniel@iogearbox.net>    2019-04-26 21:48:22 +0200
committer Alexei Starovoitov <ast@kernel.org>      2019-04-27 03:53:40 +0200
commit    34b8ab091f9ef57a2bb3c8c8359a0a03a8abf2f9 (patch)
tree      00146898135b5e972cea6c930509f1ecdf8fc1ad /arch/arm64/net/bpf_jit.h
parent    bpf, arm64: remove prefetch insn in xadd mapping (diff)
bpf, arm64: use more scalable stadd over ldxr / stxr loop in xadd
Since the ARMv8.1 supplement introduced LSE atomic instructions back in 2016, let's add support for STADD and use it in favor of the LDXR / STXR loop for the XADD mapping when available. STADD is encoded as an alias for LDADD with XZR as the destination register; therefore, add LDADD to the instruction encoder, with STADD as a special case, and use it in the JIT for CPUs that advertise LSE atomics in the CPUID register. If the immediate offset in the BPF XADD insn is 0, use the dst register directly instead of a temporary one.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
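To illustrate the "STADD is an alias for LDADD with XZR" point above, the encoder-side change (in the arm64 insn code, outside this diffstat) can be sketched roughly as follows. The exact signature of aarch64_insn_gen_ldadd() and the AARCH64_INSN_REG_ZR constant are assumptions modeled on the existing aarch64 insn API, not a verbatim excerpt:

/* Sketch: STADD <Ws>, [<Xn>] is LDADD <Ws>, WZR, [<Xn>], i.e. an
 * atomic add whose load result is discarded into the zero register.
 * Helper names below are assumptions based on the insn API style.
 */
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/* STADD is encoded as LDADD with XZR as destination register. */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}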
Diffstat (limited to 'arch/arm64/net/bpf_jit.h')
-rw-r--r-- arch/arm64/net/bpf_jit.h | 4 ++++
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 6c881659ee8a..76606e87233f 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -100,6 +100,10 @@
 #define A64_STXR(sf, Rt, Rn, Rs) \
 	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
 
+/* LSE atomics */
+#define A64_STADD(sf, Rn, Rs) \
+	aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
+
 /* Add/subtract (immediate) */
 #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
 	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
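For context, the consumer side lives in arch/arm64/net/bpf_jit_comp.c (not covered by this diffstat) and uses the new A64_STADD macro roughly as sketched below. The surrounding variable names (dst, src, off, tmp, tmp2, tmp3, isdw, reg) and the cpus_have_cap() gate are assumptions reconstructed from the commit message, not a verbatim excerpt:

/* Sketch of the BPF XADD lowering described in the commit message. */
case BPF_STX | BPF_XADD | BPF_W:
case BPF_STX | BPF_XADD | BPF_DW:
	if (!off) {
		/* Zero immediate offset: address via dst directly. */
		reg = dst;
	} else {
		/* Otherwise compute dst + off into a temporary. */
		emit_a64_mov_i(1, tmp, off, ctx);
		emit(A64_ADD(1, tmp, tmp, dst), ctx);
		reg = tmp;
	}
	if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
		/* LSE atomics: one far atomic add, no retry loop. */
		emit(A64_STADD(isdw, reg, src), ctx);
	} else {
		/* Fallback: exclusive-monitor loop, retry on failure. */
		emit(A64_LDXR(isdw, tmp2, reg), ctx);
		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
		emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
		jmp_offset = -3;
		check_imm19(jmp_offset);
		emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
	}
	break;

The STADD path is more scalable under contention because the add can be performed close to memory (or at the coherence point) in a single instruction, whereas the LDXR / STXR loop can livelock-retry when many CPUs hammer the same counter.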