author     Jie Meng <jmeng@fb.com>	2021-10-06 21:41:35 +0200
committer  Daniel Borkmann <daniel@iogearbox.net>	2021-10-07 21:45:06 +0200
commit     6364d7d75a0e015a405d1f8a07f267f076c36ca6
tree       05a71e3613a83246d0a2f4b549b7cd10d78f3f94 /arch/x86/net
parent     Merge branch 'libbpf: Deprecate bpf_{map,program}__{prev,next} APIs since v0.7'
bpf, x64: Factor out emission of REX byte in more cases
Introduce a single reg version of maybe_emit_mod() and factor out common
code in more cases.

Signed-off-by: Jie Meng <jmeng@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20211006194135.608932-1-jmeng@fb.com
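For context, the refactor can be sketched as follows. This is a minimal
restatement of what the diff below does, reusing the JIT's existing
EMIT1()/add_1mod()/is_ereg() helpers (not redefined here) rather than a
standalone program:

/*
 * Single-register counterpart of maybe_emit_mod(): emit an optional REX
 * prefix. 0x48 sets REX.W for a 64-bit operand size; a bare 0x40 REX is
 * only needed when the register maps to an extended register (r8-r15).
 */
static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_1mod(0x48, reg));
	else if (is_ereg(reg))
		EMIT1(add_1mod(0x40, reg));
	*pprog = prog;
}

/* Call sites collapse from the open-coded form ... */
if (BPF_CLASS(insn->code) == BPF_ALU64)
	EMIT1(add_1mod(0x48, dst_reg));
else if (is_ereg(dst_reg))
	EMIT1(add_1mod(0x40, dst_reg));

/* ... into a single call: */
maybe_emit_1mod(&prog, dst_reg, BPF_CLASS(insn->code) == BPF_ALU64);

The two-register BPF_MUL | BPF_K and BPF_MUL | BPF_X cases are converted to
the already existing maybe_emit_mod() helper instead.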
Diffstat (limited to 'arch/x86/net')
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 67
1 file changed, 31 insertions, 36 deletions
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5a0edea3cc2e..e474718d152b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -721,6 +721,20 @@ static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
*pprog = prog;
}
+/*
+ * Similar version of maybe_emit_mod() for a single register
+ */
+static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
+{
+ u8 *prog = *pprog;
+
+ if (is64)
+ EMIT1(add_1mod(0x48, reg));
+ else if (is_ereg(reg))
+ EMIT1(add_1mod(0x40, reg));
+ *pprog = prog;
+}
+
/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
@@ -951,10 +965,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
/* neg dst */
case BPF_ALU | BPF_NEG:
case BPF_ALU64 | BPF_NEG:
- if (BPF_CLASS(insn->code) == BPF_ALU64)
- EMIT1(add_1mod(0x48, dst_reg));
- else if (is_ereg(dst_reg))
- EMIT1(add_1mod(0x40, dst_reg));
+ maybe_emit_1mod(&prog, dst_reg,
+ BPF_CLASS(insn->code) == BPF_ALU64);
EMIT2(0xF7, add_1reg(0xD8, dst_reg));
break;
@@ -968,10 +980,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_ALU64 | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
- if (BPF_CLASS(insn->code) == BPF_ALU64)
- EMIT1(add_1mod(0x48, dst_reg));
- else if (is_ereg(dst_reg))
- EMIT1(add_1mod(0x40, dst_reg));
+ maybe_emit_1mod(&prog, dst_reg,
+ BPF_CLASS(insn->code) == BPF_ALU64);
/*
* b3 holds 'normal' opcode, b2 short form only valid
@@ -1059,11 +1069,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
*/
EMIT2(0x31, 0xd2);
- if (is64)
- EMIT1(add_1mod(0x48, src_reg));
- else if (is_ereg(src_reg))
- EMIT1(add_1mod(0x40, src_reg));
/* div src_reg */
+ maybe_emit_1mod(&prog, src_reg, is64);
EMIT2(0xF7, add_1reg(0xF0, src_reg));
if (BPF_OP(insn->code) == BPF_MOD &&
@@ -1084,10 +1091,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_MUL | BPF_K:
- if (BPF_CLASS(insn->code) == BPF_ALU64)
- EMIT1(add_2mod(0x48, dst_reg, dst_reg));
- else if (is_ereg(dst_reg))
- EMIT1(add_2mod(0x40, dst_reg, dst_reg));
+ maybe_emit_mod(&prog, dst_reg, dst_reg,
+ BPF_CLASS(insn->code) == BPF_ALU64);
if (is_imm8(imm32))
/* imul dst_reg, dst_reg, imm8 */
@@ -1102,10 +1107,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
- if (BPF_CLASS(insn->code) == BPF_ALU64)
- EMIT1(add_2mod(0x48, src_reg, dst_reg));
- else if (is_ereg(dst_reg) || is_ereg(src_reg))
- EMIT1(add_2mod(0x40, src_reg, dst_reg));
+ maybe_emit_mod(&prog, src_reg, dst_reg,
+ BPF_CLASS(insn->code) == BPF_ALU64);
/* imul dst_reg, src_reg */
EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
@@ -1118,10 +1121,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_ALU64 | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
- if (BPF_CLASS(insn->code) == BPF_ALU64)
- EMIT1(add_1mod(0x48, dst_reg));
- else if (is_ereg(dst_reg))
- EMIT1(add_1mod(0x40, dst_reg));
+ maybe_emit_1mod(&prog, dst_reg,
+ BPF_CLASS(insn->code) == BPF_ALU64);
b3 = simple_alu_opcodes[BPF_OP(insn->code)];
if (imm32 == 1)
@@ -1152,10 +1153,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
}
/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
- if (BPF_CLASS(insn->code) == BPF_ALU64)
- EMIT1(add_1mod(0x48, dst_reg));
- else if (is_ereg(dst_reg))
- EMIT1(add_1mod(0x40, dst_reg));
+ maybe_emit_1mod(&prog, dst_reg,
+ BPF_CLASS(insn->code) == BPF_ALU64);
b3 = simple_alu_opcodes[BPF_OP(insn->code)];
EMIT2(0xD3, add_1reg(b3, dst_reg));
@@ -1465,10 +1464,8 @@ st: if (is_imm8(insn->off))
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K:
/* test dst_reg, imm32 */
- if (BPF_CLASS(insn->code) == BPF_JMP)
- EMIT1(add_1mod(0x48, dst_reg));
- else if (is_ereg(dst_reg))
- EMIT1(add_1mod(0x40, dst_reg));
+ maybe_emit_1mod(&prog, dst_reg,
+ BPF_CLASS(insn->code) == BPF_JMP);
EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
goto emit_cond_jmp;
@@ -1501,10 +1498,8 @@ st: if (is_imm8(insn->off))
}
/* cmp dst_reg, imm8/32 */
- if (BPF_CLASS(insn->code) == BPF_JMP)
- EMIT1(add_1mod(0x48, dst_reg));
- else if (is_ereg(dst_reg))
- EMIT1(add_1mod(0x40, dst_reg));
+ maybe_emit_1mod(&prog, dst_reg,
+ BPF_CLASS(insn->code) == BPF_JMP);
if (is_imm8(imm32))
EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);