author    Jiong Wang <jiong.wang@netronome.com>    2019-08-24 04:00:28 +0200
committer Daniel Borkmann <daniel@iogearbox.net>   2019-08-26 23:03:05 +0200
commit    86c28b2d69f93a218a9a5cef146ed0097a98687f
tree      e07815a72b0c7b5ae1e287e9a5be88b9323056ac
parent    r8152: Set memory to all 0xFFs on failed reg reads
nfp: bpf: fix latency bug when updating stack index register

NFP uses Local Memory to model the stack. LM_addr can be used as the base
of a 16 32-bit-word region of Local Memory. If the stack offset moves
beyond the current region, the local index needs to be updated. The update
takes at least three cycles to take effect, so the sequence normally looks
like:

  local_csr_wr[ActLMAddr3, gprB_5]
  nop
  nop
  nop

If the local index switch happens on a narrow load, the instruction that
zeroes the high 32 bits of the destination register counts as one of those
cycles, so the sequence can instead be:

  local_csr_wr[ActLMAddr3, gprB_5]
  nop
  nop
  immed[gprB_5, 0]

However, the zero-extension optimization can eliminate the zeroing of the
high 32 bits, in which case the IMMED instruction above is not emitted and
the first sequence must be generated.

Fixes: 0b4de1ff19bf ("nfp: bpf: eliminate zero extension code-gen")
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
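
For illustration, the resulting nop-count rule can be sketched as a small
standalone C program. The helper name, the zext_emitted parameter and the
LM_SWITCH_LATENCY constant are hypothetical and only mirror the decision
made in mem_op_stack() in the diff below:

  #include <stdbool.h>
  #include <stdio.h>

  /* An LM index CSR write takes three cycles to take effect. */
  #define LM_SWITCH_LATENCY 3

  /*
   * Hypothetical helper (not part of the driver) mirroring the patch:
   * a narrow load whose upper-half zeroing is actually emitted fills
   * one latency slot by itself, so two nops suffice; if the zext
   * optimization eliminated that zeroing, all three slots need nops.
   */
  static unsigned int lm_switch_nop_count(bool narrow_ld, bool zext_emitted)
  {
          if (narrow_ld && zext_emitted)
                  return LM_SWITCH_LATENCY - 1;
          return LM_SWITCH_LATENCY;
  }

  int main(void)
  {
          printf("narrow load, zext emitted:    %u nops\n",
                 lm_switch_nop_count(true, true));   /* 2 */
          printf("narrow load, zext eliminated: %u nops\n",
                 lm_switch_nop_count(true, false));  /* 3 */
          printf("wide load:                    %u nops\n",
                 lm_switch_nop_count(false, false)); /* 3 */
          return 0;
  }
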
 drivers/net/ethernet/netronome/nfp/bpf/jit.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 4054b70d7719..5afcb3c4c2ef 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
              bool clr_gpr, lmem_step step)
 {
         s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
-        bool first = true, last;
+        bool first = true, narrow_ld, last;
         bool needs_inc = false;
         swreg stack_off_reg;
         u8 prev_gpr = 255;
@@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
                 needs_inc = true;
         }
+
+        narrow_ld = clr_gpr && size < 8;
+
         if (lm3) {
+                unsigned int nop_cnt;
+
                 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
-                /* For size < 4 one slot will be filled by zeroing of upper. */
-                wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
+                /* For size < 4 one slot will be filled by zeroing of upper,
+                 * but be careful, that zeroing could be eliminated by zext
+                 * optimization.
+                 */
+                nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
+                wrp_nops(nfp_prog, nop_cnt);
         }
 
-        if (clr_gpr && size < 8)
+        if (narrow_ld)
                 wrp_zext(nfp_prog, meta, gpr);
 
         while (size) {