author		Xiao Wang <xiao.w.wang@intel.com>	2024-05-23 05:18:35 +0200
committer	Daniel Borkmann <daniel@iogearbox.net>	2024-05-24 17:16:56 +0200
commit		e944fc8152744a41dc62e720995538e48b053bb9 (patch)
tree		f3e3ab4a55d73ebb1665384c0713b70ef29c07b8 /arch/riscv/net
parent		riscv, bpf: Optimize zextw insn with Zba extension (diff)
riscv, bpf: Use STACK_ALIGN macro for size rounding up
Use the STACK_ALIGN macro defined in asm/processor.h to round up the
stack size, just like bpf_jit_comp32.c does.

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Pu Lehui <pulehui@huawei.com>
Link: https://lore.kernel.org/bpf/20240523031835.3977713-1-xiao.w.wang@intel.com
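
For illustration, a minimal standalone sketch of the rounding this patch
performs, assuming STACK_ALIGN is 16 on RV64 (the stack alignment required
by the RISC-V psABI); ROUND_UP below is a stand-in for the kernel's
round_up() helper:

	#include <stdio.h>

	/* Stand-in for the kernel's round_up() helper;
	 * valid for power-of-two alignments only. */
	#define ROUND_UP(x, align)	(((x) + (align) - 1) & ~((align) - 1))

	/* Assumption: asm/processor.h defines STACK_ALIGN as 16 on RV64,
	 * per the RISC-V psABI stack alignment requirement. */
	#define STACK_ALIGN	16

	int main(void)
	{
		unsigned int depth[] = { 0, 1, 8, 16, 17, 33 };
		unsigned int i;

		for (i = 0; i < sizeof(depth) / sizeof(depth[0]); i++)
			printf("stack_depth %2u -> rounded to %2u\n",
			       depth[i], ROUND_UP(depth[i], STACK_ALIGN));
		/* Prints: 0->0, 1->16, 8->16, 16->16, 17->32, 33->48 */
		return 0;
	}

Using the named macro instead of the literal 16 keeps all three rounding
sites in the JIT consistent with the architecture's declared alignment.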
Diffstat (limited to 'arch/riscv/net')
-rw-r--r--	arch/riscv/net/bpf_jit_comp64.c	| 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 79a001d5533e..c21a0ff23415 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -868,7 +868,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	stack_size += 8;
 	sreg_off = stack_size;
 
-	stack_size = round_up(stack_size, 16);
+	stack_size = round_up(stack_size, STACK_ALIGN);
 
 	if (!is_struct_ops) {
 		/* For the trampoline called from function entry,
@@ -1960,7 +1960,7 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
 {
 	int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
 
-	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);
 
 	if (bpf_stack_adjust)
 		mark_fp(ctx);
@@ -1982,7 +1982,7 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
 	if (ctx->arena_vm_start)
 		stack_adjust += 8;
 
-	stack_adjust = round_up(stack_adjust, 16);
+	stack_adjust = round_up(stack_adjust, STACK_ALIGN);
 	stack_adjust += bpf_stack_adjust;
 
 	store_offset = stack_adjust - 8;