author		Kumar Kartikeya Dwivedi <memxor@gmail.com>	2023-09-13 01:31:59 +0200
committer	Alexei Starovoitov <ast@kernel.org>		2023-09-16 18:34:21 +0200
commit		fd5d27b70188379bb441d404c29a0afb111e1753
tree		d0329f47aa64626a543d404867c05a2b6a0a74fb /arch
parent		bpf: Use bpf_is_subprog to check for subprogs
arch/x86: Implement arch_bpf_stack_walk
The plumbing for offline unwinding when we throw an exception in programs requires walking the stack, hence introduce a new arch_bpf_stack_walk function. It is provided when the JIT supports exceptions, i.e. bpf_jit_supports_exceptions is true. The arch-specific code is really minimal, so it should be straightforward to extend this support to other architectures as well: it reuses the logic of arch_stack_walk while allowing access to the unwind_state data.

Once the stack pointer and frame pointer of the main subprog are known during unwinding, we know the stack layout and the location of any callee-saved registers that must be restored before we return to the kernel. This handling will be added in subsequent patches.

Note that while we primarily unwind through BPF frames, which effectively behave like CONFIG_UNWINDER_FRAME_POINTER frames, we still need either that option or CONFIG_UNWINDER_ORC to unwind through the bpf_throw frame from which we begin walking the stack. We also require both sp and bp (stack and frame pointers) from the unwind_state structure, which are only available when one of these two options is enabled.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20230912233214.1518551-3-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
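For orientation before reading the diff, the contract of the new hook can be restated as follows. The prototype is taken from the definition added below; the doc comment is explanatory and not part of the patch:

/*
 * Walk the current task's stack, invoking consume_fn once per frame with
 * that frame's return address (ip), stack pointer (sp) and frame pointer
 * (bp). The walk terminates early when consume_fn returns false.
 */
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp),
                         void *cookie);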
Diffstat (limited to 'arch')
 arch/x86/net/bpf_jit_comp.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a0d03503b3cb..d0c24b5a6abb 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -16,6 +16,7 @@
 #include <asm/set_memory.h>
 #include <asm/nospec-branch.h>
 #include <asm/text-patching.h>
+#include <asm/unwind.h>
 
 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
@@ -2933,3 +2934,30 @@ void bpf_jit_free(struct bpf_prog *prog)
 
         bpf_prog_unlock_free(prog);
 }
+
+bool bpf_jit_supports_exceptions(void)
+{
+        /* We unwind through both kernel frames (starting from within bpf_throw
+         * call) and BPF frames. Therefore we require one of ORC or FP unwinder
+         * to be enabled to walk kernel frames and reach BPF frames in the stack
+         * trace.
+         */
+        return IS_ENABLED(CONFIG_UNWINDER_ORC) || IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER);
+}
+
+void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
+{
+#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
+        struct unwind_state state;
+        unsigned long addr;
+
+        for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
+             unwind_next_frame(&state)) {
+                addr = unwind_get_return_address(&state);
+                if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
+                        break;
+        }
+        return;
+#endif
+        WARN(1, "verification of programs using bpf_throw should have failed\n");
+}
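
As a usage illustration, here is a minimal sketch of a caller that walks down to the first BPF frame, in the spirit of the offline unwinding described in the commit message. struct walk_ctx and find_bpf_frame are hypothetical names invented for this sketch, not code from this series; is_bpf_text_address() is the existing kernel helper that tests whether an address falls inside a JITed BPF image.

struct walk_ctx {
        u64 sp;         /* stack pointer of the matched BPF frame */
        u64 bp;         /* frame pointer of the matched BPF frame */
        bool found;
};

static bool find_bpf_frame(void *cookie, u64 ip, u64 sp, u64 bp)
{
        struct walk_ctx *ctx = cookie;

        if (!is_bpf_text_address(ip))
                return true;    /* not a BPF frame, keep walking */

        ctx->sp = sp;
        ctx->bp = bp;
        ctx->found = true;
        return false;           /* stop the walk */
}

A caller would then do:

        struct walk_ctx ctx = {};

        arch_bpf_stack_walk(find_bpf_frame, &ctx);
        if (ctx.found) {
                /* ctx.sp/ctx.bp fix the frame's stack layout, from which
                 * callee-saved registers can be located and restored.
                 */
        }

Once the matching frame is found, the known stack layout is what lets the subsequent patches restore callee-saved registers before returning to the kernel.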