path: root/kernel/bpf/helpers.c
author    Alexei Starovoitov <ast@kernel.org>    2024-04-24 18:00:23 +0200
committer Alexei Starovoitov <ast@kernel.org>    2024-04-24 18:06:50 +0200
commit    dc92febf7b93da5049fe177804e6b1961fcc6bd7 (patch)
tree      cd49ada6055ba7c3933b03d82998efb303ec19a1 /kernel/bpf/helpers.c
parent    Merge branch 'introduce-bpf_wq' (diff)
download  linux-dc92febf7b93da5049fe177804e6b1961fcc6bd7.tar.xz
          linux-dc92febf7b93da5049fe177804e6b1961fcc6bd7.zip
bpf: Don't check for recursion in bpf_wq_work.
__bpf_prog_enter_sleepable_recur() does a recursion check that is not applicable to the wq callback. The callback function is part of the bpf program, and that bpf prog might already be running on the same CPU, so the recursion check would incorrectly prevent the callback from running.

The code could call __bpf_prog_enter_sleepable() instead, but the run_ctx would be fake; hence use explicit rcu_read_lock_trace(); migrate_disable(); to address this problem. Another reason to open code it is that __bpf_prog_enter* are not available in !JIT configs.

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202404241719.IIGdpAku-lkp@intel.com/
Closes: https://lore.kernel.org/oe-kbuild-all/202404241811.FFV4Bku3-lkp@intel.com/
Fixes: eb48f6cd41a0 ("bpf: wq: add bpf_wq_init")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
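For context, the check being dropped works roughly as sketched below. This is a condensed paraphrase of __bpf_prog_enter_sleepable_recur() in kernel/bpf/trampoline.c (might_fault() and the run_ctx bookkeeping are omitted), not the verbatim code; it only illustrates why a wq callback belonging to an already-active prog would be rejected:

	/* Condensed sketch, simplified from __bpf_prog_enter_sleepable_recur().
	 * prog->active is a per-CPU counter of active invocations of this prog.
	 */
	rcu_read_lock_trace();
	migrate_disable();
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		/* Another invocation of the same prog is already running on
		 * this CPU.  For a trampoline entry that means recursion, but
		 * for a wq callback it is a legitimate state, so bailing out
		 * here would wrongly skip the callback.
		 */
		bpf_prog_inc_misses_counter(prog);
		return 0;
	}

The fix therefore keeps only the rcu_read_lock_trace()/migrate_disable() bracket and drops the prog->active accounting entirely for the wq path.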
Diffstat (limited to 'kernel/bpf/helpers.c')
-rw-r--r--  kernel/bpf/helpers.c | 18
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 047a21b7e4ba..5f61ffa7505a 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1178,9 +1178,7 @@ out:
static void bpf_wq_work(struct work_struct *work)
{
struct bpf_work *w = container_of(work, struct bpf_work, work);
- struct bpf_tramp_run_ctx __maybe_unused run_ctx;
struct bpf_async_cb *cb = &w->cb;
- struct bpf_prog *prog = cb->prog;
struct bpf_map *map = cb->map;
bpf_callback_t callback_fn;
void *value = cb->value;
@@ -1190,7 +1188,7 @@ static void bpf_wq_work(struct work_struct *work)
BTF_TYPE_EMIT(struct bpf_wq);
callback_fn = READ_ONCE(cb->callback_fn);
- if (!callback_fn || !prog)
+ if (!callback_fn)
return;
if (map->map_type == BPF_MAP_TYPE_ARRAY) {
@@ -1203,19 +1201,13 @@ static void bpf_wq_work(struct work_struct *work)
key = value - round_up(map->key_size, 8);
}
- run_ctx.bpf_cookie = 0;
-
- if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
- /* recursion detected */
- __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
- return;
- }
+ rcu_read_lock_trace();
+ migrate_disable();
callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
- /* The verifier checked that return value is zero. */
- __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
- &run_ctx);
+ migrate_enable();
+ rcu_read_unlock_trace();
}
static void bpf_wq_delete_work(struct work_struct *work)
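
For readability, here is what bpf_wq_work() looks like with this patch applied, stitched together from the hunks above. Lines not shown in the diff (the key/idx declarations and the body of the BPF_MAP_TYPE_ARRAY branch) are filled in by assumption from the surrounding helpers.c code, so treat this as a sketch rather than the authoritative source:

static void bpf_wq_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, work);
	struct bpf_async_cb *cb = &w->cb;
	struct bpf_map *map = cb->map;
	bpf_callback_t callback_fn;
	void *value = cb->value;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_wq);

	callback_fn = READ_ONCE(cb->callback_fn);
	if (!callback_fn)
		return;

	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* derive the array index from the value pointer (assumed) */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru map: key precedes the value in the element */
		key = value - round_up(map->key_size, 8);
	}

	/* Open-coded equivalent of __bpf_prog_enter_sleepable() minus the
	 * run_ctx handling: keep the prog's data live under rcu_tasks_trace
	 * and prevent migration while the callback runs.  No recursion check.
	 */
	rcu_read_lock_trace();
	migrate_disable();

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);

	migrate_enable();
	rcu_read_unlock_trace();
}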