author    | Jiri Olsa <jolsa@kernel.org>            | 2022-12-15 22:44:29 +0100
committer | Daniel Borkmann <daniel@iogearbox.net>  | 2022-12-19 22:08:06 +0100
commit    | f19a4050455aad847fb93f18dc1fe502eb60f989
tree      | 377364cd9f55e19f13e6abb7ef29fada6c3b9f53 /kernel
parent    | bpf: Add struct for bin_args arg in bpf_bprintf_prepare
bpf: Do cleanup in bpf_bprintf_cleanup only when needed
Currently we always clean up/decrement the bpf_bprintf_nest_level variable
in bpf_bprintf_cleanup if it's > 0.
There's a possible scenario where this causes a problem: when
bpf_bprintf_prepare does not get a bin_args buffer (because num_args is 0),
the following bpf_bprintf_cleanup call still decrements the
bpf_bprintf_nest_level variable, like:
  in task context:
    bpf_bprintf_prepare(num_args != 0) increments 'bpf_bprintf_nest_level = 1'

    -> first irq:
       bpf_bprintf_prepare(num_args == 0)
       bpf_bprintf_cleanup decrements 'bpf_bprintf_nest_level = 0'

    -> second irq:
       bpf_bprintf_prepare(num_args != 0) sets 'bpf_bprintf_nest_level = 1'
       and gets the same buffer as the task context above
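
To make the interleaving above concrete, here is a minimal userspace sketch of
the counter imbalance; prepare(), cleanup_old() and the plain int nest_level
are illustrative stand-ins for bpf_bprintf_prepare(), the pre-fix
bpf_bprintf_cleanup() and the per-CPU bpf_bprintf_nest_level, not the kernel
code itself:

  #include <stdbool.h>
  #include <stdio.h>

  static int nest_level;          /* stand-in for bpf_bprintf_nest_level */

  /* Reserves buffer slot 'nest_level' and bumps the counter, but only
   * when a bin_args buffer is actually needed. */
  static bool prepare(int num_args)
  {
          if (num_args == 0)
                  return false;   /* no bin_args buffer reserved */
          nest_level++;           /* caller now owns slot nest_level - 1 */
          return true;
  }

  /* Pre-fix behaviour: decrement whenever the counter is non-zero,
   * regardless of whether this caller reserved a buffer. */
  static void cleanup_old(void)
  {
          if (nest_level > 0)
                  nest_level--;
  }

  int main(void)
  {
          prepare(3);      /* task context: reserves slot 0, nest_level = 1 */
          prepare(0);      /* first irq: reserves nothing ...               */
          cleanup_old();   /* ... yet drops the counter back to 0           */
          prepare(2);      /* second irq: reserves slot 0 again             */

          printf("nest_level = %d (task and second irq share slot 0)\n",
                 nest_level);
          return 0;
  }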
Add a check to bpf_bprintf_cleanup and do the real cleanup only if we got
bin_args data in the first place.
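
And a rough sketch of the fixed pairing, under the same illustrative stand-in
names (not the kernel implementation): the caller-visible state the patch keys
off is data->bin_args, so cleanup only undoes the counter when prepare
actually reserved a buffer:

  #include <stdio.h>

  struct printf_data {
          void *bin_args;         /* non-NULL only when a buffer was reserved */
  };

  static int nest_level;          /* stand-in for bpf_bprintf_nest_level */
  static char buffers[3][512];    /* stand-in for the per-CPU bprintf buffers */

  static void prepare(int num_args, struct printf_data *data)
  {
          data->bin_args = NULL;
          if (num_args == 0)
                  return;         /* nothing reserved, counter untouched */
          data->bin_args = buffers[nest_level++];
  }

  static void cleanup(struct printf_data *data)
  {
          if (!data->bin_args)
                  return;         /* prepare reserved nothing: nothing to undo */
          if (nest_level == 0)    /* the kernel version uses WARN_ON_ONCE() here */
                  return;
          nest_level--;
  }

  int main(void)
  {
          struct printf_data task, irq1, irq2;

          prepare(3, &task);      /* task context reserves slot 0, nest_level = 1 */
          prepare(0, &irq1);      /* first irq reserves nothing */
          cleanup(&irq1);         /* no-op now, counter stays at 1 */
          prepare(2, &irq2);      /* second irq gets its own slot 1 */

          printf("task buffer %p, second irq buffer %p, nest_level = %d\n",
                 task.bin_args, irq2.bin_args, nest_level);
          return 0;
  }

With this pairing the first irq's cleanup becomes a no-op, so the task context
and the second irq end up with distinct buffer slots.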
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20221215214430.1336195-3-jolsa@kernel.org
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/bpf/helpers.c     | 16 |
-rw-r--r-- | kernel/trace/bpf_trace.c |  6 |
2 files changed, 12 insertions, 10 deletions
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 7dbf6bb72cad..9cca02e13f2e 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -784,12 +784,14 @@ static int try_get_fmt_tmp_buf(char **tmp_buf)
 	return 0;
 }
 
-void bpf_bprintf_cleanup(void)
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
 {
-	if (this_cpu_read(bpf_bprintf_nest_level)) {
-		this_cpu_dec(bpf_bprintf_nest_level);
-		preempt_enable();
-	}
+	if (!data->bin_args)
+		return;
+	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
+		return;
+	this_cpu_dec(bpf_bprintf_nest_level);
+	preempt_enable();
 }
 
 /*
@@ -1021,7 +1023,7 @@ nocopy_fmt:
 	err = 0;
 out:
 	if (err)
-		bpf_bprintf_cleanup();
+		bpf_bprintf_cleanup(data);
 	return err;
 }
 
@@ -1047,7 +1049,7 @@ BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
 
 	err = bstr_printf(str, str_size, fmt, data.bin_args);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return err + 1;
 }
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3e849c3a7cc8..2129f7c68bb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -396,7 +396,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 	trace_bpf_trace_printk(buf);
 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return ret;
 }
@@ -454,7 +454,7 @@ BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
 	trace_bpf_trace_printk(buf);
 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return ret;
 }
@@ -494,7 +494,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 
 	seq_bprintf(m, fmt, data.bin_args);
 
-	bpf_bprintf_cleanup();
+	bpf_bprintf_cleanup(&data);
 
 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
 }