author     David S. Miller <davem@davemloft.net>  2024-01-01 15:45:21 +0100
committer  David S. Miller <davem@davemloft.net>  2024-01-01 15:45:21 +0100
commit     240436c06ce992879d59e504b0df3d32deebb43e
tree       0c1653f11b3e4aca68269bacb978e8e985bb0ee7 /tools/testing/selftests
parent     net: mdio: get/put device node during (un)registration
parent     bpf: Avoid unnecessary use of comma operator in verifier
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
bpf-next-for-netdev
The following pull-request contains BPF updates for your *net-next* tree.
We've added 22 non-merge commits during the last 3 day(s) which contain
a total of 23 files changed, 652 insertions(+), 431 deletions(-).
The main changes are:
1) Add verifier support for annotating a user's global BPF subprogram arguments
   with a few commonly requested annotations, for a better developer experience,
   from Andrii Nakryiko. A short sketch follows the tag list below.
These tags are:
- Ability to annotate a special PTR_TO_CTX argument
- Ability to annotate a generic PTR_TO_MEM as non-NULL
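
   As a rough illustration, distilled from the verifier_global_subprogs.c
   selftest added further below (the __arg_ctx and __arg_nonnull tag macros
   come from the selftests' bpf_misc.h):

     #include <vmlinux.h>
     #include <bpf/bpf_helpers.h>
     #include "bpf_misc.h"  /* provides __arg_ctx / __arg_nonnull tags */

     long stack[128];

     /* can now be called with the context of any program type that calls it */
     __weak int subprog_ctx_tag(void *ctx __arg_ctx)
     {
             return bpf_get_stack(ctx, stack, sizeof(stack), 0);
     }

     /* verifier treats p1/p2 as trusted non-NULL; no runtime checks needed */
     __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
     {
             return (*p1) * (*p2);
     }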
2) Support BPF verifier tracking of BPF_JNE, which helps in cases where the
   compiler transforms (unsigned) "a > 0" into "if a == 0 goto xxx" and the
   like, from Menglong Dong. A C-level sketch of the pattern follows below.
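
   A minimal C-level sketch of the pattern (the actual selftest,
   reg_not_equal_const in verifier_bounds.c below, uses naked asm; names here
   are illustrative):

     #include <linux/bpf.h>
     #include <bpf/bpf_helpers.h>

     SEC("tc")
     int jne_narrowing_example(struct __sk_buff *skb)
     {
             __u64 buf = 0;
             __u32 len = bpf_get_prandom_u32() & 7;  /* verifier range: [0, 7] */

             if (len != 0)
                     /* with BPF_JNE tracking, len is known to be in [1, 7]
                      * here, satisfying the helper's non-zero ARG_CONST_SIZE
                      * argument */
                     bpf_skb_store_bytes(skb, 0, &buf, len, 0);
             return 0;
     }

     char _license[] SEC("license") = "GPL";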
3) Fix a warning in bpf_mem_cache's check_obj_size() as reported by LKP, from Hou Tao.
4) Re-add support for uid/gid options when mounting bpffs; these had to be
   reverted along with the prior BPF token series to avoid conflicts, from
   Daniel Borkmann. A minimal usage sketch follows below.
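
   A minimal C sketch of mounting bpffs with these options (the mount point
   and IDs are illustrative; the uid=/gid= option names are the ones this
   series re-adds):

     #include <sys/mount.h>

     int mount_bpffs_with_owner(void)
     {
             /* mount bpffs so that user/group 1000 owns the mount point */
             return mount("bpffs", "/sys/fs/bpf", "bpf", 0, "uid=1000,gid=1000");
     }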
5) Fix a libbpf NULL pointer dereference in bpf_object__collect_prog_relos()
   found by fuzzing the library with malformed ELF files, from Mingyi Zhang.
6) Skip DWARF sections in libbpf's linker sanity check, since compiler options
   that generate compressed debug sections can otherwise trigger a rejection
   due to misalignment, from Alyssa Ross.
7) Fix an unnecessary use of the comma operator in the BPF verifier, from Simon Horman.
8) Fix the format specifier for unsigned long values in the cpustat sample, from Colin Ian King.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'tools/testing/selftests')
13 files changed, 298 insertions(+), 73 deletions(-)
diff --git a/tools/testing/selftests/bpf/benchs/bench_htab_mem.c b/tools/testing/selftests/bpf/benchs/bench_htab_mem.c
index 9146d3f414d2..926ee822143e 100644
--- a/tools/testing/selftests/bpf/benchs/bench_htab_mem.c
+++ b/tools/testing/selftests/bpf/benchs/bench_htab_mem.c
@@ -335,6 +335,7 @@ static void htab_mem_report_final(struct bench_res res[], int res_cnt)
 	       " peak memory usage %7.2lfMiB\n",
 	       loop_mean, loop_stddev, mem_mean, mem_stddev, peak_mem / 1048576.0);
 
+	close(ctx.fd);
 	cleanup_cgroup_environment();
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 8ec73fdfcdab..f29fc789c14b 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -348,7 +348,8 @@ static void test_func_sockmap_update(void)
 }
 
 static void test_obj_load_failure_common(const char *obj_file,
-					 const char *target_obj_file)
+					 const char *target_obj_file,
+					 const char *exp_msg)
 {
 	/*
 	 * standalone test that asserts failure to load freplace prog
@@ -356,6 +357,7 @@ static void test_obj_load_failure_common(const char *obj_file,
 	 */
 	struct bpf_object *obj = NULL, *pkt_obj;
 	struct bpf_program *prog;
+	char log_buf[64 * 1024];
 	int err, pkt_fd;
 	__u32 duration = 0;
 
@@ -374,11 +376,21 @@ static void test_obj_load_failure_common(const char *obj_file,
 	err = bpf_program__set_attach_target(prog, pkt_fd, NULL);
 	ASSERT_OK(err, "set_attach_target");
 
+	log_buf[0] = '\0';
+	if (exp_msg)
+		bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
+	if (env.verbosity > VERBOSE_NONE)
+		bpf_program__set_log_level(prog, 2);
+
 	/* It should fail to load the program */
 	err = bpf_object__load(obj);
+	if (env.verbosity > VERBOSE_NONE && exp_msg) /* we overtook log */
+		printf("VERIFIER LOG:\n================\n%s\n================\n", log_buf);
 	if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err))
 		goto close_prog;
 
+	if (exp_msg)
+		ASSERT_HAS_SUBSTR(log_buf, exp_msg, "fail_msg");
 close_prog:
 	bpf_object__close(obj);
 	bpf_object__close(pkt_obj);
@@ -388,14 +400,24 @@ static void test_func_replace_return_code(void)
 {
 	/* test invalid return code in the replaced program */
 	test_obj_load_failure_common("./freplace_connect_v4_prog.bpf.o",
-				     "./connect4_prog.bpf.o");
+				     "./connect4_prog.bpf.o", NULL);
 }
 
 static void test_func_map_prog_compatibility(void)
 {
 	/* test with spin lock map value in the replaced program */
 	test_obj_load_failure_common("./freplace_attach_probe.bpf.o",
-				     "./test_attach_probe.bpf.o");
+				     "./test_attach_probe.bpf.o", NULL);
+}
+
+static void test_func_replace_unreliable(void)
+{
+	/* freplace'ing unreliable main prog should fail with error
+	 * "Cannot replace static functions"
+	 */
+	test_obj_load_failure_common("freplace_unreliable_prog.bpf.o",
+				     "./verifier_btf_unreliable_prog.bpf.o",
+				     "Cannot replace static functions");
 }
 
 static void test_func_replace_global_func(void)
@@ -563,6 +585,8 @@ void serial_test_fexit_bpf2bpf(void)
 		test_func_replace_return_code();
 	if (test__start_subtest("func_map_prog_compatibility"))
 		test_func_map_prog_compatibility();
+	if (test__start_subtest("func_replace_unreliable"))
+		test_func_replace_unreliable();
 	if (test__start_subtest("func_replace_multi"))
 		test_func_replace_multi();
 	if (test__start_subtest("fmod_ret_freplace"))
diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
index effd78b2a657..7a3fa2ff567b 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -169,9 +169,9 @@ void test_log_fixup(void)
 	if (test__start_subtest("bad_core_relo_trunc_none"))
 		bad_core_relo(0, TRUNC_NONE /* full buf */);
 	if (test__start_subtest("bad_core_relo_trunc_partial"))
-		bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
+		bad_core_relo(280, TRUNC_PARTIAL /* truncate original log a bit */);
 	if (test__start_subtest("bad_core_relo_trunc_full"))
-		bad_core_relo(210, TRUNC_FULL /* truncate also libbpf's message patch */);
+		bad_core_relo(220, TRUNC_FULL /* truncate also libbpf's message patch */);
 	if (test__start_subtest("bad_core_relo_subprog"))
 		bad_core_relo_subprog();
 	if (test__start_subtest("missing_map"))
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index 0c9abd279e18..820d0bcfc474 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -590,12 +590,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
 		*newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
 		break;
 	case OP_NE:
-		/* generic case, can't derive more information */
-		*newx = range(t, x.a, x.b);
-		*newy = range(t, y.a, y.b);
-		break;
-
-		/* below extended logic is not supported by verifier just yet */
+		/* below logic is supported by the verifier now */
 		if (x.a == x.b && x.a == y.a) {
 			/* X is a constant matching left side of Y */
 			*newx = range(t, x.a, x.b);
@@ -2097,10 +2092,22 @@ static struct subtest_case crafted_cases[] = {
 	{U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
 
-	{S32, U64, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)-255, 0}},
-	{S32, S64, {(u32)(s32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
-	{S32, S64, {0, 1}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}},
-	{S32, U32, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}},
+	{S32, U64, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)(s32)-255, 0}},
+	{S32, S64, {(u32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
+	{S32, S64, {0, 1}, {(u32)S32_MIN, (u32)S32_MIN}},
+	{S32, U32, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)S32_MIN, (u32)S32_MIN}},
+
+	/* edge overlap testings for BPF_NE */
+	{U64, U64, {0, U64_MAX}, {U64_MAX, U64_MAX}},
+	{U64, U64, {0, U64_MAX}, {0, 0}},
+	{S64, U64, {S64_MIN, 0}, {S64_MIN, S64_MIN}},
+	{S64, U64, {S64_MIN, 0}, {0, 0}},
+	{S64, U64, {S64_MIN, S64_MAX}, {S64_MAX, S64_MAX}},
+	{U32, U32, {0, U32_MAX}, {0, 0}},
+	{U32, U32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
+	{S32, U32, {(u32)S32_MIN, 0}, {0, 0}},
+	{S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}},
+	{S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}},
 };
 
 /* Go over crafted hard-coded cases. This is fast, so we do it as part of
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index ac49ec25211d..d62c5bf00e71 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -14,6 +14,7 @@
 #include "verifier_bpf_get_stack.skel.h"
 #include "verifier_bswap.skel.h"
 #include "verifier_btf_ctx_access.skel.h"
+#include "verifier_btf_unreliable_prog.skel.h"
 #include "verifier_cfg.skel.h"
 #include "verifier_cgroup_inv_retcode.skel.h"
 #include "verifier_cgroup_skb.skel.h"
@@ -125,6 +126,7 @@ void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_u
 void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); }
 void test_verifier_bswap(void) { RUN(verifier_bswap); }
 void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); }
+void test_verifier_btf_unreliable_prog(void) { RUN(verifier_btf_unreliable_prog); }
 void test_verifier_cfg(void) { RUN(verifier_cfg); }
 void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
 void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
index 0fa564a5cc5b..9fe9c4a4e8f6 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -78,7 +78,7 @@ int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
 }
 
 SEC("kretprobe/cgroup_destroy_locked")
-__failure __msg("reg type unsupported for arg#0 function")
+__failure __msg("calling kernel function bpf_cgroup_acquire is not allowed")
 int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
 {
 	struct cgroup *acquired;
diff --git a/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c b/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c
new file mode 100644
index 000000000000..624078abf3de
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_unreliable_prog.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+SEC("freplace/btf_unreliable_kprobe")
+/* context type is what BPF verifier expects for kprobe context, but target
+ * program has `stuct whatever *ctx` argument, so freplace operation will be
+ * rejected with the following message:
+ *
+ * arg0 replace_btf_unreliable_kprobe(struct pt_regs *) doesn't match btf_unreliable_kprobe(struct whatever *)
+ */
+int replace_btf_unreliable_kprobe(bpf_user_pt_regs_t *ctx)
+{
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
index dcdea3127086..ad88a3796ddf 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -248,7 +248,7 @@ int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 cl
 }
 
 SEC("lsm/task_free")
-__failure __msg("reg type unsupported for arg#0 function")
+__failure __msg("R1 must be a rcu pointer")
 int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
 {
 	struct task_struct *acquired;
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
index b685a4aba6bd..069db9085e78 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -17,7 +17,7 @@ struct generic_map_value {
 
 char _license[] SEC("license") = "GPL";
 
-const unsigned int data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
+const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
 const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
 
 int err = 0;
@@ -166,7 +166,7 @@ static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int
 		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
 	} while (0)
 
-DEFINE_ARRAY_WITH_KPTR(8);
+/* kptr doesn't support bin_data_8 which is a zero-sized array */
 DEFINE_ARRAY_WITH_KPTR(16);
 DEFINE_ARRAY_WITH_KPTR(32);
 DEFINE_ARRAY_WITH_KPTR(64);
@@ -198,21 +198,20 @@ int test_batch_alloc_free(void *ctx)
 	if ((u32)bpf_get_current_pid_tgid() != pid)
 		return 0;
 
-	/* Alloc 128 8-bytes objects in batch to trigger refilling,
-	 * then free 128 8-bytes objects in batch to trigger freeing.
+	/* Alloc 128 16-bytes objects in batch to trigger refilling,
+	 * then free 128 16-bytes objects in batch to trigger freeing.
 	 */
-	CALL_BATCH_ALLOC_FREE(8, 128, 0);
-	CALL_BATCH_ALLOC_FREE(16, 128, 1);
-	CALL_BATCH_ALLOC_FREE(32, 128, 2);
-	CALL_BATCH_ALLOC_FREE(64, 128, 3);
-	CALL_BATCH_ALLOC_FREE(96, 128, 4);
-	CALL_BATCH_ALLOC_FREE(128, 128, 5);
-	CALL_BATCH_ALLOC_FREE(192, 128, 6);
-	CALL_BATCH_ALLOC_FREE(256, 128, 7);
-	CALL_BATCH_ALLOC_FREE(512, 64, 8);
-	CALL_BATCH_ALLOC_FREE(1024, 32, 9);
-	CALL_BATCH_ALLOC_FREE(2048, 16, 10);
-	CALL_BATCH_ALLOC_FREE(4096, 8, 11);
+	CALL_BATCH_ALLOC_FREE(16, 128, 0);
+	CALL_BATCH_ALLOC_FREE(32, 128, 1);
+	CALL_BATCH_ALLOC_FREE(64, 128, 2);
+	CALL_BATCH_ALLOC_FREE(96, 128, 3);
+	CALL_BATCH_ALLOC_FREE(128, 128, 4);
+	CALL_BATCH_ALLOC_FREE(192, 128, 5);
+	CALL_BATCH_ALLOC_FREE(256, 128, 6);
+	CALL_BATCH_ALLOC_FREE(512, 64, 7);
+	CALL_BATCH_ALLOC_FREE(1024, 32, 8);
+	CALL_BATCH_ALLOC_FREE(2048, 16, 9);
+	CALL_BATCH_ALLOC_FREE(4096, 8, 10);
 
 	return 0;
 }
@@ -223,21 +222,20 @@ int test_free_through_map_free(void *ctx)
 	if ((u32)bpf_get_current_pid_tgid() != pid)
 		return 0;
 
-	/* Alloc 128 8-bytes objects in batch to trigger refilling,
+	/* Alloc 128 16-bytes objects in batch to trigger refilling,
 	 * then free these objects through map free.
 	 */
-	CALL_BATCH_ALLOC(8, 128, 0);
-	CALL_BATCH_ALLOC(16, 128, 1);
-	CALL_BATCH_ALLOC(32, 128, 2);
-	CALL_BATCH_ALLOC(64, 128, 3);
-	CALL_BATCH_ALLOC(96, 128, 4);
-	CALL_BATCH_ALLOC(128, 128, 5);
-	CALL_BATCH_ALLOC(192, 128, 6);
-	CALL_BATCH_ALLOC(256, 128, 7);
-	CALL_BATCH_ALLOC(512, 64, 8);
-	CALL_BATCH_ALLOC(1024, 32, 9);
-	CALL_BATCH_ALLOC(2048, 16, 10);
-	CALL_BATCH_ALLOC(4096, 8, 11);
+	CALL_BATCH_ALLOC(16, 128, 0);
+	CALL_BATCH_ALLOC(32, 128, 1);
+	CALL_BATCH_ALLOC(64, 128, 2);
+	CALL_BATCH_ALLOC(96, 128, 3);
+	CALL_BATCH_ALLOC(128, 128, 4);
+	CALL_BATCH_ALLOC(192, 128, 5);
+	CALL_BATCH_ALLOC(256, 128, 6);
+	CALL_BATCH_ALLOC(512, 64, 7);
+	CALL_BATCH_ALLOC(1024, 32, 8);
+	CALL_BATCH_ALLOC(2048, 16, 9);
+	CALL_BATCH_ALLOC(4096, 8, 10);
 
 	return 0;
 }
@@ -251,17 +249,17 @@ int test_batch_percpu_alloc_free(void *ctx)
 	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
 	 * then free 128 16-bytes per-cpu objects in batch to trigger freeing.
 	 */
-	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
-	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
-	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
-	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
-	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
-	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
-	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
-	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
-	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
-	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
-	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
+	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 0);
+	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 7);
+	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 8);
+	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 9);
+	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 10);
 
 	return 0;
 }
@@ -275,17 +273,17 @@ int test_percpu_free_through_map_free(void *ctx)
 	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
 	 * then free these object through map free.
 	 */
-	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
-	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
-	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
-	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
-	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
-	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
-	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
-	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
-	CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
-	CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
-	CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
+	CALL_BATCH_PERCPU_ALLOC(16, 128, 0);
+	CALL_BATCH_PERCPU_ALLOC(32, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC(64, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC(96, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC(128, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC(192, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC(256, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC(512, 64, 7);
+	CALL_BATCH_PERCPU_ALLOC(1024, 32, 8);
+	CALL_BATCH_PERCPU_ALLOC(2048, 16, 9);
+	CALL_BATCH_PERCPU_ALLOC(4096, 8, 10);
 
 	return 0;
 }
diff --git a/tools/testing/selftests/bpf/progs/test_global_func5.c b/tools/testing/selftests/bpf/progs/test_global_func5.c
index cc55aedaf82d..257c0569ff98 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func5.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func5.c
@@ -26,7 +26,7 @@ int f3(int val, struct __sk_buff *skb)
 }
 
 SEC("tc")
-__failure __msg("expected pointer to ctx, but got PTR")
+__failure __msg("expects pointer to ctx")
 int global_func5(struct __sk_buff *skb)
 {
 	return f1(skb) + f2(2, skb) + f3(3, skb);
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index ec430b71730b..960998f16306 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -1075,4 +1075,66 @@ l0_%=:	r0 = 0;					\
 	: __clobber_all);
 }
 
+SEC("tc")
+__description("bounds check with JMP_NE for reg edge")
+__success __retval(0)
+__naked void reg_not_equal_const(void)
+{
+	asm volatile ("					\
+	r6 = r1;					\
+	r1 = 0;						\
+	*(u64*)(r10 - 8) = r1;				\
+	call %[bpf_get_prandom_u32];			\
+	r4 = r0;					\
+	r4 &= 7;					\
+	if r4 != 0 goto l0_%=;				\
+	r0 = 0;						\
+	exit;						\
+l0_%=:	r1 = r6;					\
+	r2 = 0;						\
+	r3 = r10;					\
+	r3 += -8;					\
+	r5 = 0;						\
+	/* The 4th argument of bpf_skb_store_bytes is defined as \
+	 * ARG_CONST_SIZE, so 0 is not allowed. The 'r4 != 0' \
+	 * is providing us this exclusion of zero from initial \
+	 * [0, 7] range.				\
+	 */						\
+	call %[bpf_skb_store_bytes];			\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32),
+	  __imm(bpf_skb_store_bytes)
+	: __clobber_all);
+}
+
+SEC("tc")
+__description("bounds check with JMP_EQ for reg edge")
+__success __retval(0)
+__naked void reg_equal_const(void)
+{
+	asm volatile ("					\
+	r6 = r1;					\
+	r1 = 0;						\
+	*(u64*)(r10 - 8) = r1;				\
+	call %[bpf_get_prandom_u32];			\
+	r4 = r0;					\
+	r4 &= 7;					\
+	if r4 == 0 goto l0_%=;				\
+	r1 = r6;					\
+	r2 = 0;						\
+	r3 = r10;					\
+	r3 += -8;					\
+	r5 = 0;						\
+	/* Just the same as what we do in reg_not_equal_const() */ \
+	call %[bpf_skb_store_bytes];			\
+l0_%=:	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32),
+	  __imm(bpf_skb_store_bytes)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_unreliable_prog.c b/tools/testing/selftests/bpf/progs/verifier_btf_unreliable_prog.c
new file mode 100644
index 000000000000..36e033a2e02c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_btf_unreliable_prog.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+
+struct whatever {};
+
+SEC("kprobe")
+__success __log_level(2)
+/* context type is wrong, making it impossible to freplace this program */
+int btf_unreliable_kprobe(struct whatever *ctx)
+{
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
index bd696a431244..9eeb2d89cda8 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
@@ -1,12 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
 
-#include <stdbool.h>
-#include <errno.h>
-#include <string.h>
-#include <linux/bpf.h>
+#include <vmlinux.h>
 #include <bpf/bpf_helpers.h>
 #include "bpf_misc.h"
+#include "xdp_metadata.h"
+#include "bpf_kfuncs.h"
 
 int arr[1];
 int unkn_idx;
@@ -98,4 +97,96 @@ int unguarded_unsupp_global_called(void)
 	return global_unsupp(&x);
 }
 
+long stack[128];
+
+__weak int subprog_nullable_ptr_bad(int *p)
+{
+	return (*p) * 2; /* bad, missing null check */
+}
+
+SEC("?raw_tp")
+__failure __log_level(2)
+__msg("invalid mem access 'mem_or_null'")
+int arg_tag_nullable_ptr_fail(void *ctx)
+{
+	int x = 42;
+
+	return subprog_nullable_ptr_bad(&x);
+}
+
+__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
+{
+	return (*p1) * (*p2); /* good, no need for NULL checks */
+}
+
+int x = 47;
+
+SEC("?raw_tp")
+__success __log_level(2)
+int arg_tag_nonnull_ptr_good(void *ctx)
+{
+	int y = 74;
+
+	return subprog_nonnull_ptr_good(&x, &y);
+}
+
+/* this global subprog can be now called from many types of entry progs, each
+ * with different context type
+ */
+__weak int subprog_ctx_tag(void *ctx __arg_ctx)
+{
+	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+int arg_tag_ctx_raw_tp(void *ctx)
+{
+	return subprog_ctx_tag(ctx);
+}
+
+SEC("?tp")
+__success __log_level(2)
+int arg_tag_ctx_tp(void *ctx)
+{
+	return subprog_ctx_tag(ctx);
+}
+
+SEC("?kprobe")
+__success __log_level(2)
+int arg_tag_ctx_kprobe(void *ctx)
+{
+	return subprog_ctx_tag(ctx);
+}
+
+__weak int subprog_dynptr(struct bpf_dynptr *dptr)
+{
+	long *d, t, buf[1] = {};
+
+	d = bpf_dynptr_data(dptr, 0, sizeof(long));
+	if (!d)
+		return 0;
+
+	t = *d + 1;
+
+	d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
+	if (!d)
+		return t;
+
+	t = *d + 2;
+
+	return t;
+}
+
+SEC("?xdp")
+__success __log_level(2)
+int arg_tag_dynptr(struct xdp_md *ctx)
+{
+	struct bpf_dynptr dptr;
+
+	bpf_dynptr_from_xdp(ctx, 0, &dptr);
+
+	return subprog_dynptr(&dptr);
+}
+
 char _license[] SEC("license") = "GPL";