commit 6e95ef0258ff4ee23ae3b06bf6b00b33dbbd5ef7
tree   07f66723c602ab3b085d890d7fef898a61bb539c /tools/testing/selftests
parent Merge tag 'soc-arm-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc...
parent libbpf: Change hash_combine parameters from long to unsigned long
author    Linus Torvalds <torvalds@linux-foundation.org>  2024-11-21 17:11:04 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-11-21 17:11:04 +0100
Merge tag 'bpf-next-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Pull bpf updates from Alexei Starovoitov:
- Add BPF uprobe session support (Jiri Olsa)
- Optimize uprobe performance (Andrii Nakryiko)
- Add bpf_fastcall support to helpers and kfuncs (Eduard Zingerman)
- Avoid calling free_htab_elem() under hash map bucket lock (Hou Tao)
- Prevent tailcall infinite loop caused by freplace (Leon Hwang)
- Mark raw_tracepoint arguments as nullable (Kumar Kartikeya Dwivedi)
  (see the sketch after this list)
- Introduce uptr support in the task local storage map (Martin KaFai
Lau)
- Stringify errno log messages in libbpf (Mykyta Yatsenko)
- Add kmem_cache BPF iterator for perf's lock profiling (Namhyung Kim)
- Support BPF objects of either endianness in libbpf (Tony Ambardar)
- Add ksym to struct_ops trampoline to fix stack trace (Xu Kuohai)
- Introduce private stack for eligible BPF programs (Yonghong Song)
- Migrate samples/bpf tests to selftests/bpf test_progs (Daniel T. Lee)
- Migrate test_sock to selftests/bpf test_progs (Jordan Rife)
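Concretely, the raw_tracepoint nullability change means that a program attached
to a tracepoint whose argument is marked nullable must test the pointer before
dereferencing it. The following is a minimal sketch, not the selftest source;
it assumes the usual vmlinux.h plus libbpf helper headers and uses the
bpf_testmod_test_raw_tp_null tracepoint that the diff below adds to bpf_testmod:

    /* Hedged sketch of handling a nullable raw_tp argument; illustrative only. */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    SEC("tp_btf/bpf_testmod_test_raw_tp_null")
    int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
    {
            /* skb is nullable here (the testmod fires this tracepoint with
             * NULL); the verifier now rejects a direct skb->len load until
             * this check has been done.
             */
            if (!skb)
                    return 0;
            return 0;
    }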
* tag 'bpf-next-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (152 commits)
libbpf: Change hash_combine parameters from long to unsigned long
selftests/bpf: Fix build error with llvm 19
libbpf: Fix memory leak in bpf_program__attach_uprobe_multi
bpf: use common instruction history across all states
bpf: Add necessary migrate_disable to range_tree.
bpf: Do not alloc arena on unsupported arches
selftests/bpf: Set test path for token/obj_priv_implicit_token_envvar
selftests/bpf: Add a test for arena range tree algorithm
bpf: Introduce range_tree data structure and use it in bpf arena
samples/bpf: Remove unused variable in xdp2skb_meta_kern.c
samples/bpf: Remove unused variables in tc_l2_redirect_kern.c
bpftool: Cast variable `var` to long long
bpf, x86: Propagate tailcall info only for subprogs
bpf: Add kernel symbol for struct_ops trampoline
bpf: Use function pointers count as struct_ops links count
bpf: Remove unused member rcu from bpf_struct_ops_map
selftests/bpf: Add struct_ops prog private stack tests
bpf: Support private stack for struct_ops progs
selftests/bpf: Add tracing prog private stack tests
bpf, x86: Support private stack in jit
...
Diffstat:
 117 files changed, 3925 insertions(+), 612 deletions(-)
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index e6533b3400de..d45c9a9b304d 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -16,7 +16,6 @@ fixdep
 /test_progs-cpuv4
 test_verifier_log
 feature
-test_sock
 urandom_read
 test_sockmap
 test_lirc_mode2_user
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 75016962f795..b1080284522d 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -10,6 +10,7 @@ TOOLSDIR := $(abspath ../../..)
 LIBDIR := $(TOOLSDIR)/lib
 BPFDIR := $(LIBDIR)/bpf
 TOOLSINCDIR := $(TOOLSDIR)/include
+TOOLSARCHINCDIR := $(TOOLSDIR)/arch/$(SRCARCH)/include
 BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool
 APIDIR := $(TOOLSINCDIR)/uapi
 ifneq ($(O),)
@@ -44,7 +45,7 @@ CFLAGS += -g $(OPT_FLAGS) -rdynamic \
	 -Wall -Werror -fno-omit-frame-pointer \
	 $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \
	 -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-	 -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
+	 -I$(TOOLSINCDIR) -I$(TOOLSARCHINCDIR) -I$(APIDIR) -I$(OUTPUT)
 LDFLAGS += $(SAN_LDFLAGS)
 LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread
@@ -83,7 +84,7 @@ endif
 # Order correspond to 'make run_tests' order
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
-	test_sock test_sockmap \
+	test_sockmap \
	 test_tcpnotify_user test_sysctl \
	 test_progs-no_alu32
 TEST_INST_SUBDIRS := no_alu32
@@ -132,7 +133,6 @@ TEST_PROGS := test_kmod.sh \
	 test_tunnel.sh \
	 test_lwt_seg6local.sh \
	 test_lirc_mode2.sh \
-	 test_skb_cgroup_id.sh \
	 test_flow_dissector.sh \
	 test_xdp_vlan_mode_generic.sh \
	 test_xdp_vlan_mode_native.sh \
@@ -274,6 +274,7 @@ $(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c liburandom
	$(Q)$(CLANG) $(CLANG_TARGET_ARCH) \
		$(filter-out -static,$(CFLAGS) $(LDFLAGS)) \
		$(filter %.c,$^) $(filter-out -static,$(LDLIBS)) \
+		-Wno-unused-command-line-argument \
		-fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
		-Wl,--version-script=liburandom_read.map \
		-fPIC -shared -o $@
@@ -282,6 +283,7 @@ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_r
	$(call msg,BINARY,,$@)
	$(Q)$(CLANG) $(CLANG_TARGET_ARCH) \
		$(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
+		-Wno-unused-command-line-argument \
		-lurandom_read $(filter-out -static,$(LDLIBS)) -L$(OUTPUT) \
		-fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
		-Wl,-rpath=. -o $@
@@ -295,25 +297,33 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
 $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
	$(call msg,MOD,,$@)
	$(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation
-	$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_testmod
+	$(Q)$(MAKE) $(submake_extras) -C bpf_testmod \
+		RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
+		EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
	$(Q)cp bpf_testmod/bpf_testmod.ko $@

 $(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch])
	$(call msg,MOD,,$@)
	$(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation
-	$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_no_cfi
+	$(Q)$(MAKE) $(submake_extras) -C bpf_test_no_cfi \
+		RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
+		EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
	$(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@

 $(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch])
	$(call msg,MOD,,$@)
	$(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation
-	$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_x
+	$(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_x \
+		RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
+		EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
	$(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@

 $(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch])
	$(call msg,MOD,,$@)
	$(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation
-	$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_y
+	$(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_y \
+		RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \
+		EXTRA_CFLAGS='' EXTRA_LDFLAGS=''
	$(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@
@@ -333,8 +343,8 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
		    BPFTOOL_OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
		    BPFOBJ_OUTPUT=$(BUILD_DIR)/libbpf/ \
		    BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) \
-		    EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS)' \
-		    EXTRA_LDFLAGS='$(SAN_LDFLAGS)' && \
+		    EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \
+		    EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' && \
		    cp $(RUNQSLOWER_OUTPUT)runqslower $@

 TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL)
@@ -349,7 +359,6 @@ JSON_WRITER := $(OUTPUT)/json_writer.o
 CAP_HELPERS := $(OUTPUT)/cap_helpers.o
 NETWORK_HELPERS := $(OUTPUT)/network_helpers.o

-$(OUTPUT)/test_sock: $(CGROUP_HELPERS) $(TESTING_HELPERS)
 $(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS)
 $(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS)
 $(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS)
@@ -368,7 +377,8 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
		    $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
	$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
		ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" \
-		EXTRA_CFLAGS='-g $(OPT_FLAGS)' \
+		EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \
+		EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \
		OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
		LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
		LIBBPF_DESTDIR=$(HOST_SCRATCH_DIR)/ \
@@ -379,7 +389,8 @@ $(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
		  $(BPFOBJ) | $(BUILD_DIR)/bpftool
	$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
		ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) \
-		EXTRA_CFLAGS='-g $(OPT_FLAGS)' \
+		EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \
+		EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \
		OUTPUT=$(BUILD_DIR)/bpftool/ \
		LIBBPF_OUTPUT=$(BUILD_DIR)/libbpf/ \
		LIBBPF_DESTDIR=$(SCRATCH_DIR)/ \
@@ -402,8 +413,8 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
	   $(APIDIR)/linux/bpf.h \
	   | $(BUILD_DIR)/libbpf
	$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
-		EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS)' \
-		EXTRA_LDFLAGS='$(SAN_LDFLAGS)' \
+		EXTRA_CFLAGS='-g $(OPT_FLAGS) $(SAN_CFLAGS) $(EXTRA_CFLAGS)' \
+		EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
		DESTDIR=$(SCRATCH_DIR) prefix= all install_headers

 ifneq ($(BPFOBJ),$(HOST_BPFOBJ))
@@ -411,7 +422,9 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
		$(APIDIR)/linux/bpf.h \
		| $(HOST_BUILD_DIR)/libbpf
	$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
-		EXTRA_CFLAGS='-g $(OPT_FLAGS)' ARCH= CROSS_COMPILE= \
+		ARCH= CROSS_COMPILE= \
+		EXTRA_CFLAGS='-g $(OPT_FLAGS) $(EXTRA_CFLAGS)' \
+		EXTRA_LDFLAGS='$(EXTRA_LDFLAGS)' \
		OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
		CC="$(HOSTCC)" LD="$(HOSTLD)" \
		DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
@@ -460,6 +473,7 @@ endef
 IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
			grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
 MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
+BPF_TARGET_ENDIAN=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb)

 ifneq ($(CROSS_COMPILE),)
 CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
@@ -487,17 +501,17 @@ $(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h
 # $4 - binary name
 define CLANG_BPF_BUILD_RULE
	$(call msg,CLNG-BPF,$4,$2)
-	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v3 -o $2
+	$(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v3 -o $2
 endef
 # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
 define CLANG_NOALU32_BPF_BUILD_RULE
	$(call msg,CLNG-BPF,$4,$2)
-	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2
+	$(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v2 -o $2
 endef
 # Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4
 define CLANG_CPUV4_BPF_BUILD_RULE
	$(call msg,CLNG-BPF,$4,$2)
-	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2
+	$(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v4 -o $2
 endef
 # Build BPF object using GCC
 define GCC_BPF_BUILD_RULE
@@ -637,10 +651,11 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_OUTPUT)/%: $$$$(%-deps) $(BPFTOOL) | $(TRUNNER_OUTPUT)

 # When the compiler generates a %.d file, only skel basenames (not
 # full paths) are specified as prerequisites for corresponding %.o
-# file. This target makes %.skel.h basename dependent on full paths,
-# linking generated %.d dependency with actual %.skel.h files.
-$(notdir %.skel.h): $(TRUNNER_OUTPUT)/%.skel.h
-	@true
+# file. vpath directives below instruct make to search for skel files
+# in TRUNNER_OUTPUT, if they are not present in the working directory.
+vpath %.skel.h $(TRUNNER_OUTPUT)
+vpath %.lskel.h $(TRUNNER_OUTPUT)
+vpath %.subskel.h $(TRUNNER_OUTPUT)

 endif
@@ -727,6 +742,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
			 unpriv_helpers.c \
			 netlink_helpers.c \
			 jit_disasm_helpers.c \
+			 io_helpers.c \
			 test_loader.c \
			 xsk.c \
			 disasm.c \
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index 2ed0ef6f21ee..32e9f194d449 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -4,6 +4,7 @@
 #include <argp.h>
 #include <unistd.h>
 #include <stdint.h>
+#include "bpf_util.h"
 #include "bench.h"
 #include "trigger_bench.skel.h"
 #include "trace_helpers.h"
@@ -72,7 +73,7 @@ static __always_inline void inc_counter(struct counter *counters)
	unsigned slot;

	if (unlikely(tid == 0))
-		tid = syscall(SYS_gettid);
+		tid = sys_gettid();

	/* multiplicative hashing, it's fast */
	slot = 2654435769U * tid;
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index b0668f29f7b3..cd8ecd39c3f3 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
		unsigned int flags__k, void *aux__ign) __ksym;
 #define bpf_wq_set_callback(timer, cb, flags) \
	bpf_wq_set_callback_impl(timer, cb, flags, NULL)
+
+struct bpf_iter_kmem_cache;
+extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
+extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
+extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
+
 #endif
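The three kfuncs declared above form an open-coded iterator, letting a BPF
program walk every slab cache directly. Combined with the bpf_for_each()
convenience macro that bpf_experimental.h already provides, usage looks roughly
like the sketch below; the program type and names are illustrative, assuming
(as the selftest added later in this diff does) that the kfuncs are callable
from a bpf_prog_test_run-driven program:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    char _license[] SEC("license") = "GPL";

    long kmem_cache_cnt;

    SEC("syscall")
    int count_kmem_caches(void *ctx)
    {
            struct kmem_cache *s;

            /* bpf_for_each() expands to the _new/_next/_destroy kfuncs */
            bpf_for_each(kmem_cache, s)
                    kmem_cache_cnt++;
            return 0;
    }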
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
index 6c3b4d4f173a..aeef86b3da74 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
@@ -40,6 +40,14 @@ DECLARE_TRACE(bpf_testmod_test_nullable_bare,
	TP_ARGS(ctx__nullable)
 );

+struct sk_buff;
+
+DECLARE_TRACE(bpf_testmod_test_raw_tp_null,
+	TP_PROTO(struct sk_buff *skb),
+	TP_ARGS(skb)
+);
+
+
 #undef BPF_TESTMOD_DECLARE_TRACE
 #ifdef DECLARE_TRACE_WRITABLE
 #define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 12005e3dc3e4..cc9dde507aba 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -245,6 +245,39 @@ __bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
	call_rcu(&ctx->rcu, testmod_free_cb);
 }

+static struct bpf_testmod_ops3 *st_ops3;
+
+static int bpf_testmod_test_3(void)
+{
+	return 0;
+}
+
+static int bpf_testmod_test_4(void)
+{
+	return 0;
+}
+
+static struct bpf_testmod_ops3 __bpf_testmod_ops3 = {
+	.test_1 = bpf_testmod_test_3,
+	.test_2 = bpf_testmod_test_4,
+};
+
+static void bpf_testmod_test_struct_ops3(void)
+{
+	if (st_ops3)
+		st_ops3->test_1();
+}
+
+__bpf_kfunc void bpf_testmod_ops3_call_test_1(void)
+{
+	st_ops3->test_1();
+}
+
+__bpf_kfunc void bpf_testmod_ops3_call_test_2(void)
+{
+	st_ops3->test_2();
+}
+
 struct bpf_testmod_btf_type_tag_1 {
	int a;
 };
@@ -380,6 +413,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

+	(void)trace_bpf_testmod_test_raw_tp_null(NULL);
+
+	bpf_testmod_test_struct_ops3();
+
	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
@@ -584,6 +621,8 @@ BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
 BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_1)
+BTF_ID_FLAGS(func, bpf_testmod_ops3_call_test_2)
 BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

 BTF_ID_LIST(bpf_testmod_dtor_ids)
@@ -1094,6 +1133,10 @@ static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
 };

+static const struct bpf_verifier_ops bpf_testmod_verifier_ops3 = {
+	.is_valid_access = bpf_testmod_ops_is_valid_access,
+};
+
 static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
 {
	struct bpf_testmod_ops *ops = kdata;
@@ -1173,6 +1216,68 @@ struct bpf_struct_ops bpf_testmod_ops2 = {
	.owner = THIS_MODULE,
 };

+static int st_ops3_reg(void *kdata, struct bpf_link *link)
+{
+	int err = 0;
+
+	mutex_lock(&st_ops_mutex);
+	if (st_ops3) {
+		pr_err("st_ops has already been registered\n");
+		err = -EEXIST;
+		goto unlock;
+	}
+	st_ops3 = kdata;
+
+unlock:
+	mutex_unlock(&st_ops_mutex);
+	return err;
+}
+
+static void st_ops3_unreg(void *kdata, struct bpf_link *link)
+{
+	mutex_lock(&st_ops_mutex);
+	st_ops3 = NULL;
+	mutex_unlock(&st_ops_mutex);
+}
+
+static void test_1_recursion_detected(struct bpf_prog *prog)
+{
+	struct bpf_prog_stats *stats;
+
+	stats = this_cpu_ptr(prog->stats);
+	printk("bpf_testmod: oh no, recursing into test_1, recursion_misses %llu",
+	       u64_stats_read(&stats->misses));
+}
+
+static int st_ops3_check_member(const struct btf_type *t,
+				const struct btf_member *member,
+				const struct bpf_prog *prog)
+{
+	u32 moff = __btf_member_bit_offset(t, member) / 8;
+
+	switch (moff) {
+	case offsetof(struct bpf_testmod_ops3, test_1):
+		prog->aux->priv_stack_requested = true;
+		prog->aux->recursion_detected = test_1_recursion_detected;
+		fallthrough;
+	default:
+		break;
+	}
+	return 0;
+}
+
+struct bpf_struct_ops bpf_testmod_ops3 = {
+	.verifier_ops = &bpf_testmod_verifier_ops3,
+	.init = bpf_testmod_ops_init,
+	.init_member = bpf_testmod_ops_init_member,
+	.reg = st_ops3_reg,
+	.unreg = st_ops3_unreg,
+	.check_member = st_ops3_check_member,
+	.cfi_stubs = &__bpf_testmod_ops3,
+	.name = "bpf_testmod_ops3",
+	.owner = THIS_MODULE,
+};
+
 static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
 {
	return 0;
@@ -1331,6 +1436,7 @@ static int bpf_testmod_init(void)
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
+	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops3, bpf_testmod_ops3);
	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
						 ARRAY_SIZE(bpf_testmod_dtors),
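On the BPF side, bpf_testmod_ops3 is implemented with an ordinary struct_ops
map; because st_ops3_check_member() above sets priv_stack_requested for test_1,
that program runs on a private stack where the JIT supports it. A minimal
sketch under those assumptions (this is not the selftest's exact source; the
header path is the usual one for bpf_testmod-based programs):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>
    #include "../bpf_testmod/bpf_testmod.h"

    char _license[] SEC("license") = "GPL";

    SEC("struct_ops")
    int BPF_PROG(test_1)
    {
            /* a real test would burn stack here to exercise the private stack */
            return 0;
    }

    SEC("struct_ops")
    int BPF_PROG(test_2)
    {
            return 0;
    }

    SEC(".struct_ops.link")
    struct bpf_testmod_ops3 testmod_ops3 = {
            .test_1 = (void *)test_1,
            .test_2 = (void *)test_2,
    };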
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
index fb7dff47597a..356803d1c10e 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
@@ -94,6 +94,11 @@ struct bpf_testmod_ops2 {
	int (*test_1)(void);
 };

+struct bpf_testmod_ops3 {
+	int (*test_1)(void);
+	int (*test_2)(void);
+};
+
 struct st_ops_args {
	u64 a;
 };
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 10587a29b967..5f6963a320d7 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -6,6 +6,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <errno.h>
+#include <syscall.h>
 #include <bpf/libbpf.h> /* libbpf_num_possible_cpus */

 static inline unsigned int bpf_num_possible_cpus(void)
@@ -59,4 +60,15 @@ static inline void bpf_strlcpy(char *dst, const char *src, size_t sz)
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
 #endif

+/* Availability of gettid across glibc versions is hit-and-miss, therefore
+ * fall back to the syscall in this macro and use it everywhere.
+ */
+#ifndef sys_gettid
+#define sys_gettid() syscall(SYS_gettid)
+#endif
+
+#ifndef ENOTSUPP
+#define ENOTSUPP 524
+#endif
+
 #endif /* __BPF_UTIL__ */
diff --git a/tools/testing/selftests/bpf/config.vm b/tools/testing/selftests/bpf/config.vm
index a9746ca78777..da543b24c144 100644
--- a/tools/testing/selftests/bpf/config.vm
+++ b/tools/testing/selftests/bpf/config.vm
@@ -1,12 +1,15 @@
-CONFIG_9P_FS=y
 CONFIG_9P_FS_POSIX_ACL=y
 CONFIG_9P_FS_SECURITY=y
+CONFIG_9P_FS=y
 CONFIG_CRYPTO_DEV_VIRTIO=y
-CONFIG_NET_9P=y
+CONFIG_FUSE_FS=y
+CONFIG_FUSE_PASSTHROUGH=y
 CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_9P=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_FS=y
 CONFIG_VIRTIO_NET=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_VSOCKETS_COMMON=y
diff --git a/tools/testing/selftests/bpf/io_helpers.c b/tools/testing/selftests/bpf/io_helpers.c
new file mode 100644
index 000000000000..4ada0a74aa1f
--- /dev/null
+++ b/tools/testing/selftests/bpf/io_helpers.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/select.h>
+#include <unistd.h>
+#include <errno.h>
+
+int read_with_timeout(int fd, char *buf, size_t count, long usec)
+{
+	const long M = 1000 * 1000;
+	struct timeval tv = { usec / M, usec % M };
+	fd_set fds;
+	int err;
+
+	FD_ZERO(&fds);
+	FD_SET(fd, &fds);
+	err = select(fd + 1, &fds, NULL, NULL, &tv);
+	if (err < 0)
+		return err;
+	if (FD_ISSET(fd, &fds))
+		return read(fd, buf, count);
+	return -EAGAIN;
+}
diff --git a/tools/testing/selftests/bpf/io_helpers.h b/tools/testing/selftests/bpf/io_helpers.h
new file mode 100644
index 000000000000..21e1134cd3ce
--- /dev/null
+++ b/tools/testing/selftests/bpf/io_helpers.h
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
+
+/* As a regular read(2), but allows specifying a timeout in microseconds.
+ * Returns -EAGAIN on timeout.
+ */
+int read_with_timeout(int fd, char *buf, size_t count, long usec);
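read_with_timeout() is a drop-in for read(2) that gives up after usec
microseconds and reports -EAGAIN, so callers can poll without blocking forever.
A hypothetical caller, mirroring the retry pattern the reworked send_signal
test later in this diff uses:

    #include <errno.h>
    #include "io_helpers.h"

    /* Poll up to ~1s total: 10000 retries of a 100us timeout. */
    static int read_one_byte(int fd, char *buf)
    {
            int err, retry_count = 0;

            do {
                    err = read_with_timeout(fd, buf, 1, 100);
            } while (err == -EAGAIN && retry_count++ < 10000);
            return err; /* bytes read, 0 on EOF, or a negative error */
    }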
diff --git a/tools/testing/selftests/bpf/map_tests/task_storage_map.c b/tools/testing/selftests/bpf/map_tests/task_storage_map.c
index 7d050364efca..62971dbf2996 100644
--- a/tools/testing/selftests/bpf/map_tests/task_storage_map.c
+++ b/tools/testing/selftests/bpf/map_tests/task_storage_map.c
@@ -12,6 +12,7 @@
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>

+#include "bpf_util.h"
 #include "test_maps.h"
 #include "task_local_storage_helpers.h"
 #include "read_bpf_task_storage_busy.skel.h"
@@ -115,7 +116,7 @@ void test_task_storage_map_stress_lookup(void)
	CHECK(err, "attach", "error %d\n", err);

	/* Trigger program */
-	syscall(SYS_gettid);
+	sys_gettid();
	skel->bss->pid = 0;
	CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 070c52c312e5..6befa870434b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -690,7 +690,7 @@ void test_bpf_cookie(void)
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

-	skel->bss->my_tid = syscall(SYS_gettid);
+	skel->bss->my_tid = sys_gettid();

	if (test__start_subtest("kprobe"))
		kprobe_subtest(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index f0a3a9c18e9e..6f1bfacd7375 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -226,7 +226,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

-	skel->bss->tid = gettid();
+	skel->bss->tid = sys_gettid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

@@ -255,20 +255,20 @@ static void *run_test_task_tid(void *arg)
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

-	ASSERT_NEQ(getpid(), gettid(), "check_new_thread_id");
+	ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id");

	memset(&linfo, 0, sizeof(linfo));
-	linfo.task.tid = gettid();
+	linfo.task.tid = sys_gettid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
-	/* This includes the parent thread, this thread,
+	/* This includes the parent thread, this thread, watchdog timer thread
	 * and the do_nothing_wait thread
	 */
-	test_task_common(&opts, 2, 1);
+	test_task_common(&opts, 3, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid");
@@ -297,7 +297,7 @@ static void test_task_pid(void)
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

-	test_task_common(&opts, 1, 1);
+	test_task_common(&opts, 2, 1);
 }

 static void test_task_pidfd(void)
@@ -315,7 +315,7 @@ static void test_task_pidfd(void)
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

-	test_task_common(&opts, 1, 1);
+	test_task_common(&opts, 2, 1);

	close(pidfd);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 409a06975823..b7d1b52309d0 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -16,10 +16,6 @@
 #include "tcp_ca_kfunc.skel.h"
 #include "bpf_cc_cubic.skel.h"

-#ifndef ENOTSUPP
-#define ENOTSUPP 524
-#endif
-
 static const unsigned int total_bytes = 10 * 1024 * 1024;
 static int expected_stg = 0xeB9F;
diff --git a/tools/testing/selftests/bpf/prog_tests/cb_refs.c b/tools/testing/selftests/bpf/prog_tests/cb_refs.c
index 3bff680de16c..c40df623a8f7 100644
--- a/tools/testing/selftests/bpf/prog_tests/cb_refs.c
+++ b/tools/testing/selftests/bpf/prog_tests/cb_refs.c
@@ -11,8 +11,8 @@ struct {
	const char *prog_name;
	const char *err_msg;
 } cb_refs_tests[] = {
-	{ "underflow_prog", "reference has not been acquired before" },
-	{ "leak_prog", "Unreleased reference" },
+	{ "underflow_prog", "must point to scalar, or struct with scalar" },
+	{ "leak_prog", "Possibly NULL pointer passed to helper arg2" },
	{ "nested_cb", "Unreleased reference id=4 alloc_insn=2" }, /* alloc_insn=2{4,5} */
	{ "non_cb_transfer_ref", "Unreleased reference id=4 alloc_insn=1" }, /* alloc_insn=1{1,2} */
 };
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
index 747761572098..9015e2c2ab12 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c
@@ -63,14 +63,14 @@ static void test_tp_btf(int cgroup_fd)
	if (!ASSERT_OK(err, "map_delete_elem"))
		goto out;

-	skel->bss->target_pid = syscall(SYS_gettid);
+	skel->bss->target_pid = sys_gettid();

	err = cgrp_ls_tp_btf__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

-	syscall(SYS_gettid);
-	syscall(SYS_gettid);
+	sys_gettid();
+	sys_gettid();

	skel->bss->target_pid = 0;

@@ -154,7 +154,7 @@ static void test_recursion(int cgroup_fd)
		goto out;

	/* trigger sys_enter, make sure it does not cause deadlock */
-	syscall(SYS_gettid);
+	sys_gettid();

 out:
	cgrp_ls_recursion__destroy(skel);
@@ -224,7 +224,7 @@ static void test_yes_rcu_lock(__u64 cgroup_id)
		return;

	CGROUP_MODE_SET(skel);
-	skel->bss->target_pid = syscall(SYS_gettid);
+	skel->bss->target_pid = sys_gettid();

	bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 26019313e1fc..1c682550e0e7 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -1010,7 +1010,7 @@ static void run_core_reloc_tests(bool use_btfgen)
	struct data *data;
	void *mmap_data = NULL;

-	my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);
+	my_pid_tgid = getpid() | ((uint64_t)sys_gettid() << 32);

	for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
		char btf_file[] = "/tmp/core_reloc.btf.XXXXXX";
diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c
index 89ff23c4a8bc..3cea71f9c500 100644
--- a/tools/testing/selftests/bpf/prog_tests/iters.c
+++ b/tools/testing/selftests/bpf/prog_tests/iters.c
@@ -192,8 +192,8 @@ static void subtest_task_iters(void)
	syscall(SYS_getpgid);
	iters_task__detach(skel);
	ASSERT_EQ(skel->bss->procs_cnt, 1, "procs_cnt");
-	ASSERT_EQ(skel->bss->threads_cnt, thread_num + 1, "threads_cnt");
-	ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 1, "proc_threads_cnt");
+	ASSERT_EQ(skel->bss->threads_cnt, thread_num + 2, "threads_cnt");
+	ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 2, "proc_threads_cnt");
	ASSERT_EQ(skel->bss->invalid_cnt, 0, "invalid_cnt");
	pthread_mutex_unlock(&do_nothing_mutex);
	for (int i = 0; i < thread_num; i++)
diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
new file mode 100644
index 000000000000..8e13a3416a21
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "kmem_cache_iter.skel.h"
+
+#define SLAB_NAME_MAX 32
+
+struct kmem_cache_result {
+	char name[SLAB_NAME_MAX];
+	long obj_size;
+};
+
+static void subtest_kmem_cache_iter_check_task_struct(struct kmem_cache_iter *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .flags = 0,  /* Run it with the current task */
+	);
+	int prog_fd = bpf_program__fd(skel->progs.check_task_struct);
+
+	/* Get task_struct and check if it's from a slab cache */
+	ASSERT_OK(bpf_prog_test_run_opts(prog_fd, &opts), "prog_test_run");
+
+	/* The BPF program should set 'found' variable */
+	ASSERT_EQ(skel->bss->task_struct_found, 1, "task_struct_found");
+}
+
+static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
+{
+	FILE *fp;
+	int map_fd;
+	char name[SLAB_NAME_MAX];
+	unsigned long objsize;
+	char rest_of_line[1000];
+	struct kmem_cache_result r;
+	int seen = 0;
+
+	fp = fopen("/proc/slabinfo", "r");
+	if (fp == NULL) {
+		/* CONFIG_SLUB_DEBUG is not enabled */
+		return;
+	}
+
+	map_fd = bpf_map__fd(skel->maps.slab_result);
+
+	/* Ignore first two lines for header */
+	fscanf(fp, "slabinfo - version: %*d.%*d\n");
+	fscanf(fp, "# %*s %*s %*s %*s %*s %*s : %[^\n]\n", rest_of_line);
+
+	/* Compare name and objsize only - others can change frequently */
+	while (fscanf(fp, "%s %*u %*u %lu %*u %*u : %[^\n]\n",
+		      name, &objsize, rest_of_line) == 3) {
+		int ret = bpf_map_lookup_elem(map_fd, &seen, &r);
+
+		if (!ASSERT_OK(ret, "kmem_cache_lookup"))
+			break;
+
+		ASSERT_STREQ(r.name, name, "kmem_cache_name");
+		ASSERT_EQ(r.obj_size, objsize, "kmem_cache_objsize");
+
+		seen++;
+	}
+
+	ASSERT_EQ(skel->bss->kmem_cache_seen, seen, "kmem_cache_seen_eq");
+
+	fclose(fp);
+}
+
+static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, fd;
+
+	/* No need to attach it, just run it directly */
+	fd = bpf_program__fd(skel->progs.open_coded_iter);
+
+	err = bpf_prog_test_run_opts(fd, &topts);
+	if (!ASSERT_OK(err, "test_run_opts err"))
+		return;
+	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+		return;
+
+	/* It should be same as we've seen from the explicit iterator */
+	ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
+}
+
+void test_kmem_cache_iter(void)
+{
+	struct kmem_cache_iter *skel = NULL;
+	char buf[256];
+	int iter_fd;
+
+	skel = kmem_cache_iter__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
+		return;
+
+	if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))
+		goto destroy;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
+	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
+		goto destroy;
+
+	memset(buf, 0, sizeof(buf));
+	while (read(iter_fd, buf, sizeof(buf)) > 0) {
+		/* Read out all contents */
+		printf("%s", buf);
+	}
+
+	/* Next reads should return 0 */
+	ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+
+	if (test__start_subtest("check_task_struct"))
+		subtest_kmem_cache_iter_check_task_struct(skel);
+	if (test__start_subtest("check_slabinfo"))
+		subtest_kmem_cache_iter_check_slabinfo(skel);
+	if (test__start_subtest("open_coded_iter"))
+		subtest_kmem_cache_iter_open_coded(skel);
+
+	close(iter_fd);
+
+destroy:
+	kmem_cache_iter__destroy(skel);
+}
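The skeleton's BPF programs (slab_info_collector, open_coded_iter,
check_task_struct) are not part of this excerpt. For orientation, the
attached-iterator side plausibly looks like the sketch below; the
bpf_iter__kmem_cache context type, its s member, and the kmem_cache field names
are assumptions patterned after other BPF iterators, and the counter mirrors
what the test compares against:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    long kmem_cache_seen;

    SEC("iter/kmem_cache")
    int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
    {
            struct seq_file *seq = ctx->meta->seq;
            struct kmem_cache *s = ctx->s;

            if (!s)
                    return 0;

            /* this text is what the userspace read(iter_fd, ...) loop sees */
            BPF_SEQ_PRINTF(seq, "%s %u\n", s->name, s->object_size);
            kmem_cache_seen++;
            return 0;
    }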
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index 960c9323d1e0..66ab1cae923e 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -6,6 +6,7 @@
 #include "kprobe_multi_override.skel.h"
 #include "kprobe_multi_session.skel.h"
 #include "kprobe_multi_session_cookie.skel.h"
+#include "kprobe_multi_verifier.skel.h"
 #include "bpf/libbpf_internal.h"
 #include "bpf/hashmap.h"

@@ -764,4 +765,5 @@ void test_kprobe_multi_test(void)
		test_session_skel_api();
	if (test__start_subtest("session_cookie"))
		test_session_cookie_skel_api();
+	RUN_TESTS(kprobe_multi_verifier);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
index cad664546912..fa639b021f7e 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
@@ -20,7 +20,7 @@ void test_linked_funcs(void)
	bpf_program__set_autoload(skel->progs.handler1, true);
	bpf_program__set_autoload(skel->progs.handler2, true);

-	skel->rodata->my_tid = syscall(SYS_gettid);
+	skel->rodata->my_tid = sys_gettid();
	skel->bss->syscall_id = SYS_getpgid;

	err = linked_funcs__load(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
index 27676a04d0b6..169ce689b97c 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_buf.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -169,7 +169,6 @@ static void bpf_prog_load_log_buf(void)
	ASSERT_GE(fd, 0, "good_fd1");
	if (fd >= 0)
		close(fd);
-	fd = -1;

	/* log_level == 2 should always fill log_buf, even for good prog */
	log_buf[0] = '\0';
@@ -180,7 +179,6 @@ static void bpf_prog_load_log_buf(void)
	ASSERT_GE(fd, 0, "good_fd2");
	if (fd >= 0)
		close(fd);
-	fd = -1;

	/* log_level == 0 should fill log_buf for bad prog */
	log_buf[0] = '\0';
@@ -191,7 +189,6 @@ static void bpf_prog_load_log_buf(void)
	ASSERT_LT(fd, 0, "bad_fd");
	if (fd >= 0)
		close(fd);
-	fd = -1;

	free(log_buf);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
index 130a3b21e467..6df25de8f080 100644
--- a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
+++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
@@ -10,10 +10,6 @@
 #include "cgroup_helpers.h"
 #include "network_helpers.h"

-#ifndef ENOTSUPP
-#define ENOTSUPP 524
-#endif
-
 static struct btf *btf;

 static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)
diff --git a/tools/testing/selftests/bpf/prog_tests/map_in_map.c b/tools/testing/selftests/bpf/prog_tests/map_in_map.c
index d2a10eb4e5b5..286a9fb469e2 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_in_map.c
@@ -5,7 +5,9 @@
 #include <sys/syscall.h>
 #include <test_progs.h>
 #include <bpf/btf.h>
+
 #include "access_map_in_map.skel.h"
+#include "update_map_in_htab.skel.h"

 struct thread_ctx {
	pthread_barrier_t barrier;
@@ -127,6 +129,131 @@ out:
	access_map_in_map__destroy(skel);
 }

+static void add_del_fd_htab(int outer_fd)
+{
+	int inner_fd, err;
+	int key = 1;
+
+	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL);
+	if (!ASSERT_OK_FD(inner_fd, "inner1"))
+		return;
+	err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST);
+	close(inner_fd);
+	if (!ASSERT_OK(err, "add"))
+		return;
+
+	/* Delete */
+	err = bpf_map_delete_elem(outer_fd, &key);
+	ASSERT_OK(err, "del");
+}
+
+static void overwrite_fd_htab(int outer_fd)
+{
+	int inner_fd, err;
+	int key = 1;
+
+	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL);
+	if (!ASSERT_OK_FD(inner_fd, "inner1"))
+		return;
+	err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST);
+	close(inner_fd);
+	if (!ASSERT_OK(err, "add"))
+		return;
+
+	/* Overwrite */
+	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr2", 4, 4, 1, NULL);
+	if (!ASSERT_OK_FD(inner_fd, "inner2"))
+		goto out;
+	err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_EXIST);
+	close(inner_fd);
+	if (!ASSERT_OK(err, "overwrite"))
+		goto out;
+
+	err = bpf_map_delete_elem(outer_fd, &key);
+	ASSERT_OK(err, "del");
+	return;
+out:
+	bpf_map_delete_elem(outer_fd, &key);
+}
+
+static void lookup_delete_fd_htab(int outer_fd)
+{
+	int key = 1, value;
+	int inner_fd, err;
+
+	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL);
+	if (!ASSERT_OK_FD(inner_fd, "inner1"))
+		return;
+	err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_NOEXIST);
+	close(inner_fd);
+	if (!ASSERT_OK(err, "add"))
+		return;
+
+	/* lookup_and_delete is not supported for htab of maps */
+	err = bpf_map_lookup_and_delete_elem(outer_fd, &key, &value);
+	ASSERT_EQ(err, -ENOTSUPP, "lookup_del");
+
+	err = bpf_map_delete_elem(outer_fd, &key);
+	ASSERT_OK(err, "del");
+}
+
+static void batched_lookup_delete_fd_htab(int outer_fd)
+{
+	int keys[2] = {1, 2}, values[2];
+	unsigned int cnt, batch;
+	int inner_fd, err;
+
+	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr1", 4, 4, 1, NULL);
+	if (!ASSERT_OK_FD(inner_fd, "inner1"))
+		return;
+
+	err = bpf_map_update_elem(outer_fd, &keys[0], &inner_fd, BPF_NOEXIST);
+	close(inner_fd);
+	if (!ASSERT_OK(err, "add1"))
+		return;
+
+	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "arr2", 4, 4, 1, NULL);
+	if (!ASSERT_OK_FD(inner_fd, "inner2"))
+		goto out;
+	err = bpf_map_update_elem(outer_fd, &keys[1], &inner_fd, BPF_NOEXIST);
+	close(inner_fd);
+	if (!ASSERT_OK(err, "add2"))
+		goto out;
+
+	/* batched lookup_and_delete */
+	cnt = ARRAY_SIZE(keys);
+	err = bpf_map_lookup_and_delete_batch(outer_fd, NULL, &batch, keys, values, &cnt, NULL);
+	ASSERT_TRUE((!err || err == -ENOENT), "delete_batch ret");
+	ASSERT_EQ(cnt, ARRAY_SIZE(keys), "delete_batch cnt");
+
+out:
+	bpf_map_delete_elem(outer_fd, &keys[0]);
+}
+
+static void test_update_map_in_htab(bool preallocate)
+{
+	struct update_map_in_htab *skel;
+	int err, fd;
+
+	skel = update_map_in_htab__open();
+	if (!ASSERT_OK_PTR(skel, "open"))
+		return;
+
+	err = update_map_in_htab__load(skel);
+	if (!ASSERT_OK(err, "load"))
+		goto out;
+
+	fd = preallocate ? bpf_map__fd(skel->maps.outer_htab_map) :
+			   bpf_map__fd(skel->maps.outer_alloc_htab_map);
+
+	add_del_fd_htab(fd);
+	overwrite_fd_htab(fd);
+	lookup_delete_fd_htab(fd);
+	batched_lookup_delete_fd_htab(fd);
+out:
+	update_map_in_htab__destroy(skel);
+}
+
 void test_map_in_map(void)
 {
	if (test__start_subtest("acc_map_in_array"))
@@ -137,5 +264,8 @@ void test_map_in_map(void)
		test_map_in_map_access("access_map_in_htab", "outer_htab_map");
	if (test__start_subtest("sleepable_acc_map_in_htab"))
		test_map_in_map_access("sleepable_access_map_in_htab", "outer_htab_map");
+	if (test__start_subtest("update_map_in_htab"))
+		test_update_map_in_htab(true);
+	if (test__start_subtest("update_map_in_alloc_htab"))
+		test_update_map_in_htab(false);
 }
-
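update_map_in_htab.c itself is not shown in this excerpt. Judging by the two
map names the test dereferences, its outer maps presumably follow the standard
libbpf map-in-map declaration, one preallocated and one with BPF_F_NO_PREALLOC;
a sketch under that assumption (entry counts are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    char _license[] SEC("license") = "GPL";

    struct inner_map_type {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, int);
            __type(value, int);
    };

    struct {
            __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
            __uint(max_entries, 16);
            __type(key, int);
            __array(values, struct inner_map_type);
    } outer_htab_map SEC(".maps");

    struct {
            __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
            __uint(map_flags, BPF_F_NO_PREALLOC);
            __uint(max_entries, 16);
            __type(key, int);
            __array(values, struct inner_map_type);
    } outer_alloc_htab_map SEC(".maps");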
diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
index c29787e092d6..761ce24bce38 100644
--- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
+++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
@@ -23,7 +23,7 @@ static int get_pid_tgid(pid_t *pid, pid_t *tgid,
	struct stat st;
	int err;

-	*pid = syscall(SYS_gettid);
+	*pid = sys_gettid();
	*tgid = getpid();

	err = stat("/proc/self/ns/pid", &st);
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
index 3a25f1c743a1..d940ff87fa08 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c
@@ -4,8 +4,12 @@
 #include <pthread.h>
 #include <sched.h>
 #include <test_progs.h>
+#include "testing_helpers.h"
 #include "test_perf_link.skel.h"

+#define BURN_TIMEOUT_MS 100
+#define BURN_TIMEOUT_NS BURN_TIMEOUT_MS * 1000000
+
 static void burn_cpu(void)
 {
	volatile int j = 0;
@@ -32,6 +36,7 @@ void serial_test_perf_link(void)
	int run_cnt_before, run_cnt_after;
	struct bpf_link_info info;
	__u32 info_len = sizeof(info);
+	__u64 timeout_time_ns;

	/* create perf event */
	memset(&attr, 0, sizeof(attr));
@@ -63,8 +68,14 @@ void serial_test_perf_link(void)
	ASSERT_GT(info.prog_id, 0, "link_prog_id");

	/* ensure we get at least one perf_event prog execution */
-	burn_cpu();
-	ASSERT_GT(skel->bss->run_cnt, 0, "run_cnt");
+	timeout_time_ns = get_time_ns() + BURN_TIMEOUT_NS;
+	while (true) {
+		burn_cpu();
+		if (skel->bss->run_cnt > 0)
+			break;
+		if (!ASSERT_LT(get_time_ns(), timeout_time_ns, "run_cnt_timeout"))
+			break;
+	}

	/* perf_event is still active, but we close link and BPF program
	 * shouldn't be executed anymore
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
new file mode 100644
index 000000000000..6fa19449297e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_null.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <test_progs.h>
+#include "raw_tp_null.skel.h"
+
+void test_raw_tp_null(void)
+{
+	struct raw_tp_null *skel;
+
+	skel = raw_tp_null__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "raw_tp_null__open_and_load"))
+		return;
+
+	skel->bss->tid = sys_gettid();
+
+	if (!ASSERT_OK(raw_tp_null__attach(skel), "raw_tp_null__attach"))
+		goto end;
+
+	ASSERT_OK(trigger_module_test_read(2), "trigger testmod read");
+	ASSERT_EQ(skel->bss->i, 3, "invocations");
+
+end:
+	raw_tp_null__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
index a1f7e7378a64..ebe0c12b5536 100644
--- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
@@ -21,7 +21,7 @@ static void test_success(void)
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

-	skel->bss->target_pid = syscall(SYS_gettid);
+	skel->bss->target_pid = sys_gettid();

	bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
	bpf_program__set_autoload(skel->progs.task_succ, true);
@@ -58,7 +58,7 @@ static void test_rcuptr_acquire(void)
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

-	skel->bss->target_pid = syscall(SYS_gettid);
+	skel->bss->target_pid = sys_gettid();

	bpf_program__set_autoload(skel->progs.task_acquire, true);
	err = rcu_read_lock__load(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 6cc69900b310..1702aa592c2c 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -3,22 +3,32 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 #include "test_send_signal_kern.skel.h"
+#include "io_helpers.h"

 static int sigusr1_received;

 static void sigusr1_handler(int signum)
 {
-	sigusr1_received = 1;
+	sigusr1_received = 8;
+}
+
+static void sigusr1_siginfo_handler(int s, siginfo_t *i, void *v)
+{
+	sigusr1_received = (int)(long long)i->si_value.sival_ptr;
 }

 static void test_send_signal_common(struct perf_event_attr *attr,
-				    bool signal_thread)
+				    bool signal_thread, bool remote)
 {
	struct test_send_signal_kern *skel;
+	struct sigaction sa;
	int pipe_c2p[2], pipe_p2c[2];
	int err = -1, pmu_fd = -1;
+	volatile int j = 0;
+	int retry_count;
	char buf[256];
	pid_t pid;
+	int old_prio;

	if (!ASSERT_OK(pipe(pipe_c2p), "pipe_c2p"))
		return;
@@ -39,11 +49,14 @@ static void test_send_signal_common(struct perf_event_attr *attr,
	}

	if (pid == 0) {
-		int old_prio;
-		volatile int j = 0;
-
		/* install signal handler and notify parent */
-		ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
+		if (remote) {
+			sa.sa_sigaction = sigusr1_siginfo_handler;
+			sa.sa_flags = SA_RESTART | SA_SIGINFO;
+			ASSERT_NEQ(sigaction(SIGUSR1, &sa, NULL), -1, "sigaction");
+		} else {
+			ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
+		}

		close(pipe_c2p[0]); /* close read */
		close(pipe_p2c[1]); /* close write */
@@ -52,10 +65,12 @@ static void test_send_signal_common(struct perf_event_attr *attr,
		 * that if an interrupt happens, the underlying task
		 * is this process.
		 */
-		errno = 0;
-		old_prio = getpriority(PRIO_PROCESS, 0);
-		ASSERT_OK(errno, "getpriority");
-		ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+		if (!remote) {
+			errno = 0;
+			old_prio = getpriority(PRIO_PROCESS, 0);
+			ASSERT_OK(errno, "getpriority");
+			ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+		}

		/* notify parent signal handler is installed */
		ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
@@ -66,20 +81,25 @@ static void test_send_signal_common(struct perf_event_attr *attr,
		/* wait a little for signal handler */
		for (int i = 0; i < 1000000000 && !sigusr1_received; i++) {
			j /= i + j + 1;
-			if (!attr)
-				/* trigger the nanosleep tracepoint program. */
-				usleep(1);
+			if (remote)
+				sleep(1);
+			else
+				if (!attr)
+					/* trigger the nanosleep tracepoint program. */
+					usleep(1);
		}

-		buf[0] = sigusr1_received ? '2' : '0';
-		ASSERT_EQ(sigusr1_received, 1, "sigusr1_received");
+		buf[0] = sigusr1_received;
+
+		ASSERT_EQ(sigusr1_received, 8, "sigusr1_received");
		ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");

		/* wait for parent notification and exit */
		ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");

		/* restore the old priority */
-		ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
+		if (!remote)
+			ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");

		close(pipe_c2p[1]);
		close(pipe_p2c[0]);
@@ -93,6 +113,17 @@ static void test_send_signal_common(struct perf_event_attr *attr,
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		goto skel_open_load_failure;

+	/* boost with a high priority so we get a higher chance
+	 * that if an interrupt happens, the underlying task
+	 * is this process.
+	 */
+	if (remote) {
+		errno = 0;
+		old_prio = getpriority(PRIO_PROCESS, 0);
+		ASSERT_OK(errno, "getpriority");
+		ASSERT_OK(setpriority(PRIO_PROCESS, 0, -20), "setpriority");
+	}
+
	if (!attr) {
		err = test_send_signal_kern__attach(skel);
		if (!ASSERT_OK(err, "skel_attach")) {
@@ -100,8 +131,12 @@ static void test_send_signal_common(struct perf_event_attr *attr,
			goto destroy_skel;
		}
	} else {
-		pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
-				 -1 /* group id */, 0 /* flags */);
+		if (!remote)
+			pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
+					 -1 /* group id */, 0 /* flags */);
+		else
+			pmu_fd = syscall(__NR_perf_event_open, attr, getpid(), -1 /* cpu */,
+					 -1 /* group id */, 0 /* flags */);
		if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {
			err = -1;
			goto destroy_skel;
@@ -119,13 +154,36 @@ static void test_send_signal_common(struct perf_event_attr *attr,
	/* trigger the bpf send_signal */
	skel->bss->signal_thread = signal_thread;
	skel->bss->sig = SIGUSR1;
-	skel->bss->pid = pid;
+	if (!remote) {
+		skel->bss->target_pid = 0;
+		skel->bss->pid = pid;
+	} else {
+		skel->bss->target_pid = pid;
+		skel->bss->pid = getpid();
+	}

	/* notify child that bpf program can send_signal now */
	ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");

-	/* wait for result */
-	err = read(pipe_c2p[0], buf, 1);
+	for (retry_count = 0;;) {
+		/* For the remote test, the BPF program is triggered from this
+		 * process but the other process/thread is signaled.
+		 */
+		if (remote) {
+			if (!attr) {
+				for (int i = 0; i < 10; i++)
+					usleep(1);
+			} else {
+				for (int i = 0; i < 100000000; i++)
+					j /= i + 1;
+			}
+		}
+		/* wait for result */
+		err = read_with_timeout(pipe_c2p[0], buf, 1, 100);
+		if (err == -EAGAIN && retry_count++ < 10000)
+			continue;
+		break;
+	}
	if (!ASSERT_GE(err, 0, "reading pipe"))
		goto disable_pmu;
	if (!ASSERT_GT(err, 0, "reading pipe error: size 0")) {
@@ -133,7 +191,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
		goto disable_pmu;
	}

-	ASSERT_EQ(buf[0], '2', "incorrect result");
+	ASSERT_EQ(buf[0], 8, "incorrect result");

	/* notify child safe to exit */
	ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
@@ -142,18 +200,21 @@ disable_pmu:
	close(pmu_fd);
 destroy_skel:
	test_send_signal_kern__destroy(skel);
+	/* restore the old priority */
+	if (remote)
+		ASSERT_OK(setpriority(PRIO_PROCESS, 0, old_prio), "setpriority");
 skel_open_load_failure:
	close(pipe_c2p[0]);
	close(pipe_p2c[1]);
	wait(NULL);
 }

-static void test_send_signal_tracepoint(bool signal_thread)
+static void test_send_signal_tracepoint(bool signal_thread, bool remote)
 {
-	test_send_signal_common(NULL, signal_thread);
+	test_send_signal_common(NULL, signal_thread, remote);
 }

-static void test_send_signal_perf(bool signal_thread)
+static void test_send_signal_perf(bool signal_thread, bool remote)
 {
	struct perf_event_attr attr = {
		.freq = 1,
@@ -162,13 +223,14 @@ static void test_send_signal_perf(bool signal_thread)
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};

-	test_send_signal_common(&attr, signal_thread);
+	test_send_signal_common(&attr, signal_thread, remote);
 }

-static void test_send_signal_nmi(bool signal_thread)
+static void test_send_signal_nmi(bool signal_thread, bool remote)
 {
	struct perf_event_attr attr = {
-		.sample_period = 1,
+		.freq = 1,
+		.sample_freq = 1000,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
@@ -191,21 +253,35 @@ static void test_send_signal_nmi(bool signal_thread)
		close(pmu_fd);
	}

-	test_send_signal_common(&attr, signal_thread);
+	test_send_signal_common(&attr, signal_thread, remote);
 }

 void test_send_signal(void)
 {
	if (test__start_subtest("send_signal_tracepoint"))
-		test_send_signal_tracepoint(false);
+		test_send_signal_tracepoint(false, false);
	if (test__start_subtest("send_signal_perf"))
-		test_send_signal_perf(false);
+		test_send_signal_perf(false, false);
	if (test__start_subtest("send_signal_nmi"))
-		test_send_signal_nmi(false);
+		test_send_signal_nmi(false, false);
	if (test__start_subtest("send_signal_tracepoint_thread"))
-		test_send_signal_tracepoint(true);
+		test_send_signal_tracepoint(true, false);
	if (test__start_subtest("send_signal_perf_thread"))
-		test_send_signal_perf(true);
+		test_send_signal_perf(true, false);
	if (test__start_subtest("send_signal_nmi_thread"))
-		test_send_signal_nmi(true);
+		test_send_signal_nmi(true, false);
+
+	/* Signal remote thread and thread group */
+	if (test__start_subtest("send_signal_tracepoint_remote"))
+		test_send_signal_tracepoint(false, true);
+	if (test__start_subtest("send_signal_perf_remote"))
+		test_send_signal_perf(false, true);
+	if (test__start_subtest("send_signal_nmi_remote"))
+		test_send_signal_nmi(false, true);
+	if (test__start_subtest("send_signal_tracepoint_thread_remote"))
+		test_send_signal_tracepoint(true, true);
+	if (test__start_subtest("send_signal_perf_thread_remote"))
+		test_send_signal_perf(true, true);
+	if (test__start_subtest("send_signal_nmi_thread_remote"))
+		test_send_signal_nmi(true, true);
 }
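The remote subtests signal a task other than the one triggering the program;
test_send_signal_kern.c is not in this excerpt, but it presumably keys off the
new target_pid and uses the bpf_send_signal_task() kfunc added this cycle,
whose u64 argument is what the SA_SIGINFO handler above reads back from
si_value (the expected 8). A sketch, with the kfunc signature and program-type
availability treated as assumptions:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    /* kfunc declarations; signatures assumed, not taken from this diff */
    struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
    void bpf_task_release(struct task_struct *p) __ksym;
    int bpf_send_signal_task(struct task_struct *task, int sig,
                             enum pid_type type, u64 value) __ksym;

    char _license[] SEC("license") = "GPL";

    int target_pid;
    int sig;

    SEC("perf_event")
    int send_signal_remote(void *ctx)
    {
            struct task_struct *task;

            task = bpf_task_from_pid(target_pid);
            if (!task)
                    return 0;
            /* carry 8 in si_value, matching the handler's expectation */
            bpf_send_signal_task(task, sig, PIDTYPE_TGID, 8);
            bpf_task_release(task);
            return 0;
    }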
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_addr.c b/tools/testing/selftests/bpf/prog_tests/sock_addr.c
index a6ee7f8d4f79..b2efabbed220 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_addr.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_addr.c
@@ -23,10 +23,6 @@
 #include "getpeername_unix_prog.skel.h"
 #include "network_helpers.h"

-#ifndef ENOTSUPP
-# define ENOTSUPP 524
-#endif
-
 #define TEST_NS "sock_addr"
 #define TEST_IF_PREFIX "test_sock_addr"
 #define TEST_IPV4 "127.0.0.4"
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_create.c b/tools/testing/selftests/bpf/prog_tests/sock_create.c
new file mode 100644
index 000000000000..187ffc5e60c4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_create.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+
+static char bpf_log_buf[4096];
+static bool verbose;
+
+enum sock_create_test_error {
+	OK = 0,
+	DENY_CREATE,
+};
+
+static struct sock_create_test {
+	const char *descr;
+	const struct bpf_insn insns[64];
+	enum bpf_attach_type attach_type;
+	enum bpf_attach_type expected_attach_type;
+
+	int domain;
+	int type;
+	int protocol;
+
+	int optname;
+	int optval;
+	enum sock_create_test_error error;
+} tests[] = {
+	{
+		.descr = "AF_INET set priority",
+		.insns = {
+			/* r3 = 123 (priority) */
+			BPF_MOV64_IMM(BPF_REG_3, 123),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+				    offsetof(struct bpf_sock, priority)),
+
+			/* return 1 */
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET,
+		.type = SOCK_DGRAM,
+
+		.optname = SO_PRIORITY,
+		.optval = 123,
+	},
+	{
+		.descr = "AF_INET6 set priority",
+		.insns = {
+			/* r3 = 123 (priority) */
+			BPF_MOV64_IMM(BPF_REG_3, 123),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+				    offsetof(struct bpf_sock, priority)),
+
+			/* return 1 */
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET6,
+		.type = SOCK_DGRAM,
+
+		.optname = SO_PRIORITY,
+		.optval = 123,
+	},
+	{
+		.descr = "AF_INET set mark",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+			/* get uid of process */
+			BPF_EMIT_CALL(BPF_FUNC_get_current_uid_gid),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff),
+
+			/* if uid is 0, use given mark(666), else use uid as the mark */
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_3, 666),
+
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+				    offsetof(struct bpf_sock, mark)),
+
+			/* return 1 */
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET,
+		.type = SOCK_DGRAM,
+
+		.optname = SO_MARK,
+		.optval = 666,
+	},
+	{
+		.descr = "AF_INET6 set mark",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+			/* get uid of process */
+			BPF_EMIT_CALL(BPF_FUNC_get_current_uid_gid),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff),
+
+			/* if uid is 0, use given mark(666), else use uid as the mark */
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_3, 666),
+
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+				    offsetof(struct bpf_sock, mark)),
+
+			/* return 1 */
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET6,
+		.type = SOCK_DGRAM,
+
+		.optname = SO_MARK,
+		.optval = 666,
+	},
+	{
+		.descr = "AF_INET bound to iface",
+		.insns = {
+			/* r3 = 1 (lo interface) */
+			BPF_MOV64_IMM(BPF_REG_3, 1),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+				    offsetof(struct bpf_sock, bound_dev_if)),
+
+			/* return 1 */
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET,
+		.type = SOCK_DGRAM,
+
+		.optname = SO_BINDTOIFINDEX,
+		.optval = 1,
+	},
+	{
+		.descr = "AF_INET6 bound to iface",
+		.insns = {
+			/* r3 = 1 (lo interface) */
+			BPF_MOV64_IMM(BPF_REG_3, 1),
+			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+				    offsetof(struct bpf_sock, bound_dev_if)),
+
+			/* return 1 */
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET6,
+		.type = SOCK_DGRAM,
+
+		.optname = SO_BINDTOIFINDEX,
+		.optval = 1,
+	},
+	{
+		.descr = "block AF_INET, SOCK_DGRAM, IPPROTO_ICMP socket",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
+
+			/* sock->family == AF_INET */
+			BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct bpf_sock, family)),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, AF_INET, 5),
+
+			/* sock->type == SOCK_DGRAM */
+			BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct bpf_sock, type)),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, SOCK_DGRAM, 3),
+
+			/* sock->protocol == IPPROTO_ICMP */
+			BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct bpf_sock, protocol)),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, IPPROTO_ICMP, 1),
+
+			/* return 0 (block) */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET,
+		.type = SOCK_DGRAM,
+		.protocol = IPPROTO_ICMP,
+
+		.error = DENY_CREATE,
+	},
+	{
+		.descr = "block AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6 socket",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
+
+			/* sock->family == AF_INET6 */
+			BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct bpf_sock, family)),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, AF_INET6, 5),
+
+			/* sock->type == SOCK_DGRAM */
+			BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct bpf_sock, type)),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, SOCK_DGRAM, 3),
+
+			/* sock->protocol == IPPROTO_ICMPV6 */
+			BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct bpf_sock, protocol)),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, IPPROTO_ICMPV6, 1),
+
+			/* return 0 (block) */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET6,
+		.type = SOCK_DGRAM,
+		.protocol = IPPROTO_ICMPV6,
+
+		.error = DENY_CREATE,
+	},
+	{
+		.descr = "load w/o expected_attach_type (compat mode)",
+		.insns = {
+			/* return 1 */
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.expected_attach_type = 0,
+		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+
+		.domain = AF_INET,
+		.type = SOCK_STREAM,
+	},
+};
+
+static int load_prog(const struct bpf_insn *insns,
+		     enum bpf_attach_type expected_attach_type)
+{
+	LIBBPF_OPTS(bpf_prog_load_opts, opts,
+		    .expected_attach_type = expected_attach_type,
+		    .log_level = 2,
+		    .log_buf = bpf_log_buf,
+		    .log_size = sizeof(bpf_log_buf),
+	);
+	int fd, insns_cnt = 0;
+
+	for (;
+	     insns[insns_cnt].code != (BPF_JMP | BPF_EXIT);
+	     insns_cnt++) {
+	}
+	insns_cnt++;
+
+	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns,
+			   insns_cnt, &opts);
+	if (verbose && fd < 0)
+		fprintf(stderr, "%s\n", bpf_log_buf);
+
+	return fd;
+}
+
+static int run_test(int cgroup_fd, struct sock_create_test *test)
+{
+	int sock_fd, err, prog_fd, optval, ret = -1;
+	socklen_t optlen = sizeof(optval);
+
+	prog_fd = load_prog(test->insns, test->expected_attach_type);
+	if (prog_fd < 0) {
+		log_err("Failed to load BPF program");
+		return -1;
+	}
+
+	err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0);
+	if (err < 0) {
+		log_err("Failed to attach BPF program");
+		goto close_prog_fd;
+	}
+
+	sock_fd = socket(test->domain, test->type, test->protocol);
+	if (sock_fd < 0) {
+		if (test->error == DENY_CREATE)
+			ret = 0;
+		else
+			log_err("Failed to create socket");
+
+		goto detach_prog;
+	}
+
+	if (test->optname) {
+		err = getsockopt(sock_fd, SOL_SOCKET, test->optname, &optval, &optlen);
+		if (err) {
+			log_err("Failed to call getsockopt");
+			goto cleanup;
+		}
+
+		if (optval != test->optval) {
+			errno = 0;
+			log_err("getsockopt returned unexpected optval");
+			goto cleanup;
+		}
+	}
+
+	ret = test->error != OK;
+
+cleanup:
+	close(sock_fd);
+detach_prog:
+	bpf_prog_detach2(prog_fd, cgroup_fd, test->attach_type);
+close_prog_fd:
+	close(prog_fd);
+	return ret;
+}
+
+void test_sock_create(void)
+{
+	int cgroup_fd, i;
+
+	cgroup_fd = test__join_cgroup("/sock_create");
+	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		if (!test__start_subtest(tests[i].descr))
+			continue;
+
+		ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr);
+	}
+
+	close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c
index 810c3740b2cc..788135c9c673 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_post_bind.c
@@ -1,132 +1,35 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018 Facebook
-
-#include <stdio.h>
-#include <unistd.h>
-
-#include <arpa/inet.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-
-#include <linux/filter.h>
-
-#include <bpf/bpf.h>
-
+#include <linux/bpf.h>
+#include <test_progs.h>
 #include "cgroup_helpers.h"
-#include <bpf/bpf_endian.h>
-#include "bpf_util.h"

-#define CG_PATH "/foo"
-#define MAX_INSNS 512
+#define TEST_NS "sock_post_bind"

-char bpf_log_buf[BPF_LOG_BUF_SIZE];
-static bool verbose = false;
+static char bpf_log_buf[4096];

-struct sock_test {
-	const char *descr;
+static struct sock_post_bind_test {
+	const char *descr;
	/* BPF prog properties */
-	struct bpf_insn insns[MAX_INSNS];
-	enum bpf_attach_type expected_attach_type;
-	enum bpf_attach_type attach_type;
+	const struct bpf_insn insns[64];
+	enum bpf_attach_type attach_type;
+	enum bpf_attach_type expected_attach_type;
	/* Socket properties */
-	int domain;
-	int type;
+	int domain;
+	int type;
	/* Endpoint to bind() to */
	const char *ip;
	unsigned short port;
	unsigned short port_retry;
+
	/* Expected test result */
	enum {
-		LOAD_REJECT,
		ATTACH_REJECT,
		BIND_REJECT,
		SUCCESS,
		RETRY_SUCCESS,
		RETRY_REJECT
	} result;
-};
-
-static struct sock_test tests[] = {
-	{
-		.descr = "bind4 load with invalid access: src_ip6",
-		.insns = {
-			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
-				    offsetof(struct bpf_sock, src_ip6[0])),
-			BPF_MOV64_IMM(BPF_REG_0, 1),
-			BPF_EXIT_INSN(),
-		},
-		.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
-		.attach_type = BPF_CGROUP_INET4_POST_BIND,
-		.result = LOAD_REJECT,
-	},
-	{
-		.descr = "bind4 load with invalid access: mark",
-		.insns = {
-			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
-				    offsetof(struct bpf_sock, mark)),
-			BPF_MOV64_IMM(BPF_REG_0, 1),
-			BPF_EXIT_INSN(),
-		},
-		.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
-		.attach_type = BPF_CGROUP_INET4_POST_BIND,
-		.result = LOAD_REJECT,
-	},
-	{
-		.descr = "bind6 load with invalid access: src_ip4",
-		.insns = {
-			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
-				    offsetof(struct bpf_sock, src_ip4)),
-			BPF_MOV64_IMM(BPF_REG_0, 1),
-			BPF_EXIT_INSN(),
-		},
-		.expected_attach_type = BPF_CGROUP_INET6_POST_BIND,
-		.attach_type = BPF_CGROUP_INET6_POST_BIND,
-		.result = LOAD_REJECT,
-	},
-	{
-		.descr = "sock_create load with invalid access: src_port",
-		.insns = {
-			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
-				    offsetof(struct bpf_sock, src_port)),
-			BPF_MOV64_IMM(BPF_REG_0, 1),
-			BPF_EXIT_INSN(),
-		},
-		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
-		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
-		.result = LOAD_REJECT,
-	},
-	{
-		.descr = "sock_create load w/o expected_attach_type (compat mode)",
-		.insns = {
-			BPF_MOV64_IMM(BPF_REG_0, 1),
-			BPF_EXIT_INSN(),
-		},
-		.expected_attach_type = 0,
-		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
-		.domain = AF_INET,
-		.type = SOCK_STREAM,
-		.ip = "127.0.0.1",
-		.port = 8097,
-		.result = SUCCESS,
-	},
-	{
-		.descr = "sock_create load w/ expected_attach_type",
-		.insns = {
-			BPF_MOV64_IMM(BPF_REG_0, 1),
-			BPF_EXIT_INSN(),
-		},
-		.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
-		.attach_type = BPF_CGROUP_INET_SOCK_CREATE,
-		.domain = AF_INET,
-		.type = SOCK_STREAM,
-		.ip = "127.0.0.1",
-		.port = 8097,
-		.result = SUCCESS,
-	},
+} tests[] = {
	{
		.descr = "attach type mismatch bind4 vs bind6",
		.insns = {
@@ -374,40 +277,29 @@ static struct sock_test tests[] = {
	},
 };

-static size_t probe_prog_length(const struct bpf_insn *fp)
-{
-	size_t len;
-
-	for (len = MAX_INSNS - 1; len > 0; --len)
-		if (fp[len].code != 0 || fp[len].imm != 0)
-			break;
-	return len + 1;
-}
-
-static int load_sock_prog(const struct bpf_insn *prog,
-			  enum bpf_attach_type attach_type)
+static int load_prog(const struct bpf_insn *insns,
+		     enum bpf_attach_type expected_attach_type)
 {
-	LIBBPF_OPTS(bpf_prog_load_opts, opts);
-	int ret, insn_cnt;
-
-	insn_cnt = probe_prog_length(prog);
-
-	opts.expected_attach_type = attach_type;
-	opts.log_buf = bpf_log_buf;
-	opts.log_size = BPF_LOG_BUF_SIZE;
-	opts.log_level = 2;
+	LIBBPF_OPTS(bpf_prog_load_opts, opts,
+		    .expected_attach_type = expected_attach_type,
+		    .log_level = 2,
+		    .log_buf = bpf_log_buf,
+		    .log_size = sizeof(bpf_log_buf),
+	);
+	int fd, insns_cnt = 0;
+
+	for (;
+	     insns[insns_cnt].code != (BPF_JMP | BPF_EXIT);
+	     insns_cnt++) {
+	}
+	insns_cnt++;

-	ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", prog, insn_cnt, &opts);
-	if (verbose && ret < 0)
+	fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns,
+			   insns_cnt, &opts);
+	if (fd < 0)
		fprintf(stderr, "%s\n", bpf_log_buf);

-	return ret;
-}
-
-static int attach_sock_prog(int cgfd, int progfd,
-			    enum bpf_attach_type attach_type)
-{
-	return bpf_prog_attach(progfd, cgfd, attach_type, BPF_F_ALLOW_OVERRIDE);
+	return fd;
 }

 static int bind_sock(int domain, int type, const char *ip,
@@ -477,22
+369,16 @@ out: return res; } -static int run_test_case(int cgfd, const struct sock_test *test) +static int run_test(int cgroup_fd, struct sock_post_bind_test *test) { - int progfd = -1; - int err = 0; - int res; - - printf("Test case: %s .. ", test->descr); - progfd = load_sock_prog(test->insns, test->expected_attach_type); - if (progfd < 0) { - if (test->result == LOAD_REJECT) - goto out; - else - goto err; - } + int err, prog_fd, res, ret = 0; - if (attach_sock_prog(cgfd, progfd, test->attach_type) < 0) { + prog_fd = load_prog(test->insns, test->expected_attach_type); + if (prog_fd < 0) + goto err; + + err = bpf_prog_attach(prog_fd, cgroup_fd, test->attach_type, 0); + if (err < 0) { if (test->result == ATTACH_REJECT) goto out; else @@ -503,54 +389,38 @@ static int run_test_case(int cgfd, const struct sock_test *test) test->port_retry); if (res > 0 && test->result == res) goto out; - err: - err = -1; + ret = -1; out: /* Detaching w/o checking return code: best effort attempt. */ - if (progfd != -1) - bpf_prog_detach(cgfd, test->attach_type); - close(progfd); - printf("[%s]\n", err ? "FAIL" : "PASS"); - return err; + if (prog_fd != -1) + bpf_prog_detach(cgroup_fd, test->attach_type); + close(prog_fd); + return ret; } -static int run_tests(int cgfd) +void test_sock_post_bind(void) { - int passes = 0; - int fails = 0; + struct netns_obj *ns; + int cgroup_fd; int i; - for (i = 0; i < ARRAY_SIZE(tests); ++i) { - if (run_test_case(cgfd, &tests[i])) - ++fails; - else - ++passes; - } - printf("Summary: %d PASSED, %d FAILED\n", passes, fails); - return fails ? -1 : 0; -} - -int main(int argc, char **argv) -{ - int cgfd = -1; - int err = 0; + cgroup_fd = test__join_cgroup("/post_bind"); + if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup")) + return; - cgfd = cgroup_setup_and_join(CG_PATH); - if (cgfd < 0) - goto err; + ns = netns_new(TEST_NS, true); + if (!ASSERT_OK_PTR(ns, "netns_new")) + goto cleanup; - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (!test__start_subtest(tests[i].descr)) + continue; - if (run_tests(cgfd)) - goto err; + ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr); + } - goto out; -err: - err = -1; -out: - close(cgfd); - cleanup_cgroup_environment(); - return err; +cleanup: + netns_free(ns); + close(cgroup_fd); } diff --git a/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c b/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c new file mode 100644 index 000000000000..4006879ca3fe --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/struct_ops_private_stack.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "struct_ops_private_stack.skel.h" +#include "struct_ops_private_stack_fail.skel.h" +#include "struct_ops_private_stack_recur.skel.h" + +static void test_private_stack(void) +{ + struct struct_ops_private_stack *skel; + struct bpf_link *link; + int err; + + skel = struct_ops_private_stack__open(); + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack__open")) + return; + + if (skel->data->skip) { + test__skip(); + goto cleanup; + } + + err = struct_ops_private_stack__load(skel); + if (!ASSERT_OK(err, "struct_ops_private_stack__load")) + goto cleanup; + + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto cleanup; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + + ASSERT_EQ(skel->bss->val_i, 3, "val_i"); + ASSERT_EQ(skel->bss->val_j, 8, 
"val_j"); + + bpf_link__destroy(link); + +cleanup: + struct_ops_private_stack__destroy(skel); +} + +static void test_private_stack_fail(void) +{ + struct struct_ops_private_stack_fail *skel; + int err; + + skel = struct_ops_private_stack_fail__open(); + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack_fail__open")) + return; + + if (skel->data->skip) { + test__skip(); + goto cleanup; + } + + err = struct_ops_private_stack_fail__load(skel); + if (!ASSERT_ERR(err, "struct_ops_private_stack_fail__load")) + goto cleanup; + return; + +cleanup: + struct_ops_private_stack_fail__destroy(skel); +} + +static void test_private_stack_recur(void) +{ + struct struct_ops_private_stack_recur *skel; + struct bpf_link *link; + int err; + + skel = struct_ops_private_stack_recur__open(); + if (!ASSERT_OK_PTR(skel, "struct_ops_private_stack_recur__open")) + return; + + if (skel->data->skip) { + test__skip(); + goto cleanup; + } + + err = struct_ops_private_stack_recur__load(skel); + if (!ASSERT_OK(err, "struct_ops_private_stack_recur__load")) + goto cleanup; + + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto cleanup; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + + ASSERT_EQ(skel->bss->val_j, 3, "val_j"); + + bpf_link__destroy(link); + +cleanup: + struct_ops_private_stack_recur__destroy(skel); +} + +void test_struct_ops_private_stack(void) +{ + if (test__start_subtest("private_stack")) + test_private_stack(); + if (test__start_subtest("private_stack_fail")) + test_private_stack_fail(); + if (test__start_subtest("private_stack_recur")) + test_private_stack_recur(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c index 9c31b7004f9c..fdf13ed0152a 100644 --- a/tools/testing/selftests/bpf/prog_tests/subskeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c @@ -46,7 +46,8 @@ static int subskeleton_lib_subresult(struct bpf_object *obj) return result; } -void test_subskeleton(void) +/* initialize and load through skeleton, then instantiate subskeleton out of it */ +static void subtest_skel_subskeleton(void) { int err, result; struct test_subskeleton *skel; @@ -76,3 +77,76 @@ void test_subskeleton(void) cleanup: test_subskeleton__destroy(skel); } + +/* initialize and load through generic bpf_object API, then instantiate subskeleton out of it */ +static void subtest_obj_subskeleton(void) +{ + int err, result; + const void *elf_bytes; + size_t elf_bytes_sz = 0, rodata_sz = 0, bss_sz = 0; + struct bpf_object *obj; + const struct bpf_map *map; + const struct bpf_program *prog; + struct bpf_link *link = NULL; + struct test_subskeleton__rodata *rodata; + struct test_subskeleton__bss *bss; + + elf_bytes = test_subskeleton__elf_bytes(&elf_bytes_sz); + if (!ASSERT_OK_PTR(elf_bytes, "elf_bytes")) + return; + + obj = bpf_object__open_mem(elf_bytes, elf_bytes_sz, NULL); + if (!ASSERT_OK_PTR(obj, "obj_open_mem")) + return; + + map = bpf_object__find_map_by_name(obj, ".rodata"); + if (!ASSERT_OK_PTR(map, "rodata_map_by_name")) + goto cleanup; + + rodata = bpf_map__initial_value(map, &rodata_sz); + if (!ASSERT_OK_PTR(rodata, "rodata_get")) + goto cleanup; + + rodata->rovar1 = 10; + rodata->var1 = 1; + subskeleton_lib_setup(obj); + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + goto cleanup; + + prog = bpf_object__find_program_by_name(obj, "handler1"); + if (!ASSERT_OK_PTR(prog, "prog_by_name")) + goto cleanup; + + link = 
bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "prog_attach")) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + map = bpf_object__find_map_by_name(obj, ".bss"); + if (!ASSERT_OK_PTR(map, "bss_map_by_name")) + goto cleanup; + + bss = bpf_map__initial_value(map, &bss_sz); + if (!ASSERT_OK_PTR(bss, "bss_get")) + goto cleanup; + + result = subskeleton_lib_subresult(obj) * 10; + ASSERT_EQ(bss->out1, result, "out1"); + +cleanup: + bpf_link__destroy(link); + bpf_object__close(obj); +} + + +void test_subskeleton(void) +{ + if (test__start_subtest("skel_subskel")) + subtest_skel_subskeleton(); + if (test__start_subtest("obj_subskel")) + subtest_obj_subskeleton(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c index 21c5a37846ad..544144620ca6 100644 --- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c +++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c @@ -7,6 +7,7 @@ #include "tailcall_bpf2bpf_hierarchy3.skel.h" #include "tailcall_freplace.skel.h" #include "tc_bpf2bpf.skel.h" +#include "tailcall_fail.skel.h" /* test_tailcall_1 checks basic functionality by patching multiple locations * in a single program for a single tail call slot with nop->jmp, jmp->nop @@ -1496,8 +1497,8 @@ static void test_tailcall_bpf2bpf_hierarchy_3(void) RUN_TESTS(tailcall_bpf2bpf_hierarchy3); } -/* test_tailcall_freplace checks that the attached freplace prog is OK to - * update the prog_array map. +/* test_tailcall_freplace checks that the freplace prog fails to update the + * prog_array map, no matter whether the freplace prog attaches to its target. */ static void test_tailcall_freplace(void) { @@ -1505,7 +1506,7 @@ static void test_tailcall_freplace(void) struct bpf_link *freplace_link = NULL; struct bpf_program *freplace_prog; struct tc_bpf2bpf *tc_skel = NULL; - int prog_fd, map_fd; + int prog_fd, tc_prog_fd, map_fd; char buff[128] = {}; int err, key; @@ -1523,9 +1524,10 @@ static void test_tailcall_freplace(void) if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load")) goto out; - prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); + tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); freplace_prog = freplace_skel->progs.entry_freplace; - err = bpf_program__set_attach_target(freplace_prog, prog_fd, "subprog"); + err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd, + "subprog_tc"); if (!ASSERT_OK(err, "set_attach_target")) goto out; @@ -1533,27 +1535,121 @@ static void test_tailcall_freplace(void) if (!ASSERT_OK(err, "tailcall_freplace__load")) goto out; - freplace_link = bpf_program__attach_freplace(freplace_prog, prog_fd, - "subprog"); + map_fd = bpf_map__fd(freplace_skel->maps.jmp_table); + prog_fd = bpf_program__fd(freplace_prog); + key = 0; + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + ASSERT_ERR(err, "update jmp_table failure"); + + freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd, + "subprog_tc"); if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) goto out; - map_fd = bpf_map__fd(freplace_skel->maps.jmp_table); - prog_fd = bpf_program__fd(freplace_prog); + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + ASSERT_ERR(err, "update jmp_table failure"); + +out: + bpf_link__destroy(freplace_link); + tailcall_freplace__destroy(freplace_skel); + tc_bpf2bpf__destroy(tc_skel); +} + +/* test_tailcall_bpf2bpf_freplace checks that attaching a freplace prog to a + * tail callee prog fails, as does updating an extended prog into the + * 
prog_array map. + */ +static void test_tailcall_bpf2bpf_freplace(void) +{ + struct tailcall_freplace *freplace_skel = NULL; + struct bpf_link *freplace_link = NULL; + struct tc_bpf2bpf *tc_skel = NULL; + char buff[128] = {}; + int prog_fd, map_fd; + int err, key; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); + + tc_skel = tc_bpf2bpf__open_and_load(); + if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load")) + goto out; + + prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); + freplace_skel = tailcall_freplace__open(); + if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open")) + goto out; + + err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace, + prog_fd, "subprog_tc"); + if (!ASSERT_OK(err, "set_attach_target")) + goto out; + + err = tailcall_freplace__load(freplace_skel); + if (!ASSERT_OK(err, "tailcall_freplace__load")) + goto out; + + /* OK to attach then detach freplace prog. */ + + freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, + prog_fd, "subprog_tc"); + if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) + goto out; + + err = bpf_link__destroy(freplace_link); + if (!ASSERT_OK(err, "destroy link")) + goto out; + + /* OK to update prog_array map then delete element from the map. */ + key = 0; + map_fd = bpf_map__fd(freplace_skel->maps.jmp_table); err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); if (!ASSERT_OK(err, "update jmp_table")) goto out; - prog_fd = bpf_program__fd(tc_skel->progs.entry_tc); - err = bpf_prog_test_run_opts(prog_fd, &topts); - ASSERT_OK(err, "test_run"); - ASSERT_EQ(topts.retval, 34, "test_run retval"); + err = bpf_map_delete_elem(map_fd, &key); + if (!ASSERT_OK(err, "delete_elem from jmp_table")) + goto out; + + /* Fail to attach a tail callee prog with freplace prog. */ + + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "update jmp_table")) + goto out; + + freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, + prog_fd, "subprog_tc"); + if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure")) + goto out; + + err = bpf_map_delete_elem(map_fd, &key); + if (!ASSERT_OK(err, "delete_elem from jmp_table")) + goto out; + + /* Fail to update an extended prog to prog_array map. 
*/ + + freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace, + prog_fd, "subprog_tc"); + if (!ASSERT_OK_PTR(freplace_link, "attach_freplace")) + goto out; + + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); + if (!ASSERT_ERR(err, "update jmp_table failure")) + goto out; out: bpf_link__destroy(freplace_link); - tc_bpf2bpf__destroy(tc_skel); tailcall_freplace__destroy(freplace_skel); + tc_bpf2bpf__destroy(tc_skel); +} + +static void test_tailcall_failure() +{ + RUN_TESTS(tailcall_fail); } void test_tailcalls(void) @@ -1606,4 +1702,8 @@ void test_tailcalls(void) test_tailcall_bpf2bpf_hierarchy_3(); if (test__start_subtest("tailcall_freplace")) test_tailcall_freplace(); + if (test__start_subtest("tailcall_bpf2bpf_freplace")) + test_tailcall_bpf2bpf_freplace(); + if (test__start_subtest("tailcall_failure")) + test_tailcall_failure(); } diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c index d4579f735398..83b90335967a 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -68,6 +68,74 @@ cleanup: task_kfunc_success__destroy(skel); } +static int run_vpid_test(void *prog_name) +{ + struct task_kfunc_success *skel; + struct bpf_program *prog; + int prog_fd, err = 0; + + if (getpid() != 1) + return 1; + + skel = open_load_task_kfunc_skel(); + if (!skel) + return 2; + + if (skel->bss->err) { + err = 3; + goto cleanup; + } + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!prog) { + err = 4; + goto cleanup; + } + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + err = 5; + goto cleanup; + } + + if (bpf_prog_test_run_opts(prog_fd, NULL)) { + err = 6; + goto cleanup; + } + + if (skel->bss->err) + err = 7 + skel->bss->err; +cleanup: + task_kfunc_success__destroy(skel); + return err; +} + +static void run_vpid_success_test(const char *prog_name) +{ + const int stack_size = 1024 * 1024; + int child_pid, wstatus; + char *stack; + + stack = (char *)malloc(stack_size); + if (!ASSERT_OK_PTR(stack, "clone_stack")) + return; + + child_pid = clone(run_vpid_test, stack + stack_size, + CLONE_NEWPID | SIGCHLD, (void *)prog_name); + if (!ASSERT_GT(child_pid, -1, "child_pid")) + goto cleanup; + + if (!ASSERT_GT(waitpid(child_pid, &wstatus, 0), -1, "waitpid")) + goto cleanup; + + if (WEXITSTATUS(wstatus) > 7) + ASSERT_OK(WEXITSTATUS(wstatus) - 7, "vpid_test_failure"); + else + ASSERT_OK(WEXITSTATUS(wstatus), "run_vpid_test_err"); +cleanup: + free(stack); +} + static const char * const success_tests[] = { "test_task_acquire_release_argument", "test_task_acquire_release_current", @@ -83,6 +151,11 @@ static const char * const success_tests[] = { "test_task_kfunc_flavor_relo_not_found", }; +static const char * const vpid_success_tests[] = { + "test_task_from_vpid_current", + "test_task_from_vpid_invalid", +}; + void test_task_kfunc(void) { int i; @@ -94,5 +167,12 @@ void test_task_kfunc(void) run_success_test(success_tests[i]); } + for (i = 0; i < ARRAY_SIZE(vpid_success_tests); i++) { + if (!test__start_subtest(vpid_success_tests[i])) + continue; + + run_vpid_success_test(vpid_success_tests[i]); + } + RUN_TESTS(task_kfunc_failure); } diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c index c33c05161a9e..60f474d965a9 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c +++ 
b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c @@ -7,12 +7,20 @@ #include <pthread.h> #include <sys/syscall.h> /* For SYS_xxx definitions */ #include <sys/types.h> +#include <sys/eventfd.h> +#include <sys/mman.h> #include <test_progs.h> +#include <bpf/btf.h> #include "task_local_storage_helpers.h" #include "task_local_storage.skel.h" #include "task_local_storage_exit_creds.skel.h" #include "task_ls_recursion.skel.h" #include "task_storage_nodeadlock.skel.h" +#include "uptr_test_common.h" +#include "task_ls_uptr.skel.h" +#include "uptr_update_failure.skel.h" +#include "uptr_failure.skel.h" +#include "uptr_map_failure.skel.h" static void test_sys_enter_exit(void) { @@ -23,14 +31,14 @@ static void test_sys_enter_exit(void) if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) return; - skel->bss->target_pid = syscall(SYS_gettid); + skel->bss->target_pid = sys_gettid(); err = task_local_storage__attach(skel); if (!ASSERT_OK(err, "skel_attach")) goto out; - syscall(SYS_gettid); - syscall(SYS_gettid); + sys_gettid(); + sys_gettid(); /* 3x syscalls: 1x attach and 2x gettid */ ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt"); @@ -99,7 +107,7 @@ static void test_recursion(void) /* trigger sys_enter, make sure it does not cause deadlock */ skel->bss->test_pid = getpid(); - syscall(SYS_gettid); + sys_gettid(); skel->bss->test_pid = 0; task_ls_recursion__detach(skel); @@ -227,6 +235,259 @@ done: sched_setaffinity(getpid(), sizeof(old), &old); } +static struct user_data udata __attribute__((aligned(16))) = { + .a = 1, + .b = 2, +}; + +static struct user_data udata2 __attribute__((aligned(16))) = { + .a = 3, + .b = 4, +}; + +static void check_udata2(int expected) +{ + udata2.result = udata2.nested_result = 0; + usleep(1); + ASSERT_EQ(udata2.result, expected, "udata2.result"); + ASSERT_EQ(udata2.nested_result, expected, "udata2.nested_result"); +} + +static void test_uptr_basic(void) +{ + int map_fd, parent_task_fd, ev_fd; + struct value_type value = {}; + struct task_ls_uptr *skel; + pid_t child_pid, my_tid; + __u64 ev_dummy_data = 1; + int err; + + my_tid = sys_gettid(); + parent_task_fd = sys_pidfd_open(my_tid, 0); + if (!ASSERT_OK_FD(parent_task_fd, "parent_task_fd")) + return; + + ev_fd = eventfd(0, 0); + if (!ASSERT_OK_FD(ev_fd, "ev_fd")) { + close(parent_task_fd); + return; + } + + skel = task_ls_uptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + goto out; + + map_fd = bpf_map__fd(skel->maps.datamap); + value.udata = &udata; + value.nested.udata = &udata; + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_NOEXIST); + if (!ASSERT_OK(err, "update_elem(udata)")) + goto out; + + err = task_ls_uptr__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto out; + + child_pid = fork(); + if (!ASSERT_NEQ(child_pid, -1, "fork")) + goto out; + + /* Call syscall in the child process, but access the map value of + * the parent process in the BPF program to check if the user kptr + * is translated/mapped correctly. + */ + if (child_pid == 0) { + /* child */ + + /* Overwrite the user_data in the child process to check if + * the BPF program accesses the user_data of the parent. 
+ */ + udata.a = 0; + udata.b = 0; + + /* Wait for the parent to set child_pid */ + read(ev_fd, &ev_dummy_data, sizeof(ev_dummy_data)); + exit(0); + } + + skel->bss->parent_pid = my_tid; + skel->bss->target_pid = child_pid; + + write(ev_fd, &ev_dummy_data, sizeof(ev_dummy_data)); + + err = waitpid(child_pid, NULL, 0); + ASSERT_EQ(err, child_pid, "waitpid"); + ASSERT_EQ(udata.result, MAGIC_VALUE + udata.a + udata.b, "udata.result"); + ASSERT_EQ(udata.nested_result, MAGIC_VALUE + udata.a + udata.b, "udata.nested_result"); + + skel->bss->target_pid = my_tid; + + /* update_elem: uptr changes from udata1 to udata2 */ + value.udata = &udata2; + value.nested.udata = &udata2; + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST); + if (!ASSERT_OK(err, "update_elem(udata2)")) + goto out; + check_udata2(MAGIC_VALUE + udata2.a + udata2.b); + + /* update_elem: uptr changes from udata2 uptr to NULL */ + memset(&value, 0, sizeof(value)); + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST); + if (!ASSERT_OK(err, "update_elem(udata2)")) + goto out; + check_udata2(0); + + /* update_elem: uptr changes from NULL to udata2 */ + value.udata = &udata2; + value.nested.udata = &udata2; + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_EXIST); + if (!ASSERT_OK(err, "update_elem(udata2)")) + goto out; + check_udata2(MAGIC_VALUE + udata2.a + udata2.b); + + /* Check if user programs can access the value of user kptrs + * through bpf_map_lookup_elem(). Make sure the kernel value is not + * leaked. + */ + err = bpf_map_lookup_elem(map_fd, &parent_task_fd, &value); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + goto out; + ASSERT_EQ(value.udata, NULL, "value.udata"); + ASSERT_EQ(value.nested.udata, NULL, "value.nested.udata"); + + /* delete_elem */ + err = bpf_map_delete_elem(map_fd, &parent_task_fd); + ASSERT_OK(err, "delete_elem(udata2)"); + check_udata2(0); + + /* update_elem: add uptr back to test map_free */ + value.udata = &udata2; + value.nested.udata = &udata2; + err = bpf_map_update_elem(map_fd, &parent_task_fd, &value, BPF_NOEXIST); + ASSERT_OK(err, "update_elem(udata2)"); + +out: + task_ls_uptr__destroy(skel); + close(ev_fd); + close(parent_task_fd); +} + +static void test_uptr_across_pages(void) +{ + int page_size = getpagesize(); + struct value_type value = {}; + struct task_ls_uptr *skel; + int err, task_fd, map_fd; + void *mem; + + task_fd = sys_pidfd_open(getpid(), 0); + if (!ASSERT_OK_FD(task_fd, "task_fd")) + return; + + mem = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (!ASSERT_OK_PTR(mem, "mmap(page_size * 2)")) { + close(task_fd); + return; + } + + skel = task_ls_uptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + goto out; + + map_fd = bpf_map__fd(skel->maps.datamap); + value.udata = mem + page_size - offsetof(struct user_data, b); + err = bpf_map_update_elem(map_fd, &task_fd, &value, 0); + if (!ASSERT_ERR(err, "update_elem(udata)")) + goto out; + ASSERT_EQ(errno, EOPNOTSUPP, "errno"); + + value.udata = mem + page_size - sizeof(struct user_data); + err = bpf_map_update_elem(map_fd, &task_fd, &value, 0); + ASSERT_OK(err, "update_elem(udata)"); + +out: + task_ls_uptr__destroy(skel); + close(task_fd); + munmap(mem, page_size * 2); +} + +static void test_uptr_update_failure(void) +{ + struct value_lock_type value = {}; + struct uptr_update_failure *skel; + int err, task_fd, map_fd; + + task_fd = sys_pidfd_open(getpid(), 0); + if (!ASSERT_OK_FD(task_fd, "task_fd")) + return; + + 
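	/* A hedged sketch (mirroring uptr_test_common.h, which is not part of
	 * this diff) of how these map values declare user pointers: the field
	 * is tagged with __uptr, i.e. __attribute__((btf_type_tag("uptr"))):
	 *
	 *	struct value_type {
	 *		struct user_data __uptr *udata;
	 *		struct nested_udata nested;
	 *	};
	 *
	 * On bpf_map_update_elem() the kernel pins and maps the page backing
	 * the pointed-to struct, which is why a struct that straddles a page
	 * boundary is rejected with EOPNOTSUPP in test_uptr_across_pages()
	 * above.
	 */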
skel = uptr_update_failure__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + goto out; + + map_fd = bpf_map__fd(skel->maps.datamap); + + value.udata = &udata; + err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_F_LOCK); + if (!ASSERT_ERR(err, "update_elem(udata, BPF_F_LOCK)")) + goto out; + ASSERT_EQ(errno, EOPNOTSUPP, "errno"); + + err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_EXIST); + if (!ASSERT_ERR(err, "update_elem(udata, BPF_EXIST)")) + goto out; + ASSERT_EQ(errno, ENOENT, "errno"); + + err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_NOEXIST); + if (!ASSERT_OK(err, "update_elem(udata, BPF_NOEXIST)")) + goto out; + + value.udata = &udata2; + err = bpf_map_update_elem(map_fd, &task_fd, &value, BPF_NOEXIST); + if (!ASSERT_ERR(err, "update_elem(udata2, BPF_NOEXIST)")) + goto out; + ASSERT_EQ(errno, EEXIST, "errno"); + +out: + uptr_update_failure__destroy(skel); + close(task_fd); +} + +static void test_uptr_map_failure(const char *map_name, int expected_errno) +{ + LIBBPF_OPTS(bpf_map_create_opts, create_attr); + struct uptr_map_failure *skel; + struct bpf_map *map; + struct btf *btf; + int map_fd, err; + + skel = uptr_map_failure__open(); + if (!ASSERT_OK_PTR(skel, "uptr_map_failure__open")) + return; + + map = bpf_object__find_map_by_name(skel->obj, map_name); + btf = bpf_object__btf(skel->obj); + err = btf__load_into_kernel(btf); + if (!ASSERT_OK(err, "btf__load_into_kernel")) + goto done; + + create_attr.map_flags = bpf_map__map_flags(map); + create_attr.btf_fd = btf__fd(btf); + create_attr.btf_key_type_id = bpf_map__btf_key_type_id(map); + create_attr.btf_value_type_id = bpf_map__btf_value_type_id(map); + map_fd = bpf_map_create(bpf_map__type(map), map_name, + bpf_map__key_size(map), bpf_map__value_size(map), + 0, &create_attr); + if (ASSERT_ERR_FD(map_fd, "map_create")) + ASSERT_EQ(errno, expected_errno, "errno"); + else + close(map_fd); + +done: + uptr_map_failure__destroy(skel); +} + void test_task_local_storage(void) { if (test__start_subtest("sys_enter_exit")) @@ -237,4 +498,21 @@ void test_task_local_storage(void) test_recursion(); if (test__start_subtest("nodeadlock")) test_nodeadlock(); + if (test__start_subtest("uptr_basic")) + test_uptr_basic(); + if (test__start_subtest("uptr_across_pages")) + test_uptr_across_pages(); + if (test__start_subtest("uptr_update_failure")) + test_uptr_update_failure(); + if (test__start_subtest("uptr_map_failure_e2big")) { + if (getpagesize() == PAGE_SIZE) + test_uptr_map_failure("large_uptr_map", E2BIG); + else + test__skip(); + } + if (test__start_subtest("uptr_map_failure_size0")) + test_uptr_map_failure("empty_uptr_map", EINVAL); + if (test__start_subtest("uptr_map_failure_kstruct")) + test_uptr_map_failure("kstruct_uptr_map", EINVAL); + RUN_TESTS(uptr_failure); } diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c index 871d16cb95cf..1a2f99596916 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c +++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c @@ -5,6 +5,7 @@ #include <test_progs.h> #include <pthread.h> #include <network_helpers.h> +#include <sys/sysinfo.h> #include "timer_lockup.skel.h" @@ -52,6 +53,11 @@ void test_timer_lockup(void) pthread_t thrds[2]; void *ret; + if (get_nprocs() < 2) { + test__skip(); + return; + } + skel = timer_lockup__open_and_load(); if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/token.c 
b/tools/testing/selftests/bpf/prog_tests/token.c index fe86e4fdb89c..c3ab9b6fb069 100644 --- a/tools/testing/selftests/bpf/prog_tests/token.c +++ b/tools/testing/selftests/bpf/prog_tests/token.c @@ -828,8 +828,12 @@ static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel) return validate_struct_ops_load(mnt_fd, true /* should succeed */); } +static const char *token_bpffs_custom_dir() +{ + return getenv("BPF_SELFTESTS_BPF_TOKEN_DIR") ?: "/tmp/bpf-token-fs"; +} + #define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH" -#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs" static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel) { @@ -892,6 +896,7 @@ static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel) { + const char *custom_dir = token_bpffs_custom_dir(); LIBBPF_OPTS(bpf_object_open_opts, opts); struct dummy_st_ops_success *skel; int err; @@ -909,10 +914,10 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l * BPF token implicitly, unless pointed to it through * LIBBPF_BPF_TOKEN_PATH envvar */ - rmdir(TOKEN_BPFFS_CUSTOM); - if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom")) + rmdir(custom_dir); + if (!ASSERT_OK(mkdir(custom_dir, 0777), "mkdir_bpffs_custom")) goto err_out; - err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH); + err = sys_move_mount(mnt_fd, "", AT_FDCWD, custom_dir, MOVE_MOUNT_F_EMPTY_PATH); if (!ASSERT_OK(err, "move_mount_bpffs")) goto err_out; @@ -925,7 +930,7 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l goto err_out; } - err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/); + err = setenv(TOKEN_ENVVAR, custom_dir, 1 /*overwrite*/); if (!ASSERT_OK(err, "setenv_token_path")) goto err_out; @@ -951,11 +956,11 @@ static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *l if (!ASSERT_ERR(err, "obj_empty_token_path_load")) goto err_out; - rmdir(TOKEN_BPFFS_CUSTOM); + rmdir(custom_dir); unsetenv(TOKEN_ENVVAR); return 0; err_out: - rmdir(TOKEN_BPFFS_CUSTOM); + rmdir(custom_dir); unsetenv(TOKEN_ENVVAR); return -EINVAL; } diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index 844f6fc8487b..2ee17ef1dae2 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -8,6 +8,11 @@ #include "uprobe_multi_usdt.skel.h" #include "uprobe_multi_consumers.skel.h" #include "uprobe_multi_pid_filter.skel.h" +#include "uprobe_multi_session.skel.h" +#include "uprobe_multi_session_single.skel.h" +#include "uprobe_multi_session_cookie.skel.h" +#include "uprobe_multi_session_recursive.skel.h" +#include "uprobe_multi_verifier.skel.h" #include "bpf/libbpf_internal.h" #include "testing_helpers.h" #include "../sdt.h" @@ -34,6 +39,12 @@ noinline void usdt_trigger(void) STAP_PROBE(test, pid_filter_usdt); } +noinline void uprobe_session_recursive(int i) +{ + if (i) + uprobe_session_recursive(i - 1); +} + struct child { int go[2]; int c2p[2]; /* child -> parent channel */ @@ -125,7 +136,7 @@ static void *child_thread(void *ctx) struct child *child = ctx; int c = 0, err; - child->tid = syscall(SYS_gettid); + child->tid = sys_gettid(); /* let parent know we are ready */ err = write(child->c2p[1], &c, 1); @@ -778,7 +789,7 @@ get_link(struct 
uprobe_multi_consumers *skel, int link) } } -static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx) +static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx, unsigned long offset) { struct bpf_program *prog = get_program(skel, idx); struct bpf_link **link = get_link(skel, idx); @@ -787,15 +798,19 @@ static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx) if (!prog || !link) return -1; + opts.offsets = &offset; + opts.cnt = 1; + /* - * bit/prog: 0,1 uprobe entry - * bit/prog: 2,3 uprobe return + * bit/prog: 0 uprobe entry + * bit/prog: 1 uprobe return + * bit/prog: 2 uprobe session with return + * bit/prog: 3 uprobe session without return */ - opts.retprobe = idx == 2 || idx == 3; + opts.retprobe = idx == 1; + opts.session = idx == 2 || idx == 3; - *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", - "uprobe_consumer_test", - &opts); + *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", NULL, &opts); if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi")) return -1; return 0; @@ -816,7 +831,8 @@ static bool test_bit(int bit, unsigned long val) noinline int uprobe_consumer_test(struct uprobe_multi_consumers *skel, - unsigned long before, unsigned long after) + unsigned long before, unsigned long after, + unsigned long offset) { int idx; @@ -829,89 +845,174 @@ uprobe_consumer_test(struct uprobe_multi_consumers *skel, /* ... and attach all new programs in 'after' state */ for (idx = 0; idx < 4; idx++) { if (!test_bit(idx, before) && test_bit(idx, after)) { - if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_after")) + if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_after")) return -1; } } return 0; } -static void consumer_test(struct uprobe_multi_consumers *skel, - unsigned long before, unsigned long after) +/* + * We generate 16 consumer_testX functions that each get a uprobe installed + * and are called in separate threads. All function pointers are stored in the + * "consumers" section and each thread picks one function based on its index. 
+ */ + +extern const void *__start_consumers; + +#define __CONSUMER_TEST(func) \ +noinline int func(struct uprobe_multi_consumers *skel, unsigned long before, \ + unsigned long after, unsigned long offset) \ +{ \ + return uprobe_consumer_test(skel, before, after, offset); \ +} \ +void *__ ## func __used __attribute__((section("consumers"))) = (void *) func; + +#define CONSUMER_TEST(func) __CONSUMER_TEST(func) + +#define C1 CONSUMER_TEST(__PASTE(consumer_test, __COUNTER__)) +#define C4 C1 C1 C1 C1 +#define C16 C4 C4 C4 C4 + +C16 + +typedef int (*test_t)(struct uprobe_multi_consumers *, unsigned long, + unsigned long, unsigned long); + +static int consumer_test(struct uprobe_multi_consumers *skel, + unsigned long before, unsigned long after, + test_t test, unsigned long offset) { - int err, idx; + int err, idx, ret = -1; printf("consumer_test before %lu after %lu\n", before, after); /* for each bit set in 'before', attach the corresponding uprobe */ for (idx = 0; idx < 4; idx++) { if (test_bit(idx, before)) { - if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_before")) + if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_before")) goto cleanup; } } - err = uprobe_consumer_test(skel, before, after); + err = test(skel, before, after, offset); if (!ASSERT_EQ(err, 0, "uprobe_consumer_test")) goto cleanup; for (idx = 0; idx < 4; idx++) { + bool uret_stays, uret_survives; const char *fmt = "BUG"; __u64 val = 0; - if (idx < 2) { + switch (idx) { + case 0: /* * uprobe entry * +1 if defined in 'before' */ if (test_bit(idx, before)) val++; - fmt = "prog 0/1: uprobe"; - } else { + fmt = "prog 0: uprobe"; + break; + case 1: /* - * uprobe return is tricky ;-) - * - * to trigger uretprobe consumer, the uretprobe needs to be installed, - * which means one of the 'return' uprobes was alive when probe was hit: - * - * idxs: 2/3 uprobe return in 'installed' mask - * - * in addition if 'after' state removes everything that was installed in - * 'before' state, then uprobe kernel object goes away and return uprobe - * is not installed and we won't hit it even if it's in 'after' state. 
+ * To trigger uretprobe consumer, the uretprobe under test either stayed from + * before to after (uret_stays + test_bit) or uretprobe instance survived and + * we have uretprobe active in after (uret_survives + test_bit) */ - unsigned long had_uretprobes = before & 0b1100; /* is uretprobe installed */ - unsigned long probe_preserved = before & after; /* did uprobe go away */ + uret_stays = before & after & 0b0110; + uret_survives = ((before & 0b0110) && (after & 0b0110) && (before & 0b1001)); - if (had_uretprobes && probe_preserved && test_bit(idx, after)) + if ((uret_stays || uret_survives) && test_bit(idx, after)) val++; - fmt = "idx 2/3: uretprobe"; + fmt = "prog 1: uretprobe"; + break; + case 2: + /* + * session with return + * +1 if defined in 'before' + * +1 if defined in 'after' + */ + if (test_bit(idx, before)) { + val++; + if (test_bit(idx, after)) + val++; + } + fmt = "prog 2: session with return"; + break; + case 3: + /* + * session without return + * +1 if defined in 'before' + */ + if (test_bit(idx, before)) + val++; + fmt = "prog 3: session with NO return"; + break; } - ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt); + if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt)) + goto cleanup; skel->bss->uprobe_result[idx] = 0; } + ret = 0; + cleanup: for (idx = 0; idx < 4; idx++) uprobe_detach(skel, idx); + return ret; } -static void test_consumers(void) +#define CONSUMER_MAX 16 + +/* + * Each thread runs 1/16 of the load by running test for single + * 'before' number (based on thread index) and full scale of + * 'after' numbers. + */ +static void *consumer_thread(void *arg) { + unsigned long idx = (unsigned long) arg; struct uprobe_multi_consumers *skel; - int before, after; + unsigned long offset; + const void *func; + int after; skel = uprobe_multi_consumers__open_and_load(); if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load")) - return; + return NULL; + + func = *((&__start_consumers) + idx); + + offset = get_uprobe_offset(func); + if (!ASSERT_GE(offset, 0, "uprobe_offset")) + goto out; + + for (after = 0; after < CONSUMER_MAX; after++) + if (consumer_test(skel, idx, after, func, offset)) + goto out; + +out: + uprobe_multi_consumers__destroy(skel); + return NULL; +} + + +static void test_consumers(void) +{ + pthread_t pt[CONSUMER_MAX]; + unsigned long idx; + int err; /* * The idea of this test is to try all possible combinations of * uprobes consumers attached on single function. * - * - 2 uprobe entry consumer - * - 2 uprobe exit consumers + * - 1 uprobe entry consumer + * - 1 uprobe exit consumer + * - 1 uprobe session with return + * - 1 uprobe session without return * * The test uses 4 uprobes attached on single function, but that * translates into single uprobe with 4 consumers in kernel. 
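 *
 * A hedged illustration (not part of this patch) of the BPF side: a session
 * consumer is a single program attached via SEC("uprobe.session") that runs
 * at function entry and, if that entry run returned 0, once more at function
 * return; a nonzero return from the entry run suppresses the return run for
 * that uprobe instance:
 *
 *	SEC("uprobe.session")
 *	int uprobe_session(struct pt_regs *ctx)
 *	{
 *		if (bpf_session_is_return())
 *			return 0;	// value is ignored on the return run
 *		return 1;		// nonzero: skip the return run
 *	}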
@@ -919,37 +1020,38 @@ static void test_consumers(void) * The before/after values present the state of attached consumers * before and after the probed function: * - * bit/prog 0,1 : uprobe entry - * bit/prog 2,3 : uprobe return + * bit/prog 0 : uprobe entry + * bit/prog 1 : uprobe return * * For example for: * - * before = 0b0101 - * after = 0b0110 + * before = 0b01 + * after = 0b10 * * it means that before we call 'uprobe_consumer_test' we attach * uprobes defined in 'before' value: * - * - bit/prog 0: uprobe entry - * - bit/prog 2: uprobe return + * - bit/prog 1: uprobe entry * * uprobe_consumer_test is called and inside it we attach and detach * uprobes based on 'after' value: * - * - bit/prog 0: stays untouched - * - bit/prog 2: uprobe return is detached + * - bit/prog 0: is detached + * - bit/prog 1: is attached * * uprobe_consumer_test returns and we check counters values increased * by bpf programs on each uprobe to match the expected count based on * before/after bits. */ - for (before = 0; before < 16; before++) { - for (after = 0; after < 16; after++) - consumer_test(skel, before, after); + for (idx = 0; idx < CONSUMER_MAX; idx++) { + err = pthread_create(&pt[idx], NULL, consumer_thread, (void *) idx); + if (!ASSERT_OK(err, "pthread_create")) + break; } - uprobe_multi_consumers__destroy(skel); + while (idx) + pthread_join(pt[--idx], NULL); } static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx) @@ -1016,6 +1118,156 @@ static void test_pid_filter_process(bool clone_vm) uprobe_multi_pid_filter__destroy(skel); } +static void test_session_skel_api(void) +{ + struct uprobe_multi_session *skel = NULL; + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct bpf_link *link = NULL; + int err; + + skel = uprobe_multi_session__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_session__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + skel->bss->user_ptr = test_data; + + err = uprobe_multi_session__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_session__attach")) + goto cleanup; + + /* trigger all probes */ + skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; + skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; + skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + /* + * We expect 2 for uprobe_multi_func_2 because it runs both entry/return probe, + * uprobe_multi_func_[13] run just the entry probe. All expected numbers are + * doubled, because we run extra test for sleepable session. 
+ */ + ASSERT_EQ(skel->bss->uprobe_session_result[0], 2, "uprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uprobe_session_result[1], 4, "uprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uprobe_session_result[2], 2, "uprobe_multi_func_3_result"); + + /* We expect increase in 3 entry and 1 return session calls -> 4 */ + ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 4, "uprobe_multi_sleep_result"); + +cleanup: + bpf_link__destroy(link); + uprobe_multi_session__destroy(skel); +} + +static void test_session_single_skel_api(void) +{ + struct uprobe_multi_session_single *skel = NULL; + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + int err; + + skel = uprobe_multi_session_single__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_single__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + err = uprobe_multi_session_single__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_session_single__attach")) + goto cleanup; + + uprobe_multi_func_1(); + + /* + * We expect consumer 0 and 2 to trigger just entry handler (value 1) + * and consumer 1 to hit both (value 2). + */ + ASSERT_EQ(skel->bss->uprobe_session_result[0], 1, "uprobe_session_result_0"); + ASSERT_EQ(skel->bss->uprobe_session_result[1], 2, "uprobe_session_result_1"); + ASSERT_EQ(skel->bss->uprobe_session_result[2], 1, "uprobe_session_result_2"); + +cleanup: + uprobe_multi_session_single__destroy(skel); +} + +static void test_session_cookie_skel_api(void) +{ + struct uprobe_multi_session_cookie *skel = NULL; + int err; + + skel = uprobe_multi_session_cookie__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_cookie__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + err = uprobe_multi_session_cookie__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_session_cookie__attach")) + goto cleanup; + + /* trigger all probes */ + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + ASSERT_EQ(skel->bss->test_uprobe_1_result, 1, "test_uprobe_1_result"); + ASSERT_EQ(skel->bss->test_uprobe_2_result, 2, "test_uprobe_2_result"); + ASSERT_EQ(skel->bss->test_uprobe_3_result, 3, "test_uprobe_3_result"); + +cleanup: + uprobe_multi_session_cookie__destroy(skel); +} + +static void test_session_recursive_skel_api(void) +{ + struct uprobe_multi_session_recursive *skel = NULL; + int i, err; + + skel = uprobe_multi_session_recursive__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_recursive__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + err = uprobe_multi_session_recursive__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi_session_recursive__attach")) + goto cleanup; + + for (i = 0; i < ARRAY_SIZE(skel->bss->test_uprobe_cookie_entry); i++) + skel->bss->test_uprobe_cookie_entry[i] = i + 1; + + uprobe_session_recursive(5); + + /* + * entry uprobe: + * uprobe_session_recursive(5) { *cookie = 1, return 0 + * uprobe_session_recursive(4) { *cookie = 2, return 1 + * uprobe_session_recursive(3) { *cookie = 3, return 0 + * uprobe_session_recursive(2) { *cookie = 4, return 1 + * uprobe_session_recursive(1) { *cookie = 5, return 0 + * uprobe_session_recursive(0) { *cookie = 6, return 1 + * return uprobe: + * } i = 0 not executed + * } i = 1 test_uprobe_cookie_return[0] = 5 + * } i = 2 not executed + * } i = 3 test_uprobe_cookie_return[1] = 3 + * } i = 4 not executed + * } i = 5 test_uprobe_cookie_return[2] = 1 + */ + + ASSERT_EQ(skel->bss->idx_entry, 6, "idx_entry"); + ASSERT_EQ(skel->bss->idx_return, 3, "idx_return"); + + 
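	/* A hedged sketch of the BPF side that produces the pairing above (the
	 * real program is progs/uprobe_multi_session_recursive.c, not shown in
	 * this hunk): the entry run stores a per-instance cookie, and its
	 * return value decides whether the return run happens at all:
	 *
	 *	SEC("uprobe.session")
	 *	int uprobe_recursive(struct pt_regs *ctx)
	 *	{
	 *		__u64 *cookie = bpf_session_cookie();
	 *
	 *		if (bpf_session_is_return()) {
	 *			test_uprobe_cookie_return[idx_return++] = *cookie;
	 *			return 0;
	 *		}
	 *		*cookie = test_uprobe_cookie_entry[idx_entry++];
	 *		return idx_entry % 2 == 0;	// even entries skip their return run
	 *	}
	 */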
ASSERT_EQ(skel->bss->test_uprobe_cookie_return[0], 5, "test_uprobe_cookie_return[0]"); + ASSERT_EQ(skel->bss->test_uprobe_cookie_return[1], 3, "test_uprobe_cookie_return[1]"); + ASSERT_EQ(skel->bss->test_uprobe_cookie_return[2], 1, "test_uprobe_cookie_return[2]"); + +cleanup: + uprobe_multi_session_recursive__destroy(skel); +} + static void test_bench_attach_uprobe(void) { long attach_start_ns = 0, attach_end_ns = 0; @@ -1112,4 +1364,13 @@ void test_uprobe_multi_test(void) test_pid_filter_process(false); if (test__start_subtest("filter_clone_vm")) test_pid_filter_process(true); + if (test__start_subtest("session")) + test_session_skel_api(); + if (test__start_subtest("session_single")) + test_session_single_skel_api(); + if (test__start_subtest("session_cookie")) + test_session_cookie_skel_api(); + if (test__start_subtest("session_cookie_recursive")) + test_session_recursive_skel_api(); + RUN_TESTS(uprobe_multi_verifier); } diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 75f7a2ce334b..d9f65adb456b 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -61,6 +61,7 @@ #include "verifier_or_jmp32_k.skel.h" #include "verifier_precision.skel.h" #include "verifier_prevent_map_lookup.skel.h" +#include "verifier_private_stack.skel.h" #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" #include "verifier_reg_equal.skel.h" @@ -188,6 +189,7 @@ void test_verifier_bpf_fastcall(void) { RUN(verifier_bpf_fastcall); } void test_verifier_or_jmp32_k(void) { RUN(verifier_or_jmp32_k); } void test_verifier_precision(void) { RUN(verifier_precision); } void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); } +void test_verifier_private_stack(void) { RUN(verifier_private_stack); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h deleted file mode 100644 index c41ee80533ca..000000000000 --- a/tools/testing/selftests/bpf/progs/bpf_iter.h +++ /dev/null @@ -1,167 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used -#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used -#define bpf_iter__netlink bpf_iter__netlink___not_used -#define bpf_iter__task bpf_iter__task___not_used -#define bpf_iter__task_file bpf_iter__task_file___not_used -#define bpf_iter__task_vma bpf_iter__task_vma___not_used -#define bpf_iter__tcp bpf_iter__tcp___not_used -#define tcp6_sock tcp6_sock___not_used -#define bpf_iter__udp bpf_iter__udp___not_used -#define udp6_sock udp6_sock___not_used -#define bpf_iter__unix bpf_iter__unix___not_used -#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used -#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used -#define bpf_iter__sockmap bpf_iter__sockmap___not_used -#define bpf_iter__bpf_link bpf_iter__bpf_link___not_used -#define bpf_iter__cgroup bpf_iter__cgroup___not_used -#define btf_ptr btf_ptr___not_used -#define BTF_F_COMPACT BTF_F_COMPACT___not_used -#define BTF_F_NONAME BTF_F_NONAME___not_used -#define 
BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used -#define BTF_F_ZERO BTF_F_ZERO___not_used -#define bpf_iter__ksym bpf_iter__ksym___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__bpf_map -#undef bpf_iter__ipv6_route -#undef bpf_iter__netlink -#undef bpf_iter__task -#undef bpf_iter__task_file -#undef bpf_iter__task_vma -#undef bpf_iter__tcp -#undef tcp6_sock -#undef bpf_iter__udp -#undef udp6_sock -#undef bpf_iter__unix -#undef bpf_iter__bpf_map_elem -#undef bpf_iter__bpf_sk_storage_map -#undef bpf_iter__sockmap -#undef bpf_iter__bpf_link -#undef bpf_iter__cgroup -#undef btf_ptr -#undef BTF_F_COMPACT -#undef BTF_F_NONAME -#undef BTF_F_PTR_RAW -#undef BTF_F_ZERO -#undef bpf_iter__ksym - -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__ipv6_route { - struct bpf_iter_meta *meta; - struct fib6_info *rt; -} __attribute__((preserve_access_index)); - -struct bpf_iter__netlink { - struct bpf_iter_meta *meta; - struct netlink_sock *sk; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task { - struct bpf_iter_meta *meta; - struct task_struct *task; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task_file { - struct bpf_iter_meta *meta; - struct task_struct *task; - __u32 fd; - struct file *file; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task_vma { - struct bpf_iter_meta *meta; - struct task_struct *task; - struct vm_area_struct *vma; -} __attribute__((preserve_access_index)); - -struct bpf_iter__bpf_map { - struct bpf_iter_meta *meta; - struct bpf_map *map; -} __attribute__((preserve_access_index)); - -struct bpf_iter__tcp { - struct bpf_iter_meta *meta; - struct sock_common *sk_common; - uid_t uid; -} __attribute__((preserve_access_index)); - -struct tcp6_sock { - struct tcp_sock tcp; - struct ipv6_pinfo inet6; -} __attribute__((preserve_access_index)); - -struct bpf_iter__udp { - struct bpf_iter_meta *meta; - struct udp_sock *udp_sk; - uid_t uid __attribute__((aligned(8))); - int bucket __attribute__((aligned(8))); -} __attribute__((preserve_access_index)); - -struct udp6_sock { - struct udp_sock udp; - struct ipv6_pinfo inet6; -} __attribute__((preserve_access_index)); - -struct bpf_iter__unix { - struct bpf_iter_meta *meta; - struct unix_sock *unix_sk; - uid_t uid; -} __attribute__((preserve_access_index)); - -struct bpf_iter__bpf_map_elem { - struct bpf_iter_meta *meta; - struct bpf_map *map; - void *key; - void *value; -}; - -struct bpf_iter__bpf_sk_storage_map { - struct bpf_iter_meta *meta; - struct bpf_map *map; - struct sock *sk; - void *value; -}; - -struct bpf_iter__sockmap { - struct bpf_iter_meta *meta; - struct bpf_map *map; - void *key; - struct sock *sk; -}; - -struct bpf_iter__bpf_link { - struct bpf_iter_meta *meta; - struct bpf_link *link; -}; - -struct bpf_iter__cgroup { - struct bpf_iter_meta *meta; - struct cgroup *cgroup; -} __attribute__((preserve_access_index)); - -struct btf_ptr { - void *ptr; - __u32 type_id; - __u32 flags; -}; - -enum { - BTF_F_COMPACT = (1ULL << 0), - BTF_F_NONAME = (1ULL << 1), - BTF_F_PTR_RAW = (1ULL << 2), - BTF_F_ZERO = (1ULL << 3), -}; - -struct bpf_iter__ksym { - struct bpf_iter_meta *meta; - struct kallsym_iter *ksym; -}; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c index 564835ba7d51..19710cc0f250 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c +++ 
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
index 564835ba7d51..19710cc0f250 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
index d7a69217fb68..f47da665f7e0 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
index e1af2f8f75a6..7b69e1887705 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Red Hat, Inc. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
index 6c39e86b666f..c868ffb8080f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
index 9f0e0705b2bf..9fdea8cd4c6f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
index 5014a17d6c02..aa529f76c7fc 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
index 6cecab2b32ba..e88dab196e0f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google LLC. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
index c7b8e006b171..eb9642923e1c 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
index 784a610ce039..73a5cf3ba3d3 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
index 521267818f4d..3e725b1fce37 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Oracle and/or its affiliates. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
index a28e51e2dcee..00b2ceae81fb 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
index ec7f91850dec..774d4dbe8189 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c
index eafc877ea460..d92631ec6161 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Amazon.com Inc. or its affiliates. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <limits.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
index f3af0e30cead..317fe49760cc 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Cloudflare */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
index bca8b889cb10..ef2f7c8d9373 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Oracle and/or its affiliates. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
index b0255080662d..959a8d899eaf 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
index 442f4ca39fd7..f5a309455490 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
index 423b39e60b6f..d64ba7ddaed5 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
index 6cbb3393f243..bc10c4e4b4fa 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
index 92267abb462f..d22449c69363 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
index 943f7bba180e..8b072666f9d9 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
index 2a4647f20c46..6b17e7e86a48 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
index dbf61c44acac..56177508798f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
index e3a7575e81d2..9d8b7310d2c2 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
index 1c7304f56b1e..b150bd468824 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
index d5e3df66ad9a..6a4c50497c5e 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
index cf0c485b1ed7..ffbd4b116d17 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
index 5031e21c433f..47ff7754f4fd 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
index e6aefae38894..fea275df9e22 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_unix.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Amazon.com Inc. or its affiliates. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
index ee7455d2623a..174298e122d3 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter.c b/tools/testing/selftests/bpf/progs/cgroup_iter.c
index de03997322a7..f30841997a8d 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_iter.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_iter.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */
-
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
index 5e282c16eadc..a2de95f85648 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
-
-#include "bpf_iter.h"
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c
index 9cceb6521143..fe0f3fa5aab6 100644
--- a/tools/testing/selftests/bpf/progs/exceptions_fail.c
+++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c
@@ -131,7 +131,7 @@ int reject_subprog_with_lock(void *ctx)
}

SEC("?tc")
-__failure __msg("bpf_rcu_read_unlock is missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region")
int reject_with_rcu_read_lock(void *ctx)
{
	bpf_rcu_read_lock();
@@ -147,7 +147,7 @@ __noinline static int throwing_subprog(struct __sk_buff *ctx)
}

SEC("?tc")
-__failure __msg("bpf_rcu_read_unlock is missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region")
int reject_subprog_with_rcu_read_lock(void *ctx)
{
	bpf_rcu_read_lock();
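The message rewrites above track a verifier change: an unbalanced RCU read-side section is now reported at the offending BPF_EXIT instruction rather than by counting missing unlock calls. As a minimal sketch (not from this patch) of the shape the verifier accepts, every exit path has to leave the region first:

	SEC("?tc")
	int rcu_balanced(struct __sk_buff *ctx)
	{
		bpf_rcu_read_lock();
		/* ... read RCU-protected data ... */
		bpf_rcu_read_unlock();	/* must precede every BPF_EXIT */
		return 0;
	}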
diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
new file mode 100644
index 000000000000..b9c8f9457492
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define SLAB_NAME_MAX 32
+
+struct kmem_cache_result {
+	char name[SLAB_NAME_MAX];
+	long obj_size;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(key_size, sizeof(void *));
+	__uint(value_size, SLAB_NAME_MAX);
+	__uint(max_entries, 1);
+} slab_hash SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(struct kmem_cache_result));
+	__uint(max_entries, 1024);
+} slab_result SEC(".maps");
+
+extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
+
+/* Result, will be checked by userspace */
+int task_struct_found;
+int kmem_cache_seen;
+int open_coded_seen;
+
+SEC("iter/kmem_cache")
+int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	struct kmem_cache *s = ctx->s;
+	struct kmem_cache_result *r;
+	int idx;
+
+	if (s) {
+		/* Print each cache to make sure the slab iterator implements
+		 * the seq interface properly; this is also useful for debugging.
+		 */
+		BPF_SEQ_PRINTF(seq, "%s: %u\n", s->name, s->size);
+
+		idx = kmem_cache_seen;
+		r = bpf_map_lookup_elem(&slab_result, &idx);
+		if (r == NULL)
+			return 0;
+
+		kmem_cache_seen++;
+
+		/* Save name and size to match /proc/slabinfo */
+		bpf_probe_read_kernel_str(r->name, sizeof(r->name), s->name);
+		r->obj_size = s->size;
+
+		if (!bpf_strncmp(r->name, 11, "task_struct"))
+			bpf_map_update_elem(&slab_hash, &s, r->name, BPF_NOEXIST);
+	}
+
+	return 0;
+}
+
+SEC("raw_tp/bpf_test_finish")
+int BPF_PROG(check_task_struct)
+{
+	u64 curr = bpf_get_current_task();
+	struct kmem_cache *s;
+	char *name;
+
+	s = bpf_get_kmem_cache(curr);
+	if (s == NULL) {
+		task_struct_found = -1;
+		return 0;
+	}
+	name = bpf_map_lookup_elem(&slab_hash, &s);
+	if (name && !bpf_strncmp(name, 11, "task_struct"))
+		task_struct_found = 1;
+	else
+		task_struct_found = -2;
+	return 0;
+}
+
+SEC("syscall")
+int open_coded_iter(const void *ctx)
+{
+	struct kmem_cache *s;
+
+	bpf_for_each(kmem_cache, s) {
+		struct kmem_cache_result *r;
+
+		r = bpf_map_lookup_elem(&slab_result, &open_coded_seen);
+		if (!r)
+			break;
+
+		if (r->obj_size != s->size)
+			break;
+
+		open_coded_seen++;
+	}
+	return 0;
+}
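For context, a userspace consumer of this iterator could look roughly like the sketch below (not part of the patch; it assumes the generated skeleton header kmem_cache_iter.skel.h and uses the standard libbpf iterator APIs):

	struct kmem_cache_iter *skel = kmem_cache_iter__open_and_load();
	struct bpf_link *link;
	char buf[256];
	ssize_t n;
	int iter_fd;

	link = bpf_program__attach_iter(skel->progs.slab_info_collector, NULL);
	iter_fd = bpf_iter_create(bpf_link__fd(link));

	/* each read() triggers the iterator; output is "name: size" lines */
	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(iter_fd);
	bpf_link__destroy(link);
	kmem_cache_iter__destroy(skel);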
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
new file mode 100644
index 000000000000..288577e81deb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_verifier.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+
+SEC("kprobe.session")
+__success
+int kprobe_session_return_0(struct pt_regs *ctx)
+{
+	return 0;
+}
+
+SEC("kprobe.session")
+__success
+int kprobe_session_return_1(struct pt_regs *ctx)
+{
+	return 1;
+}
+
+SEC("kprobe.session")
+__failure
+__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int kprobe_session_return_2(struct pt_regs *ctx)
+{
+	return 2;
+}
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
index cc79dddac182..049a1f78de3f 100644
--- a/tools/testing/selftests/bpf/progs/linked_funcs1.c
+++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c
@@ -63,6 +63,8 @@ extern int set_output_val2(int x);
/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
__hidden extern void set_output_ctx2(__u64 *ctx);

+void *bpf_cast_to_kern_ctx(void *obj) __ksym;
+
SEC("?raw_tp/sys_enter")
int BPF_PROG(handler1, struct pt_regs *regs, long id)
{
@@ -86,4 +88,10 @@ int BPF_PROG(handler1, struct pt_regs *regs, long id)
	return 0;
}

+/* Generate BTF FUNC record and test linking with duplicate extern functions */
+void kfunc_gen1(void)
+{
+	bpf_cast_to_kern_ctx(0);
+}
+
char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
index 942cc5526ddf..96850759fd8d 100644
--- a/tools/testing/selftests/bpf/progs/linked_funcs2.c
+++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c
@@ -63,6 +63,8 @@ extern int set_output_val1(int x);
/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
__hidden extern void set_output_ctx1(__u64 *ctx);

+void *bpf_cast_to_kern_ctx(void *obj) __ksym;
+
SEC("?raw_tp/sys_enter")
int BPF_PROG(handler2, struct pt_regs *regs, long id)
{
@@ -86,4 +88,10 @@ int BPF_PROG(handler2, struct pt_regs *regs, long id)
	return 0;
}

+/* Generate BTF FUNC record and test linking with duplicate extern functions */
+void kfunc_gen2(void)
+{
+	bpf_cast_to_kern_ctx(0);
+}
+
char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c
index 672fc368d9c4..885377e83607 100644
--- a/tools/testing/selftests/bpf/progs/preempt_lock.c
+++ b/tools/testing/selftests/bpf/progs/preempt_lock.c
@@ -6,7 +6,7 @@
#include "bpf_experimental.h"

SEC("?tc")
-__failure __msg("1 bpf_preempt_enable is missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_1(struct __sk_buff *ctx)
{
	bpf_preempt_disable();
@@ -14,7 +14,7 @@ int preempt_lock_missing_1(struct __sk_buff *ctx)
}

SEC("?tc")
-__failure __msg("2 bpf_preempt_enable(s) are missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2(struct __sk_buff *ctx)
{
	bpf_preempt_disable();
@@ -23,7 +23,7 @@ int preempt_lock_missing_2(struct __sk_buff *ctx)
}

SEC("?tc")
-__failure __msg("3 bpf_preempt_enable(s) are missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_3(struct __sk_buff *ctx)
{
	bpf_preempt_disable();
@@ -33,7 +33,7 @@ int preempt_lock_missing_3(struct __sk_buff *ctx)
}

SEC("?tc")
-__failure __msg("1 bpf_preempt_enable is missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_3_minus_2(struct __sk_buff *ctx)
{
	bpf_preempt_disable();
@@ -55,7 +55,7 @@ static __noinline void preempt_enable(void)
}

SEC("?tc")
-__failure __msg("1 bpf_preempt_enable is missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
{
	preempt_disable();
@@ -63,7 +63,7 @@ int preempt_lock_missing_1_subprog(struct __sk_buff *ctx)
}

SEC("?tc")
-__failure __msg("2 bpf_preempt_enable(s) are missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
{
	preempt_disable();
@@ -72,7 +72,7 @@ int preempt_lock_missing_2_subprog(struct __sk_buff *ctx)
}

SEC("?tc")
-__failure __msg("1 bpf_preempt_enable is missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region")
int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx)
{
	preempt_disable();
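Note that the preempt-disable region is counted by nesting depth: the "_3_minus_2" variant above does three disables followed by two enables, so one level (3 - 2 = 1) is still held at exit and the program is rejected. A balanced sketch (not from the patch) that the verifier accepts:

	SEC("?tc")
	int preempt_balanced(struct __sk_buff *ctx)
	{
		bpf_preempt_disable();
		bpf_preempt_disable();
		/* ... preemption-off work ... */
		bpf_preempt_enable();
		bpf_preempt_enable();	/* nesting count back to zero before exit */
		return 0;
	}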
diff --git a/tools/testing/selftests/bpf/progs/raw_tp_null.c b/tools/testing/selftests/bpf/progs/raw_tp_null.c
new file mode 100644
index 000000000000..457f34c151e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/raw_tp_null.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int tid;
+int i;
+
+SEC("tp_btf/bpf_testmod_test_raw_tp_null")
+int BPF_PROG(test_raw_tp_null, struct sk_buff *skb)
+{
+	struct task_struct *task = bpf_get_current_task_btf();
+
+	if (task->pid != tid)
+		return 0;
+
+	i = i + skb->mark + 1;
+
+	/* The compiler may move the NULL check before this deref, which causes
+	 * the load to fail as a deref of a scalar. Prevent that by using a barrier.
+	 */
+	barrier();
+
+	/* If dead code elimination kicks in, the increment below will
+	 * be removed. For raw_tp programs, we mark input arguments as
+	 * PTR_MAYBE_NULL, so branch prediction should never kick in.
+	 */
+	if (!skb)
+		i += 2;
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
index 56b787a89876..d7fdcabe7d90 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_detach.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -6,5 +6,17 @@

char _license[] SEC("license") = "GPL";

+/*
+ * This subprogram validates that libbpf handles the situation in which a BPF
+ * object has subprograms in the .text section but no entry BPF programs.
+ * At some point this caused issues due to legacy logic that treated such a
+ * subprogram as an entry program (with unknown program type, which would fail).
+ */
+int dangling_subprog(void)
+{
+	/* do nothing, just be here */
+	return 0;
+}
+
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_do_detach;
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
new file mode 100644
index 000000000000..8ea57e5348ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_2(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+	return val_i + a[10] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+	/* stack size 200 bytes */
+	int b[50] = {};
+
+	b[20] = 2;
+	return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+	/* stack size 400 bytes */
+	int a[100] = {};
+
+	a[10] = 1;
+	val_i = subprog1(a);
+	bpf_testmod_ops3_call_test_2();
+	return 0;
+}
+
+SEC("struct_ops")
+int BPF_PROG(test_2)
+{
+	/* stack size 200 bytes */
+	int a[50] = {};
+
+	a[10] = 3;
+	val_j = subprog1(a);
+	return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+	.test_1 = (void *)test_1,
+	.test_2 = (void *)test_2,
+};
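A quick stack-depth tally of the file above explains why it needs private stacks: the deepest chain in test_1 alone is 400 bytes (a[100]) plus subprog1's 200 bytes (b[50]), i.e. 400 + 200 = 600 > MAX_BPF_STACK (512), and the bpf_testmod_ops3_call_test_2() kfunc re-enters test_2 for another 200 + 200 bytes on top. With each struct_ops program running on its own private stack, the combined depth no longer has to fit in one 512-byte frame, which is why this variant is the success case while the _fail and _recur variants below probe the rejected shapes.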
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
new file mode 100644
index 000000000000..1f55ec4cee37
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_2(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+	return val_i + a[10] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+	/* stack size 200 bytes */
+	int b[50] = {};
+
+	b[20] = 2;
+	return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+	/* stack size 100 bytes */
+	int a[25] = {};
+
+	a[10] = 1;
+	val_i = subprog1(a);
+	bpf_testmod_ops3_call_test_2();
+	return 0;
+}
+
+SEC("struct_ops")
+int BPF_PROG(test_2)
+{
+	/* stack size 400 bytes */
+	int a[100] = {};
+
+	a[10] = 3;
+	val_j = subprog1(a);
+	return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+	.test_1 = (void *)test_1,
+	.test_2 = (void *)test_2,
+};
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
new file mode 100644
index 000000000000..f2f300d50988
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+#if defined(__TARGET_ARCH_x86)
+bool skip __attribute((__section__(".data"))) = false;
+#else
+bool skip = true;
+#endif
+
+void bpf_testmod_ops3_call_test_1(void) __ksym;
+
+int val_i, val_j;
+
+__noinline static int subprog2(int *a, int *b)
+{
+	return val_i + a[1] + b[20];
+}
+
+__noinline static int subprog1(int *a)
+{
+	/* stack size 400 bytes */
+	int b[100] = {};
+
+	b[20] = 2;
+	return subprog2(a, b);
+}
+
+
+SEC("struct_ops")
+int BPF_PROG(test_1)
+{
+	/* stack size 20 bytes */
+	int a[5] = {};
+
+	a[1] = 1;
+	val_j += subprog1(a);
+	bpf_testmod_ops3_call_test_1();
+	return 0;
+}
+
+SEC(".struct_ops")
+struct bpf_testmod_ops3 testmod_1 = {
+	.test_1 = (void *)test_1,
+};
diff --git a/tools/testing/selftests/bpf/progs/tailcall_fail.c b/tools/testing/selftests/bpf/progs/tailcall_fail.c
new file mode 100644
index 000000000000..bc77921d2bb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_fail.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+
+private(A) struct bpf_spin_lock lock;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 3);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+SEC("?tc")
+__failure __msg("function calls are not allowed while holding a lock")
+int reject_tail_call_spin_lock(struct __sk_buff *ctx)
+{
+	bpf_spin_lock(&lock);
+	bpf_tail_call_static(ctx, &jmp_table, 0);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call cannot be used inside bpf_rcu_read_lock-ed region")
+int reject_tail_call_rcu_lock(struct __sk_buff *ctx)
+{
+	bpf_rcu_read_lock();
+	bpf_tail_call_static(ctx, &jmp_table, 0);
+	bpf_rcu_read_unlock();
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call cannot be used inside bpf_preempt_disable-ed region")
+int reject_tail_call_preempt_lock(struct __sk_buff *ctx)
+{
+	bpf_guard_preempt();
+	bpf_tail_call_static(ctx, &jmp_table, 0);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("tail_call would lead to reference leak")
+int reject_tail_call_ref(struct __sk_buff *ctx)
+{
+	struct foo { int i; } *p;
+
+	p = bpf_obj_new(typeof(*p));
+	bpf_tail_call_static(ctx, &jmp_table, 0);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
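Since a tail call never returns to the caller, anything still held at that point would leak. A sketch of the accepted pattern (not from the patch; it reuses the jmp_table map declared in the file above) is to release acquired objects before jumping:

	SEC("?tc")
	int tail_call_after_drop(struct __sk_buff *ctx)
	{
		struct foo { int i; } *p;

		p = bpf_obj_new(typeof(*p));
		if (p)
			bpf_obj_drop(p);	/* no references held across the tail call */
		bpf_tail_call_static(ctx, &jmp_table, 0);
		return 0;
	}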
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h
index 6720c4b5be41..e9c4fea7a4bb 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_common.h
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h
@@ -23,6 +23,7 @@ struct {
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
+struct task_struct *bpf_task_from_vpid(s32 vpid) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
index ad88a3796ddf..4c07ea193f72 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
@@ -247,6 +247,20 @@ int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 cl
	return 0;
}

+SEC("tp_btf/task_newtask")
+__failure __msg("Possibly NULL pointer passed to trusted arg0")
+int BPF_PROG(task_kfunc_from_vpid_no_null_check, struct task_struct *task, u64 clone_flags)
+{
+	struct task_struct *acquired;
+
+	acquired = bpf_task_from_vpid(task->pid);
+
+	/* Releasing bpf_task_from_vpid() lookup without a NULL check. */
+	bpf_task_release(acquired);
+
+	return 0;
+}
+
SEC("lsm/task_free")
__failure __msg("R1 must be a rcu pointer")
int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
index a55149015063..5fb4fc19d26a 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
@@ -366,3 +366,54 @@ int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 cl

	return 0;
}
+
+SEC("syscall")
+int test_task_from_vpid_current(const void *ctx)
+{
+	struct task_struct *current, *v_task;
+
+	v_task = bpf_task_from_vpid(1);
+	if (!v_task) {
+		err = 1;
+		return 0;
+	}
+
+	current = bpf_get_current_task_btf();
+
+	/* The current process should be the init process (pid 1) in the new pid namespace. */
+	if (current != v_task)
+		err = 2;
+
+	bpf_task_release(v_task);
+	return 0;
+}
+
+SEC("syscall")
+int test_task_from_vpid_invalid(const void *ctx)
+{
+	struct task_struct *v_task;
+
+	v_task = bpf_task_from_vpid(-1);
+	if (v_task) {
+		err = 1;
+		goto err;
+	}
+
+	/* There should be only one process (current process) in the new pid namespace. */
+	v_task = bpf_task_from_vpid(2);
+	if (v_task) {
+		err = 2;
+		goto err;
+	}
+
+	v_task = bpf_task_from_vpid(9999);
+	if (v_task) {
+		err = 3;
+		goto err;
+	}
+
+	return 0;
+err:
+	bpf_task_release(v_task);
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/task_ls_uptr.c b/tools/testing/selftests/bpf/progs/task_ls_uptr.c
new file mode 100644
index 000000000000..ddbe11b46eef
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/task_ls_uptr.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "uptr_test_common.h"
+
+struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct value_type);
+} datamap SEC(".maps");
+
+pid_t target_pid = 0;
+pid_t parent_pid = 0;
+
+SEC("tp_btf/sys_enter")
+int on_enter(__u64 *ctx)
+{
+	struct task_struct *task, *data_task;
+	struct value_type *ptr;
+	struct user_data *udata;
+	struct cgroup *cgrp;
+
+	task = bpf_get_current_task_btf();
+	if (task->pid != target_pid)
+		return 0;
+
+	data_task = bpf_task_from_pid(parent_pid);
+	if (!data_task)
+		return 0;
+
+	ptr = bpf_task_storage_get(&datamap, data_task, 0, 0);
+	bpf_task_release(data_task);
+	if (!ptr)
+		return 0;
+
+	cgrp = bpf_kptr_xchg(&ptr->cgrp, NULL);
+	if (cgrp) {
+		int lvl = cgrp->level;
+
+		bpf_cgroup_release(cgrp);
+		return lvl;
+	}
+
+	udata = ptr->udata;
+	if (!udata || udata->result)
+		return 0;
+	udata->result = MAGIC_VALUE + udata->a + udata->b;
+
+	udata = ptr->nested.udata;
+	if (udata && !udata->nested_result)
+		udata->nested_result = udata->result;
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
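The uptr_test_common.h header is not part of this excerpt; as a rough sketch (assumed layout, not the actual header), a map value carrying user pointers marks those fields with the __uptr type tag introduced by this series, along these lines:

	/* hypothetical approximation of uptr_test_common.h */
	struct user_data {
		int a;
		int b;
		int result;
		int nested_result;
	};

	struct value_type {
		struct user_data __uptr *udata;	/* pointer into user memory */
		struct cgroup __kptr *cgrp;
		/* ... nested structs may carry their own __uptr fields ... */
	};

Userspace stores a pointer to its own memory in the map value at update time; the kernel pins and kernel-maps the backing page, so the program above can dereference udata directly after the NULL check.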
diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
index 8a0632c37839..d1a57f7d09bd 100644
--- a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
@@ -5,10 +5,11 @@
#include "bpf_misc.h"

__noinline
-int subprog(struct __sk_buff *skb)
+int subprog_tc(struct __sk_buff *skb)
{
	int ret = 1;

+	__sink(skb);
	__sink(ret);
	return ret;
}
@@ -16,7 +17,7 @@ int subprog(struct __sk_buff *skb)
SEC("tc")
int entry_tc(struct __sk_buff *skb)
{
-	return subprog(skb);
+	return subprog_tc(skb);
}

char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
index 92354cd72044..176a355e3062 100644
--- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
@@ -1,27 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook

-#include <linux/bpf.h>
+#include <vmlinux.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>

-__u32 sig = 0, pid = 0, status = 0, signal_thread = 0;
+struct task_struct *bpf_task_from_pid(int pid) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, u64 value) __ksym;
+
+__u32 sig = 0, pid = 0, status = 0, signal_thread = 0, target_pid = 0;

static __always_inline int bpf_send_signal_test(void *ctx)
{
+	struct task_struct *target_task = NULL;
	int ret;
+	u64 value;

	if (status != 0 || pid == 0)
		return 0;

	if ((bpf_get_current_pid_tgid() >> 32) == pid) {
-		if (signal_thread)
-			ret = bpf_send_signal_thread(sig);
-		else
-			ret = bpf_send_signal(sig);
+		if (target_pid) {
+			target_task = bpf_task_from_pid(target_pid);
+			if (!target_task)
+				return 0;
+			value = 8;
+		}
+
+		if (signal_thread) {
+			if (target_pid)
+				ret = bpf_send_signal_task(target_task, sig, PIDTYPE_PID, value);
+			else
+				ret = bpf_send_signal_thread(sig);
+		} else {
+			if (target_pid)
+				ret = bpf_send_signal_task(target_task, sig, PIDTYPE_TGID, value);
+			else
+				ret = bpf_send_signal(sig);
+		}
		if (ret == 0)
			status = 1;
	}

+	if (target_task)
+		bpf_task_release(target_task);
+
	return 0;
}
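Unlike bpf_send_signal()/bpf_send_signal_thread(), which can only target the current task, the bpf_send_signal_task() kfunc takes an explicit task plus a u64 cookie (value = 8 above). Assuming the cookie is delivered through siginfo, which is what the test setup suggests, the receiving process could read it back roughly like this (a sketch, not from the patch):

	/* userspace side, installed with sigaction() and SA_SIGINFO */
	static void handler(int signo, siginfo_t *info, void *ucontext)
	{
		/* assumption: the kfunc's value argument arrives in si_value */
		int cookie = info->si_value.sival_int;	/* expected to be 8 here */
	}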
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
index 43f40c4fe241..1c8b678e2e9a 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
@@ -28,8 +28,8 @@ struct {
	},
};

-SEC(".data.A") struct bpf_spin_lock lockA;
-SEC(".data.B") struct bpf_spin_lock lockB;
+static struct bpf_spin_lock lockA SEC(".data.A");
+static struct bpf_spin_lock lockB SEC(".data.B");

SEC("?tc")
int lock_id_kptr_preserve(void *ctx)
diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
index bba3e37f749b..5aaf2b065f86 100644
--- a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
+++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
@@ -7,7 +7,11 @@
#include "bpf_misc.h"

SEC("tp_btf/bpf_testmod_test_nullable_bare")
-__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+/* This used to be a failure test, but raw_tp nullable arguments can now
+ * directly be dereferenced, whether they have a nullable annotation or not,
+ * and don't need to be explicitly checked.
+ */
+__success
int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx)
{
	return nullable_ctx->len;
diff --git a/tools/testing/selftests/bpf/progs/update_map_in_htab.c b/tools/testing/selftests/bpf/progs/update_map_in_htab.c
new file mode 100644
index 000000000000..c2066247cd9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/update_map_in_htab.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct inner_map_type {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(key_size, 4);
+	__uint(value_size, 4);
+	__uint(max_entries, 1);
+} inner_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__type(key, int);
+	__type(value, int);
+	__uint(max_entries, 2);
+	__array(values, struct inner_map_type);
+} outer_htab_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, int);
+	__uint(max_entries, 2);
+	__array(values, struct inner_map_type);
+} outer_alloc_htab_map SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
index 7e0fdcbbd242..93752bb5690b 100644
--- a/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_consumers.c
@@ -24,16 +24,16 @@ int uprobe_1(struct pt_regs *ctx)
	return 0;
}

-SEC("uprobe.multi")
+SEC("uprobe.session")
int uprobe_2(struct pt_regs *ctx)
{
	uprobe_result[2]++;
	return 0;
}

-SEC("uprobe.multi")
+SEC("uprobe.session")
int uprobe_3(struct pt_regs *ctx)
{
	uprobe_result[3]++;
-	return 0;
+	return 1;
}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session.c
new file mode 100644
index 000000000000..30bff90b68dc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_multi_func_1_addr = 0;
+__u64 uprobe_multi_func_2_addr = 0;
+__u64 uprobe_multi_func_3_addr = 0;
+
+__u64 uprobe_session_result[3] = {};
+__u64 uprobe_multi_sleep_result = 0;
+
+void *user_ptr = 0;
+int pid = 0;
+
+static int uprobe_multi_check(void *ctx, bool is_return)
+{
+	const __u64 funcs[] = {
+		uprobe_multi_func_1_addr,
+		uprobe_multi_func_2_addr,
+		uprobe_multi_func_3_addr,
+	};
+	unsigned int i;
+	__u64 addr;
+
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 1;
+
+	addr = bpf_get_func_ip(ctx);
+
+	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
+		if (funcs[i] == addr) {
+			uprobe_session_result[i]++;
+			break;
+		}
+	}
+
+	/* only uprobe_multi_func_2 executes return probe */
+	if ((addr == uprobe_multi_func_1_addr) ||
+	    (addr == uprobe_multi_func_3_addr))
+		return 1;
+
+	return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_*")
+int uprobe(struct pt_regs *ctx)
+{
+	return uprobe_multi_check(ctx, bpf_session_is_return());
+}
+
+static __always_inline bool verify_sleepable_user_copy(void)
+{
+	char data[9];
+
+	bpf_copy_from_user(data, sizeof(data), user_ptr);
+	return bpf_strncmp(data, sizeof(data), "test_data") == 0;
+}
+
+SEC("uprobe.session.s//proc/self/exe:uprobe_multi_func_*")
+int uprobe_sleepable(struct pt_regs *ctx)
+{
+	if (verify_sleepable_user_copy())
+		uprobe_multi_sleep_result++;
+	return uprobe_multi_check(ctx, bpf_session_is_return());
+}
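The session semantics visible above are worth spelling out: a single "uprobe.session" program is invoked on both function entry and return, bpf_session_is_return() distinguishes the two, and the entry invocation's return value decides whether the return-side invocation happens at all. A minimal sketch (not from the patch):

	SEC("uprobe.session")
	int probe(struct pt_regs *ctx)
	{
		if (bpf_session_is_return()) {
			/* the traced function is returning */
			return 0;
		}
		/* entry: return 0 to also run on return, 1 to skip it */
		return 1;
	}

This is exactly what uprobe_multi_session.c uses to make only uprobe_multi_func_2 trigger the return probe.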
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c
new file mode 100644
index 000000000000..5befdf944dc6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_cookie.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+
+__u64 test_uprobe_1_result = 0;
+__u64 test_uprobe_2_result = 0;
+__u64 test_uprobe_3_result = 0;
+
+static int check_cookie(__u64 val, __u64 *result)
+{
+	__u64 *cookie;
+
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 1;
+
+	cookie = bpf_session_cookie();
+
+	if (bpf_session_is_return())
+		*result = *cookie == val ? val : 0;
+	else
+		*cookie = val;
+	return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_1(struct pt_regs *ctx)
+{
+	return check_cookie(1, &test_uprobe_1_result);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_2")
+int uprobe_2(struct pt_regs *ctx)
+{
+	return check_cookie(2, &test_uprobe_2_result);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_3")
+int uprobe_3(struct pt_regs *ctx)
+{
+	return check_cookie(3, &test_uprobe_3_result);
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c
new file mode 100644
index 000000000000..8fbcd69fae22
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_recursive.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+
+int idx_entry = 0;
+int idx_return = 0;
+
+__u64 test_uprobe_cookie_entry[6];
+__u64 test_uprobe_cookie_return[3];
+
+static int check_cookie(void)
+{
+	__u64 *cookie = bpf_session_cookie();
+
+	if (bpf_session_is_return()) {
+		if (idx_return >= ARRAY_SIZE(test_uprobe_cookie_return))
+			return 1;
+		test_uprobe_cookie_return[idx_return++] = *cookie;
+		return 0;
+	}
+
+	if (idx_entry >= ARRAY_SIZE(test_uprobe_cookie_entry))
+		return 1;
+	*cookie = test_uprobe_cookie_entry[idx_entry];
+	return idx_entry++ % 2;
+}
+
+
+SEC("uprobe.session//proc/self/exe:uprobe_session_recursive")
+int uprobe_recursive(struct pt_regs *ctx)
+{
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 1;
+
+	return check_cookie();
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c b/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c
new file mode 100644
index 000000000000..7c960376ae97
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_session_single.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+#include "bpf_kfuncs.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_session_result[3] = {};
+int pid = 0;
+
+static int uprobe_multi_check(void *ctx, int idx)
+{
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 1;
+
+	uprobe_session_result[idx]++;
+
+	/* only consumer 1 executes return probe */
+	if (idx == 0 || idx == 2)
+		return 1;
+
+	return 0;
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_0(struct pt_regs *ctx)
+{
+	return uprobe_multi_check(ctx, 0);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_1(struct pt_regs *ctx)
+{
+	return uprobe_multi_check(ctx, 1);
+}
+
+SEC("uprobe.session//proc/self/exe:uprobe_multi_func_1")
+int uprobe_2(struct pt_regs *ctx)
+{
+	return uprobe_multi_check(ctx, 2);
+}
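Besides the declarative SEC("uprobe.session") auto-attach used above, a session uprobe can be attached manually; the sketch below is not from the patch, and the opts field name (.session) is an assumption based on this series' libbpf changes:

	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.session = true,	/* assumed opts flag for session mode */
	);
	struct bpf_link *link;

	link = bpf_program__attach_uprobe_multi(skel->progs.uprobe_recursive,
						-1 /* any pid */,
						"/proc/self/exe",
						"uprobe_session_recursive",
						&opts);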
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c b/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c
new file mode 100644
index 000000000000..fe49f2cb5360
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_verifier.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/usdt.bpf.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+
+SEC("uprobe.session")
+__success
+int uprobe_session_return_0(struct pt_regs *ctx)
+{
+	return 0;
+}
+
+SEC("uprobe.session")
+__success
+int uprobe_session_return_1(struct pt_regs *ctx)
+{
+	return 1;
+}
+
+SEC("uprobe.session")
+__failure
+__msg("At program exit the register R0 has smin=2 smax=2 should have been in [0, 1]")
+int uprobe_session_return_2(struct pt_regs *ctx)
+{
+	return 2;
+}
diff --git a/tools/testing/selftests/bpf/progs/uptr_failure.c b/tools/testing/selftests/bpf/progs/uptr_failure.c
new file mode 100644
index 000000000000..0cfa1fd61440
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uptr_failure.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+#include "uptr_test_common.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct value_type);
+} datamap SEC(".maps");
+
+SEC("?syscall")
+__failure __msg("store to uptr disallowed")
+int uptr_write(const void *ctx)
+{
+	struct task_struct *task;
+	struct value_type *v;
+
+	task = bpf_get_current_task_btf();
+	v = bpf_task_storage_get(&datamap, task, 0,
+				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!v)
+		return 0;
+
+	v->udata = NULL;
+	return 0;
+}
+
+SEC("?syscall")
+__failure __msg("store to uptr disallowed")
+int uptr_write_nested(const void *ctx)
+{
+	struct task_struct *task;
+	struct value_type *v;
+
+	task = bpf_get_current_task_btf();
+	v = bpf_task_storage_get(&datamap, task, 0,
+				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!v)
+		return 0;
+
+	v->nested.udata = NULL;
+	return 0;
+}
+
+SEC("?syscall")
+__failure __msg("R1 invalid mem access 'mem_or_null'")
+int uptr_no_null_check(const void *ctx)
+{
+	struct task_struct *task;
+	struct value_type *v;
+
+	task = bpf_get_current_task_btf();
+	v = bpf_task_storage_get(&datamap, task, 0,
+				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!v)
+		return 0;
+
+	v->udata->result = 0;
+
+	return 0;
+}
+
+SEC("?syscall")
+__failure __msg("doesn't point to kptr")
+int uptr_kptr_xchg(const void *ctx)
+{
+	struct task_struct *task;
+	struct value_type *v;
+
+	task = bpf_get_current_task_btf();
+	v = bpf_task_storage_get(&datamap, task, 0,
+				 BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!v)
+		return 0;
+
+	bpf_kptr_xchg(&v->udata, NULL);
+
+	return 0;
+}
+
+SEC("?syscall")
+__failure __msg("invalid mem access 'scalar'")
+int uptr_obj_new(const void *ctx)
+{
+	struct value_type *v;
+
+	v = bpf_obj_new(typeof(*v));
+	if (!v)
+		return 0;
+
+	if (v->udata)
+		v->udata->result = 0;
+
+	bpf_obj_drop(v);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/uptr_map_failure.c b/tools/testing/selftests/bpf/progs/uptr_map_failure.c
new file mode 100644
index 000000000000..417b763d76b4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uptr_map_failure.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "uptr_test_common.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct large_uptr);
+} large_uptr_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct empty_uptr);
+} empty_uptr_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct kstruct_uptr);
+} kstruct_uptr_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/uptr_update_failure.c b/tools/testing/selftests/bpf/progs/uptr_update_failure.c
new file mode 100644
index 000000000000..86c3bb954abc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uptr_update_failure.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "uptr_test_common.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct value_lock_type);
+} datamap SEC(".maps");
+
+/* load test only. not used */
+SEC("syscall")
+int not_used(void *ctx)
+{
+	struct value_lock_type *ptr;
+	struct task_struct *task;
+	struct user_data *udata;
+
+	task = bpf_get_current_task_btf();
+	ptr = bpf_task_storage_get(&datamap, task, 0, 0);
+	if (!ptr)
+		return 0;
+
+	bpf_spin_lock(&ptr->lock);
+
+	udata = ptr->udata;
+	if (!udata) {
+		bpf_spin_unlock(&ptr->lock);
+		return 0;
+	}
+	udata->result = MAGIC_VALUE + udata->a + udata->b;
+
+	bpf_spin_unlock(&ptr->lock);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
index 6065f862d964..f94f30cf1bb8 100644
--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
@@ -29,12 +29,12 @@ int big_alloc1(void *ctx)
	if (!page1)
		return 1;
	*page1 = 1;
-	page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
+	page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2,
				      1, NUMA_NO_NODE, 0);
	if (!page2)
		return 2;
	*page2 = 2;
-	no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE,
+	no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
					1, NUMA_NO_NODE, 0);
	if (no_page)
		return 3;
@@ -66,4 +66,110 @@ int big_alloc1(void *ctx)
#endif
	return 0;
}
+
+#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+#define PAGE_CNT 100
+__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
+__u8 __arena *base;
+
+/*
+ * Check that arena's range_tree algorithm allocates pages sequentially
+ * on the first pass and then fills in all gaps on the second pass.
+ */
+__noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass,
+			   int max_idx, int step)
+{
+	__u8 __arena *pg;
+	int i, pg_idx;
+
+	for (i = 0; i < page_cnt; i++) {
+		pg = bpf_arena_alloc_pages(&arena, NULL, pages_atonce,
+					   NUMA_NO_NODE, 0);
+		if (!pg)
+			return step;
+		pg_idx = (unsigned long) (pg - base) / PAGE_SIZE;
+		if (first_pass) {
+			/* Pages must be allocated sequentially */
+			if (pg_idx != i)
+				return step + 100;
+		} else {
+			/* Allocator must fill into gaps */
+			if (pg_idx >= max_idx || (pg_idx & 1))
+				return step + 200;
+		}
+		*pg = pg_idx;
+		page[pg_idx] = pg;
+		cond_break;
+	}
+	return 0;
+}
+
+SEC("syscall")
+__success __retval(0)
+int big_alloc2(void *ctx)
+{
+	__u8 __arena *pg;
+	int i, err;
+
+	base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+	if (!base)
+		return 1;
+	bpf_arena_free_pages(&arena, (void __arena *)base, 1);
+
+	err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2);
+	if (err)
+		return err;
+
+	/* Clear all even pages */
+	for (i = 0; i < PAGE_CNT; i += 2) {
+		pg = page[i];
+		if (*pg != i)
+			return 3;
+		bpf_arena_free_pages(&arena, (void __arena *)pg, 1);
+		page[i] = NULL;
+		cond_break;
+	}
+
+	/* Allocate into freed gaps */
+	err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4);
+	if (err)
+		return err;
+
+	/* Free pairs of pages */
+	for (i = 0; i < PAGE_CNT; i += 4) {
+		pg = page[i];
+		if (*pg != i)
+			return 5;
+		bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
+		page[i] = NULL;
+		page[i + 1] = NULL;
+		cond_break;
+	}
+
+	/* Allocate 2 pages at a time into freed gaps */
+	err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6);
+	if (err)
+		return err;
+
+	/* Check pages without freeing */
+	for (i = 0; i < PAGE_CNT; i += 2) {
+		pg = page[i];
+		if (*pg != i)
+			return 7;
+		cond_break;
+	}
+
+	pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+
+	if (!pg)
+		return 8;
+	/*
+	 * The first PAGE_CNT pages are occupied. The new page
+	 * must be above.
+	 */
+	if ((pg - base) / PAGE_SIZE < PAGE_CNT)
+		return 9;
+	return 0;
+}
+#endif
char _license[] SEC("license") = "GPL";
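The allocation behavior this test encodes is the contract of the new range_tree-backed arena allocator: with a NULL address hint, bpf_arena_alloc_pages() appears to hand out the lowest free range, so a fresh arena fills sequentially and, once pages are freed, subsequent allocations land in the gaps. A minimal sketch of the call shape (not from the patch, reusing the arena map declared in this file's header):

	/* allocate two contiguous pages at the lowest free offset */
	__u8 __arena *pg = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);

	if (pg)
		bpf_arena_free_pages(&arena, (void __arena *)pg, 2);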
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
new file mode 100644
index 000000000000..b1fbdf119553
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+/* From include/linux/filter.h */
+#define MAX_BPF_STACK 512
+
+#if defined(__TARGET_ARCH_x86)
+
+struct elem {
+	struct bpf_timer t;
+	char pad[256];
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct elem);
+} array SEC(".maps");
+
+SEC("kprobe")
+__description("Private stack, single prog")
+__success
+__arch_x86_64
+__jited("	movabsq	$0x{{.*}}, %r9")
+__jited("	addq	%gs:0x{{.*}}, %r9")
+__jited("	movl	$0x2a, %edi")
+__jited("	movq	%rdi, -0x100(%r9)")
+__naked void private_stack_single_prog(void)
+{
+	asm volatile ("			\
+	r1 = 42;			\
+	*(u64 *)(r10 - 256) = r1;	\
+	r0 = 0;				\
+	exit;				\
+"	::: __clobber_all);
+}
+
+SEC("raw_tp")
+__description("No private stack")
+__success
+__arch_x86_64
+__jited("	subq	$0x8, %rsp")
+__naked void no_private_stack_nested(void)
+{
+	asm volatile ("			\
+	r1 = 42;			\
+	*(u64 *)(r10 - 8) = r1;		\
+	r0 = 0;				\
+	exit;				\
+"	::: __clobber_all);
+}
+
+__used
+__naked static void cumulative_stack_depth_subprog(void)
+{
+	asm volatile ("				\
+	r1 = 41;				\
+	*(u64 *)(r10 - 32) = r1;		\
+	call %[bpf_get_smp_processor_id];	\
+	exit;					\
+"	:
+	: __imm(bpf_get_smp_processor_id)
+	: __clobber_all);
+}
+
+SEC("kprobe")
+__description("Private stack, subtree > MAX_BPF_STACK")
+__success
+__arch_x86_64
+/* private stack fp for the main prog */
+__jited("	movabsq	$0x{{.*}}, %r9")
+__jited("	addq	%gs:0x{{.*}}, %r9")
+__jited("	movl	$0x2a, %edi")
+__jited("	movq	%rdi, -0x200(%r9)")
+__jited("	pushq	%r9")
+__jited("	callq	0x{{.*}}")
+__jited("	popq	%r9")
+__jited("	xorl	%eax, %eax")
+__naked void private_stack_nested_1(void)
+{
+	asm volatile ("				\
+	r1 = 42;				\
+	*(u64 *)(r10 - %[max_bpf_stack]) = r1;	\
+	call cumulative_stack_depth_subprog;	\
+	r0 = 0;					\
+	exit;					\
+"	:
+	: __imm_const(max_bpf_stack, MAX_BPF_STACK)
+	: __clobber_all);
+}
+
+__naked __noinline __used
+static unsigned long loop_callback(void)
+{
+	asm volatile ("				\
+	call %[bpf_get_prandom_u32];		\
+	r1 = 42;				\
+	*(u64 *)(r10 - 512) = r1;		\
+	call cumulative_stack_depth_subprog;	\
+	r0 = 0;					\
+	exit;					\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_common);
+}
+
+SEC("raw_tp")
+__description("Private stack, callback")
+__success
+__arch_x86_64
+/* for func loop_callback */
+__jited("func #1")
+__jited("	endbr64")
+__jited("	nopl	(%rax,%rax)")
+__jited("	nopl	(%rax)")
+__jited("	pushq	%rbp")
+__jited("	movq	%rsp, %rbp")
+__jited("	endbr64")
+__jited("	movabsq	$0x{{.*}}, %r9")
+__jited("	addq	%gs:0x{{.*}}, %r9")
+__jited("	pushq	%r9")
+__jited("	callq")
+__jited("	popq	%r9")
+__jited("	movl	$0x2a, %edi")
+__jited("	movq	%rdi, -0x200(%r9)")
+__jited("	pushq	%r9")
+__jited("	callq")
+__jited("	popq	%r9")
+__naked void private_stack_callback(void)
+{
+	asm volatile ("			\
+	r1 = 1;				\
+	r2 = %[loop_callback];		\
+	r3 = 0;				\
+	r4 = 0;				\
+	call %[bpf_loop];		\
+	r0 = 0;				\
+	exit;				\
+"	:
+	: __imm_ptr(loop_callback),
+	  __imm(bpf_loop)
+	: __clobber_common);
+}
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, exception in main prog")
+__success __retval(0)
+__arch_x86_64
+__jited("	pushq %r9")
+__jited("	callq")
+__jited("	popq %r9")
+int private_stack_exception_main_prog(void)
+{
+	asm volatile ("			\
+	r1 = 42;			\
+	*(u64 *)(r10 - 512) = r1;	\
+"	::: __clobber_common);
+
+	bpf_throw(0);
+	return 0;
+}
+
+__used static int subprog_exception(void)
+{
+	bpf_throw(0);
+	return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, exception in subprog")
+__success __retval(0)
+__arch_x86_64
+__jited("	movq %rdi, -0x200(%r9)")
+__jited("	pushq %r9")
+__jited("	callq")
+__jited("	popq %r9")
+int private_stack_exception_sub_prog(void)
+{
+	asm volatile ("			\
+	r1 = 42;			\
+	*(u64 *)(r10 - 512) = r1;	\
+	call subprog_exception;		\
+"	::: __clobber_common);
+
+	return 0;
+}
+
+int glob;
+__noinline static void subprog2(int *val)
+{
+	glob += val[0] * 2;
+}
+
+__noinline static void subprog1(int *val)
+{
+	int tmp[64] = {};
+
+	tmp[0] = *val;
+	subprog2(tmp);
+}
+
+__noinline static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
+{
+	subprog1(key);
+	return 0;
+}
+
+__noinline static int timer_cb2(void *map, int *key, struct bpf_timer *timer)
+{
+	return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, async callback, not nested")
+__success __retval(0)
+__arch_x86_64
+__jited("	movabsq $0x{{.*}}, %r9")
+int private_stack_async_callback_1(void)
+{
+	struct bpf_timer *arr_timer;
+	int array_key = 0;
+
+	arr_timer = bpf_map_lookup_elem(&array, &array_key);
+	if (!arr_timer)
+		return 0;
+
+	bpf_timer_init(arr_timer, &array, 1);
+	bpf_timer_set_callback(arr_timer, timer_cb2);
+	bpf_timer_start(arr_timer, 0, 0);
+	subprog1(&array_key);
+	return 0;
+}
+
+SEC("fentry/bpf_fentry_test9")
+__description("Private stack, async callback, potential nesting")
+__success __retval(0)
+__arch_x86_64
+__jited("	subq $0x100, %rsp")
+int private_stack_async_callback_2(void)
+{
+	struct bpf_timer *arr_timer;
+	int array_key = 0;
+
+	arr_timer = bpf_map_lookup_elem(&array, &array_key);
+	if (!arr_timer)
+		return 0;
+
+	bpf_timer_init(arr_timer, &array, 1);
+	bpf_timer_set_callback(arr_timer, timer_cb1);
+	bpf_timer_start(arr_timer, 0, 0);
+	subprog1(&array_key);
+	return 0;
+}
+
+#else
+
+SEC("kprobe")
+__description("private stack is not supported, use a dummy test")
+__success
+int dummy_test(void)
+{
+	return 0;
+}
+
+#endif
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
index c4c6da21265e..683a882b3e6d 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c
@@ -791,7 +791,7 @@ l0_%=:	r0 = *(u8*)skb[0];	\
 
 SEC("tc")
 __description("reference tracking: forbid LD_ABS while holding reference")
-__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references")
+__failure __msg("BPF_LD_[ABS|IND] would lead to reference leak")
 __naked void ld_abs_while_holding_reference(void)
 {
 	asm volatile ("			\
@@ -836,7 +836,7 @@ l0_%=:	r7 = 1;	\
 
 SEC("tc")
 __description("reference tracking: forbid LD_IND while holding reference")
-__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references")
+__failure __msg("BPF_LD_[ABS|IND] would lead to reference leak")
 __naked void ld_ind_while_holding_reference(void)
 {
 	asm volatile ("			\
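The verifier_ref_tracking.c hunks above change only the expected log text: __msg() does substring matching against the verifier log, so rewording a rejection message in the kernel has to be mirrored in the selftests. A hedged sketch of the same pattern, where the expected message wording is an assumption rather than something taken from this patch:

    SEC("tc")
    __description("sketch: exit without setting R0 is rejected")
    __failure __msg("R0 !read_ok")
    __naked void sketch_r0_not_set(void)
    {
            asm volatile ("                 \
            exit;                           \
    "       ::: __clobber_all);
    }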
diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c
index ee76b51005ab..d3e70e38e442 100644
--- a/tools/testing/selftests/bpf/progs/verifier_sock.c
+++ b/tools/testing/selftests/bpf/progs/verifier_sock.c
@@ -977,4 +977,64 @@ l1_%=:	r0 = *(u8*)(r7 + 0);	\
 	: __clobber_all);
 }
 
+SEC("cgroup/post_bind4")
+__description("sk->src_ip6[0] [load 1st byte]")
+__failure __msg("invalid bpf_context access off=28 size=2")
+__naked void post_bind4_read_src_ip6(void)
+{
+	asm volatile ("					\
+	r6 = r1;					\
+	r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]);	\
+	r0 = 1;						\
+	exit;						\
+"	:
+	: __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0]))
+	: __clobber_all);
+}
+
+SEC("cgroup/post_bind4")
+__description("sk->mark [load mark]")
+__failure __msg("invalid bpf_context access off=16 size=2")
+__naked void post_bind4_read_mark(void)
+{
+	asm volatile ("				\
+	r6 = r1;				\
+	r7 = *(u16*)(r6 + %[bpf_sock_mark]);	\
+	r0 = 1;					\
+	exit;					\
+"	:
+	: __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark))
+	: __clobber_all);
+}
+
+SEC("cgroup/post_bind6")
+__description("sk->src_ip4 [load src_ip4]")
+__failure __msg("invalid bpf_context access off=24 size=2")
+__naked void post_bind6_read_src_ip4(void)
+{
+	asm volatile ("				\
+	r6 = r1;				\
+	r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]);	\
+	r0 = 1;					\
+	exit;					\
+"	:
+	: __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4))
+	: __clobber_all);
+}
+
+SEC("cgroup/sock_create")
+__description("sk->src_port [word load]")
+__failure __msg("invalid bpf_context access off=44 size=2")
+__naked void sock_create_read_src_port(void)
+{
+	asm volatile ("				\
+	r6 = r1;				\
+	r7 = *(u16*)(r6 + %[bpf_sock_src_port]);	\
+	r0 = 1;					\
+	exit;					\
+"	:
+	: __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port))
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
index fb316c080c84..3f679de73229 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
@@ -187,7 +187,7 @@ l0_%=:	r6 = r0;	\
 
 SEC("cgroup/skb")
 __description("spin_lock: test6 missing unlock")
-__failure __msg("unlock is missing")
+__failure __msg("BPF_EXIT instruction cannot be used inside bpf_spin_lock-ed region")
 __failure_unpriv __msg_unpriv("")
 __naked void spin_lock_test6_missing_unlock(void)
 {
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 905d5981ace1..8b40e9496af1 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -26,10 +26,6 @@
 #include "test_maps.h"
 #include "testing_helpers.h"
 
-#ifndef ENOTSUPP
-#define ENOTSUPP 524
-#endif
-
 int skips;
 
 static struct bpf_map_create_opts map_opts = { .sz = sizeof(map_opts) };
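The off= values asserted by the new verifier_sock.c tests above are plain field offsets in the UAPI struct bpf_sock, so the expected messages can be cross-checked from userspace. A small self-contained check, assuming the <linux/bpf.h> UAPI header:

    #include <stdio.h>
    #include <stddef.h>
    #include <linux/bpf.h>

    int main(void)
    {
            /* should print 16 24 28 44, matching the off= values above */
            printf("%zu %zu %zu %zu\n",
                   offsetof(struct bpf_sock, mark),
                   offsetof(struct bpf_sock, src_ip4),
                   offsetof(struct bpf_sock, src_ip6),
                   offsetof(struct bpf_sock, src_port));
            return 0;
    }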
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index c7a70e1a1085..6088d8222d59 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -16,15 +16,18 @@
 #include <sys/socket.h>
 #include <sys/un.h>
 #include <bpf/btf.h>
+#include <time.h>
 #include "json_writer.h"
 #include "network_helpers.h"
 
+/* backtrace() and backtrace_symbols_fd() are glibc specific,
+ * use header file when glibc is available and provide stub
+ * implementations when another libc implementation is used.
+ */
 #ifdef __GLIBC__
 #include <execinfo.h> /* backtrace */
-#endif
-
-/* Default backtrace funcs if missing at link */
+#else
 __weak int backtrace(void **buffer, int size)
 {
 	return 0;
@@ -34,6 +37,7 @@ __weak void backtrace_symbols_fd(void *const *buffer, int size, int fd)
 {
 	dprintf(fd, "<backtrace not supported>\n");
 }
+#endif /*__GLIBC__ */
 
 int env_verbosity = 0;
 
@@ -176,6 +180,88 @@ int usleep(useconds_t usec)
 	return syscall(__NR_nanosleep, &ts, NULL);
 }
 
+/* Watchdog timer is started by watchdog_start() and stopped by watchdog_stop().
+ * If timer is active for longer than env.secs_till_notify,
+ * it prints the name of the current test to the stderr.
+ * If timer is active for longer than env.secs_till_kill,
+ * it kills the thread executing the test by sending a SIGSEGV signal to it.
+ */
+static void watchdog_timer_func(union sigval sigval)
+{
+	struct itimerspec timeout = {};
+	char test_name[256];
+	int err;
+
+	if (env.subtest_state)
+		snprintf(test_name, sizeof(test_name), "%s/%s",
+			 env.test->test_name, env.subtest_state->name);
+	else
+		snprintf(test_name, sizeof(test_name), "%s",
+			 env.test->test_name);
+
+	switch (env.watchdog_state) {
+	case WD_NOTIFY:
+		fprintf(env.stderr_saved, "WATCHDOG: test case %s executes for %d seconds...\n",
+			test_name, env.secs_till_notify);
+		timeout.it_value.tv_sec = env.secs_till_kill - env.secs_till_notify;
+		env.watchdog_state = WD_KILL;
+		err = timer_settime(env.watchdog, 0, &timeout, NULL);
+		if (err)
+			fprintf(env.stderr_saved, "Failed to arm watchdog timer\n");
+		break;
+	case WD_KILL:
+		fprintf(env.stderr_saved,
+			"WATCHDOG: test case %s executes for %d seconds, terminating with SIGSEGV\n",
+			test_name, env.secs_till_kill);
+		pthread_kill(env.main_thread, SIGSEGV);
+		break;
+	}
+}
+
+static void watchdog_start(void)
+{
+	struct itimerspec timeout = {};
+	int err;
+
+	if (env.secs_till_kill == 0)
+		return;
+	if (env.secs_till_notify > 0) {
+		env.watchdog_state = WD_NOTIFY;
+		timeout.it_value.tv_sec = env.secs_till_notify;
+	} else {
+		env.watchdog_state = WD_KILL;
+		timeout.it_value.tv_sec = env.secs_till_kill;
+	}
+	err = timer_settime(env.watchdog, 0, &timeout, NULL);
+	if (err)
+		fprintf(env.stderr_saved, "Failed to start watchdog timer\n");
+}
+
+static void watchdog_stop(void)
+{
+	struct itimerspec timeout = {};
+	int err;
+
+	env.watchdog_state = WD_NOTIFY;
+	err = timer_settime(env.watchdog, 0, &timeout, NULL);
+	if (err)
+		fprintf(env.stderr_saved, "Failed to stop watchdog timer\n");
+}
+
+static void watchdog_init(void)
+{
+	struct sigevent watchdog_sev = {
+		.sigev_notify = SIGEV_THREAD,
+		.sigev_notify_function = watchdog_timer_func,
+	};
+	int err;
+
+	env.main_thread = pthread_self();
+	err = timer_create(CLOCK_MONOTONIC, &watchdog_sev, &env.watchdog);
+	if (err)
+		fprintf(stderr, "Failed to initialize watchdog timer\n");
+}
+
 static bool should_run(struct test_selector *sel, int num, const char *name)
 {
 	int i;
@@ -512,6 +598,7 @@ bool test__start_subtest(const char *subtest_name)
 	env.subtest_state = subtest_state;
 	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
 
+	watchdog_start();
 	return true;
 }
 
@@ -777,6 +864,7 @@ enum ARG_KEYS {
 	ARG_DEBUG = -1,
 	ARG_JSON_SUMMARY = 'J',
 	ARG_TRAFFIC_MONITOR = 'm',
+	ARG_WATCHDOG_TIMEOUT = 'w',
 };
 
 static const struct argp_option opts[] = {
@@ -807,6 +895,8 @@ static const struct argp_option opts[] = {
 	{ "traffic-monitor", ARG_TRAFFIC_MONITOR, "NAMES", 0,
 	  "Monitor network traffic of tests with name matching the pattern (supports '*' wildcard)." },
 #endif
+	{ "watchdog-timeout", ARG_WATCHDOG_TIMEOUT, "SECONDS", 0,
+	  "Kill the process if tests are not making progress for specified number of seconds." },
 	{},
 };
 
@@ -868,6 +958,7 @@ static int libbpf_print_fn(enum libbpf_print_level level,
 
 		va_copy(args2, args);
 		vfprintf(libbpf_capture_stream, format, args2);
+		va_end(args2);
 	}
 
 	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
@@ -1031,6 +1122,16 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
 			       true);
 		break;
 #endif
+	case ARG_WATCHDOG_TIMEOUT:
+		env->secs_till_kill = atoi(arg);
+		if (env->secs_till_kill < 0) {
+			fprintf(stderr, "Invalid watchdog timeout: %s.\n", arg);
+			return -EINVAL;
+		}
+		if (env->secs_till_kill < env->secs_till_notify) {
+			env->secs_till_notify = 0;
+		}
+		break;
 	default:
 		return ARGP_ERR_UNKNOWN;
 	}
@@ -1259,10 +1360,12 @@ static void run_one_test(int test_num)
 
 	stdio_hijack(&state->log_buf, &state->log_cnt);
 
+	watchdog_start();
 	if (test->run_test)
 		test->run_test();
 	else if (test->run_serial_test)
 		test->run_serial_test();
+	watchdog_stop();
 
 	/* ensure last sub-test is finalized properly */
 	if (env.subtest_state)
@@ -1703,6 +1806,7 @@ out:
 static int worker_main(int sock)
 {
 	save_netns();
+	watchdog_init();
 
 	while (true) {
 		/* receive command */
@@ -1812,6 +1916,8 @@ int main(int argc, char **argv)
 
 	sigaction(SIGSEGV, &sigact, NULL);
 
+	env.secs_till_notify = 10;
+	env.secs_till_kill = 120;
 	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
 	if (err)
 		return err;
@@ -1820,6 +1926,8 @@ int main(int argc, char **argv)
 	if (err)
 		return err;
 
+	watchdog_init();
+
 	/* Use libbpf 1.0 API mode */
 	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
 	libbpf_set_print(libbpf_print_fn);
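The watchdog added above is an ordinary POSIX per-process timer delivered on a helper thread: watchdog_start() arms it, the WD_NOTIFY expiry re-arms it for the time remaining until the kill deadline, and the WD_KILL expiry signals the main thread so the SIGSEGV handler installed in main() can report the stall. A self-contained sketch of the same two ingredients (timer_create() with SIGEV_THREAD, disarm via a zeroed it_value); all names here are illustrative, and older glibc needs -lrt -lpthread:

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static timer_t wd;

    static void wd_fire(union sigval sv)
    {
            (void)sv;
            fprintf(stderr, "watchdog fired\n");
    }

    static void wd_arm(int secs)
    {
            /* a zeroed it_value disarms the timer, which is exactly how
             * watchdog_stop() above cancels a pending expiry */
            struct itimerspec its = { .it_value.tv_sec = secs };

            if (timer_settime(wd, 0, &its, NULL))
                    perror("timer_settime");
    }

    int main(void)
    {
            struct sigevent sev = {
                    .sigev_notify = SIGEV_THREAD,
                    .sigev_notify_function = wd_fire,
            };

            if (timer_create(CLOCK_MONOTONIC, &sev, &wd))
                    return 1;
            wd_arm(1);
            sleep(2);       /* let the callback run once */
            return 0;
    }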
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 7767d9a825ae..74de33ae37e5 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -131,6 +131,12 @@ struct test_env {
 	pid_t *worker_pids; /* array of worker pids */
 	int *worker_socks; /* array of worker socks */
 	int *worker_current_test; /* array of current running test for each worker */
+
+	pthread_t main_thread;
+	int secs_till_notify;
+	int secs_till_kill;
+	timer_t watchdog; /* watch for stalled tests/subtests */
+	enum { WD_NOTIFY, WD_KILL } watchdog_state;
 };
 
 #define MAX_LOG_TRUNK_SIZE 8192
@@ -390,6 +396,14 @@ int test__join_cgroup(const char *path);
 	___ok;								\
 })
 
+#define ASSERT_ERR_FD(fd, name) ({					\
+	static int duration = 0;					\
+	int ___fd = (fd);						\
+	bool ___ok = ___fd < 0;						\
+	CHECK(!___ok, (name), "unexpected fd: %d\n", ___fd);		\
+	___ok;								\
+})
+
 #define SYS(goto_label, fmt, ...)					\
 	({								\
 		char cmd[1024];						\
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 610392dfc4fb..447b68509d76 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -42,10 +42,6 @@
 #include "../../../include/linux/filter.h"
 #include "testing_helpers.h"
 
-#ifndef ENOTSUPP
-#define ENOTSUPP 524
-#endif
-
 #define MAX_INSNS	BPF_MAXINSNS
 #define MAX_EXPECTED_INSNS	32
 #define MAX_UNEXPECTED_INSNS	32
diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c
index c7828b13e5ff..dd38dc68f635 100644
--- a/tools/testing/selftests/bpf/uprobe_multi.c
+++ b/tools/testing/selftests/bpf/uprobe_multi.c
@@ -12,6 +12,10 @@
 #define MADV_POPULATE_READ 22
 #endif
 
+#ifndef MADV_PAGEOUT
+#define MADV_PAGEOUT 21
+#endif
+
 int __attribute__((weak)) uprobe(void)
 {
 	return 0;
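With the watchdog state now in struct test_env and the new flag wired up, a stalled run can be handled from the command line. The flag letter and the defaults (notify after 10 seconds, kill after 120) come from the patch itself; the test name below is a placeholder:

    ./test_progs -t some_test -w 60    # SIGSEGV the run if stalled for 60 seconds
    ./test_progs -w 0                  # zero disables the watchdog entirely

The zero-disables behavior follows from watchdog_start() returning early when secs_till_kill is 0.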
diff --git a/tools/testing/selftests/bpf/uptr_test_common.h b/tools/testing/selftests/bpf/uptr_test_common.h
new file mode 100644
index 000000000000..f8a134ba12f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/uptr_test_common.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _UPTR_TEST_COMMON_H
+#define _UPTR_TEST_COMMON_H
+
+#define MAGIC_VALUE 0xabcd1234
+#define PAGE_SIZE 4096
+
+#ifdef __BPF__
+/* Avoid fwd btf type being generated for the following struct */
+struct large_data *dummy_large_data;
+struct empty_data *dummy_empty_data;
+struct user_data *dummy_data;
+struct cgroup *dummy_cgrp;
+#else
+#define __uptr
+#define __kptr
+#endif
+
+struct user_data {
+	int a;
+	int b;
+	int result;
+	int nested_result;
+};
+
+struct nested_udata {
+	struct user_data __uptr *udata;
+};
+
+struct value_type {
+	struct user_data __uptr *udata;
+	struct cgroup __kptr *cgrp;
+	struct nested_udata nested;
+};
+
+struct value_lock_type {
+	struct user_data __uptr *udata;
+	struct bpf_spin_lock lock;
+};
+
+struct large_data {
+	__u8 one_page[PAGE_SIZE];
+	int a;
+};
+
+struct large_uptr {
+	struct large_data __uptr *udata;
+};
+
+struct empty_data {
+};
+
+struct empty_uptr {
+	struct empty_data __uptr *udata;
+};
+
+struct kstruct_uptr {
+	struct cgroup __uptr *cgrp;
+};
+
+#endif
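uptr_test_common.h is deliberately dual-audience: a BPF object build gets the real __uptr/__kptr type-tag definitions elsewhere (from bpf_helpers.h, by the usual convention, which is an assumption here), while a userspace build defines them away so both sides share one struct layout. A hedged sketch of the userspace half of the uptr flow introduced in this merge (map and task-fd setup are omitted; everything except bpf_map_update_elem() is either from the header above or illustrative):

    #include <bpf/bpf.h>
    #include "uptr_test_common.h"

    static struct user_data udata = { .a = 1, .b = 2 };

    static int publish_uptr(int task_storage_map_fd, int task_fd)
    {
            struct value_type value = { .udata = &udata };

            /* on update the kernel pins the pointed-to user memory so the
             * BPF side can dereference the __uptr field directly */
            return bpf_map_update_elem(task_storage_map_fd, &task_fd, &value, 0);
    }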
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index 1ec5c4c47235..e12ef953fba8 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -16,6 +16,7 @@
 #include <sys/stat.h>
 #include <bpf/libbpf.h>
 #include <bpf/btf.h>
+#include <bpf/bpf.h>
 #include <libelf.h>
 #include <gelf.h>
 #include <float.h>
@@ -179,6 +180,7 @@ static struct env {
 	int files_skipped;
 	int progs_processed;
 	int progs_skipped;
+	int top_src_lines;
 } env;
 
 static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
@@ -228,6 +230,7 @@ static const struct argp_option opts[] = {
 	  "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
 	{ "test-reg-invariants", 'r', NULL, 0,
 	  "Force BPF verifier failure on register invariant violation (BPF_F_TEST_REG_INVARIANTS program flag)" },
+	{ "top-src-lines", 'S', "N", 0, "Emit N most frequent source code lines" },
 	{},
 };
 
@@ -327,6 +330,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
 			return err;
 		}
 		break;
+	case 'S':
+		errno = 0;
+		env.top_src_lines = strtol(arg, NULL, 10);
+		if (errno) {
+			fprintf(stderr, "invalid top lines N specifier: %s\n", arg);
+			argp_usage(state);
+		}
+		break;
 	case ARGP_KEY_ARG:
 		tmp = realloc(env.filenames, (env.filename_cnt + 1) * sizeof(*env.filenames));
 		if (!tmp)
@@ -854,6 +865,118 @@ static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats *
 	return 0;
 }
 
+struct line_cnt {
+	char *line;
+	int cnt;
+};
+
+static int str_cmp(const void *a, const void *b)
+{
+	const char **str1 = (const char **)a;
+	const char **str2 = (const char **)b;
+
+	return strcmp(*str1, *str2);
+}
+
+static int line_cnt_cmp(const void *a, const void *b)
+{
+	const struct line_cnt *a_cnt = (const struct line_cnt *)a;
+	const struct line_cnt *b_cnt = (const struct line_cnt *)b;
+
+	if (a_cnt->cnt != b_cnt->cnt)
+		return a_cnt->cnt > b_cnt->cnt ? -1 : 1;
+	return strcmp(a_cnt->line, b_cnt->line);
+}
+
+static int print_top_src_lines(char * const buf, size_t buf_sz, const char *prog_name)
+{
+	int lines_cap = 0;
+	int lines_size = 0;
+	char **lines = NULL;
+	char *line = NULL;
+	char *state;
+	struct line_cnt *freq = NULL;
+	struct line_cnt *cur;
+	int unique_lines;
+	int err = 0;
+	int i;
+
+	while ((line = strtok_r(line ? NULL : buf, "\n", &state))) {
+		if (strncmp(line, "; ", 2) != 0)
+			continue;
+		line += 2;
+
+		if (lines_size == lines_cap) {
+			char **tmp;
+
+			lines_cap = max(16, lines_cap * 2);
+			tmp = realloc(lines, lines_cap * sizeof(*tmp));
+			if (!tmp) {
+				err = -ENOMEM;
+				goto cleanup;
+			}
+			lines = tmp;
+		}
+		lines[lines_size] = line;
+		lines_size++;
+	}
+
+	if (lines_size == 0)
+		goto cleanup;
+
+	qsort(lines, lines_size, sizeof(*lines), str_cmp);
+
+	freq = calloc(lines_size, sizeof(*freq));
+	if (!freq) {
+		err = -ENOMEM;
+		goto cleanup;
+	}
+
+	cur = freq;
+	cur->line = lines[0];
+	cur->cnt = 1;
+	for (i = 1; i < lines_size; ++i) {
+		if (strcmp(lines[i], cur->line) != 0) {
+			cur++;
+			cur->line = lines[i];
+			cur->cnt = 0;
+		}
+		cur->cnt++;
+	}
+	unique_lines = cur - freq + 1;
+
+	qsort(freq, unique_lines, sizeof(struct line_cnt), line_cnt_cmp);
+
+	printf("Top source lines (%s):\n", prog_name);
+	for (i = 0; i < min(unique_lines, env.top_src_lines); ++i) {
+		const char *src_code = freq[i].line;
+		const char *src_line = NULL;
+		char *split = strrchr(freq[i].line, '@');
+
+		if (split) {
+			src_line = split + 1;
+
+			while (*src_line && isspace(*src_line))
+				src_line++;
+
+			while (split > src_code && isspace(*split))
+				split--;
+			*split = '\0';
+		}
+
+		if (src_line)
+			printf("%5d: (%s)\t%s\n", freq[i].cnt, src_line, src_code);
+		else
+			printf("%5d: %s\n", freq[i].cnt, src_code);
+	}
+	printf("\n");
+
+cleanup:
+	free(freq);
+	free(lines);
+	return err;
+}
+
 static int guess_prog_type_by_ctx_name(const char *ctx_name,
				       enum bpf_prog_type *prog_type,
				       enum bpf_attach_type *attach_type)
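print_top_src_lines() keys off the "; " prefix that the verifier uses when echoing source lines into the log, sorts the collected lines, run-length counts duplicates, and re-sorts descending by frequency so the first N entries are the hottest. An illustrative input/output pair (the log lines are invented; the output shape is the printf above): a log containing

    ; n += 1; @ prog.c:12
    ; n += 1; @ prog.c:12
    ; return n; @ prog.c:14

with -S 2 would print roughly

    Top source lines (prog):
        2: (prog.c:12)  n += 1;
        1: (prog.c:14)  return n;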
@@ -987,6 +1110,35 @@ skip_freplace_fixup:
 	return;
 }
 
+static int max_verifier_log_size(void)
+{
+	const int SMALL_LOG_SIZE = UINT_MAX >> 8;
+	const int BIG_LOG_SIZE = UINT_MAX >> 2;
+	struct bpf_insn insns[] = {
+		{ .code = BPF_ALU | BPF_MOV | BPF_X, .dst_reg = BPF_REG_0, },
+		{ .code = BPF_JMP | BPF_EXIT, },
+	};
+	LIBBPF_OPTS(bpf_prog_load_opts, opts,
+		    .log_size = BIG_LOG_SIZE,
+		    .log_buf = (void *)-1,
+		    .log_level = 4
+	);
+	int ret, insn_cnt = ARRAY_SIZE(insns);
+	static int log_size;
+
+	if (log_size != 0)
+		return log_size;
+
+	ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+
+	if (ret == -EFAULT)
+		log_size = BIG_LOG_SIZE;
+	else /* ret == -EINVAL, big log size is not supported by the verifier */
+		log_size = SMALL_LOG_SIZE;
+
+	return log_size;
+}
+
 static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog)
 {
 	const char *base_filename = basename(strdupa(filename));
@@ -1009,13 +1161,16 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
 	stats = &env.prog_stats[env.prog_stat_cnt++];
 	memset(stats, 0, sizeof(*stats));
 
-	if (env.verbose) {
-		buf_sz = env.log_size ? env.log_size : 16 * 1024 * 1024;
+	if (env.verbose || env.top_src_lines > 0) {
+		buf_sz = env.log_size ? env.log_size : max_verifier_log_size();
 		buf = malloc(buf_sz);
 		if (!buf)
 			return -ENOMEM;
 		/* ensure we always request stats */
 		log_level = env.log_level | 4 | (env.log_fixed ? 8 : 0);
+		/* --top-src-lines needs verifier log */
+		if (env.top_src_lines > 0 && env.log_level == 0)
+			log_level |= 2;
 	} else {
 		buf = verif_log_buf;
 		buf_sz = sizeof(verif_log_buf);
@@ -1048,6 +1203,8 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
 			filename, prog_name, stats->stats[DURATION],
 			err ? "failure" : "success", buf);
 	}
 
+	if (env.top_src_lines > 0)
+		print_top_src_lines(buf, buf_sz, stats->prog_name);
 	if (verif_log_buf != buf)
 		free(buf);
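Taken together: max_verifier_log_size() probes how large a log the running kernel accepts (an EFAULT from the deliberately unreadable log_buf means the size itself passed validation, while EINVAL means it did not), and --top-src-lines then mines that log for the most frequently revisited source lines. Typical usage, with the object file name as a placeholder:

    ./veristat -S 5 some_prog.bpf.o    # verify, then list the 5 hottest source lines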