author     Arnaldo Carvalho de Melo <acme@redhat.com>  2020-08-06 13:15:47 +0200
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2020-08-06 13:15:47 +0200
commit     94fb1afb14c4f0ceb8c5508ddddac6819f662e95 (patch)
tree       4988e5769dc7482caa7f441475ae31f50bbd37ef /tools/testing/selftests/bpf/test_progs.c
parent     perf evsel: Don't set sample_regs_intr/sample_regs_user for dummy event (diff)
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next (diff)
Merge remote-tracking branch 'torvalds/master' into perf/core
To sync headers, for instance, in this case tools/perf was ahead of upstream
till Linus merged tip/perf/core to get the PERF_RECORD_TEXT_POKE changes:

  Warning: Kernel ABI header at 'tools/include/uapi/linux/perf_event.h' differs from latest version at 'include/uapi/linux/perf_event.h'
  diff -u tools/include/uapi/linux/perf_event.h include/uapi/linux/perf_event.h

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/testing/selftests/bpf/test_progs.c')
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c  |  66
1 file changed, 62 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 54fa5fa688ce..b1e4dadacd9b 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -12,6 +12,9 @@
#include <string.h>
#include <execinfo.h> /* backtrace */
+#define EXIT_NO_TEST 2
+#define EXIT_ERR_SETUP_INFRA 3
+
/* defined in test_progs.h */
struct test_env env = {};
@@ -111,13 +114,31 @@ static void reset_affinity() {
if (err < 0) {
stdio_restore();
fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
- exit(-1);
+ exit(EXIT_ERR_SETUP_INFRA);
}
err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
if (err < 0) {
stdio_restore();
fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
- exit(-1);
+ exit(EXIT_ERR_SETUP_INFRA);
+ }
+}
+
+static void save_netns(void)
+{
+ env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+ if (env.saved_netns_fd == -1) {
+ perror("open(/proc/self/ns/net)");
+ exit(EXIT_ERR_SETUP_INFRA);
+ }
+}
+
+static void restore_netns(void)
+{
+ if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
+ stdio_restore();
+ perror("setns(CLONE_NEWNS)");
+ exit(EXIT_ERR_SETUP_INFRA);
}
}
@@ -138,8 +159,6 @@ void test__end_subtest()
test->test_num, test->subtest_num,
test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
- reset_affinity();
-
free(test->subtest_name);
test->subtest_name = NULL;
}
@@ -366,6 +385,8 @@ enum ARG_KEYS {
ARG_TEST_NAME_BLACKLIST = 'b',
ARG_VERIFIER_STATS = 's',
ARG_VERBOSE = 'v',
+ ARG_GET_TEST_CNT = 'c',
+ ARG_LIST_TEST_NAMES = 'l',
};
static const struct argp_option opts[] = {
@@ -379,6 +400,10 @@ static const struct argp_option opts[] = {
"Output verifier statistics", },
{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
"Verbose output (use -vv or -vvv for progressively verbose output)" },
+ { "count", ARG_GET_TEST_CNT, NULL, 0,
+ "Get number of selected top-level tests " },
+ { "list", ARG_LIST_TEST_NAMES, NULL, 0,
+ "List test names that would run (without running them) " },
{},
};
@@ -511,6 +536,12 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
}
break;
+ case ARG_GET_TEST_CNT:
+ env->get_test_cnt = true;
+ break;
+ case ARG_LIST_TEST_NAMES:
+ env->list_test_names = true;
+ break;
case ARGP_KEY_ARG:
argp_usage(state);
break;
@@ -643,6 +674,7 @@ int main(int argc, char **argv)
return -1;
}
+ save_netns();
stdio_hijack();
for (i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
@@ -654,6 +686,17 @@ int main(int argc, char **argv)
test->test_num, test->test_name))
continue;
+ if (env.get_test_cnt) {
+ env.succ_cnt++;
+ continue;
+ }
+
+ if (env.list_test_names) {
+ fprintf(env.stdout, "%s\n", test->test_name);
+ env.succ_cnt++;
+ continue;
+ }
+
test->run_test();
/* ensure last sub-test is finalized properly */
if (test->subtest_name)
@@ -673,19 +716,34 @@ int main(int argc, char **argv)
test->error_cnt ? "FAIL" : "OK");
reset_affinity();
+ restore_netns();
if (test->need_cgroup_cleanup)
cleanup_cgroup_environment();
}
stdio_restore();
+
+ if (env.get_test_cnt) {
+ printf("%d\n", env.succ_cnt);
+ goto out;
+ }
+
+ if (env.list_test_names)
+ goto out;
+
fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+out:
free_str_set(&env.test_selector.blacklist);
free_str_set(&env.test_selector.whitelist);
free(env.test_selector.num_set);
free_str_set(&env.subtest_selector.blacklist);
free_str_set(&env.subtest_selector.whitelist);
free(env.subtest_selector.num_set);
+ close(env.saved_netns_fd);
+
+ if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
+ return EXIT_NO_TEST;
return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}
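
For readers outside the selftests tree, the core runtime change here is the network-namespace save/restore that now brackets every test. Below is a minimal standalone sketch of that pattern, assuming a Linux host with glibc and the privileges the selftests already run with (root); the paths and exit value mirror the patch, but the program itself is illustrative, not part of the commit.

#define _GNU_SOURCE              /* for setns() */
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define EXIT_ERR_SETUP_INFRA 3   /* same value the patch defines */

static int saved_netns_fd;

/* Keep a handle on the network namespace the process started in. */
static void save_netns(void)
{
	saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

/* Re-enter the saved namespace, undoing any setns() a test performed. */
static void restore_netns(void)
{
	if (setns(saved_netns_fd, CLONE_NEWNET) == -1) {
		perror("setns(CLONE_NEWNET)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

int main(void)
{
	save_netns();
	/* ... run a test that may switch network namespaces ... */
	restore_netns();
	close(saved_netns_fd);
	return 0;
}

Restoring the namespace after each test, rather than once at exit, keeps a test that calls setns() or unshare(CLONE_NEWNET) from leaking its namespace into the tests that follow; the same reasoning applies to the per-test reset_affinity() call it sits next to.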