author     Linus Torvalds <torvalds@linux-foundation.org>  2024-11-26 23:54:00 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-11-26 23:54:00 +0100
commit     b50ecc5aca4d18f1f0c4942f5c797bc85edef144 (patch)
tree       4bb02793452d5f8a38922f1d740ea08627819f32 /tools/perf/tests/builtin-test.c
parent     Merge tag 'parisc-for-6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent     perf tests: Fix hwmon parsing with PMU name test (diff)
Merge tag 'perf-tools-for-v6.13-2024-11-24' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools
Pull perf tools updates from Namhyung Kim:
 "perf record:

   - Enable leader sampling for inherited task events. It was supported
     only for system-wide events but the kernel started to support such
     a setup since v6.12. This is to reduce the number of PMU
     interrupts. The samples of the leader event will contain counts of
     other events and no samples will be generated for the other member
     events.

       $ perf record -e '{cycles,instructions}:S' ${MYPROG}

  perf report:

   - Fix --branch-history option to display more branch-related
     information like prediction, abort and cycles which is available
     on Intel machines.

       $ perf record -bg -- perf test -w brstack
       $ perf report --branch-history
       ...
       #
       # Overhead  Source:Line              Symbol         Shared Object        Predicted Abort Cycles IPC [IPC Coverage]
       # ........  ........................ .............. .................... ......... ..... ...... ....................
       #
           8.17%  copy_page_64.S:19        [k] copy_page  [kernel.kallsyms]        50.0%     0      5    -      -
                  |
                  ---xas_load xarray.h:171
                     |
                     |--5.68%--xas_load xarray.c:245 (cycles:1)
                     |          xas_load xarray.c:242
                     |          xas_load xarray.h:1260 (cycles:1)
                     |          xas_descend xarray.c:146
                     |          xas_load xarray.c:244 (cycles:2)
                     |          xas_load xarray.c:245
                     |          xas_descend xarray.c:218 (cycles:10)
       ...

  perf stat:

   - Add HWMON PMU support. The HWMON provides various system
     information like CPU/GPU temperature, fan speed and so on. Expose
     them as PMU events so that users can see the values using perf
     stat commands.

       $ perf stat -e temp_cpu,fan1 true

        Performance counter stats for 'true':

                 60.00 'C   temp_cpu
                     0 rpm  fan1

           0.000745382 seconds time elapsed

           0.000883000 seconds user
           0.000000000 seconds sys

   - Display metric threshold in JSON output. Some metrics define
     thresholds to classify value ranges. It used to be shown in a
     different color but that won't work for JSON. Add a
     "metric-threshold" field to the JSON that can be one of "good",
     "less good", "nearly bad" and "bad".

       # perf stat -a -M TopdownL1 -j true
       {"counter-value" : "18693525.000000", "unit" : "", "event" : "TOPDOWN.SLOTS", "event-runtime" : 5552708, "pcnt-running" : 100.00, "metric-value" : "43.226002", "metric-unit" : "% tma_backend_bound", "metric-threshold" : "bad"}
       {"metric-value" : "29.212267", "metric-unit" : "% tma_frontend_bound", "metric-threshold" : "bad"}
       {"metric-value" : "7.138972", "metric-unit" : "% tma_bad_speculation", "metric-threshold" : "good"}
       {"metric-value" : "20.422759", "metric-unit" : "% tma_retiring", "metric-threshold" : "good"}
       {"counter-value" : "3817732.000000", "unit" : "", "event" : "topdown-retiring", "event-runtime" : 5552708, "pcnt-running" : 100.00, }
       {"counter-value" : "5472824.000000", "unit" : "", "event" : "topdown-fe-bound", "event-runtime" : 5552708, "pcnt-running" : 100.00, }
       {"counter-value" : "7984780.000000", "unit" : "", "event" : "topdown-be-bound", "event-runtime" : 5552708, "pcnt-running" : 100.00, }
       {"counter-value" : "1418181.000000", "unit" : "", "event" : "topdown-bad-spec", "event-runtime" : 5552708, "pcnt-running" : 100.00, }
       ...

  perf sched:

   - Add -P/--pre-migrations option for the 'timehist' sub-command to
     track the time a task waited on a run-queue before migrating to a
     different CPU.

       $ perf sched timehist -P
                time    cpu  task name                      wait time  sch delay  run time  pre-mig time
                             [tid/pid]                         (msec)     (msec)    (msec)        (msec)
       --------------- ------ ------------------------------ --------- --------- --------- ---------
        585940.535527 [0000]  perf[584885]                       0.000     0.000     0.000     0.000
        585940.535535 [0000]  migration/0[20]                    0.000     0.002     0.008     0.000
        585940.535559 [0001]  perf[584885]                       0.000     0.000     0.000     0.000
        585940.535563 [0001]  migration/1[25]                    0.000     0.001     0.004     0.000
        585940.535678 [0002]  perf[584885]                       0.000     0.000     0.000     0.000
        585940.535686 [0002]  migration/2[31]                    0.000     0.002     0.008     0.000
        585940.535905 [0001]  <idle>                             0.000     0.000     0.342     0.000
        585940.535938 [0003]  perf[584885]                       0.000     0.000     0.000     0.000
        585940.537048 [0001]  sleep[584886]                      0.000     0.019     1.142     0.001
        585940.537749 [0002]  <idle>                             0.000     0.000     2.062     0.000
       ...

  Build:

   - Make libunwind opt-in (LIBUNWIND=1) rather than opt-out. The perf
     tools are generally built with libelf and libdw, which have
     unwinder functionality. The libunwind support predates it and
     there is no need to have duplicate unwinders by default.

   - Rename the NO_DWARF=1 build option to NO_LIBDW=1 in order to
     clarify that it's libdw that handles the DWARF information.

  Internals:

   - Do not set the exclude_guest bit in the perf_event_attr by
     default. This was causing trouble on the AMD IBS PMU as it doesn't
     support the bit. The bit will be set later, when it's needed, by
     the fallback logic. Also update the missing-feature detection
     logic to make sure it doesn't clear supported bits unnecessarily.

   - Run perf test in parallel by default and mark flaky tests
     "exclusive" to run them serially at the end. Some test numbers
     changed, but the tests can complete in less than half the time.

  JSON vendor events:

   - Add AMD Zen 5 events and metrics

   - Add i.MX91 and i.MX95 DDR metrics

   - Fix HiSilicon HIP08 Topdown metric name

   - Support compat events on PowerPC"

* tag 'perf-tools-for-v6.13-2024-11-24' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools: (232 commits)
  perf tests: Fix hwmon parsing with PMU name test
  perf hwmon_pmu: Ensure hwmon key union is zeroed before use
  perf tests hwmon_pmu: Remove double evlist__delete()
  perf/test: fix perf ftrace test on s390
  perf bpf-filter: Return -ENOMEM directly when pfi allocation fails
  perf test: Correct hwmon test PMU detection
  perf: Remove unused del_perf_probe_events()
  perf pmu: Move pmu_metrics_table__find and remove ARM override
  perf jevents: Add map_for_cpu()
  perf header: Pass a perf_cpu rather than a PMU to get_cpuid_str
  perf header: Avoid transitive PMU includes
  perf arm64 header: Use cpu argument in get_cpuid
  perf header: Refactor get_cpuid to take a CPU for ARM
  perf header: Move is_cpu_online to numa bench
  perf jevents: fix breakage when do perf stat on system metric
  perf test: Add missing __exit calls in tool/hwmon tests
  perf tests: Make leader sampling test work without branch event
  perf util: Remove kernel version deadcode
  perf test shell trace_exit_race: Use --no-comm to avoid cases where COMM isn't resolved
  perf test shell trace_exit_race: Show what went wrong in verbose mode
  ...
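
For context, the build changes above amount to invocations along these
lines (a sketch: the `make -C tools/perf` form is the usual way to build
perf and is assumed here, while LIBUNWIND=1 and NO_LIBDW=1 are the knobs
named in the notes):

  $ make -C tools/perf                # default: libelf/libdw unwinding only
  $ make -C tools/perf LIBUNWIND=1    # opt back in to the libunwind unwinder
  $ make -C tools/perf NO_LIBDW=1     # was NO_DWARF=1: build without libdw DWARF handling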
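
Similarly, with parallel runs now the default, the options visible in
the patch below suggest a session along these lines (illustrative only;
the -S/--sequential, -w/--workload and --list-workloads flags are taken
from the patch itself):

  $ perf test                    # parallel by default; "exclusive" tests run serially at the end
  $ perf test -S                 # force the old one-after-another behaviour
  $ perf test --list-workloads   # list builtin workloads usable with -w/--workload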
Diffstat (limited to 'tools/perf/tests/builtin-test.c')
-rw-r--r--  tools/perf/tests/builtin-test.c  438
1 file changed, 311 insertions(+), 127 deletions(-)
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 470a9709427d..8dcf74d3c0a3 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -8,6 +8,7 @@
#include <errno.h>
#include <poll.h>
#include <unistd.h>
+#include <setjmp.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
@@ -39,11 +40,8 @@
* making them easier to debug.
*/
static bool dont_fork;
-/* Don't fork the tests in parallel and wait for their completion. */
-static bool sequential = true;
-/* Do it in parallel, lacks infrastructure to avoid running tests that clash for resources,
- * So leave it as the developers choice to enable while working on the needed infra */
-static bool parallel;
+/* Fork the tests in parallel and wait for their completion. */
+static bool sequential;
const char *dso_to_test;
const char *test_objdump_path = "objdump";
@@ -73,13 +71,14 @@ static struct test_suite *generic_tests[] = {
&suite__PERF_RECORD,
&suite__pmu,
&suite__pmu_events,
+ &suite__hwmon_pmu,
+ &suite__tool_pmu,
&suite__dso_data,
&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
&suite__perf_evsel__tp_sched_test,
&suite__syscall_openat_tp_fields,
#endif
- &suite__attr,
&suite__hists_link,
&suite__python_use,
&suite__bp_signal,
@@ -139,12 +138,6 @@ static struct test_suite *generic_tests[] = {
NULL,
};
-static struct test_suite **tests[] = {
- generic_tests,
- arch_tests,
- NULL, /* shell tests created at runtime. */
-};
-
static struct test_workload *workloads[] = {
&workload__noploop,
&workload__thloop,
@@ -155,6 +148,9 @@ static struct test_workload *workloads[] = {
&workload__landlock,
};
+#define workloads__for_each(workload) \
+ for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)
+
static int num_subtests(const struct test_suite *t)
{
int num;
@@ -198,6 +194,14 @@ static test_fnptr test_function(const struct test_suite *t, int subtest)
return t->test_cases[subtest].run_case;
}
+static bool test_exclusive(const struct test_suite *t, int subtest)
+{
+ if (subtest <= 0)
+ return t->test_cases[0].exclusive;
+
+ return t->test_cases[subtest].exclusive;
+}
+
static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
{
int i;
@@ -229,20 +233,47 @@ struct child_test {
int subtest;
};
+static jmp_buf run_test_jmp_buf;
+
+static void child_test_sig_handler(int sig)
+{
+ siglongjmp(run_test_jmp_buf, sig);
+}
+
static int run_test_child(struct child_process *process)
{
+ const int signals[] = {
+ SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
+ };
struct child_test *child = container_of(process, struct child_test, process);
int err;
+ err = sigsetjmp(run_test_jmp_buf, 1);
+ if (err) {
+ fprintf(stderr, "\n---- unexpected signal (%d) ----\n", err);
+ err = err > 0 ? -err : -1;
+ goto err_out;
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
+ signal(signals[i], child_test_sig_handler);
+
pr_debug("--- start ---\n");
pr_debug("test child forked, pid %d\n", getpid());
err = test_function(child->test, child->subtest)(child->test, child->subtest);
pr_debug("---- end(%d) ----\n", err);
+
+err_out:
fflush(NULL);
+ for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
+ signal(signals[i], SIG_DFL);
return -err;
}
-static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width)
+#define TEST_RUNNING -3
+
+static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width,
+ int running)
{
if (has_subtests(t)) {
int subw = width > 2 ? width - 2 : width;
@@ -252,6 +283,9 @@ static int print_test_result(struct test_suite *t, int i, int subtest, int resul
pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest));
switch (result) {
+ case TEST_RUNNING:
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
+ break;
case TEST_OK:
pr_info(" Ok\n");
break;
@@ -273,16 +307,25 @@ static int print_test_result(struct test_suite *t, int i, int subtest, int resul
return 0;
}
-static int finish_test(struct child_test *child_test, int width)
+static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
+ int width)
{
- struct test_suite *t = child_test->test;
- int i = child_test->test_num;
- int subi = child_test->subtest;
- int err = child_test->process.err;
- bool err_done = err <= 0;
+ struct child_test *child_test = child_tests[running_test];
+ struct test_suite *t;
+ int i, subi, err;
+ bool err_done = false;
struct strbuf err_output = STRBUF_INIT;
+ int last_running = -1;
int ret;
+ if (child_test == NULL) {
+ /* Test wasn't started. */
+ return;
+ }
+ t = child_test->test;
+ i = child_test->test_num;
+ subi = child_test->subtest;
+ err = child_test->process.err;
/*
* For test suites with subtests, display the suite name ahead of the
* sub test names.
@@ -294,7 +337,7 @@ static int finish_test(struct child_test *child_test, int width)
* Busy loop reading from the child's stdout/stderr that are set to be
* non-blocking until EOF.
*/
- if (!err_done)
+ if (err > 0)
fcntl(err, F_SETFL, O_NONBLOCK);
if (verbose > 1) {
if (has_subtests(t))
@@ -308,57 +351,90 @@ static int finish_test(struct child_test *child_test, int width)
.events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
},
};
- char buf[512];
- ssize_t len;
+ if (perf_use_color_default) {
+ int running = 0;
- /* Poll to avoid excessive spinning, timeout set for 100ms. */
- poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
- if (!err_done && pfds[0].revents) {
- errno = 0;
- len = read(err, buf, sizeof(buf) - 1);
+ for (int y = running_test; y < child_test_num; y++) {
+ if (child_tests[y] == NULL)
+ continue;
+ if (check_if_command_finished(&child_tests[y]->process) == 0)
+ running++;
+ }
+ if (running != last_running) {
+ if (last_running != -1) {
+ /*
+ * Erase "Running (.. active)" line
+ * printed before poll/sleep.
+ */
+ fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
+ }
+ print_test_result(t, i, subi, TEST_RUNNING, width, running);
+ last_running = running;
+ }
+ }
- if (len <= 0) {
- err_done = errno != EAGAIN;
- } else {
- buf[len] = '\0';
- if (verbose > 1)
- fprintf(stdout, "%s", buf);
- else
+ err_done = true;
+ if (err <= 0) {
+ /* No child stderr to poll, sleep for 10ms for child to complete. */
+ usleep(10 * 1000);
+ } else {
+ /* Poll to avoid excessive spinning, timeout set for 100ms. */
+ poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
+ if (pfds[0].revents) {
+ char buf[512];
+ ssize_t len;
+
+ len = read(err, buf, sizeof(buf) - 1);
+
+ if (len > 0) {
+ err_done = false;
+ buf[len] = '\0';
strbuf_addstr(&err_output, buf);
+ }
}
}
+ if (err_done)
+ err_done = check_if_command_finished(&child_test->process);
+ }
+ if (perf_use_color_default && last_running != -1) {
+ /* Erase "Running (.. active)" line printed before poll/sleep. */
+ fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
}
/* Clean up child process. */
ret = finish_command(&child_test->process);
- if (verbose == 1 && ret == TEST_FAIL) {
- /* Add header for test that was skipped above. */
- if (has_subtests(t))
- pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
- else
- pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
+ if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
fprintf(stderr, "%s", err_output.buf);
- }
+
strbuf_release(&err_output);
- print_test_result(t, i, subi, ret, width);
+ print_test_result(t, i, subi, ret, width, /*running=*/0);
if (err > 0)
close(err);
- return 0;
+ zfree(&child_tests[running_test]);
}
static int start_test(struct test_suite *test, int i, int subi, struct child_test **child,
- int width)
+ int width, int pass)
{
int err;
*child = NULL;
if (dont_fork) {
- pr_debug("--- start ---\n");
- err = test_function(test, subi)(test, subi);
- pr_debug("---- end ----\n");
- print_test_result(test, i, subi, err, width);
+ if (pass == 1) {
+ pr_debug("--- start ---\n");
+ err = test_function(test, subi)(test, subi);
+ pr_debug("---- end ----\n");
+ print_test_result(test, i, subi, err, width, /*running=*/0);
+ }
+ return 0;
+ }
+ if (pass == 1 && !sequential && test_exclusive(test, subi)) {
+ /* When parallel, skip exclusive tests on the first pass. */
+ return 0;
+ }
+ if (pass != 1 && (sequential || !test_exclusive(test, subi))) {
+ /* Sequential and non-exclusive tests were run on the first pass. */
return 0;
}
-
*child = zalloc(sizeof(**child));
if (!*child)
return -ENOMEM;
@@ -377,35 +453,42 @@ static int start_test(struct test_suite *test, int i, int subi, struct child_tes
(*child)->process.err = -1;
}
(*child)->process.no_exec_cmd = run_test_child;
- err = start_command(&(*child)->process);
- if (err || !sequential)
- return err;
- return finish_test(*child, width);
+ if (sequential || pass == 2) {
+ err = start_command(&(*child)->process);
+ if (err)
+ return err;
+ finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
+ return 0;
+ }
+ return start_command(&(*child)->process);
}
-#define for_each_test(j, k, t) \
- for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0) \
- while ((t = tests[j][k++]) != NULL)
+/* State outside of __cmd_test for the sake of the signal handler. */
+
+static size_t num_tests;
+static struct child_test **child_tests;
+static jmp_buf cmd_test_jmp_buf;
-static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
+static void cmd_test_sig_handler(int sig)
{
- struct test_suite *t;
- unsigned int j, k;
- int i = 0;
- int width = 0;
- size_t num_tests = 0;
- struct child_test **child_tests;
- int child_test_num = 0;
+ siglongjmp(cmd_test_jmp_buf, sig);
+}
+
+static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
+ struct intlist *skiplist)
+{
+ static int width = 0;
+ int err = 0;
- for_each_test(j, k, t) {
- int len = strlen(test_description(t, -1));
+ for (struct test_suite **t = suites; *t; t++) {
+ int len = strlen(test_description(*t, -1));
if (width < len)
width = len;
- if (has_subtests(t)) {
- for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
- len = strlen(test_description(t, subi));
+ if (has_subtests(*t)) {
+ for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
+ len = strlen(test_description(*t, subi));
if (width < len)
width = len;
num_tests++;
@@ -418,97 +501,137 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
if (!child_tests)
return -ENOMEM;
- for_each_test(j, k, t) {
- int curr = i++;
-
- if (!perf_test__matches(test_description(t, -1), curr, argc, argv)) {
- bool skip = true;
+ err = sigsetjmp(cmd_test_jmp_buf, 1);
+ if (err) {
+ pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
+ err);
+ for (size_t x = 0; x < num_tests; x++) {
+ struct child_test *child_test = child_tests[x];
- for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
- if (perf_test__matches(test_description(t, subi),
- curr, argc, argv))
- skip = false;
- }
-
- if (skip)
+ if (!child_test)
continue;
- }
- if (intlist__find(skiplist, i)) {
- pr_info("%3d: %-*s:", curr + 1, width, test_description(t, -1));
- color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
- continue;
+ pr_debug3("Killing %d pid %d\n",
+ child_test->test_num + 1,
+ child_test->process.pid);
+ kill(child_test->process.pid, err);
}
+ goto err_out;
+ }
+ signal(SIGINT, cmd_test_sig_handler);
+ signal(SIGTERM, cmd_test_sig_handler);
- if (!has_subtests(t)) {
- int err = start_test(t, curr, -1, &child_tests[child_test_num++], width);
+ /*
+ * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
+ * runs the exclusive tests sequentially. In other modes all tests are
+ * run in pass 1.
+ */
+ for (int pass = 1; pass <= 2; pass++) {
+ int child_test_num = 0;
+ int i = 0;
+
+ for (struct test_suite **t = suites; *t; t++) {
+ int curr = i++;
+
+ if (!perf_test__matches(test_description(*t, -1), curr, argc, argv)) {
+ /*
+ * Test suite shouldn't be run based on
+ * description. See if subtest should.
+ */
+ bool skip = true;
+
+ for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
+ if (perf_test__matches(test_description(*t, subi),
+ curr, argc, argv))
+ skip = false;
+ }
+
+ if (skip)
+ continue;
+ }
- if (err) {
- /* TODO: if !sequential waitpid the already forked children. */
- free(child_tests);
- return err;
+ if (intlist__find(skiplist, i)) {
+ pr_info("%3d: %-*s:", curr + 1, width, test_description(*t, -1));
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
+ continue;
}
- } else {
- for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
- int err;
- if (!perf_test__matches(test_description(t, subi),
+ if (!has_subtests(*t)) {
+ err = start_test(*t, curr, -1, &child_tests[child_test_num++],
+ width, pass);
+ if (err)
+ goto err_out;
+ continue;
+ }
+ for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
+ if (!perf_test__matches(test_description(*t, subi),
curr, argc, argv))
continue;
- err = start_test(t, curr, subi, &child_tests[child_test_num++],
- width);
+ err = start_test(*t, curr, subi, &child_tests[child_test_num++],
+ width, pass);
if (err)
- return err;
+ goto err_out;
}
}
- }
- for (i = 0; i < child_test_num; i++) {
if (!sequential) {
- int ret = finish_test(child_tests[i], width);
-
- if (ret)
- return ret;
+ /* Parallel mode starts tests but doesn't finish them. Do that now. */
+ for (size_t x = 0; x < num_tests; x++)
+ finish_test(child_tests, x, num_tests, width);
}
- free(child_tests[i]);
+ }
+err_out:
+ signal(SIGINT, SIG_DFL);
+ signal(SIGTERM, SIG_DFL);
+ if (err) {
+ pr_err("Internal test harness failure. Completing any started tests:\n:");
+ for (size_t x = 0; x < num_tests; x++)
+ finish_test(child_tests, x, num_tests, width);
}
free(child_tests);
- return 0;
+ return err;
}
-static int perf_test__list(int argc, const char **argv)
+static int perf_test__list(struct test_suite **suites, int argc, const char **argv)
{
- unsigned int j, k;
- struct test_suite *t;
int i = 0;
- for_each_test(j, k, t) {
+ for (struct test_suite **t = suites; *t; t++) {
int curr = i++;
- if (!perf_test__matches(test_description(t, -1), curr, argc, argv))
+ if (!perf_test__matches(test_description(*t, -1), curr, argc, argv))
continue;
- pr_info("%3d: %s\n", i, test_description(t, -1));
+ pr_info("%3d: %s\n", i, test_description(*t, -1));
- if (has_subtests(t)) {
- int subn = num_subtests(t);
+ if (has_subtests(*t)) {
+ int subn = num_subtests(*t);
int subi;
for (subi = 0; subi < subn; subi++)
pr_info("%3d:%1d: %s\n", i, subi + 1,
- test_description(t, subi));
+ test_description(*t, subi));
}
}
return 0;
}
+static int workloads__fprintf_list(FILE *fp)
+{
+ struct test_workload *twl;
+ int printed = 0;
+
+ workloads__for_each(twl)
+ printed += fprintf(fp, "%s\n", twl->name);
+
+ return printed;
+}
+
static int run_workload(const char *work, int argc, const char **argv)
{
- unsigned int i = 0;
struct test_workload *twl;
- for (i = 0; i < ARRAY_SIZE(workloads); i++) {
- twl = workloads[i];
+ workloads__for_each(twl) {
if (!strcmp(twl->name, work))
return twl->func(argc, argv);
}
@@ -526,6 +649,55 @@ static int perf_test__config(const char *var, const char *value,
return 0;
}
+static struct test_suite **build_suites(void)
+{
+ /*
+ * TODO: suites is static to avoid needing to clean up the scripts tests
+ * for leak sanitizer.
+ */
+ static struct test_suite **suites[] = {
+ generic_tests,
+ arch_tests,
+ NULL,
+ };
+ struct test_suite **result;
+ struct test_suite *t;
+ size_t n = 0, num_suites = 0;
+
+ if (suites[2] == NULL)
+ suites[2] = create_script_test_suites();
+
+#define for_each_test(t) \
+ for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0) \
+ while ((t = suites[i][j++]) != NULL)
+
+ for_each_test(t)
+ num_suites++;
+
+ result = calloc(num_suites + 1, sizeof(struct test_suite *));
+
+ for (int pass = 1; pass <= 2; pass++) {
+ for_each_test(t) {
+ bool exclusive = false;
+
+ if (!has_subtests(t)) {
+ exclusive = test_exclusive(t, -1);
+ } else {
+ for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
+ if (test_exclusive(t, subi)) {
+ exclusive = true;
+ break;
+ }
+ }
+ }
+ if ((!exclusive && pass == 1) || (exclusive && pass == 2))
+ result[n++] = t;
+ }
+ }
+ return result;
+#undef for_each_test
+}
+
int cmd_test(int argc, const char **argv)
{
const char *test_usage[] = {
@@ -534,16 +706,17 @@ int cmd_test(int argc, const char **argv)
};
const char *skip = NULL;
const char *workload = NULL;
+ bool list_workloads = false;
const struct option test_options[] = {
OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('F', "dont-fork", &dont_fork,
"Do not fork for testcase"),
- OPT_BOOLEAN('p', "parallel", &parallel, "Run the tests in parallel"),
OPT_BOOLEAN('S', "sequential", &sequential,
"Run the tests one after another rather than in parallel"),
- OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"),
+ OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
+ OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
OPT_STRING(0, "objdump", &test_objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
@@ -552,6 +725,7 @@ int cmd_test(int argc, const char **argv)
const char * const test_subcommands[] = { "list", NULL };
struct intlist *skiplist = NULL;
int ret = hists__init();
+ struct test_suite **suites;
if (ret < 0)
return ret;
@@ -561,22 +735,29 @@ int cmd_test(int argc, const char **argv)
/* Unbuffered output */
setvbuf(stdout, NULL, _IONBF, 0);
- tests[2] = create_script_test_suites();
argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
- if (argc >= 1 && !strcmp(argv[0], "list"))
- return perf_test__list(argc - 1, argv + 1);
+ if (argc >= 1 && !strcmp(argv[0], "list")) {
+ suites = build_suites();
+ ret = perf_test__list(suites, argc - 1, argv + 1);
+ free(suites);
+ return ret;
+ }
if (workload)
return run_workload(workload, argc, argv);
+ if (list_workloads) {
+ workloads__fprintf_list(stdout);
+ return 0;
+ }
+
if (dont_fork)
sequential = true;
- else if (parallel)
- sequential = false;
symbol_conf.priv_size = sizeof(int);
symbol_conf.try_vmlinux_path = true;
+
if (symbol__init(NULL) < 0)
return -1;
@@ -588,5 +769,8 @@ int cmd_test(int argc, const char **argv)
*/
rlimit__bump_memlock();
- return __cmd_test(argc, argv, skiplist);
+ suites = build_suites();
+ ret = __cmd_test(suites, argc, argv, skiplist);
+ free(suites);
+ return ret;
}
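
Beyond the diff itself, the scheduling idea is easy to state in
isolation. Below is a minimal standalone sketch in C (hypothetical
code, not from the perf tree) of the two-pass scheme the patch
implements in __cmd_test(): pass 1 forks the non-exclusive tests in
parallel and reaps them, pass 2 runs the exclusive tests strictly one
at a time.

/*
 * Minimal standalone sketch of the two-pass scheme (hypothetical code,
 * not from the perf sources): pass 1 forks the non-exclusive tests in
 * parallel and reaps them all, pass 2 runs exclusive tests one by one.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

struct test {
	const char *name;
	int (*run)(void);
	int exclusive;		/* clashes over a shared resource, run alone */
};

static int dummy_ok(void) { return 0; }

static struct test tests[] = {
	{ "parallel-a",          dummy_ok, 0 },
	{ "parallel-b",          dummy_ok, 0 },
	{ "needs-whole-machine", dummy_ok, 1 },
};

#define NUM_TESTS (sizeof(tests) / sizeof(tests[0]))

int main(void)
{
	pid_t pids[NUM_TESTS];

	for (int pass = 1; pass <= 2; pass++) {
		for (size_t i = 0; i < NUM_TESTS; i++) {
			pids[i] = 0;
			/* Pass 1: non-exclusive tests; pass 2: exclusive ones. */
			if (tests[i].exclusive != (pass == 2))
				continue;
			pids[i] = fork();
			if (pids[i] == 0)
				exit(tests[i].run());
			if (pids[i] < 0)
				continue;	/* fork failed; skip in this sketch */
			if (pass == 2) {
				/* Exclusive: reap before starting the next test. */
				int status;

				waitpid(pids[i], &status, 0);
				pids[i] = 0;
				printf("%s: %d\n", tests[i].name, WEXITSTATUS(status));
			}
		}
		/* Pass 1 started everything concurrently; reap it all here. */
		for (size_t i = 0; i < NUM_TESTS; i++) {
			if (pids[i] > 0) {
				int status;

				waitpid(pids[i], &status, 0);
				printf("%s: %d\n", tests[i].name, WEXITSTATUS(status));
			}
		}
	}
	return 0;
}

The real code keeps this shape but drives each child through the
start_command()/finish_command() helpers so it can also capture a
child's stderr and replay it when the test fails.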
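
The other mechanism worth noting is the signal handling that
run_test_child() gains. Here is a reduced sketch of the same idiom,
hooking a single signal instead of the patch's full list (again
hypothetical, standalone code): fatal signals are routed through
siglongjmp() so the child can report an error instead of dying
silently.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf jbuf;

static void on_fatal(int sig)
{
	siglongjmp(jbuf, sig);	/* resume in run_one(), carrying the signal */
}

static int run_one(int (*fn)(void))
{
	int err = sigsetjmp(jbuf, 1);

	if (err) {
		/* Non-zero: we arrived here via siglongjmp(). */
		fprintf(stderr, "---- unexpected signal (%d) ----\n", err);
		signal(SIGSEGV, SIG_DFL);
		return -err;
	}
	signal(SIGSEGV, on_fatal);	/* the real patch hooks a whole list of signals */
	err = fn();
	signal(SIGSEGV, SIG_DFL);
	return err;
}

static int crashes(void)
{
	return *(volatile int *)0;	/* deliberate SIGSEGV for the demo */
}

int main(void)
{
	printf("run_one(crashes) = %d\n", run_one(crashes));
	return 0;
}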