Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--	tools/perf/builtin-stat.c	396
1 file changed, 195 insertions(+), 201 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 689a3d43c258..fdf5172646a5 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -46,6 +46,7 @@
#include "util/parse-events.h"
#include "util/pmus.h"
#include "util/pmu.h"
+#include "util/tool_pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
@@ -294,14 +295,14 @@ static int read_single_counter(struct evsel *counter, int cpu_map_idx, int threa
* terminates. Use the wait4 values in that case.
*/
if (err && cpu_map_idx == 0 &&
- (evsel__tool_event(counter) == PERF_TOOL_USER_TIME ||
- evsel__tool_event(counter) == PERF_TOOL_SYSTEM_TIME)) {
+ (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME ||
+ evsel__tool_event(counter) == TOOL_PMU__EVENT_SYSTEM_TIME)) {
u64 val, *start_time;
struct perf_counts_values *count =
perf_counts(counter->counts, cpu_map_idx, thread);
start_time = xyarray__entry(counter->start_times, cpu_map_idx, thread);
- if (evsel__tool_event(counter) == PERF_TOOL_USER_TIME)
+ if (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME)
val = ru_stats.ru_utime_usec_stat.mean;
else
val = ru_stats.ru_stime_usec_stat.mean;
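The fallback above reuses the mean user/system time that perf stat records from wait4() when the forked workload exits. A minimal standalone sketch of that idea; the enum shape is assumed from util/tool_pmu.h and everything else is invented for illustration:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>

/* Assumed shape of the renamed enum from util/tool_pmu.h. */
enum tool_pmu_event {
	TOOL_PMU__EVENT_NONE = 0,
	TOOL_PMU__EVENT_USER_TIME,
	TOOL_PMU__EVENT_SYSTEM_TIME,
};

/* Invented helper: the rusage time matching a tool event, in usec. */
static unsigned long long rusage_usec(const struct rusage *ru,
				      enum tool_pmu_event ev)
{
	struct timeval tv = (ev == TOOL_PMU__EVENT_USER_TIME) ? ru->ru_utime
							       : ru->ru_stime;
	return tv.tv_sec * 1000000ULL + tv.tv_usec;
}

int main(void)
{
	pid_t child = fork();

	if (child < 0)
		return 1;
	if (child == 0) {
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}

	int status;
	struct rusage ru;

	/* wait4() reports the child's rusage, the source of ru_stats. */
	if (wait4(child, &status, 0, &ru) < 0)
		return 1;
	printf("user=%lluus sys=%lluus\n",
	       rusage_usec(&ru, TOOL_PMU__EVENT_USER_TIME),
	       rusage_usec(&ru, TOOL_PMU__EVENT_SYSTEM_TIME));
	return 0;
}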
@@ -639,8 +640,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
* (behavior changed with commit b0a873e).
*/
if (errno == EINVAL || errno == ENOSYS ||
- errno == ENOENT || errno == EOPNOTSUPP ||
- errno == ENXIO) {
+ errno == ENOENT || errno == ENXIO) {
if (verbose > 0)
ui__warning("%s event is not supported by the kernel.\n",
evsel__name(counter));
@@ -658,7 +658,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
if (verbose > 0)
ui__warning("%s\n", msg);
return COUNTER_RETRY;
- } else if (target__has_per_thread(&target) &&
+ } else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
evsel_list->core.threads &&
evsel_list->core.threads->err_thread != -1) {
/*
@@ -679,6 +679,19 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
return COUNTER_SKIP;
}
+ if (errno == EOPNOTSUPP) {
+ if (verbose > 0) {
+ ui__warning("%s event is not supported by the kernel.\n",
+ evsel__name(counter));
+ }
+ counter->supported = false;
+ counter->errored = true;
+
+ if ((evsel__leader(counter) != counter) ||
+ !(counter->core.leader->nr_members > 1))
+ return COUNTER_SKIP;
+ }
+
evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
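With this change, EOPNOTSUPP no longer takes the generic "not supported" branch at the top of stat_handle_error() and no longer triggers the per-thread fallback; it is warned about and skipped on its own. A compact sketch of the resulting classification; the enum names match the switch statements in the hunks below, but this handler body is invented:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Recovery codes consumed by the callers' switch statements. */
enum counter_recovery { COUNTER_SKIP, COUNTER_RETRY, COUNTER_FATAL };

/* Invented stand-in for stat_handle_error(). */
static enum counter_recovery classify_open_error(int err, bool weak_group)
{
	if (weak_group)
		return COUNTER_RETRY;	/* reopen without the group */

	switch (err) {
	case EINVAL:
	case ENOSYS:
	case ENOENT:
	case ENXIO:
	case EOPNOTSUPP:	/* now warned about, then skipped */
		return COUNTER_SKIP;
	default:
		return COUNTER_FATAL;
	}
}

int main(void)
{
	printf("EOPNOTSUPP -> %d (skip)\n",
	       classify_open_error(EOPNOTSUPP, false));
	return 0;
}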
@@ -716,15 +729,19 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
}
if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
- if (affinity__setup(&saved_affinity) < 0)
- return -1;
+ if (affinity__setup(&saved_affinity) < 0) {
+ err = -1;
+ goto err_out;
+ }
affinity = &saved_affinity;
}
evlist__for_each_entry(evsel_list, counter) {
counter->reset_group = false;
- if (bpf_counter__load(counter, &target))
- return -1;
+ if (bpf_counter__load(counter, &target)) {
+ err = -1;
+ goto err_out;
+ }
if (!(evsel__is_bperf(counter)))
all_counters_use_bpf = false;
}
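evlist__for_each_entry() above is perf's iterator macro over the event list. A simplified, self-contained sketch of how such a macro can be built on a singly linked list; this is not perf's actual definition, which wraps the kernel-style list.h iterators:

#include <stdio.h>

struct evsel_node {
	const char *name;
	struct evsel_node *next;
};

/* Simplified iteration macro in the style of evlist__for_each_entry(). */
#define sketch_for_each_entry(list, pos) \
	for ((pos) = (list); (pos); (pos) = (pos)->next)

int main(void)
{
	struct evsel_node c = { "branch-misses", NULL };
	struct evsel_node b = { "instructions", &c };
	struct evsel_node a = { "cycles", &b };
	struct evsel_node *pos;

	sketch_for_each_entry(&a, pos)
		printf("%s\n", pos->name);
	return 0;
}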
@@ -767,7 +784,8 @@ try_again:
switch (stat_handle_error(counter)) {
case COUNTER_FATAL:
- return -1;
+ err = -1;
+ goto err_out;
case COUNTER_RETRY:
goto try_again;
case COUNTER_SKIP:
@@ -808,7 +826,8 @@ try_again_reset:
switch (stat_handle_error(counter)) {
case COUNTER_FATAL:
- return -1;
+ err = -1;
+ goto err_out;
case COUNTER_RETRY:
goto try_again_reset;
case COUNTER_SKIP:
@@ -821,6 +840,7 @@ try_again_reset:
}
}
affinity__cleanup(affinity);
+ affinity = NULL;
evlist__for_each_entry(evsel_list, counter) {
if (!counter->supported) {
@@ -833,8 +853,10 @@ try_again_reset:
stat_config.unit_width = l;
if (evsel__should_store_id(counter) &&
- evsel__store_ids(counter, evsel_list))
- return -1;
+ evsel__store_ids(counter, evsel_list)) {
+ err = -1;
+ goto err_out;
+ }
}
if (evlist__apply_filters(evsel_list, &counter, &target)) {
@@ -855,20 +877,23 @@ try_again_reset:
}
if (err < 0)
- return err;
+ goto err_out;
err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
process_synthesized_event, is_pipe);
if (err < 0)
- return err;
+ goto err_out;
+
}
if (target.initial_delay) {
pr_info(EVLIST_DISABLED_MSG);
} else {
err = enable_counters();
- if (err)
- return -1;
+ if (err) {
+ err = -1;
+ goto err_out;
+ }
}
/* Exec the command, if any */
@@ -878,8 +903,10 @@ try_again_reset:
if (target.initial_delay > 0) {
usleep(target.initial_delay * USEC_PER_MSEC);
err = enable_counters();
- if (err)
- return -1;
+ if (err) {
+ err = -1;
+ goto err_out;
+ }
pr_info(EVLIST_ENABLED_MSG);
}
@@ -899,7 +926,8 @@ try_again_reset:
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
pr_err("Workload failed: %s\n", emsg);
- return -1;
+ err = -1;
+ goto err_out;
}
if (WIFSIGNALED(status))
@@ -946,8 +974,23 @@ try_again_reset:
evlist__close(evsel_list);
return WEXITSTATUS(status);
+
+err_out:
+ if (forks)
+ evlist__cancel_workload(evsel_list);
+
+ affinity__cleanup(affinity);
+ return err;
}
+/*
+ * Returns -1 for fatal errors which signifies to not continue
+ * when in repeat mode.
+ *
+ * Returns < -1 error codes when stat record is used. These
+ * result in the stat information being displayed, but writing
+ * to the file fails and is non fatal.
+ */
static int run_perf_stat(int argc, const char **argv, int run_idx)
{
int ret;
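The hunks above replace every early return in __run_perf_stat() with a goto to the new err_out label, so the forked workload is cancelled and the affinity is released on each failure path; the "affinity = NULL" after the successful affinity__cleanup() keeps the label from cleaning up twice. A generic, self-contained sketch of this C cleanup idiom, with all names invented:

#include <stdio.h>
#include <stdlib.h>

/* Invented resource standing in for affinity/workload state. */
struct resource { const char *name; };

static struct resource *acquire(const char *name)
{
	struct resource *r = malloc(sizeof(*r));

	if (r)
		r->name = name;
	return r;
}

/* NULL-safe, so the error path can release everything in one place. */
static void release(struct resource *r)
{
	if (r) {
		printf("released %s\n", r->name);
		free(r);
	}
}

static int do_work(void)
{
	return 0;	/* pretend the measurement succeeded */
}

static int run(void)
{
	struct resource *affinity = NULL, *workload = NULL;
	int err = 0;

	affinity = acquire("affinity");
	if (!affinity) {
		err = -1;
		goto err_out;
	}
	workload = acquire("workload");
	if (!workload) {
		err = -1;
		goto err_out;
	}

	/* Affinity is no longer needed past this point. */
	release(affinity);
	affinity = NULL;	/* so err_out won't release it twice */

	if (do_work() < 0) {	/* later failures still take err_out */
		err = -1;
		goto err_out;
	}
	release(workload);
	return 0;

err_out:
	release(workload);
	release(affinity);
	return err;
}

int main(void)
{
	return run() ? 1 : 0;
}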
@@ -1814,130 +1857,25 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
}
/*
- * Add default attributes, if there were no attributes specified or
+ * Add default events, if there were no attributes specified or
* if -d/--detailed, -d -d or -d -d -d is used:
*/
-static int add_default_attributes(void)
+static int add_default_events(void)
{
- struct perf_event_attr default_attrs0[] = {
-
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
- { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
-
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
-};
- struct perf_event_attr frontend_attrs[] = {
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
-};
- struct perf_event_attr backend_attrs[] = {
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
-};
- struct perf_event_attr default_attrs1[] = {
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
- { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
-
-};
-
-/*
- * Detailed stats (-d), covering the L1 and last level data caches:
- */
- struct perf_event_attr detailed_attrs[] = {
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_L1D << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_L1D << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_LL << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_LL << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
-};
-
-/*
- * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
- */
- struct perf_event_attr very_detailed_attrs[] = {
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_L1I << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_L1I << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_DTLB << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_DTLB << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_ITLB << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_ITLB << 0 |
- (PERF_COUNT_HW_CACHE_OP_READ << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
-
-};
+ const char *pmu = parse_events_option_args.pmu_filter ?: "all";
+ struct parse_events_error err;
+ struct evlist *evlist = evlist__new();
+ struct evsel *evsel;
+ int ret = 0;
-/*
- * Very, very detailed stats (-d -d -d), adding prefetch events:
- */
- struct perf_event_attr very_very_detailed_attrs[] = {
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_L1D << 0 |
- (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
-
- { .type = PERF_TYPE_HW_CACHE,
- .config =
- PERF_COUNT_HW_CACHE_L1D << 0 |
- (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
- (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
-};
+ if (!evlist)
+ return -ENOMEM;
- struct perf_event_attr default_null_attrs[] = {};
- const char *pmu = parse_events_option_args.pmu_filter ?: "all";
+ parse_events_error__init(&err);
/* Set attrs if no event is selected and !null_run: */
if (stat_config.null_run)
- return 0;
+ goto out;
if (transaction_run) {
/* Handle -T as -M transaction. Once platform specific metrics
@@ -1947,9 +1885,10 @@ static int add_default_attributes(void)
*/
if (!metricgroup__has_metric(pmu, "transaction")) {
pr_err("Missing transaction metrics\n");
- return -1;
+ ret = -1;
+ goto out;
}
- return metricgroup__parse_groups(evsel_list, pmu, "transaction",
+ ret = metricgroup__parse_groups(evlist, pmu, "transaction",
stat_config.metric_no_group,
stat_config.metric_no_merge,
stat_config.metric_no_threshold,
@@ -1957,6 +1896,7 @@ static int add_default_attributes(void)
stat_config.system_wide,
stat_config.hardware_aware_grouping,
&stat_config.metric_events);
+ goto out;
}
if (smi_cost) {
@@ -1964,26 +1904,29 @@ static int add_default_attributes(void)
if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
pr_err("freeze_on_smi is not supported.\n");
- return -1;
+ ret = -1;
+ goto out;
}
if (!smi) {
if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
- fprintf(stderr, "Failed to set freeze_on_smi.\n");
- return -1;
+ pr_err("Failed to set freeze_on_smi.\n");
+ ret = -1;
+ goto out;
}
smi_reset = true;
}
if (!metricgroup__has_metric(pmu, "smi")) {
pr_err("Missing smi metrics\n");
- return -1;
+ ret = -1;
+ goto out;
}
if (!force_metric_only)
stat_config.metric_only = true;
- return metricgroup__parse_groups(evsel_list, pmu, "smi",
+ ret = metricgroup__parse_groups(evlist, pmu, "smi",
stat_config.metric_no_group,
stat_config.metric_no_merge,
stat_config.metric_no_threshold,
@@ -1991,6 +1934,7 @@ static int add_default_attributes(void)
stat_config.system_wide,
stat_config.hardware_aware_grouping,
&stat_config.metric_events);
+ goto out;
}
if (topdown_run) {
@@ -2003,21 +1947,23 @@ static int add_default_attributes(void)
if (!max_level) {
pr_err("Topdown requested but the topdown metric groups aren't present.\n"
"(See perf list the metric groups have names like TopdownL1)\n");
- return -1;
+ ret = -1;
+ goto out;
}
if (stat_config.topdown_level > max_level) {
pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
- return -1;
- } else if (!stat_config.topdown_level)
+ ret = -1;
+ goto out;
+ } else if (!stat_config.topdown_level) {
stat_config.topdown_level = 1;
-
+ }
if (!stat_config.interval && !stat_config.metric_only) {
fprintf(stat_config.output,
"Topdown accuracy may decrease when measuring long periods.\n"
"Please print the result regularly, e.g. -I1000\n");
}
str[8] = stat_config.topdown_level + '0';
- if (metricgroup__parse_groups(evsel_list,
+ if (metricgroup__parse_groups(evlist,
pmu, str,
/*metric_no_group=*/false,
/*metric_no_merge=*/false,
@@ -2025,41 +1971,49 @@ static int add_default_attributes(void)
stat_config.user_requested_cpu_list,
stat_config.system_wide,
stat_config.hardware_aware_grouping,
- &stat_config.metric_events) < 0)
- return -1;
+ &stat_config.metric_events) < 0) {
+ ret = -1;
+ goto out;
+ }
}
if (!stat_config.topdown_level)
stat_config.topdown_level = 1;
- if (!evsel_list->core.nr_entries) {
+ if (!evlist->core.nr_entries && !evsel_list->core.nr_entries) {
/* No events so add defaults. */
if (target__has_cpu(&target))
- default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
+ ret = parse_events(evlist, "cpu-clock", &err);
+ else
+ ret = parse_events(evlist, "task-clock", &err);
+ if (ret)
+ goto out;
+
+ ret = parse_events(evlist,
+ "context-switches,"
+ "cpu-migrations,"
+ "page-faults,"
+ "instructions,"
+ "cycles,"
+ "stalled-cycles-frontend,"
+ "stalled-cycles-backend,"
+ "branches,"
+ "branch-misses",
+ &err);
+ if (ret)
+ goto out;
- if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
- return -1;
- if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) {
- if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
- return -1;
- }
- if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) {
- if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
- return -1;
- }
- if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
- return -1;
/*
* Add TopdownL1 metrics if they exist. To minimize
* multiplexing, don't request threshold computation.
*/
if (metricgroup__has_metric(pmu, "Default")) {
struct evlist *metric_evlist = evlist__new();
- struct evsel *metric_evsel;
-
- if (!metric_evlist)
- return -1;
+ if (!metric_evlist) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (metricgroup__parse_groups(metric_evlist, pmu, "Default",
/*metric_no_group=*/false,
/*metric_no_merge=*/false,
@@ -2067,43 +2021,71 @@ static int add_default_attributes(void)
stat_config.user_requested_cpu_list,
stat_config.system_wide,
stat_config.hardware_aware_grouping,
- &stat_config.metric_events) < 0)
- return -1;
-
- evlist__for_each_entry(metric_evlist, metric_evsel) {
- metric_evsel->skippable = true;
- metric_evsel->default_metricgroup = true;
+ &stat_config.metric_events) < 0) {
+ ret = -1;
+ goto out;
}
- evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
+
+ evlist__for_each_entry(metric_evlist, evsel)
+ evsel->default_metricgroup = true;
+
+ evlist__splice_list_tail(evlist, &metric_evlist->core.entries);
evlist__delete(metric_evlist);
}
-
- /* Platform specific attrs */
- if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
- return -1;
}
/* Detailed events get appended to the event list: */
- if (detailed_run < 1)
- return 0;
-
- /* Append detailed run extra attributes: */
- if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
- return -1;
-
- if (detailed_run < 2)
- return 0;
-
- /* Append very detailed run extra attributes: */
- if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
- return -1;
-
- if (detailed_run < 3)
- return 0;
-
- /* Append very, very detailed run extra attributes: */
- return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
+ if (!ret && detailed_run >= 1) {
+ /*
+ * Detailed stats (-d), covering the L1 and last level data
+ * caches:
+ */
+ ret = parse_events(evlist,
+ "L1-dcache-loads,"
+ "L1-dcache-load-misses,"
+ "LLC-loads,"
+ "LLC-load-misses",
+ &err);
+ }
+ if (!ret && detailed_run >= 2) {
+ /*
+ * Very detailed stats (-d -d), covering the instruction cache
+ * and the TLB caches:
+ */
+ ret = parse_events(evlist,
+ "L1-icache-loads,"
+ "L1-icache-load-misses,"
+ "dTLB-loads,"
+ "dTLB-load-misses,"
+ "iTLB-loads,"
+ "iTLB-load-misses",
+ &err);
+ }
+ if (!ret && detailed_run >= 3) {
+ /*
+ * Very, very detailed stats (-d -d -d), adding prefetch events:
+ */
+ ret = parse_events(evlist,
+ "L1-dcache-prefetches,"
+ "L1-dcache-prefetch-misses",
+ &err);
+ }
+out:
+ if (!ret) {
+ evlist__for_each_entry(evlist, evsel) {
+ /*
+ * Make at least one event non-skippable so fatal errors are visible.
+ * 'cycles' always used to be default and non-skippable, so use that.
+ */
+ if (strcmp("cycles", evsel__name(evsel)))
+ evsel->skippable = true;
+ }
+ }
+ parse_events_error__exit(&err);
+ evlist__splice_list_tail(evsel_list, &evlist->core.entries);
+ evlist__delete(evlist);
+ return ret;
}
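Which events add_default_events() now requests at each -d level can be read straight off the parse_events() strings above. A tiny standalone printer of that selection, illustrative only and not perf code:

#include <stdio.h>

/* Default event strings as introduced above. */
static const char *base_events =
	"context-switches,cpu-migrations,page-faults,instructions,cycles,"
	"stalled-cycles-frontend,stalled-cycles-backend,branches,branch-misses";
static const char *detailed[] = {
	"L1-dcache-loads,L1-dcache-load-misses,LLC-loads,LLC-load-misses",
	"L1-icache-loads,L1-icache-load-misses,dTLB-loads,dTLB-load-misses,"
	"iTLB-loads,iTLB-load-misses",
	"L1-dcache-prefetches,L1-dcache-prefetch-misses",
};

/* Print the event list 'perf stat' would request at this detail level. */
static void print_default_events(int has_cpu_target, int detailed_run)
{
	printf("%s,%s", has_cpu_target ? "cpu-clock" : "task-clock",
	       base_events);
	for (int i = 0; i < detailed_run && i < 3; i++)
		printf(",%s", detailed[i]);
	putchar('\n');
}

int main(void)
{
	print_default_events(/*has_cpu_target=*/0, /*detailed_run=*/2);
	return 0;
}

Routing the defaults through parse_events() rather than hard-coded perf_event_attr tables presumably lets them share the same parsing and PMU-selection path as user-supplied -e events; that rationale is inferred from the diff, not stated in it.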
static const char * const stat_record_usage[] = {
@@ -2591,6 +2573,14 @@ int cmd_stat(int argc, const char **argv)
goto out;
}
+ if (stat_config.csv_output || (stat_config.metric_only && stat_config.json_output)) {
+ /*
+ * Current CSV and metric-only JSON output doesn't display the
+ * metric threshold so don't compute it.
+ */
+ stat_config.metric_no_threshold = true;
+ }
+
if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
fprintf(stderr, "--table is only supported with -r\n");
parse_options_usage(stat_usage, stat_options, "r", 1);
@@ -2651,6 +2641,7 @@ int cmd_stat(int argc, const char **argv)
} else if (big_num_opt == 0) /* User passed --no-big-num */
stat_config.big_num = false;
+ target.inherit = !stat_config.no_inherit;
err = target__validate(&target);
if (err) {
target__strerror(&target, err, errbuf, BUFSIZ);
@@ -2760,7 +2751,7 @@ int cmd_stat(int argc, const char **argv)
}
}
- if (add_default_attributes())
+ if (add_default_events())
goto out;
if (stat_config.cgroup_list) {
@@ -2879,7 +2870,10 @@ int cmd_stat(int argc, const char **argv)
evlist__reset_prev_raw_counts(evsel_list);
status = run_perf_stat(argc, argv, run_idx);
- if (forever && status != -1 && !interval) {
+ if (status == -1)
+ break;
+
+ if (forever && !interval) {
print_counters(NULL, argc, argv);
perf_stat__reset_stats();
}
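Together with the run_perf_stat() comment above, the loop now stops repeating only on the fatal -1, while the < -1 stat-record write failures still fall through to printing. A minimal sketch of that control flow, with an invented loop body:

#include <stdio.h>

/* Invented stand-in: -1 fatal, < -1 non-fatal record failure, 0 ok. */
static int run_once(int run_idx)
{
	return run_idx == 2 ? -1 : 0;
}

int main(void)
{
	for (int run_idx = 0; run_idx < 5; run_idx++) {
		int status = run_once(run_idx);

		if (status == -1) {	/* fatal: stop repeating */
			fprintf(stderr, "run %d failed fatally\n", run_idx);
			break;
		}
		printf("run %d: status %d\n", run_idx, status);
	}
	return 0;
}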