Diffstat (limited to 'tools/perf/util')
-rw-r--r-- | tools/perf/util/cpumap.h | 1
-rw-r--r-- | tools/perf/util/evlist.c | 113
-rw-r--r-- | tools/perf/util/evlist.h | 11
-rw-r--r-- | tools/perf/util/evsel.c | 35
-rw-r--r-- | tools/perf/util/evsel.h | 9
-rw-r--r-- | tools/perf/util/genelf.c | 46
-rw-r--r-- | tools/perf/util/header.c | 23
-rw-r--r-- | tools/perf/util/include/linux/linkage.h | 89
-rw-r--r-- | tools/perf/util/machine.c | 1
-rw-r--r-- | tools/perf/util/metricgroup.c | 7
-rw-r--r-- | tools/perf/util/sort.c | 16
-rw-r--r-- | tools/perf/util/stat.c | 5
-rw-r--r-- | tools/perf/util/stat.h | 3
13 files changed, 269 insertions, 90 deletions
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 57943f3685f8..3a442f021468 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -63,4 +63,5 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
 
 int cpu_map__cpu(struct perf_cpu_map *cpus, int idx);
 bool cpu_map__has(struct perf_cpu_map *cpus, int cpu);
+
 #endif /* __PERF_CPUMAP_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index fdce590d2278..1548237b6558 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -18,6 +18,7 @@
 #include "debug.h"
 #include "units.h"
 #include <internal/lib.h> // page_size
+#include "affinity.h"
 #include "../perf.h"
 #include "asm/bug.h"
 #include "bpf-event.h"
@@ -342,14 +343,63 @@ static int perf_evlist__nr_threads(struct evlist *evlist,
 	return perf_thread_map__nr(evlist->core.threads);
 }
 
+void evlist__cpu_iter_start(struct evlist *evlist)
+{
+	struct evsel *pos;
+
+	/*
+	 * Reset the per evsel cpu_iter. This is needed because
+	 * each evsel's cpumap may have a different index space,
+	 * and some operations need the index to modify
+	 * the FD xyarray (e.g. open, close)
+	 */
+	evlist__for_each_entry(evlist, pos)
+		pos->cpu_iter = 0;
+}
+
+bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
+{
+	if (ev->cpu_iter >= ev->core.cpus->nr)
+		return true;
+	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
+		return true;
+	return false;
+}
+
+bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
+{
+	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
+		ev->cpu_iter++;
+		return false;
+	}
+	return true;
+}
+
 void evlist__disable(struct evlist *evlist)
 {
 	struct evsel *pos;
+	struct affinity affinity;
+	int cpu, i;
+
+	if (affinity__setup(&affinity) < 0)
+		return;
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry(evlist, pos) {
+			if (evsel__cpu_iter_skip(pos, cpu))
+				continue;
+			if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
+				continue;
+			evsel__disable_cpu(pos, pos->cpu_iter - 1);
+		}
+	}
+	affinity__cleanup(&affinity);
 
 	evlist__for_each_entry(evlist, pos) {
-		if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
+		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
-		evsel__disable(pos);
+		pos->disabled = true;
 	}
 
 	evlist->enabled = false;
@@ -358,11 +408,28 @@ void evlist__disable(struct evlist *evlist)
 void evlist__enable(struct evlist *evlist)
 {
 	struct evsel *pos;
+	struct affinity affinity;
+	int cpu, i;
+
+	if (affinity__setup(&affinity) < 0)
+		return;
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry(evlist, pos) {
+			if (evsel__cpu_iter_skip(pos, cpu))
+				continue;
+			if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
+				continue;
+			evsel__enable_cpu(pos, pos->cpu_iter - 1);
+		}
+	}
+	affinity__cleanup(&affinity);
 
 	evlist__for_each_entry(evlist, pos) {
 		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
 			continue;
-		evsel__enable(pos);
+		pos->disabled = false;
 	}
 
 	evlist->enabled = true;
@@ -1137,9 +1204,35 @@ void perf_evlist__set_selected(struct evlist *evlist,
 void evlist__close(struct evlist *evlist)
 {
 	struct evsel *evsel;
+	struct affinity affinity;
+	int cpu, i;
 
-	evlist__for_each_entry_reverse(evlist, evsel)
-		evsel__close(evsel);
+	/*
+	 * With perf record core.cpus is usually NULL.
+	 * Use the old method to handle this for now.
+	 */
+	if (!evlist->core.cpus) {
+		evlist__for_each_entry_reverse(evlist, evsel)
+			evsel__close(evsel);
+		return;
+	}
+
+	if (affinity__setup(&affinity) < 0)
+		return;
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry_reverse(evlist, evsel) {
+			if (evsel__cpu_iter_skip(evsel, cpu))
+				continue;
+			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
+		}
+	}
+	affinity__cleanup(&affinity);
+	evlist__for_each_entry_reverse(evlist, evsel) {
+		perf_evsel__free_fd(&evsel->core);
+		perf_evsel__free_id(&evsel->core);
+	}
 }
 
 static int perf_evlist__create_syswide_maps(struct evlist *evlist)
@@ -1577,7 +1670,8 @@ void perf_evlist__force_leader(struct evlist *evlist)
 }
 
 struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
-						 struct evsel *evsel)
+						 struct evsel *evsel,
+						 bool close)
 {
 	struct evsel *c2, *leader;
 	bool is_open = true;
@@ -1594,10 +1688,15 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
 		if (c2 == evsel)
 			is_open = false;
 		if (c2->leader == leader) {
-			if (is_open)
+			if (is_open && close)
 				perf_evsel__close(&c2->core);
 			c2->leader = c2;
 			c2->core.nr_members = 0;
+			/*
+			 * Set this for all former members of the group
+			 * to indicate they get reopened.
+			 */
+			c2->reset_group = true;
 		}
 	}
 	return leader;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 3655b9ebb147..f5bd5c386df1 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -334,9 +334,17 @@ void perf_evlist__to_front(struct evlist *evlist,
 #define evlist__for_each_entry_safe(evlist, tmp, evsel) \
 	__evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel)
 
+#define evlist__for_each_cpu(evlist, index, cpu)	\
+	evlist__cpu_iter_start(evlist);			\
+	perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus)
+
 void perf_evlist__set_tracking_event(struct evlist *evlist,
 				     struct evsel *tracking_evsel);
 
+void evlist__cpu_iter_start(struct evlist *evlist);
+bool evsel__cpu_iter_skip(struct evsel *ev, int cpu);
+bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu);
+
 struct evsel *
 perf_evlist__find_evsel_by_str(struct evlist *evlist, const char *str);
 
@@ -348,5 +356,6 @@ bool perf_evlist__exclude_kernel(struct evlist *evlist);
 void perf_evlist__force_leader(struct evlist *evlist);
 
 struct evsel *perf_evlist__reset_weak_group(struct evlist *evlist,
-						 struct evsel *evsel);
+						 struct evsel *evsel,
+						 bool close);
 #endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f4dea055b080..a69e64236120 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1223,16 +1223,27 @@ int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter)
 	return perf_evsel__append_filter(evsel, "%s,%s", filter);
 }
 
+/* Caller has to clear disabled after going through all CPUs. */
+int evsel__enable_cpu(struct evsel *evsel, int cpu)
+{
+	return perf_evsel__enable_cpu(&evsel->core, cpu);
+}
+
 int evsel__enable(struct evsel *evsel)
 {
 	int err = perf_evsel__enable(&evsel->core);
 
 	if (!err)
 		evsel->disabled = false;
-
 	return err;
 }
 
+/* Caller has to set disabled after going through all CPUs. */
+int evsel__disable_cpu(struct evsel *evsel, int cpu)
+{
+	return perf_evsel__disable_cpu(&evsel->core, cpu);
+}
+
 int evsel__disable(struct evsel *evsel)
 {
 	int err = perf_evsel__disable(&evsel->core);
@@ -1587,8 +1598,9 @@ static int perf_event_open(struct evsel *evsel,
 	return fd;
 }
 
-int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
-		struct perf_thread_map *threads)
+static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
+		struct perf_thread_map *threads,
+		int start_cpu, int end_cpu)
 {
 	int cpu, thread, nthreads;
 	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
@@ -1665,7 +1677,7 @@ retry_sample_id:
 
 	display_attr(&evsel->core.attr);
 
-	for (cpu = 0; cpu < cpus->nr; cpu++) {
+	for (cpu = start_cpu; cpu < end_cpu; cpu++) {
 
 		for (thread = 0; thread < nthreads; thread++) {
 			int fd, group_fd;
@@ -1843,6 +1855,12 @@ out_close:
 	return err;
 }
 
+int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
+		struct perf_thread_map *threads)
+{
+	return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1);
+}
+
 void evsel__close(struct evsel *evsel)
 {
 	perf_evsel__close(&evsel->core);
@@ -1850,9 +1868,14 @@ void evsel__close(struct evsel *evsel)
 }
 
 int perf_evsel__open_per_cpu(struct evsel *evsel,
-			     struct perf_cpu_map *cpus)
+			     struct perf_cpu_map *cpus,
+			     int cpu)
 {
-	return evsel__open(evsel, cpus, NULL);
+	if (cpu == -1)
+		return evsel__open_cpu(evsel, cpus, NULL, 0,
+					cpus ? cpus->nr : 1);
+
+	return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1);
 }
 
 int perf_evsel__open_per_thread(struct evsel *evsel,
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index ddc5ee6f6592..dc14f4a823cd 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -86,6 +86,7 @@ struct evsel {
 	struct list_head	config_terms;
 	struct bpf_object	*bpf_obj;
 	int			bpf_fd;
+	int			err;
 	bool			auto_merge_stats;
 	bool			merged_stat;
 	const char *		metric_expr;
@@ -94,7 +95,10 @@ struct evsel {
 	struct evsel		*metric_leader;
 	bool			collect_stat;
 	bool			weak_group;
+	bool			reset_group;
+	bool			errored;
 	bool			percore;
+	int			cpu_iter;
 	const char		*pmu_name;
 	struct {
 		perf_evsel__sb_cb_t	*cb;
@@ -218,11 +222,14 @@ int perf_evsel__set_filter(struct evsel *evsel, const char *filter);
 int perf_evsel__append_tp_filter(struct evsel *evsel, const char *filter);
 int perf_evsel__append_addr_filter(struct evsel *evsel, const char *filter);
+int evsel__enable_cpu(struct evsel *evsel, int cpu);
 int evsel__enable(struct evsel *evsel);
 int evsel__disable(struct evsel *evsel);
+int evsel__disable_cpu(struct evsel *evsel, int cpu);
 
 int perf_evsel__open_per_cpu(struct evsel *evsel,
-			     struct perf_cpu_map *cpus);
+			     struct perf_cpu_map *cpus,
+			     int cpu);
 int perf_evsel__open_per_thread(struct evsel *evsel,
 				struct perf_thread_map *threads);
 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index f9f18b8b1df9..aed49806a09b 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -8,15 +8,12 @@
  */
 
 #include <sys/types.h>
-#include <stdio.h>
-#include <getopt.h>
 #include <stddef.h>
 #include <libelf.h>
 #include <string.h>
 #include <stdlib.h>
 #include <unistd.h>
 #include <inttypes.h>
-#include <limits.h>
 #include <fcntl.h>
 #include <err.h>
 #ifdef HAVE_DWARF_SUPPORT
@@ -31,8 +28,6 @@
 #define NT_GNU_BUILD_ID 3
 #endif
 
-#define JVMTI
-
 #define BUILD_ID_URANDOM /* different uuid for each run */
 
 #ifdef HAVE_LIBCRYPTO
@@ -511,44 +506,3 @@ error:
 
 	return retval;
 }
-
-#ifndef JVMTI
-
-static unsigned char x86_code[] = {
-	0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */
-	0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */
-	0xCD, 0x80            /* int $0x80 */
-};
-
-static struct options options;
-
-int main(int argc, char **argv)
-{
-	int c, fd, ret;
-
-	while ((c = getopt(argc, argv, "o:h")) != -1) {
-		switch (c) {
-		case 'o':
-			options.output = optarg;
-			break;
-		case 'h':
-			printf("Usage: genelf -o output_file [-h]\n");
-			return 0;
-		default:
-			errx(1, "unknown option");
-		}
-	}
-
-	fd = open(options.output, O_CREAT|O_TRUNC|O_RDWR, 0666);
-	if (fd == -1)
-		err(1, "cannot create file %s", options.output);
-
-	ret = jit_write_elf(fd, "main", x86_code, sizeof(x86_code));
-	close(fd);
-
-	if (ret != 0)
-		unlink(options.output);
-
-	return ret;
-}
-#endif
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index becc2d109423..93ad27830e2b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -850,7 +850,7 @@ int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
  */
 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
 {
-	return -1;
+	return ENOSYS; /* Not implemented */
 }
 
 static int write_cpuid(struct feat_fd *ff,
@@ -1089,21 +1089,18 @@ static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
 	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
 }
 
-static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
+#define MAX_CACHE_LVL 4
+
+static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
 {
 	u32 i, cnt = 0;
-	long ncpus;
 	u32 nr, cpu;
 	u16 level;
 
-	ncpus = sysconf(_SC_NPROCESSORS_CONF);
-	if (ncpus < 0)
-		return -1;
-
-	nr = (u32)(ncpus & UINT_MAX);
+	nr = cpu__max_cpu();
 
 	for (cpu = 0; cpu < nr; cpu++) {
-		for (level = 0; level < 10; level++) {
+		for (level = 0; level < MAX_CACHE_LVL; level++) {
 			struct cpu_cache_level c;
 			int err;
 
@@ -1123,18 +1120,12 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
 				caches[cnt++] = c;
 			else
 				cpu_cache_level__free(&c);
-
-			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
-				goto out;
 		}
 	}
- out:
 	*cntp = cnt;
 	return 0;
 }
-
-#define MAX_CACHE_LVL 4
-
 static int write_cache(struct feat_fd *ff,
 		       struct evlist *evlist __maybe_unused)
 {
@@ -1143,7 +1134,7 @@ static int write_cache(struct feat_fd *ff,
 	u32 cnt = 0, i, version = 1;
 	int ret;
 
-	ret = build_caches(caches, max_caches, &cnt);
+	ret = build_caches(caches, &cnt);
 	if (ret)
 		goto out;
diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h
index f01d48a8d707..b8a5159361b4 100644
--- a/tools/perf/util/include/linux/linkage.h
+++ b/tools/perf/util/include/linux/linkage.h
@@ -5,10 +5,93 @@
 
 /* linkage.h ... for including arch/x86/lib/memcpy_64.S */
 
-#define ENTRY(name) \
-	.globl name; \
+/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+#ifndef ASM_NL
+#define ASM_NL		;
+#endif
+
+#ifndef __ALIGN
+#define __ALIGN		.align 4,0x90
+#define __ALIGN_STR	".align 4,0x90"
+#endif
+
+/* SYM_T_FUNC -- type used by assembler to mark functions */
+#ifndef SYM_T_FUNC
+#define SYM_T_FUNC	STT_FUNC
+#endif
+
+/* SYM_A_* -- align the symbol? */
+#define SYM_A_ALIGN	ALIGN
+
+/* SYM_L_* -- linkage of symbols */
+#define SYM_L_GLOBAL(name)	.globl name
+#define SYM_L_LOCAL(name)	/* nothing */
+
+#define ALIGN __ALIGN
+
+/* === generic annotations === */
+
+/* SYM_ENTRY -- use only if you have to for non-paired symbols */
+#ifndef SYM_ENTRY
+#define SYM_ENTRY(name, linkage, align...)	\
+	linkage(name) ASM_NL			\
+	align ASM_NL				\
 	name:
+#endif
+
+/* SYM_START -- use only if you have to */
+#ifndef SYM_START
+#define SYM_START(name, linkage, align...)	\
+	SYM_ENTRY(name, linkage, align)
+#endif
+
+/* SYM_END -- use only if you have to */
+#ifndef SYM_END
+#define SYM_END(name, sym_type)			\
+	.type name sym_type ASM_NL		\
+	.size name, .-name
+#endif
+
+/*
+ * SYM_FUNC_START_ALIAS -- use where there are two global names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_ALIAS
+#define SYM_FUNC_START_ALIAS(name)		\
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+/*
+ * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
+ * later.
+ */
+#define SYM_FUNC_START(name)			\
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_START_LOCAL(name)		\
+	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
+#ifndef SYM_FUNC_END_ALIAS
+#define SYM_FUNC_END_ALIAS(name)		\
+	SYM_END(name, SYM_T_FUNC)
+#endif
 
-#define ENDPROC(name)
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_END(name)			\
+	SYM_END(name, SYM_T_FUNC)
+#endif
 
 #endif /* PERF_LINUX_LINKAGE_H_ */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 416d174d223c..c8c5410315e8 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2446,6 +2446,7 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
 
 	list_for_each_entry(ilist, &inline_node->val, list) {
 		struct map_symbol ilist_ms = {
+			.maps = ms->maps,
 			.map = map,
 			.sym = ilist->symbol,
 		};
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 6a4d350d5cdb..02aee946b6c1 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -103,8 +103,11 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
 		if (!strcmp(ev->name, ids[i])) {
 			if (!metric_events[i])
 				metric_events[i] = ev;
+			i++;
+			if (i == idnum)
+				break;
 		} else {
-			if (++i == idnum) {
+			if (i + 1 == idnum) {
 				/* Discard the whole match and start again */
 				i = 0;
 				memset(metric_events, 0,
@@ -124,7 +127,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
 		}
 	}
 
-	if (i != idnum - 1) {
+	if (i != idnum) {
 		/* Not whole match */
 		return NULL;
 	}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 345b5ccc90f6..9fcba2872130 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2681,12 +2681,12 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
 		ret = sort_dimension__add(list, tok, evlist, level);
 		if (ret == -EINVAL) {
 			if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
-				pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
+				ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
 			else
-				pr_err("Invalid --sort key: `%s'", tok);
+				ui__error("Invalid --sort key: `%s'", tok);
 			break;
 		} else if (ret == -ESRCH) {
-			pr_err("Unknown --sort key: `%s'", tok);
+			ui__error("Unknown --sort key: `%s'", tok);
 			break;
 		}
 	}
@@ -2743,7 +2743,7 @@ static int setup_sort_order(struct evlist *evlist)
 		return 0;
 
 	if (sort_order[1] == '\0') {
-		pr_err("Invalid --sort key: `+'");
+		ui__error("Invalid --sort key: `+'");
 		return -EINVAL;
 	}
 
@@ -2959,6 +2959,9 @@ int output_field_add(struct perf_hpp_list *list, char *tok)
 		if (strncasecmp(tok, sd->name, strlen(tok)))
 			continue;
 
+		if (sort__mode != SORT_MODE__MEMORY)
+			return -EINVAL;
+
 		return __sort_dimension__add_output(list, sd);
 	}
 
@@ -2968,6 +2971,9 @@ int output_field_add(struct perf_hpp_list *list, char *tok)
 		if (strncasecmp(tok, sd->name, strlen(tok)))
 			continue;
 
+		if (sort__mode != SORT_MODE__BRANCH)
+			return -EINVAL;
+
 		return __sort_dimension__add_output(list, sd);
 	}
 
@@ -3034,7 +3040,7 @@ static int __setup_output_field(void)
 		strp++;
 
 	if (!strlen(strp)) {
-		pr_err("Invalid --fields key: `+'");
+		ui__error("Invalid --fields key: `+'");
 		goto out;
 	}
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 332cb730785b..5f26137b8d60 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -464,7 +464,8 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
 
 int create_perf_stat_counter(struct evsel *evsel,
 			     struct perf_stat_config *config,
-			     struct target *target)
+			     struct target *target,
+			     int cpu)
 {
 	struct perf_event_attr *attr = &evsel->core.attr;
 	struct evsel *leader = evsel->leader;
@@ -518,7 +519,7 @@ int create_perf_stat_counter(struct evsel *evsel,
 	}
 
 	if (target__has_cpu(target) && !target__has_per_thread(target))
-		return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel));
+		return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);
 
 	return perf_evsel__open_per_thread(evsel, evsel->core.threads);
 }
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index bfa9aaf36ce6..fb990efa54a8 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -214,7 +214,8 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
 
 int create_perf_stat_counter(struct evsel *evsel,
 			     struct perf_stat_config *config,
-			     struct target *target);
+			     struct target *target,
+			     int cpu);
 
 void perf_evlist__print_counters(struct evlist *evlist,
 				 struct perf_stat_config *config,