Diffstat (limited to 'tools'): 30 files changed, 266 insertions, 92 deletions
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index da8b7aa3d351..07b0b7542511 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -87,8 +87,8 @@ endif # BUILD_SRC
 # We process the rest of the Makefile if this is the final invocation of make
 ifeq ($(skip-makefile),)

-srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
-objtree := $(CURDIR)
+srctree := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)))
+objtree := $(realpath $(CURDIR))
 src     := $(srctree)
 obj     := $(objtree)
@@ -112,7 +112,7 @@ export Q VERBOSE

 LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION)

-INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES)
+INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include $(CONFIG_INCLUDES)

 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index f8465a811aa5..23bd69cb5ade 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -418,7 +418,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
 __attribute__((constructor)) static void init_preload(void)
 {
-        if (__init_state != done)
+        if (__init_state == done)
                 return;
 #ifndef __GLIBC__
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index 5334ad9d39b7..5334ad9d39b7 100644..100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h
new file mode 100644
index 000000000000..d82b170bb216
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/lib/lockdep/uinclude/linux/rcu.h
index 4c99fcb5da27..042ee8e463c9 100644
--- a/tools/lib/lockdep/uinclude/linux/rcu.h
+++ b/tools/lib/lockdep/uinclude/linux/rcu.h
@@ -13,4 +13,9 @@ static inline int rcu_is_cpu_idle(void)
         return 1;
 }

+static inline bool rcu_is_watching(void)
+{
+        return false;
+}
+
 #endif
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index cfede86161d8..b22dbb16f877 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -63,11 +63,35 @@ static int build_id_cache__kcore_dir(char *dir, size_t sz)
         return 0;
 }

+static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
+{
+        char from[PATH_MAX];
+        char to[PATH_MAX];
+        const char *name;
+        u64 addr1 = 0, addr2 = 0;
+        int i;
+
+        scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
+        scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
+
+        for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+                addr1 = kallsyms__get_function_start(from, name);
+                if (addr1)
+                        break;
+        }
+
+        if (name)
+                addr2 = kallsyms__get_function_start(to, name);
+
+        return addr1 == addr2;
+}
+
 static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
                                           size_t to_dir_sz)
 {
         char from[PATH_MAX];
         char to[PATH_MAX];
+        char to_subdir[PATH_MAX];
         struct dirent *dent;
         int ret = -1;
         DIR *d;
@@ -86,10 +110,11 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
                         continue;
                 scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
                           dent->d_name);
-                if (!compare_proc_modules(from, to)) {
-                        scnprintf(to, sizeof(to), "%s/%s", to_dir,
-                                  dent->d_name);
-                        strlcpy(to_dir, to, to_dir_sz);
+                scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
+                          to_dir, dent->d_name);
+                if (!compare_proc_modules(from, to) &&
+                    same_kallsyms_reloc(from_dir, to_subdir)) {
+                        strlcpy(to_dir, to_subdir, to_dir_sz);
                         ret = 0;
                         break;
                 }
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3c394bf16fa8..af47531b82ec 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -287,10 +287,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
          * have no _text sometimes.
          */
         err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                 machine, "_text");
-        if (err < 0)
-                err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                         machine, "_stext");
+                                                 machine);
         if (err < 0)
                 pr_err("Couldn't record guest kernel [%d]'s reference"
                        " relocation symbol.\n", machine->pid);
@@ -457,10 +454,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
         }

         err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                 machine, "_text");
-        if (err < 0)
-                err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                         machine, "_stext");
+                                                 machine);
         if (err < 0)
                 pr_err("Couldn't record kernel reference relocation symbol\n"
                        "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 3c53ec268fbc..02f985f3a396 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -113,14 +113,16 @@ static int report__add_mem_hist_entry(struct perf_tool *tool, struct addr_locati
         if (!he)
                 return -ENOMEM;

-        err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
-        if (err)
-                goto out;
+        if (ui__has_annotation()) {
+                err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+                if (err)
+                        goto out;

-        mx = he->mem_info;
-        err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
-        if (err)
-                goto out;
+                mx = he->mem_info;
+                err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
+                if (err)
+                        goto out;
+        }

         evsel->hists.stats.total_period += cost;
         hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
@@ -164,14 +166,18 @@ static int report__add_branch_hist_entry(struct perf_tool *tool, struct addr_loc
                 he = __hists__add_entry(&evsel->hists, al, parent, &bi[i], NULL,
                                         1, 1, 0);
                 if (he) {
-                        bx = he->branch_info;
-                        err = addr_map_symbol__inc_samples(&bx->from, evsel->idx);
-                        if (err)
-                                goto out;
-
-                        err = addr_map_symbol__inc_samples(&bx->to, evsel->idx);
-                        if (err)
-                                goto out;
+                        if (ui__has_annotation()) {
+                                bx = he->branch_info;
+                                err = addr_map_symbol__inc_samples(&bx->from,
+                                                                   evsel->idx);
+                                if (err)
+                                        goto out;
+
+                                err = addr_map_symbol__inc_samples(&bx->to,
+                                                                   evsel->idx);
+                                if (err)
+                                        goto out;
+                        }

                         evsel->hists.stats.total_period += 1;
                         hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
@@ -205,7 +211,9 @@ static int report__add_hist_entry(struct perf_tool *tool, struct perf_evsel *evs
         if (err)
                 goto out;

-        err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+        if (ui__has_annotation())
+                err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+
         evsel->hists.stats.total_period += sample->period;
         hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 out:
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 76cd510d34d0..5f989a7d8bc2 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -176,7 +176,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
 {
         struct annotation *notes;
         struct symbol *sym;
-        int err;
+        int err = 0;

         if (he == NULL || he->ms.sym == NULL ||
             ((top->sym_filter_entry == NULL ||
@@ -190,7 +190,9 @@ static void perf_top__record_precise_ip(struct perf_top *top,
                 return;

         ip = he->ms.map->map_ip(he->ms.map, ip);
-        err = hist_entry__inc_addr_samples(he, counter, ip);
+
+        if (ui__has_annotation())
+                err = hist_entry__inc_addr_samples(he, counter, ip);

         pthread_mutex_unlock(&notes->lock);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 896f27047ed6..6aa6fb6f7bd9 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -37,6 +37,10 @@
 # define MADV_UNMERGEABLE 13
 #endif

+#ifndef EFD_SEMAPHORE
+# define EFD_SEMAPHORE 1
+#endif
+
 struct tp_field {
         int offset;
         union {
@@ -279,6 +283,11 @@ static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,

 #define SCA_STRARRAY syscall_arg__scnprintf_strarray

+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches as soon as the ioctl beautifier
+ *        gets rewritten to support all arches.
+ */
 static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
                                                  struct syscall_arg *arg)
 {
@@ -286,6 +295,7 @@ static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
 }

 #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
+#endif /* defined(__i386__) || defined(__x86_64__) */

 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
                                         struct syscall_arg *arg);
@@ -839,6 +849,10 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal

 #define SCA_SIGNUM syscall_arg__scnprintf_signum

+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches.
+ */
 #define TCGETS 0x5401

 static const char *tioctls[] = {
@@ -860,6 +874,7 @@ static const char *tioctls[] = {
 };

 static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
+#endif /* defined(__i386__) || defined(__x86_64__) */

 #define STRARRAY(arg, name, array) \
           .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
@@ -941,9 +956,16 @@ static struct syscall_fmt {
         { .name     = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
         { .name     = "ioctl",      .errmsg = true,
           .arg_scnprintf = { [0] = SCA_FD, /* fd */
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches.
+ */
                              [1] = SCA_STRHEXARRAY, /* cmd */
                              [2] = SCA_HEX, /* arg */ },
           .arg_parm      = { [1] = &strarray__tioctls, /* cmd */ }, },
+#else
+                             [2] = SCA_HEX, /* arg */ }, },
+#endif
         { .name     = "kill",       .errmsg = true,
           .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
         { .name     = "linkat",     .errmsg = true,
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index c48d44958172..0331ea2701a3 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -478,7 +478,7 @@ else
 endif

 ifeq ($(feature-libbfd), 1)
-  EXTLIBS += -lbfd
+  EXTLIBS += -lbfd -lz -liberty
 endif

 ifdef NO_DEMANGLE
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 12e551346fa6..523b7bc10553 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -121,7 +121,7 @@ test-libpython-version.bin:
         $(BUILD) $(FLAGS_PYTHON_EMBED)

 test-libbfd.bin:
-        $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+        $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl

 test-liberty.bin:
         $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 67e5d0cace85..63a0e6f04a01 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -454,7 +454,6 @@ So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you will
 need at least this:
         - asm/perf_event.h - a basic stub will suffice at first
         - support for atomic64 types (and associated helper functions)
-        - set_perf_event_pending() implemented

 If your architecture does have hardware capabilities, you can override the
 weak stub hw_perf_event_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 7daa806d9050..e84fa26bc1be 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -100,8 +100,8 @@
 #ifdef __aarch64__
 #define mb()           asm volatile("dmb ish" ::: "memory")
-#define wmb()          asm volatile("dmb ishld" ::: "memory")
-#define rmb()          asm volatile("dmb ishst" ::: "memory")
+#define wmb()          asm volatile("dmb ishst" ::: "memory")
+#define rmb()          asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax()    asm volatile("yield" ::: "memory")
 #endif
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 2bd13edcbc17..3d9088003a5b 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,6 @@ int test__vmlinux_matches_kallsyms(void)
         struct map *kallsyms_map, *vmlinux_map;
         struct machine kallsyms, vmlinux;
         enum map_type type = MAP__FUNCTION;
-        struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
         u64 mem_start, mem_end;

         /*
@@ -70,14 +69,6 @@ int test__vmlinux_matches_kallsyms(void)
          */
         kallsyms_map = machine__kernel_map(&kallsyms, type);

-        sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
-        if (sym == NULL) {
-                pr_debug("dso__find_symbol_by_name ");
-                goto out;
-        }
-
-        ref_reloc_sym.addr = UM(sym->start);
-
         /*
          * Step 5:
          *
@@ -89,7 +80,6 @@ int test__vmlinux_matches_kallsyms(void)
         }

         vmlinux_map = machine__kernel_map(&vmlinux, type);
-        map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

         /*
          * Step 6:
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 469eb679fb9d..3aa555ff9d89 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -8,6 +8,8 @@
  */

 #include "util.h"
+#include "ui/ui.h"
+#include "sort.h"
 #include "build-id.h"
 #include "color.h"
 #include "cache.h"
@@ -489,7 +491,7 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 {
         struct annotation *notes;

-        if (sym == NULL || use_browser != 1 || !sort__has_sym)
+        if (sym == NULL)
                 return 0;

         notes = symbol__annotation(sym);
@@ -1399,3 +1401,8 @@ int hist_entry__annotate(struct hist_entry *he, size_t privsize)
 {
         return symbol__annotate(he->ms.sym, he->ms.map, privsize);
 }
+
+bool ui__has_annotation(void)
+{
+        return use_browser == 1 && sort__has_sym;
+}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index b2aef59d6bb2..56ad4f5287de 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -151,6 +151,8 @@ void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
 void disasm__purge(struct list_head *head);

+bool ui__has_annotation(void);
+
 int symbol__tty_annotate(struct symbol *sym, struct map *map,
                          struct perf_evsel *evsel, bool print_lines,
                          bool full_paths, int min_pcnt, int max_lines);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1fc1c2f04772..b0f3ca850e9e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -470,23 +470,32 @@ static int find_symbol_cb(void *arg, const char *name, char type,
         return 1;
 }

+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+                                 const char *symbol_name)
+{
+        struct process_symbol_args args = { .name = symbol_name, };
+
+        if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+                return 0;
+
+        return args.start;
+}
+
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                        perf_event__handler_t process,
-                                       struct machine *machine,
-                                       const char *symbol_name)
+                                       struct machine *machine)
 {
         size_t size;
-        const char *filename, *mmap_name;
-        char path[PATH_MAX];
+        const char *mmap_name;
         char name_buff[PATH_MAX];
         struct map *map;
+        struct kmap *kmap;
         int err;
         /*
          * We should get this from /sys/kernel/sections/.text, but till that is
          * available use this, and after it is use this as a fallback for older
          * kernels.
          */
-        struct process_symbol_args args = { .name = symbol_name, };
         union perf_event *event = zalloc((sizeof(event->mmap) +
                                           machine->id_hdr_size));

         if (event == NULL) {
@@ -502,30 +511,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                  * see kernel/perf_event.c __perf_event_mmap
                  */
                 event->header.misc = PERF_RECORD_MISC_KERNEL;
-                filename = "/proc/kallsyms";
         } else {
                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-                if (machine__is_default_guest(machine))
-                        filename = (char *) symbol_conf.default_guest_kallsyms;
-                else {
-                        sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-                        filename = path;
-                }
-        }
-
-        if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
-                free(event);
-                return -ENOENT;
         }

         map = machine->vmlinux_maps[MAP__FUNCTION];
+        kmap = map__kmap(map);
         size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
-                        "%s%s", mmap_name, symbol_name) + 1;
+                        "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
         size = PERF_ALIGN(size, sizeof(u64));
         event->mmap.header.type = PERF_RECORD_MMAP;
         event->mmap.header.size = (sizeof(event->mmap) -
                         (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
-        event->mmap.pgoff = args.start;
+        event->mmap.pgoff = kmap->ref_reloc_sym->addr;
         event->mmap.start = map->start;
         event->mmap.len   = map->end - event->mmap.start;
         event->mmap.pid   = machine->pid;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index faf6e219be21..851fa06f4a42 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -214,8 +214,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
                                    struct machine *machine, bool mmap_data);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                        perf_event__handler_t process,
-                                       struct machine *machine,
-                                       const char *symbol_name);
+                                       struct machine *machine);

 int perf_event__synthesize_modules(struct perf_tool *tool,
                                    perf_event__handler_t process,
@@ -279,4 +278,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf(union perf_event *event, FILE *fp);

+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+                                 const char *symbol_name);
+
 #endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 000000000000..d82b170bb216
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 45cf10a562bd..dadfa7e54287 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -87,13 +87,15 @@ static __always_inline unsigned long __ffs(unsigned long word)
         return num;
 }

+typedef const unsigned long __attribute__((__may_alias__)) long_alias_t;
+
 /*
  * Find the first set bit in a memory region.
  */
 static inline unsigned long find_first_bit(const unsigned long *addr,
                                            unsigned long size)
 {
-        const unsigned long *p = addr;
+        long_alias_t *p = (long_alias_t *) addr;
         unsigned long result = 0;
         unsigned long tmp;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ded74590b92f..c872991e0f65 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,19 +496,22 @@ static int symbol__in_kernel(void *arg, const char *name,
         return 1;
 }

+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+                                           size_t bufsz)
+{
+        if (machine__is_default_guest(machine))
+                scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+        else
+                scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
 /* Figure out the start address of kernel map from /proc/kallsyms */
 static u64 machine__get_kernel_start_addr(struct machine *machine)
 {
-        const char *filename;
-        char path[PATH_MAX];
+        char filename[PATH_MAX];
         struct process_args args;

-        if (machine__is_default_guest(machine))
-                filename = (char *)symbol_conf.default_guest_kallsyms;
-        else {
-                sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-                filename = path;
-        }
+        machine__get_kallsyms_filename(machine, filename, PATH_MAX);

         if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                 return 0;
@@ -829,9 +832,25 @@ static int machine__create_modules(struct machine *machine)
         return 0;
 }

+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
 int machine__create_kernel_maps(struct machine *machine)
 {
         struct dso *kernel = machine__get_kernel(machine);
+        char filename[PATH_MAX];
+        const char *name;
+        u64 addr = 0;
+        int i;
+
+        machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+        for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+                addr = kallsyms__get_function_start(filename, name);
+                if (addr)
+                        break;
+        }
+        if (!addr)
+                return -1;

         if (kernel == NULL ||
             __machine__create_kernel_maps(machine, kernel) < 0)
@@ -850,6 +869,13 @@ int machine__create_kernel_maps(struct machine *machine)
          * Now that we have all the maps created, just set the ->end of them:
          */
         map_groups__fixup_end(&machine->kmaps);
+
+        if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+                                             addr)) {
+                machine__destroy_kernel_maps(machine);
+                return -1;
+        }
+
         return 0;
 }
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 477133015440..f77e91e483dc 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -18,6 +18,8 @@ union perf_event;
 #define HOST_KERNEL_ID         (-1)
 #define DEFAULT_GUEST_KERNEL_ID        (0)

+extern const char *ref_reloc_sym_names[];
+
 struct machine {
         struct rb_node    rb_node;
         pid_t             pid;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3b97513f0e77..39cd2d0faff6 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -39,6 +39,7 @@ void map__init(struct map *map, enum map_type type,
         map->start    = start;
         map->end      = end;
         map->pgoff    = pgoff;
+        map->reloc    = 0;
         map->dso      = dso;
         map->map_ip   = map__map_ip;
         map->unmap_ip = map__unmap_ip;
@@ -288,7 +289,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
         if (map->dso->rel)
                 return rip - map->pgoff;

-        return map->unmap_ip(map, rip);
+        return map->unmap_ip(map, rip) - map->reloc;
 }

 /**
@@ -311,7 +312,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
         if (map->dso->rel)
                 return map->unmap_ip(map, ip + map->pgoff);

-        return ip;
+        return ip + map->reloc;
 }

 void map_groups__init(struct map_groups *mg)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 18068c6b71c1..257e513205ce 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -36,6 +36,7 @@ struct map {
         bool                    erange_warned;
         u32                     priv;
         u64                     pgoff;
+        u64                     reloc;
         u32                     maj, min; /* only valid for MMAP2 record */
         u64                     ino;      /* only valid for MMAP2 record */
         u64                     ino_generation;/* only valid for MMAP2 record */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index d248fca6d7ed..1e15df10a88c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1091,12 +1091,12 @@ int is_valid_tracepoint(const char *event_string)
 static bool is_event_supported(u8 type, unsigned config)
 {
         bool ret = true;
+        int open_return;
         struct perf_evsel *evsel;
         struct perf_event_attr attr = {
                 .type = type,
                 .config = config,
                 .disabled = 1,
-                .exclude_kernel = 1,
         };
         struct {
                 struct thread_map map;
@@ -1108,7 +1108,20 @@ static bool is_event_supported(u8 type, unsigned config)

         evsel = perf_evsel__new(&attr);
         if (evsel) {
-                ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+                open_return = perf_evsel__open(evsel, NULL, &tmap.map);
+                ret = open_return >= 0;
+
+                if (open_return == -EACCES) {
+                        /*
+                         * This happens if the paranoid value
+                         * /proc/sys/kernel/perf_event_paranoid is set to 2.
+                         * Re-run with exclude_kernel set; we don't do that
+                         * by default as some ARM machines do not support it.
+                         */
+                        evsel->attr.exclude_kernel = 1;
+                        ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+                }
                 perf_evsel__delete(evsel);
         }
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a8a9b6cd93a8..d8b048c20cde 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -336,8 +336,8 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
                 return ret;

         for (i = 0; i < ntevs && ret >= 0; i++) {
+                /* point.address is the address of point.symbol + point.offset */
                 offset = tevs[i].point.address - stext;
-                offset += tevs[i].point.offset;
                 tevs[i].point.offset = 0;
                 zfree(&tevs[i].point.symbol);
                 ret = e_snprintf(buf, 32, "0x%lx", offset);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0b39a48e5110..5da6ce74c676 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1008,6 +1008,12 @@ static int perf_session__process_user_event(struct perf_session *session, union
                 if (err == 0)
                         perf_session__set_id_hdr_size(session);
                 return err;
+        case PERF_RECORD_HEADER_EVENT_TYPE:
+                /*
+                 * Deprecated, but we need to handle it for the sake
+                 * of old data files created in pipe mode.
+                 */
+                return 0;
         case PERF_RECORD_HEADER_TRACING_DATA:
                 /* setup for reading amidst mmap */
                 lseek(fd, file_offset, SEEK_SET);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 759456728703..3e9f336740fa 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -751,6 +751,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
                         if (strcmp(elf_name, kmap->ref_reloc_sym->name))
                                 continue;
                         kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+                        map->reloc = kmap->ref_reloc_sym->addr -
+                                     kmap->ref_reloc_sym->unrelocated_addr;
                         break;
                 }
         }
@@ -922,6 +924,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
                                       (u64)shdr.sh_offset);
                         sym.st_value -= shdr.sh_addr - shdr.sh_offset;
                 }
+new_symbol:
                 /*
                  * We need to figure out if the object was created from C++ sources
                  * DWARF DW_compile_unit has this, but we don't always have access
@@ -933,7 +936,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
                         if (demangled != NULL)
                                 elf_name = demangled;
                 }
-new_symbol:
                 f = symbol__new(sym.st_value, sym.st_size,
                                 GELF_ST_BIND(sym.st_info), elf_name);
                 free(demangled);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 39ce9adbaaf0..e89afc097d8a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -627,7 +627,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
  * kernel range is broken in several maps, named [kernel].N, as we don't have
  * the original ELF section names vmlinux have.
  */
-static int dso__split_kallsyms(struct dso *dso, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
                                symbol_filter_t filter)
 {
         struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -692,6 +692,12 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
                         char dso_name[PATH_MAX];
                         struct dso *ndso;

+                        if (delta) {
+                                /* Kernel was relocated at boot time */
+                                pos->start -= delta;
+                                pos->end -= delta;
+                        }
+
                         if (count == 0) {
                                 curr_map = map;
                                 goto filter_symbol;
@@ -721,6 +727,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
                         curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
                         map_groups__insert(kmaps, curr_map);
                         ++kernel_range;
+                } else if (delta) {
+                        /* Kernel was relocated at boot time */
+                        pos->start -= delta;
+                        pos->end -= delta;
                 }
 filter_symbol:
                 if (filter && filter(curr_map, pos)) {
@@ -976,6 +986,23 @@ static int validate_kcore_modules(const char *kallsyms_filename,
         return 0;
 }

+static int validate_kcore_addresses(const char *kallsyms_filename,
+                                    struct map *map)
+{
+        struct kmap *kmap = map__kmap(map);
+
+        if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+                u64 start;
+
+                start = kallsyms__get_function_start(kallsyms_filename,
+                                                     kmap->ref_reloc_sym->name);
+                if (start != kmap->ref_reloc_sym->addr)
+                        return -EINVAL;
+        }
+
+        return validate_kcore_modules(kallsyms_filename, map);
+}
+
 struct kcore_mapfn_data {
         struct dso *dso;
         enum map_type type;
@@ -1019,8 +1046,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                            kallsyms_filename))
                 return -EINVAL;

-        /* All modules must be present at their original addresses */
-        if (validate_kcore_modules(kallsyms_filename, map))
+        /* Modules and kernel must be present at their original addresses */
+        if (validate_kcore_addresses(kallsyms_filename, map))
                 return -EINVAL;

         md.dso = dso;
@@ -1113,15 +1140,41 @@ out_err:
         return -EINVAL;
 }

+/*
+ * If the kernel is relocated at boot time, kallsyms won't match. Compute the
+ * delta based on the relocation reference symbol.
+ */
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+{
+        struct kmap *kmap = map__kmap(map);
+        u64 addr;
+
+        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+                return 0;
+
+        addr = kallsyms__get_function_start(filename,
+                                            kmap->ref_reloc_sym->name);
+        if (!addr)
+                return -1;
+
+        *delta = addr - kmap->ref_reloc_sym->addr;
+        return 0;
+}
+
 int dso__load_kallsyms(struct dso *dso, const char *filename,
                        struct map *map, symbol_filter_t filter)
 {
+        u64 delta = 0;
+
         if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                 return -1;

         if (dso__load_all_kallsyms(dso, filename, map) < 0)
                 return -1;

+        if (kallsyms__delta(map, filename, &delta))
+                return -1;
+
         symbols__fixup_duplicate(&dso->symbols[map->type]);
         symbols__fixup_end(&dso->symbols[map->type]);
@@ -1133,7 +1186,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
         if (!dso__load_kcore(dso, map, filename))
                 return dso__split_kallsyms_for_kcore(dso, map, filter);
         else
-                return dso__split_kallsyms(dso, map, filter);
+                return dso__split_kallsyms(dso, map, delta, filter);
 }

 static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -1283,6 +1336,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
                         if (syms_ss && runtime_ss)
                                 break;
+                } else {
+                        symsrc__destroy(ss);
                 }
         }
@@ -1424,7 +1479,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
                         continue;
                 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
                           "%s/%s/kallsyms", dir, dent->d_name);
-                if (!validate_kcore_modules(kallsyms_filename, map)) {
+                if (!validate_kcore_addresses(kallsyms_filename, map)) {
                         strlcpy(dir, kallsyms_filename, dir_sz);
                         ret = 0;
                         break;
@@ -1479,7 +1534,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
         if (fd != -1) {
                 close(fd);
                 /* If module maps match go with /proc/kallsyms */
-                if (!validate_kcore_modules("/proc/kallsyms", map))
+                if (!validate_kcore_addresses("/proc/kallsyms", map))
                         goto proc_kallsyms;
         }
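
A note on the recurring theme in the perf changes above: when the kernel was relocated at boot (kexec being the usual case), kallsyms addresses no longer match the unrelocated image, so the patches look up a reference symbol ("_text", falling back to "_stext"), compare its addresses, and carry the resulting delta around (kallsyms__get_function_start(), kallsyms__delta(), map->reloc, maps__set_kallsyms_ref_reloc_sym()). The sketch below is illustrative only, not perf source: a self-contained C program, using plain stdio parsing in place of perf's kallsyms__parse(), that computes the same kind of delta from two kallsyms-format files; the file arguments and helper names here are assumptions made for the example.

/*
 * Standalone sketch (not perf code): compute a boot-time relocation delta
 * by comparing the address of a reference symbol in two /proc/kallsyms-style
 * files, mirroring what the patches above do inside perf.
 */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

/* Same preference order the patches use. */
static const char *ref_syms[] = { "_text", "_stext", NULL };

/* Return the address of 'name' in a kallsyms-style file, or 0 if absent. */
static uint64_t kallsyms_lookup(const char *filename, const char *name)
{
        FILE *f = fopen(filename, "r");
        uint64_t addr;
        char type, sym[256];

        if (!f)
                return 0;
        /* Each line: "<hex address> <type> <symbol> [module]" */
        while (fscanf(f, "%" SCNx64 " %c %255s%*[^\n]", &addr, &type, sym) == 3) {
                if (strcmp(sym, name) == 0) {
                        fclose(f);
                        return addr;
                }
        }
        fclose(f);
        return 0;
}

int main(int argc, char **argv)
{
        uint64_t runtime = 0, image = 0;
        const char *name = NULL;
        int i;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <running-kallsyms> <other-kallsyms>\n", argv[0]);
                return 1;
        }

        /* Pick the first reference symbol the running kernel exposes. */
        for (i = 0; (name = ref_syms[i]) != NULL; i++) {
                runtime = kallsyms_lookup(argv[1], name);
                if (runtime)
                        break;
        }
        if (!runtime) {
                fprintf(stderr, "no reference symbol found\n");
                return 1;
        }

        image = kallsyms_lookup(argv[2], name);
        if (!image) {
                fprintf(stderr, "%s not found in %s\n", name, argv[2]);
                return 1;
        }

        /* A non-zero delta means the kernel was relocated at boot (e.g. kexec). */
        printf("%s: runtime %#" PRIx64 ", image %#" PRIx64 ", delta %#" PRIx64 "\n",
               name, runtime, image, runtime - image);
        return 0;
}

Within perf itself, the analogous delta is what dso__split_kallsyms() subtracts from each symbol and what map->reloc feeds into the objdump address conversions in util/map.c.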