author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-26 02:05:40 +0200
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-26 02:05:40 +0200
commit | bdc6b758e443c21c39a14c075e5b7e01f095b37b (patch)
tree | 40b98b5abd501cc232f41af03eb078282d7a6327 /arch/powerpc/perf
parent | Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/ke... (diff)
parent | Merge tag 'perf-core-for-mingo-20160523' of git://git.kernel.org/pub/scm/linu... (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"Mostly tooling and PMU driver fixes, but also a number of late updates
such as the reworking of the call-chain size limiting logic to make
call-graph recording more robust, plus tooling side changes for the
new 'backwards ring-buffer' extension to the perf ring-buffer"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
perf record: Read from backward ring buffer
perf record: Rename variable to make code clear
perf record: Prevent reading invalid data in record__mmap_read
perf evlist: Add API to pause/resume
perf trace: Use the ptr->name beautifier as default for "filename" args
perf trace: Use the fd->name beautifier as default for "fd" args
perf report: Add srcline_from/to branch sort keys
perf evsel: Record fd into perf_mmap
perf evsel: Add overwrite attribute and check write_backward
perf tools: Set buildid dir under symfs when --symfs is provided
perf trace: Only auto set call-graph to "dwarf" when syscalls are being traced
perf annotate: Sort list of recognised instructions
perf annotate: Fix identification of ARM blt and bls instructions
perf tools: Fix usage of max_stack sysctl
perf callchain: Stop validating callchains by the max_stack sysctl
perf trace: Fix exit_group() formatting
perf top: Use machine->kptr_restrict_warned
perf trace: Warn when trying to resolve kernel addresses with kptr_restrict=1
perf machine: Do not bail out if not managing to read ref reloc symbol
perf/x86/intel/p4: Trivial indentation fix, remove space
...
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r-- | arch/powerpc/perf/callchain.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 26d37e6f924e..0fc26714780a 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -47,7 +47,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
 	unsigned long next_ip;
@@ -76,7 +76,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 			next_ip = regs->nip;
 			lr = regs->link;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+			perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);
 
 		} else {
 			if (level == 0)
@@ -232,7 +232,7 @@ static int sane_signal_64_frame(unsigned long sp)
 		puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned long sp, next_sp;
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		fp = (unsigned long __user *) sp;
 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
 			return;
@@ -274,7 +274,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 			    read_user_stack_64(&uregs[PT_R1], &sp))
 				return;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
 			perf_callchain_store(entry, next_ip);
 			continue;
 		}
@@ -319,7 +319,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	return rc;
 }
 
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
 					  struct pt_regs *regs)
 {
 }
@@ -439,7 +439,7 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
 	return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
 				   struct pt_regs *regs)
 {
 	unsigned int sp, next_sp;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 	sp = regs->gpr[1];
 	perf_callchain_store(entry, next_ip);
 
-	while (entry->nr < sysctl_perf_event_max_stack) {
+	while (entry->nr < entry->max_stack) {
 		fp = (unsigned int __user *) (unsigned long) sp;
 		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
 			return;
@@ -473,7 +473,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 			    read_user_stack_32(&uregs[PT_R1], &sp))
 				return;
 			level = 0;
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
 			perf_callchain_store(entry, next_ip);
 			continue;
 		}
@@ -487,7 +487,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	if (current_is_64bit())
 		perf_callchain_user_64(entry, regs);
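The hunks above are the powerpc side of the call-chain size-limiting rework mentioned in the pull message: the walkers now take a struct perf_callchain_entry_ctx, bound their loops by entry->max_stack instead of reading the global sysctl_perf_event_max_stack, and record PERF_CONTEXT_* markers through perf_callchain_store_context(). A minimal, self-contained sketch of that pattern follows; the struct layout, the fixed marker cap, and the user-space test harness are simplifications for illustration, not the upstream definitions from include/linux/perf_event.h.

```c
/*
 * Hedged sketch of the perf_callchain_entry_ctx pattern the diff moves to.
 * Simplified stand-ins only; the real kernel types and limits differ.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_ENTRIES 128	/* stand-in for the real buffer sizing */

struct perf_callchain_entry {
	uint64_t nr;
	uint64_t ip[SKETCH_MAX_ENTRIES];
};

/*
 * The context wraps the entry and carries the per-callchain limit, so an
 * arch walker no longer consults sysctl_perf_event_max_stack directly.
 */
struct perf_callchain_entry_ctx {
	struct perf_callchain_entry *entry;
	uint32_t max_stack;	/* limit for regular frames */
	uint32_t nr;		/* regular frames stored so far */
	short contexts;		/* PERF_CONTEXT_* markers stored so far */
	bool contexts_maxed;
};

/* Regular frames count against ctx->max_stack. */
static int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, uint64_t ip)
{
	if (ctx->nr < ctx->max_stack && ctx->entry->nr < SKETCH_MAX_ENTRIES) {
		ctx->entry->ip[ctx->entry->nr++] = ip;
		ctx->nr++;
		return 0;
	}
	return -1;
}

/*
 * Context markers (PERF_CONTEXT_KERNEL/USER) are accounted separately, so
 * they do not eat into the frame budget; the fixed cap of 8 is an
 * assumption of this sketch, not the upstream limit.
 */
static int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, uint64_t ip)
{
	if (ctx->contexts < 8 && ctx->entry->nr < SKETCH_MAX_ENTRIES) {
		ctx->entry->ip[ctx->entry->nr++] = ip;
		ctx->contexts++;
		return 0;
	}
	ctx->contexts_maxed = true;
	return -1;
}

int main(void)
{
	struct perf_callchain_entry entry = { 0 };
	struct perf_callchain_entry_ctx ctx = {
		.entry = &entry,
		.max_stack = 4,	/* per-callchain limit handed in by the core */
	};
	uint64_t fake_ip = 0x1000;

	/* Shape of the walkers above: a context marker, then a bounded frame loop. */
	perf_callchain_store_context(&ctx, 0xfffffffffffffe00ULL /* PERF_CONTEXT_USER-like marker */);
	while (ctx.nr < ctx.max_stack)
		perf_callchain_store(&ctx, fake_ip++);

	printf("stored %llu entries (%u frames + %d context markers)\n",
	       (unsigned long long)entry.nr, (unsigned)ctx.nr, (int)ctx.contexts);
	return 0;
}
```

The design point the diff illustrates is that the frame budget becomes a per-callchain property carried in the context, so different events can be walked with different depths while context markers stay outside that budget.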