author     Adrian Hunter <adrian.hunter@intel.com>      2016-06-23 15:40:57 +0200
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2016-06-23 22:02:59 +0200
commit     50f736372d5ea0ce97ede698f957c9b141aa569e
tree       f12cd5756fdc9364c2cd0bd603167c37fb0f284b
parent     perf script: Print sample flags more nicely
perf auxtrace: Add option to feed branches to the thread stack
In preparation for using the thread stack to print an indent representing
the stack depth in perf script, add an option to tell decoders to feed
branches to the thread stack. Add support for that option to Intel PT and
Intel BTS.

The advantage of using the decoder to feed the thread stack is that it
happens before branch filtering and so can be used with different itrace
options (e.g. it still works when only showing calls, even though the
thread stack needs to see calls and returns). Also it does not conflict
with using the thread stack to get callchains.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/1466689258-28493-3-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
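The ordering described above is the key point: the decoder hands every decoded branch to the thread stack before the itrace branch filter runs, so call/return tracking stays complete even when only a subset of branches is sampled. Below is a minimal, self-contained sketch of that loop. It is not the perf code; struct stack_model, stack_model_event(), emit_sample() and process_buffer() are illustrative stand-ins for the real thread-stack and sample-synthesis paths.

/*
 * Standalone model of "feed branches to the thread stack before filtering".
 * All names here are illustrative stand-ins, not perf internals.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum flags { FLAG_CALL = 1 << 0, FLAG_RETURN = 1 << 1 };

struct branch { uint64_t from, to; unsigned int flags; };

struct stack_model { int depth; };

/* Stand-in for thread_stack__event(): track call depth from raw branches. */
static void stack_model_event(struct stack_model *ts, const struct branch *b)
{
	if (b->flags & FLAG_CALL)
		ts->depth++;
	else if ((b->flags & FLAG_RETURN) && ts->depth > 0)
		ts->depth--;
}

/* Stand-in for the synthesized-sample path that the branch filter gates. */
static void emit_sample(const struct branch *b, int depth)
{
	printf("depth=%d branch %#lx -> %#lx\n", depth,
	       (unsigned long)b->from, (unsigned long)b->to);
}

static void process_buffer(const struct branch *branches, int nr,
			   unsigned int filter, bool feed_thread_stack,
			   struct stack_model *ts)
{
	for (int i = 0; i < nr; i++) {
		const struct branch *b = &branches[i];

		/* Feed the thread stack BEFORE filtering, as the patch does. */
		if (feed_thread_stack)
			stack_model_event(ts, b);

		/* Only now apply the itrace branch filter (e.g. calls only). */
		if (filter && !(filter & b->flags))
			continue;

		emit_sample(b, ts->depth);
	}
}

int main(void)
{
	struct branch trace[] = {
		{ 0x1000, 0x2000, FLAG_CALL },
		{ 0x2000, 0x3000, 0 },           /* plain branch, filtered out */
		{ 0x3000, 0x1008, FLAG_RETURN }, /* filtered out, but still seen */
	};
	struct stack_model ts = { 0 };

	/* Filter to calls only; depth stays correct because returns were seen. */
	process_buffer(trace, 3, FLAG_CALL, true, &ts);
	return 0;
}

Because the stack model still sees the return even though only calls pass the filter, the reported depth remains correct; that is exactly why the patch feeds the real thread stack ahead of the filter.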
 tools/perf/util/auxtrace.h  |  2 ++
 tools/perf/util/intel-bts.c | 22 +++++++++++++++++-----
 tools/perf/util/intel-pt.c  |  5 ++++-
 3 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 767989e0e312..ac5f0d7167e6 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -63,6 +63,7 @@ enum itrace_period_type {
  * @calls: limit branch samples to calls (can be combined with @returns)
  * @returns: limit branch samples to returns (can be combined with @calls)
  * @callchain: add callchain to 'instructions' events
+ * @thread_stack: feed branches to the thread_stack
  * @last_branch: add branch context to 'instruction' events
  * @callchain_sz: maximum callchain size
  * @last_branch_sz: branch context size
@@ -82,6 +83,7 @@ struct itrace_synth_opts {
 	bool calls;
 	bool returns;
 	bool callchain;
+	bool thread_stack;
 	bool last_branch;
 	unsigned int callchain_sz;
 	unsigned int last_branch_sz;
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ecec73f6fe5a..749e6f2e37ca 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -422,7 +422,8 @@ static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
 }
 
 static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
-				    struct auxtrace_buffer *buffer)
+				    struct auxtrace_buffer *buffer,
+				    struct thread *thread)
 {
 	struct branch *branch;
 	size_t sz, bsz = sizeof(struct branch);
@@ -444,6 +445,12 @@ static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
 		if (!branch->from && !branch->to)
 			continue;
 		intel_bts_get_branch_type(btsq, branch);
+		if (btsq->bts->synth_opts.thread_stack)
+			thread_stack__event(thread, btsq->sample_flags,
+					    le64_to_cpu(branch->from),
+					    le64_to_cpu(branch->to),
+					    btsq->intel_pt_insn.length,
+					    buffer->buffer_nr + 1);
 		if (filter && !(filter & btsq->sample_flags))
 			continue;
 		err = intel_bts_synth_branch_sample(btsq, branch);
@@ -507,12 +514,13 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
 		goto out_put;
 	}
 
-	if (!btsq->bts->synth_opts.callchain && thread &&
+	if (!btsq->bts->synth_opts.callchain &&
+	    !btsq->bts->synth_opts.thread_stack && thread &&
 	    (!old_buffer || btsq->bts->sampling_mode ||
 	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
 		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);
 
-	err = intel_bts_process_buffer(btsq, buffer);
+	err = intel_bts_process_buffer(btsq, buffer, thread);
 
 	auxtrace_buffer__drop_data(buffer);
 
@@ -905,10 +913,14 @@ int intel_bts_process_auxtrace_info(union perf_event *event,
 	if (dump_trace)
 		return 0;
 
-	if (session->itrace_synth_opts && session->itrace_synth_opts->set)
+	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
 		bts->synth_opts = *session->itrace_synth_opts;
-	else
+	} else {
 		itrace_synth_opts__set_default(&bts->synth_opts);
+		if (session->itrace_synth_opts)
+			bts->synth_opts.thread_stack =
+				session->itrace_synth_opts->thread_stack;
+	}
 
 	if (bts->synth_opts.calls)
 		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index dc243b19197b..551ff6f640be 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1234,7 +1234,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
 	if (!(state->type & INTEL_PT_BRANCH))
 		return 0;
 
-	if (pt->synth_opts.callchain)
+	if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
 		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
 				    state->to_ip, ptq->insn_len,
 				    state->trace_nr);
@@ -2137,6 +2137,9 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
 			pt->synth_opts.branches = false;
 			pt->synth_opts.callchain = true;
 		}
+		if (session->itrace_synth_opts)
+			pt->synth_opts.thread_stack =
+				session->itrace_synth_opts->thread_stack;
 	}
 
 	if (pt->synth_opts.log)
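Both decoders pick up the new flag the same way when the user did not pass explicit --itrace options: defaults are applied, but a thread_stack request set programmatically (e.g. by perf script) is still copied across. A compact standalone sketch of that fallback follows; the struct and the set_defaults() helper are stand-ins, not the real perf structs or itrace_synth_opts__set_default().

/* Standalone sketch of the option-selection fallback used above. */
#include <stdbool.h>

struct synth_opts {
	bool set;		/* user passed explicit --itrace options */
	bool thread_stack;	/* feed branches to the thread stack */
};

/* Stand-in for itrace_synth_opts__set_default(). */
static void set_defaults(struct synth_opts *opts)
{
	opts->set = false;
	opts->thread_stack = false;
}

static void pick_synth_opts(struct synth_opts *dst,
			    const struct synth_opts *session_opts)
{
	if (session_opts && session_opts->set) {
		/* Explicit --itrace options: take them wholesale. */
		*dst = *session_opts;
	} else {
		/* Otherwise start from the defaults ... */
		set_defaults(dst);
		/* ... but still honor a programmatic thread_stack request. */
		if (session_opts)
			dst->thread_stack = session_opts->thread_stack;
	}
}

Copying only thread_stack out of the session options keeps the defaults intact while still letting a tool request thread-stack tracking without the user specifying --itrace.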