author | Adrian Hunter <adrian.hunter@intel.com> | 2016-06-23 15:40:57 +0200 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2016-06-23 22:02:59 +0200 |
commit | 50f736372d5ea0ce97ede698f957c9b141aa569e (patch) | |
tree | f12cd5756fdc9364c2cd0bd603167c37fb0f284b /tools/perf/util/intel-bts.c | |
parent | perf script: Print sample flags more nicely (diff) | |
download | linux-50f736372d5ea0ce97ede698f957c9b141aa569e.tar.xz linux-50f736372d5ea0ce97ede698f957c9b141aa569e.zip |
perf auxtrace: Add option to feed branches to the thread stack
In preparation for using the thread stack to print an indent
representing the stack depth in perf script, add an option to tell
decoders to feed branches to the thread stack. Add support for that
option to Intel PT and Intel BTS.
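As a rough sketch (not part of this patch), a tool that wants the indented output would request the new behaviour roughly like this. The `thread_stack` flag in `struct itrace_synth_opts` is what this series adds; the helper name and surrounding code are made up for illustration:

```c
#include <string.h>
#include "util/auxtrace.h"	/* struct itrace_synth_opts (perf-internal header, path assumed) */
#include "util/session.h"	/* struct perf_session (perf-internal header, path assumed) */

/* Illustrative helper (hypothetical name): request that the AUX trace
 * decoders feed every decoded branch to the thread stack. */
static void enable_thread_stack(struct perf_session *session,
				struct itrace_synth_opts *opts)
{
	/* Leave opts->set clear: the decoder then applies its own default
	 * synthesis options and only copies the thread_stack flag across
	 * (see the intel_bts_process_auxtrace_info() hunk in the diff below). */
	memset(opts, 0, sizeof(*opts));
	opts->thread_stack = true;
	session->itrace_synth_opts = opts;
}
```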
The advantage of using the decoder to feed the thread stack is that it
happens before branch filtering and so can be used with different itrace
options (e.g. it still works when only showing calls, even though the
thread stack needs to see calls and returns). Also it does not conflict
with using the thread stack to get callchains.
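To make that ordering concrete, here is a minimal sketch with made-up types and helper names (the real code is the intel_bts_process_buffer() change in the diff below): each branch is handed to the thread stack first, and only afterwards does the itrace branch filter decide whether a sample is synthesized.

```c
#include <linux/types.h>
#include "util/thread-stack.h"	/* thread_stack__event() (perf-internal header, path assumed) */

/* Hypothetical, simplified decoder state used only for this sketch. */
struct sketch_queue {
	bool use_thread_stack;	/* mirrors synth_opts.thread_stack */
	u32  branches_filter;	/* PERF_IP_FLAG_* mask from --itrace */
};

/* Hypothetical sample-synthesis helper, declared for completeness. */
static int sketch_synth_branch_sample(struct sketch_queue *q,
				      struct thread *thread,
				      u32 flags, u64 from, u64 to);

static int sketch_handle_branch(struct sketch_queue *q, struct thread *thread,
				u32 flags, u64 from, u64 to, u64 trace_nr)
{
	/* The thread stack sees every branch, before any filtering ...
	 * (insn_len passed as 0 for brevity; the real code passes the
	 * decoded instruction length). */
	if (q->use_thread_stack)
		thread_stack__event(thread, flags, from, to, 0, trace_nr);

	/* ... so --itrace=c (calls only) cannot hide the returns it needs. */
	if (q->branches_filter && !(q->branches_filter & flags))
		return 0;

	/* Only branches that pass the filter become synthesized samples. */
	return sketch_synth_branch_sample(q, thread, flags, from, to);
}
```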
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/1466689258-28493-3-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util/intel-bts.c')
-rw-r--r-- | tools/perf/util/intel-bts.c | 22 |
1 file changed, 17 insertions, 5 deletions
```diff
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ecec73f6fe5a..749e6f2e37ca 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -422,7 +422,8 @@ static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
 }
 
 static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
-				    struct auxtrace_buffer *buffer)
+				    struct auxtrace_buffer *buffer,
+				    struct thread *thread)
 {
 	struct branch *branch;
 	size_t sz, bsz = sizeof(struct branch);
@@ -444,6 +445,12 @@ static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
 		if (!branch->from && !branch->to)
 			continue;
 		intel_bts_get_branch_type(btsq, branch);
+		if (btsq->bts->synth_opts.thread_stack)
+			thread_stack__event(thread, btsq->sample_flags,
+					    le64_to_cpu(branch->from),
+					    le64_to_cpu(branch->to),
+					    btsq->intel_pt_insn.length,
+					    buffer->buffer_nr + 1);
 		if (filter && !(filter & btsq->sample_flags))
 			continue;
 		err = intel_bts_synth_branch_sample(btsq, branch);
@@ -507,12 +514,13 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
 		goto out_put;
 	}
 
-	if (!btsq->bts->synth_opts.callchain && thread &&
+	if (!btsq->bts->synth_opts.callchain &&
+	    !btsq->bts->synth_opts.thread_stack && thread &&
 	    (!old_buffer || btsq->bts->sampling_mode ||
 	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
 		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);
 
-	err = intel_bts_process_buffer(btsq, buffer);
+	err = intel_bts_process_buffer(btsq, buffer, thread);
 
 	auxtrace_buffer__drop_data(buffer);
 
@@ -905,10 +913,14 @@ int intel_bts_process_auxtrace_info(union perf_event *event,
 	if (dump_trace)
 		return 0;
 
-	if (session->itrace_synth_opts && session->itrace_synth_opts->set)
+	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
 		bts->synth_opts = *session->itrace_synth_opts;
-	else
+	} else {
 		itrace_synth_opts__set_default(&bts->synth_opts);
+		if (session->itrace_synth_opts)
+			bts->synth_opts.thread_stack =
+				session->itrace_synth_opts->thread_stack;
+	}
 
 	if (bts->synth_opts.calls)
 		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
```