Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/.gitignore | 2
-rw-r--r--  tools/perf/Documentation/Makefile | 6
-rw-r--r--  tools/perf/Documentation/jit-interface.txt | 15
-rw-r--r--  tools/perf/Documentation/perf-annotate.txt | 3
-rw-r--r--  tools/perf/Documentation/perf-diff.txt | 3
-rw-r--r--  tools/perf/Documentation/perf-kvm.txt | 30
-rw-r--r--  tools/perf/Documentation/perf-list.txt | 48
-rw-r--r--  tools/perf/Documentation/perf-report.txt | 3
-rw-r--r--  tools/perf/Documentation/perf-script-perl.txt | 4
-rw-r--r--  tools/perf/Documentation/perf-script-python.txt | 10
-rw-r--r--  tools/perf/Documentation/perf-trace.txt | 53
-rw-r--r--  tools/perf/MANIFEST | 4
-rw-r--r--  tools/perf/Makefile | 173
-rw-r--r--  tools/perf/arch/x86/Makefile | 3
-rw-r--r--  tools/perf/arch/x86/include/perf_regs.h | 80
-rw-r--r--  tools/perf/arch/x86/util/unwind.c | 111
-rw-r--r--  tools/perf/bash_completion | 26
-rw-r--r--  tools/perf/bench/bench.h | 3
-rw-r--r--  tools/perf/bench/mem-memcpy.c | 2
-rw-r--r--  tools/perf/bench/mem-memset.c | 2
-rw-r--r--  tools/perf/bench/sched-messaging.c | 2
-rw-r--r--  tools/perf/bench/sched-pipe.c | 10
-rw-r--r--  tools/perf/builtin-annotate.c | 4
-rw-r--r--  tools/perf/builtin-bench.c | 2
-rw-r--r--  tools/perf/builtin-buildid-cache.c | 10
-rw-r--r--  tools/perf/builtin-buildid-list.c | 7
-rw-r--r--  tools/perf/builtin-diff.c | 96
-rw-r--r--  tools/perf/builtin-evlist.c | 2
-rw-r--r--  tools/perf/builtin-help.c | 50
-rw-r--r--  tools/perf/builtin-inject.c | 29
-rw-r--r--  tools/perf/builtin-kmem.c | 234
-rw-r--r--  tools/perf/builtin-kvm.c | 838
-rw-r--r--  tools/perf/builtin-list.c | 16
-rw-r--r--  tools/perf/builtin-lock.c | 414
-rw-r--r--  tools/perf/builtin-probe.c | 24
-rw-r--r--  tools/perf/builtin-record.c | 305
-rw-r--r--  tools/perf/builtin-report.c | 47
-rw-r--r--  tools/perf/builtin-sched.c | 1522
-rw-r--r--  tools/perf/builtin-script.c | 229
-rw-r--r--  tools/perf/builtin-stat.c | 136
-rw-r--r--  tools/perf/builtin-test.c | 353
-rw-r--r--  tools/perf/builtin-timechart.c | 70
-rw-r--r--  tools/perf/builtin-top.c | 33
-rw-r--r--  tools/perf/builtin-trace.c | 310
-rw-r--r--  tools/perf/builtin.h | 2
-rw-r--r--  tools/perf/command-list.txt | 3
-rw-r--r--  tools/perf/config/feature-tests.mak | 50
-rw-r--r--  tools/perf/perf-archive.sh | 6
-rw-r--r--  tools/perf/perf.c | 75
-rw-r--r--  tools/perf/perf.h | 15
-rwxr-xr-x  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 94
-rw-r--r--  tools/perf/scripts/python/bin/event_analyzing_sample-record | 8
-rw-r--r--  tools/perf/scripts/python/bin/event_analyzing_sample-report | 3
-rw-r--r--  tools/perf/scripts/python/event_analyzing_sample.py | 189
-rw-r--r--  tools/perf/ui/browser.c | 7
-rw-r--r--  tools/perf/ui/browsers/annotate.c | 6
-rw-r--r--  tools/perf/ui/browsers/hists.c | 133
-rw-r--r--  tools/perf/ui/gtk/browser.c | 111
-rw-r--r--  tools/perf/ui/gtk/gtk.h | 3
-rw-r--r--  tools/perf/ui/gtk/helpline.c | 56
-rw-r--r--  tools/perf/ui/gtk/setup.c | 6
-rw-r--r--  tools/perf/ui/gtk/util.c | 9
-rw-r--r--  tools/perf/ui/helpline.c | 56
-rw-r--r--  tools/perf/ui/helpline.h | 33
-rw-r--r--  tools/perf/ui/hist.c | 390
-rw-r--r--  tools/perf/ui/setup.c | 10
-rw-r--r--  tools/perf/ui/stdio/hist.c | 498
-rw-r--r--  tools/perf/ui/tui/helpline.c | 57
-rw-r--r--  tools/perf/ui/tui/setup.c | 10
-rw-r--r--  tools/perf/util/alias.c | 3
-rw-r--r--  tools/perf/util/annotate.c | 19
-rw-r--r--  tools/perf/util/annotate.h | 15
-rw-r--r--  tools/perf/util/build-id.c | 11
-rw-r--r--  tools/perf/util/cache.h | 6
-rw-r--r--  tools/perf/util/callchain.c | 6
-rw-r--r--  tools/perf/util/callchain.h | 2
-rw-r--r--  tools/perf/util/cgroup.c | 4
-rw-r--r--  tools/perf/util/config.c | 6
-rw-r--r--  tools/perf/util/cpumap.c | 22
-rw-r--r--  tools/perf/util/cpumap.h | 13
-rw-r--r--  tools/perf/util/debug.c | 4
-rw-r--r--  tools/perf/util/debug.h | 17
-rw-r--r--  tools/perf/util/dso-test-data.c | 2
-rw-r--r--  tools/perf/util/dwarf-aux.c | 2
-rw-r--r--  tools/perf/util/event.c | 71
-rw-r--r--  tools/perf/util/event.h | 14
-rw-r--r--  tools/perf/util/evlist.c | 143
-rw-r--r--  tools/perf/util/evlist.h | 35
-rw-r--r--  tools/perf/util/evsel.c | 276
-rw-r--r--  tools/perf/util/evsel.h | 63
-rwxr-xr-x  tools/perf/util/generate-cmdlist.sh | 15
-rw-r--r--  tools/perf/util/header.c | 1063
-rw-r--r--  tools/perf/util/header.h | 29
-rw-r--r--  tools/perf/util/help.c | 4
-rw-r--r--  tools/perf/util/hist.c | 721
-rw-r--r--  tools/perf/util/hist.h | 75
-rw-r--r--  tools/perf/util/include/linux/bitops.h | 4
-rw-r--r--  tools/perf/util/include/linux/compiler.h | 9
-rw-r--r--  tools/perf/util/include/linux/kernel.h | 17
-rw-r--r--  tools/perf/util/include/linux/magic.h | 12
-rw-r--r--  tools/perf/util/include/linux/rbtree.h | 1
-rw-r--r--  tools/perf/util/include/linux/string.h | 2
-rw-r--r--  tools/perf/util/include/linux/types.h | 8
-rw-r--r--  tools/perf/util/intlist.c | 8
-rw-r--r--  tools/perf/util/map.c | 47
-rw-r--r--  tools/perf/util/map.h | 9
-rw-r--r--  tools/perf/util/parse-events-test.c | 426
-rw-r--r--  tools/perf/util/parse-events.c | 254
-rw-r--r--  tools/perf/util/parse-events.h | 18
-rw-r--r--  tools/perf/util/parse-events.l | 56
-rw-r--r--  tools/perf/util/parse-events.y | 125
-rw-r--r--  tools/perf/util/parse-options.c | 3
-rw-r--r--  tools/perf/util/perf_regs.h | 14
-rw-r--r--  tools/perf/util/pmu.c | 80
-rw-r--r--  tools/perf/util/pmu.h | 3
-rw-r--r--  tools/perf/util/pmu.y | 6
-rw-r--r--  tools/perf/util/probe-event.c | 69
-rw-r--r--  tools/perf/util/probe-finder.c | 28
-rw-r--r--  tools/perf/util/python-ext-sources | 2
-rw-r--r--  tools/perf/util/python.c | 17
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 50
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 113
-rw-r--r--  tools/perf/util/session.c | 198
-rw-r--r--  tools/perf/util/session.h | 10
-rw-r--r--  tools/perf/util/sort.c | 25
-rw-r--r--  tools/perf/util/sort.h | 2
-rw-r--r--  tools/perf/util/stat.c | 57
-rw-r--r--  tools/perf/util/stat.h | 16
-rw-r--r--  tools/perf/util/string.c | 18
-rw-r--r--  tools/perf/util/strlist.c | 2
-rw-r--r--  tools/perf/util/symbol-elf.c | 841
-rw-r--r--  tools/perf/util/symbol-minimal.c | 307
-rw-r--r--  tools/perf/util/symbol.c | 942
-rw-r--r--  tools/perf/util/symbol.h | 67
-rw-r--r--  tools/perf/util/target.c | 4
-rw-r--r--  tools/perf/util/thread.h | 2
-rw-r--r--  tools/perf/util/top.c | 3
-rw-r--r--  tools/perf/util/top.h | 1
-rw-r--r--  tools/perf/util/trace-event-parse.c | 54
-rw-r--r--  tools/perf/util/trace-event-scripting.c | 34
-rw-r--r--  tools/perf/util/trace-event.h | 12
-rw-r--r--  tools/perf/util/unwind.c | 571
-rw-r--r--  tools/perf/util/unwind.h | 35
-rw-r--r--  tools/perf/util/util.c | 25
-rw-r--r--  tools/perf/util/util.h | 9
-rw-r--r--  tools/perf/util/vdso.c | 111
-rw-r--r--  tools/perf/util/vdso.h | 18
-rw-r--r--  tools/perf/util/wrapper.c | 3
148 files changed, 10246 insertions, 4297 deletions
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 26b823b61aa1..8f8fbc227a46 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -21,3 +21,5 @@ config.mak
config.mak.autogen
*-bison.*
*-flex.*
+*.pyc
+*.pyo
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index ca600e09c8d4..9f2e44f2b17a 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -195,10 +195,10 @@ install-pdf: pdf
#install-html: html
# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
-../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
- $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
+ $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE
--include ../PERF-VERSION-FILE
+-include $(OUTPUT)PERF-VERSION-FILE
#
# Determine "include::" file references in asciidoc files.
diff --git a/tools/perf/Documentation/jit-interface.txt b/tools/perf/Documentation/jit-interface.txt
new file mode 100644
index 000000000000..a8656f564915
--- /dev/null
+++ b/tools/perf/Documentation/jit-interface.txt
@@ -0,0 +1,15 @@
+perf supports a simple JIT interface to resolve symbols for dynamic code generated
+by a JIT.
+
+The JIT has to write a /tmp/perf-%d.map (%d = pid of process) file
+
+This is a text file.
+
+Each line has the following format, fields separated with spaces:
+
+START SIZE symbolname
+
+START and SIZE are hex numbers without 0x.
+symbolname is the rest of the line, so it could contain special characters.
+
+The ownership of the file has to match the process.
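
As an illustration of the format above, a JIT running as pid 3477 (the pid,
addresses and symbol names below are purely hypothetical) would write
/tmp/perf-3477.map with lines such as:

    40b3a000 be jit_stub
    40b3a0c0 3a4 java.lang.String::hashCode
    40b3b200 158 compiled helper (inlined)

perf uses these entries to resolve symbols for samples that fall into the
code the JIT generated at runtime.
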
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index c89f9e1453f7..c8ffd9fd5c6a 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -85,6 +85,9 @@ OPTIONS
-M::
--disassembler-style=:: Set disassembler style for objdump.
+--objdump=<path>::
+ Path to objdump binary.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index 74d7481ed7a6..ab7f667de1b1 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -17,6 +17,9 @@ captured via perf record.
If no parameters are passed it will assume perf.data.old and perf.data.
+The differential profile is displayed only for events matching both
+specified perf.data files.
+
OPTIONS
-------
-M::
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index dd84cb2f0a88..326f2cb333cb 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -12,7 +12,7 @@ SYNOPSIS
[--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]]
{top|record|report|diff|buildid-list}
'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
- | --guestvmlinux=<path>] {top|record|report|diff|buildid-list}
+ | --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat}
DESCRIPTION
-----------
@@ -38,6 +38,18 @@ There are a couple of variants of perf kvm:
so that other tools can be used to fetch packages with matching symbol tables
for use by perf report.
+ 'perf kvm stat <command>' to run a command and gather performance counter
+ statistics.
+ Especially, perf 'kvm stat record/report' generates a statistical analysis
+ of KVM events. Currently, vmexit, mmio and ioport events are supported.
+ 'perf kvm stat record <command>' records kvm events and the events between
+ start and end <command>.
+ And this command produces a file which contains tracing results of kvm
+ events.
+
+ 'perf kvm stat report' reports statistical data which includes events
+ handled time, samples, and so on.
+
OPTIONS
-------
-i::
@@ -68,7 +80,21 @@ OPTIONS
--guestvmlinux=<path>::
Guest os kernel vmlinux.
+STAT REPORT OPTIONS
+-------------------
+--vcpu=<value>::
+ analyze events which occur on this vcpu. (default: all vcpus)
+
+--events=<value>::
+ events to be analyzed. Possible values: vmexit, mmio, ioport.
+ (default: vmexit)
+-k::
+--key=<value>::
+ Sorting key. Possible values: sample (default, sort by samples
+ number), time (sort by average time).
+
SEE ALSO
--------
linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1],
-linkperf:perf-diff[1], linkperf:perf-buildid-list[1]
+linkperf:perf-diff[1], linkperf:perf-buildid-list[1],
+linkperf:perf-stat[1]
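
Based on the subcommands and report options documented above, a typical
'perf kvm stat' session could look roughly like this (a sketch, not taken
from the patch itself):

    perf kvm stat record -a sleep 10          # record kvm events system-wide for 10 seconds
    perf kvm stat report                      # default: vmexit events, sorted by sample count
    perf kvm stat report --vcpu=0 --key=time  # restrict to vcpu 0, sort by average handling time
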
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index ddc22525228d..d1e39dc8c810 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -15,24 +15,43 @@ DESCRIPTION
This command displays the symbolic event types which can be selected in the
various perf commands with the -e option.
+[[EVENT_MODIFIERS]]
EVENT MODIFIERS
---------------
Events can optionally have a modifier by appending a colon and one or
-more modifiers. Modifiers allow the user to restrict when events are
-counted with 'u' for user-space, 'k' for kernel, 'h' for hypervisor.
-Additional modifiers are 'G' for guest counting (in KVM guests) and 'H'
-for host counting (not in KVM guests).
+more modifiers. Modifiers allow the user to restrict the events to be
+counted. The following modifiers exist:
+
+ u - user-space counting
+ k - kernel counting
+ h - hypervisor counting
+ G - guest counting (in KVM guests)
+ H - host counting (not in KVM guests)
+ p - precise level
The 'p' modifier can be used for specifying how precise the instruction
-address should be. The 'p' modifier is currently only implemented for
-Intel PEBS and can be specified multiple times:
- 0 - SAMPLE_IP can have arbitrary skid
- 1 - SAMPLE_IP must have constant skid
- 2 - SAMPLE_IP requested to have 0 skid
- 3 - SAMPLE_IP must have 0 skid
+address should be. The 'p' modifier can be specified multiple times:
+
+ 0 - SAMPLE_IP can have arbitrary skid
+ 1 - SAMPLE_IP must have constant skid
+ 2 - SAMPLE_IP requested to have 0 skid
+ 3 - SAMPLE_IP must have 0 skid
+
+For Intel systems precise event sampling is implemented with PEBS
+which supports up to precise-level 2.
-The PEBS implementation now supports up to 2.
+On AMD systems it is implemented using IBS (up to precise-level 2).
+The precise modifier works with event types 0x76 (cpu-cycles, CPU
+clocks not halted) and 0xC1 (micro-ops retired). Both events map to
+IBS execution sampling (IBS op) with the IBS Op Counter Control bit
+(IbsOpCntCtl) set respectively (see AMD64 Architecture Programmer’s
+Manual Volume 2: System Programming, 13.3 Instruction-Based
+Sampling). Examples to use IBS:
+
+ perf record -a -e cpu-cycles:p ... # use ibs op counting cycles
+ perf record -a -e r076:p ... # same as -e cpu-cycles:p
+ perf record -a -e r0C1:p ... # use ibs op counting micro-ops
RAW HARDWARE EVENT DESCRIPTOR
-----------------------------
@@ -44,6 +63,11 @@ layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Softwar
of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344,
Figure 13-7 Performance Event-Select Register (PerfEvtSeln)).
+Note: Only the following bit fields can be set in x86 counter
+registers: event, umask, edge, inv, cmask. Esp. guest/host only and
+OS/user mode flags must be setup using <<EVENT_MODIFIERS, EVENT
+MODIFIERS>>.
+
Example:
If the Intel docs for a QM720 Core i7 describe an event as:
@@ -91,4 +115,4 @@ SEE ALSO
linkperf:perf-stat[1], linkperf:perf-top[1],
linkperf:perf-record[1],
http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
-http://support.amd.com/us/Processor_TechDocs/24593.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
+http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
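
To sketch how the note above combines with the EVENT MODIFIERS section: the
raw descriptor only carries the counter-select bits (event, umask, edge, inv,
cmask), so privilege filtering has to come from a modifier. Using event code
0x3c with umask 0x00 purely as an example:

    # config = (umask << 8) | event  ->  (0x00 << 8) | 0x3c = 0x003c
    perf stat -e r003c -a sleep 1     # count the raw event everywhere
    perf stat -e r003c:u -a sleep 1   # user space only, via the 'u' modifier
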
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 495210a612c4..f4d91bebd59d 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -168,6 +168,9 @@ OPTIONS
branch stacks and it will automatically switch to the branch view mode,
unless --no-branch-stack is used.
+--objdump=<path>::
+ Path to objdump binary.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-annotate[1]
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index 3152cca15501..d00bef231340 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -116,8 +116,8 @@ search path and 'use'ing a few support modules (see module
descriptions below):
----
- use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/perf-script-Util/lib";
- use lib "./perf-script-Util/lib";
+ use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+ use lib "./Perf-Trace-Util/lib";
use Perf::Trace::Core;
use Perf::Trace::Context;
use Perf::Trace::Util;
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 471022069119..a4027f221a53 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -129,7 +129,7 @@ import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/perf-script-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -216,7 +216,7 @@ import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/perf-script-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -279,7 +279,7 @@ import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/perf-script-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -391,7 +391,7 @@ drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin
-rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-script.py
-drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 perf-script-Util
+drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util
-rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py
----
@@ -518,7 +518,7 @@ descriptions below):
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/perf-script-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
new file mode 100644
index 000000000000..3a2ae37310a9
--- /dev/null
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -0,0 +1,53 @@
+perf-trace(1)
+=============
+
+NAME
+----
+perf-trace - strace inspired tool
+
+SYNOPSIS
+--------
+[verse]
+'perf trace'
+
+DESCRIPTION
+-----------
+This command will show the events associated with the target, initially
+syscalls, but other system events like pagefaults, task lifetime events,
+scheduling events, etc.
+
+Initially this is a live mode only tool, but eventually will work with
+perf.data files like the other tools, allowing a detached 'record' from
+analysis phases.
+
+OPTIONS
+-------
+
+--all-cpus::
+ System-wide collection from all CPUs.
+
+-p::
+--pid=::
+ Record events on existing process ID (comma separated list).
+
+--tid=::
+ Record events on existing thread ID (comma separated list).
+
+--uid=::
+ Record events in threads owned by uid. Name or number.
+
+--no-inherit::
+ Child tasks do not inherit counters.
+
+--mmap-pages=::
+ Number of mmap data pages. Must be a power of two.
+
+--cpu::
+Collect samples only on the list of CPUs provided. Multiple CPUs can be provided as a
+comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
+In per-thread mode with inheritance mode on (default), Events are captured only when
+the thread executes on the designated CPUs. Default is to monitor all CPUs.
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-script[1]
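
Combining the options documented above, first invocations of the new tool
could look like this (the pid is hypothetical):

    perf trace -p 2387                       # follow syscalls of an existing process, live
    perf trace --uid=1000 --mmap-pages=64    # threads owned by uid 1000, larger ring buffer
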
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index b4b572e8c100..80db3f4bcf7a 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -10,8 +10,12 @@ include/linux/stringify.h
lib/rbtree.c
include/linux/swab.h
arch/*/include/asm/unistd*.h
+arch/*/include/asm/perf_regs.h
arch/*/lib/memcpy*.S
arch/*/lib/memset*.S
include/linux/poison.h
include/linux/magic.h
include/linux/hw_breakpoint.h
+arch/x86/include/asm/svm.h
+arch/x86/include/asm/vmx.h
+arch/x86/include/asm/kvm_host.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 35655c3a7b7a..e5e71e7d95a0 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -37,7 +37,14 @@ include config/utilities.mak
#
# Define NO_NEWT if you do not want TUI support.
#
+# Define NO_GTK2 if you do not want GTK+ GUI support.
+#
# Define NO_DEMANGLE if you do not want C++ symbol demangling.
+#
+# Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds)
+#
+# Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf
+# backtrace post unwind.
$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
@$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
@@ -50,16 +57,19 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-e s/sh[234].*/sh/ )
+NO_PERF_REGS := 1
CC = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
# Additional ARCH settings for x86
ifeq ($(ARCH),i386)
- ARCH := x86
+ override ARCH := x86
+ NO_PERF_REGS := 0
+ LIBUNWIND_LIBS = -lunwind -lunwind-x86
endif
ifeq ($(ARCH),x86_64)
- ARCH := x86
+ override ARCH := x86
IS_X86_64 := 0
ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
@@ -69,6 +79,8 @@ ifeq ($(ARCH),x86_64)
ARCH_CFLAGS := -DARCH_X86_64
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
endif
+ NO_PERF_REGS := 0
+ LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
endif
# Treat warnings as errors unless directed not to
@@ -89,7 +101,7 @@ ifdef PARSER_DEBUG
PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG
endif
-CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -funwind-tables -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS)
EXTLIBS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
ALL_LDFLAGS = $(LDFLAGS)
@@ -186,10 +198,10 @@ SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
TRACE_EVENT_DIR = ../lib/traceevent/
-ifeq ("$(origin O)", "command line")
- TE_PATH=$(OUTPUT)/
+ifneq ($(OUTPUT),)
+ TE_PATH=$(OUTPUT)
else
- TE_PATH=$(TRACE_EVENT_DIR)/
+ TE_PATH=$(TRACE_EVENT_DIR)
endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
@@ -221,13 +233,13 @@ export PERL_PATH
FLEX = flex
BISON= bison
-$(OUTPUT)util/parse-events-flex.c: util/parse-events.l
+$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
$(OUTPUT)util/parse-events-bison.c: util/parse-events.y
$(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c
-$(OUTPUT)util/pmu-flex.c: util/pmu.l
+$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
$(OUTPUT)util/pmu-bison.c: util/pmu.y
@@ -252,6 +264,7 @@ LIB_H += util/include/linux/ctype.h
LIB_H += util/include/linux/kernel.h
LIB_H += util/include/linux/list.h
LIB_H += util/include/linux/export.h
+LIB_H += util/include/linux/magic.h
LIB_H += util/include/linux/poison.h
LIB_H += util/include/linux/prefetch.h
LIB_H += util/include/linux/rbtree.h
@@ -321,6 +334,10 @@ LIB_H += $(TRACE_EVENT_DIR)event-parse.h
LIB_H += util/target.h
LIB_H += util/rblist.h
LIB_H += util/intlist.h
+LIB_H += util/perf_regs.h
+LIB_H += util/unwind.h
+LIB_H += ui/helpline.h
+LIB_H += util/vdso.h
LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o
@@ -356,6 +373,7 @@ LIB_OBJS += $(OUTPUT)util/usage.o
LIB_OBJS += $(OUTPUT)util/wrapper.o
LIB_OBJS += $(OUTPUT)util/sigchain.o
LIB_OBJS += $(OUTPUT)util/symbol.o
+LIB_OBJS += $(OUTPUT)util/symbol-elf.o
LIB_OBJS += $(OUTPUT)util/dso-test-data.o
LIB_OBJS += $(OUTPUT)util/color.o
LIB_OBJS += $(OUTPUT)util/pager.o
@@ -387,11 +405,15 @@ LIB_OBJS += $(OUTPUT)util/cgroup.o
LIB_OBJS += $(OUTPUT)util/target.o
LIB_OBJS += $(OUTPUT)util/rblist.o
LIB_OBJS += $(OUTPUT)util/intlist.o
+LIB_OBJS += $(OUTPUT)util/vdso.o
+LIB_OBJS += $(OUTPUT)util/stat.o
-BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
+LIB_OBJS += $(OUTPUT)ui/helpline.o
+LIB_OBJS += $(OUTPUT)ui/hist.o
+LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
+BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
-
# Benchmark modules
BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
@@ -449,34 +471,73 @@ PYRF_OBJS += $(OUTPUT)util/xyarray.o
-include config.mak.autogen
-include config.mak
-ifndef NO_DWARF
-FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS)
-ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y)
- msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
+ifdef NO_LIBELF
NO_DWARF := 1
-endif # Dwarf support
-endif # NO_DWARF
-
--include arch/$(ARCH)/Makefile
-
-ifneq ($(OUTPUT),)
- BASIC_CFLAGS += -I$(OUTPUT)
-endif
-
+ NO_DEMANGLE := 1
+ NO_LIBUNWIND := 1
+else
FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS)
ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y)
FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS)
ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y)
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
else
- msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel);
+ NO_LIBELF := 1
+ NO_DWARF := 1
+ NO_DEMANGLE := 1
endif
endif
+endif # NO_LIBELF
+
+ifndef NO_LIBUNWIND
+# for linking with debug library, run like:
+# make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
+ifdef LIBUNWIND_DIR
+ LIBUNWIND_CFLAGS := -I$(LIBUNWIND_DIR)/include
+ LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib
+endif
+
+FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(ALL_CFLAGS) $(LIBUNWIND_LDFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS)
+ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND)),y)
+ msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
+ NO_LIBUNWIND := 1
+endif # Libunwind support
+endif # NO_LIBUNWIND
+
+-include arch/$(ARCH)/Makefile
+
+ifneq ($(OUTPUT),)
+ BASIC_CFLAGS += -I$(OUTPUT)
+endif
+
+ifdef NO_LIBELF
+BASIC_CFLAGS += -DNO_LIBELF_SUPPORT
+
+EXTLIBS := $(filter-out -lelf,$(EXTLIBS))
+
+# Remove ELF/DWARF dependent codes
+LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS))
+LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS))
+
+BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS))
+
+# Use minimal symbol handling
+LIB_OBJS += $(OUTPUT)util/symbol-minimal.o
+
+else # NO_LIBELF
ifneq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y)
BASIC_CFLAGS += -DLIBELF_NO_MMAP
endif
+FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS)
+ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y)
+ msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
+ NO_DWARF := 1
+endif # Dwarf support
+
ifndef NO_DWARF
ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
@@ -487,6 +548,29 @@ else
LIB_OBJS += $(OUTPUT)util/dwarf-aux.o
endif # PERF_HAVE_DWARF_REGS
endif # NO_DWARF
+endif # NO_LIBELF
+
+ifdef NO_LIBUNWIND
+ BASIC_CFLAGS += -DNO_LIBUNWIND_SUPPORT
+else
+ EXTLIBS += $(LIBUNWIND_LIBS)
+ BASIC_CFLAGS := $(LIBUNWIND_CFLAGS) $(BASIC_CFLAGS)
+ BASIC_LDFLAGS := $(LIBUNWIND_LDFLAGS) $(BASIC_LDFLAGS)
+ LIB_OBJS += $(OUTPUT)util/unwind.o
+endif
+
+ifdef NO_LIBAUDIT
+ BASIC_CFLAGS += -DNO_LIBAUDIT_SUPPORT
+else
+ FLAGS_LIBAUDIT = $(ALL_CFLAGS) $(ALL_LDFLAGS) -laudit
+ ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT)),y)
+ msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
+ BASIC_CFLAGS += -DNO_LIBAUDIT_SUPPORT
+ else
+ BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
+ EXTLIBS += -laudit
+ endif
+endif
ifdef NO_NEWT
BASIC_CFLAGS += -DNO_NEWT_SUPPORT
@@ -504,14 +588,13 @@ else
LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
LIB_OBJS += $(OUTPUT)ui/browsers/map.o
- LIB_OBJS += $(OUTPUT)ui/helpline.o
LIB_OBJS += $(OUTPUT)ui/progress.o
LIB_OBJS += $(OUTPUT)ui/util.o
LIB_OBJS += $(OUTPUT)ui/tui/setup.o
LIB_OBJS += $(OUTPUT)ui/tui/util.o
+ LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
LIB_H += ui/browser.h
LIB_H += ui/browsers/map.h
- LIB_H += ui/helpline.h
LIB_H += ui/keysyms.h
LIB_H += ui/libslang.h
LIB_H += ui/progress.h
@@ -523,7 +606,7 @@ endif
ifdef NO_GTK2
BASIC_CFLAGS += -DNO_GTK2_SUPPORT
else
- FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0)
+ FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
BASIC_CFLAGS += -DNO_GTK2_SUPPORT
@@ -531,11 +614,12 @@ else
ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y)
BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR
endif
- BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
- EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
+ BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
+ EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
LIB_OBJS += $(OUTPUT)ui/gtk/util.o
+ LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o
# Make sure that it'd be included only once.
ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),)
LIB_OBJS += $(OUTPUT)ui/setup.o
@@ -644,7 +728,7 @@ else
EXTLIBS += -liberty
BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
else
- FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd
+ FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd
has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD))
ifeq ($(has_bfd),y)
EXTLIBS += -lbfd
@@ -674,6 +758,13 @@ else
endif
endif
+ifeq ($(NO_PERF_REGS),0)
+ ifeq ($(ARCH),x86)
+ LIB_H += arch/x86/include/perf_regs.h
+ endif
+else
+ BASIC_CFLAGS += -DNO_PERF_REGS
+endif
ifdef NO_STRLCPY
BASIC_CFLAGS += -DNO_STRLCPY
@@ -683,6 +774,14 @@ else
endif
endif
+ifdef NO_BACKTRACE
+ BASIC_CFLAGS += -DNO_BACKTRACE
+else
+ ifneq ($(call try-cc,$(SOURCE_BACKTRACE),),y)
+ BASIC_CFLAGS += -DNO_BACKTRACE
+ endif
+endif
+
ifdef ASCIIDOC8
export ASCIIDOC8
endif
@@ -700,6 +799,7 @@ perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
template_dir_SQ = $(subst ','\'',$(template_dir))
htmldir_SQ = $(subst ','\'',$(htmldir))
prefix_SQ = $(subst ','\'',$(prefix))
+sysconfdir_SQ = $(subst ','\'',$(sysconfdir))
SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
@@ -767,10 +867,10 @@ $(OUTPUT)perf.o perf.spec \
# over the general rule for .o
$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
+ $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -w $<
$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
+ $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $<
$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
@@ -842,7 +942,10 @@ $(LIB_FILE): $(LIB_OBJS)
# libtraceevent.a
$(LIBTRACEEVENT):
- $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) $(COMMAND_O) libtraceevent.a
+ $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a
+
+$(LIBTRACEEVENT)-clean:
+ $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean
help:
@echo 'Perf make targets:'
@@ -951,6 +1054,8 @@ install: all
$(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'
+ $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
@@ -981,7 +1086,7 @@ quick-install-html:
### Cleaning rules
-clean:
+clean: $(LIBTRACEEVENT)-clean
$(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS)
$(RM) $(ALL_PROGRAMS) perf
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
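
The new build knobs documented at the top of this Makefile can be combined on
the make command line; for example (the libunwind path is hypothetical):

    make -C tools/perf NO_LIBELF=1                   # cross-build without libelf, falls back to symbol-minimal.o
    make -C tools/perf NO_LIBUNWIND=1                # build without dwarf post-unwind support
    make -C tools/perf LIBUNWIND_DIR=/opt/libunwind  # pick up a libunwind outside the default search paths
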
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 744e629797be..815841c04eb2 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -2,4 +2,7 @@ ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
endif
+ifndef NO_LIBUNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+endif
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
new file mode 100644
index 000000000000..46fc9f15c6b3
--- /dev/null
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -0,0 +1,80 @@
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include "../../util/types.h"
+#include "../../../../../arch/x86/include/asm/perf_regs.h"
+
+#ifndef ARCH_X86_64
+#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
+#else
+#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
+ (1ULL << PERF_REG_X86_ES) | \
+ (1ULL << PERF_REG_X86_FS) | \
+ (1ULL << PERF_REG_X86_GS))
+#define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT)
+#endif
+#define PERF_REG_IP PERF_REG_X86_IP
+#define PERF_REG_SP PERF_REG_X86_SP
+
+static inline const char *perf_reg_name(int id)
+{
+ switch (id) {
+ case PERF_REG_X86_AX:
+ return "AX";
+ case PERF_REG_X86_BX:
+ return "BX";
+ case PERF_REG_X86_CX:
+ return "CX";
+ case PERF_REG_X86_DX:
+ return "DX";
+ case PERF_REG_X86_SI:
+ return "SI";
+ case PERF_REG_X86_DI:
+ return "DI";
+ case PERF_REG_X86_BP:
+ return "BP";
+ case PERF_REG_X86_SP:
+ return "SP";
+ case PERF_REG_X86_IP:
+ return "IP";
+ case PERF_REG_X86_FLAGS:
+ return "FLAGS";
+ case PERF_REG_X86_CS:
+ return "CS";
+ case PERF_REG_X86_SS:
+ return "SS";
+ case PERF_REG_X86_DS:
+ return "DS";
+ case PERF_REG_X86_ES:
+ return "ES";
+ case PERF_REG_X86_FS:
+ return "FS";
+ case PERF_REG_X86_GS:
+ return "GS";
+#ifdef ARCH_X86_64
+ case PERF_REG_X86_R8:
+ return "R8";
+ case PERF_REG_X86_R9:
+ return "R9";
+ case PERF_REG_X86_R10:
+ return "R10";
+ case PERF_REG_X86_R11:
+ return "R11";
+ case PERF_REG_X86_R12:
+ return "R12";
+ case PERF_REG_X86_R13:
+ return "R13";
+ case PERF_REG_X86_R14:
+ return "R14";
+ case PERF_REG_X86_R15:
+ return "R15";
+#endif /* ARCH_X86_64 */
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/x86/util/unwind.c b/tools/perf/arch/x86/util/unwind.c
new file mode 100644
index 000000000000..78d956eff96f
--- /dev/null
+++ b/tools/perf/arch/x86/util/unwind.c
@@ -0,0 +1,111 @@
+
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+
+#ifdef ARCH_X86_64
+int unwind__arch_reg_id(int regnum)
+{
+ int id;
+
+ switch (regnum) {
+ case UNW_X86_64_RAX:
+ id = PERF_REG_X86_AX;
+ break;
+ case UNW_X86_64_RDX:
+ id = PERF_REG_X86_DX;
+ break;
+ case UNW_X86_64_RCX:
+ id = PERF_REG_X86_CX;
+ break;
+ case UNW_X86_64_RBX:
+ id = PERF_REG_X86_BX;
+ break;
+ case UNW_X86_64_RSI:
+ id = PERF_REG_X86_SI;
+ break;
+ case UNW_X86_64_RDI:
+ id = PERF_REG_X86_DI;
+ break;
+ case UNW_X86_64_RBP:
+ id = PERF_REG_X86_BP;
+ break;
+ case UNW_X86_64_RSP:
+ id = PERF_REG_X86_SP;
+ break;
+ case UNW_X86_64_R8:
+ id = PERF_REG_X86_R8;
+ break;
+ case UNW_X86_64_R9:
+ id = PERF_REG_X86_R9;
+ break;
+ case UNW_X86_64_R10:
+ id = PERF_REG_X86_R10;
+ break;
+ case UNW_X86_64_R11:
+ id = PERF_REG_X86_R11;
+ break;
+ case UNW_X86_64_R12:
+ id = PERF_REG_X86_R12;
+ break;
+ case UNW_X86_64_R13:
+ id = PERF_REG_X86_R13;
+ break;
+ case UNW_X86_64_R14:
+ id = PERF_REG_X86_R14;
+ break;
+ case UNW_X86_64_R15:
+ id = PERF_REG_X86_R15;
+ break;
+ case UNW_X86_64_RIP:
+ id = PERF_REG_X86_IP;
+ break;
+ default:
+ pr_err("unwind: invalid reg id %d\n", regnum);
+ return -EINVAL;
+ }
+
+ return id;
+}
+#else
+int unwind__arch_reg_id(int regnum)
+{
+ int id;
+
+ switch (regnum) {
+ case UNW_X86_EAX:
+ id = PERF_REG_X86_AX;
+ break;
+ case UNW_X86_EDX:
+ id = PERF_REG_X86_DX;
+ break;
+ case UNW_X86_ECX:
+ id = PERF_REG_X86_CX;
+ break;
+ case UNW_X86_EBX:
+ id = PERF_REG_X86_BX;
+ break;
+ case UNW_X86_ESI:
+ id = PERF_REG_X86_SI;
+ break;
+ case UNW_X86_EDI:
+ id = PERF_REG_X86_DI;
+ break;
+ case UNW_X86_EBP:
+ id = PERF_REG_X86_BP;
+ break;
+ case UNW_X86_ESP:
+ id = PERF_REG_X86_SP;
+ break;
+ case UNW_X86_EIP:
+ id = PERF_REG_X86_IP;
+ break;
+ default:
+ pr_err("unwind: invalid reg id %d\n", regnum);
+ return -EINVAL;
+ }
+
+ return id;
+}
+#endif /* ARCH_X86_64 */
diff --git a/tools/perf/bash_completion b/tools/perf/bash_completion
new file mode 100644
index 000000000000..1958fa539d0f
--- /dev/null
+++ b/tools/perf/bash_completion
@@ -0,0 +1,26 @@
+# perf completion
+
+have perf &&
+_perf()
+{
+ local cur cmd
+
+ COMPREPLY=()
+ _get_comp_words_by_ref cur prev
+
+ cmd=${COMP_WORDS[0]}
+
+ # List perf subcommands
+ if [ $COMP_CWORD -eq 1 ]; then
+ cmds=$($cmd --list-cmds)
+ COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) )
+ # List possible events for -e option
+ elif [[ $prev == "-e" && "${COMP_WORDS[1]}" == @(record|stat|top) ]]; then
+ cmds=$($cmd list --raw-dump)
+ COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) )
+ # Fall down to list regular files
+ else
+ _filedir
+ fi
+} &&
+complete -F _perf perf
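
The install target added to the Makefile in this series places this file at
$(sysconfdir)/bash_completion.d/perf; in an uninstalled tree it can simply be
sourced, after which completion draws on 'perf --list-cmds' and
'perf list --raw-dump' as used in the script above:

    $ source tools/perf/bash_completion
    $ perf rec<TAB>            # completes perf subcommands
    $ perf record -e cyc<TAB>  # completes event names after -e
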
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index a09bece6dad2..8f89998eeaf4 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -3,7 +3,8 @@
extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
-extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used);
+extern int bench_mem_memcpy(int argc, const char **argv,
+ const char *prefix __maybe_unused);
extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
#define BENCH_FORMAT_DEFAULT_STR "default"
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 02dad5d3359b..93c83e3cb4a7 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -177,7 +177,7 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
} while (0)
int bench_mem_memcpy(int argc, const char **argv,
- const char *prefix __used)
+ const char *prefix __maybe_unused)
{
int i;
size_t len;
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c
index 350cc9557265..c6e4bc523492 100644
--- a/tools/perf/bench/mem-memset.c
+++ b/tools/perf/bench/mem-memset.c
@@ -171,7 +171,7 @@ static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault)
} while (0)
int bench_mem_memset(int argc, const char **argv,
- const char *prefix __used)
+ const char *prefix __maybe_unused)
{
int i;
size_t len;
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index d1d1b30f99c1..cc1190a0849b 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -267,7 +267,7 @@ static const char * const bench_sched_message_usage[] = {
};
int bench_sched_messaging(int argc, const char **argv,
- const char *prefix __used)
+ const char *prefix __maybe_unused)
{
unsigned int i, total_children;
struct timeval start, stop, diff;
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 0c7454f8b8a9..69cfba8d4c6c 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -43,7 +43,7 @@ static const char * const bench_sched_pipe_usage[] = {
};
int bench_sched_pipe(int argc, const char **argv,
- const char *prefix __used)
+ const char *prefix __maybe_unused)
{
int pipe_1[2], pipe_2[2];
int m = 0, i;
@@ -55,14 +55,14 @@ int bench_sched_pipe(int argc, const char **argv,
* discarding returned value of read(), write()
* causes error in building environment for perf
*/
- int __used ret, wait_stat;
- pid_t pid, retpid;
+ int __maybe_unused ret, wait_stat;
+ pid_t pid, retpid __maybe_unused;
argc = parse_options(argc, argv, options,
bench_sched_pipe_usage, 0);
- assert(!pipe(pipe_1));
- assert(!pipe(pipe_2));
+ BUG_ON(pipe(pipe_1));
+ BUG_ON(pipe(pipe_2));
pid = fork();
assert(pid >= 0);
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 67522cf87405..9ea38540b873 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -239,7 +239,7 @@ static const char * const annotate_usage[] = {
NULL
};
-int cmd_annotate(int argc, const char **argv, const char *prefix __used)
+int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_annotate annotate = {
.tool = {
@@ -282,6 +282,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
+ OPT_STRING(0, "objdump", &objdump_path, "path",
+ "objdump binary to use for disassembly and annotations"),
OPT_END()
};
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 1f3100216448..cae9a5fd2ecf 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -173,7 +173,7 @@ static void all_subsystem(void)
all_suite(&subsystems[i]);
}
-int cmd_bench(int argc, const char **argv, const char *prefix __used)
+int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
{
int i, j, status = 0;
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 29ad20e67919..83654557e108 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -43,15 +43,16 @@ static int build_id_cache__add_file(const char *filename, const char *debugdir)
}
build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
- err = build_id_cache__add_s(sbuild_id, debugdir, filename, false);
+ err = build_id_cache__add_s(sbuild_id, debugdir, filename,
+ false, false);
if (verbose)
pr_info("Adding %s %s: %s\n", sbuild_id, filename,
err ? "FAIL" : "Ok");
return err;
}
-static int build_id_cache__remove_file(const char *filename __used,
- const char *debugdir __used)
+static int build_id_cache__remove_file(const char *filename __maybe_unused,
+ const char *debugdir __maybe_unused)
{
u8 build_id[BUILD_ID_SIZE];
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
@@ -119,7 +120,8 @@ static int __cmd_buildid_cache(void)
return 0;
}
-int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used)
+int cmd_buildid_cache(int argc, const char **argv,
+ const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, buildid_cache_options,
buildid_cache_usage, 0);
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 6b2bcfbde150..1159feeebb19 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -16,8 +16,6 @@
#include "util/session.h"
#include "util/symbol.h"
-#include <libelf.h>
-
static const char *input_name;
static bool force;
static bool show_kernel;
@@ -71,7 +69,7 @@ static int perf_session__list_build_ids(void)
{
struct perf_session *session;
- elf_version(EV_CURRENT);
+ symbol__elf_init();
session = perf_session__new(input_name, O_RDONLY, force, false,
&build_id__mark_dso_hit_ops);
@@ -105,7 +103,8 @@ static int __cmd_buildid_list(void)
return perf_session__list_build_ids();
}
-int cmd_buildid_list(int argc, const char **argv, const char *prefix __used)
+int cmd_buildid_list(int argc, const char **argv,
+ const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, options, buildid_list_usage, 0);
setup_pager();
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index d29d350fb2b7..761f4197a9e2 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -10,6 +10,7 @@
#include "util/event.h"
#include "util/hist.h"
#include "util/evsel.h"
+#include "util/evlist.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/sort.h"
@@ -24,11 +25,6 @@ static char diff__default_sort_order[] = "dso,symbol";
static bool force;
static bool show_displacement;
-struct perf_diff {
- struct perf_tool tool;
- struct perf_session *session;
-};
-
static int hists__add_entry(struct hists *self,
struct addr_location *al, u64 period)
{
@@ -37,14 +33,12 @@ static int hists__add_entry(struct hists *self,
return -ENOMEM;
}
-static int diff__process_sample_event(struct perf_tool *tool,
+static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
+ struct perf_evsel *evsel,
struct machine *machine)
{
- struct perf_diff *_diff = container_of(tool, struct perf_diff, tool);
- struct perf_session *session = _diff->session;
struct addr_location al;
if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
@@ -56,26 +50,24 @@ static int diff__process_sample_event(struct perf_tool *tool,
if (al.filtered || al.sym == NULL)
return 0;
- if (hists__add_entry(&session->hists, &al, sample->period)) {
+ if (hists__add_entry(&evsel->hists, &al, sample->period)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
}
- session->hists.stats.total_period += sample->period;
+ evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_diff diff = {
- .tool = {
- .sample = diff__process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_task,
- .fork = perf_event__process_task,
- .lost = perf_event__process_lost,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
- },
+static struct perf_tool tool = {
+ .sample = diff__process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .lost = perf_event__process_lost,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
};
static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -146,34 +138,71 @@ static void hists__match(struct hists *older, struct hists *newer)
}
}
+static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *e;
+
+ list_for_each_entry(e, &evlist->entries, node)
+ if (perf_evsel__match2(evsel, e))
+ return e;
+
+ return NULL;
+}
+
static int __cmd_diff(void)
{
int ret, i;
#define older (session[0])
#define newer (session[1])
struct perf_session *session[2];
+ struct perf_evlist *evlist_new, *evlist_old;
+ struct perf_evsel *evsel;
+ bool first = true;
older = perf_session__new(input_old, O_RDONLY, force, false,
- &diff.tool);
+ &tool);
newer = perf_session__new(input_new, O_RDONLY, force, false,
- &diff.tool);
+ &tool);
if (session[0] == NULL || session[1] == NULL)
return -ENOMEM;
for (i = 0; i < 2; ++i) {
- diff.session = session[i];
- ret = perf_session__process_events(session[i], &diff.tool);
+ ret = perf_session__process_events(session[i], &tool);
if (ret)
goto out_delete;
- hists__output_resort(&session[i]->hists);
}
- if (show_displacement)
- hists__resort_entries(&older->hists);
+ evlist_old = older->evlist;
+ evlist_new = newer->evlist;
+
+ list_for_each_entry(evsel, &evlist_new->entries, node)
+ hists__output_resort(&evsel->hists);
+
+ list_for_each_entry(evsel, &evlist_old->entries, node) {
+ hists__output_resort(&evsel->hists);
+
+ if (show_displacement)
+ hists__resort_entries(&evsel->hists);
+ }
+
+ list_for_each_entry(evsel, &evlist_new->entries, node) {
+ struct perf_evsel *evsel_old;
+
+ evsel_old = evsel_match(evsel, evlist_old);
+ if (!evsel_old)
+ continue;
+
+ fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n",
+ perf_evsel__name(evsel));
+
+ first = false;
+
+ hists__match(&evsel_old->hists, &evsel->hists);
+ hists__fprintf(&evsel->hists, &evsel_old->hists,
+ show_displacement, true, 0, 0, stdout);
+ }
- hists__match(&older->hists, &newer->hists);
- hists__fprintf(&newer->hists, &older->hists,
- show_displacement, true, 0, 0, stdout);
out_delete:
for (i = 0; i < 2; ++i)
perf_session__delete(session[i]);
@@ -213,7 +242,7 @@ static const struct option options[] = {
OPT_END()
};
-int cmd_diff(int argc, const char **argv, const char *prefix __used)
+int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
{
sort_order = diff__default_sort_order;
argc = parse_options(argc, argv, options, diff_usage, 0);
@@ -235,6 +264,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix __used)
if (symbol__init() < 0)
return -1;
+ perf_hpp__init(true, show_displacement);
setup_sorting(diff_usage, options);
setup_pager();
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 0dd5a058f766..1fb164164fd0 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -113,7 +113,7 @@ static const char * const evlist_usage[] = {
NULL
};
-int cmd_evlist(int argc, const char **argv, const char *prefix __used)
+int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_attr_details details = { .verbose = false, };
const char *input_name = NULL;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 6d5a8a7faf48..25c8b942ff85 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -24,13 +24,14 @@ static struct man_viewer_info_list {
} *man_viewer_info_list;
enum help_format {
+ HELP_FORMAT_NONE,
HELP_FORMAT_MAN,
HELP_FORMAT_INFO,
HELP_FORMAT_WEB,
};
static bool show_all = false;
-static enum help_format help_format = HELP_FORMAT_MAN;
+static enum help_format help_format = HELP_FORMAT_NONE;
static struct option builtin_help_options[] = {
OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
@@ -54,7 +55,9 @@ static enum help_format parse_help_format(const char *format)
return HELP_FORMAT_INFO;
if (!strcmp(format, "web") || !strcmp(format, "html"))
return HELP_FORMAT_WEB;
- die("unrecognized help format '%s'", format);
+
+ pr_err("unrecognized help format '%s'", format);
+ return HELP_FORMAT_NONE;
}
static const char *get_man_viewer_info(const char *name)
@@ -259,6 +262,8 @@ static int perf_help_config(const char *var, const char *value, void *cb)
if (!value)
return config_error_nonbool(var);
help_format = parse_help_format(value);
+ if (help_format == HELP_FORMAT_NONE)
+ return -1;
return 0;
}
if (!strcmp(var, "man.viewer")) {
@@ -352,7 +357,7 @@ static void exec_viewer(const char *name, const char *page)
warning("'%s': unknown man viewer.", name);
}
-static void show_man_page(const char *perf_cmd)
+static int show_man_page(const char *perf_cmd)
{
struct man_viewer_list *viewer;
const char *page = cmd_to_page(perf_cmd);
@@ -365,28 +370,35 @@ static void show_man_page(const char *perf_cmd)
if (fallback)
exec_viewer(fallback, page);
exec_viewer("man", page);
- die("no man viewer handled the request");
+
+ pr_err("no man viewer handled the request");
+ return -1;
}
-static void show_info_page(const char *perf_cmd)
+static int show_info_page(const char *perf_cmd)
{
const char *page = cmd_to_page(perf_cmd);
setenv("INFOPATH", system_path(PERF_INFO_PATH), 1);
execlp("info", "info", "perfman", page, NULL);
+ return -1;
}
-static void get_html_page_path(struct strbuf *page_path, const char *page)
+static int get_html_page_path(struct strbuf *page_path, const char *page)
{
struct stat st;
const char *html_path = system_path(PERF_HTML_PATH);
/* Check that we have a perf documentation directory. */
if (stat(mkpath("%s/perf.html", html_path), &st)
- || !S_ISREG(st.st_mode))
- die("'%s': not a documentation directory.", html_path);
+ || !S_ISREG(st.st_mode)) {
+ pr_err("'%s': not a documentation directory.", html_path);
+ return -1;
+ }
strbuf_init(page_path, 0);
strbuf_addf(page_path, "%s/%s.html", html_path, page);
+
+ return 0;
}
/*
@@ -401,19 +413,23 @@ static void open_html(const char *path)
}
#endif
-static void show_html_page(const char *perf_cmd)
+static int show_html_page(const char *perf_cmd)
{
const char *page = cmd_to_page(perf_cmd);
struct strbuf page_path; /* it leaks but we exec bellow */
- get_html_page_path(&page_path, page);
+ if (get_html_page_path(&page_path, page) != 0)
+ return -1;
open_html(page_path.buf);
+
+ return 0;
}
-int cmd_help(int argc, const char **argv, const char *prefix __used)
+int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char *alias;
+ int rc = 0;
load_command_list("perf-", &main_cmds, &other_cmds);
@@ -444,16 +460,20 @@ int cmd_help(int argc, const char **argv, const char *prefix __used)
switch (help_format) {
case HELP_FORMAT_MAN:
- show_man_page(argv[0]);
+ rc = show_man_page(argv[0]);
break;
case HELP_FORMAT_INFO:
- show_info_page(argv[0]);
+ rc = show_info_page(argv[0]);
break;
case HELP_FORMAT_WEB:
- show_html_page(argv[0]);
+ rc = show_html_page(argv[0]);
+ break;
+ case HELP_FORMAT_NONE:
+ /* fall-through */
default:
+ rc = -1;
break;
}
- return 0;
+ return rc;
}
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 3beab489afc5..1eaa6617c814 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -17,9 +17,9 @@
static char const *input_name = "-";
static bool inject_build_ids;
-static int perf_event__repipe_synth(struct perf_tool *tool __used,
+static int perf_event__repipe_synth(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct machine *machine __used)
+ struct machine *machine __maybe_unused)
{
uint32_t size;
void *buf = event;
@@ -40,7 +40,8 @@ static int perf_event__repipe_synth(struct perf_tool *tool __used,
static int perf_event__repipe_op2_synth(struct perf_tool *tool,
union perf_event *event,
- struct perf_session *session __used)
+ struct perf_session *session
+ __maybe_unused)
{
return perf_event__repipe_synth(tool, event, NULL);
}
@@ -52,13 +53,14 @@ static int perf_event__repipe_event_type_synth(struct perf_tool *tool,
}
static int perf_event__repipe_tracing_data_synth(union perf_event *event,
- struct perf_session *session __used)
+ struct perf_session *session
+ __maybe_unused)
{
return perf_event__repipe_synth(NULL, event, NULL);
}
static int perf_event__repipe_attr(union perf_event *event,
- struct perf_evlist **pevlist __used)
+ struct perf_evlist **pevlist __maybe_unused)
{
int ret;
ret = perf_event__process_attr(event, pevlist);
@@ -70,7 +72,7 @@ static int perf_event__repipe_attr(union perf_event *event,
static int perf_event__repipe(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
+ struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return perf_event__repipe_synth(tool, event, machine);
@@ -78,8 +80,8 @@ static int perf_event__repipe(struct perf_tool *tool,
static int perf_event__repipe_sample(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
- struct perf_evsel *evsel __used,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
return perf_event__repipe_synth(tool, event, machine);
@@ -163,7 +165,7 @@ static int dso__inject_build_id(struct dso *self, struct perf_tool *tool,
static int perf_event__inject_buildid(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
+ struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
struct addr_location al;
@@ -191,10 +193,13 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
* If this fails, too bad, let the other side
* account this as unresolved.
*/
- } else
+ } else {
+#ifndef NO_LIBELF_SUPPORT
pr_warning("no symbols found in %s, maybe "
"install a debug package?\n",
al.map->dso->long_name);
+#endif
+ }
}
}
@@ -221,7 +226,7 @@ struct perf_tool perf_inject = {
extern volatile int session_done;
-static void sig_handler(int sig __attribute__((__unused__)))
+static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
@@ -264,7 +269,7 @@ static const struct option options[] = {
OPT_END()
};
-int cmd_inject(int argc, const char **argv, const char *prefix __used)
+int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, options, report_usage, 0);
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index ce35015f2dc6..bc912c68f49a 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -1,6 +1,8 @@
#include "builtin.h"
#include "perf.h"
+#include "util/evlist.h"
+#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
@@ -57,46 +59,52 @@ static unsigned long nr_allocs, nr_cross_allocs;
#define PATH_SYS_NODE "/sys/devices/system/node"
-struct perf_kmem {
- struct perf_tool tool;
- struct perf_session *session;
-};
-
-static void init_cpunode_map(void)
+static int init_cpunode_map(void)
{
FILE *fp;
- int i;
+ int i, err = -1;
fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
if (!fp) {
max_cpu_num = 4096;
- return;
+ return 0;
+ }
+
+ if (fscanf(fp, "%d", &max_cpu_num) < 1) {
+ pr_err("Failed to read 'kernel_max' from sysfs");
+ goto out_close;
}
- if (fscanf(fp, "%d", &max_cpu_num) < 1)
- die("Failed to read 'kernel_max' from sysfs");
max_cpu_num++;
cpunode_map = calloc(max_cpu_num, sizeof(int));
- if (!cpunode_map)
- die("calloc");
+ if (!cpunode_map) {
+ pr_err("%s: calloc failed\n", __func__);
+ goto out_close;
+ }
+
for (i = 0; i < max_cpu_num; i++)
cpunode_map[i] = -1;
+
+ err = 0;
+out_close:
fclose(fp);
+ return err;
}
-static void setup_cpunode_map(void)
+static int setup_cpunode_map(void)
{
struct dirent *dent1, *dent2;
DIR *dir1, *dir2;
unsigned int cpu, mem;
char buf[PATH_MAX];
- init_cpunode_map();
+ if (init_cpunode_map())
+ return -1;
dir1 = opendir(PATH_SYS_NODE);
if (!dir1)
- return;
+ return -1;
while ((dent1 = readdir(dir1)) != NULL) {
if (dent1->d_type != DT_DIR ||
@@ -116,10 +124,11 @@ static void setup_cpunode_map(void)
closedir(dir2);
}
closedir(dir1);
+ return 0;
}
-static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
- int bytes_req, int bytes_alloc, int cpu)
+static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
+ int bytes_req, int bytes_alloc, int cpu)
{
struct rb_node **node = &root_alloc_stat.rb_node;
struct rb_node *parent = NULL;
@@ -143,8 +152,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
data->bytes_alloc += bytes_alloc;
} else {
data = malloc(sizeof(*data));
- if (!data)
- die("malloc");
+ if (!data) {
+ pr_err("%s: malloc failed\n", __func__);
+ return -1;
+ }
data->ptr = ptr;
data->pingpong = 0;
data->hit = 1;
@@ -156,9 +167,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
}
data->call_site = call_site;
data->alloc_cpu = cpu;
+ return 0;
}
-static void insert_caller_stat(unsigned long call_site,
+static int insert_caller_stat(unsigned long call_site,
int bytes_req, int bytes_alloc)
{
struct rb_node **node = &root_caller_stat.rb_node;
@@ -183,8 +195,10 @@ static void insert_caller_stat(unsigned long call_site,
data->bytes_alloc += bytes_alloc;
} else {
data = malloc(sizeof(*data));
- if (!data)
- die("malloc");
+ if (!data) {
+ pr_err("%s: malloc failed\n", __func__);
+ return -1;
+ }
data->call_site = call_site;
data->pingpong = 0;
data->hit = 1;
@@ -194,39 +208,43 @@ static void insert_caller_stat(unsigned long call_site,
rb_link_node(&data->node, parent, node);
rb_insert_color(&data->node, &root_caller_stat);
}
+
+ return 0;
}
-static void process_alloc_event(void *data,
- struct event_format *event,
- int cpu,
- u64 timestamp __used,
- struct thread *thread __used,
- int node)
+static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- unsigned long call_site;
- unsigned long ptr;
- int bytes_req;
- int bytes_alloc;
- int node1, node2;
-
- ptr = raw_field_value(event, "ptr", data);
- call_site = raw_field_value(event, "call_site", data);
- bytes_req = raw_field_value(event, "bytes_req", data);
- bytes_alloc = raw_field_value(event, "bytes_alloc", data);
+ unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
+ call_site = perf_evsel__intval(evsel, sample, "call_site");
+ int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
+ bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
- insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
- insert_caller_stat(call_site, bytes_req, bytes_alloc);
+ if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
+ insert_caller_stat(call_site, bytes_req, bytes_alloc))
+ return -1;
total_requested += bytes_req;
total_allocated += bytes_alloc;
- if (node) {
- node1 = cpunode_map[cpu];
- node2 = raw_field_value(event, "node", data);
+ nr_allocs++;
+ return 0;
+}
+
+static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ int ret = perf_evsel__process_alloc_event(evsel, sample);
+
+ if (!ret) {
+ int node1 = cpunode_map[sample->cpu],
+ node2 = perf_evsel__intval(evsel, sample, "node");
+
if (node1 != node2)
nr_cross_allocs++;
}
- nr_allocs++;
+
+ return ret;
}
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
@@ -257,66 +275,37 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
return NULL;
}
-static void process_free_event(void *data,
- struct event_format *event,
- int cpu,
- u64 timestamp __used,
- struct thread *thread __used)
+static int perf_evsel__process_free_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- unsigned long ptr;
+ unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
struct alloc_stat *s_alloc, *s_caller;
- ptr = raw_field_value(event, "ptr", data);
-
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
if (!s_alloc)
- return;
+ return 0;
- if (cpu != s_alloc->alloc_cpu) {
+ if ((short)sample->cpu != s_alloc->alloc_cpu) {
s_alloc->pingpong++;
s_caller = search_alloc_stat(0, s_alloc->call_site,
&root_caller_stat, callsite_cmp);
- assert(s_caller);
+ if (!s_caller)
+ return -1;
s_caller->pingpong++;
}
s_alloc->alloc_cpu = -1;
-}
-static void process_raw_event(struct perf_tool *tool,
- union perf_event *raw_event __used, void *data,
- int cpu, u64 timestamp, struct thread *thread)
-{
- struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool);
- struct event_format *event;
- int type;
-
- type = trace_parse_common_type(kmem->session->pevent, data);
- event = pevent_find_event(kmem->session->pevent, type);
-
- if (!strcmp(event->name, "kmalloc") ||
- !strcmp(event->name, "kmem_cache_alloc")) {
- process_alloc_event(data, event, cpu, timestamp, thread, 0);
- return;
- }
-
- if (!strcmp(event->name, "kmalloc_node") ||
- !strcmp(event->name, "kmem_cache_alloc_node")) {
- process_alloc_event(data, event, cpu, timestamp, thread, 1);
- return;
- }
-
- if (!strcmp(event->name, "kfree") ||
- !strcmp(event->name, "kmem_cache_free")) {
- process_free_event(data, event, cpu, timestamp, thread);
- return;
- }
+ return 0;
}
-static int process_sample_event(struct perf_tool *tool,
+typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
+
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
+ struct perf_evsel *evsel,
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
@@ -329,18 +318,18 @@ static int process_sample_event(struct perf_tool *tool,
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- process_raw_event(tool, event, sample->raw_data, sample->cpu,
- sample->time, thread);
+ if (evsel->handler.func != NULL) {
+ tracepoint_handler f = evsel->handler.func;
+ return f(evsel, sample);
+ }
return 0;
}
-static struct perf_kmem perf_kmem = {
- .tool = {
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .ordered_samples = true,
- },
+static struct perf_tool perf_kmem = {
+ .sample = process_sample_event,
+ .comm = perf_event__process_comm,
+ .ordered_samples = true,
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -496,22 +485,32 @@ static int __cmd_kmem(void)
{
int err = -EINVAL;
struct perf_session *session;
-
- session = perf_session__new(input_name, O_RDONLY, 0, false,
- &perf_kmem.tool);
+ const struct perf_evsel_str_handler kmem_tracepoints[] = {
+ { "kmem:kmalloc", perf_evsel__process_alloc_event, },
+ { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
+ { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
+ { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
+ { "kmem:kfree", perf_evsel__process_free_event, },
+ { "kmem:kmem_cache_free", perf_evsel__process_free_event, },
+ };
+
+ session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem);
if (session == NULL)
return -ENOMEM;
- perf_kmem.session = session;
-
if (perf_session__create_kernel_maps(session) < 0)
goto out_delete;
if (!perf_session__has_traces(session, "kmem record"))
goto out_delete;
+ if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ return -1;
+ }
+
setup_pager();
- err = perf_session__process_events(session, &perf_kmem.tool);
+ err = perf_session__process_events(session, &perf_kmem);
if (err != 0)
goto out_delete;
sort_result();
@@ -635,8 +634,10 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
for (i = 0; i < NUM_AVAIL_SORTS; i++) {
if (!strcmp(avail_sorts[i]->name, tok)) {
sort = malloc(sizeof(*sort));
- if (!sort)
- die("malloc");
+ if (!sort) {
+ pr_err("%s: malloc failed\n", __func__);
+ return -1;
+ }
memcpy(sort, avail_sorts[i], sizeof(*sort));
list_add_tail(&sort->list, list);
return 0;
@@ -651,8 +652,10 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
char *tok;
char *str = strdup(arg);
- if (!str)
- die("strdup");
+ if (!str) {
+ pr_err("%s: strdup failed\n", __func__);
+ return -1;
+ }
while (true) {
tok = strsep(&str, ",");
@@ -669,8 +672,8 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
return 0;
}
-static int parse_sort_opt(const struct option *opt __used,
- const char *arg, int unset __used)
+static int parse_sort_opt(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
{
if (!arg)
return -1;
@@ -683,22 +686,24 @@ static int parse_sort_opt(const struct option *opt __used,
return 0;
}
-static int parse_caller_opt(const struct option *opt __used,
- const char *arg __used, int unset __used)
+static int parse_caller_opt(const struct option *opt __maybe_unused,
+ const char *arg __maybe_unused,
+ int unset __maybe_unused)
{
caller_flag = (alloc_flag + 1);
return 0;
}
-static int parse_alloc_opt(const struct option *opt __used,
- const char *arg __used, int unset __used)
+static int parse_alloc_opt(const struct option *opt __maybe_unused,
+ const char *arg __maybe_unused,
+ int unset __maybe_unused)
{
alloc_flag = (caller_flag + 1);
return 0;
}
-static int parse_line_opt(const struct option *opt __used,
- const char *arg, int unset __used)
+static int parse_line_opt(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
{
int lines;
@@ -768,7 +773,7 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
-int cmd_kmem(int argc, const char **argv, const char *prefix __used)
+int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);
@@ -780,7 +785,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used)
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
} else if (!strcmp(argv[0], "stat")) {
- setup_cpunode_map();
+ if (setup_cpunode_map())
+ return -1;
if (list_empty(&caller_sort))
setup_sorting(&caller_sort, default_sort_order);
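The kmem rework replaces the strcmp() chain that used to live in process_raw_event() with the kmem_tracepoints[] table, which binds each tracepoint name to a handler once at session setup; process_sample_event() then only follows the resolved evsel->handler.func pointer. Below is a standalone sketch of that dispatch shape. The struct sample, process_alloc()/process_free() and dispatch() names are stand-ins for illustration, not the real perf_evsel/perf_sample API, and the string lookup happens per call here purely to keep the example short:

#include <stdio.h>
#include <string.h>

/* simplified stand-ins for perf's sample and handler types */
struct sample { unsigned int cpu; unsigned long long ptr; };
typedef int (*tracepoint_handler)(struct sample *sample);

static int process_alloc(struct sample *s)
{
	printf("alloc ptr=%#llx on cpu %u\n", s->ptr, s->cpu);
	return 0;
}

static int process_free(struct sample *s)
{
	printf("free  ptr=%#llx\n", s->ptr);
	return 0;
}

/* name -> handler table, the same shape as kmem_tracepoints[] above */
static const struct {
	const char *name;
	tracepoint_handler handler;
} tracepoints[] = {
	{ "kmem:kmalloc",	process_alloc },
	{ "kmem:kfree",		process_free  },
};

/* stand-in for the per-sample callback: find the handler and call it */
static int dispatch(const char *event_name, struct sample *s)
{
	unsigned int i;

	for (i = 0; i < sizeof(tracepoints) / sizeof(tracepoints[0]); i++)
		if (!strcmp(tracepoints[i].name, event_name))
			return tracepoints[i].handler(s);

	return 0;	/* samples without a handler are silently ignored */
}

int main(void)
{
	struct sample s = { .cpu = 1, .ptr = 0xdead4000ULL };

	return dispatch("kmem:kmalloc", &s);
}

In the real code the name lookup is done once by perf_session__set_tracepoints_handlers(); the hot per-sample path never compares strings again.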
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 9fc6e0fa3dce..a28c9cad9048 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1,6 +1,7 @@
#include "builtin.h"
#include "perf.h"
+#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
@@ -10,8 +11,10 @@
#include "util/parse-options.h"
#include "util/trace-event.h"
-
#include "util/debug.h"
+#include "util/debugfs.h"
+#include "util/tool.h"
+#include "util/stat.h"
#include <sys/prctl.h>
@@ -19,11 +22,836 @@
#include <pthread.h>
#include <math.h>
-static const char *file_name;
+#include "../../arch/x86/include/asm/svm.h"
+#include "../../arch/x86/include/asm/vmx.h"
+#include "../../arch/x86/include/asm/kvm.h"
+
+struct event_key {
+ #define INVALID_KEY (~0ULL)
+ u64 key;
+ int info;
+};
+
+struct kvm_events_ops {
+ bool (*is_begin_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key);
+ bool (*is_end_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample, struct event_key *key);
+ void (*decode_key)(struct event_key *key, char decode[20]);
+ const char *name;
+};
+
+static void exit_event_get_key(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->info = 0;
+ key->key = perf_evsel__intval(evsel, sample, "exit_reason");
+}
+
+static bool kvm_exit_event(struct perf_evsel *evsel)
+{
+ return !strcmp(evsel->name, "kvm:kvm_exit");
+}
+
+static bool exit_event_begin(struct perf_evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ if (kvm_exit_event(evsel)) {
+ exit_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool kvm_entry_event(struct perf_evsel *evsel)
+{
+ return !strcmp(evsel->name, "kvm:kvm_entry");
+}
+
+static bool exit_event_end(struct perf_evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+struct exit_reasons_table {
+ unsigned long exit_code;
+ const char *reason;
+};
+
+struct exit_reasons_table vmx_exit_reasons[] = {
+ VMX_EXIT_REASONS
+};
+
+struct exit_reasons_table svm_exit_reasons[] = {
+ SVM_EXIT_REASONS
+};
+
+static int cpu_isa;
+
+static const char *get_exit_reason(u64 exit_code)
+{
+ int table_size = ARRAY_SIZE(svm_exit_reasons);
+ struct exit_reasons_table *table = svm_exit_reasons;
+
+ if (cpu_isa == 1) {
+ table = vmx_exit_reasons;
+ table_size = ARRAY_SIZE(vmx_exit_reasons);
+ }
+
+ while (table_size--) {
+ if (table->exit_code == exit_code)
+ return table->reason;
+ table++;
+ }
+
+ pr_err("unknown kvm exit code:%lld on %s\n",
+ (unsigned long long)exit_code, cpu_isa ? "VMX" : "SVM");
+ return "UNKNOWN";
+}
+
+static void exit_event_decode_key(struct event_key *key, char decode[20])
+{
+ const char *exit_reason = get_exit_reason(key->key);
+
+ scnprintf(decode, 20, "%s", exit_reason);
+}
+
+static struct kvm_events_ops exit_events = {
+ .is_begin_event = exit_event_begin,
+ .is_end_event = exit_event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+ /*
+ * For the mmio events, we treat:
+ * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
+ * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
+ */
+static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = perf_evsel__intval(evsel, sample, "gpa");
+ key->info = perf_evsel__intval(evsel, sample, "type");
+}
+
+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
+#define KVM_TRACE_MMIO_READ 1
+#define KVM_TRACE_MMIO_WRITE 2
+
+static bool mmio_event_begin(struct perf_evsel *evsel,
+ struct perf_sample *sample, struct event_key *key)
+{
+ /* MMIO read begin event in kernel. */
+ if (kvm_exit_event(evsel))
+ return true;
+
+ /* MMIO write begin event in kernel. */
+ if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+ perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
+ mmio_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
+ struct event_key *key)
+{
+ /* MMIO write end event in kernel. */
+ if (kvm_entry_event(evsel))
+ return true;
+
+	/* MMIO read end event in kernel. */
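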
+ if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+ perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
+ mmio_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static void mmio_event_decode_key(struct event_key *key, char decode[20])
+{
+ scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
+ key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
+}
+
+static struct kvm_events_ops mmio_events = {
+ .is_begin_event = mmio_event_begin,
+ .is_end_event = mmio_event_end,
+ .decode_key = mmio_event_decode_key,
+ .name = "MMIO Access"
+};
+
+ /* The time of emulation pio access is from kvm_pio to kvm_entry. */
+static void ioport_event_get_key(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = perf_evsel__intval(evsel, sample, "port");
+ key->info = perf_evsel__intval(evsel, sample, "rw");
+}
+
+static bool ioport_event_begin(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (!strcmp(evsel->name, "kvm:kvm_pio")) {
+ ioport_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool ioport_event_end(struct perf_evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+static void ioport_event_decode_key(struct event_key *key, char decode[20])
+{
+ scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
+ key->info ? "POUT" : "PIN");
+}
+
+static struct kvm_events_ops ioport_events = {
+ .is_begin_event = ioport_event_begin,
+ .is_end_event = ioport_event_end,
+ .decode_key = ioport_event_decode_key,
+ .name = "IO Port Access"
+};
+
+static const char *report_event = "vmexit";
+struct kvm_events_ops *events_ops;
+
+static bool register_kvm_events_ops(void)
+{
+ bool ret = true;
+
+ if (!strcmp(report_event, "vmexit"))
+ events_ops = &exit_events;
+ else if (!strcmp(report_event, "mmio"))
+ events_ops = &mmio_events;
+ else if (!strcmp(report_event, "ioport"))
+ events_ops = &ioport_events;
+ else {
+ pr_err("Unknown report event:%s\n", report_event);
+ ret = false;
+ }
+
+ return ret;
+}
+
+struct kvm_event_stats {
+ u64 time;
+ struct stats stats;
+};
+
+struct kvm_event {
+ struct list_head hash_entry;
+ struct rb_node rb;
+
+ struct event_key key;
+
+ struct kvm_event_stats total;
+
+ #define DEFAULT_VCPU_NUM 8
+ int max_vcpu;
+ struct kvm_event_stats *vcpu;
+};
+
+struct vcpu_event_record {
+ int vcpu_id;
+ u64 start_time;
+ struct kvm_event *last_event;
+};
+
+#define EVENTS_BITS 12
+#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS)
+
+static u64 total_time;
+static u64 total_count;
+static struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
+
+static void init_kvm_event_record(void)
+{
+ int i;
+
+ for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++)
+ INIT_LIST_HEAD(&kvm_events_cache[i]);
+}
+
+static int kvm_events_hash_fn(u64 key)
+{
+ return key & (EVENTS_CACHE_SIZE - 1);
+}
+
+static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
+{
+ int old_max_vcpu = event->max_vcpu;
+
+ if (vcpu_id < event->max_vcpu)
+ return true;
+
+ while (event->max_vcpu <= vcpu_id)
+ event->max_vcpu += DEFAULT_VCPU_NUM;
+
+ event->vcpu = realloc(event->vcpu,
+ event->max_vcpu * sizeof(*event->vcpu));
+ if (!event->vcpu) {
+ pr_err("Not enough memory\n");
+ return false;
+ }
+
+ memset(event->vcpu + old_max_vcpu, 0,
+ (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
+ return true;
+}
+
+static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
+{
+ struct kvm_event *event;
+
+ event = zalloc(sizeof(*event));
+ if (!event) {
+ pr_err("Not enough memory\n");
+ return NULL;
+ }
+
+ event->key = *key;
+ return event;
+}
+
+static struct kvm_event *find_create_kvm_event(struct event_key *key)
+{
+ struct kvm_event *event;
+ struct list_head *head;
+
+ BUG_ON(key->key == INVALID_KEY);
+
+ head = &kvm_events_cache[kvm_events_hash_fn(key->key)];
+ list_for_each_entry(event, head, hash_entry)
+ if (event->key.key == key->key && event->key.info == key->info)
+ return event;
+
+ event = kvm_alloc_init_event(key);
+ if (!event)
+ return NULL;
+
+ list_add(&event->hash_entry, head);
+ return event;
+}
+
+static bool handle_begin_event(struct vcpu_event_record *vcpu_record,
+ struct event_key *key, u64 timestamp)
+{
+ struct kvm_event *event = NULL;
+
+ if (key->key != INVALID_KEY)
+ event = find_create_kvm_event(key);
+
+ vcpu_record->last_event = event;
+ vcpu_record->start_time = timestamp;
+ return true;
+}
+
+static void
+kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
+{
+ kvm_stats->time += time_diff;
+ update_stats(&kvm_stats->stats, time_diff);
+}
+
+static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
+{
+ struct kvm_event_stats *kvm_stats = &event->total;
+
+ if (vcpu_id != -1)
+ kvm_stats = &event->vcpu[vcpu_id];
+
+ return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
+ avg_stats(&kvm_stats->stats));
+}
+
+static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
+ u64 time_diff)
+{
+ kvm_update_event_stats(&event->total, time_diff);
+
+ if (!kvm_event_expand(event, vcpu_id))
+ return false;
+
+ kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
+ return true;
+}
+
+static bool handle_end_event(struct vcpu_event_record *vcpu_record,
+ struct event_key *key, u64 timestamp)
+{
+ struct kvm_event *event;
+ u64 time_begin, time_diff;
+
+ event = vcpu_record->last_event;
+ time_begin = vcpu_record->start_time;
+
+ /* The begin event is not caught. */
+ if (!time_begin)
+ return true;
+
+ /*
+	 * In some cases, the 'begin event' only records the start timestamp,
+ * the actual event is recognized in the 'end event' (e.g. mmio-event).
+ */
+
+ /* Both begin and end events did not get the key. */
+ if (!event && key->key == INVALID_KEY)
+ return true;
+
+ if (!event)
+ event = find_create_kvm_event(key);
+
+ if (!event)
+ return false;
+
+ vcpu_record->last_event = NULL;
+ vcpu_record->start_time = 0;
+
+ BUG_ON(timestamp < time_begin);
+
+ time_diff = timestamp - time_begin;
+ return update_kvm_event(event, vcpu_record->vcpu_id, time_diff);
+}
+
+static
+struct vcpu_event_record *per_vcpu_record(struct thread *thread,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ /* Only kvm_entry records vcpu id. */
+ if (!thread->priv && kvm_entry_event(evsel)) {
+ struct vcpu_event_record *vcpu_record;
+
+ vcpu_record = zalloc(sizeof(*vcpu_record));
+ if (!vcpu_record) {
+ pr_err("%s: Not enough memory\n", __func__);
+ return NULL;
+ }
+
+ vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
+ thread->priv = vcpu_record;
+ }
+
+ return thread->priv;
+}
+
+static bool handle_kvm_event(struct thread *thread, struct perf_evsel *evsel,
+ struct perf_sample *sample)
+{
+ struct vcpu_event_record *vcpu_record;
+ struct event_key key = {.key = INVALID_KEY};
+
+ vcpu_record = per_vcpu_record(thread, evsel, sample);
+ if (!vcpu_record)
+ return true;
+
+ if (events_ops->is_begin_event(evsel, sample, &key))
+ return handle_begin_event(vcpu_record, &key, sample->time);
+
+ if (events_ops->is_end_event(evsel, sample, &key))
+ return handle_end_event(vcpu_record, &key, sample->time);
+
+ return true;
+}
+
+typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
+struct kvm_event_key {
+ const char *name;
+ key_cmp_fun key;
+};
+
+static int trace_vcpu = -1;
+#define GET_EVENT_KEY(func, field) \
+static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
+{ \
+ if (vcpu == -1) \
+ return event->total.field; \
+ \
+ if (vcpu >= event->max_vcpu) \
+ return 0; \
+ \
+ return event->vcpu[vcpu].field; \
+}
+
+#define COMPARE_EVENT_KEY(func, field) \
+GET_EVENT_KEY(func, field) \
+static int compare_kvm_event_ ## func(struct kvm_event *one, \
+ struct kvm_event *two, int vcpu)\
+{ \
+ return get_event_ ##func(one, vcpu) > \
+ get_event_ ##func(two, vcpu); \
+}
+
+GET_EVENT_KEY(time, time);
+COMPARE_EVENT_KEY(count, stats.n);
+COMPARE_EVENT_KEY(mean, stats.mean);
+
+#define DEF_SORT_NAME_KEY(name, compare_key) \
+ { #name, compare_kvm_event_ ## compare_key }
+
+static struct kvm_event_key keys[] = {
+ DEF_SORT_NAME_KEY(sample, count),
+ DEF_SORT_NAME_KEY(time, mean),
+ { NULL, NULL }
+};
+
+static const char *sort_key = "sample";
+static key_cmp_fun compare;
+
+static bool select_key(void)
+{
+ int i;
+
+ for (i = 0; keys[i].name; i++) {
+ if (!strcmp(keys[i].name, sort_key)) {
+ compare = keys[i].key;
+ return true;
+ }
+ }
+
+ pr_err("Unknown compare key:%s\n", sort_key);
+ return false;
+}
+
+static struct rb_root result;
+static void insert_to_result(struct kvm_event *event, key_cmp_fun bigger,
+ int vcpu)
+{
+ struct rb_node **rb = &result.rb_node;
+ struct rb_node *parent = NULL;
+ struct kvm_event *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct kvm_event, rb);
+ parent = *rb;
+
+ if (bigger(event, p, vcpu))
+ rb = &(*rb)->rb_left;
+ else
+ rb = &(*rb)->rb_right;
+ }
+
+ rb_link_node(&event->rb, parent, rb);
+ rb_insert_color(&event->rb, &result);
+}
+
+static void update_total_count(struct kvm_event *event, int vcpu)
+{
+ total_count += get_event_count(event, vcpu);
+ total_time += get_event_time(event, vcpu);
+}
+
+static bool event_is_valid(struct kvm_event *event, int vcpu)
+{
+ return !!get_event_count(event, vcpu);
+}
+
+static void sort_result(int vcpu)
+{
+ unsigned int i;
+ struct kvm_event *event;
+
+ for (i = 0; i < EVENTS_CACHE_SIZE; i++)
+ list_for_each_entry(event, &kvm_events_cache[i], hash_entry)
+ if (event_is_valid(event, vcpu)) {
+ update_total_count(event, vcpu);
+ insert_to_result(event, compare, vcpu);
+ }
+}
+
+/* returns left most element of result, and erase it */
+static struct kvm_event *pop_from_result(void)
+{
+ struct rb_node *node = rb_first(&result);
+
+ if (!node)
+ return NULL;
+
+ rb_erase(node, &result);
+ return container_of(node, struct kvm_event, rb);
+}
+
+static void print_vcpu_info(int vcpu)
+{
+ pr_info("Analyze events for ");
+
+ if (vcpu == -1)
+ pr_info("all VCPUs:\n\n");
+ else
+ pr_info("VCPU %d:\n\n", vcpu);
+}
+
+static void print_result(int vcpu)
+{
+ char decode[20];
+ struct kvm_event *event;
+
+ pr_info("\n\n");
+ print_vcpu_info(vcpu);
+ pr_info("%20s ", events_ops->name);
+ pr_info("%10s ", "Samples");
+ pr_info("%9s ", "Samples%");
+
+ pr_info("%9s ", "Time%");
+ pr_info("%16s ", "Avg time");
+ pr_info("\n\n");
+
+ while ((event = pop_from_result())) {
+ u64 ecount, etime;
+
+ ecount = get_event_count(event, vcpu);
+ etime = get_event_time(event, vcpu);
+
+ events_ops->decode_key(&event->key, decode);
+ pr_info("%20s ", decode);
+ pr_info("%10llu ", (unsigned long long)ecount);
+ pr_info("%8.2f%% ", (double)ecount / total_count * 100);
+ pr_info("%8.2f%% ", (double)etime / total_time * 100);
+ pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
+ kvm_event_rel_stddev(vcpu, event));
+ pr_info("\n");
+ }
+
+ pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n",
+ (unsigned long long)total_count, total_time / 1e3);
+}
+
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ struct thread *thread = machine__findnew_thread(machine, sample->tid);
+
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (!handle_kvm_event(thread, evsel, sample))
+ return -1;
+
+ return 0;
+}
+
+static struct perf_tool eops = {
+ .sample = process_sample_event,
+ .comm = perf_event__process_comm,
+ .ordered_samples = true,
+};
+
+static int get_cpu_isa(struct perf_session *session)
+{
+ char *cpuid = session->header.env.cpuid;
+ int isa;
+
+ if (strstr(cpuid, "Intel"))
+ isa = 1;
+ else if (strstr(cpuid, "AMD"))
+ isa = 0;
+ else {
+ pr_err("CPU %s is not supported.\n", cpuid);
+ isa = -ENOTSUP;
+ }
+
+ return isa;
+}
+
+static const char *file_name;
+
+static int read_events(void)
+{
+ struct perf_session *kvm_session;
+ int ret;
+
+ kvm_session = perf_session__new(file_name, O_RDONLY, 0, false, &eops);
+ if (!kvm_session) {
+ pr_err("Initializing perf session failed\n");
+ return -EINVAL;
+ }
+
+ if (!perf_session__has_traces(kvm_session, "kvm record"))
+ return -EINVAL;
+
+ /*
+ * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
+ * traced in the old kernel.
+ */
+ ret = get_cpu_isa(kvm_session);
+
+ if (ret < 0)
+ return ret;
+
+ cpu_isa = ret;
+
+ return perf_session__process_events(kvm_session, &eops);
+}
+
+static bool verify_vcpu(int vcpu)
+{
+ if (vcpu != -1 && vcpu < 0) {
+ pr_err("Invalid vcpu:%d.\n", vcpu);
+ return false;
+ }
+
+ return true;
+}
+
+static int kvm_events_report_vcpu(int vcpu)
+{
+ int ret = -EINVAL;
+
+ if (!verify_vcpu(vcpu))
+ goto exit;
+
+ if (!select_key())
+ goto exit;
+
+ if (!register_kvm_events_ops())
+ goto exit;
+
+ init_kvm_event_record();
+ setup_pager();
+
+ ret = read_events();
+ if (ret)
+ goto exit;
+
+ sort_result(vcpu);
+ print_result(vcpu);
+exit:
+ return ret;
+}
+
+static const char * const record_args[] = {
+ "record",
+ "-R",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "kvm:kvm_entry",
+ "-e", "kvm:kvm_exit",
+ "-e", "kvm:kvm_mmio",
+ "-e", "kvm:kvm_pio",
+};
+
+#define STRDUP_FAIL_EXIT(s) \
+ ({ char *_p; \
+ _p = strdup(s); \
+ if (!_p) \
+ return -ENOMEM; \
+ _p; \
+ })
+
+static int kvm_events_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
+ rec_argc = ARRAY_SIZE(record_args) + argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ if (rec_argv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
+
+ rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
+ rec_argv[i++] = STRDUP_FAIL_EXIT(file_name);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+static const char * const kvm_events_report_usage[] = {
+ "perf kvm stat report [<options>]",
+ NULL
+};
+
+static const struct option kvm_events_report_options[] = {
+ OPT_STRING(0, "event", &report_event, "report event",
+ "event for reporting: vmexit, mmio, ioport"),
+ OPT_INTEGER(0, "vcpu", &trace_vcpu,
+ "vcpu id to report"),
+ OPT_STRING('k', "key", &sort_key, "sort-key",
+ "key for sorting: sample(sort by samples number)"
+ " time (sort by avg time)"),
+ OPT_END()
+};
+
+static int kvm_events_report(int argc, const char **argv)
+{
+ symbol__init();
+
+ if (argc) {
+ argc = parse_options(argc, argv,
+ kvm_events_report_options,
+ kvm_events_report_usage, 0);
+ if (argc)
+ usage_with_options(kvm_events_report_usage,
+ kvm_events_report_options);
+ }
+
+ return kvm_events_report_vcpu(trace_vcpu);
+}
+
+static void print_kvm_stat_usage(void)
+{
+ printf("Usage: perf kvm stat <command>\n\n");
+
+ printf("# Available commands:\n");
+ printf("\trecord: record kvm events\n");
+ printf("\treport: report statistical data of kvm events\n");
+
+ printf("\nOtherwise, it is the alias of 'perf stat':\n");
+}
+
+static int kvm_cmd_stat(int argc, const char **argv)
+{
+ if (argc == 1) {
+ print_kvm_stat_usage();
+ goto perf_stat;
+ }
+
+ if (!strncmp(argv[1], "rec", 3))
+ return kvm_events_record(argc - 1, argv + 1);
+
+ if (!strncmp(argv[1], "rep", 3))
+		return kvm_events_report(argc - 1, argv + 1);
+
+perf_stat:
+ return cmd_stat(argc, argv, NULL);
+}
+
static char name_buffer[256];
static const char * const kvm_usage[] = {
- "perf kvm [<options>] {top|record|report|diff|buildid-list}",
+ "perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
NULL
};
@@ -102,7 +930,7 @@ static int __cmd_buildid_list(int argc, const char **argv)
return cmd_buildid_list(i, rec_argv, NULL);
}
-int cmd_kvm(int argc, const char **argv, const char *prefix __used)
+int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
{
perf_host = 0;
perf_guest = 1;
@@ -135,6 +963,8 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __used)
return cmd_top(argc, argv, NULL);
else if (!strncmp(argv[0], "buildid-list", 12))
return __cmd_buildid_list(argc, argv);
+ else if (!strncmp(argv[0], "stat", 4))
+ return kvm_cmd_stat(argc, argv);
else
usage_with_options(kvm_usage, kvm_options);
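The new 'perf kvm stat' code pairs a begin event (kvm_exit, an MMIO write, or kvm_pio) with the next kvm_entry on the same vcpu and feeds the elapsed time into a struct kvm_event_stats, which keeps a total plus a running count, mean and spread per event key and per vcpu. The sketch below shows only the running-statistics half as a generic one-pass (Welford-style) accumulator; it is not the util/stat.c implementation, and the field names and sample durations are invented for the example:

#include <math.h>
#include <stdio.h>

/* one-pass running count/mean/spread accumulator (generic sketch) */
struct stats {
	double n, mean, M2;
};

static void update_stats(struct stats *s, double val)
{
	double delta;

	s->n += 1.0;
	delta = val - s->mean;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);	/* sum of squared deviations */
}

static double stddev_stats(const struct stats *s)
{
	return s->n > 1.0 ? sqrt(s->M2 / (s->n - 1.0)) : 0.0;
}

int main(void)
{
	/* e.g. handling times of one exit reason, in nanoseconds (made up) */
	double durations[] = { 1200.0, 900.0, 1500.0, 1100.0 };
	struct stats s = { 0.0, 0.0, 0.0 };
	unsigned int i;

	for (i = 0; i < sizeof(durations) / sizeof(durations[0]); i++)
		update_stats(&s, durations[i]);

	printf("%.0f events, avg %.1f ns ( +-%.1f%% )\n",
	       s.n, s.mean, s.mean ? stddev_stats(&s) / s.mean * 100.0 : 0.0);
	return 0;
}

The report side then hashes each key into kvm_events_cache[], moves the entries that saw samples into an rb-tree ordered by the selected compare key, and prints the sample count, time share and the relative stddev derived from exactly this kind of accumulator.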
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 6313b6eb3ebb..1948eceb517a 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -14,20 +14,20 @@
#include "util/parse-events.h"
#include "util/cache.h"
-int cmd_list(int argc, const char **argv, const char *prefix __used)
+int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
{
setup_pager();
if (argc == 1)
- print_events(NULL);
+ print_events(NULL, false);
else {
int i;
for (i = 1; i < argc; ++i) {
- if (i > 1)
+ if (i > 2)
putchar('\n');
if (strncmp(argv[i], "tracepoint", 10) == 0)
- print_tracepoint_events(NULL, NULL);
+ print_tracepoint_events(NULL, NULL, false);
else if (strcmp(argv[i], "hw") == 0 ||
strcmp(argv[i], "hardware") == 0)
print_events_type(PERF_TYPE_HARDWARE);
@@ -36,13 +36,15 @@ int cmd_list(int argc, const char **argv, const char *prefix __used)
print_events_type(PERF_TYPE_SOFTWARE);
else if (strcmp(argv[i], "cache") == 0 ||
strcmp(argv[i], "hwcache") == 0)
- print_hwcache_events(NULL);
+ print_hwcache_events(NULL, false);
+ else if (strcmp(argv[i], "--raw-dump") == 0)
+ print_events(NULL, true);
else {
char *sep = strchr(argv[i], ':'), *s;
int sep_idx;
if (sep == NULL) {
- print_events(argv[i]);
+ print_events(argv[i], false);
continue;
}
sep_idx = sep - argv[i];
@@ -51,7 +53,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __used)
return -1;
s[sep_idx] = '\0';
- print_tracepoint_events(s, s + sep_idx + 1);
+ print_tracepoint_events(s, s + sep_idx + 1, false);
free(s);
}
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index b3c428548868..7d6e09949880 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1,6 +1,8 @@
#include "builtin.h"
#include "perf.h"
+#include "util/evlist.h"
+#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
@@ -40,7 +42,7 @@ struct lock_stat {
struct rb_node rb; /* used for sorting */
/*
- * FIXME: raw_field_value() returns unsigned long long,
+ * FIXME: perf_evsel__intval() returns u64,
	 * so the address of lockdep_map should be dealt with as 64 bits.
	 * Is there a better solution?
*/
@@ -160,8 +162,10 @@ static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
return st;
st = zalloc(sizeof(struct thread_stat));
- if (!st)
- die("memory allocation failed\n");
+ if (!st) {
+ pr_err("memory allocation failed\n");
+ return NULL;
+ }
st->tid = tid;
INIT_LIST_HEAD(&st->seq_list);
@@ -180,8 +184,10 @@ static struct thread_stat *thread_stat_findnew_first(u32 tid)
struct thread_stat *st;
st = zalloc(sizeof(struct thread_stat));
- if (!st)
- die("memory allocation failed\n");
+ if (!st) {
+ pr_err("memory allocation failed\n");
+ return NULL;
+ }
st->tid = tid;
INIT_LIST_HEAD(&st->seq_list);
@@ -247,18 +253,20 @@ struct lock_key keys[] = {
{ NULL, NULL }
};
-static void select_key(void)
+static int select_key(void)
{
int i;
for (i = 0; keys[i].name; i++) {
if (!strcmp(keys[i].name, sort_key)) {
compare = keys[i].key;
- return;
+ return 0;
}
}
- die("Unknown compare key:%s\n", sort_key);
+ pr_err("Unknown compare key: %s\n", sort_key);
+
+ return -1;
}
static void insert_to_result(struct lock_stat *st,
@@ -323,61 +331,24 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
return new;
alloc_failed:
- die("memory allocation failed\n");
+ pr_err("memory allocation failed\n");
+ return NULL;
}
static const char *input_name;
-struct raw_event_sample {
- u32 size;
- char data[0];
-};
-
-struct trace_acquire_event {
- void *addr;
- const char *name;
- int flag;
-};
-
-struct trace_acquired_event {
- void *addr;
- const char *name;
-};
+struct trace_lock_handler {
+ int (*acquire_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
-struct trace_contended_event {
- void *addr;
- const char *name;
-};
+ int (*acquired_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
-struct trace_release_event {
- void *addr;
- const char *name;
-};
+ int (*contended_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
-struct trace_lock_handler {
- void (*acquire_event)(struct trace_acquire_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*acquired_event)(struct trace_acquired_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*contended_event)(struct trace_contended_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*release_event)(struct trace_release_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
+ int (*release_event)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
};
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
@@ -390,8 +361,10 @@ static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
}
seq = zalloc(sizeof(struct lock_seq_stat));
- if (!seq)
- die("Not enough memory\n");
+ if (!seq) {
+ pr_err("memory allocation failed\n");
+ return NULL;
+ }
seq->state = SEQ_STATE_UNINITIALIZED;
seq->addr = addr;
@@ -414,33 +387,42 @@ enum acquire_flags {
READ_LOCK = 2,
};
-static void
-report_lock_acquire_event(struct trace_acquire_event *acquire_event,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int report_lock_acquire_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
+ const char *name = perf_evsel__strval(evsel, sample, "name");
+ u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+ int flag = perf_evsel__intval(evsel, sample, "flag");
+
+ memcpy(&addr, &tmp, sizeof(void *));
- ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+ ls = lock_stat_findnew(addr, name);
+ if (!ls)
+ return -1;
if (ls->discard)
- return;
+ return 0;
- ts = thread_stat_findnew(thread->pid);
- seq = get_seq(ts, acquire_event->addr);
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -1;
+
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -1;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
case SEQ_STATE_RELEASED:
- if (!acquire_event->flag) {
+ if (!flag) {
seq->state = SEQ_STATE_ACQUIRING;
} else {
- if (acquire_event->flag & TRY_LOCK)
+ if (flag & TRY_LOCK)
ls->nr_trylock++;
- if (acquire_event->flag & READ_LOCK)
+ if (flag & READ_LOCK)
ls->nr_readlock++;
seq->state = SEQ_STATE_READ_ACQUIRED;
seq->read_count = 1;
@@ -448,7 +430,7 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event,
}
break;
case SEQ_STATE_READ_ACQUIRED:
- if (acquire_event->flag & READ_LOCK) {
+ if (flag & READ_LOCK) {
seq->read_count++;
ls->nr_acquired++;
goto end;
@@ -473,38 +455,46 @@ broken:
}
ls->nr_acquire++;
- seq->prev_event_time = timestamp;
+ seq->prev_event_time = sample->time;
end:
- return;
+ return 0;
}
-static void
-report_lock_acquired_event(struct trace_acquired_event *acquired_event,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int report_lock_acquired_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
u64 contended_term;
+ const char *name = perf_evsel__strval(evsel, sample, "name");
+ u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+
+ memcpy(&addr, &tmp, sizeof(void *));
- ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+ ls = lock_stat_findnew(addr, name);
+ if (!ls)
+ return -1;
if (ls->discard)
- return;
+ return 0;
+
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -1;
- ts = thread_stat_findnew(thread->pid);
- seq = get_seq(ts, acquired_event->addr);
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -1;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
/* orphan event, do nothing */
- return;
+ return 0;
case SEQ_STATE_ACQUIRING:
break;
case SEQ_STATE_CONTENDED:
- contended_term = timestamp - seq->prev_event_time;
+ contended_term = sample->time - seq->prev_event_time;
ls->wait_time_total += contended_term;
if (contended_term < ls->wait_time_min)
ls->wait_time_min = contended_term;
@@ -529,33 +519,41 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event,
seq->state = SEQ_STATE_ACQUIRED;
ls->nr_acquired++;
- seq->prev_event_time = timestamp;
+ seq->prev_event_time = sample->time;
end:
- return;
+ return 0;
}
-static void
-report_lock_contended_event(struct trace_contended_event *contended_event,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int report_lock_contended_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
+ const char *name = perf_evsel__strval(evsel, sample, "name");
+ u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
- ls = lock_stat_findnew(contended_event->addr, contended_event->name);
+ memcpy(&addr, &tmp, sizeof(void *));
+
+ ls = lock_stat_findnew(addr, name);
+ if (!ls)
+ return -1;
if (ls->discard)
- return;
+ return 0;
+
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -1;
- ts = thread_stat_findnew(thread->pid);
- seq = get_seq(ts, contended_event->addr);
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -1;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
/* orphan event, do nothing */
- return;
+ return 0;
case SEQ_STATE_ACQUIRING:
break;
case SEQ_STATE_RELEASED:
@@ -576,28 +574,36 @@ report_lock_contended_event(struct trace_contended_event *contended_event,
seq->state = SEQ_STATE_CONTENDED;
ls->nr_contended++;
- seq->prev_event_time = timestamp;
+ seq->prev_event_time = sample->time;
end:
- return;
+ return 0;
}
-static void
-report_lock_release_event(struct trace_release_event *release_event,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int report_lock_release_event(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
+ const char *name = perf_evsel__strval(evsel, sample, "name");
+ u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+
+ memcpy(&addr, &tmp, sizeof(void *));
- ls = lock_stat_findnew(release_event->addr, release_event->name);
+ ls = lock_stat_findnew(addr, name);
+ if (!ls)
+ return -1;
if (ls->discard)
- return;
+ return 0;
+
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -1;
- ts = thread_stat_findnew(thread->pid);
- seq = get_seq(ts, release_event->addr);
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -1;
switch (seq->state) {
case SEQ_STATE_UNINITIALIZED:
@@ -631,7 +637,7 @@ free_seq:
list_del(&seq->list);
free(seq);
end:
- return;
+ return 0;
}
/* lock oriented handlers */
@@ -645,96 +651,36 @@ static struct trace_lock_handler report_lock_ops = {
static struct trace_lock_handler *trace_handler;
-static void
-process_lock_acquire_event(void *data,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
-{
- struct trace_acquire_event acquire_event;
- u64 tmp; /* this is required for casting... */
-
- tmp = raw_field_value(event, "lockdep_addr", data);
- memcpy(&acquire_event.addr, &tmp, sizeof(void *));
- acquire_event.name = (char *)raw_field_ptr(event, "name", data);
- acquire_event.flag = (int)raw_field_value(event, "flag", data);
-
- if (trace_handler->acquire_event)
- trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
-}
-
-static void
-process_lock_acquired_event(void *data,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- struct trace_acquired_event acquired_event;
- u64 tmp; /* this is required for casting... */
-
- tmp = raw_field_value(event, "lockdep_addr", data);
- memcpy(&acquired_event.addr, &tmp, sizeof(void *));
- acquired_event.name = (char *)raw_field_ptr(event, "name", data);
-
if (trace_handler->acquire_event)
- trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
+ return trace_handler->acquire_event(evsel, sample);
+ return 0;
}
-static void
-process_lock_contended_event(void *data,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- struct trace_contended_event contended_event;
- u64 tmp; /* this is required for casting... */
-
- tmp = raw_field_value(event, "lockdep_addr", data);
- memcpy(&contended_event.addr, &tmp, sizeof(void *));
- contended_event.name = (char *)raw_field_ptr(event, "name", data);
-
- if (trace_handler->acquire_event)
- trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
+ if (trace_handler->acquired_event)
+ return trace_handler->acquired_event(evsel, sample);
+ return 0;
}
-static void
-process_lock_release_event(void *data,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int perf_evsel__process_lock_contended(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- struct trace_release_event release_event;
- u64 tmp; /* this is required for casting... */
-
- tmp = raw_field_value(event, "lockdep_addr", data);
- memcpy(&release_event.addr, &tmp, sizeof(void *));
- release_event.name = (char *)raw_field_ptr(event, "name", data);
-
- if (trace_handler->acquire_event)
- trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
+ if (trace_handler->contended_event)
+ return trace_handler->contended_event(evsel, sample);
+ return 0;
}
-static void
-process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
+static int perf_evsel__process_lock_release(struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
- struct event_format *event;
- int type;
-
- type = trace_parse_common_type(session->pevent, data);
- event = pevent_find_event(session->pevent, type);
-
- if (!strcmp(event->name, "lock_acquire"))
- process_lock_acquire_event(data, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "lock_acquired"))
- process_lock_acquired_event(data, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "lock_contended"))
- process_lock_contended_event(data, event, cpu, timestamp, thread);
- if (!strcmp(event->name, "lock_release"))
- process_lock_release_event(data, event, cpu, timestamp, thread);
+ if (trace_handler->release_event)
+ return trace_handler->release_event(evsel, sample);
+ return 0;
}
static void print_bad_events(int bad, int total)
@@ -836,20 +782,29 @@ static void dump_map(void)
}
}
-static void dump_info(void)
+static int dump_info(void)
{
+ int rc = 0;
+
if (info_threads)
dump_threads();
else if (info_map)
dump_map();
- else
- die("Unknown type of information\n");
+ else {
+ rc = -1;
+ pr_err("Unknown type of information\n");
+ }
+
+ return rc;
}
-static int process_sample_event(struct perf_tool *tool __used,
+typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
+
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __used,
+ struct perf_evsel *evsel,
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, sample->tid);
@@ -860,7 +815,10 @@ static int process_sample_event(struct perf_tool *tool __used,
return -1;
}
- process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);
+ if (evsel->handler.func != NULL) {
+ tracepoint_handler f = evsel->handler.func;
+ return f(evsel, sample);
+ }
return 0;
}
@@ -871,11 +829,25 @@ static struct perf_tool eops = {
.ordered_samples = true,
};
+static const struct perf_evsel_str_handler lock_tracepoints[] = {
+ { "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
+ { "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+ { "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+ { "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
+};
+
static int read_events(void)
{
session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
- if (!session)
- die("Initializing perf session failed\n");
+ if (!session) {
+ pr_err("Initializing perf session failed\n");
+ return -1;
+ }
+
+ if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ return -1;
+ }
return perf_session__process_events(session, &eops);
}
@@ -892,13 +864,18 @@ static void sort_result(void)
}
}
-static void __cmd_report(void)
+static int __cmd_report(void)
{
setup_pager();
- select_key();
- read_events();
+
+ if ((select_key() != 0) ||
+ (read_events() != 0))
+ return -1;
+
sort_result();
print_result();
+
+ return 0;
}
static const char * const report_usage[] = {
@@ -944,10 +921,6 @@ static const char *record_args[] = {
"-f",
"-m", "1024",
"-c", "1",
- "-e", "lock:lock_acquire",
- "-e", "lock:lock_acquired",
- "-e", "lock:lock_contended",
- "-e", "lock:lock_release",
};
static int __cmd_record(int argc, const char **argv)
@@ -955,15 +928,31 @@ static int __cmd_record(int argc, const char **argv)
unsigned int rec_argc, i, j;
const char **rec_argv;
+ for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
+ if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
+ pr_err("tracepoint %s is not enabled. "
+ "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
+ lock_tracepoints[i].name);
+ return 1;
+ }
+ }
+
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
- rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ /* factor of 2 is for -e in front of each tracepoint */
+ rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
+ for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = strdup(lock_tracepoints[j].name);
+ }
+
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
@@ -972,9 +961,10 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
-int cmd_lock(int argc, const char **argv, const char *prefix __used)
+int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
{
unsigned int i;
+ int rc = 0;
symbol__init();
for (i = 0; i < LOCKHASH_SIZE; i++)
@@ -1009,11 +999,13 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used)
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
setup_pager();
- read_events();
- dump_info();
+ if (read_events() != 0)
+ rc = -1;
+ else
+ rc = dump_info();
} else {
usage_with_options(lock_usage, lock_options);
}
- return 0;
+ return rc;
}
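One detail worth spelling out from the reworked lock handlers: perf_evsel__intval() hands back lockdep_addr as a u64, and the handlers turn it into the void * used as the lock_stat hash key with a memcpy() of sizeof(void *) bytes, which is what the FIXME near the top of this file is about. A tiny standalone sketch of that pattern, with an invented address value:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t lockdep_addr = 0xffff880123456789ULL;	/* value from the trace */
	void *addr;

	/*
	 * Copy only sizeof(void *) bytes so the same code also builds on a
	 * 32-bit host; there the upper half of the 64-bit value is silently
	 * dropped (on little-endian), which is exactly the caveat the FIXME
	 * flags.
	 */
	memcpy(&addr, &lockdep_addr, sizeof(void *));

	printf("hash key: %p\n", addr);
	return 0;
}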
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index e215ae61b2ae..118aa8946573 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -143,8 +143,8 @@ static int parse_probe_event_argv(int argc, const char **argv)
return ret;
}
-static int opt_add_probe_event(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_add_probe_event(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
if (str) {
params.mod_events = true;
@@ -153,8 +153,8 @@ static int opt_add_probe_event(const struct option *opt __used,
return 0;
}
-static int opt_del_probe_event(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_del_probe_event(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
if (str) {
params.mod_events = true;
@@ -166,7 +166,7 @@ static int opt_del_probe_event(const struct option *opt __used,
}
static int opt_set_target(const struct option *opt, const char *str,
- int unset __used)
+ int unset __maybe_unused)
{
int ret = -ENOENT;
@@ -188,8 +188,8 @@ static int opt_set_target(const struct option *opt, const char *str,
}
#ifdef DWARF_SUPPORT
-static int opt_show_lines(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_show_lines(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
int ret = 0;
@@ -209,8 +209,8 @@ static int opt_show_lines(const struct option *opt __used,
return ret;
}
-static int opt_show_vars(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_show_vars(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
struct perf_probe_event *pev = &params.events[params.nevents];
int ret;
@@ -229,8 +229,8 @@ static int opt_show_vars(const struct option *opt __used,
}
#endif
-static int opt_set_filter(const struct option *opt __used,
- const char *str, int unset __used)
+static int opt_set_filter(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
const char *err;
@@ -327,7 +327,7 @@ static const struct option options[] = {
OPT_END()
};
-int cmd_probe(int argc, const char **argv, const char *prefix __used)
+int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
{
int ret;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4db6e1ba54e3..f14cb5fdb91f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -31,6 +31,15 @@
#include <sched.h>
#include <sys/mman.h>
+#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
+
+#ifdef NO_LIBUNWIND_SUPPORT
+static char callchain_help[] = CALLCHAIN_HELP "[fp]";
+#else
+static unsigned long default_stack_dump_size = 8192;
+static char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
+#endif
+
enum write_mode_t {
WRITE_FORCE,
WRITE_APPEND
@@ -62,32 +71,38 @@ static void advance_output(struct perf_record *rec, size_t size)
rec->bytes_written += size;
}
-static void write_output(struct perf_record *rec, void *buf, size_t size)
+static int write_output(struct perf_record *rec, void *buf, size_t size)
{
while (size) {
int ret = write(rec->output, buf, size);
- if (ret < 0)
- die("failed to write");
+ if (ret < 0) {
+ pr_err("failed to write\n");
+ return -1;
+ }
size -= ret;
buf += ret;
rec->bytes_written += ret;
}
+
+ return 0;
}
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
struct perf_record *rec = container_of(tool, struct perf_record, tool);
- write_output(rec, event, event->header.size);
+ if (write_output(rec, event, event->header.size) < 0)
+ return -1;
+
return 0;
}
-static void perf_record__mmap_read(struct perf_record *rec,
+static int perf_record__mmap_read(struct perf_record *rec,
struct perf_mmap *md)
{
unsigned int head = perf_mmap__read_head(md);
@@ -95,9 +110,10 @@ static void perf_record__mmap_read(struct perf_record *rec,
unsigned char *data = md->base + rec->page_size;
unsigned long size;
void *buf;
+ int rc = 0;
if (old == head)
- return;
+ return 0;
rec->samples++;
@@ -108,17 +124,26 @@ static void perf_record__mmap_read(struct perf_record *rec,
size = md->mask + 1 - (old & md->mask);
old += size;
- write_output(rec, buf, size);
+ if (write_output(rec, buf, size) < 0) {
+ rc = -1;
+ goto out;
+ }
}
buf = &data[old & md->mask];
size = head - old;
old += size;
- write_output(rec, buf, size);
+ if (write_output(rec, buf, size) < 0) {
+ rc = -1;
+ goto out;
+ }
md->prev = old;
perf_mmap__write_tail(md, old);
+
+out:
+ return rc;
}
static volatile int done = 0;
@@ -134,7 +159,7 @@ static void sig_handler(int sig)
signr = sig;
}
-static void perf_record__sig_exit(int exit_status __used, void *arg)
+static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
{
struct perf_record *rec = arg;
int status;
@@ -163,31 +188,32 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
if (evlist->nr_entries != other->nr_entries)
return false;
- pair = list_entry(other->entries.next, struct perf_evsel, node);
+ pair = perf_evlist__first(other);
list_for_each_entry(pos, &evlist->entries, node) {
if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0))
return false;
- pair = list_entry(pair->node.next, struct perf_evsel, node);
+ pair = perf_evsel__next(pair);
}
return true;
}
-static void perf_record__open(struct perf_record *rec)
+static int perf_record__open(struct perf_record *rec)
{
- struct perf_evsel *pos, *first;
+ struct perf_evsel *pos;
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct perf_record_opts *opts = &rec->opts;
-
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ int rc = 0;
perf_evlist__config_attrs(evlist, opts);
+ if (opts->group)
+ perf_evlist__set_leader(evlist);
+
list_for_each_entry(pos, &evlist->entries, node) {
struct perf_event_attr *attr = &pos->attr;
- struct xyarray *group_fd = NULL;
/*
* Check if parse_single_tracepoint_event has already asked for
* PERF_SAMPLE_TIME.
@@ -202,24 +228,24 @@ static void perf_record__open(struct perf_record *rec)
*/
bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
- if (opts->group && pos != first)
- group_fd = first->fd;
fallback_missing_features:
if (opts->exclude_guest_missing)
attr->exclude_guest = attr->exclude_host = 0;
retry_sample_id:
attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
try_again:
- if (perf_evsel__open(pos, evlist->cpus, evlist->threads,
- opts->group, group_fd) < 0) {
+ if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
ui__error_paranoid();
- exit(EXIT_FAILURE);
+ rc = -err;
+ goto out;
} else if (err == ENODEV && opts->target.cpu_list) {
- die("No such device - did you specify"
- " an out-of-range profile CPU?\n");
+ pr_err("No such device - did you specify"
+ " an out-of-range profile CPU?\n");
+ rc = -err;
+ goto out;
} else if (err == EINVAL) {
if (!opts->exclude_guest_missing &&
(attr->exclude_guest || attr->exclude_host)) {
@@ -266,42 +292,57 @@ try_again:
if (err == ENOENT) {
ui__error("The %s event is not supported.\n",
perf_evsel__name(pos));
- exit(EXIT_FAILURE);
+ rc = -err;
+ goto out;
}
printf("\n");
- error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
- err, strerror(err));
+ error("sys_perf_event_open() syscall returned with %d "
+ "(%s) for event %s. /bin/dmesg may provide "
+ "additional information.\n",
+ err, strerror(err), perf_evsel__name(pos));
#if defined(__i386__) || defined(__x86_64__)
- if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
- die("No hardware sampling interrupt available."
- " No APIC? If so then you can boot the kernel"
- " with the \"lapic\" boot parameter to"
- " force-enable it.\n");
+ if (attr->type == PERF_TYPE_HARDWARE &&
+ err == EOPNOTSUPP) {
+ pr_err("No hardware sampling interrupt available."
+ " No APIC? If so then you can boot the kernel"
+ " with the \"lapic\" boot parameter to"
+ " force-enable it.\n");
+ rc = -err;
+ goto out;
+ }
#endif
- die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ rc = -err;
+ goto out;
}
}
- if (perf_evlist__set_filters(evlist)) {
+ if (perf_evlist__apply_filters(evlist)) {
error("failed to set filter with %d (%s)\n", errno,
strerror(errno));
- exit(-1);
+ rc = -1;
+ goto out;
}
if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
- if (errno == EPERM)
- die("Permission error mapping pages.\n"
- "Consider increasing "
- "/proc/sys/kernel/perf_event_mlock_kb,\n"
- "or try again with a smaller value of -m/--mmap_pages.\n"
- "(current value: %d)\n", opts->mmap_pages);
- else if (!is_power_of_2(opts->mmap_pages))
- die("--mmap_pages/-m value must be a power of two.");
-
- die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ if (errno == EPERM) {
+ pr_err("Permission error mapping pages.\n"
+ "Consider increasing "
+ "/proc/sys/kernel/perf_event_mlock_kb,\n"
+ "or try again with a smaller value of -m/--mmap_pages.\n"
+ "(current value: %d)\n", opts->mmap_pages);
+ rc = -errno;
+ } else if (!is_power_of_2(opts->mmap_pages)) {
+ pr_err("--mmap_pages/-m value must be a power of two.");
+ rc = -EINVAL;
+ } else {
+ pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ rc = -errno;
+ }
+ goto out;
}
if (rec->file_new)
@@ -309,11 +350,14 @@ try_again:
else {
if (!perf_evlist__equal(session->evlist, evlist)) {
fprintf(stderr, "incompatible append\n");
- exit(-1);
+ rc = -1;
+ goto out;
}
}
perf_session__set_id_hdr_size(session);
+out:
+ return rc;
}
static int process_buildids(struct perf_record *rec)
@@ -329,10 +373,13 @@ static int process_buildids(struct perf_record *rec)
size, &build_id__mark_dso_hit_ops);
}
-static void perf_record__exit(int status __used, void *arg)
+static void perf_record__exit(int status, void *arg)
{
struct perf_record *rec = arg;
+ if (status != 0)
+ return;
+
if (!rec->opts.pipe_output) {
rec->session->header.data_size += rec->bytes_written;
@@ -387,17 +434,26 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
-static void perf_record__mmap_read_all(struct perf_record *rec)
+static int perf_record__mmap_read_all(struct perf_record *rec)
{
int i;
+ int rc = 0;
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
- if (rec->evlist->mmap[i].base)
- perf_record__mmap_read(rec, &rec->evlist->mmap[i]);
+ if (rec->evlist->mmap[i].base) {
+ if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
+ rc = -1;
+ goto out;
+ }
+ }
}
if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
- write_output(rec, &finished_round_event, sizeof(finished_round_event));
+ rc = write_output(rec, &finished_round_event,
+ sizeof(finished_round_event));
+
+out:
+ return rc;
}
static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
@@ -457,7 +513,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
output = open(output_name, flags, S_IRUSR | S_IWUSR);
if (output < 0) {
perror("failed to create output file");
- exit(-1);
+ return -1;
}
rec->output = output;
@@ -497,7 +553,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
}
}
- perf_record__open(rec);
+ if (perf_record__open(rec) != 0) {
+ err = -1;
+ goto out_delete_session;
+ }
/*
* perf_session__delete(session) will be called at perf_record__exit()
@@ -507,19 +566,20 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
if (opts->pipe_output) {
err = perf_header__write_pipe(output);
if (err < 0)
- return err;
+ goto out_delete_session;
} else if (rec->file_new) {
err = perf_session__write_header(session, evsel_list,
output, false);
if (err < 0)
- return err;
+ goto out_delete_session;
}
if (!rec->no_buildid
&& !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
pr_err("Couldn't generate buildids. "
"Use --no-buildid to profile anyway.\n");
- return -1;
+ err = -1;
+ goto out_delete_session;
}
rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
@@ -527,7 +587,8 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
machine = perf_session__find_host_machine(session);
if (!machine) {
pr_err("Couldn't find native kernel information.\n");
- return -1;
+ err = -1;
+ goto out_delete_session;
}
if (opts->pipe_output) {
@@ -535,14 +596,14 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
- return err;
+ goto out_delete_session;
}
err = perf_event__synthesize_event_types(tool, process_synthesized_event,
machine);
if (err < 0) {
pr_err("Couldn't synthesize event_types.\n");
- return err;
+ goto out_delete_session;
}
if (have_tracepoints(&evsel_list->entries)) {
@@ -558,7 +619,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
process_synthesized_event);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
- return err;
+ goto out_delete_session;
}
advance_output(rec, err);
}
@@ -586,20 +647,24 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
perf_event__synthesize_guest_os);
if (!opts->target.system_wide)
- perf_event__synthesize_thread_map(tool, evsel_list->threads,
+ err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
process_synthesized_event,
machine);
else
- perf_event__synthesize_threads(tool, process_synthesized_event,
+ err = perf_event__synthesize_threads(tool, process_synthesized_event,
machine);
+ if (err != 0)
+ goto out_delete_session;
+
if (rec->realtime_prio) {
struct sched_param param;
param.sched_priority = rec->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
pr_err("Could not set realtime priority.\n");
- exit(-1);
+ err = -1;
+ goto out_delete_session;
}
}
@@ -614,7 +679,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
for (;;) {
int hits = rec->samples;
- perf_record__mmap_read_all(rec);
+ if (perf_record__mmap_read_all(rec) < 0) {
+ err = -1;
+ goto out_delete_session;
+ }
if (hits == rec->samples) {
if (done)
@@ -732,6 +800,106 @@ error:
return ret;
}
+#ifndef NO_LIBUNWIND_SUPPORT
+static int get_stack_size(char *str, unsigned long *_size)
+{
+ char *endptr;
+ unsigned long size;
+ unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
+
+ size = strtoul(str, &endptr, 0);
+
+ do {
+ if (*endptr)
+ break;
+
+ size = round_up(size, sizeof(u64));
+ if (!size || size > max_size)
+ break;
+
+ *_size = size;
+ return 0;
+
+ } while (0);
+
+ pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
+ max_size, str);
+ return -1;
+}
+#endif /* !NO_LIBUNWIND_SUPPORT */
+
+static int
+parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
+ int unset)
+{
+ struct perf_record *rec = (struct perf_record *)opt->value;
+ char *tok, *name, *saveptr = NULL;
+ char *buf;
+ int ret = -1;
+
+ /* --no-call-graph */
+ if (unset)
+ return 0;
+
+ /* We specified default option if none is provided. */
+ BUG_ON(!arg);
+
+ /* We need buffer that we know we can write to. */
+ buf = malloc(strlen(arg) + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ strcpy(buf, arg);
+
+ tok = strtok_r((char *)buf, ",", &saveptr);
+ name = tok ? : (char *)buf;
+
+ do {
+ /* Framepointer style */
+ if (!strncmp(name, "fp", sizeof("fp"))) {
+ if (!strtok_r(NULL, ",", &saveptr)) {
+ rec->opts.call_graph = CALLCHAIN_FP;
+ ret = 0;
+ } else
+ pr_err("callchain: No more arguments "
+ "needed for -g fp\n");
+ break;
+
+#ifndef NO_LIBUNWIND_SUPPORT
+ /* Dwarf style */
+ } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
+ ret = 0;
+ rec->opts.call_graph = CALLCHAIN_DWARF;
+ rec->opts.stack_dump_size = default_stack_dump_size;
+
+ tok = strtok_r(NULL, ",", &saveptr);
+ if (tok) {
+ unsigned long size = 0;
+
+ ret = get_stack_size(tok, &size);
+ rec->opts.stack_dump_size = size;
+ }
+
+ if (!ret)
+ pr_debug("callchain: stack dump size %d\n",
+ rec->opts.stack_dump_size);
+#endif /* !NO_LIBUNWIND_SUPPORT */
+ } else {
+ pr_err("callchain: Unknown -g option "
+ "value: %s\n", arg);
+ break;
+ }
+
+ } while (0);
+
+ free(buf);
+
+ if (!ret)
+ pr_debug("callchain: type %d\n", rec->opts.call_graph);
+
+ return ret;
+}
+
static const char * const record_usage[] = {
"perf record [<options>] [<command>]",
"perf record [<options>] -- <command> [<options>]",
@@ -803,8 +971,9 @@ const struct option record_options[] = {
"number of mmap data pages"),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
- OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph,
- "do call-graph (stack chain/backtrace) recording"),
+ OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]",
+ callchain_help, &parse_callchain_opt,
+ "fp"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
@@ -836,7 +1005,7 @@ const struct option record_options[] = {
OPT_END()
};
-int cmd_record(int argc, const char **argv, const char *prefix __used)
+int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
int err = -ENOMEM;
struct perf_evsel *pos;
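
The builtin-record.c changes above turn -g/--call-graph into a callback option taking "mode[,dump_size]": "fp" (frame pointers) is the default, and "dwarf" is offered only when perf is built with libunwind (NO_LIBUNWIND_SUPPORT not set). A hedged usage sketch; ./myprog is a placeholder workload, not part of the patch:

    # Frame-pointer call graphs, the built-in default mode:
    perf record -g fp -- ./myprog

    # DWARF-based unwinding with a 4096-byte user stack dump per sample;
    # requires a perf build with libunwind support:
    perf record -g dwarf,4096 -- ./myprog

When no dump size is given, the 8192-byte default_stack_dump_size applies; get_stack_size() rounds a requested size up to a multiple of sizeof(u64) and rejects anything above USHRT_MAX rounded down to a u64 boundary.
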
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 7c88a243b5db..1da243dfbc3e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -69,8 +69,8 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain)
&& sample->callchain) {
- err = machine__resolve_callchain(machine, al->thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel, al->thread,
+ sample, &parent);
if (err)
return err;
}
@@ -93,7 +93,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
struct annotation *notes;
err = -ENOMEM;
bx = he->branch_info;
- if (bx->from.sym && use_browser > 0) {
+ if (bx->from.sym && use_browser == 1 && sort__has_sym) {
notes = symbol__annotation(bx->from.sym);
if (!notes->src
&& symbol__alloc_hist(bx->from.sym) < 0)
@@ -107,7 +107,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
goto out;
}
- if (bx->to.sym && use_browser > 0) {
+ if (bx->to.sym && use_browser == 1 && sort__has_sym) {
notes = symbol__annotation(bx->to.sym);
if (!notes->src
&& symbol__alloc_hist(bx->to.sym) < 0)
@@ -140,8 +140,8 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
struct hist_entry *he;
if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
- err = machine__resolve_callchain(machine, al->thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel, al->thread,
+ sample, &parent);
if (err)
return err;
}
@@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
 * so we don't allocate the extra space needed because the stdio
* code will not use it.
*/
- if (he->ms.sym != NULL && use_browser > 0) {
+ if (he->ms.sym != NULL && use_browser == 1 && sort__has_sym) {
struct annotation *notes = symbol__annotation(he->ms.sym);
assert(evsel != NULL);
@@ -223,9 +223,9 @@ static int process_sample_event(struct perf_tool *tool,
static int process_read_event(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
+ struct perf_sample *sample __maybe_unused,
struct perf_evsel *evsel,
- struct machine *machine __used)
+ struct machine *machine __maybe_unused)
{
struct perf_report *rep = container_of(tool, struct perf_report, tool);
@@ -287,7 +287,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
extern volatile int session_done;
-static void sig_handler(int sig __used)
+static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
@@ -397,17 +397,17 @@ static int __cmd_report(struct perf_report *rep)
desc);
}
- if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout);
- goto out_delete;
- }
-
if (verbose > 3)
perf_session__fprintf(session, stdout);
if (verbose > 2)
perf_session__fprintf_dsos(session, stdout);
+ if (dump_trace) {
+ perf_session__fprintf_nr_events(session, stdout);
+ goto out_delete;
+ }
+
nr_samples = 0;
list_for_each_entry(pos, &session->evlist->entries, node) {
struct hists *hists = &pos->hists;
@@ -533,13 +533,14 @@ setup:
}
static int
-parse_branch_mode(const struct option *opt __used, const char *str __used, int unset)
+parse_branch_mode(const struct option *opt __maybe_unused,
+ const char *str __maybe_unused, int unset)
{
sort__branch_mode = !unset;
return 0;
}
-int cmd_report(int argc, const char **argv, const char *prefix __used)
+int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_session *session;
struct stat st;
@@ -638,6 +639,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
"Show a column with the sum of periods"),
OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "",
"use branch records for histogram filling", parse_branch_mode),
+ OPT_STRING(0, "objdump", &objdump_path, "path",
+ "objdump binary to use for disassembly and annotations"),
OPT_END()
};
@@ -686,15 +689,19 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
if (strcmp(report.input_name, "-") != 0)
setup_browser(true);
- else
+ else {
use_browser = 0;
+ perf_hpp__init(false, false);
+ }
+
+ setup_sorting(report_usage, options);
/*
* Only in the newt browser we are doing integrated annotation,
* so don't allocate extra space that won't be used in the stdio
* implementation.
*/
- if (use_browser > 0) {
+ if (use_browser == 1 && sort__has_sym) {
symbol_conf.priv_size = sizeof(struct annotation);
report.annotate_init = symbol__annotate_init;
/*
@@ -717,8 +724,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
if (symbol__init() < 0)
goto error;
- setup_sorting(report_usage, options);
-
if (parent_pattern != default_parent_pattern) {
if (sort_dimension__add("parent") < 0)
goto error;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7a9ad2b1ee76..9b9e32eaa805 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -23,31 +23,12 @@
#include <pthread.h>
#include <math.h>
-static const char *input_name;
-
-static char default_sort_order[] = "avg, max, switch, runtime";
-static const char *sort_order = default_sort_order;
-
-static int profile_cpu = -1;
-
#define PR_SET_NAME 15 /* Set process name */
#define MAX_CPUS 4096
-
-static u64 run_measurement_overhead;
-static u64 sleep_measurement_overhead;
-
#define COMM_LEN 20
#define SYM_LEN 129
-
#define MAX_PID 65536
-static unsigned long nr_tasks;
-
-struct perf_sched {
- struct perf_tool tool;
- struct perf_session *session;
-};
-
struct sched_atom;
struct task_desc {
@@ -85,44 +66,6 @@ struct sched_atom {
struct task_desc *wakee;
};
-static struct task_desc *pid_to_task[MAX_PID];
-
-static struct task_desc **tasks;
-
-static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
-static u64 start_time;
-
-static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static unsigned long nr_run_events;
-static unsigned long nr_sleep_events;
-static unsigned long nr_wakeup_events;
-
-static unsigned long nr_sleep_corrections;
-static unsigned long nr_run_events_optimized;
-
-static unsigned long targetless_wakeups;
-static unsigned long multitarget_wakeups;
-
-static u64 cpu_usage;
-static u64 runavg_cpu_usage;
-static u64 parent_cpu_usage;
-static u64 runavg_parent_cpu_usage;
-
-static unsigned long nr_runs;
-static u64 sum_runtime;
-static u64 sum_fluct;
-static u64 run_avg;
-
-static unsigned int replay_repeat = 10;
-static unsigned long nr_timestamps;
-static unsigned long nr_unordered_timestamps;
-static unsigned long nr_state_machine_bugs;
-static unsigned long nr_context_switch_bugs;
-static unsigned long nr_events;
-static unsigned long nr_lost_chunks;
-static unsigned long nr_lost_events;
-
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
enum thread_state {
@@ -154,11 +97,79 @@ struct work_atoms {
typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
-static struct rb_root atom_root, sorted_atom_root;
+struct perf_sched;
+
+struct trace_sched_handler {
+ int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
-static u64 all_runtime;
-static u64 all_count;
+ int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+ int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+
+ int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample);
+
+ int (*migrate_task_event)(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine);
+};
+
+struct perf_sched {
+ struct perf_tool tool;
+ const char *input_name;
+ const char *sort_order;
+ unsigned long nr_tasks;
+ struct task_desc *pid_to_task[MAX_PID];
+ struct task_desc **tasks;
+ const struct trace_sched_handler *tp_handler;
+ pthread_mutex_t start_work_mutex;
+ pthread_mutex_t work_done_wait_mutex;
+ int profile_cpu;
+/*
+ * Track the current task - that way we can know whether there are any
+ * weird events, such as a task being switched away that is not current.
+ */
+ int max_cpu;
+ u32 curr_pid[MAX_CPUS];
+ struct thread *curr_thread[MAX_CPUS];
+ char next_shortname1;
+ char next_shortname2;
+ unsigned int replay_repeat;
+ unsigned long nr_run_events;
+ unsigned long nr_sleep_events;
+ unsigned long nr_wakeup_events;
+ unsigned long nr_sleep_corrections;
+ unsigned long nr_run_events_optimized;
+ unsigned long targetless_wakeups;
+ unsigned long multitarget_wakeups;
+ unsigned long nr_runs;
+ unsigned long nr_timestamps;
+ unsigned long nr_unordered_timestamps;
+ unsigned long nr_state_machine_bugs;
+ unsigned long nr_context_switch_bugs;
+ unsigned long nr_events;
+ unsigned long nr_lost_chunks;
+ unsigned long nr_lost_events;
+ u64 run_measurement_overhead;
+ u64 sleep_measurement_overhead;
+ u64 start_time;
+ u64 cpu_usage;
+ u64 runavg_cpu_usage;
+ u64 parent_cpu_usage;
+ u64 runavg_parent_cpu_usage;
+ u64 sum_runtime;
+ u64 sum_fluct;
+ u64 run_avg;
+ u64 all_runtime;
+ u64 all_count;
+ u64 cpu_last_switched[MAX_CPUS];
+ struct rb_root atom_root, sorted_atom_root;
+ struct list_head sort_list, cmp_pid;
+};
static u64 get_nsecs(void)
{
@@ -169,13 +180,13 @@ static u64 get_nsecs(void)
return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
-static void burn_nsecs(u64 nsecs)
+static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
u64 T0 = get_nsecs(), T1;
do {
T1 = get_nsecs();
- } while (T1 + run_measurement_overhead < T0 + nsecs);
+ } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(u64 nsecs)
@@ -188,24 +199,24 @@ static void sleep_nsecs(u64 nsecs)
nanosleep(&ts, NULL);
}
-static void calibrate_run_measurement_overhead(void)
+static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
u64 T0, T1, delta, min_delta = 1000000000ULL;
int i;
for (i = 0; i < 10; i++) {
T0 = get_nsecs();
- burn_nsecs(0);
+ burn_nsecs(sched, 0);
T1 = get_nsecs();
delta = T1-T0;
min_delta = min(min_delta, delta);
}
- run_measurement_overhead = min_delta;
+ sched->run_measurement_overhead = min_delta;
printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
-static void calibrate_sleep_measurement_overhead(void)
+static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
u64 T0, T1, delta, min_delta = 1000000000ULL;
int i;
@@ -218,7 +229,7 @@ static void calibrate_sleep_measurement_overhead(void)
min_delta = min(min_delta, delta);
}
min_delta -= 10000;
- sleep_measurement_overhead = min_delta;
+ sched->sleep_measurement_overhead = min_delta;
printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
@@ -251,8 +262,8 @@ static struct sched_atom *last_event(struct task_desc *task)
return task->atoms[task->nr_events - 1];
}
-static void
-add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
+static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
+ u64 timestamp, u64 duration)
{
struct sched_atom *event, *curr_event = last_event(task);
@@ -261,7 +272,7 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
* to it:
*/
if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
- nr_run_events_optimized++;
+ sched->nr_run_events_optimized++;
curr_event->duration += duration;
return;
}
@@ -271,12 +282,11 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
event->type = SCHED_EVENT_RUN;
event->duration = duration;
- nr_run_events++;
+ sched->nr_run_events++;
}
-static void
-add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
- struct task_desc *wakee)
+static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
+ u64 timestamp, struct task_desc *wakee)
{
struct sched_atom *event, *wakee_event;
@@ -286,11 +296,11 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
wakee_event = last_event(wakee);
if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
- targetless_wakeups++;
+ sched->targetless_wakeups++;
return;
}
if (wakee_event->wait_sem) {
- multitarget_wakeups++;
+ sched->multitarget_wakeups++;
return;
}
@@ -299,89 +309,89 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
wakee_event->specific_wait = 1;
event->wait_sem = wakee_event->wait_sem;
- nr_wakeup_events++;
+ sched->nr_wakeup_events++;
}
-static void
-add_sched_event_sleep(struct task_desc *task, u64 timestamp,
- u64 task_state __used)
+static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
+ u64 timestamp, u64 task_state __maybe_unused)
{
struct sched_atom *event = get_new_event(task, timestamp);
event->type = SCHED_EVENT_SLEEP;
- nr_sleep_events++;
+ sched->nr_sleep_events++;
}
-static struct task_desc *register_pid(unsigned long pid, const char *comm)
+static struct task_desc *register_pid(struct perf_sched *sched,
+ unsigned long pid, const char *comm)
{
struct task_desc *task;
BUG_ON(pid >= MAX_PID);
- task = pid_to_task[pid];
+ task = sched->pid_to_task[pid];
if (task)
return task;
task = zalloc(sizeof(*task));
task->pid = pid;
- task->nr = nr_tasks;
+ task->nr = sched->nr_tasks;
strcpy(task->comm, comm);
/*
* every task starts in sleeping state - this gets ignored
* if there's no wakeup pointing to this sleep state:
*/
- add_sched_event_sleep(task, 0, 0);
+ add_sched_event_sleep(sched, task, 0, 0);
- pid_to_task[pid] = task;
- nr_tasks++;
- tasks = realloc(tasks, nr_tasks*sizeof(struct task_task *));
- BUG_ON(!tasks);
- tasks[task->nr] = task;
+ sched->pid_to_task[pid] = task;
+ sched->nr_tasks++;
+ sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_task *));
+ BUG_ON(!sched->tasks);
+ sched->tasks[task->nr] = task;
if (verbose)
- printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);
+ printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
return task;
}
-static void print_task_traces(void)
+static void print_task_traces(struct perf_sched *sched)
{
struct task_desc *task;
unsigned long i;
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
task->nr, task->comm, task->pid, task->nr_events);
}
}
-static void add_cross_task_wakeups(void)
+static void add_cross_task_wakeups(struct perf_sched *sched)
{
struct task_desc *task1, *task2;
unsigned long i, j;
- for (i = 0; i < nr_tasks; i++) {
- task1 = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task1 = sched->tasks[i];
j = i + 1;
- if (j == nr_tasks)
+ if (j == sched->nr_tasks)
j = 0;
- task2 = tasks[j];
- add_sched_event_wakeup(task1, 0, task2);
+ task2 = sched->tasks[j];
+ add_sched_event_wakeup(sched, task1, 0, task2);
}
}
-static void
-process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
+static void perf_sched__process_event(struct perf_sched *sched,
+ struct sched_atom *atom)
{
int ret = 0;
switch (atom->type) {
case SCHED_EVENT_RUN:
- burn_nsecs(atom->duration);
+ burn_nsecs(sched, atom->duration);
break;
case SCHED_EVENT_SLEEP:
if (atom->wait_sem)
@@ -428,8 +438,8 @@ static int self_open_counters(void)
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd < 0)
- die("Error: sys_perf_event_open() syscall returned"
- "with %d (%s)\n", fd, strerror(errno));
+ pr_err("Error: sys_perf_event_open() syscall returned "
+ "with %d (%s)\n", fd, strerror(errno));
return fd;
}
@@ -444,31 +454,41 @@ static u64 get_cpu_usage_nsec_self(int fd)
return runtime;
}
+struct sched_thread_parms {
+ struct task_desc *task;
+ struct perf_sched *sched;
+};
+
static void *thread_func(void *ctx)
{
- struct task_desc *this_task = ctx;
+ struct sched_thread_parms *parms = ctx;
+ struct task_desc *this_task = parms->task;
+ struct perf_sched *sched = parms->sched;
u64 cpu_usage_0, cpu_usage_1;
unsigned long i, ret;
char comm2[22];
int fd;
+ free(parms);
+
sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2);
fd = self_open_counters();
-
+ if (fd < 0)
+ return NULL;
again:
ret = sem_post(&this_task->ready_for_work);
BUG_ON(ret);
- ret = pthread_mutex_lock(&start_work_mutex);
+ ret = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(ret);
- ret = pthread_mutex_unlock(&start_work_mutex);
+ ret = pthread_mutex_unlock(&sched->start_work_mutex);
BUG_ON(ret);
cpu_usage_0 = get_cpu_usage_nsec_self(fd);
for (i = 0; i < this_task->nr_events; i++) {
this_task->curr_event = i;
- process_sched_event(this_task, this_task->atoms[i]);
+ perf_sched__process_event(sched, this_task->atoms[i]);
}
cpu_usage_1 = get_cpu_usage_nsec_self(fd);
@@ -476,15 +496,15 @@ again:
ret = sem_post(&this_task->work_done_sem);
BUG_ON(ret);
- ret = pthread_mutex_lock(&work_done_wait_mutex);
+ ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
BUG_ON(ret);
- ret = pthread_mutex_unlock(&work_done_wait_mutex);
+ ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
BUG_ON(ret);
goto again;
}
-static void create_tasks(void)
+static void create_tasks(struct perf_sched *sched)
{
struct task_desc *task;
pthread_attr_t attr;
@@ -496,128 +516,129 @@ static void create_tasks(void)
err = pthread_attr_setstacksize(&attr,
(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
BUG_ON(err);
- err = pthread_mutex_lock(&start_work_mutex);
+ err = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(err);
- err = pthread_mutex_lock(&work_done_wait_mutex);
+ err = pthread_mutex_lock(&sched->work_done_wait_mutex);
BUG_ON(err);
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ struct sched_thread_parms *parms = malloc(sizeof(*parms));
+ BUG_ON(parms == NULL);
+ parms->task = task = sched->tasks[i];
+ parms->sched = sched;
sem_init(&task->sleep_sem, 0, 0);
sem_init(&task->ready_for_work, 0, 0);
sem_init(&task->work_done_sem, 0, 0);
task->curr_event = 0;
- err = pthread_create(&task->thread, &attr, thread_func, task);
+ err = pthread_create(&task->thread, &attr, thread_func, parms);
BUG_ON(err);
}
}
-static void wait_for_tasks(void)
+static void wait_for_tasks(struct perf_sched *sched)
{
u64 cpu_usage_0, cpu_usage_1;
struct task_desc *task;
unsigned long i, ret;
- start_time = get_nsecs();
- cpu_usage = 0;
- pthread_mutex_unlock(&work_done_wait_mutex);
+ sched->start_time = get_nsecs();
+ sched->cpu_usage = 0;
+ pthread_mutex_unlock(&sched->work_done_wait_mutex);
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
ret = sem_wait(&task->ready_for_work);
BUG_ON(ret);
sem_init(&task->ready_for_work, 0, 0);
}
- ret = pthread_mutex_lock(&work_done_wait_mutex);
+ ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
BUG_ON(ret);
cpu_usage_0 = get_cpu_usage_nsec_parent();
- pthread_mutex_unlock(&start_work_mutex);
+ pthread_mutex_unlock(&sched->start_work_mutex);
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
ret = sem_wait(&task->work_done_sem);
BUG_ON(ret);
sem_init(&task->work_done_sem, 0, 0);
- cpu_usage += task->cpu_usage;
+ sched->cpu_usage += task->cpu_usage;
task->cpu_usage = 0;
}
cpu_usage_1 = get_cpu_usage_nsec_parent();
- if (!runavg_cpu_usage)
- runavg_cpu_usage = cpu_usage;
- runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;
+ if (!sched->runavg_cpu_usage)
+ sched->runavg_cpu_usage = sched->cpu_usage;
+ sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;
- parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
- if (!runavg_parent_cpu_usage)
- runavg_parent_cpu_usage = parent_cpu_usage;
- runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
- parent_cpu_usage)/10;
+ sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
+ if (!sched->runavg_parent_cpu_usage)
+ sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
+ sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
+ sched->parent_cpu_usage)/10;
- ret = pthread_mutex_lock(&start_work_mutex);
+ ret = pthread_mutex_lock(&sched->start_work_mutex);
BUG_ON(ret);
- for (i = 0; i < nr_tasks; i++) {
- task = tasks[i];
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
sem_init(&task->sleep_sem, 0, 0);
task->curr_event = 0;
}
}
-static void run_one_test(void)
+static void run_one_test(struct perf_sched *sched)
{
u64 T0, T1, delta, avg_delta, fluct;
T0 = get_nsecs();
- wait_for_tasks();
+ wait_for_tasks(sched);
T1 = get_nsecs();
delta = T1 - T0;
- sum_runtime += delta;
- nr_runs++;
+ sched->sum_runtime += delta;
+ sched->nr_runs++;
- avg_delta = sum_runtime / nr_runs;
+ avg_delta = sched->sum_runtime / sched->nr_runs;
if (delta < avg_delta)
fluct = avg_delta - delta;
else
fluct = delta - avg_delta;
- sum_fluct += fluct;
- if (!run_avg)
- run_avg = delta;
- run_avg = (run_avg*9 + delta)/10;
+ sched->sum_fluct += fluct;
+ if (!sched->run_avg)
+ sched->run_avg = delta;
+ sched->run_avg = (sched->run_avg * 9 + delta) / 10;
- printf("#%-3ld: %0.3f, ",
- nr_runs, (double)delta/1000000.0);
+ printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);
- printf("ravg: %0.2f, ",
- (double)run_avg/1e6);
+ printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);
printf("cpu: %0.2f / %0.2f",
- (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);
+ (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);
#if 0
/*
* rusage statistics done by the parent, these are less
- * accurate than the sum_exec_runtime based statistics:
+ * accurate than the sched->sum_exec_runtime based statistics:
*/
printf(" [%0.2f / %0.2f]",
- (double)parent_cpu_usage/1e6,
- (double)runavg_parent_cpu_usage/1e6);
+ (double)sched->parent_cpu_usage/1e6,
+ (double)sched->runavg_parent_cpu_usage/1e6);
#endif
printf("\n");
- if (nr_sleep_corrections)
- printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
- nr_sleep_corrections = 0;
+ if (sched->nr_sleep_corrections)
+ printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
+ sched->nr_sleep_corrections = 0;
}
-static void test_calibrations(void)
+static void test_calibrations(struct perf_sched *sched)
{
u64 T0, T1;
T0 = get_nsecs();
- burn_nsecs(1e6);
+ burn_nsecs(sched, 1e6);
T1 = get_nsecs();
printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
@@ -629,236 +650,92 @@ static void test_calibrations(void)
printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
-#define FILL_FIELD(ptr, field, event, data) \
- ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
-
-#define FILL_ARRAY(ptr, array, event, data) \
-do { \
- void *__array = raw_field_ptr(event, #array, data); \
- memcpy(ptr.array, __array, sizeof(ptr.array)); \
-} while(0)
-
-#define FILL_COMMON_FIELDS(ptr, event, data) \
-do { \
- FILL_FIELD(ptr, common_type, event, data); \
- FILL_FIELD(ptr, common_flags, event, data); \
- FILL_FIELD(ptr, common_preempt_count, event, data); \
- FILL_FIELD(ptr, common_pid, event, data); \
- FILL_FIELD(ptr, common_tgid, event, data); \
-} while (0)
-
-
-
-struct trace_switch_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char prev_comm[16];
- u32 prev_pid;
- u32 prev_prio;
- u64 prev_state;
- char next_comm[16];
- u32 next_pid;
- u32 next_prio;
-};
-
-struct trace_runtime_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char comm[16];
- u32 pid;
- u64 runtime;
- u64 vruntime;
-};
-
-struct trace_wakeup_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char comm[16];
- u32 pid;
-
- u32 prio;
- u32 success;
- u32 cpu;
-};
-
-struct trace_fork_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char parent_comm[16];
- u32 parent_pid;
- char child_comm[16];
- u32 child_pid;
-};
-
-struct trace_migrate_task_event {
- u32 size;
-
- u16 common_type;
- u8 common_flags;
- u8 common_preempt_count;
- u32 common_pid;
- u32 common_tgid;
-
- char comm[16];
- u32 pid;
-
- u32 prio;
- u32 cpu;
-};
-
-struct trace_sched_handler {
- void (*switch_event)(struct trace_switch_event *,
- struct machine *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*runtime_event)(struct trace_runtime_event *,
- struct machine *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*wakeup_event)(struct trace_wakeup_event *,
- struct machine *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*fork_event)(struct trace_fork_event *,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-
- void (*migrate_task_event)(struct trace_migrate_task_event *,
- struct machine *machine,
- struct event_format *,
- int cpu,
- u64 timestamp,
- struct thread *thread);
-};
-
-
-static void
-replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
- struct machine *machine __used,
- struct event_format *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int
+replay_wakeup_event(struct perf_sched *sched,
+ struct perf_evsel *evsel, struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
{
+ const char *comm = perf_evsel__strval(evsel, sample, "comm");
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid");
struct task_desc *waker, *wakee;
if (verbose) {
- printf("sched_wakeup event %p\n", event);
+ printf("sched_wakeup event %p\n", evsel);
- printf(" ... pid %d woke up %s/%d\n",
- wakeup_event->common_pid,
- wakeup_event->comm,
- wakeup_event->pid);
+ printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
}
- waker = register_pid(wakeup_event->common_pid, "<unknown>");
- wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
+ waker = register_pid(sched, sample->tid, "<unknown>");
+ wakee = register_pid(sched, pid, comm);
- add_sched_event_wakeup(waker, timestamp, wakee);
+ add_sched_event_wakeup(sched, waker, sample->time, wakee);
+ return 0;
}
-static u64 cpu_last_switched[MAX_CPUS];
-
-static void
-replay_switch_event(struct trace_switch_event *switch_event,
- struct machine *machine __used,
- struct event_format *event,
- int cpu,
- u64 timestamp,
- struct thread *thread __used)
+static int replay_switch_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
{
- struct task_desc *prev, __used *next;
- u64 timestamp0;
+ const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
+ *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
+ const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
+ struct task_desc *prev, __maybe_unused *next;
+ u64 timestamp0, timestamp = sample->time;
+ int cpu = sample->cpu;
s64 delta;
if (verbose)
- printf("sched_switch event %p\n", event);
+ printf("sched_switch event %p\n", evsel);
if (cpu >= MAX_CPUS || cpu < 0)
- return;
+ return 0;
- timestamp0 = cpu_last_switched[cpu];
+ timestamp0 = sched->cpu_last_switched[cpu];
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
- if (delta < 0)
- die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
-
- if (verbose) {
- printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
- switch_event->prev_comm, switch_event->prev_pid,
- switch_event->next_comm, switch_event->next_pid,
- delta);
+ if (delta < 0) {
+ pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+ return -1;
}
- prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
- next = register_pid(switch_event->next_pid, switch_event->next_comm);
+ pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
+ prev_comm, prev_pid, next_comm, next_pid, delta);
- cpu_last_switched[cpu] = timestamp;
+ prev = register_pid(sched, prev_pid, prev_comm);
+ next = register_pid(sched, next_pid, next_comm);
- add_sched_event_run(prev, timestamp, delta);
- add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
-}
+ sched->cpu_last_switched[cpu] = timestamp;
+ add_sched_event_run(sched, prev, timestamp, delta);
+ add_sched_event_sleep(sched, prev, timestamp, prev_state);
-static void
-replay_fork_event(struct trace_fork_event *fork_event,
- struct event_format *event,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+ return 0;
+}
+
+static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample)
{
+ const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
+ *child_comm = perf_evsel__strval(evsel, sample, "child_comm");
+ const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"),
+ child_pid = perf_evsel__intval(evsel, sample, "child_pid");
+
if (verbose) {
- printf("sched_fork event %p\n", event);
- printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
- printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
+ printf("sched_fork event %p\n", evsel);
+ printf("... parent: %s/%d\n", parent_comm, parent_pid);
+ printf("... child: %s/%d\n", child_comm, child_pid);
}
- register_pid(fork_event->parent_pid, fork_event->parent_comm);
- register_pid(fork_event->child_pid, fork_event->child_comm);
-}
-static struct trace_sched_handler replay_ops = {
- .wakeup_event = replay_wakeup_event,
- .switch_event = replay_switch_event,
- .fork_event = replay_fork_event,
-};
+ register_pid(sched, parent_pid, parent_comm);
+ register_pid(sched, child_pid, child_comm);
+ return 0;
+}
struct sort_dimension {
const char *name;
@@ -866,8 +743,6 @@ struct sort_dimension {
struct list_head list;
};
-static LIST_HEAD(cmp_pid);
-
static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
@@ -936,43 +811,45 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
rb_insert_color(&data->node, root);
}
-static void thread_atoms_insert(struct thread *thread)
+static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
struct work_atoms *atoms = zalloc(sizeof(*atoms));
- if (!atoms)
- die("No memory");
+ if (!atoms) {
+ pr_err("No memory at %s\n", __func__);
+ return -1;
+ }
atoms->thread = thread;
INIT_LIST_HEAD(&atoms->work_list);
- __thread_latency_insert(&atom_root, atoms, &cmp_pid);
+ __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
+ return 0;
}
-static void
-latency_fork_event(struct trace_fork_event *fork_event __used,
- struct event_format *event __used,
- int cpu __used,
- u64 timestamp __used,
- struct thread *thread __used)
+static int latency_fork_event(struct perf_sched *sched __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample __maybe_unused)
{
/* should insert the newcomer */
+ return 0;
}
-__used
-static char sched_out_state(struct trace_switch_event *switch_event)
+static char sched_out_state(u64 prev_state)
{
const char *str = TASK_STATE_TO_CHAR_STR;
- return str[switch_event->prev_state];
+ return str[prev_state];
}
-static void
+static int
add_sched_out_event(struct work_atoms *atoms,
char run_state,
u64 timestamp)
{
struct work_atom *atom = zalloc(sizeof(*atom));
- if (!atom)
- die("Non memory");
+ if (!atom) {
+ pr_err("Non memory at %s", __func__);
+ return -1;
+ }
atom->sched_out_time = timestamp;
@@ -982,10 +859,12 @@ add_sched_out_event(struct work_atoms *atoms,
}
list_add_tail(&atom->list, &atoms->work_list);
+ return 0;
}
static void
-add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
+add_runtime_event(struct work_atoms *atoms, u64 delta,
+ u64 timestamp __maybe_unused)
{
struct work_atom *atom;
@@ -1028,106 +907,128 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
atoms->nb_atoms++;
}
-static void
-latency_switch_event(struct trace_switch_event *switch_event,
- struct machine *machine,
- struct event_format *event __used,
- int cpu,
- u64 timestamp,
- struct thread *thread __used)
+static int latency_switch_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
+ const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
- u64 timestamp0;
+ u64 timestamp0, timestamp = sample->time;
+ int cpu = sample->cpu;
s64 delta;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
- timestamp0 = cpu_last_switched[cpu];
- cpu_last_switched[cpu] = timestamp;
+ timestamp0 = sched->cpu_last_switched[cpu];
+ sched->cpu_last_switched[cpu] = timestamp;
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
- if (delta < 0)
- die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
-
+ if (delta < 0) {
+ pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+ return -1;
+ }
- sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
- sched_in = machine__findnew_thread(machine, switch_event->next_pid);
+ sched_out = machine__findnew_thread(machine, prev_pid);
+ sched_in = machine__findnew_thread(machine, next_pid);
- out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
+ out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
- thread_atoms_insert(sched_out);
- out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
- if (!out_events)
- die("out-event: Internal tree error");
+ if (thread_atoms_insert(sched, sched_out))
+ return -1;
+ out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
+ if (!out_events) {
+ pr_err("out-event: Internal tree error");
+ return -1;
+ }
}
- add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
+ if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
+ return -1;
- in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
+ in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
- thread_atoms_insert(sched_in);
- in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
- if (!in_events)
- die("in-event: Internal tree error");
+ if (thread_atoms_insert(sched, sched_in))
+ return -1;
+ in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
+ if (!in_events) {
+ pr_err("in-event: Internal tree error");
+ return -1;
+ }
/*
 * A task came in that we have not heard about yet,
* add in an initial atom in runnable state:
*/
- add_sched_out_event(in_events, 'R', timestamp);
+ if (add_sched_out_event(in_events, 'R', timestamp))
+ return -1;
}
add_sched_in_event(in_events, timestamp);
+
+ return 0;
}
-static void
-latency_runtime_event(struct trace_runtime_event *runtime_event,
- struct machine *machine,
- struct event_format *event __used,
- int cpu,
- u64 timestamp,
- struct thread *this_thread __used)
+static int latency_runtime_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, runtime_event->pid);
- struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
+ struct thread *thread = machine__findnew_thread(machine, pid);
+ struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
+ u64 timestamp = sample->time;
+ int cpu = sample->cpu;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
if (!atoms) {
- thread_atoms_insert(thread);
- atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
- if (!atoms)
- die("in-event: Internal tree error");
- add_sched_out_event(atoms, 'R', timestamp);
+ if (thread_atoms_insert(sched, thread))
+ return -1;
+ atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
+ if (!atoms) {
+ pr_err("in-event: Internal tree error");
+ return -1;
+ }
+ if (add_sched_out_event(atoms, 'R', timestamp))
+ return -1;
}
- add_runtime_event(atoms, runtime_event->runtime, timestamp);
+ add_runtime_event(atoms, runtime, timestamp);
+ return 0;
}
-static void
-latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
- struct machine *machine,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp,
- struct thread *thread __used)
+static int latency_wakeup_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid"),
+ success = perf_evsel__intval(evsel, sample, "success");
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *wakee;
+ u64 timestamp = sample->time;
/* Note for later, it may be interesting to observe the failing cases */
- if (!wakeup_event->success)
- return;
+ if (!success)
+ return 0;
- wakee = machine__findnew_thread(machine, wakeup_event->pid);
- atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
+ wakee = machine__findnew_thread(machine, pid);
+ atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
- thread_atoms_insert(wakee);
- atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
- if (!atoms)
- die("wakeup-event: Internal tree error");
- add_sched_out_event(atoms, 'S', timestamp);
+ if (thread_atoms_insert(sched, wakee))
+ return -1;
+ atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
+ if (!atoms) {
+ pr_err("wakeup-event: Internal tree error");
+ return -1;
+ }
+ if (add_sched_out_event(atoms, 'S', timestamp))
+ return -1;
}
BUG_ON(list_empty(&atoms->work_list));
@@ -1139,27 +1040,27 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
* one CPU, or are only looking at only one, so don't
* make useless noise.
*/
- if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
- nr_state_machine_bugs++;
+ if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
+ sched->nr_state_machine_bugs++;
- nr_timestamps++;
+ sched->nr_timestamps++;
if (atom->sched_out_time > timestamp) {
- nr_unordered_timestamps++;
- return;
+ sched->nr_unordered_timestamps++;
+ return 0;
}
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = timestamp;
+ return 0;
}
-static void
-latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
- struct machine *machine,
- struct event_format *__event __used,
- int cpu __used,
- u64 timestamp,
- struct thread *thread __used)
+static int latency_migrate_task_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ u64 timestamp = sample->time;
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *migrant;
@@ -1167,18 +1068,22 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
/*
* Only need to worry about migration when profiling one CPU.
*/
- if (profile_cpu == -1)
- return;
+ if (sched->profile_cpu == -1)
+ return 0;
- migrant = machine__findnew_thread(machine, migrate_task_event->pid);
- atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+ migrant = machine__findnew_thread(machine, pid);
+ atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
- thread_atoms_insert(migrant);
- register_pid(migrant->pid, migrant->comm);
- atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
- if (!atoms)
- die("migration-event: Internal tree error");
- add_sched_out_event(atoms, 'R', timestamp);
+ if (thread_atoms_insert(sched, migrant))
+ return -1;
+ register_pid(sched, migrant->pid, migrant->comm);
+ atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
+ if (!atoms) {
+ pr_err("migration-event: Internal tree error");
+ return -1;
+ }
+ if (add_sched_out_event(atoms, 'R', timestamp))
+ return -1;
}
BUG_ON(list_empty(&atoms->work_list));
@@ -1186,21 +1091,15 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
atom = list_entry(atoms->work_list.prev, struct work_atom, list);
atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
- nr_timestamps++;
+ sched->nr_timestamps++;
if (atom->sched_out_time > timestamp)
- nr_unordered_timestamps++;
-}
+ sched->nr_unordered_timestamps++;
-static struct trace_sched_handler lat_ops = {
- .wakeup_event = latency_wakeup_event,
- .switch_event = latency_switch_event,
- .runtime_event = latency_runtime_event,
- .fork_event = latency_fork_event,
- .migrate_task_event = latency_migrate_task_event,
-};
+ return 0;
+}
-static void output_lat_thread(struct work_atoms *work_list)
+static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
int i;
int ret;
@@ -1214,8 +1113,8 @@ static void output_lat_thread(struct work_atoms *work_list)
if (!strcmp(work_list->thread->comm, "swapper"))
return;
- all_runtime += work_list->total_runtime;
- all_count += work_list->nb_atoms;
+ sched->all_runtime += work_list->total_runtime;
+ sched->all_count += work_list->nb_atoms;
ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);
@@ -1241,11 +1140,6 @@ static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension pid_sort_dimension = {
- .name = "pid",
- .cmp = pid_cmp,
-};
-
static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
u64 avgl, avgr;
@@ -1267,11 +1161,6 @@ static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension avg_sort_dimension = {
- .name = "avg",
- .cmp = avg_cmp,
-};
-
static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->max_lat < r->max_lat)
@@ -1282,11 +1171,6 @@ static int max_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension max_sort_dimension = {
- .name = "max",
- .cmp = max_cmp,
-};
-
static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->nb_atoms < r->nb_atoms)
@@ -1297,11 +1181,6 @@ static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension switch_sort_dimension = {
- .name = "switch",
- .cmp = switch_cmp,
-};
-
static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
if (l->total_runtime < r->total_runtime)
@@ -1312,28 +1191,38 @@ static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
return 0;
}
-static struct sort_dimension runtime_sort_dimension = {
- .name = "runtime",
- .cmp = runtime_cmp,
-};
-
-static struct sort_dimension *available_sorts[] = {
- &pid_sort_dimension,
- &avg_sort_dimension,
- &max_sort_dimension,
- &switch_sort_dimension,
- &runtime_sort_dimension,
-};
-
-#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
-
-static LIST_HEAD(sort_list);
-
static int sort_dimension__add(const char *tok, struct list_head *list)
{
- int i;
+ size_t i;
+ static struct sort_dimension avg_sort_dimension = {
+ .name = "avg",
+ .cmp = avg_cmp,
+ };
+ static struct sort_dimension max_sort_dimension = {
+ .name = "max",
+ .cmp = max_cmp,
+ };
+ static struct sort_dimension pid_sort_dimension = {
+ .name = "pid",
+ .cmp = pid_cmp,
+ };
+ static struct sort_dimension runtime_sort_dimension = {
+ .name = "runtime",
+ .cmp = runtime_cmp,
+ };
+ static struct sort_dimension switch_sort_dimension = {
+ .name = "switch",
+ .cmp = switch_cmp,
+ };
+ struct sort_dimension *available_sorts[] = {
+ &pid_sort_dimension,
+ &avg_sort_dimension,
+ &max_sort_dimension,
+ &switch_sort_dimension,
+ &runtime_sort_dimension,
+ };
- for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
+ for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
if (!strcmp(available_sorts[i]->name, tok)) {
list_add_tail(&available_sorts[i]->list, list);
@@ -1344,126 +1233,97 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
return -1;
}
-static void setup_sorting(void);
-
-static void sort_lat(void)
+static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
for (;;) {
struct work_atoms *data;
- node = rb_first(&atom_root);
+ node = rb_first(&sched->atom_root);
if (!node)
break;
- rb_erase(node, &atom_root);
+ rb_erase(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
- __thread_latency_insert(&sorted_atom_root, data, &sort_list);
+ __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
}
-static struct trace_sched_handler *trace_handler;
-
-static void
-process_sched_wakeup_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread)
+static int process_sched_wakeup_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- void *data = sample->raw_data;
- struct trace_wakeup_event wakeup_event;
-
- FILL_COMMON_FIELDS(wakeup_event, event, data);
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- FILL_ARRAY(wakeup_event, comm, event, data);
- FILL_FIELD(wakeup_event, pid, event, data);
- FILL_FIELD(wakeup_event, prio, event, data);
- FILL_FIELD(wakeup_event, success, event, data);
- FILL_FIELD(wakeup_event, cpu, event, data);
+ if (sched->tp_handler->wakeup_event)
+ return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
- if (trace_handler->wakeup_event)
- trace_handler->wakeup_event(&wakeup_event, machine, event,
- sample->cpu, sample->time, thread);
+ return 0;
}
-/*
- * Track the current task - that way we can know whether there's any
- * weird events, such as a task being switched away that is not current.
- */
-static int max_cpu;
-
-static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
-
-static struct thread *curr_thread[MAX_CPUS];
-
-static char next_shortname1 = 'A';
-static char next_shortname2 = '0';
-
-static void
-map_switch_event(struct trace_switch_event *switch_event,
- struct machine *machine,
- struct event_format *event __used,
- int this_cpu,
- u64 timestamp,
- struct thread *thread __used)
+static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct machine *machine)
{
- struct thread *sched_out __used, *sched_in;
+ const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ struct thread *sched_out __maybe_unused, *sched_in;
int new_shortname;
- u64 timestamp0;
+ u64 timestamp0, timestamp = sample->time;
s64 delta;
- int cpu;
+ int cpu, this_cpu = sample->cpu;
BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
- if (this_cpu > max_cpu)
- max_cpu = this_cpu;
+ if (this_cpu > sched->max_cpu)
+ sched->max_cpu = this_cpu;
- timestamp0 = cpu_last_switched[this_cpu];
- cpu_last_switched[this_cpu] = timestamp;
+ timestamp0 = sched->cpu_last_switched[this_cpu];
+ sched->cpu_last_switched[this_cpu] = timestamp;
if (timestamp0)
delta = timestamp - timestamp0;
else
delta = 0;
- if (delta < 0)
- die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
-
+ if (delta < 0) {
+ pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
+ return -1;
+ }
- sched_out = machine__findnew_thread(machine, switch_event->prev_pid);
- sched_in = machine__findnew_thread(machine, switch_event->next_pid);
+ sched_out = machine__findnew_thread(machine, prev_pid);
+ sched_in = machine__findnew_thread(machine, next_pid);
- curr_thread[this_cpu] = sched_in;
+ sched->curr_thread[this_cpu] = sched_in;
printf(" ");
new_shortname = 0;
if (!sched_in->shortname[0]) {
- sched_in->shortname[0] = next_shortname1;
- sched_in->shortname[1] = next_shortname2;
+ sched_in->shortname[0] = sched->next_shortname1;
+ sched_in->shortname[1] = sched->next_shortname2;
- if (next_shortname1 < 'Z') {
- next_shortname1++;
+ if (sched->next_shortname1 < 'Z') {
+ sched->next_shortname1++;
} else {
- next_shortname1='A';
- if (next_shortname2 < '9') {
- next_shortname2++;
+ sched->next_shortname1='A';
+ if (sched->next_shortname2 < '9') {
+ sched->next_shortname2++;
} else {
- next_shortname2='0';
+ sched->next_shortname2='0';
}
}
new_shortname = 1;
}
- for (cpu = 0; cpu <= max_cpu; cpu++) {
+ for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
if (cpu != this_cpu)
printf(" ");
else
printf("*");
- if (curr_thread[cpu]) {
- if (curr_thread[cpu]->pid)
- printf("%2s ", curr_thread[cpu]->shortname);
+ if (sched->curr_thread[cpu]) {
+ if (sched->curr_thread[cpu]->pid)
+ printf("%2s ", sched->curr_thread[cpu]->shortname);
else
printf(". ");
} else
@@ -1477,134 +1337,97 @@ map_switch_event(struct trace_switch_event *switch_event,
} else {
printf("\n");
}
+
+ return 0;
}
-static void
-process_sched_switch_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread)
+static int process_sched_switch_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- int this_cpu = sample->cpu;
- void *data = sample->raw_data;
- struct trace_switch_event switch_event;
-
- FILL_COMMON_FIELDS(switch_event, event, data);
-
- FILL_ARRAY(switch_event, prev_comm, event, data);
- FILL_FIELD(switch_event, prev_pid, event, data);
- FILL_FIELD(switch_event, prev_prio, event, data);
- FILL_FIELD(switch_event, prev_state, event, data);
- FILL_ARRAY(switch_event, next_comm, event, data);
- FILL_FIELD(switch_event, next_pid, event, data);
- FILL_FIELD(switch_event, next_prio, event, data);
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+ int this_cpu = sample->cpu, err = 0;
+ u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- if (curr_pid[this_cpu] != (u32)-1) {
+ if (sched->curr_pid[this_cpu] != (u32)-1) {
/*
* Are we trying to switch away a PID that is
* not current?
*/
- if (curr_pid[this_cpu] != switch_event.prev_pid)
- nr_context_switch_bugs++;
+ if (sched->curr_pid[this_cpu] != prev_pid)
+ sched->nr_context_switch_bugs++;
}
- if (trace_handler->switch_event)
- trace_handler->switch_event(&switch_event, machine, event,
- this_cpu, sample->time, thread);
- curr_pid[this_cpu] = switch_event.next_pid;
+ if (sched->tp_handler->switch_event)
+ err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
+
+ sched->curr_pid[this_cpu] = next_pid;
+ return err;
}
-static void
-process_sched_runtime_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread)
+static int process_sched_runtime_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- void *data = sample->raw_data;
- struct trace_runtime_event runtime_event;
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- FILL_ARRAY(runtime_event, comm, event, data);
- FILL_FIELD(runtime_event, pid, event, data);
- FILL_FIELD(runtime_event, runtime, event, data);
- FILL_FIELD(runtime_event, vruntime, event, data);
+ if (sched->tp_handler->runtime_event)
+ return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
- if (trace_handler->runtime_event)
- trace_handler->runtime_event(&runtime_event, machine, event,
- sample->cpu, sample->time, thread);
+ return 0;
}
-static void
-process_sched_fork_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine __used,
- struct thread *thread)
+static int process_sched_fork_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
{
- void *data = sample->raw_data;
- struct trace_fork_event fork_event;
-
- FILL_COMMON_FIELDS(fork_event, event, data);
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- FILL_ARRAY(fork_event, parent_comm, event, data);
- FILL_FIELD(fork_event, parent_pid, event, data);
- FILL_ARRAY(fork_event, child_comm, event, data);
- FILL_FIELD(fork_event, child_pid, event, data);
+ if (sched->tp_handler->fork_event)
+ return sched->tp_handler->fork_event(sched, evsel, sample);
- if (trace_handler->fork_event)
- trace_handler->fork_event(&fork_event, event,
- sample->cpu, sample->time, thread);
+ return 0;
}
-static void
-process_sched_exit_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample __used,
- struct machine *machine __used,
- struct thread *thread __used)
+static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
- if (verbose)
- printf("sched_exit event %p\n", event);
+ pr_debug("sched_exit event %p\n", evsel);
+ return 0;
}
-static void
-process_sched_migrate_task_event(struct perf_tool *tool __used,
- struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread)
+static int process_sched_migrate_task_event(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
{
- void *data = sample->raw_data;
- struct trace_migrate_task_event migrate_task_event;
-
- FILL_COMMON_FIELDS(migrate_task_event, event, data);
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- FILL_ARRAY(migrate_task_event, comm, event, data);
- FILL_FIELD(migrate_task_event, pid, event, data);
- FILL_FIELD(migrate_task_event, prio, event, data);
- FILL_FIELD(migrate_task_event, cpu, event, data);
+ if (sched->tp_handler->migrate_task_event)
+ return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
- if (trace_handler->migrate_task_event)
- trace_handler->migrate_task_event(&migrate_task_event, machine,
- event, sample->cpu,
- sample->time, thread);
+ return 0;
}
-typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event,
- struct perf_sample *sample,
- struct machine *machine,
- struct thread *thread);
+typedef int (*tracepoint_handler)(struct perf_tool *tool,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine);
-static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
- union perf_event *event __used,
+static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
- struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- struct pevent *pevent = sched->session->pevent;
struct thread *thread = machine__findnew_thread(machine, sample->pid);
+ int err = 0;
if (thread == NULL) {
pr_debug("problem processing %s event, skipping it.\n",
@@ -1617,30 +1440,15 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
if (evsel->handler.func != NULL) {
tracepoint_handler f = evsel->handler.func;
-
- if (evsel->handler.data == NULL)
- evsel->handler.data = pevent_find_event(pevent,
- evsel->attr.config);
-
- f(tool, evsel->handler.data, sample, machine, thread);
+ err = f(tool, evsel, sample, machine);
}
- return 0;
+ return err;
}
-static struct perf_sched perf_sched = {
- .tool = {
- .sample = perf_sched__process_tracepoint_sample,
- .comm = perf_event__process_comm,
- .lost = perf_event__process_lost,
- .fork = perf_event__process_task,
- .ordered_samples = true,
- },
-};
-
-static void read_events(bool destroy, struct perf_session **psession)
+static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
+ struct perf_session **psession)
{
- int err = -EINVAL;
const struct perf_evsel_str_handler handlers[] = {
{ "sched:sched_switch", process_sched_switch_event, },
{ "sched:sched_stat_runtime", process_sched_runtime_event, },
@@ -1652,24 +1460,25 @@ static void read_events(bool destroy, struct perf_session **psession)
};
struct perf_session *session;
- session = perf_session__new(input_name, O_RDONLY, 0, false,
- &perf_sched.tool);
- if (session == NULL)
- die("No Memory");
-
- perf_sched.session = session;
+ session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool);
+ if (session == NULL) {
+ pr_debug("No Memory for session\n");
+ return -1;
+ }
- err = perf_session__set_tracepoints_handlers(session, handlers);
- assert(err == 0);
+ if (perf_session__set_tracepoints_handlers(session, handlers))
+ goto out_delete;
if (perf_session__has_traces(session, "record -R")) {
- err = perf_session__process_events(session, &perf_sched.tool);
- if (err)
- die("Failed to process events, error %d", err);
+ int err = perf_session__process_events(session, &sched->tool);
+ if (err) {
+ pr_err("Failed to process events, error %d", err);
+ goto out_delete;
+ }
- nr_events = session->hists.stats.nr_events[0];
- nr_lost_events = session->hists.stats.total_lost;
- nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
+ sched->nr_events = session->hists.stats.nr_events[0];
+ sched->nr_lost_events = session->hists.stats.total_lost;
+ sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
}
if (destroy)
@@ -1677,208 +1486,166 @@ static void read_events(bool destroy, struct perf_session **psession)
if (psession)
*psession = session;
+
+ return 0;
+
+out_delete:
+ perf_session__delete(session);
+ return -1;
}
-static void print_bad_events(void)
+static void print_bad_events(struct perf_sched *sched)
{
- if (nr_unordered_timestamps && nr_timestamps) {
+ if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
- (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
- nr_unordered_timestamps, nr_timestamps);
+ (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
+ sched->nr_unordered_timestamps, sched->nr_timestamps);
}
- if (nr_lost_events && nr_events) {
+ if (sched->nr_lost_events && sched->nr_events) {
printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
- (double)nr_lost_events/(double)nr_events*100.0,
- nr_lost_events, nr_events, nr_lost_chunks);
+ (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
+ sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
}
- if (nr_state_machine_bugs && nr_timestamps) {
+ if (sched->nr_state_machine_bugs && sched->nr_timestamps) {
printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
- (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
- nr_state_machine_bugs, nr_timestamps);
- if (nr_lost_events)
+ (double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0,
+ sched->nr_state_machine_bugs, sched->nr_timestamps);
+ if (sched->nr_lost_events)
printf(" (due to lost events?)");
printf("\n");
}
- if (nr_context_switch_bugs && nr_timestamps) {
+ if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
- (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
- nr_context_switch_bugs, nr_timestamps);
- if (nr_lost_events)
+ (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
+ sched->nr_context_switch_bugs, sched->nr_timestamps);
+ if (sched->nr_lost_events)
printf(" (due to lost events?)");
printf("\n");
}
}
-static void __cmd_lat(void)
+static int perf_sched__lat(struct perf_sched *sched)
{
struct rb_node *next;
struct perf_session *session;
setup_pager();
- read_events(false, &session);
- sort_lat();
+ if (perf_sched__read_events(sched, false, &session))
+ return -1;
+ perf_sched__sort_lat(sched);
printf("\n ---------------------------------------------------------------------------------------------------------------\n");
printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
printf(" ---------------------------------------------------------------------------------------------------------------\n");
- next = rb_first(&sorted_atom_root);
+ next = rb_first(&sched->sorted_atom_root);
while (next) {
struct work_atoms *work_list;
work_list = rb_entry(next, struct work_atoms, node);
- output_lat_thread(work_list);
+ output_lat_thread(sched, work_list);
next = rb_next(next);
}
printf(" -----------------------------------------------------------------------------------------\n");
printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
- (double)all_runtime/1e6, all_count);
+ (double)sched->all_runtime / 1e6, sched->all_count);
printf(" ---------------------------------------------------\n");
- print_bad_events();
+ print_bad_events(sched);
printf("\n");
perf_session__delete(session);
+ return 0;
}
-static struct trace_sched_handler map_ops = {
- .wakeup_event = NULL,
- .switch_event = map_switch_event,
- .runtime_event = NULL,
- .fork_event = NULL,
-};
-
-static void __cmd_map(void)
+static int perf_sched__map(struct perf_sched *sched)
{
- max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+ sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
setup_pager();
- read_events(true, NULL);
- print_bad_events();
+ if (perf_sched__read_events(sched, true, NULL))
+ return -1;
+ print_bad_events(sched);
+ return 0;
}
-static void __cmd_replay(void)
+static int perf_sched__replay(struct perf_sched *sched)
{
unsigned long i;
- calibrate_run_measurement_overhead();
- calibrate_sleep_measurement_overhead();
+ calibrate_run_measurement_overhead(sched);
+ calibrate_sleep_measurement_overhead(sched);
- test_calibrations();
+ test_calibrations(sched);
- read_events(true, NULL);
+ if (perf_sched__read_events(sched, true, NULL))
+ return -1;
- printf("nr_run_events: %ld\n", nr_run_events);
- printf("nr_sleep_events: %ld\n", nr_sleep_events);
- printf("nr_wakeup_events: %ld\n", nr_wakeup_events);
+ printf("nr_run_events: %ld\n", sched->nr_run_events);
+ printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
+ printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
- if (targetless_wakeups)
- printf("target-less wakeups: %ld\n", targetless_wakeups);
- if (multitarget_wakeups)
- printf("multi-target wakeups: %ld\n", multitarget_wakeups);
- if (nr_run_events_optimized)
+ if (sched->targetless_wakeups)
+ printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
+ if (sched->multitarget_wakeups)
+ printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
+ if (sched->nr_run_events_optimized)
printf("run atoms optimized: %ld\n",
- nr_run_events_optimized);
+ sched->nr_run_events_optimized);
- print_task_traces();
- add_cross_task_wakeups();
+ print_task_traces(sched);
+ add_cross_task_wakeups(sched);
- create_tasks();
+ create_tasks(sched);
printf("------------------------------------------------------------\n");
- for (i = 0; i < replay_repeat; i++)
- run_one_test();
-}
-
-
-static const char * const sched_usage[] = {
- "perf sched [<options>] {record|latency|map|replay|script}",
- NULL
-};
-
-static const struct option sched_options[] = {
- OPT_STRING('i', "input", &input_name, "file",
- "input file name"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_END()
-};
-
-static const char * const latency_usage[] = {
- "perf sched latency [<options>]",
- NULL
-};
-
-static const struct option latency_options[] = {
- OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- "sort by key(s): runtime, switch, avg, max"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_INTEGER('C', "CPU", &profile_cpu,
- "CPU to profile on"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_END()
-};
-
-static const char * const replay_usage[] = {
- "perf sched replay [<options>]",
- NULL
-};
+ for (i = 0; i < sched->replay_repeat; i++)
+ run_one_test(sched);
-static const struct option replay_options[] = {
- OPT_UINTEGER('r', "repeat", &replay_repeat,
- "repeat the workload replay N times (-1: infinite)"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_END()
-};
+ return 0;
+}
-static void setup_sorting(void)
+static void setup_sorting(struct perf_sched *sched, const struct option *options,
+ const char * const usage_msg[])
{
- char *tmp, *tok, *str = strdup(sort_order);
+ char *tmp, *tok, *str = strdup(sched->sort_order);
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
- if (sort_dimension__add(tok, &sort_list) < 0) {
+ if (sort_dimension__add(tok, &sched->sort_list) < 0) {
error("Unknown --sort key: `%s'", tok);
- usage_with_options(latency_usage, latency_options);
+ usage_with_options(usage_msg, options);
}
}
free(str);
- sort_dimension__add("pid", &cmp_pid);
+ sort_dimension__add("pid", &sched->cmp_pid);
}
-static const char *record_args[] = {
- "record",
- "-a",
- "-R",
- "-f",
- "-m", "1024",
- "-c", "1",
- "-e", "sched:sched_switch",
- "-e", "sched:sched_stat_wait",
- "-e", "sched:sched_stat_sleep",
- "-e", "sched:sched_stat_iowait",
- "-e", "sched:sched_stat_runtime",
- "-e", "sched:sched_process_exit",
- "-e", "sched:sched_process_fork",
- "-e", "sched:sched_wakeup",
- "-e", "sched:sched_migrate_task",
-};
-
static int __cmd_record(int argc, const char **argv)
{
unsigned int rec_argc, i, j;
const char **rec_argv;
+ const char * const record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "sched:sched_switch",
+ "-e", "sched:sched_stat_wait",
+ "-e", "sched:sched_stat_sleep",
+ "-e", "sched:sched_stat_iowait",
+ "-e", "sched:sched_stat_runtime",
+ "-e", "sched:sched_process_exit",
+ "-e", "sched:sched_process_fork",
+ "-e", "sched:sched_wakeup",
+ "-e", "sched:sched_migrate_task",
+ };
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
@@ -1897,8 +1664,85 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
-int cmd_sched(int argc, const char **argv, const char *prefix __used)
+int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
+ const char default_sort_order[] = "avg, max, switch, runtime";
+ struct perf_sched sched = {
+ .tool = {
+ .sample = perf_sched__process_tracepoint_sample,
+ .comm = perf_event__process_comm,
+ .lost = perf_event__process_lost,
+ .fork = perf_event__process_task,
+ .ordered_samples = true,
+ },
+ .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
+ .sort_list = LIST_HEAD_INIT(sched.sort_list),
+ .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .curr_pid = { [0 ... MAX_CPUS - 1] = -1 },
+ .sort_order = default_sort_order,
+ .replay_repeat = 10,
+ .profile_cpu = -1,
+ .next_shortname1 = 'A',
+ .next_shortname2 = '0',
+ };
+ const struct option latency_options[] = {
+ OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
+ "sort by key(s): runtime, switch, avg, max"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_INTEGER('C', "CPU", &sched.profile_cpu,
+ "CPU to profile on"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+ };
+ const struct option replay_options[] = {
+ OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
+ "repeat the workload replay N times (-1: infinite)"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+ };
+ const struct option sched_options[] = {
+ OPT_STRING('i', "input", &sched.input_name, "file",
+ "input file name"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+ };
+ const char * const latency_usage[] = {
+ "perf sched latency [<options>]",
+ NULL
+ };
+ const char * const replay_usage[] = {
+ "perf sched replay [<options>]",
+ NULL
+ };
+ const char * const sched_usage[] = {
+ "perf sched [<options>] {record|latency|map|replay|script}",
+ NULL
+ };
+ struct trace_sched_handler lat_ops = {
+ .wakeup_event = latency_wakeup_event,
+ .switch_event = latency_switch_event,
+ .runtime_event = latency_runtime_event,
+ .fork_event = latency_fork_event,
+ .migrate_task_event = latency_migrate_task_event,
+ };
+ struct trace_sched_handler map_ops = {
+ .switch_event = map_switch_event,
+ };
+ struct trace_sched_handler replay_ops = {
+ .wakeup_event = replay_wakeup_event,
+ .switch_event = replay_switch_event,
+ .fork_event = replay_fork_event,
+ };
+
argc = parse_options(argc, argv, sched_options, sched_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
@@ -1914,26 +1758,26 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
} else if (!strncmp(argv[0], "lat", 3)) {
- trace_handler = &lat_ops;
+ sched.tp_handler = &lat_ops;
if (argc > 1) {
argc = parse_options(argc, argv, latency_options, latency_usage, 0);
if (argc)
usage_with_options(latency_usage, latency_options);
}
- setup_sorting();
- __cmd_lat();
+ setup_sorting(&sched, latency_options, latency_usage);
+ return perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
- trace_handler = &map_ops;
- setup_sorting();
- __cmd_map();
+ sched.tp_handler = &map_ops;
+ setup_sorting(&sched, latency_options, latency_usage);
+ return perf_sched__map(&sched);
} else if (!strncmp(argv[0], "rep", 3)) {
- trace_handler = &replay_ops;
+ sched.tp_handler = &replay_ops;
if (argc) {
argc = parse_options(argc, argv, replay_options, replay_usage, 0);
if (argc)
usage_with_options(replay_usage, replay_options);
}
- __cmd_replay();
+ return perf_sched__replay(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
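
The builtin-sched.c conversion above replaces the file-scope globals with a struct perf_sched that embeds the struct perf_tool, so each tracepoint callback recovers its per-invocation state via container_of() on the tool pointer instead of touching globals. Below is a minimal standalone sketch of that pattern; struct tool, struct sched_state and handle_sample() are simplified stand-ins for illustration, not the real perf types.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for struct perf_tool / struct perf_sched. */
struct tool {
	int (*sample)(struct tool *tool, int value);
};

struct sched_state {
	struct tool tool;        /* embedded, passed to callbacks */
	unsigned long nr_events; /* former global, now per-instance */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The callback only sees the embedded tool, like the handlers above. */
static int handle_sample(struct tool *tool, int value)
{
	struct sched_state *s = container_of(tool, struct sched_state, tool);

	s->nr_events++;
	printf("sample %d (total %lu)\n", value, s->nr_events);
	return 0;
}

int main(void)
{
	struct sched_state sched = {
		.tool = { .sample = handle_sample },
	};

	return sched.tool.sample(&sched.tool, 42);
}
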
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 1e60ab70b2b1..1be843aa1546 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -14,6 +14,7 @@
#include "util/util.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/sort.h"
#include <linux/bitmap.h>
static char const *script_name;
@@ -28,11 +29,6 @@ static bool system_wide;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
-struct perf_script {
- struct perf_tool tool;
- struct perf_session *session;
-};
-
enum perf_output_field {
PERF_OUTPUT_COMM = 1U << 0,
PERF_OUTPUT_TID = 1U << 1,
@@ -262,14 +258,11 @@ static int perf_session__check_output_opt(struct perf_session *session)
return 0;
}
-static void print_sample_start(struct pevent *pevent,
- struct perf_sample *sample,
+static void print_sample_start(struct perf_sample *sample,
struct thread *thread,
struct perf_evsel *evsel)
{
- int type;
struct perf_event_attr *attr = &evsel->attr;
- struct event_format *event;
const char *evname = NULL;
unsigned long secs;
unsigned long usecs;
@@ -307,20 +300,7 @@ static void print_sample_start(struct pevent *pevent,
}
if (PRINT_FIELD(EVNAME)) {
- if (attr->type == PERF_TYPE_TRACEPOINT) {
- /*
- * XXX Do we really need this here?
- * perf_evlist__set_tracepoint_names should have done
- * this already
- */
- type = trace_parse_common_type(pevent,
- sample->raw_data);
- event = pevent_find_event(pevent, type);
- if (event)
- evname = event->name;
- } else
- evname = perf_evsel__name(evsel);
-
+ evname = perf_evsel__name(evsel);
printf("%s: ", evname ? evname : "[unknown]");
}
}
@@ -401,7 +381,7 @@ static void print_sample_bts(union perf_event *event,
printf(" ");
else
printf("\n");
- perf_event__print_ip(event, sample, machine,
+ perf_evsel__print_ip(evsel, event, sample, machine,
PRINT_FIELD(SYM), PRINT_FIELD(DSO),
PRINT_FIELD(SYMOFFSET));
}
@@ -415,19 +395,17 @@ static void print_sample_bts(union perf_event *event,
printf("\n");
}
-static void process_event(union perf_event *event __unused,
- struct pevent *pevent,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct machine *machine,
- struct thread *thread)
+static void process_event(union perf_event *event, struct perf_sample *sample,
+ struct perf_evsel *evsel, struct machine *machine,
+ struct addr_location *al)
{
struct perf_event_attr *attr = &evsel->attr;
+ struct thread *thread = al->thread;
if (output[attr->type].fields == 0)
return;
- print_sample_start(pevent, sample, thread, evsel);
+ print_sample_start(sample, thread, evsel);
if (is_bts_event(attr)) {
print_sample_bts(event, sample, evsel, machine, thread);
@@ -435,9 +413,8 @@ static void process_event(union perf_event *event __unused,
}
if (PRINT_FIELD(TRACE))
- print_trace_event(pevent, sample->cpu, sample->raw_data,
- sample->raw_size);
-
+ event_format__print(evsel->tp_format, sample->cpu,
+ sample->raw_data, sample->raw_size);
if (PRINT_FIELD(ADDR))
print_sample_addr(event, sample, machine, thread, attr);
@@ -446,7 +423,7 @@ static void process_event(union perf_event *event __unused,
printf(" ");
else
printf("\n");
- perf_event__print_ip(event, sample, machine,
+ perf_evsel__print_ip(evsel, event, sample, machine,
PRINT_FIELD(SYM), PRINT_FIELD(DSO),
PRINT_FIELD(SYMOFFSET));
}
@@ -454,9 +431,9 @@ static void process_event(union perf_event *event __unused,
printf("\n");
}
-static int default_start_script(const char *script __unused,
- int argc __unused,
- const char **argv __unused)
+static int default_start_script(const char *script __maybe_unused,
+ int argc __maybe_unused,
+ const char **argv __maybe_unused)
{
return 0;
}
@@ -466,8 +443,8 @@ static int default_stop_script(void)
return 0;
}
-static int default_generate_script(struct pevent *pevent __unused,
- const char *outfile __unused)
+static int default_generate_script(struct pevent *pevent __maybe_unused,
+ const char *outfile __maybe_unused)
{
return 0;
}
@@ -498,14 +475,13 @@ static int cleanup_scripting(void)
static const char *input_name;
-static int process_sample_event(struct perf_tool *tool __used,
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
struct addr_location al;
- struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
if (thread == NULL) {
@@ -537,32 +513,29 @@ static int process_sample_event(struct perf_tool *tool __used,
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
- scripting_ops->process_event(event, scr->session->pevent,
- sample, evsel, machine, thread);
+ scripting_ops->process_event(event, sample, evsel, machine, &al);
evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_script perf_script = {
- .tool = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_task,
- .fork = perf_event__process_task,
- .attr = perf_event__process_attr,
- .event_type = perf_event__process_event_type,
- .tracing_data = perf_event__process_tracing_data,
- .build_id = perf_event__process_build_id,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
- },
+static struct perf_tool perf_script = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .attr = perf_event__process_attr,
+ .event_type = perf_event__process_event_type,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
};
extern volatile int session_done;
-static void sig_handler(int sig __unused)
+static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
@@ -573,7 +546,7 @@ static int __cmd_script(struct perf_session *session)
signal(SIGINT, sig_handler);
- ret = perf_session__process_events(session, &perf_script.tool);
+ ret = perf_session__process_events(session, &perf_script);
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -672,8 +645,8 @@ static void list_available_languages(void)
fprintf(stderr, "\n");
}
-static int parse_scriptname(const struct option *opt __used,
- const char *str, int unset __used)
+static int parse_scriptname(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
{
char spec[PATH_MAX];
const char *script, *ext;
@@ -718,8 +691,8 @@ static int parse_scriptname(const struct option *opt __used,
return 0;
}
-static int parse_output_fields(const struct option *opt __used,
- const char *arg, int unset __used)
+static int parse_output_fields(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
{
char *tok;
int i, imax = sizeof(all_output_options) / sizeof(struct output_option);
@@ -1010,8 +983,9 @@ static char *get_script_root(struct dirent *script_dirent, const char *suffix)
return script_root;
}
-static int list_available_scripts(const struct option *opt __used,
- const char *s __used, int unset __used)
+static int list_available_scripts(const struct option *opt __maybe_unused,
+ const char *s __maybe_unused,
+ int unset __maybe_unused)
{
struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
char scripts_path[MAXPATHLEN];
@@ -1058,6 +1032,61 @@ static int list_available_scripts(const struct option *opt __used,
exit(0);
}
+/*
+ * Return -1 if none is found, otherwise the actual scripts number.
+ *
+ * Currently the only user of this function is the script browser, which
+ * will list all statically runnable scripts, select one, execute it and
+ * show the output in a perf browser.
+ */
+int find_scripts(char **scripts_array, char **scripts_path_array)
+{
+ struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ char scripts_path[MAXPATHLEN];
+ DIR *scripts_dir, *lang_dir;
+ char lang_path[MAXPATHLEN];
+ char *temp;
+ int i = 0;
+
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+
+ scripts_dir = opendir(scripts_path);
+ if (!scripts_dir)
+ return -1;
+
+ for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+ snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
+ lang_dirent.d_name);
+#ifdef NO_LIBPERL
+ if (strstr(lang_path, "perl"))
+ continue;
+#endif
+#ifdef NO_LIBPYTHON
+ if (strstr(lang_path, "python"))
+ continue;
+#endif
+
+ lang_dir = opendir(lang_path);
+ if (!lang_dir)
+ continue;
+
+ for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+ /* Skip those real time scripts: xxxtop.p[yl] */
+ if (strstr(script_dirent.d_name, "top."))
+ continue;
+ sprintf(scripts_path_array[i], "%s/%s", lang_path,
+ script_dirent.d_name);
+ temp = strchr(script_dirent.d_name, '.');
+ snprintf(scripts_array[i],
+ (temp - script_dirent.d_name) + 1,
+ "%s", script_dirent.d_name);
+ i++;
+ }
+ }
+
+ return i;
+}
+
static char *get_script_path(const char *script_root, const char *suffix)
{
struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
@@ -1170,6 +1199,8 @@ static const struct option options[] = {
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
+ OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only display events for these comms"),
@@ -1181,21 +1212,26 @@ static const struct option options[] = {
OPT_END()
};
-static bool have_cmd(int argc, const char **argv)
+static int have_cmd(int argc, const char **argv)
{
char **__argv = malloc(sizeof(const char *) * argc);
- if (!__argv)
- die("malloc");
+ if (!__argv) {
+ pr_err("malloc failed\n");
+ return -1;
+ }
+
memcpy(__argv, argv, sizeof(const char *) * argc);
argc = parse_options(argc, (const char **)__argv, record_options,
NULL, PARSE_OPT_STOP_AT_NON_OPTION);
free(__argv);
- return argc != 0;
+ system_wide = (argc == 0);
+
+ return 0;
}
-int cmd_script(int argc, const char **argv, const char *prefix __used)
+int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
{
char *rec_script_path = NULL;
char *rep_script_path = NULL;
@@ -1259,13 +1295,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (pipe(live_pipe) < 0) {
perror("failed to create pipe");
- exit(-1);
+ return -1;
}
pid = fork();
if (pid < 0) {
perror("failed to fork");
- exit(-1);
+ return -1;
}
if (!pid) {
@@ -1277,13 +1313,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (is_top_script(argv[0])) {
system_wide = true;
} else if (!system_wide) {
- system_wide = !have_cmd(argc - rep_args,
- &argv[rep_args]);
+ if (have_cmd(argc - rep_args, &argv[rep_args]) != 0) {
+ err = -1;
+ goto out;
+ }
}
__argv = malloc((argc + 6) * sizeof(const char *));
- if (!__argv)
- die("malloc");
+ if (!__argv) {
+ pr_err("malloc failed\n");
+ err = -ENOMEM;
+ goto out;
+ }
__argv[j++] = "/bin/sh";
__argv[j++] = rec_script_path;
@@ -1305,8 +1346,12 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
close(live_pipe[1]);
__argv = malloc((argc + 4) * sizeof(const char *));
- if (!__argv)
- die("malloc");
+ if (!__argv) {
+ pr_err("malloc failed\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
j = 0;
__argv[j++] = "/bin/sh";
__argv[j++] = rep_script_path;
@@ -1331,12 +1376,20 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (!rec_script_path)
system_wide = false;
- else if (!system_wide)
- system_wide = !have_cmd(argc - 1, &argv[1]);
+ else if (!system_wide) {
+ if (have_cmd(argc - 1, &argv[1]) != 0) {
+ err = -1;
+ goto out;
+ }
+ }
__argv = malloc((argc + 2) * sizeof(const char *));
- if (!__argv)
- die("malloc");
+ if (!__argv) {
+ pr_err("malloc failed\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
__argv[j++] = "/bin/sh";
__argv[j++] = script_path;
if (system_wide)
@@ -1356,12 +1409,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
setup_pager();
session = perf_session__new(input_name, O_RDONLY, 0, false,
- &perf_script.tool);
+ &perf_script);
if (session == NULL)
return -ENOMEM;
- perf_script.session = session;
-
if (cpu_list) {
if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap))
return -1;
@@ -1387,18 +1438,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
input = open(session->filename, O_RDONLY); /* input_name */
if (input < 0) {
perror("failed to open file");
- exit(-1);
+ return -1;
}
err = fstat(input, &perf_stat);
if (err < 0) {
perror("failed to stat file");
- exit(-1);
+ return -1;
}
if (!perf_stat.st_size) {
fprintf(stderr, "zero-sized file, nothing to do!\n");
- exit(0);
+ return 0;
}
scripting_ops = script_spec__lookup(generate_script_lang);
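
The find_scripts() helper added above returns -1 when the scripts directory cannot be opened and otherwise the number of scripts it filled into the caller-supplied arrays, as its comment notes for the script browser. A hedged sketch of such a caller, to be linked against the function above, follows; the buffer sizes and the list_runnable_scripts() name are illustrative assumptions, not values taken from perf.

#include <stdio.h>

/* Prototype as added to builtin-script.c above. */
int find_scripts(char **scripts_array, char **scripts_path_array);

/* Illustrative limits; find_scripts() assumes the arrays are large enough. */
#define MAX_SCRIPTS	32
#define NAME_LEN	64
#define PATH_LEN	256

static int list_runnable_scripts(void)
{
	static char name_buf[MAX_SCRIPTS][NAME_LEN];
	static char path_buf[MAX_SCRIPTS][PATH_LEN];
	char *names[MAX_SCRIPTS], *paths[MAX_SCRIPTS];
	int i, num;

	for (i = 0; i < MAX_SCRIPTS; i++) {
		names[i] = name_buf[i];
		paths[i] = path_buf[i];
	}

	num = find_scripts(names, paths);
	if (num < 0)
		return -1;	/* scripts directory not found */

	for (i = 0; i < num; i++)
		printf("%s -> %s\n", names[i], paths[i]);

	return num;
}
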
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 861f0aec77ae..e8cd4d81b06e 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -51,13 +51,13 @@
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
+#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include <sys/prctl.h>
-#include <math.h>
#include <locale.h>
#define DEFAULT_SEPARATOR " "
@@ -199,11 +199,6 @@ static int output_fd;
static volatile int done = 0;
-struct stats
-{
- double n, mean, M2;
-};
-
struct perf_stat {
struct stats res_stats[3];
};
@@ -220,48 +215,14 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
evsel->priv = NULL;
}
-static void update_stats(struct stats *stats, u64 val)
-{
- double delta;
-
- stats->n++;
- delta = val - stats->mean;
- stats->mean += delta / stats->n;
- stats->M2 += delta*(val - stats->mean);
-}
-
-static double avg_stats(struct stats *stats)
+static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
- return stats->mean;
+ return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
}
-/*
- * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
- *
- * (\Sum n_i^2) - ((\Sum n_i)^2)/n
- * s^2 = -------------------------------
- * n - 1
- *
- * http://en.wikipedia.org/wiki/Stddev
- *
- * The std dev of the mean is related to the std dev by:
- *
- * s
- * s_mean = -------
- * sqrt(n)
- *
- */
-static double stddev_stats(struct stats *stats)
+static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
- double variance, variance_mean;
-
- if (!stats->n)
- return 0.0;
-
- variance = stats->M2 / (stats->n - 1);
- variance_mean = variance / stats->n;
-
- return sqrt(variance_mean);
+ return perf_evsel__cpus(evsel)->nr;
}
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
@@ -281,13 +242,9 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_evsel *first)
{
struct perf_event_attr *attr = &evsel->attr;
- struct xyarray *group_fd = NULL;
bool exclude_guest_missing = false;
int ret;
- if (group && evsel != first)
- group_fd = first->fd;
-
if (scale)
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
@@ -299,8 +256,7 @@ retry:
evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
if (perf_target__has_cpu(&target)) {
- ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
- group, group_fd);
+ ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
if (ret)
goto check_ret;
return 0;
@@ -311,8 +267,7 @@ retry:
attr->enable_on_exec = 1;
}
- ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
- group, group_fd);
+ ret = perf_evsel__open_per_thread(evsel, evsel_list->threads);
if (!ret)
return 0;
/* fall through */
@@ -382,7 +337,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
u64 *count = counter->counts->aggr.values;
int i;
- if (__perf_evsel__read(counter, evsel_list->cpus->nr,
+ if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter),
evsel_list->threads->nr, scale) < 0)
return -1;
@@ -411,7 +366,7 @@ static int read_counter(struct perf_evsel *counter)
u64 *count;
int cpu;
- for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
return -1;
@@ -423,7 +378,7 @@ static int read_counter(struct perf_evsel *counter)
return 0;
}
-static int run_perf_stat(int argc __used, const char **argv)
+static int run_perf_stat(int argc __maybe_unused, const char **argv)
{
unsigned long long t0, t1;
struct perf_evsel *counter, *first;
@@ -434,7 +389,7 @@ static int run_perf_stat(int argc __used, const char **argv)
if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
perror("failed to create pipes");
- exit(1);
+ return -1;
}
if (forks) {
@@ -483,7 +438,10 @@ static int run_perf_stat(int argc __used, const char **argv)
close(child_ready_pipe[0]);
}
- first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
+ if (group)
+ perf_evlist__set_leader(evsel_list);
+
+ first = perf_evlist__first(evsel_list);
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
@@ -513,13 +471,14 @@ static int run_perf_stat(int argc __used, const char **argv)
}
if (child_pid != -1)
kill(child_pid, SIGTERM);
- die("Not all events could be opened.\n");
+
+ pr_err("Not all events could be opened.\n");
return -1;
}
counter->supported = true;
}
- if (perf_evlist__set_filters(evsel_list)) {
+ if (perf_evlist__apply_filters(evsel_list)) {
error("failed to set filter with %d (%s)\n", errno,
strerror(errno));
return -1;
@@ -546,12 +505,12 @@ static int run_perf_stat(int argc __used, const char **argv)
if (no_aggr) {
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter(counter);
- perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
+ perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
}
} else {
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter_aggr(counter);
- perf_evsel__close_fd(counter, evsel_list->cpus->nr,
+ perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
evsel_list->threads->nr);
}
}
@@ -561,10 +520,7 @@ static int run_perf_stat(int argc __used, const char **argv)
static void print_noise_pct(double total, double avg)
{
- double pct = 0.0;
-
- if (avg)
- pct = 100.0*total/avg;
+ double pct = rel_stddev_stats(total, avg);
if (csv_output)
fprintf(output, "%s%.2f%%", csv_sep, pct);
@@ -592,7 +548,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
if (no_aggr)
sprintf(cpustr, "CPU%*d%s",
csv_output ? 0 : -4,
- evsel_list->cpus->map[cpu], csv_sep);
+ perf_evsel__cpus(evsel)->map[cpu], csv_sep);
fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel));
@@ -636,7 +592,9 @@ static const char *get_ratio_color(enum grc_type type, double ratio)
return color;
}
-static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_stalled_cycles_frontend(int cpu,
+ struct perf_evsel *evsel
+ __maybe_unused, double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -653,7 +611,9 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us
fprintf(output, " frontend cycles idle ");
}
-static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_stalled_cycles_backend(int cpu,
+ struct perf_evsel *evsel
+ __maybe_unused, double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -670,7 +630,9 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use
fprintf(output, " backend cycles idle ");
}
-static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_branch_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -687,7 +649,9 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double
fprintf(output, " of all branches ");
}
-static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_l1_dcache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -704,7 +668,9 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou
fprintf(output, " of all L1-dcache hits ");
}
-static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_l1_icache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -721,7 +687,9 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou
fprintf(output, " of all L1-icache hits ");
}
-static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_dtlb_cache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -738,7 +706,9 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do
fprintf(output, " of all dTLB cache hits ");
}
-static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_itlb_cache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -755,7 +725,9 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do
fprintf(output, " of all iTLB cache hits ");
}
-static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
+static void print_ll_cache_misses(int cpu,
+ struct perf_evsel *evsel __maybe_unused,
+ double avg)
{
double total, ratio = 0.0;
const char *color;
@@ -788,7 +760,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
if (no_aggr)
sprintf(cpustr, "CPU%*d%s",
csv_output ? 0 : -4,
- evsel_list->cpus->map[cpu], csv_sep);
+ perf_evsel__cpus(evsel)->map[cpu], csv_sep);
else
cpu = 0;
@@ -949,14 +921,14 @@ static void print_counter(struct perf_evsel *counter)
u64 ena, run, val;
int cpu;
- for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
val = counter->counts->cpu[cpu].val;
ena = counter->counts->cpu[cpu].ena;
run = counter->counts->cpu[cpu].run;
if (run == 0 || ena == 0) {
fprintf(output, "CPU%*d%s%*s%s%*s",
csv_output ? 0 : -4,
- evsel_list->cpus->map[cpu], csv_sep,
+ perf_evsel__cpus(counter)->map[cpu], csv_sep,
csv_output ? 0 : 18,
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
csv_sep,
@@ -1061,8 +1033,8 @@ static const char * const stat_usage[] = {
NULL
};
-static int stat__set_big_num(const struct option *opt __used,
- const char *s __used, int unset)
+static int stat__set_big_num(const struct option *opt __maybe_unused,
+ const char *s __maybe_unused, int unset)
{
big_num_opt = unset ? 0 : 1;
return 0;
@@ -1156,7 +1128,7 @@ static int add_default_attributes(void)
return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
-int cmd_stat(int argc, const char **argv, const char *prefix __used)
+int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_evsel *pos;
int status = -ENOMEM;
@@ -1192,7 +1164,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
output = fopen(output_name, mode);
if (!output) {
perror("failed to create output file");
- exit(-1);
+ return -1;
}
clock_gettime(CLOCK_REALTIME, &tm);
fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
@@ -1255,7 +1227,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
- perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0)
+ perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0)
goto out_free_fd;
}
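
The update_stats()/avg_stats()/stddev_stats() helpers and the struct stats definition removed above now come from util/stat.h (included at the top of this file's diff), and print_noise_pct() switches to a rel_stddev_stats() helper. For reference, here is a standalone sketch of the same algorithm: Welford's online mean/variance update, the standard error of the mean that stddev_stats() returned, and a relative form matching the old 100.0*total/avg computation. The exact util/stat.h prototypes may differ slightly; this only mirrors the code deleted here. Build with -lm for sqrt().

#include <math.h>
#include <stdio.h>

struct stats {
	double n, mean, M2;
};

/* Welford's online update: single pass, numerically stable. */
static void update_stats(struct stats *stats, double val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta * (val - stats->mean);
}

static double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/* Standard error of the mean: s / sqrt(n), with s^2 = M2 / (n - 1). */
static double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)	/* guard n < 2; the original checked only n == 0 */
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;
	return sqrt(variance_mean);
}

/* Relative noise as printed by print_noise_pct(): 100 * stddev / mean. */
static double rel_stddev_stats(double stddev, double avg)
{
	return avg ? 100.0 * stddev / avg : 0.0;
}

int main(void)
{
	struct stats s = { 0 };
	double samples[] = { 100.0, 103.0, 98.0, 101.0, 99.0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		update_stats(&s, samples[i]);

	printf("avg %.2f +- %.2f%%\n", avg_stats(&s),
	       rel_stddev_stats(stddev_stats(&s), avg_stats(&s)));
	return 0;
}
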
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1d592f5cbea9..484f26cc0c00 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -14,11 +14,13 @@
#include "util/symbol.h"
#include "util/thread_map.h"
#include "util/pmu.h"
+#include "event-parse.h"
#include "../../include/linux/hw_breakpoint.h"
#include <sys/mman.h>
-static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
+static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
+ struct symbol *sym)
{
bool *visited = symbol__priv(sym);
*visited = true;
@@ -294,7 +296,7 @@ static int test__open_syscall_event(void)
goto out_thread_map_delete;
}
- if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
+ if (perf_evsel__open_per_thread(evsel, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -369,7 +371,7 @@ static int test__open_syscall_event_on_all_cpus(void)
goto out_thread_map_delete;
}
- if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
+ if (perf_evsel__open(evsel, cpus, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -533,7 +535,7 @@ static int test__basic_mmap(void)
perf_evlist__add(evlist, evsels[i]);
- if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
+ if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -562,7 +564,7 @@ static int test__basic_mmap(void)
goto out_munmap;
}
- err = perf_evlist__parse_sample(evlist, event, &sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &sample);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
goto out_munmap;
@@ -710,7 +712,7 @@ static int test__PERF_RECORD(void)
/*
* Config the evsels, setting attr->comm on the first one, etc.
*/
- evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+ evsel = perf_evlist__first(evlist);
evsel->attr.sample_type |= PERF_SAMPLE_CPU;
evsel->attr.sample_type |= PERF_SAMPLE_TID;
evsel->attr.sample_type |= PERF_SAMPLE_TIME;
@@ -737,7 +739,7 @@ static int test__PERF_RECORD(void)
* Call sys_perf_event_open on all the fds on all the evsels,
* grouping them if asked to.
*/
- err = perf_evlist__open(evlist, opts.group);
+ err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n", strerror(errno));
goto out_delete_evlist;
@@ -779,7 +781,7 @@ static int test__PERF_RECORD(void)
if (type < PERF_RECORD_MAX)
nr_events[type]++;
- err = perf_evlist__parse_sample(evlist, event, &sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &sample);
if (err < 0) {
if (verbose)
perf_event__fprintf(event, stderr);
@@ -996,7 +998,9 @@ static u64 mmap_read_self(void *addr)
/*
* If the RDPMC instruction faults then signal this back to the test parent task:
*/
-static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used)
+static void segfault_handler(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *uc __maybe_unused)
{
exit(-1);
}
@@ -1023,14 +1027,16 @@ static int __test__rdpmc(void)
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd < 0) {
- die("Error: sys_perf_event_open() syscall returned "
- "with %d (%s)\n", fd, strerror(errno));
+ pr_err("Error: sys_perf_event_open() syscall returned "
+ "with %d (%s)\n", fd, strerror(errno));
+ return -1;
}
addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
if (addr == (void *)(-1)) {
- die("Error: mmap() syscall returned "
- "with (%s)\n", strerror(errno));
+ pr_err("Error: mmap() syscall returned with (%s)\n",
+ strerror(errno));
+ goto out_close;
}
for (n = 0; n < 6; n++) {
@@ -1051,9 +1057,9 @@ static int __test__rdpmc(void)
}
munmap(addr, page_size);
- close(fd);
-
pr_debug(" ");
+out_close:
+ close(fd);
if (!delta_sum)
return -1;
@@ -1092,6 +1098,309 @@ static int test__perf_pmu(void)
return perf_pmu__test();
}
+static int perf_evsel__roundtrip_cache_name_test(void)
+{
+ char name[128];
+ int type, op, err = 0, ret = 0, i, idx;
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type */
+ if (!perf_evsel__is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ __perf_evsel__hw_cache_type_op_res_name(type, op, i,
+ name, sizeof(name));
+ err = parse_events(evlist, name, 0);
+ if (err)
+ ret = err;
+ }
+ }
+ }
+
+ idx = 0;
+ evsel = perf_evlist__first(evlist);
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type */
+ if (!perf_evsel__is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ __perf_evsel__hw_cache_type_op_res_name(type, op, i,
+ name, sizeof(name));
+ if (evsel->idx != idx)
+ continue;
+
+ ++idx;
+
+ if (strcmp(perf_evsel__name(evsel), name)) {
+ pr_debug("%s != %s\n", perf_evsel__name(evsel), name);
+ ret = -1;
+ }
+
+ evsel = perf_evsel__next(evsel);
+ }
+ }
+ }
+
+ perf_evlist__delete(evlist);
+ return ret;
+}
+
+static int __perf_evsel__name_array_test(const char *names[], int nr_names)
+{
+ int i, err;
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_names; ++i) {
+ err = parse_events(evlist, names[i], 0);
+ if (err) {
+ pr_debug("failed to parse event '%s', err %d\n",
+ names[i], err);
+ goto out_delete_evlist;
+ }
+ }
+
+ err = 0;
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
+ --err;
+ pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
+ }
+ }
+
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+ return err;
+}
+
+#define perf_evsel__name_array_test(names) \
+ __perf_evsel__name_array_test(names, ARRAY_SIZE(names))
+
+static int perf_evsel__roundtrip_name_test(void)
+{
+ int err = 0, ret = 0;
+
+ err = perf_evsel__name_array_test(perf_evsel__hw_names);
+ if (err)
+ ret = err;
+
+ err = perf_evsel__name_array_test(perf_evsel__sw_names);
+ if (err)
+ ret = err;
+
+ err = perf_evsel__roundtrip_cache_name_test();
+ if (err)
+ ret = err;
+
+ return ret;
+}
+
+static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
+ int size, bool should_be_signed)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+ int is_signed;
+ int ret = 0;
+
+ if (field == NULL) {
+ pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
+ return -1;
+ }
+
+	is_signed = !!(field->flags & FIELD_IS_SIGNED);
+ if (should_be_signed && !is_signed) {
+ pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
+ evsel->name, name, is_signed, should_be_signed);
+ ret = -1;
+ }
+
+ if (field->size != size) {
+ pr_debug("%s: \"%s\" size (%d) should be %d!\n",
+ evsel->name, name, field->size, size);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int perf_evsel__tp_sched_test(void)
+{
+ struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);
+ int ret = 0;
+
+ if (evsel == NULL) {
+ pr_debug("perf_evsel__new\n");
+ return -1;
+ }
+
+ if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "prev_state", 8, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "next_comm", 16, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "next_pid", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "next_prio", 4, true))
+ ret = -1;
+
+ perf_evsel__delete(evsel);
+
+ evsel = perf_evsel__newtp("sched", "sched_wakeup", 0);
+
+ if (evsel == NULL) {
+ pr_debug("perf_evsel__newtp\n");
+ return -1;
+ }
+
+ if (perf_evsel__test_field(evsel, "comm", 16, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "pid", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "prio", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "success", 4, true))
+ ret = -1;
+
+ if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
+ ret = -1;
+
+ return ret;
+}
+
+static int test__syscall_open_tp_fields(void)
+{
+ struct perf_record_opts opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .no_delay = true,
+ .freq = 1,
+ .mmap_pages = 256,
+ .raw_samples = true,
+ };
+ const char *filename = "/etc/passwd";
+ int flags = O_RDONLY | O_DIRECTORY;
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+ struct perf_evsel *evsel;
+ int err = -1, i, nr_events = 0, nr_polls = 0;
+
+ if (evlist == NULL) {
+ pr_debug("%s: perf_evlist__new\n", __func__);
+ goto out;
+ }
+
+ evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
+ if (evsel == NULL) {
+ pr_debug("%s: perf_evsel__newtp\n", __func__);
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__add(evlist, evsel);
+
+ err = perf_evlist__create_maps(evlist, &opts.target);
+ if (err < 0) {
+ pr_debug("%s: perf_evlist__create_maps\n", __func__);
+ goto out_delete_evlist;
+ }
+
+ perf_evsel__config(evsel, &opts, evsel);
+
+ evlist->threads->map[0] = getpid();
+
+ err = perf_evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ if (err < 0) {
+ pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__enable(evlist);
+
+ /*
+ * Generate the event:
+ */
+ open(filename, flags);
+
+ while (1) {
+ int before = nr_events;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ union perf_event *event;
+
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ const u32 type = event->header.type;
+ int tp_flags;
+ struct perf_sample sample;
+
+ ++nr_events;
+
+ if (type != PERF_RECORD_SAMPLE)
+ continue;
+
+ err = perf_evsel__parse_sample(evsel, event, &sample);
+ if (err) {
+ pr_err("Can't parse sample, err = %d\n", err);
+ goto out_munmap;
+ }
+
+ tp_flags = perf_evsel__intval(evsel, &sample, "flags");
+
+ if (flags != tp_flags) {
+ pr_debug("%s: Expected flags=%#x, got %#x\n",
+ __func__, flags, tp_flags);
+ goto out_munmap;
+ }
+
+ goto out_ok;
+ }
+ }
+
+ if (nr_events == before)
+ poll(evlist->pollfd, evlist->nr_fds, 10);
+
+ if (++nr_polls > 5) {
+ pr_debug("%s: no events!\n", __func__);
+ goto out_munmap;
+ }
+ }
+out_ok:
+ err = 0;
+out_munmap:
+ perf_evlist__munmap(evlist);
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+out:
+ return err;
+}
+
static struct test {
const char *desc;
int (*func)(void);
@@ -1135,6 +1444,18 @@ static struct test {
.func = dso__test_data,
},
{
+ .desc = "roundtrip evsel->name check",
+ .func = perf_evsel__roundtrip_name_test,
+ },
+ {
+ .desc = "Check parsing of sched tracepoints fields",
+ .func = perf_evsel__tp_sched_test,
+ },
+ {
+ .desc = "Generate and check syscalls:sys_enter_open event fields",
+ .func = test__syscall_open_tp_fields,
+ },
+ {
.func = NULL,
},
};
@@ -1199,7 +1520,7 @@ static int perf_test__list(int argc, const char **argv)
return 0;
}
-int cmd_test(int argc, const char **argv, const char *prefix __used)
+int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char * const test_usage[] = {
"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 3b75b2e21ea5..b1a8a3b841cc 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -168,9 +168,8 @@ static struct per_pid *find_create_pid(int pid)
return cursor;
cursor = cursor->next;
}
- cursor = malloc(sizeof(struct per_pid));
+ cursor = zalloc(sizeof(*cursor));
assert(cursor != NULL);
- memset(cursor, 0, sizeof(struct per_pid));
cursor->pid = pid;
cursor->next = all_data;
all_data = cursor;
@@ -195,9 +194,8 @@ static void pid_set_comm(int pid, char *comm)
}
c = c->next;
}
- c = malloc(sizeof(struct per_pidcomm));
+ c = zalloc(sizeof(*c));
assert(c != NULL);
- memset(c, 0, sizeof(struct per_pidcomm));
c->comm = strdup(comm);
p->current = c;
c->next = p->all;
@@ -239,17 +237,15 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
p = find_create_pid(pid);
c = p->current;
if (!c) {
- c = malloc(sizeof(struct per_pidcomm));
+ c = zalloc(sizeof(*c));
assert(c != NULL);
- memset(c, 0, sizeof(struct per_pidcomm));
p->current = c;
c->next = p->all;
p->all = c;
}
- sample = malloc(sizeof(struct cpu_sample));
+ sample = zalloc(sizeof(*sample));
assert(sample != NULL);
- memset(sample, 0, sizeof(struct cpu_sample));
sample->start_time = start;
sample->end_time = end;
sample->type = type;
@@ -275,28 +271,28 @@ static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
-static int process_comm_event(struct perf_tool *tool __used,
+static int process_comm_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
pid_set_comm(event->comm.tid, event->comm.comm);
return 0;
}
-static int process_fork_event(struct perf_tool *tool __used,
+static int process_fork_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
return 0;
}
-static int process_exit_event(struct perf_tool *tool __used,
+static int process_exit_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
pid_exit(event->fork.pid, event->fork.time);
return 0;
@@ -373,11 +369,10 @@ static void c_state_start(int cpu, u64 timestamp, int state)
static void c_state_end(int cpu, u64 timestamp)
{
- struct power_event *pwr;
- pwr = malloc(sizeof(struct power_event));
+ struct power_event *pwr = zalloc(sizeof(*pwr));
+
if (!pwr)
return;
- memset(pwr, 0, sizeof(struct power_event));
pwr->state = cpus_cstate_state[cpu];
pwr->start_time = cpus_cstate_start_times[cpu];
@@ -392,14 +387,13 @@ static void c_state_end(int cpu, u64 timestamp)
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
struct power_event *pwr;
- pwr = malloc(sizeof(struct power_event));
if (new_freq > 8000000) /* detect invalid data */
return;
+ pwr = zalloc(sizeof(*pwr));
if (!pwr)
return;
- memset(pwr, 0, sizeof(struct power_event));
pwr->state = cpus_pstate_state[cpu];
pwr->start_time = cpus_pstate_start_times[cpu];
@@ -429,15 +423,13 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
- struct wake_event *we;
struct per_pid *p;
struct wakeup_entry *wake = (void *)te;
+ struct wake_event *we = zalloc(sizeof(*we));
- we = malloc(sizeof(struct wake_event));
if (!we)
return;
- memset(we, 0, sizeof(struct wake_event));
we->time = timestamp;
we->waker = pid;
@@ -491,11 +483,11 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
}
-static int process_sample_event(struct perf_tool *tool __used,
- union perf_event *event __used,
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine __used)
+ struct machine *machine __maybe_unused)
{
struct trace_entry *te;
@@ -579,13 +571,12 @@ static void end_sample_processing(void)
struct power_event *pwr;
for (cpu = 0; cpu <= numcpus; cpu++) {
- pwr = malloc(sizeof(struct power_event));
+ /* C state */
+#if 0
+ pwr = zalloc(sizeof(*pwr));
if (!pwr)
return;
- memset(pwr, 0, sizeof(struct power_event));
- /* C state */
-#if 0
pwr->state = cpus_cstate_state[cpu];
pwr->start_time = cpus_cstate_start_times[cpu];
pwr->end_time = last_time;
@@ -597,10 +588,9 @@ static void end_sample_processing(void)
#endif
/* P state */
- pwr = malloc(sizeof(struct power_event));
+ pwr = zalloc(sizeof(*pwr));
if (!pwr)
return;
- memset(pwr, 0, sizeof(struct power_event));
pwr->state = cpus_pstate_state[cpu];
pwr->start_time = cpus_pstate_start_times[cpu];
@@ -830,11 +820,9 @@ static void draw_process_bars(void)
static void add_process_filter(const char *string)
{
- struct process_filter *filt;
- int pid;
+ int pid = strtoull(string, NULL, 10);
+ struct process_filter *filt = malloc(sizeof(*filt));
- pid = strtoull(string, NULL, 10);
- filt = malloc(sizeof(struct process_filter));
if (!filt)
return;
@@ -1081,7 +1069,8 @@ static int __cmd_record(int argc, const char **argv)
}
static int
-parse_process(const struct option *opt __used, const char *arg, int __used unset)
+parse_process(const struct option *opt __maybe_unused, const char *arg,
+ int __maybe_unused unset)
{
if (arg)
add_process_filter(arg);
@@ -1106,7 +1095,8 @@ static const struct option options[] = {
};
-int cmd_timechart(int argc, const char **argv, const char *prefix __used)
+int cmd_timechart(int argc, const char **argv,
+ const char *prefix __maybe_unused)
{
argc = parse_options(argc, argv, options, timechart_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
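
The timechart changes above replace the malloc()+memset() pairs with zalloc(), the tools/perf helper that returns zero-filled memory, and switch the size arguments to sizeof(*ptr) so they track the pointed-to type. A minimal sketch of the same idiom, with a stand-in zalloc() since the real helper lives in the perf utility headers:

#include <stdlib.h>

/* Stand-in for the tools/perf zalloc() helper used above: allocate and
 * zero in one step, which is what lets the explicit memset() calls go. */
static void *zalloc(size_t size)
{
	return calloc(1, size);
}

struct per_pid_example {	/* illustrative struct, not the real per_pid */
	int pid;
	struct per_pid_example *next;
};

int main(void)
{
	/* sizeof(*p) follows the pointer's type, as in the converted code */
	struct per_pid_example *p = zalloc(sizeof(*p));

	if (!p)
		return 1;
	/* p->pid and p->next are already zero here, no memset() required */
	free(p);
	return 0;
}
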
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 68cd61ef6ac5..e434a16bb5ac 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -95,7 +95,8 @@ static void perf_top__update_print_entries(struct perf_top *top)
top->print_entries -= 9;
}
-static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg)
+static void perf_top__sig_winch(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused, void *arg)
{
struct perf_top *top = arg;
@@ -509,7 +510,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
prompt_integer(&counter, "Enter details event counter");
if (counter >= top->evlist->nr_entries) {
- top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ top->sym_evsel = perf_evlist__first(top->evlist);
fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
sleep(1);
break;
@@ -518,7 +519,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
if (top->sym_evsel->idx == counter)
break;
} else
- top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ top->sym_evsel = perf_evlist__first(top->evlist);
break;
case 'f':
prompt_integer(&top->count_filter, "Enter display event count filter");
@@ -663,7 +664,7 @@ static const char *skip_symbols[] = {
NULL
};
-static int symbol_filter(struct map *map __used, struct symbol *sym)
+static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym)
{
const char *name = sym->name;
int i;
@@ -783,8 +784,10 @@ static void perf_event__process_sample(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain) &&
sample->callchain) {
- err = machine__resolve_callchain(machine, al.thread,
- sample->callchain, &parent);
+ err = machine__resolve_callchain(machine, evsel,
+ al.thread, sample,
+ &parent);
+
if (err)
return;
}
@@ -820,7 +823,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
int ret;
while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
- ret = perf_evlist__parse_sample(top->evlist, event, &sample, false);
+ ret = perf_evlist__parse_sample(top->evlist, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
continue;
@@ -884,17 +887,14 @@ static void perf_top__mmap_read(struct perf_top *top)
static void perf_top__start_counters(struct perf_top *top)
{
- struct perf_evsel *counter, *first;
+ struct perf_evsel *counter;
struct perf_evlist *evlist = top->evlist;
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ if (top->group)
+ perf_evlist__set_leader(evlist);
list_for_each_entry(counter, &evlist->entries, node) {
struct perf_event_attr *attr = &counter->attr;
- struct xyarray *group_fd = NULL;
-
- if (top->group && counter != first)
- group_fd = first->fd;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
@@ -925,8 +925,7 @@ retry_sample_id:
attr->sample_id_all = top->sample_id_all_missing ? 0 : 1;
try_again:
if (perf_evsel__open(counter, top->evlist->cpus,
- top->evlist->threads, top->group,
- group_fd) < 0) {
+ top->evlist->threads) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
@@ -1165,7 +1164,7 @@ static const char * const top_usage[] = {
NULL
};
-int cmd_top(int argc, const char **argv, const char *prefix __used)
+int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_evsel *pos;
int status;
@@ -1328,7 +1327,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
pos->attr.sample_period = top.default_interval;
}
- top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
+ top.sym_evsel = perf_evlist__first(top.evlist);
symbol_conf.priv_size = sizeof(struct annotation);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
new file mode 100644
index 000000000000..8f113dab8bf1
--- /dev/null
+++ b/tools/perf/builtin-trace.c
@@ -0,0 +1,310 @@
+#include "builtin.h"
+#include "util/evlist.h"
+#include "util/parse-options.h"
+#include "util/thread_map.h"
+#include "event-parse.h"
+
+#include <libaudit.h>
+#include <stdlib.h>
+
+static struct syscall_fmt {
+ const char *name;
+ const char *alias;
+ bool errmsg;
+ bool timeout;
+} syscall_fmts[] = {
+ { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
+ { .name = "fstat", .errmsg = true, .alias = "newfstat", },
+ { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
+ { .name = "futex", .errmsg = true, },
+ { .name = "poll", .errmsg = true, .timeout = true, },
+ { .name = "ppoll", .errmsg = true, .timeout = true, },
+ { .name = "read", .errmsg = true, },
+ { .name = "recvfrom", .errmsg = true, },
+ { .name = "select", .errmsg = true, .timeout = true, },
+ { .name = "stat", .errmsg = true, .alias = "newstat", },
+};
+
+static int syscall_fmt__cmp(const void *name, const void *fmtp)
+{
+ const struct syscall_fmt *fmt = fmtp;
+ return strcmp(name, fmt->name);
+}
+
+static struct syscall_fmt *syscall_fmt__find(const char *name)
+{
+ const int nmemb = ARRAY_SIZE(syscall_fmts);
+ return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
+}
+
+struct syscall {
+ struct event_format *tp_format;
+ const char *name;
+ struct syscall_fmt *fmt;
+};
+
+struct trace {
+ int audit_machine;
+ struct {
+ int max;
+ struct syscall *table;
+ } syscalls;
+ struct perf_record_opts opts;
+};
+
+static int trace__read_syscall_info(struct trace *trace, int id)
+{
+ char tp_name[128];
+ struct syscall *sc;
+
+ if (id > trace->syscalls.max) {
+ struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
+
+ if (nsyscalls == NULL)
+ return -1;
+
+ if (trace->syscalls.max != -1) {
+ memset(nsyscalls + trace->syscalls.max + 1, 0,
+ (id - trace->syscalls.max) * sizeof(*sc));
+ } else {
+ memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
+ }
+
+ trace->syscalls.table = nsyscalls;
+ trace->syscalls.max = id;
+ }
+
+ sc = trace->syscalls.table + id;
+ sc->name = audit_syscall_to_name(id, trace->audit_machine);
+ if (sc->name == NULL)
+ return -1;
+
+ sc->fmt = syscall_fmt__find(sc->name);
+
+ snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
+ sc->tp_format = event_format__new("syscalls", tp_name);
+
+ if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
+ snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
+ sc->tp_format = event_format__new("syscalls", tp_name);
+ }
+
+ return sc->tp_format != NULL ? 0 : -1;
+}
+
+static size_t syscall__fprintf_args(struct syscall *sc, unsigned long *args, FILE *fp)
+{
+ int i = 0;
+ size_t printed = 0;
+
+ if (sc->tp_format != NULL) {
+ struct format_field *field;
+
+ for (field = sc->tp_format->format.fields->next; field; field = field->next) {
+ printed += fprintf(fp, "%s%s: %ld", printed ? ", " : "",
+ field->name, args[i++]);
+ }
+ } else {
+ while (i < 6) {
+ printed += fprintf(fp, "%sarg%d: %ld", printed ? ", " : "", i, args[i]);
+ ++i;
+ }
+ }
+
+ return printed;
+}
+
+static int trace__run(struct trace *trace)
+{
+ struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+ struct perf_evsel *evsel, *evsel_enter, *evsel_exit;
+ int err = -1, i, nr_events = 0, before;
+
+ if (evlist == NULL) {
+ printf("Not enough memory to run!\n");
+ goto out;
+ }
+
+ evsel_enter = perf_evsel__newtp("raw_syscalls", "sys_enter", 0);
+ if (evsel_enter == NULL) {
+ printf("Couldn't read the raw_syscalls:sys_enter tracepoint information!\n");
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__add(evlist, evsel_enter);
+
+ evsel_exit = perf_evsel__newtp("raw_syscalls", "sys_exit", 1);
+ if (evsel_exit == NULL) {
+ printf("Couldn't read the raw_syscalls:sys_exit tracepoint information!\n");
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__add(evlist, evsel_exit);
+
+ err = perf_evlist__create_maps(evlist, &trace->opts.target);
+ if (err < 0) {
+ printf("Problems parsing the target to trace, check your options!\n");
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__config_attrs(evlist, &trace->opts);
+
+ err = perf_evlist__open(evlist);
+ if (err < 0) {
+ printf("Couldn't create the events: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ if (err < 0) {
+ printf("Couldn't mmap the events: %s\n", strerror(errno));
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__enable(evlist);
+again:
+ before = nr_events;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ union perf_event *event;
+
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ const u32 type = event->header.type;
+ struct syscall *sc;
+ struct perf_sample sample;
+ int id;
+
+ ++nr_events;
+
+ switch (type) {
+ case PERF_RECORD_SAMPLE:
+ break;
+ case PERF_RECORD_LOST:
+ printf("LOST %" PRIu64 " events!\n", event->lost.lost);
+ continue;
+ default:
+ printf("Unexpected %s event, skipping...\n",
+ perf_event__name(type));
+ continue;
+ }
+
+ err = perf_evlist__parse_sample(evlist, event, &sample);
+ if (err) {
+ printf("Can't parse sample, err = %d, skipping...\n", err);
+ continue;
+ }
+
+ evsel = perf_evlist__id2evsel(evlist, sample.id);
+ if (evsel == NULL) {
+ printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
+ continue;
+ }
+
+ id = perf_evsel__intval(evsel, &sample, "id");
+ if (id < 0) {
+ printf("Invalid syscall %d id, skipping...\n", id);
+ continue;
+ }
+
+ if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
+ trace__read_syscall_info(trace, id))
+ continue;
+
+ if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
+ continue;
+
+ sc = &trace->syscalls.table[id];
+
+ if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1)
+ printf("%d ", sample.tid);
+
+ if (evsel == evsel_enter) {
+ void *args = perf_evsel__rawptr(evsel, &sample, "args");
+
+ printf("%s(", sc->name);
+ syscall__fprintf_args(sc, args, stdout);
+ } else if (evsel == evsel_exit) {
+ int ret = perf_evsel__intval(evsel, &sample, "ret");
+
+ if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
+ char bf[256];
+ const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
+ *e = audit_errno_to_name(-ret);
+
+ printf(") = -1 %s %s", e, emsg);
+ } else if (ret == 0 && sc->fmt && sc->fmt->timeout)
+ printf(") = 0 Timeout");
+ else
+ printf(") = %d", ret);
+
+ putchar('\n');
+ }
+ }
+ }
+
+ if (nr_events == before)
+ poll(evlist->pollfd, evlist->nr_fds, -1);
+
+ goto again;
+
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+out:
+ return err;
+}
+
+int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+ const char * const trace_usage[] = {
+ "perf trace [<options>]",
+ NULL
+ };
+ struct trace trace = {
+ .audit_machine = audit_detect_machine(),
+ .syscalls = {
+ .max = -1,
+ },
+ .opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .no_delay = true,
+ .mmap_pages = 1024,
+ },
+ };
+ const struct option trace_options[] = {
+ OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
+ "trace events on existing process id"),
+ OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
+ "trace events on existing thread id"),
+ OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
+ "list of cpus to monitor"),
+ OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
+ "child tasks do not inherit counters"),
+ OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
+ "number of mmap data pages"),
+ OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
+ "user to profile"),
+ OPT_END()
+ };
+ int err;
+
+ argc = parse_options(argc, argv, trace_options, trace_usage, 0);
+ if (argc)
+ usage_with_options(trace_usage, trace_options);
+
+ err = perf_target__parse_uid(&trace.opts.target);
+ if (err) {
+ char bf[BUFSIZ];
+ perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
+ printf("%s", bf);
+ return err;
+ }
+
+ return trace__run(&trace);
+}
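
syscall_fmt__find() above relies on bsearch(3), so the syscall_fmts[] table must stay sorted by name; an entry added out of order would silently break the lookup. A self-contained sketch of the same lookup shape, with a made-up table purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative subset only; the real table is syscall_fmts[] above. */
struct fmt {
	const char *name;
	const char *alias;
};

static const struct fmt fmts[] = {	/* must stay sorted by name */
	{ "fstat", "newfstat" },
	{ "poll",  NULL },
	{ "stat",  "newstat" },
};

static int fmt_cmp(const void *name, const void *fmtp)
{
	return strcmp(name, ((const struct fmt *)fmtp)->name);
}

int main(void)
{
	const struct fmt *f = bsearch("stat", fmts,
				      sizeof(fmts) / sizeof(fmts[0]),
				      sizeof(fmts[0]), fmt_cmp);

	printf("%s -> %s\n", f ? f->name : "?",
	       f && f->alias ? f->alias : "(no alias)");
	return 0;
}
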
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index b382bd551aac..08143bd854c7 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -34,6 +34,8 @@ extern int cmd_kmem(int argc, const char **argv, const char *prefix);
extern int cmd_lock(int argc, const char **argv, const char *prefix);
extern int cmd_kvm(int argc, const char **argv, const char *prefix);
extern int cmd_test(int argc, const char **argv, const char *prefix);
+extern int cmd_trace(int argc, const char **argv, const char *prefix);
extern int cmd_inject(int argc, const char **argv, const char *prefix);
+extern int find_scripts(char **scripts_array, char **scripts_path_array);
#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index d695fe40fbff..3e86bbd8c2d5 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -17,8 +17,9 @@ perf-report mainporcelain common
perf-stat mainporcelain common
perf-timechart mainporcelain common
perf-top mainporcelain common
+perf-trace mainporcelain common
perf-script mainporcelain common
-perf-probe mainporcelain common
+perf-probe mainporcelain full
perf-kmem mainporcelain common
perf-lock mainporcelain common
perf-kvm mainporcelain common
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 6c18785a6417..4add41bb0c7e 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -154,3 +154,53 @@ int main(void)
return 0;
}
endef
+
+ifndef NO_LIBUNWIND
+define SOURCE_LIBUNWIND
+#include <libunwind.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+int main(void)
+{
+ unw_addr_space_t addr_space;
+ addr_space = unw_create_addr_space(NULL, 0);
+ unw_init_remote(NULL, addr_space, NULL);
+ dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+ return 0;
+}
+endef
+endif
+
+ifndef NO_BACKTRACE
+define SOURCE_BACKTRACE
+#include <execinfo.h>
+#include <stdio.h>
+
+int main(void)
+{
+ backtrace(NULL, 0);
+ backtrace_symbols(NULL, 0);
+ return 0;
+}
+endef
+endif
+
+ifndef NO_LIBAUDIT
+define SOURCE_LIBAUDIT
+#include <libaudit.h>
+
+int main(void)
+{
+ return audit_open();
+}
+endef
+endif
\ No newline at end of file
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
index 95b6f8b6177a..e91930620269 100644
--- a/tools/perf/perf-archive.sh
+++ b/tools/perf/perf-archive.sh
@@ -24,7 +24,7 @@ NOBUILDID=0000000000000000000000000000000000000000
perf buildid-list -i $PERF_DATA --with-hits | grep -v "^$NOBUILDID " > $BUILDIDS
if [ ! -s $BUILDIDS ] ; then
echo "perf archive: no build-ids found"
- rm -f $BUILDIDS
+ rm $BUILDIDS || true
exit 1
fi
@@ -39,8 +39,8 @@ while read build_id ; do
echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST
done
-tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
-rm -f $MANIFEST $BUILDIDS
+tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
+rm $MANIFEST $BUILDIDS || true
echo -e "Now please run:\n"
echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n"
echo "wherever you need to run 'perf report' on."
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 2b2e225a4d4c..fc2f770e3027 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -14,6 +14,7 @@
#include "util/run-command.h"
#include "util/parse-events.h"
#include "util/debugfs.h"
+#include <pthread.h>
const char perf_usage_string[] =
"perf [--version] [--help] COMMAND [ARGS]";
@@ -24,6 +25,42 @@ const char perf_more_info_string[] =
int use_browser = -1;
static int use_pager = -1;
+struct cmd_struct {
+ const char *cmd;
+ int (*fn)(int, const char **, const char *);
+ int option;
+};
+
+static struct cmd_struct commands[] = {
+ { "buildid-cache", cmd_buildid_cache, 0 },
+ { "buildid-list", cmd_buildid_list, 0 },
+ { "diff", cmd_diff, 0 },
+ { "evlist", cmd_evlist, 0 },
+ { "help", cmd_help, 0 },
+ { "list", cmd_list, 0 },
+ { "record", cmd_record, 0 },
+ { "report", cmd_report, 0 },
+ { "bench", cmd_bench, 0 },
+ { "stat", cmd_stat, 0 },
+ { "timechart", cmd_timechart, 0 },
+ { "top", cmd_top, 0 },
+ { "annotate", cmd_annotate, 0 },
+ { "version", cmd_version, 0 },
+ { "script", cmd_script, 0 },
+ { "sched", cmd_sched, 0 },
+#ifndef NO_LIBELF_SUPPORT
+ { "probe", cmd_probe, 0 },
+#endif
+ { "kmem", cmd_kmem, 0 },
+ { "lock", cmd_lock, 0 },
+ { "kvm", cmd_kvm, 0 },
+ { "test", cmd_test, 0 },
+#ifndef NO_LIBAUDIT_SUPPORT
+ { "trace", cmd_trace, 0 },
+#endif
+ { "inject", cmd_inject, 0 },
+};
+
struct pager_config {
const char *cmd;
int val;
@@ -160,6 +197,14 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
if (envchanged)
*envchanged = 1;
+ } else if (!strcmp(cmd, "--list-cmds")) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ struct cmd_struct *p = commands+i;
+ printf("%s ", p->cmd);
+ }
+ exit(0);
} else {
fprintf(stderr, "Unknown option: %s\n", cmd);
usage(perf_usage_string);
@@ -245,12 +290,6 @@ const char perf_version_string[] = PERF_VERSION;
*/
#define NEED_WORK_TREE (1<<2)
-struct cmd_struct {
- const char *cmd;
- int (*fn)(int, const char **, const char *);
- int option;
-};
-
static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
{
int status;
@@ -296,30 +335,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
static void handle_internal_command(int argc, const char **argv)
{
const char *cmd = argv[0];
- static struct cmd_struct commands[] = {
- { "buildid-cache", cmd_buildid_cache, 0 },
- { "buildid-list", cmd_buildid_list, 0 },
- { "diff", cmd_diff, 0 },
- { "evlist", cmd_evlist, 0 },
- { "help", cmd_help, 0 },
- { "list", cmd_list, 0 },
- { "record", cmd_record, 0 },
- { "report", cmd_report, 0 },
- { "bench", cmd_bench, 0 },
- { "stat", cmd_stat, 0 },
- { "timechart", cmd_timechart, 0 },
- { "top", cmd_top, 0 },
- { "annotate", cmd_annotate, 0 },
- { "version", cmd_version, 0 },
- { "script", cmd_script, 0 },
- { "sched", cmd_sched, 0 },
- { "probe", cmd_probe, 0 },
- { "kmem", cmd_kmem, 0 },
- { "lock", cmd_lock, 0 },
- { "kvm", cmd_kvm, 0 },
- { "test", cmd_test, 0 },
- { "inject", cmd_inject, 0 },
- };
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
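
The perf.c hunk above hoists the commands[] table out of handle_internal_command() to file scope so that the new --list-cmds handler (consumed by the bash completion script) and the normal dispatch path share one list. The standalone sketch below mirrors that shape with an invented command, just to show the shared-table idea:

#include <stdio.h>
#include <string.h>

/* Invented one-entry table; the real perf table maps the builtins above. */
struct cmd {
	const char *name;
	int (*fn)(int argc, char **argv);
};

static int cmd_hello(int argc, char **argv)
{
	(void)argc; (void)argv;
	puts("hello");
	return 0;
}

static struct cmd commands[] = {
	{ "hello", cmd_hello },
};

#define NR_CMDS (sizeof(commands) / sizeof(commands[0]))

int main(int argc, char **argv)
{
	unsigned int i;

	if (argc > 1 && !strcmp(argv[1], "--list-cmds")) {
		for (i = 0; i < NR_CMDS; i++)	/* what completion scripts read */
			printf("%s ", commands[i].name);
		putchar('\n');
		return 0;
	}

	for (i = 0; i < NR_CMDS; i++)		/* normal dispatch path */
		if (argc > 1 && !strcmp(argv[1], commands[i].name))
			return commands[i].fn(argc - 1, argv + 1);

	fprintf(stderr, "unknown command\n");
	return 1;
}
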
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index f960ccb2edc6..a89cbbb61801 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -88,6 +88,12 @@ void get_term_dimensions(struct winsize *ws);
#define CPUINFO_PROC "Processor"
#endif
+#ifdef __aarch64__
+#include "../../arch/arm64/include/asm/unistd.h"
+#define rmb() asm volatile("dmb ld" ::: "memory")
+#define cpu_relax() asm volatile("yield" ::: "memory")
+#endif
+
#ifdef __mips__
#include "../../arch/mips/include/asm/unistd.h"
#define rmb() asm volatile( \
@@ -209,9 +215,15 @@ void pthread__unblock_sigwinch(void);
#include "util/target.h"
+enum perf_call_graph_mode {
+ CALLCHAIN_NONE,
+ CALLCHAIN_FP,
+ CALLCHAIN_DWARF
+};
+
struct perf_record_opts {
struct perf_target target;
- bool call_graph;
+ int call_graph;
bool group;
bool inherit_stat;
bool no_delay;
@@ -230,6 +242,7 @@ struct perf_record_opts {
u64 branch_stack;
u64 default_interval;
u64 user_interval;
+ u16 stack_dump_size;
};
#endif
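
perf_record_opts now carries a call-graph mode (the new enum) plus a stack_dump_size for the DWARF case. The actual option parsing lives elsewhere in this series (builtin-record.c); the sketch below only illustrates, under that assumption, how an "fp" or "dwarf[,size]" style argument could map onto the new fields:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF,
};

struct opts {				/* stand-in for perf_record_opts */
	int		call_graph;
	unsigned short	stack_dump_size;
};

/* Illustration only: accept "fp" or "dwarf[,size]" and fill the fields;
 * the default size of 8192 is this sketch's assumption, not perf's. */
static int parse_callchain(struct opts *o, const char *arg)
{
	if (!strcmp(arg, "fp")) {
		o->call_graph = CALLCHAIN_FP;
	} else if (!strncmp(arg, "dwarf", 5)) {
		o->call_graph = CALLCHAIN_DWARF;
		o->stack_dump_size = arg[5] == ',' ? atoi(arg + 6) : 8192;
	} else {
		return -1;
	}
	return 0;
}

int main(void)
{
	struct opts o = { .call_graph = CALLCHAIN_NONE };

	if (parse_callchain(&o, "dwarf,4096"))
		return 1;
	printf("mode=%d stack_dump_size=%u\n", o.call_graph, o.stack_dump_size);
	return 0;
}
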
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
new file mode 100755
index 000000000000..9e0985794e20
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
@@ -0,0 +1,94 @@
+# EventClass.py
+#
+# This is a library defining some event type classes, which can be
+# used by other scripts to analyze perf samples.
+#
+# Currently there are just a few classes defined as examples:
+# PerfEvent is the base class for all perf event samples, PebsEvent
+# is a hardware-based Intel x86 PEBS event, and users can add more
+# SW/HW event classes based on their requirements.
+
+import struct
+
+# Event types, user could add more here
+EVTYPE_GENERIC = 0
+EVTYPE_PEBS = 1 # Basic PEBS event
+EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
+EVTYPE_IBS = 3
+
+#
+# Currently we don't have a good way to tell the event type other than by
+# the size of the raw buffer: a raw PEBS event with load latency data is
+# 176 bytes, while a pure PEBS event is 144 bytes.
+#
+def create_event(name, comm, dso, symbol, raw_buf):
+ if (len(raw_buf) == 144):
+ event = PebsEvent(name, comm, dso, symbol, raw_buf)
+ elif (len(raw_buf) == 176):
+ event = PebsNHM(name, comm, dso, symbol, raw_buf)
+ else:
+ event = PerfEvent(name, comm, dso, symbol, raw_buf)
+
+ return event
+
+class PerfEvent(object):
+ event_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
+ self.name = name
+ self.comm = comm
+ self.dso = dso
+ self.symbol = symbol
+ self.raw_buf = raw_buf
+ self.ev_type = ev_type
+ PerfEvent.event_num += 1
+
+ def show(self):
+ print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
+
+#
+# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
+# contains the context info when that event happened: the EFLAGS and
+# linear IP info, as well as all the registers.
+#
+class PebsEvent(PerfEvent):
+ pebs_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
+ tmp_buf=raw_buf[0:80]
+ flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
+ self.flags = flags
+ self.ip = ip
+ self.ax = ax
+ self.bx = bx
+ self.cx = cx
+ self.dx = dx
+ self.si = si
+ self.di = di
+ self.bp = bp
+ self.sp = sp
+
+ PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+ PebsEvent.pebs_num += 1
+ del tmp_buf
+
+#
+# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
+# in the four 64 bit words written after the PEBS data:
+# Status: records the IA32_PERF_GLOBAL_STATUS register value
+# DLA: Data Linear Address (EIP)
+# DSE: Data Source Encoding, where the latency happens, hit or miss
+# in L1/L2/L3 or IO operations
+# LAT: the actual latency in cycles
+#
+class PebsNHM(PebsEvent):
+ pebs_nhm_num = 0
+ def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
+ tmp_buf=raw_buf[144:176]
+ status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
+ self.status = status
+ self.dla = dla
+ self.dse = dse
+ self.lat = lat
+
+ PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
+ PebsNHM.pebs_nhm_num += 1
+ del tmp_buf
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-record b/tools/perf/scripts/python/bin/event_analyzing_sample-record
new file mode 100644
index 000000000000..5ce652dabd02
--- /dev/null
+++ b/tools/perf/scripts/python/bin/event_analyzing_sample-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# event_analyzing_sample.py can cover all types of perf samples including
+# tracepoints, so there are no special record requirements; just record
+# whatever you want to analyze.
+#
+perf record $@
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-report b/tools/perf/scripts/python/bin/event_analyzing_sample-report
new file mode 100644
index 000000000000..0941fc94e158
--- /dev/null
+++ b/tools/perf/scripts/python/bin/event_analyzing_sample-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: analyze all perf samples
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
new file mode 100644
index 000000000000..163c39fa12d9
--- /dev/null
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -0,0 +1,189 @@
+# event_analyzing_sample.py: general event handler in python
+#
+# The current perf report is already very powerful with its integrated
+# annotation. This script is not trying to be as powerful as perf report,
+# but to give end users and developers a flexible way to analyze events
+# other than tracepoints.
+#
+# The two database-related functions in this script just show how to gather
+# the basic information, and users can modify them or write their own
+# functions according to their specific requirements.
+#
+# The first function, "show_general_events", does a basic grouping of all
+# generic events with the help of sqlite, and the second one, "show_pebs_ll",
+# is for an x86 HW PMU event: PEBS with load latency data.
+#
+
+import os
+import sys
+import math
+import struct
+import sqlite3
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from EventClass import *
+
+#
+# If the perf.data file has a large number of samples, the insert operations
+# will be very time consuming (about 10+ minutes for 10000 samples) if the
+# .db database is on disk. Moving the .db file to a RAM-based FS speeds up
+# the handling and cuts the time down to several seconds.
+#
+con = sqlite3.connect("/dev/shm/perf.db")
+con.isolation_level = None
+
+def trace_begin():
+ print "In trace_begin:\n"
+
+ #
+ # Will create several tables at the start, pebs_ll is for PEBS data with
+ # load latency info, while gen_events is for general event.
+ #
+ con.execute("""
+ create table if not exists gen_events (
+ name text,
+ symbol text,
+ comm text,
+ dso text
+ );""")
+ con.execute("""
+ create table if not exists pebs_ll (
+ name text,
+ symbol text,
+ comm text,
+ dso text,
+ flags integer,
+ ip integer,
+ status integer,
+ dse integer,
+ dla integer,
+ lat integer
+ );""")
+
+#
+# Create an event object and insert it into the database so that users
+# can do more analysis with simple database commands.
+#
+def process_event(param_dict):
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if (param_dict.has_key("dso")):
+ dso = param_dict["dso"]
+ else:
+ dso = "Unknown_dso"
+
+ if (param_dict.has_key("symbol")):
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "Unknown_symbol"
+
+ # Create the event object and insert it to the right table in database
+ event = create_event(name, comm, dso, symbol, raw_buf)
+ insert_db(event)
+
+def insert_db(event):
+ if event.ev_type == EVTYPE_GENERIC:
+ con.execute("insert into gen_events values(?, ?, ?, ?)",
+ (event.name, event.symbol, event.comm, event.dso))
+ elif event.ev_type == EVTYPE_PEBS_LL:
+ event.ip &= 0x7fffffffffffffff
+ event.dla &= 0x7fffffffffffffff
+ con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
+ (event.name, event.symbol, event.comm, event.dso, event.flags,
+ event.ip, event.status, event.dse, event.dla, event.lat))
+
+def trace_end():
+ print "In trace_end:\n"
+ # We show the basic info for the 2 type of event classes
+ show_general_events()
+ show_pebs_ll()
+ con.close()
+
+#
+# As the number of events may be very big, we can't show the histogram
+# on a linear scale with the real numbers, so a log2 algorithm is used.
+#
+
+def num2sym(num):
+ # Each number will have at least one '#'
+ snum = '#' * (int)(math.log(num, 2) + 1)
+ return snum
+
+def show_general_events():
+
+ # Check the total record number in the table
+ count = con.execute("select count(*) from gen_events")
+ for t in count:
+ print "There is %d records in gen_events table" % t[0]
+ if t[0] == 0:
+ return
+
+ print "Statistics about the general events grouped by thread/symbol/dso: \n"
+
+ # Group by thread
+ commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
+ print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ for row in commq:
+ print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by symbol
+ print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
+ for row in symbolq:
+ print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by dso
+ print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
+ for row in dsoq:
+ print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+#
+# This function just shows the basic info, and we could do more with the
+# data in the tables, like checking the function parameters when some
+# big latency events happen.
+#
+def show_pebs_ll():
+
+ count = con.execute("select count(*) from pebs_ll")
+ for t in count:
+ print "There is %d records in pebs_ll table" % t[0]
+ if t[0] == 0:
+ return
+
+ print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+
+ # Group by thread
+ commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
+ print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ for row in commq:
+ print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by symbol
+ print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
+ for row in symbolq:
+ print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by dse
+ dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
+ print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ for row in dseq:
+ print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+ # Group by latency
+ latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
+ print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ for row in latq:
+ print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ print ' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())])
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 1818a531f1d3..4aeb7d5df939 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -269,7 +269,7 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
return err ? 0 : -1;
}
-void ui_browser__hide(struct ui_browser *browser __used)
+void ui_browser__hide(struct ui_browser *browser __maybe_unused)
{
pthread_mutex_lock(&ui__lock);
ui_helpline__pop();
@@ -518,7 +518,7 @@ static struct ui_browser__colorset {
static int ui_browser__color_config(const char *var, const char *value,
- void *data __used)
+ void *data __maybe_unused)
{
char *fg = NULL, *bg;
int i;
@@ -602,7 +602,8 @@ void __ui_browser__vline(struct ui_browser *browser, unsigned int column,
SLsmg_set_char_set(0);
}
-void ui_browser__write_graph(struct ui_browser *browser __used, int graph)
+void ui_browser__write_graph(struct ui_browser *browser __maybe_unused,
+ int graph)
{
SLsmg_set_char_set(1);
SLsmg_write_char(graph);
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 67a2703e666a..8f8cd2d73b3b 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -54,7 +54,8 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin
return (struct browser_disasm_line *)(dl + 1);
}
-static bool disasm_line__filter(struct ui_browser *browser __used, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __maybe_unused,
+ void *entry)
{
if (annotate_browser__opts.hide_src_code) {
struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
@@ -928,7 +929,8 @@ static int annotate_config__cmp(const void *name, const void *cfgp)
return strcmp(name, cfg->name);
}
-static int annotate__config(const char *var, const char *value, void *data __used)
+static int annotate__config(const char *var, const char *value,
+ void *data __maybe_unused)
{
struct annotate__config *cfg;
const char *name;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 413bd62eedb1..a21f40bebbac 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -24,9 +24,12 @@ struct hist_browser {
struct hist_entry *he_selection;
struct map_symbol *selection;
int print_seq;
+ bool show_dso;
bool has_symbols;
};
+extern void hist_browser__init_hpp(void);
+
static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const char *ev_name);
@@ -376,12 +379,19 @@ out:
}
static char *callchain_list__sym_name(struct callchain_list *cl,
- char *bf, size_t bfsize)
+ char *bf, size_t bfsize, bool show_dso)
{
+ int printed;
+
if (cl->ms.sym)
- return cl->ms.sym->name;
+ printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name);
+ else
+ printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
+
+ if (show_dso)
+ scnprintf(bf + printed, bfsize - printed, " %s",
+ cl->ms.map ? cl->ms.map->dso->short_name : "unknown");
- snprintf(bf, bfsize, "%#" PRIx64, cl->ip);
return bf;
}
@@ -417,7 +427,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse
remaining -= cumul;
list_for_each_entry(chain, &child->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+ char bf[1024], *alloc_str;
const char *str;
int color;
bool was_first = first;
@@ -434,7 +444,8 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse
}
alloc_str = NULL;
- str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ str = callchain_list__sym_name(chain, bf, sizeof(bf),
+ browser->show_dso);
if (was_first) {
double percent = cumul * 100.0 / new_total;
@@ -493,7 +504,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser,
char folded_sign = ' ';
list_for_each_entry(chain, &node->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *s;
+ char bf[1024], *s;
int color;
folded_sign = callchain_list__folded(chain);
@@ -510,7 +521,8 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser,
*is_current_entry = true;
}
- s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ s = callchain_list__sym_name(chain, bf, sizeof(bf),
+ browser->show_dso);
ui_browser__gotorc(&browser->b, row, 0);
ui_browser__set_color(&browser->b, color);
slsmg_write_nstring(" ", offset);
@@ -553,14 +565,47 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
return row - first_row;
}
+#define HPP__COLOR_FN(_name, _field) \
+static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ double percent = 100.0 * he->_field / hpp->total_period; \
+ *(double *)hpp->ptr = percent; \
+ return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); \
+}
+
+HPP__COLOR_FN(overhead, period)
+HPP__COLOR_FN(overhead_sys, period_sys)
+HPP__COLOR_FN(overhead_us, period_us)
+HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
+HPP__COLOR_FN(overhead_guest_us, period_guest_us)
+
+#undef HPP__COLOR_FN
+
+void hist_browser__init_hpp(void)
+{
+ perf_hpp__init(false, false);
+
+ perf_hpp__format[PERF_HPP__OVERHEAD].color =
+ hist_browser__hpp_color_overhead;
+ perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
+ hist_browser__hpp_color_overhead_sys;
+ perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
+ hist_browser__hpp_color_overhead_us;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
+ hist_browser__hpp_color_overhead_guest_sys;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
+ hist_browser__hpp_color_overhead_guest_us;
+}
+
static int hist_browser__show_entry(struct hist_browser *browser,
struct hist_entry *entry,
unsigned short row)
{
char s[256];
double percent;
- int printed = 0;
- int width = browser->b.width - 6; /* The percentage */
+ int i, printed = 0;
+ int width = browser->b.width;
char folded_sign = ' ';
bool current_entry = ui_browser__is_current_entry(&browser->b, row);
off_t row_offset = entry->row_offset;
@@ -576,35 +621,50 @@ static int hist_browser__show_entry(struct hist_browser *browser,
}
if (row_offset == 0) {
- hist_entry__snprintf(entry, s, sizeof(s), browser->hists);
- percent = (entry->period * 100.0) / browser->hists->stats.total_period;
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ .total_period = browser->hists->stats.total_period,
+ };
- ui_browser__set_percent_color(&browser->b, percent, current_entry);
ui_browser__gotorc(&browser->b, row, 0);
- if (symbol_conf.use_callchain) {
- slsmg_printf("%c ", folded_sign);
- width -= 2;
- }
- slsmg_printf(" %5.2f%%", percent);
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
- /* The scroll bar isn't being used */
- if (!browser->b.navkeypressed)
- width += 1;
+ if (i) {
+ slsmg_printf(" ");
+ width -= 2;
+ }
- if (!current_entry || !browser->b.navkeypressed)
- ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
+ if (perf_hpp__format[i].color) {
+ hpp.ptr = &percent;
+ /* It will set percent for us. See HPP__COLOR_FN above. */
+ width -= perf_hpp__format[i].color(&hpp, entry);
- if (symbol_conf.show_nr_samples) {
- slsmg_printf(" %11u", entry->nr_events);
- width -= 12;
- }
+ ui_browser__set_percent_color(&browser->b, percent, current_entry);
+
+ if (i == 0 && symbol_conf.use_callchain) {
+ slsmg_printf("%c ", folded_sign);
+ width -= 2;
+ }
+
+ slsmg_printf("%s", s);
- if (symbol_conf.show_total_period) {
- slsmg_printf(" %12" PRIu64, entry->period);
- width -= 13;
+ if (!current_entry || !browser->b.navkeypressed)
+ ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
+ } else {
+ width -= perf_hpp__format[i].entry(&hpp, entry);
+ slsmg_printf("%s", s);
+ }
}
+ /* The scroll bar isn't being used */
+ if (!browser->b.navkeypressed)
+ width += 1;
+
+ hist_entry__sort_snprintf(entry, s, sizeof(s), browser->hists);
slsmg_write_nstring(s, width);
++row;
++printed;
@@ -830,7 +890,7 @@ static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *bro
remaining -= cumul;
list_for_each_entry(chain, &child->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+ char bf[1024], *alloc_str;
const char *str;
bool was_first = first;
@@ -842,7 +902,8 @@ static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *bro
folded_sign = callchain_list__folded(chain);
alloc_str = NULL;
- str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ str = callchain_list__sym_name(chain, bf, sizeof(bf),
+ browser->show_dso);
if (was_first) {
double percent = cumul * 100.0 / new_total;
@@ -880,10 +941,10 @@ static int hist_browser__fprintf_callchain_node(struct hist_browser *browser,
int printed = 0;
list_for_each_entry(chain, &node->val, list) {
- char ipstr[BITS_PER_LONG / 4 + 1], *s;
+ char bf[1024], *s;
folded_sign = callchain_list__folded(chain);
- s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso);
printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s);
}
@@ -920,7 +981,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
if (symbol_conf.use_callchain)
folded_sign = hist_entry__folded(he);
- hist_entry__snprintf(he, s, sizeof(s), browser->hists);
+ hist_entry__sort_snprintf(he, s, sizeof(s), browser->hists);
percent = (he->period * 100.0) / browser->hists->stats.total_period;
if (symbol_conf.use_callchain)
@@ -1133,6 +1194,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
continue;
case 'd':
goto zoom_dso;
+ case 'V':
+ browser->show_dso = !browser->show_dso;
+ continue;
case 't':
goto zoom_thread;
case '/':
@@ -1164,6 +1228,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"d Zoom into current DSO\n"
"t Zoom into current Thread\n"
"P Print histograms to perf.hist.N\n"
+ "V Verbose (DSO names in callchains, etc)\n"
"/ Filter symbol by name");
continue;
case K_ENTER:
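
The HPP__COLOR_FN() macro above stamps out one color callback per overhead column. To make the indirection concrete, here is roughly what the preprocessor produces for HPP__COLOR_FN(overhead, period), with trimmed stand-in structs (the real perf_hpp and hist_entry carry more fields) and snprintf standing in for scnprintf:

#include <stdio.h>

typedef unsigned long long u64;

/* Trimmed stand-ins for the perf structures, just enough to read (and
 * compile) the expansion in isolation. */
struct perf_hpp {
	char	*buf;
	size_t	 size;
	u64	 total_period;
	void	*ptr;
};

struct hist_entry {
	u64	 period;
};

/* Roughly the expansion of HPP__COLOR_FN(overhead, period): compute the
 * percentage, hand it back through hpp->ptr for the caller's color
 * handling, and format it into hpp->buf. */
static int hist_browser__hpp_color_overhead(struct perf_hpp *hpp,
					    struct hist_entry *he)
{
	double percent = 100.0 * he->period / hpp->total_period;

	*(double *)hpp->ptr = percent;
	return snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
}

int main(void)
{
	char buf[16];
	double percent;
	struct perf_hpp hpp = {
		.buf = buf, .size = sizeof(buf),
		.total_period = 200, .ptr = &percent,
	};
	struct hist_entry he = { .period = 50 };

	hist_browser__hpp_color_overhead(&hpp, &he);
	printf("%s (%.2f)\n", buf, percent);	/* " 25.00% (25.00)" */
	return 0;
}
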
diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c
index ec12e0b4ded6..7ff99ec1d95e 100644
--- a/tools/perf/ui/gtk/browser.c
+++ b/tools/perf/ui/gtk/browser.c
@@ -3,6 +3,7 @@
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
+#include "../helpline.h"
#include "gtk.h"
#include <signal.h>
@@ -35,6 +36,57 @@ static void perf_gtk__resize_window(GtkWidget *window)
gtk_window_resize(GTK_WINDOW(window), width, height);
}
+static const char *perf_gtk__get_percent_color(double percent)
+{
+ if (percent >= MIN_RED)
+ return "<span fgcolor='red'>";
+ if (percent >= MIN_GREEN)
+ return "<span fgcolor='dark green'>";
+ return NULL;
+}
+
+#define HPP__COLOR_FN(_name, _field) \
+static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ double percent = 100.0 * he->_field / hpp->total_period; \
+ const char *markup; \
+ int ret = 0; \
+ \
+ markup = perf_gtk__get_percent_color(percent); \
+ if (markup) \
+ ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \
+ ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \
+ if (markup) \
+ ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \
+ \
+ return ret; \
+}
+
+HPP__COLOR_FN(overhead, period)
+HPP__COLOR_FN(overhead_sys, period_sys)
+HPP__COLOR_FN(overhead_us, period_us)
+HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
+HPP__COLOR_FN(overhead_guest_us, period_guest_us)
+
+#undef HPP__COLOR_FN
+
+void perf_gtk__init_hpp(void)
+{
+ perf_hpp__init(false, false);
+
+ perf_hpp__format[PERF_HPP__OVERHEAD].color =
+ perf_gtk__hpp_color_overhead;
+ perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
+ perf_gtk__hpp_color_overhead_sys;
+ perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
+ perf_gtk__hpp_color_overhead_us;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
+ perf_gtk__hpp_color_overhead_guest_sys;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
+ perf_gtk__hpp_color_overhead_guest_us;
+}
+
static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
{
GType col_types[MAX_COLUMNS];
@@ -42,15 +94,25 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
struct sort_entry *se;
GtkListStore *store;
struct rb_node *nd;
- u64 total_period;
GtkWidget *view;
- int col_idx;
+ int i, col_idx;
int nr_cols;
+ char s[512];
+
+ struct perf_hpp hpp = {
+ .buf = s,
+ .size = sizeof(s),
+ .total_period = hists->stats.total_period,
+ };
nr_cols = 0;
- /* The percentage column */
- col_types[nr_cols++] = G_TYPE_STRING;
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
+
+ col_types[nr_cols++] = G_TYPE_STRING;
+ }
list_for_each_entry(se, &hist_entry__sort_list, list) {
if (se->elide)
@@ -67,11 +129,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
col_idx = 0;
- /* The percentage column */
- gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
- -1, "Overhead (%)",
- renderer, "text",
- col_idx++, NULL);
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
+
+ perf_hpp__format[i].header(&hpp);
+
+ gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+ -1, s,
+ renderer, "markup",
+ col_idx++, NULL);
+ }
list_for_each_entry(se, &hist_entry__sort_list, list) {
if (se->elide)
@@ -87,13 +155,9 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
g_object_unref(GTK_TREE_MODEL(store));
- total_period = hists->stats.total_period;
-
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
GtkTreeIter iter;
- double percent;
- char s[512];
if (h->filtered)
continue;
@@ -102,11 +166,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
col_idx = 0;
- percent = (h->period * 100.0) / total_period;
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
- snprintf(s, ARRAY_SIZE(s), "%.2f", percent);
+ if (perf_hpp__format[i].color)
+ perf_hpp__format[i].color(&hpp, h);
+ else
+ perf_hpp__format[i].entry(&hpp, h);
- gtk_list_store_set(store, &iter, col_idx++, s, -1);
+ gtk_list_store_set(store, &iter, col_idx++, s, -1);
+ }
list_for_each_entry(se, &hist_entry__sort_list, list) {
if (se->elide)
@@ -166,9 +236,10 @@ static GtkWidget *perf_gtk__setup_statusbar(void)
}
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
- const char *help __used,
- void (*timer) (void *arg)__used,
- void *arg __used, int delay_secs __used)
+ const char *help,
+ void (*timer) (void *arg)__maybe_unused,
+ void *arg __maybe_unused,
+ int delay_secs __maybe_unused)
{
struct perf_evsel *pos;
GtkWidget *vbox;
@@ -233,6 +304,8 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
+ ui_helpline__push(help);
+
gtk_main();
perf_gtk__deactivate_context(&pgctx);
diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h
index a4d0f2b4a2dc..687af0bba187 100644
--- a/tools/perf/ui/gtk/gtk.h
+++ b/tools/perf/ui/gtk/gtk.h
@@ -29,6 +29,9 @@ static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx)
struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window);
int perf_gtk__deactivate_context(struct perf_gtk_context **ctx);
+void perf_gtk__init_helpline(void);
+void perf_gtk__init_hpp(void);
+
#ifndef HAVE_GTK_INFO_BAR
static inline GtkWidget *perf_gtk__setup_info_bar(void)
{
diff --git a/tools/perf/ui/gtk/helpline.c b/tools/perf/ui/gtk/helpline.c
new file mode 100644
index 000000000000..5db4432ff12a
--- /dev/null
+++ b/tools/perf/ui/gtk/helpline.c
@@ -0,0 +1,56 @@
+#include <stdio.h>
+#include <string.h>
+
+#include "gtk.h"
+#include "../ui.h"
+#include "../helpline.h"
+#include "../../util/debug.h"
+
+static void gtk_helpline_pop(void)
+{
+ if (!perf_gtk__is_active_context(pgctx))
+ return;
+
+ gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar),
+ pgctx->statbar_ctx_id);
+}
+
+static void gtk_helpline_push(const char *msg)
+{
+ if (!perf_gtk__is_active_context(pgctx))
+ return;
+
+ gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar),
+ pgctx->statbar_ctx_id, msg);
+}
+
+static struct ui_helpline gtk_helpline_fns = {
+ .pop = gtk_helpline_pop,
+ .push = gtk_helpline_push,
+};
+
+void perf_gtk__init_helpline(void)
+{
+ helpline_fns = &gtk_helpline_fns;
+}
+
+int perf_gtk__show_helpline(const char *fmt, va_list ap)
+{
+ int ret;
+ char *ptr;
+ static int backlog;
+
+ ret = vscnprintf(ui_helpline__current + backlog,
+ sizeof(ui_helpline__current) - backlog, fmt, ap);
+ backlog += ret;
+
+ /* only first line can be displayed */
+ ptr = strchr(ui_helpline__current, '\n');
+ if (ptr && (ptr - ui_helpline__current) <= backlog) {
+ *ptr = '\0';
+ ui_helpline__puts(ui_helpline__current);
+ backlog = 0;
+ }
+
+ return ret;
+}
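
The GTK helpline above hooks into the backend-neutral dispatch added to ui/helpline.c below: each UI registers a small struct of pop/push function pointers and the generic ui_helpline__* entry points simply call through it. A minimal standalone sketch of that dispatch pattern, with hypothetical names rather than the perf symbols:

#include <stdio.h>

/* backend-agnostic "helpline" operations */
struct helpline_ops {
	void (*push)(const char *msg);
	void (*pop)(void);
};

static void nop_push(const char *msg) { (void)msg; }
static void nop_pop(void) { }

/* default no-op backend, swapped out at UI init time */
static struct helpline_ops nop_ops = { .push = nop_push, .pop = nop_pop };
static struct helpline_ops *ops = &nop_ops;

static void console_push(const char *msg) { printf("status: %s\n", msg); }
static void console_pop(void)             { printf("status cleared\n"); }
static struct helpline_ops console_ops = {
	.push = console_push,
	.pop  = console_pop,
};

int main(void)
{
	ops->push("ignored by the no-op backend");
	ops = &console_ops;	/* cf. what perf_gtk__init_helpline() does */
	ops->push("hello from the console backend");
	ops->pop();
	return 0;
}

Keeping a no-op default means callers never have to check whether a UI is active before pushing a message.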
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c
index 92879ce61e2f..3c4c6ef78283 100644
--- a/tools/perf/ui/gtk/setup.c
+++ b/tools/perf/ui/gtk/setup.c
@@ -7,11 +7,15 @@ extern struct perf_error_ops perf_gtk_eops;
int perf_gtk__init(void)
{
perf_error__register(&perf_gtk_eops);
+ perf_gtk__init_helpline();
+ perf_gtk__init_hpp();
return gtk_init_check(NULL, NULL) ? 0 : -1;
}
-void perf_gtk__exit(bool wait_for_ok __used)
+void perf_gtk__exit(bool wait_for_ok __maybe_unused)
{
+ if (!perf_gtk__is_active_context(pgctx))
+ return;
perf_error__unregister(&perf_gtk_eops);
gtk_main_quit();
}
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c
index 0ead373c0dfb..8aada5b3c04c 100644
--- a/tools/perf/ui/gtk/util.c
+++ b/tools/perf/ui/gtk/util.c
@@ -117,13 +117,8 @@ struct perf_error_ops perf_gtk_eops = {
* For now, just add stubs for NO_NEWT=1 build.
*/
#ifdef NO_NEWT_SUPPORT
-int ui_helpline__show_help(const char *format __used, va_list ap __used)
-{
- return 0;
-}
-
-void ui_progress__update(u64 curr __used, u64 total __used,
- const char *title __used)
+void ui_progress__update(u64 curr __maybe_unused, u64 total __maybe_unused,
+ const char *title __maybe_unused)
{
}
#endif
diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c
index 2f950c2641c8..a49bcf3c190b 100644
--- a/tools/perf/ui/helpline.c
+++ b/tools/perf/ui/helpline.c
@@ -5,23 +5,32 @@
#include "../debug.h"
#include "helpline.h"
#include "ui.h"
-#include "libslang.h"
-void ui_helpline__pop(void)
+char ui_helpline__current[512];
+
+static void nop_helpline__pop(void)
{
}
-char ui_helpline__current[512];
+static void nop_helpline__push(const char *msg __maybe_unused)
+{
+}
-void ui_helpline__push(const char *msg)
+static struct ui_helpline default_helpline_fns = {
+ .pop = nop_helpline__pop,
+ .push = nop_helpline__push,
+};
+
+struct ui_helpline *helpline_fns = &default_helpline_fns;
+
+void ui_helpline__pop(void)
{
- const size_t sz = sizeof(ui_helpline__current);
+ helpline_fns->pop();
+}
- SLsmg_gotorc(SLtt_Screen_Rows - 1, 0);
- SLsmg_set_color(0);
- SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
- SLsmg_refresh();
- strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
+void ui_helpline__push(const char *msg)
+{
+ helpline_fns->push(msg);
}
void ui_helpline__vpush(const char *fmt, va_list ap)
@@ -50,30 +59,3 @@ void ui_helpline__puts(const char *msg)
ui_helpline__pop();
ui_helpline__push(msg);
}
-
-void ui_helpline__init(void)
-{
- ui_helpline__puts(" ");
-}
-
-char ui_helpline__last_msg[1024];
-
-int ui_helpline__show_help(const char *format, va_list ap)
-{
- int ret;
- static int backlog;
-
- pthread_mutex_lock(&ui__lock);
- ret = vscnprintf(ui_helpline__last_msg + backlog,
- sizeof(ui_helpline__last_msg) - backlog, format, ap);
- backlog += ret;
-
- if (ui_helpline__last_msg[backlog - 1] == '\n') {
- ui_helpline__puts(ui_helpline__last_msg);
- SLsmg_refresh();
- backlog = 0;
- }
- pthread_mutex_unlock(&ui__lock);
-
- return ret;
-}
diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h
index 7bab6b34e35e..2b667ee454c3 100644
--- a/tools/perf/ui/helpline.h
+++ b/tools/perf/ui/helpline.h
@@ -4,13 +4,44 @@
#include <stdio.h>
#include <stdarg.h>
+#include "../util/cache.h"
+
+struct ui_helpline {
+ void (*pop)(void);
+ void (*push)(const char *msg);
+};
+
+extern struct ui_helpline *helpline_fns;
+
void ui_helpline__init(void);
+
void ui_helpline__pop(void);
void ui_helpline__push(const char *msg);
void ui_helpline__vpush(const char *fmt, va_list ap);
void ui_helpline__fpush(const char *fmt, ...);
void ui_helpline__puts(const char *msg);
-extern char ui_helpline__current[];
+extern char ui_helpline__current[512];
+
+#ifdef NO_NEWT_SUPPORT
+static inline int ui_helpline__show_help(const char *format __maybe_unused,
+ va_list ap __maybe_unused)
+{
+ return 0;
+}
+#else
+extern char ui_helpline__last_msg[];
+int ui_helpline__show_help(const char *format, va_list ap);
+#endif /* NO_NEWT_SUPPORT */
+
+#ifdef NO_GTK2_SUPPORT
+static inline int perf_gtk__show_helpline(const char *format __maybe_unused,
+ va_list ap __maybe_unused)
+{
+ return 0;
+}
+#else
+int perf_gtk__show_helpline(const char *format, va_list ap);
+#endif /* NO_GTK2_SUPPORT */
#endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
new file mode 100644
index 000000000000..e3f8cd46e7d7
--- /dev/null
+++ b/tools/perf/ui/hist.c
@@ -0,0 +1,390 @@
+#include <math.h>
+
+#include "../util/hist.h"
+#include "../util/util.h"
+#include "../util/sort.h"
+
+
+/* hist period print (hpp) functions */
+static int hpp__header_overhead(struct perf_hpp *hpp)
+{
+ const char *fmt = hpp->ptr ? "Baseline" : "Overhead";
+
+ return scnprintf(hpp->buf, hpp->size, fmt);
+}
+
+static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused)
+{
+ return 8;
+}
+
+static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period / hpp->total_period;
+
+ if (hpp->ptr) {
+ struct hists *old_hists = hpp->ptr;
+ u64 total_period = old_hists->stats.total_period;
+ u64 base_period = he->pair ? he->pair->period : 0;
+
+ if (total_period)
+ percent = 100.0 * base_period / total_period;
+ else
+ percent = 0.0;
+ }
+
+ return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent);
+}
+
+static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%";
+
+ if (hpp->ptr) {
+ struct hists *old_hists = hpp->ptr;
+ u64 total_period = old_hists->stats.total_period;
+ u64 base_period = he->pair ? he->pair->period : 0;
+
+ if (total_period)
+ percent = 100.0 * base_period / total_period;
+ else
+ percent = 0.0;
+ }
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_overhead_sys(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "sys");
+}
+
+static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused)
+{
+ return 7;
+}
+
+static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_sys / hpp->total_period;
+ return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
+}
+
+static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_sys / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_overhead_us(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "user");
+}
+
+static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused)
+{
+ return 7;
+}
+
+static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_us / hpp->total_period;
+ return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
+}
+
+static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_us / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp)
+{
+ return scnprintf(hpp->buf, hpp->size, "guest sys");
+}
+
+static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused)
+{
+ return 9;
+}
+
+static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_guest_sys / hpp->total_period;
+ return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
+}
+
+static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_guest_sys / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_overhead_guest_us(struct perf_hpp *hpp)
+{
+ return scnprintf(hpp->buf, hpp->size, "guest usr");
+}
+
+static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused)
+{
+ return 9;
+}
+
+static int hpp__color_overhead_guest_us(struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_guest_us / hpp->total_period;
+ return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
+}
+
+static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ double percent = 100.0 * he->period_guest_us / hpp->total_period;
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, percent);
+}
+
+static int hpp__header_samples(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%11s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "Samples");
+}
+
+static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused)
+{
+ return 11;
+}
+
+static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64;
+
+ return scnprintf(hpp->buf, hpp->size, fmt, he->nr_events);
+}
+
+static int hpp__header_period(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "Period");
+}
+
+static int hpp__width_period(struct perf_hpp *hpp __maybe_unused)
+{
+ return 12;
+}
+
+static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64;
+
+ return scnprintf(hpp->buf, hpp->size, fmt, he->period);
+}
+
+static int hpp__header_delta(struct perf_hpp *hpp)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
+
+ return scnprintf(hpp->buf, hpp->size, fmt, "Delta");
+}
+
+static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused)
+{
+ return 7;
+}
+
+static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he)
+{
+ struct hists *pair_hists = hpp->ptr;
+ u64 old_total, new_total;
+ double old_percent = 0, new_percent = 0;
+ double diff;
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s";
+ char buf[32] = " ";
+
+ old_total = pair_hists->stats.total_period;
+ if (old_total > 0 && he->pair)
+ old_percent = 100.0 * he->pair->period / old_total;
+
+ new_total = hpp->total_period;
+ if (new_total > 0)
+ new_percent = 100.0 * he->period / new_total;
+
+ diff = new_percent - old_percent;
+ if (fabs(diff) >= 0.01)
+ scnprintf(buf, sizeof(buf), "%+4.2F%%", diff);
+
+ return scnprintf(hpp->buf, hpp->size, fmt, buf);
+}
+
+static int hpp__header_displ(struct perf_hpp *hpp)
+{
+ return scnprintf(hpp->buf, hpp->size, "Displ.");
+}
+
+static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused)
+{
+ return 6;
+}
+
+static int hpp__entry_displ(struct perf_hpp *hpp,
+ struct hist_entry *he __maybe_unused)
+{
+ const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s";
+ char buf[32] = " ";
+
+ if (hpp->displacement)
+ scnprintf(buf, sizeof(buf), "%+4ld", hpp->displacement);
+
+ return scnprintf(hpp->buf, hpp->size, fmt, buf);
+}
+
+#define HPP__COLOR_PRINT_FNS(_name) \
+ .header = hpp__header_ ## _name, \
+ .width = hpp__width_ ## _name, \
+ .color = hpp__color_ ## _name, \
+ .entry = hpp__entry_ ## _name
+
+#define HPP__PRINT_FNS(_name) \
+ .header = hpp__header_ ## _name, \
+ .width = hpp__width_ ## _name, \
+ .entry = hpp__entry_ ## _name
+
+struct perf_hpp_fmt perf_hpp__format[] = {
+ { .cond = true, HPP__COLOR_PRINT_FNS(overhead) },
+ { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) },
+ { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) },
+ { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) },
+ { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) },
+ { .cond = false, HPP__PRINT_FNS(samples) },
+ { .cond = false, HPP__PRINT_FNS(period) },
+ { .cond = false, HPP__PRINT_FNS(delta) },
+ { .cond = false, HPP__PRINT_FNS(displ) }
+};
+
+#undef HPP__COLOR_PRINT_FNS
+#undef HPP__PRINT_FNS
+
+void perf_hpp__init(bool need_pair, bool show_displacement)
+{
+ if (symbol_conf.show_cpu_utilization) {
+ perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true;
+ perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true;
+
+ if (perf_guest) {
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true;
+ perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true;
+ }
+ }
+
+ if (symbol_conf.show_nr_samples)
+ perf_hpp__format[PERF_HPP__SAMPLES].cond = true;
+
+ if (symbol_conf.show_total_period)
+ perf_hpp__format[PERF_HPP__PERIOD].cond = true;
+
+ if (need_pair) {
+ perf_hpp__format[PERF_HPP__DELTA].cond = true;
+
+ if (show_displacement)
+ perf_hpp__format[PERF_HPP__DISPL].cond = true;
+ }
+}
+
+static inline void advance_hpp(struct perf_hpp *hpp, int inc)
+{
+ hpp->buf += inc;
+ hpp->size -= inc;
+}
+
+int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
+ bool color)
+{
+ const char *sep = symbol_conf.field_sep;
+ char *start = hpp->buf;
+ int i, ret;
+
+ if (symbol_conf.exclude_other && !he->parent)
+ return 0;
+
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
+
+ if (!sep || i > 0) {
+ ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
+ advance_hpp(hpp, ret);
+ }
+
+ if (color && perf_hpp__format[i].color)
+ ret = perf_hpp__format[i].color(hpp, he);
+ else
+ ret = perf_hpp__format[i].entry(hpp, he);
+
+ advance_hpp(hpp, ret);
+ }
+
+ return hpp->buf - start;
+}
+
+int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
+ struct hists *hists)
+{
+ const char *sep = symbol_conf.field_sep;
+ struct sort_entry *se;
+ int ret = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
+ ret += se->se_snprintf(he, s + ret, size - ret,
+ hists__col_len(hists, se->se_width_idx));
+ }
+
+ return ret;
+}
+
+/*
+ * See hists__fprintf to match the column widths
+ */
+unsigned int hists__sort_list_width(struct hists *hists)
+{
+ struct sort_entry *se;
+ int i, ret = 0;
+
+ for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
+ if (!perf_hpp__format[i].cond)
+ continue;
+ if (i)
+ ret += 2;
+
+ ret += perf_hpp__format[i].width(NULL);
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list)
+ if (!se->elide)
+ ret += 2 + hists__col_len(hists, se->se_width_idx);
+
+ if (verbose) /* Addr + origin */
+ ret += 3 + BITS_PER_LONG / 4;
+
+ return ret;
+}
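
The perf_hpp__format[] table above centralises the "period" columns for every frontend: each entry carries an enable flag plus header/width/entry (and optional color) callbacks that format into a caller-provided buffer, and perf_hpp__init() just flips the flags. A cut-down sketch of the same table-of-callbacks idea, with illustrative names only:

#include <stdio.h>
#include <stdbool.h>

struct row { double percent; unsigned long samples; };

struct col_fmt {
	bool        enabled;
	const char *header;
	int       (*entry)(char *buf, size_t size, const struct row *r);
};

static int col_percent(char *buf, size_t size, const struct row *r)
{
	return snprintf(buf, size, "%6.2f%%", r->percent);
}

static int col_samples(char *buf, size_t size, const struct row *r)
{
	return snprintf(buf, size, "%11lu", r->samples);
}

static struct col_fmt cols[] = {
	{ .enabled = true,  .header = "Overhead", .entry = col_percent },
	{ .enabled = false, .header = "Samples",  .entry = col_samples },
};

int main(void)
{
	struct row r = { .percent = 42.13, .samples = 1234 };
	char buf[64];
	size_t i;

	cols[1].enabled = true;	/* cf. perf_hpp__init() flipping .cond */

	for (i = 0; i < sizeof(cols) / sizeof(cols[0]); i++) {
		if (!cols[i].enabled)
			continue;
		cols[i].entry(buf, sizeof(buf), &r);
		printf("%s: %s\n", cols[i].header, buf);
	}
	return 0;
}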
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index 791fb15ce350..bd7d460f844c 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -1,6 +1,10 @@
-#include "../cache.h"
-#include "../debug.h"
+#include <pthread.h>
+#include "../util/cache.h"
+#include "../util/debug.h"
+#include "../util/hist.h"
+
+pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
void setup_browser(bool fallback_to_pager)
{
@@ -25,6 +29,8 @@ void setup_browser(bool fallback_to_pager)
use_browser = 0;
if (fallback_to_pager)
setup_pager();
+
+ perf_hpp__init(false, false);
break;
}
}
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
new file mode 100644
index 000000000000..882461a42830
--- /dev/null
+++ b/tools/perf/ui/stdio/hist.c
@@ -0,0 +1,498 @@
+#include <stdio.h>
+
+#include "../../util/util.h"
+#include "../../util/hist.h"
+#include "../../util/sort.h"
+
+
+static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
+{
+ int i;
+ int ret = fprintf(fp, " ");
+
+ for (i = 0; i < left_margin; i++)
+ ret += fprintf(fp, " ");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
+ int left_margin)
+{
+ int i;
+ size_t ret = callchain__fprintf_left_margin(fp, left_margin);
+
+ for (i = 0; i < depth; i++)
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "| ");
+ else
+ ret += fprintf(fp, " ");
+
+ ret += fprintf(fp, "\n");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
+ int depth, int depth_mask, int period,
+ u64 total_samples, u64 hits,
+ int left_margin)
+{
+ int i;
+ size_t ret = 0;
+
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ for (i = 0; i < depth; i++) {
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "|");
+ else
+ ret += fprintf(fp, " ");
+ if (!period && i == depth - 1) {
+ double percent;
+
+ percent = hits * 100.0 / total_samples;
+ ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
+ } else
+ ret += fprintf(fp, "%s", " ");
+ }
+ if (chain->ms.sym)
+ ret += fprintf(fp, "%s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);
+
+ return ret;
+}
+
+static struct symbol *rem_sq_bracket;
+static struct callchain_list rem_hits;
+
+static void init_rem_hits(void)
+{
+ rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
+ if (!rem_sq_bracket) {
+ fprintf(stderr, "Not enough memory to display remaining hits\n");
+ return;
+ }
+
+ strcpy(rem_sq_bracket->name, "[...]");
+ rem_hits.ms.sym = rem_sq_bracket;
+}
+
+static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
+ u64 total_samples, int depth,
+ int depth_mask, int left_margin)
+{
+ struct rb_node *node, *next;
+ struct callchain_node *child;
+ struct callchain_list *chain;
+ int new_depth_mask = depth_mask;
+ u64 remaining;
+ size_t ret = 0;
+ int i;
+ uint entries_printed = 0;
+
+ remaining = total_samples;
+
+ node = rb_first(root);
+ while (node) {
+ u64 new_total;
+ u64 cumul;
+
+ child = rb_entry(node, struct callchain_node, rb_node);
+ cumul = callchain_cumul_hits(child);
+ remaining -= cumul;
+
+ /*
+ * The depth mask manages the output of pipes that show
+ * the depth. We don't want to keep the pipes of the current
+ * level for the last child of this depth.
+ * Except if we have remaining filtered hits. They will
+ * supersede the last child.
+ */
+ next = rb_next(node);
+ if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
+ new_depth_mask &= ~(1 << (depth - 1));
+
+ /*
+ * But we keep the older depth mask for the line separator
+ * to keep the level link until we reach the last child
+ */
+ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
+ left_margin);
+ i = 0;
+ list_for_each_entry(chain, &child->val, list) {
+ ret += ipchain__fprintf_graph(fp, chain, depth,
+ new_depth_mask, i++,
+ total_samples,
+ cumul,
+ left_margin);
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = child->children_hit;
+ else
+ new_total = total_samples;
+
+ ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
+ depth + 1,
+ new_depth_mask | (1 << depth),
+ left_margin);
+ node = next;
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL &&
+ remaining && remaining != total_samples) {
+
+ if (!rem_sq_bracket)
+ return ret;
+
+ new_depth_mask &= ~(1 << (depth - 1));
+ ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
+ new_depth_mask, 0, total_samples,
+ remaining, left_margin);
+ }
+
+ return ret;
+}
+
+static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
+ u64 total_samples, int left_margin)
+{
+ struct callchain_node *cnode;
+ struct callchain_list *chain;
+ u32 entries_printed = 0;
+ bool printed = false;
+ struct rb_node *node;
+ int i = 0;
+ int ret = 0;
+
+ /*
+ * If we have a single callchain root, don't bother printing
+ * its percentage (100% in fractal mode, the same percentage as
+ * the hist in graph mode). This also avoids one level of column.
+ */
+ node = rb_first(root);
+ if (node && !rb_next(node)) {
+ cnode = rb_entry(node, struct callchain_node, rb_node);
+ list_for_each_entry(chain, &cnode->val, list) {
+ /*
+ * If we sort by symbol, the first entry is the same as
+ * the symbol. No need to print it, otherwise it appears
+ * twice.
+ */
+ if (!i++ && sort__first_dimension == SORT_SYM)
+ continue;
+ if (!printed) {
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "|\n");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "---");
+ left_margin += 3;
+ printed = true;
+ } else
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+ root = &cnode->rb_root;
+ }
+
+ ret += __callchain__fprintf_graph(fp, root, total_samples,
+ 1, 1, left_margin);
+ ret += fprintf(fp, "\n");
+
+ return ret;
+}
+
+static size_t __callchain__fprintf_flat(FILE *fp,
+ struct callchain_node *self,
+ u64 total_samples)
+{
+ struct callchain_list *chain;
+ size_t ret = 0;
+
+ if (!self)
+ return 0;
+
+ ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
+
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n",
+ (void *)(long)chain->ip);
+ }
+
+ return ret;
+}
+
+static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
+ u64 total_samples)
+{
+ size_t ret = 0;
+ u32 entries_printed = 0;
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
+
+ rb_node = rb_first(self);
+ while (rb_node) {
+ double percent;
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ percent = chain->hit * 100.0 / total_samples;
+
+ ret = percent_color_fprintf(fp, " %6.2f%%\n", percent);
+ ret += __callchain__fprintf_flat(fp, chain, total_samples);
+ ret += fprintf(fp, "\n");
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+
+ rb_node = rb_next(rb_node);
+ }
+
+ return ret;
+}
+
+static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
+ u64 total_samples, int left_margin,
+ FILE *fp)
+{
+ switch (callchain_param.mode) {
+ case CHAIN_GRAPH_REL:
+ return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
+ left_margin);
+ break;
+ case CHAIN_GRAPH_ABS:
+ return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
+ left_margin);
+ break;
+ case CHAIN_FLAT:
+ return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
+ break;
+ case CHAIN_NONE:
+ break;
+ default:
+ pr_err("Bad callchain mode\n");
+ }
+
+ return 0;
+}
+
+static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
+ struct hists *hists,
+ u64 total_period, FILE *fp)
+{
+ int left_margin = 0;
+
+ if (sort__first_dimension == SORT_COMM) {
+ struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
+ typeof(*se), list);
+ left_margin = hists__col_len(hists, se->se_width_idx);
+ left_margin -= thread__comm_len(he->thread);
+ }
+
+ return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
+}
+
+static int hist_entry__fprintf(struct hist_entry *he, size_t size,
+ struct hists *hists, struct hists *pair_hists,
+ long displacement, u64 total_period, FILE *fp)
+{
+ char bf[512];
+ int ret;
+ struct perf_hpp hpp = {
+ .buf = bf,
+ .size = size,
+ .total_period = total_period,
+ .displacement = displacement,
+ .ptr = pair_hists,
+ };
+ bool color = !symbol_conf.field_sep;
+
+ if (size == 0 || size > sizeof(bf))
+ size = hpp.size = sizeof(bf);
+
+ ret = hist_entry__period_snprintf(&hpp, he, color);
+ hist_entry__sort_snprintf(he, bf + ret, size - ret, hists);
+
+ ret = fprintf(fp, "%s\n", bf);
+
+ if (symbol_conf.use_callchain)
+ ret += hist_entry__callchain_fprintf(he, hists,
+ total_period, fp);
+
+ return ret;
+}
+
+size_t hists__fprintf(struct hists *hists, struct hists *pair,
+ bool show_displacement, bool show_header, int max_rows,
+ int max_cols, FILE *fp)
+{
+ struct sort_entry *se;
+ struct rb_node *nd;
+ size_t ret = 0;
+ u64 total_period;
+ unsigned long position = 1;
+ long displacement = 0;
+ unsigned int width;
+ const char *sep = symbol_conf.field_sep;
+ const char *col_width = symbol_conf.col_width_list_str;
+ int idx, nr_rows = 0;
+ char bf[64];
+ struct perf_hpp dummy_hpp = {
+ .buf = bf,
+ .size = sizeof(bf),
+ .ptr = pair,
+ };
+
+ init_rem_hits();
+
+ if (!show_header)
+ goto print_entries;
+
+ fprintf(fp, "# ");
+ for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
+ if (!perf_hpp__format[idx].cond)
+ continue;
+
+ if (idx)
+ fprintf(fp, "%s", sep ?: " ");
+
+ perf_hpp__format[idx].header(&dummy_hpp);
+ fprintf(fp, "%s", bf);
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+ if (sep) {
+ fprintf(fp, "%c%s", *sep, se->se_header);
+ continue;
+ }
+ width = strlen(se->se_header);
+ if (symbol_conf.col_width_list_str) {
+ if (col_width) {
+ hists__set_col_len(hists, se->se_width_idx,
+ atoi(col_width));
+ col_width = strchr(col_width, ',');
+ if (col_width)
+ ++col_width;
+ }
+ }
+ if (!hists__new_col_len(hists, se->se_width_idx, width))
+ width = hists__col_len(hists, se->se_width_idx);
+ fprintf(fp, " %*s", width, se->se_header);
+ }
+
+ fprintf(fp, "\n");
+ if (max_rows && ++nr_rows >= max_rows)
+ goto out;
+
+ if (sep)
+ goto print_entries;
+
+ fprintf(fp, "# ");
+ for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
+ unsigned int i;
+
+ if (!perf_hpp__format[idx].cond)
+ continue;
+
+ if (idx)
+ fprintf(fp, "%s", sep ?: " ");
+
+ width = perf_hpp__format[idx].width(&dummy_hpp);
+ for (i = 0; i < width; i++)
+ fprintf(fp, ".");
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ unsigned int i;
+
+ if (se->elide)
+ continue;
+
+ fprintf(fp, " ");
+ width = hists__col_len(hists, se->se_width_idx);
+ if (width == 0)
+ width = strlen(se->se_header);
+ for (i = 0; i < width; i++)
+ fprintf(fp, ".");
+ }
+
+ fprintf(fp, "\n");
+ if (max_rows && ++nr_rows >= max_rows)
+ goto out;
+
+ fprintf(fp, "#\n");
+ if (max_rows && ++nr_rows >= max_rows)
+ goto out;
+
+print_entries:
+ total_period = hists->stats.total_period;
+
+ for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (h->filtered)
+ continue;
+
+ if (show_displacement) {
+ if (h->pair != NULL)
+ displacement = ((long)h->pair->position -
+ (long)position);
+ else
+ displacement = 0;
+ ++position;
+ }
+ ret += hist_entry__fprintf(h, max_cols, hists, pair, displacement,
+ total_period, fp);
+
+ if (max_rows && ++nr_rows >= max_rows)
+ goto out;
+
+ if (h->ms.map == NULL && verbose > 1) {
+ __map_groups__fprintf_maps(&h->thread->mg,
+ MAP__FUNCTION, verbose, fp);
+ fprintf(fp, "%.10s end\n", graph_dotted_line);
+ }
+ }
+out:
+ free(rem_sq_bracket);
+
+ return ret;
+}
+
+size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
+{
+ int i;
+ size_t ret = 0;
+
+ for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
+ const char *name;
+
+ if (hists->stats.nr_events[i] == 0)
+ continue;
+
+ name = perf_event__name(i);
+ if (!strcmp(name, "UNKNOWN"))
+ continue;
+
+ ret += fprintf(fp, "%16s events: %10d\n", name,
+ hists->stats.nr_events[i]);
+ }
+
+ return ret;
+}
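
In __callchain__fprintf_graph() above, depth_mask is a bitmap: bit i set means the ancestor at depth i still has siblings to come, so a '|' connector is printed in that column. A tiny illustration of the bitmap-to-connector idea (not the perf code itself):

#include <stdio.h>

/* print one tree row: a '|' for every depth whose bit is set, then the name */
static void print_row(int depth, unsigned int depth_mask, const char *name)
{
	int i;

	for (i = 0; i < depth; i++)
		fputs(depth_mask & (1u << i) ? "|          " : "           ",
		      stdout);
	printf("--- %s\n", name);
}

int main(void)
{
	/* two children under the root: the first still has a sibling below */
	print_row(1, 0x1, "child_a");		/* bit 0 set: draw the level-0 pipe */
	print_row(2, 0x3, "grandchild");	/* both levels still open */
	print_row(1, 0x1, "child_b");		/* last child at depth 1 */
	return 0;
}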
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
new file mode 100644
index 000000000000..2884d2f41e33
--- /dev/null
+++ b/tools/perf/ui/tui/helpline.c
@@ -0,0 +1,57 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+
+#include "../../util/debug.h"
+#include "../helpline.h"
+#include "../ui.h"
+#include "../libslang.h"
+
+static void tui_helpline__pop(void)
+{
+}
+
+static void tui_helpline__push(const char *msg)
+{
+ const size_t sz = sizeof(ui_helpline__current);
+
+ SLsmg_gotorc(SLtt_Screen_Rows - 1, 0);
+ SLsmg_set_color(0);
+ SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
+ SLsmg_refresh();
+ strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
+}
+
+struct ui_helpline tui_helpline_fns = {
+ .pop = tui_helpline__pop,
+ .push = tui_helpline__push,
+};
+
+void ui_helpline__init(void)
+{
+ helpline_fns = &tui_helpline_fns;
+ ui_helpline__puts(" ");
+}
+
+char ui_helpline__last_msg[1024];
+
+int ui_helpline__show_help(const char *format, va_list ap)
+{
+ int ret;
+ static int backlog;
+
+ pthread_mutex_lock(&ui__lock);
+ ret = vscnprintf(ui_helpline__last_msg + backlog,
+ sizeof(ui_helpline__last_msg) - backlog, format, ap);
+ backlog += ret;
+
+ if (ui_helpline__last_msg[backlog - 1] == '\n') {
+ ui_helpline__puts(ui_helpline__last_msg);
+ SLsmg_refresh();
+ backlog = 0;
+ }
+ pthread_mutex_unlock(&ui__lock);
+
+ return ret;
+}
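
Both the GTK and TUI show_help implementations above buffer partial messages in a static backlog and only flush once a full '\n'-terminated line has been formatted. A minimal sketch of that accumulate-until-newline pattern, using plain stdio rather than the perf helpers:

#include <stdarg.h>
#include <stdio.h>

static char line[256];
static size_t backlog;	/* bytes already buffered for the current line */

static int buffered_status(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = vsnprintf(line + backlog, sizeof(line) - backlog, fmt, ap);
	va_end(ap);
	if (ret < 0)
		return ret;

	backlog += (size_t)ret;
	if (backlog >= sizeof(line))	/* vsnprintf truncated the message */
		backlog = sizeof(line) - 1;

	/* flush only once a complete line has been assembled */
	if (backlog && line[backlog - 1] == '\n') {
		fputs(line, stdout);
		backlog = 0;
	}
	return ret;
}

int main(void)
{
	buffered_status("loading ");
	buffered_status("%d events...", 42);	/* still buffered: no newline yet */
	buffered_status(" done\n");		/* whole line is printed now */
	return 0;
}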
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index e813c1d17346..60debb81537a 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -11,12 +11,12 @@
#include "../libslang.h"
#include "../keysyms.h"
-pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
-
static volatile int ui__need_resize;
extern struct perf_error_ops perf_tui_eops;
+extern void hist_browser__init_hpp(void);
+
void ui__refresh_dimensions(bool force)
{
if (force || ui__need_resize) {
@@ -28,7 +28,7 @@ void ui__refresh_dimensions(bool force)
}
}
-static void ui__sigwinch(int sig __used)
+static void ui__sigwinch(int sig __maybe_unused)
{
ui__need_resize = 1;
}
@@ -88,7 +88,7 @@ int ui__getch(int delay_secs)
return SLkp_getkey();
}
-static void newt_suspend(void *d __used)
+static void newt_suspend(void *d __maybe_unused)
{
newtSuspend();
raise(SIGTSTP);
@@ -126,6 +126,8 @@ int ui__init(void)
signal(SIGTERM, ui__signal);
perf_error__register(&perf_tui_eops);
+
+ hist_browser__init_hpp();
out:
return err;
}
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c
index b8144e80bb1e..e6d134773d0a 100644
--- a/tools/perf/util/alias.c
+++ b/tools/perf/util/alias.c
@@ -3,7 +3,8 @@
static const char *alias_key;
static char *alias_val;
-static int alias_lookup_cb(const char *k, const char *v, void *cb __used)
+static int alias_lookup_cb(const char *k, const char *v,
+ void *cb __maybe_unused)
{
if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) {
if (!v)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 3a282c0057d2..f0a910371377 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -17,6 +17,7 @@
#include <pthread.h>
const char *disassembler_style;
+const char *objdump_path;
static struct ins *ins__find(const char *name);
static int disasm_line__parse(char *line, char **namep, char **rawp);
@@ -312,8 +313,8 @@ static struct ins_ops dec_ops = {
.scnprintf = dec__scnprintf,
};
-static int nop__scnprintf(struct ins *ins __used, char *bf, size_t size,
- struct ins_operands *ops __used)
+static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
+ struct ins_operands *ops __maybe_unused)
{
return scnprintf(bf, size, "%-6.6s", "nop");
}
@@ -415,7 +416,7 @@ static struct ins *ins__find(const char *name)
return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
}
-int symbol__annotate_init(struct map *map __used, struct symbol *sym)
+int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
pthread_mutex_init(&notes->lock, NULL);
@@ -820,9 +821,10 @@ fallback:
dso, dso->long_name, sym, sym->name);
snprintf(command, sizeof(command),
- "objdump %s%s --start-address=0x%016" PRIx64
+ "%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
" -d %s %s -C %s|grep -v %s|expand",
+ objdump_path ? objdump_path : "objdump",
disassembler_style ? "-M " : "",
disassembler_style ? disassembler_style : "",
map__rip_2objdump(map, sym->start),
@@ -982,7 +984,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
int context)
{
struct dso *dso = map->dso;
- const char *filename = dso->long_name, *d_filename;
+ char *filename;
+ const char *d_filename;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
@@ -990,6 +993,10 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
int more = 0;
u64 len;
+ filename = strdup(dso->long_name);
+ if (!filename)
+ return -ENOMEM;
+
if (full_paths)
d_filename = filename;
else
@@ -1040,6 +1047,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
}
}
+ free(filename);
+
return more;
}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 78a5692dd718..9b5b21e7b032 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -7,6 +7,7 @@
#include "symbol.h"
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <pthread.h>
struct ins;
@@ -125,7 +126,7 @@ int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
-int symbol__annotate_init(struct map *map __used, struct symbol *sym);
+int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym);
int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
bool full_paths, int min_pcnt, int max_lines,
int context);
@@ -138,11 +139,12 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
int max_lines);
#ifdef NO_NEWT_SUPPORT
-static inline int symbol__tui_annotate(struct symbol *sym __used,
- struct map *map __used,
- int evidx __used,
- void(*timer)(void *arg) __used,
- void *arg __used, int delay_secs __used)
+static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
+ struct map *map __maybe_unused,
+ int evidx __maybe_unused,
+ void(*timer)(void *arg) __maybe_unused,
+ void *arg __maybe_unused,
+ int delay_secs __maybe_unused)
{
return 0;
}
@@ -152,5 +154,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
#endif
extern const char *disassembler_style;
+extern const char *objdump_path;
#endif /* __PERF_ANNOTATE_H */
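
The annotate changes above make the disassembler selectable: when objdump_path is set the command line uses it, otherwise it falls back to plain "objdump" from $PATH. A reduced sketch of that fallback, with a hypothetical environment variable standing in for the real option plumbing:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* e.g. a cross toolchain's objdump; NULL means "use the default" */
	const char *objdump_override = getenv("MY_OBJDUMP");
	char command[512];

	snprintf(command, sizeof(command),
		 "%s -d --start-address=0x%016llx --stop-address=0x%016llx %s",
		 objdump_override ? objdump_override : "objdump",
		 0x400000ULL, 0x401000ULL, "/bin/true");

	printf("would run: %s\n", command);
	return 0;
}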
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index fd9a5944b627..8e3a740ddbd4 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -16,10 +16,10 @@
#include "session.h"
#include "tool.h"
-static int build_id__mark_dso_hit(struct perf_tool *tool __used,
+static int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
- struct perf_evsel *evsel __used,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
struct addr_location al;
@@ -41,9 +41,10 @@ static int build_id__mark_dso_hit(struct perf_tool *tool __used,
return 0;
}
-static int perf_event__exit_del_thread(struct perf_tool *tool __used,
+static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
+ struct perf_sample *sample
+ __maybe_unused,
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index cff18c617d13..ab1769426541 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -39,7 +39,7 @@ static inline void setup_browser(bool fallback_to_pager)
if (fallback_to_pager)
setup_pager();
}
-static inline void exit_browser(bool wait_for_ok __used) {}
+static inline void exit_browser(bool wait_for_ok __maybe_unused) {}
#else
void setup_browser(bool fallback_to_pager);
void exit_browser(bool wait_for_ok);
@@ -49,7 +49,7 @@ static inline int ui__init(void)
{
return -1;
}
-static inline void ui__exit(bool wait_for_ok __used) {}
+static inline void ui__exit(bool wait_for_ok __maybe_unused) {}
#else
int ui__init(void);
void ui__exit(bool wait_for_ok);
@@ -60,7 +60,7 @@ static inline int perf_gtk__init(void)
{
return -1;
}
-static inline void perf_gtk__exit(bool wait_for_ok __used) {}
+static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {}
#else
int perf_gtk__init(void);
void perf_gtk__exit(bool wait_for_ok);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 3a6bff47614f..d3b3f5d82137 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -93,7 +93,7 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
*/
static void
sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
- u64 min_hit, struct callchain_param *param __used)
+ u64 min_hit, struct callchain_param *param __maybe_unused)
{
__sort_chain_flat(rb_root, &root->node, min_hit);
}
@@ -115,7 +115,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node,
static void
sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
- u64 min_hit, struct callchain_param *param __used)
+ u64 min_hit, struct callchain_param *param __maybe_unused)
{
__sort_chain_graph_abs(&chain_root->node, min_hit);
rb_root->rb_node = chain_root->node.rb_root.rb_node;
@@ -140,7 +140,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node,
static void
sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
- u64 min_hit __used, struct callchain_param *param)
+ u64 min_hit __maybe_unused, struct callchain_param *param)
{
__sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
rb_root->rb_node = chain_root->node.rb_root.rb_node;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 3bdb407f9cd9..eb340571e7d6 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -58,7 +58,7 @@ struct callchain_list {
/*
* A callchain cursor is a single linked list that
* let one feed a callchain progressively.
- * It keeps persitent allocated entries to minimize
+ * It keeps persistent allocated entries to minimize
* allocations.
*/
struct callchain_cursor_node {
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index dbe2f16b1a1a..96bbda1ddb83 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -138,8 +138,8 @@ void close_cgroup(struct cgroup_sel *cgrp)
}
}
-int parse_cgroups(const struct option *opt __used, const char *str,
- int unset __used)
+int parse_cgroups(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
{
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
const char *p, *e, *eos = str + strlen(str);
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 6faa3a18bfbd..3e0fdd369ccb 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -342,13 +342,15 @@ const char *perf_config_dirname(const char *name, const char *value)
return value;
}
-static int perf_default_core_config(const char *var __used, const char *value __used)
+static int perf_default_core_config(const char *var __maybe_unused,
+ const char *value __maybe_unused)
{
/* Add other config variables here. */
return 0;
}
-int perf_default_config(const char *var, const char *value, void *dummy __used)
+int perf_default_config(const char *var, const char *value,
+ void *dummy __maybe_unused)
{
if (!prefixcmp(var, "core."))
return perf_default_core_config(var, value);
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index adc72f09914d..2b32ffa9ebdb 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -38,24 +38,19 @@ static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
return cpus;
}
-static struct cpu_map *cpu_map__read_all_cpu_map(void)
+struct cpu_map *cpu_map__read(FILE *file)
{
struct cpu_map *cpus = NULL;
- FILE *onlnf;
int nr_cpus = 0;
int *tmp_cpus = NULL, *tmp;
int max_entries = 0;
int n, cpu, prev;
char sep;
- onlnf = fopen("/sys/devices/system/cpu/online", "r");
- if (!onlnf)
- return cpu_map__default_new();
-
sep = 0;
prev = -1;
for (;;) {
- n = fscanf(onlnf, "%u%c", &cpu, &sep);
+ n = fscanf(file, "%u%c", &cpu, &sep);
if (n <= 0)
break;
if (prev >= 0) {
@@ -95,6 +90,19 @@ static struct cpu_map *cpu_map__read_all_cpu_map(void)
cpus = cpu_map__default_new();
out_free_tmp:
free(tmp_cpus);
+ return cpus;
+}
+
+static struct cpu_map *cpu_map__read_all_cpu_map(void)
+{
+ struct cpu_map *cpus = NULL;
+ FILE *onlnf;
+
+ onlnf = fopen("/sys/devices/system/cpu/online", "r");
+ if (!onlnf)
+ return cpu_map__default_new();
+
+ cpus = cpu_map__read(onlnf);
fclose(onlnf);
return cpus;
}
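
cpu_map__read() above pulls the parser for cpu lists such as "0-3,6,8" (the format of /sys/devices/system/cpu/online) out of the /sys-specific wrapper so it can work on any FILE *. A standalone sketch of the same range-expanding scan loop, reading from a string via fmemopen() (POSIX) purely for illustration:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

/* expand a cpu list such as "0-3,6,8" and print every cpu it names */
int main(void)
{
	char list[] = "0-3,6,8\n";
	FILE *f = fmemopen(list, strlen(list), "r");
	int cpu, prev = -1, n;
	char sep = 0;

	if (!f)
		return 1;

	for (;;) {
		n = fscanf(f, "%d%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0)			/* finish an "a-b" range */
			while (++prev < cpu)
				printf("cpu %d\n", prev);
		printf("cpu %d\n", cpu);
		prev = (n == 2 && sep == '-') ? cpu : -1;
		if (n == 1 || sep == '\n')	/* end of the list */
			break;
	}
	fclose(f);
	return 0;
}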
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index c41518573c6a..2f68a3b8c285 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -2,6 +2,7 @@
#define __PERF_CPUMAP_H
#include <stdio.h>
+#include <stdbool.h>
struct cpu_map {
int nr;
@@ -11,7 +12,17 @@ struct cpu_map {
struct cpu_map *cpu_map__new(const char *cpu_list);
struct cpu_map *cpu_map__dummy_new(void);
void cpu_map__delete(struct cpu_map *map);
-
+struct cpu_map *cpu_map__read(FILE *file);
size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
+static inline int cpu_map__nr(const struct cpu_map *map)
+{
+ return map ? map->nr : 1;
+}
+
+static inline bool cpu_map__all(const struct cpu_map *map)
+{
+ return map ? map->map[0] == -1 : true;
+}
+
#endif /* __PERF_CPUMAP_H */
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 4dfe0bb3c322..66eb3828ceb5 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -23,8 +23,10 @@ int eprintf(int level, const char *fmt, ...)
if (verbose >= level) {
va_start(args, fmt);
- if (use_browser > 0)
+ if (use_browser == 1)
ret = ui_helpline__show_help(fmt, args);
+ else if (use_browser == 2)
+ ret = perf_gtk__show_helpline(fmt, args);
else
ret = vfprintf(stderr, fmt, args);
va_end(args);
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 015c91dbc096..bb2e7d1007ab 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -4,6 +4,7 @@
#include <stdbool.h>
#include "event.h"
+#include "../ui/helpline.h"
extern int verbose;
extern bool quiet, dump_trace;
@@ -15,32 +16,26 @@ struct ui_progress;
struct perf_error_ops;
#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
-static inline int ui_helpline__show_help(const char *format __used, va_list ap __used)
-{
- return 0;
-}
-
-static inline void ui_progress__update(u64 curr __used, u64 total __used,
- const char *title __used) {}
+static inline void ui_progress__update(u64 curr __maybe_unused,
+ u64 total __maybe_unused,
+ const char *title __maybe_unused) {}
#define ui__error(format, arg...) ui__warning(format, ##arg)
static inline int
-perf_error__register(struct perf_error_ops *eops __used)
+perf_error__register(struct perf_error_ops *eops __maybe_unused)
{
return 0;
}
static inline int
-perf_error__unregister(struct perf_error_ops *eops __used)
+perf_error__unregister(struct perf_error_ops *eops __maybe_unused)
{
return 0;
}
#else /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
-extern char ui_helpline__last_msg[];
-int ui_helpline__show_help(const char *format, va_list ap);
#include "../ui/progress.h"
int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
#include "../ui/util.h"
diff --git a/tools/perf/util/dso-test-data.c b/tools/perf/util/dso-test-data.c
index 541cdc72c7df..c6caedeb1d6b 100644
--- a/tools/perf/util/dso-test-data.c
+++ b/tools/perf/util/dso-test-data.c
@@ -23,7 +23,7 @@ static char *test_file(int size)
int fd, i;
unsigned char *buf;
- fd = mkostemp(templ, O_CREAT|O_WRONLY|O_TRUNC);
+ fd = mkstemp(templ);
buf = malloc(size);
if (!buf) {
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index ee51e9b4dc09..3e5f5430a28a 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -804,6 +804,8 @@ int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
tmp = "union ";
else if (tag == DW_TAG_structure_type)
tmp = "struct ";
+ else if (tag == DW_TAG_enumeration_type)
+ tmp = "enum ";
/* Write a base name */
ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
return (ret >= len) ? -E2BIG : ret;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 2a6f33cd888c..6715b1938725 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -112,7 +112,7 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
event->comm.header.type = PERF_RECORD_COMM;
size = strlen(event->comm.comm) + 1;
- size = ALIGN(size, sizeof(u64));
+ size = PERF_ALIGN(size, sizeof(u64));
memset(event->comm.comm + size, 0, machine->id_hdr_size);
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
@@ -120,7 +120,9 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
if (!full) {
event->comm.tid = pid;
- process(tool, event, &synth_sample, machine);
+ if (process(tool, event, &synth_sample, machine) != 0)
+ return -1;
+
goto out;
}
@@ -143,7 +145,7 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
sizeof(event->comm.comm));
size = strlen(event->comm.comm) + 1;
- size = ALIGN(size, sizeof(u64));
+ size = PERF_ALIGN(size, sizeof(u64));
memset(event->comm.comm + size, 0, machine->id_hdr_size);
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
@@ -151,7 +153,10 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
event->comm.tid = pid;
- process(tool, event, &synth_sample, machine);
+ if (process(tool, event, &synth_sample, machine) != 0) {
+ tgid = -1;
+ break;
+ }
}
closedir(tasks);
@@ -167,6 +172,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
{
char filename[PATH_MAX];
FILE *fp;
+ int rc = 0;
snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
@@ -222,7 +228,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
size = strlen(execname);
execname[size - 1] = '\0'; /* Remove \n */
memcpy(event->mmap.filename, execname, size);
- size = ALIGN(size, sizeof(u64));
+ size = PERF_ALIGN(size, sizeof(u64));
event->mmap.len -= event->mmap.start;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
@@ -231,18 +237,22 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
event->mmap.pid = tgid;
event->mmap.tid = pid;
- process(tool, event, &synth_sample, machine);
+ if (process(tool, event, &synth_sample, machine) != 0) {
+ rc = -1;
+ break;
+ }
}
}
fclose(fp);
- return 0;
+ return rc;
}
int perf_event__synthesize_modules(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
+ int rc = 0;
struct rb_node *nd;
struct map_groups *kmaps = &machine->kmaps;
union perf_event *event = zalloc((sizeof(event->mmap) +
@@ -272,7 +282,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
if (pos->dso->kernel)
continue;
- size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
@@ -284,11 +294,14 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
memcpy(event->mmap.filename, pos->dso->long_name,
pos->dso->long_name_len + 1);
- process(tool, event, &synth_sample, machine);
+ if (process(tool, event, &synth_sample, machine) != 0) {
+ rc = -1;
+ break;
+ }
}
free(event);
- return 0;
+ return rc;
}
static int __event__synthesize_thread(union perf_event *comm_event,
@@ -392,12 +405,16 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (*end) /* only interested in proper numerical dirents */
continue;
- __event__synthesize_thread(comm_event, mmap_event, pid, 1,
- process, tool, machine);
+ if (__event__synthesize_thread(comm_event, mmap_event, pid, 1,
+ process, tool, machine) != 0) {
+ err = -1;
+ goto out_closedir;
+ }
}
- closedir(proc);
err = 0;
+out_closedir:
+ closedir(proc);
out_free_mmap:
free(mmap_event);
out_free_comm:
@@ -412,7 +429,7 @@ struct process_symbol_args {
};
static int find_symbol_cb(void *arg, const char *name, char type,
- u64 start, u64 end __used)
+ u64 start)
{
struct process_symbol_args *args = arg;
@@ -477,7 +494,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
map = machine->vmlinux_maps[MAP__FUNCTION];
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
"%s%s", mmap_name, symbol_name) + 1;
- size = ALIGN(size, sizeof(u64));
+ size = PERF_ALIGN(size, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
@@ -497,9 +514,9 @@ size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}
-int perf_event__process_comm(struct perf_tool *tool __used,
+int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
+ struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
@@ -515,10 +532,10 @@ int perf_event__process_comm(struct perf_tool *tool __used,
return 0;
}
-int perf_event__process_lost(struct perf_tool *tool __used,
+int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
event->lost.id, event->lost.lost);
@@ -538,7 +555,8 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event,
maps[MAP__FUNCTION]->end = ~0ULL;
}
-static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
+static int perf_event__process_kernel_mmap(struct perf_tool *tool
+ __maybe_unused,
union perf_event *event,
struct machine *machine)
{
@@ -640,7 +658,7 @@ size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
int perf_event__process_mmap(struct perf_tool *tool,
union perf_event *event,
- struct perf_sample *sample __used,
+ struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
struct thread *thread;
@@ -684,9 +702,9 @@ size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
event->fork.ppid, event->fork.ptid);
}
-int perf_event__process_task(struct perf_tool *tool __used,
+int perf_event__process_task(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __used,
+ struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
@@ -886,8 +904,9 @@ int perf_event__preprocess_sample(const union perf_event *event,
al->sym = map__find_symbol(al->map, al->addr, filter);
}
- if (symbol_conf.sym_list && al->sym &&
- !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
+ if (symbol_conf.sym_list &&
+ (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
+ al->sym->name)))
goto out_filtered;
return 0;
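
The event.c hunks above stop ignoring the return value of the process() callback: each synthesis helper now bails out with -1 as soon as one delivery fails instead of pretending everything succeeded. A compact sketch of that early-out pattern over a generic callback:

#include <stdio.h>

typedef int (*process_fn)(int event_id);

static int deliver(int event_id)
{
	if (event_id == 3)
		return -1;	/* simulate a consumer failure */
	printf("delivered event %d\n", event_id);
	return 0;
}

static int synthesize_all(process_fn process, int nr_events)
{
	int id, rc = 0;

	for (id = 0; id < nr_events; id++) {
		if (process(id) != 0) {	/* propagate instead of ignoring */
			rc = -1;
			break;
		}
	}
	return rc;
}

int main(void)
{
	printf("synthesize_all() -> %d\n", synthesize_all(deliver, 5));
	return 0;
}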
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index d84870b06426..21b99e741a87 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -69,6 +69,16 @@ struct sample_event {
u64 array[];
};
+struct regs_dump {
+ u64 *regs;
+};
+
+struct stack_dump {
+ u16 offset;
+ u64 size;
+ char *data;
+};
+
struct perf_sample {
u64 ip;
u32 pid, tid;
@@ -82,6 +92,8 @@ struct perf_sample {
void *raw_data;
struct ip_callchain *callchain;
struct branch_stack *branch_stack;
+ struct regs_dump user_regs;
+ struct stack_dump user_stack;
};
#define BUILD_ID_SIZE 20
@@ -89,7 +101,7 @@ struct perf_sample {
struct build_id_event {
struct perf_event_header header;
pid_t pid;
- u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
char filename[];
};
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 9b38681add9e..ae89686102f4 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -57,7 +57,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist,
if (evlist->cpus->map[0] < 0)
opts->no_inherit = true;
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ first = perf_evlist__first(evlist);
list_for_each_entry(evsel, &evlist->entries, node) {
perf_evsel__config(evsel, opts, first);
@@ -108,6 +108,25 @@ void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
evlist->nr_entries += nr_entries;
}
+void __perf_evlist__set_leader(struct list_head *list)
+{
+ struct perf_evsel *evsel, *leader;
+
+ leader = list_entry(list->next, struct perf_evsel, node);
+ leader->leader = NULL;
+
+ list_for_each_entry(evsel, list, node) {
+ if (evsel != leader)
+ evsel->leader = leader;
+ }
+}
+
+void perf_evlist__set_leader(struct perf_evlist *evlist)
+{
+ if (evlist->nr_entries)
+ __perf_evlist__set_leader(&evlist->entries);
+}
+
int perf_evlist__add_default(struct perf_evlist *evlist)
{
struct perf_event_attr attr = {
@@ -285,7 +304,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
int cpu, thread;
struct perf_evsel *pos;
- for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
for (thread = 0; thread < evlist->threads->nr; thread++)
ioctl(FD(pos, cpu, thread),
@@ -296,7 +315,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
- int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
+ int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
@@ -357,7 +376,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
int hash;
if (evlist->nr_entries == 1)
- return list_entry(evlist->entries.next, struct perf_evsel, node);
+ return perf_evlist__first(evlist);
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &evlist->heads[hash];
@@ -367,7 +386,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
return sid->evsel;
if (!perf_evlist__sample_id_all(evlist))
- return list_entry(evlist->entries.next, struct perf_evsel, node);
+ return perf_evlist__first(evlist);
return NULL;
}
@@ -456,8 +475,8 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
- evlist->nr_mmaps = evlist->cpus->nr;
- if (evlist->cpus->map[0] == -1)
+ evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
+ if (cpu_map__all(evlist->cpus))
evlist->nr_mmaps = evlist->threads->nr;
evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
return evlist->mmap != NULL ? 0 : -ENOMEM;
@@ -603,11 +622,11 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
list_for_each_entry(evsel, &evlist->entries, node) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
evsel->sample_id == NULL &&
- perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
+ perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
return -ENOMEM;
}
- if (evlist->cpus->map[0] == -1)
+ if (cpu_map__all(cpus))
return perf_evlist__mmap_per_thread(evlist, prot, mask);
return perf_evlist__mmap_per_cpu(evlist, prot, mask);
@@ -647,39 +666,44 @@ void perf_evlist__delete_maps(struct perf_evlist *evlist)
evlist->threads = NULL;
}
-int perf_evlist__set_filters(struct perf_evlist *evlist)
+int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
- const struct thread_map *threads = evlist->threads;
- const struct cpu_map *cpus = evlist->cpus;
struct perf_evsel *evsel;
- char *filter;
- int thread;
- int cpu;
- int err;
- int fd;
+ int err = 0;
+ const int ncpus = cpu_map__nr(evlist->cpus),
+ nthreads = evlist->threads->nr;
list_for_each_entry(evsel, &evlist->entries, node) {
- filter = evsel->filter;
- if (!filter)
+ if (evsel->filter == NULL)
continue;
- for (cpu = 0; cpu < cpus->nr; cpu++) {
- for (thread = 0; thread < threads->nr; thread++) {
- fd = FD(evsel, cpu, thread);
- err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
- if (err)
- return err;
- }
- }
+
+ err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
+ if (err)
+ break;
}
- return 0;
+ return err;
}
-bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
+int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
- struct perf_evsel *pos, *first;
+ struct perf_evsel *evsel;
+ int err = 0;
+ const int ncpus = cpu_map__nr(evlist->cpus),
+ nthreads = evlist->threads->nr;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
+ if (err)
+ break;
+ }
+
+ return err;
+}
- pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
+bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
list_for_each_entry_continue(pos, &evlist->entries, node) {
if (first->attr.sample_type != pos->attr.sample_type)
@@ -689,23 +713,19 @@ bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
return true;
}
-u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
+u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
- struct perf_evsel *first;
-
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ struct perf_evsel *first = perf_evlist__first(evlist);
return first->attr.sample_type;
}
-u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
+u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
- struct perf_evsel *first;
+ struct perf_evsel *first = perf_evlist__first(evlist);
struct perf_sample *data;
u64 sample_type;
u16 size = 0;
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
-
if (!first->attr.sample_id_all)
goto out;
@@ -729,11 +749,9 @@ out:
return size;
}
-bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
+bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
- struct perf_evsel *pos, *first;
-
- pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
list_for_each_entry_continue(pos, &evlist->entries, node) {
if (first->attr.sample_id_all != pos->attr.sample_id_all)
@@ -743,11 +761,9 @@ bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
return true;
}
-bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
+bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
- struct perf_evsel *first;
-
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
+ struct perf_evsel *first = perf_evlist__first(evlist);
return first->attr.sample_id_all;
}
@@ -757,21 +773,13 @@ void perf_evlist__set_selected(struct perf_evlist *evlist,
evlist->selected = evsel;
}
-int perf_evlist__open(struct perf_evlist *evlist, bool group)
+int perf_evlist__open(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel, *first;
+ struct perf_evsel *evsel;
int err, ncpus, nthreads;
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
-
list_for_each_entry(evsel, &evlist->entries, node) {
- struct xyarray *group_fd = NULL;
-
- if (group && evsel != first)
- group_fd = first->fd;
-
- err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
- group, group_fd);
+ err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
if (err < 0)
goto out_err;
}
@@ -883,8 +891,21 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
}
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
- struct perf_sample *sample, bool swapped)
+ struct perf_sample *sample)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+ return perf_evsel__parse_sample(evsel, event, sample);
+}
+
+size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
- struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node);
- return perf_evsel__parse_sample(e, event, sample, swapped);
+ struct perf_evsel *evsel;
+ size_t printed = 0;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
+ perf_evsel__name(evsel));
+ }
+
+ return printed + fprintf(fp, "\n");
}
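
Taken together, the evlist.c changes above drop the old group/group_fd plumbing (grouping is now derived from each evsel's ->leader) and split filter handling into a per-evlist setter plus an "apply what each evsel already carries" pass. A minimal sketch of driving the reworked interface follows; it is illustrative only, not part of this patch, assumes the evlist was populated and its maps created elsewhere (e.g. via perf_evlist__create_maps()), and elides most error handling:

/*
 * Illustrative sketch, not part of this patch. Assumes perf's internal
 * headers ("evlist.h") and an already-populated evlist.
 */
static int example_open_evlist(struct perf_evlist *evlist)
{
	int err;

	perf_evlist__set_leader(evlist);	/* grouping now comes from evsel->leader */

	err = perf_evlist__open(evlist);	/* no 'group' flag any more */
	if (err < 0)
		return err;

	/* either set one tracepoint filter string on every evsel ... */
	err = perf_evlist__set_filter(evlist, "common_pid != 0");
	if (err)
		return err;

	/* ... or apply the per-evsel ->filter strings gathered earlier */
	err = perf_evlist__apply_filters(evlist);
	if (err)
		return err;

	perf_evlist__fprintf(evlist, stdout);	/* "name1, name2, ...\n" */
	return 0;
}
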
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 528c1acd9298..3f1fb66be022 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -5,6 +5,7 @@
#include <stdio.h>
#include "../perf.h"
#include "event.h"
+#include "evsel.h"
#include "util.h"
#include <unistd.h>
@@ -41,8 +42,6 @@ struct perf_evsel_str_handler {
void *handler;
};
-struct perf_evsel;
-
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
struct thread_map *threads);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
@@ -73,6 +72,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
#define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
+int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
+
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
@@ -85,7 +86,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
-int perf_evlist__open(struct perf_evlist *evlist, bool group);
+int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__config_attrs(struct perf_evlist *evlist,
struct perf_record_opts *opts);
@@ -116,20 +117,34 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
int perf_evlist__create_maps(struct perf_evlist *evlist,
struct perf_target *target);
void perf_evlist__delete_maps(struct perf_evlist *evlist);
-int perf_evlist__set_filters(struct perf_evlist *evlist);
+int perf_evlist__apply_filters(struct perf_evlist *evlist);
+
+void __perf_evlist__set_leader(struct list_head *list);
+void perf_evlist__set_leader(struct perf_evlist *evlist);
-u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
-bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist);
-u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
+u64 perf_evlist__sample_type(struct perf_evlist *evlist);
+bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
+u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
- struct perf_sample *sample, bool swapped);
+ struct perf_sample *sample);
-bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
-bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
+bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
+bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
struct list_head *list,
int nr_entries);
+static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.next, struct perf_evsel, node);
+}
+
+static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
+{
+ return list_entry(evlist->entries.prev, struct perf_evsel, node);
+}
+
+size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 2eaae140def2..ffdd94e9c9c3 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -8,7 +8,10 @@
*/
#include <byteswap.h>
+#include <linux/bitops.h>
#include "asm/bug.h"
+#include "debugfs.h"
+#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
@@ -16,9 +19,10 @@
#include "thread_map.h"
#include "target.h"
#include "../../../include/linux/hw_breakpoint.h"
+#include "../../include/linux/perf_event.h"
+#include "perf_regs.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
static int __perf_evsel__sample_size(u64 sample_type)
{
@@ -66,7 +70,80 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
return evsel;
}
-static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+struct event_format *event_format__new(const char *sys, const char *name)
+{
+ int fd, n;
+ char *filename;
+ void *bf = NULL, *nbf;
+ size_t size = 0, alloc_size = 0;
+ struct event_format *format = NULL;
+
+ if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out_free_filename;
+
+ do {
+ if (size == alloc_size) {
+ alloc_size += BUFSIZ;
+ nbf = realloc(bf, alloc_size);
+ if (nbf == NULL)
+ goto out_free_bf;
+ bf = nbf;
+ }
+
+ n = read(fd, bf + size, BUFSIZ);
+ if (n < 0)
+ goto out_free_bf;
+ size += n;
+ } while (n > 0);
+
+ pevent_parse_format(&format, bf, size, sys);
+
+out_free_bf:
+ free(bf);
+ close(fd);
+out_free_filename:
+ free(filename);
+out:
+ return format;
+}
+
+struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
+{
+ struct perf_evsel *evsel = zalloc(sizeof(*evsel));
+
+ if (evsel != NULL) {
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
+ };
+
+ if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
+ goto out_free;
+
+ evsel->tp_format = event_format__new(sys, name);
+ if (evsel->tp_format == NULL)
+ goto out_free;
+
+ event_attr_init(&attr);
+ attr.config = evsel->tp_format->id;
+ attr.sample_period = 1;
+ perf_evsel__init(evsel, &attr, idx);
+ }
+
+ return evsel;
+
+out_free:
+ free(evsel->name);
+ free(evsel);
+ return NULL;
+}
+
+const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
"cycles",
"instructions",
"cache-references",
@@ -129,12 +206,12 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
-static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
+const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
"cpu-clock",
"task-clock",
"page-faults",
"context-switches",
- "CPU-migrations",
+ "cpu-migrations",
"minor-faults",
"major-faults",
"alignment-faults",
@@ -317,7 +394,8 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
break;
default:
- scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
+ scnprintf(bf, sizeof(bf), "unknown attr type: %d",
+ evsel->attr.type);
break;
}
@@ -367,9 +445,18 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
attr->mmap_data = track;
}
- if (opts->call_graph)
+ if (opts->call_graph) {
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+ if (opts->call_graph == CALLCHAIN_DWARF) {
+ attr->sample_type |= PERF_SAMPLE_REGS_USER |
+ PERF_SAMPLE_STACK_USER;
+ attr->sample_regs_user = PERF_REGS_MASK;
+ attr->sample_stack_user = opts->stack_dump_size;
+ attr->exclude_callchain_user = 1;
+ }
+ }
+
if (perf_target__has_cpu(&opts->target))
attr->sample_type |= PERF_SAMPLE_CPU;
@@ -421,6 +508,24 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
return evsel->fd != NULL ? 0 : -ENOMEM;
}
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+ const char *filter)
+{
+ int cpu, thread;
+
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ for (thread = 0; thread < nthreads; thread++) {
+ int fd = FD(evsel, cpu, thread),
+ err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
+
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
@@ -481,6 +586,9 @@ void perf_evsel__delete(struct perf_evsel *evsel)
{
perf_evsel__exit(evsel);
close_cgroup(evsel->cgrp);
+ free(evsel->group_name);
+ if (evsel->tp_format)
+ pevent_free_format(evsel->tp_format);
free(evsel->name);
free(evsel);
}
@@ -556,9 +664,28 @@ int __perf_evsel__read(struct perf_evsel *evsel,
return 0;
}
+static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
+{
+ struct perf_evsel *leader = evsel->leader;
+ int fd;
+
+ if (!leader)
+ return -1;
+
+ /*
+ * Leader must be already processed/open,
+ * if not it's a bug.
+ */
+ BUG_ON(!leader->fd);
+
+ fd = FD(leader, cpu, thread);
+ BUG_ON(fd == -1);
+
+ return fd;
+}
+
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group,
- struct xyarray *group_fds)
+ struct thread_map *threads)
{
int cpu, thread;
unsigned long flags = 0;
@@ -574,13 +701,15 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
}
for (cpu = 0; cpu < cpus->nr; cpu++) {
- int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;
for (thread = 0; thread < threads->nr; thread++) {
+ int group_fd;
if (!evsel->cgrp)
pid = threads->map[thread];
+ group_fd = get_group_fd(evsel, cpu, thread);
+
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
pid,
cpus->map[cpu],
@@ -589,9 +718,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
err = -errno;
goto out_close;
}
-
- if (group && group_fd == -1)
- group_fd = FD(evsel, cpu, thread);
}
}
@@ -635,8 +761,7 @@ static struct {
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group,
- struct xyarray *group_fd)
+ struct thread_map *threads)
{
if (cpus == NULL) {
/* Work around old compiler warnings about strict aliasing */
@@ -646,30 +771,28 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
if (threads == NULL)
threads = &empty_thread_map.map;
- return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
+ return __perf_evsel__open(evsel, cpus, threads);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus, bool group,
- struct xyarray *group_fd)
+ struct cpu_map *cpus)
{
- return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
- group_fd);
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
- struct thread_map *threads, bool group,
- struct xyarray *group_fd)
+ struct thread_map *threads)
{
- return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
- group_fd);
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
-static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
- struct perf_sample *sample,
- bool swapped)
+static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
+ const union perf_event *event,
+ struct perf_sample *sample)
{
+ u64 type = evsel->attr.sample_type;
const u64 *array = event->sample.array;
+ bool swapped = evsel->needs_swap;
union u64_swap u;
array += ((event->header.size -
@@ -730,9 +853,11 @@ static bool sample_overlap(const union perf_event *event,
}
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
- struct perf_sample *data, bool swapped)
+ struct perf_sample *data)
{
u64 type = evsel->attr.sample_type;
+ u64 regs_user = evsel->attr.sample_regs_user;
+ bool swapped = evsel->needs_swap;
const u64 *array;
/*
@@ -749,7 +874,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
if (event->header.type != PERF_RECORD_SAMPLE) {
if (!evsel->attr.sample_id_all)
return 0;
- return perf_event__parse_id_sample(event, type, data, swapped);
+ return perf_evsel__parse_id_sample(evsel, event, data);
}
array = event->sample.array;
@@ -869,6 +994,32 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
sz /= sizeof(u64);
array += sz;
}
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ /* First u64 tells us if we have any regs in sample. */
+ u64 avail = *array++;
+
+ if (avail) {
+ data->user_regs.regs = (u64 *)array;
+ array += hweight_long(regs_user);
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ u64 size = *array++;
+
+ data->user_stack.offset = ((char *)(array - 1)
+ - (char *) event);
+
+ if (!size) {
+ data->user_stack.size = 0;
+ } else {
+ data->user_stack.data = (char *)array;
+ array += size / sizeof(*array);
+ data->user_stack.size = *array;
+ }
+ }
+
return 0;
}
@@ -947,3 +1098,72 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
return 0;
}
+
+struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
+{
+ return pevent_find_field(evsel->tp_format, name);
+}
+
+void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+ int offset;
+
+ if (!field)
+ return NULL;
+
+ offset = field->offset;
+
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ offset = *(int *)(sample->raw_data + field->offset);
+ offset &= 0xffff;
+ }
+
+ return sample->raw_data + offset;
+}
+
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name)
+{
+ struct format_field *field = perf_evsel__field(evsel, name);
+ void *ptr;
+ u64 value;
+
+ if (!field)
+ return 0;
+
+ ptr = sample->raw_data + field->offset;
+
+ switch (field->size) {
+ case 1:
+ return *(u8 *)ptr;
+ case 2:
+ value = *(u16 *)ptr;
+ break;
+ case 4:
+ value = *(u32 *)ptr;
+ break;
+ case 8:
+ value = *(u64 *)ptr;
+ break;
+ default:
+ return 0;
+ }
+
+ if (!evsel->needs_swap)
+ return value;
+
+ switch (field->size) {
+ case 2:
+ return bswap_16(value);
+ case 4:
+ return bswap_32(value);
+ case 8:
+ return bswap_64(value);
+ default:
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b559929983bb..3ead0d59c03d 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -53,9 +53,10 @@ struct perf_evsel {
u64 *id;
struct perf_counts *counts;
int idx;
- int ids;
+ u32 ids;
struct hists hists;
char *name;
+ struct event_format *tp_format;
union {
void *priv;
off_t id_offset;
@@ -65,8 +66,14 @@ struct perf_evsel {
void *func;
void *data;
} handler;
+ struct cpu_map *cpus;
unsigned int sample_size;
bool supported;
+ bool needs_swap;
+ /* parse modifier helper */
+ int exclude_GH;
+ struct perf_evsel *leader;
+ char *group_name;
};
struct cpu_map;
@@ -75,6 +82,10 @@ struct perf_evlist;
struct perf_record_opts;
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
+struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx);
+
+struct event_format *event_format__new(const char *sys, const char *name);
+
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
@@ -92,8 +103,10 @@ extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
[PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_EVSEL__MAX_ALIASES];
-const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
- [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
+extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);
@@ -105,21 +118,46 @@ void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+ const char *filter);
+
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus, bool group,
- struct xyarray *group_fds);
+ struct cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
- struct thread_map *threads, bool group,
- struct xyarray *group_fds);
+ struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group,
- struct xyarray *group_fds);
+ struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
+struct perf_sample;
+
+void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name);
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+ const char *name);
+
+static inline char *perf_evsel__strval(struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ const char *name)
+{
+ return perf_evsel__rawptr(evsel, sample, name);
+}
+
+struct format_field;
+
+struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
+
#define perf_evsel__match(evsel, t, c) \
(evsel->attr.type == PERF_TYPE_##t && \
evsel->attr.config == PERF_COUNT_##c)
+static inline bool perf_evsel__match2(struct perf_evsel *e1,
+ struct perf_evsel *e2)
+{
+ return (e1->attr.type == e2->attr.type) &&
+ (e1->attr.config == e2->attr.config);
+}
+
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
int cpu, int thread, bool scale);
@@ -181,5 +219,10 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
void hists__init(struct hists *hists);
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
- struct perf_sample *sample, bool swapped);
+ struct perf_sample *sample);
+
+static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
+{
+ return list_entry(evsel->node.next, struct perf_evsel, node);
+}
#endif /* __PERF_EVSEL_H */
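
The new perf_evsel__newtp() constructor and the field accessors (perf_evsel__intval(), perf_evsel__strval(), perf_evsel__rawptr()) let tools resolve tracepoint payload fields by name instead of hard-coding offsets, with byte order handled through evsel->needs_swap. A hedged sketch of the intended use follows; the event and field names are examples (sched:sched_switch carries prev_*/next_* fields), and evsel/sample would normally come from the session's sample-processing callback:

/*
 * Illustrative sketch, not part of this patch.
 */
static struct perf_evsel *example_new_switch_event(void)
{
	return perf_evsel__newtp("sched", "sched_switch", 0);
}

static void example_handle_switch(struct perf_evsel *evsel,
				  struct perf_sample *sample)
{
	u64   next_pid  = perf_evsel__intval(evsel, sample, "next_pid");
	char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");

	/* evsel->needs_swap is honoured inside perf_evsel__intval() */
	printf("switching to %s (%" PRIu64 ")\n", next_comm, next_pid);
}
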
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index f06f6fd148f8..389590c1ad21 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -21,4 +21,19 @@ do
p
}' "Documentation/perf-$cmd.txt"
done
+
+echo "#ifndef NO_LIBELF_SUPPORT"
+sed -n -e 's/^perf-\([^ ]*\)[ ].* full.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+echo "#endif /* NO_LIBELF_SUPPORT */"
echo "};"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 74ea3c2f8138..7daad237dea5 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -20,11 +20,14 @@
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
+#include "pmu.h"
+#include "vdso.h"
+#include "strbuf.h"
static bool no_buildid_cache = false;
-static int event_count;
-static struct perf_trace_event_type *events;
+static int trace_event_count;
+static struct perf_trace_event_type *trace_events;
static u32 header_argc;
static const char **header_argv;
@@ -36,24 +39,24 @@ int perf_header__push_event(u64 id, const char *name)
if (strlen(name) > MAX_EVENT_NAME)
pr_warning("Event %s will be truncated\n", name);
- nevents = realloc(events, (event_count + 1) * sizeof(*events));
+ nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events));
if (nevents == NULL)
return -ENOMEM;
- events = nevents;
+ trace_events = nevents;
- memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
- events[event_count].event_id = id;
- strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
- event_count++;
+ memset(&trace_events[trace_event_count], 0, sizeof(struct perf_trace_event_type));
+ trace_events[trace_event_count].event_id = id;
+ strncpy(trace_events[trace_event_count].name, name, MAX_EVENT_NAME - 1);
+ trace_event_count++;
return 0;
}
char *perf_header__find_event(u64 id)
{
int i;
- for (i = 0 ; i < event_count; i++) {
- if (events[i].event_id == id)
- return events[i].name;
+ for (i = 0 ; i < trace_event_count; i++) {
+ if (trace_events[i].event_id == id)
+ return trace_events[i].name;
}
return NULL;
}
@@ -128,7 +131,7 @@ static int do_write_string(int fd, const char *str)
int ret;
olen = strlen(str) + 1;
- len = ALIGN(olen, NAME_ALIGN);
+ len = PERF_ALIGN(olen, NAME_ALIGN);
/* write len, incl. \0 */
ret = do_write(fd, &len, sizeof(len));
@@ -206,6 +209,29 @@ perf_header__set_cmdline(int argc, const char **argv)
continue; \
else
+static int write_buildid(char *name, size_t name_len, u8 *build_id,
+ pid_t pid, u16 misc, int fd)
+{
+ int err;
+ struct build_id_event b;
+ size_t len;
+
+ len = name_len + 1;
+ len = PERF_ALIGN(len, NAME_ALIGN);
+
+ memset(&b, 0, sizeof(b));
+ memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
+ b.pid = pid;
+ b.header.misc = misc;
+ b.header.size = sizeof(b) + len;
+
+ err = do_write(fd, &b, sizeof(b));
+ if (err < 0)
+ return err;
+
+ return write_padded(fd, name, name_len + 1, len);
+}
+
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
u16 misc, int fd)
{
@@ -213,24 +239,23 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
dsos__for_each_with_build_id(pos, head) {
int err;
- struct build_id_event b;
- size_t len;
+ char *name;
+ size_t name_len;
if (!pos->hit)
continue;
- len = pos->long_name_len + 1;
- len = ALIGN(len, NAME_ALIGN);
- memset(&b, 0, sizeof(b));
- memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
- b.pid = pid;
- b.header.misc = misc;
- b.header.size = sizeof(b) + len;
- err = do_write(fd, &b, sizeof(b));
- if (err < 0)
- return err;
- err = write_padded(fd, pos->long_name,
- pos->long_name_len + 1, len);
- if (err < 0)
+
+ if (is_vdso_map(pos->short_name)) {
+ name = (char *) VDSO__MAP_NAME;
+ name_len = sizeof(VDSO__MAP_NAME) + 1;
+ } else {
+ name = pos->long_name;
+ name_len = pos->long_name_len + 1;
+ }
+
+ err = write_buildid(name, name_len, pos->build_id,
+ pid, misc, fd);
+ if (err)
return err;
}
@@ -276,19 +301,20 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd)
}
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
- const char *name, bool is_kallsyms)
+ const char *name, bool is_kallsyms, bool is_vdso)
{
const size_t size = PATH_MAX;
char *realname, *filename = zalloc(size),
*linkname = zalloc(size), *targetname;
int len, err = -1;
+ bool slash = is_kallsyms || is_vdso;
if (is_kallsyms) {
if (symbol_conf.kptr_restrict) {
pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
return 0;
}
- realname = (char *)name;
+ realname = (char *) name;
} else
realname = realpath(name, NULL);
@@ -296,7 +322,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
goto out_free;
len = scnprintf(filename, size, "%s%s%s",
- debugdir, is_kallsyms ? "/" : "", realname);
+ debugdir, slash ? "/" : "",
+ is_vdso ? VDSO__MAP_NAME : realname);
if (mkdir_p(filename, 0755))
goto out_free;
@@ -332,13 +359,14 @@ out_free:
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
const char *name, const char *debugdir,
- bool is_kallsyms)
+ bool is_kallsyms, bool is_vdso)
{
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
build_id__sprintf(build_id, build_id_size, sbuild_id);
- return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+ return build_id_cache__add_s(sbuild_id, debugdir, name,
+ is_kallsyms, is_vdso);
}
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
@@ -382,9 +410,11 @@ out_free:
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
+ bool is_vdso = is_vdso_map(dso->short_name);
return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
- dso->long_name, debugdir, is_kallsyms);
+ dso->long_name, debugdir,
+ is_kallsyms, is_vdso);
}
static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
@@ -446,7 +476,7 @@ static bool perf_session__read_build_ids(struct perf_session *session, bool with
return ret;
}
-static int write_tracing_data(int fd, struct perf_header *h __used,
+static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
struct perf_evlist *evlist)
{
return read_tracing_data(fd, &evlist->entries);
@@ -454,7 +484,7 @@ static int write_tracing_data(int fd, struct perf_header *h __used,
static int write_build_id(int fd, struct perf_header *h,
- struct perf_evlist *evlist __used)
+ struct perf_evlist *evlist __maybe_unused)
{
struct perf_session *session;
int err;
@@ -475,8 +505,8 @@ static int write_build_id(int fd, struct perf_header *h,
return 0;
}
-static int write_hostname(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_hostname(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
struct utsname uts;
int ret;
@@ -488,8 +518,8 @@ static int write_hostname(int fd, struct perf_header *h __used,
return do_write_string(fd, uts.nodename);
}
-static int write_osrelease(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
struct utsname uts;
int ret;
@@ -501,8 +531,8 @@ static int write_osrelease(int fd, struct perf_header *h __used,
return do_write_string(fd, uts.release);
}
-static int write_arch(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_arch(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
struct utsname uts;
int ret;
@@ -514,14 +544,14 @@ static int write_arch(int fd, struct perf_header *h __used,
return do_write_string(fd, uts.machine);
}
-static int write_version(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_version(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
return do_write_string(fd, perf_version_string);
}
-static int write_cpudesc(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC NULL
@@ -579,8 +609,8 @@ done:
return ret;
}
-static int write_nrcpus(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
long nr;
u32 nrc, nra;
@@ -605,15 +635,14 @@ static int write_nrcpus(int fd, struct perf_header *h __used,
return do_write(fd, &nra, sizeof(nra));
}
-static int write_event_desc(int fd, struct perf_header *h __used,
+static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
struct perf_evlist *evlist)
{
- struct perf_evsel *attr;
- u32 nre = 0, nri, sz;
+ struct perf_evsel *evsel;
+ u32 nre, nri, sz;
int ret;
- list_for_each_entry(attr, &evlist->entries, node)
- nre++;
+ nre = evlist->nr_entries;
/*
* write number of events
@@ -625,14 +654,14 @@ static int write_event_desc(int fd, struct perf_header *h __used,
/*
* size of perf_event_attr struct
*/
- sz = (u32)sizeof(attr->attr);
+ sz = (u32)sizeof(evsel->attr);
ret = do_write(fd, &sz, sizeof(sz));
if (ret < 0)
return ret;
- list_for_each_entry(attr, &evlist->entries, node) {
+ list_for_each_entry(evsel, &evlist->entries, node) {
- ret = do_write(fd, &attr->attr, sz);
+ ret = do_write(fd, &evsel->attr, sz);
if (ret < 0)
return ret;
/*
@@ -642,7 +671,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
* copy into an nri to be independent of the
* type of ids,
*/
- nri = attr->ids;
+ nri = evsel->ids;
ret = do_write(fd, &nri, sizeof(nri));
if (ret < 0)
return ret;
@@ -650,21 +679,21 @@ static int write_event_desc(int fd, struct perf_header *h __used,
/*
* write event string as passed on cmdline
*/
- ret = do_write_string(fd, perf_evsel__name(attr));
+ ret = do_write_string(fd, perf_evsel__name(evsel));
if (ret < 0)
return ret;
/*
* write unique ids for this event
*/
- ret = do_write(fd, attr->id, attr->ids * sizeof(u64));
+ ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
if (ret < 0)
return ret;
}
return 0;
}
-static int write_cmdline(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
char buf[MAXPATHLEN];
char proc[32];
@@ -832,8 +861,8 @@ static struct cpu_topo *build_cpu_topology(void)
return tp;
}
-static int write_cpu_topology(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
struct cpu_topo *tp;
u32 i;
@@ -868,8 +897,8 @@ done:
-static int write_total_mem(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
char *buf = NULL;
FILE *fp;
@@ -954,8 +983,8 @@ done:
return ret;
}
-static int write_numa_topology(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
char *buf = NULL;
size_t len = 0;
@@ -1004,16 +1033,56 @@ done:
}
/*
+ * File format:
+ *
+ * struct pmu_mappings {
+ * u32 pmu_num;
+ * struct pmu_map {
+ * u32 type;
+ * char name[];
+ * }[pmu_num];
+ * };
+ */
+
+static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_pmu *pmu = NULL;
+ off_t offset = lseek(fd, 0, SEEK_CUR);
+ __u32 pmu_num = 0;
+
+ /* write real pmu_num later */
+ do_write(fd, &pmu_num, sizeof(pmu_num));
+
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!pmu->name)
+ continue;
+ pmu_num++;
+ do_write(fd, &pmu->type, sizeof(pmu->type));
+ do_write_string(fd, pmu->name);
+ }
+
+ if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
+ /* discard all */
+ lseek(fd, offset, SEEK_SET);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
* default get_cpuid(): nothing gets recorded
* actual implementation must be in arch/$(ARCH)/util/header.c
*/
-int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)
+int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
+ size_t sz __maybe_unused)
{
return -1;
}
-static int write_cpuid(int fd, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
char buffer[64];
int ret;
@@ -1027,133 +1096,113 @@ write_it:
return do_write_string(fd, buffer);
}
-static int write_branch_stack(int fd __used, struct perf_header *h __used,
- struct perf_evlist *evlist __used)
+static int write_branch_stack(int fd __maybe_unused,
+ struct perf_header *h __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
{
return 0;
}
-static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
+static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- char *str = do_read_string(fd, ph);
- fprintf(fp, "# hostname : %s\n", str);
- free(str);
+ fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}
-static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
+static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- char *str = do_read_string(fd, ph);
- fprintf(fp, "# os release : %s\n", str);
- free(str);
+ fprintf(fp, "# os release : %s\n", ph->env.os_release);
}
-static void print_arch(struct perf_header *ph, int fd, FILE *fp)
+static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
- char *str = do_read_string(fd, ph);
- fprintf(fp, "# arch : %s\n", str);
- free(str);
+ fprintf(fp, "# arch : %s\n", ph->env.arch);
}
-static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
+static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- char *str = do_read_string(fd, ph);
- fprintf(fp, "# cpudesc : %s\n", str);
- free(str);
+ fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}
-static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
+static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- ssize_t ret;
- u32 nr;
-
- ret = read(fd, &nr, sizeof(nr));
- if (ret != (ssize_t)sizeof(nr))
- nr = -1; /* interpreted as error */
-
- if (ph->needs_swap)
- nr = bswap_32(nr);
-
- fprintf(fp, "# nrcpus online : %u\n", nr);
-
- ret = read(fd, &nr, sizeof(nr));
- if (ret != (ssize_t)sizeof(nr))
- nr = -1; /* interpreted as error */
-
- if (ph->needs_swap)
- nr = bswap_32(nr);
-
- fprintf(fp, "# nrcpus avail : %u\n", nr);
+ fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
+ fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}
-static void print_version(struct perf_header *ph, int fd, FILE *fp)
+static void print_version(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- char *str = do_read_string(fd, ph);
- fprintf(fp, "# perf version : %s\n", str);
- free(str);
+ fprintf(fp, "# perf version : %s\n", ph->env.version);
}
-static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
+static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- ssize_t ret;
+ int nr, i;
char *str;
- u32 nr, i;
-
- ret = read(fd, &nr, sizeof(nr));
- if (ret != (ssize_t)sizeof(nr))
- return;
- if (ph->needs_swap)
- nr = bswap_32(nr);
+ nr = ph->env.nr_cmdline;
+ str = ph->env.cmdline;
fprintf(fp, "# cmdline : ");
for (i = 0; i < nr; i++) {
- str = do_read_string(fd, ph);
fprintf(fp, "%s ", str);
- free(str);
+ str += strlen(str) + 1;
}
fputc('\n', fp);
}
-static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
+static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- ssize_t ret;
- u32 nr, i;
+ int nr, i;
char *str;
- ret = read(fd, &nr, sizeof(nr));
- if (ret != (ssize_t)sizeof(nr))
- return;
-
- if (ph->needs_swap)
- nr = bswap_32(nr);
+ nr = ph->env.nr_sibling_cores;
+ str = ph->env.sibling_cores;
for (i = 0; i < nr; i++) {
- str = do_read_string(fd, ph);
fprintf(fp, "# sibling cores : %s\n", str);
- free(str);
+ str += strlen(str) + 1;
}
- ret = read(fd, &nr, sizeof(nr));
- if (ret != (ssize_t)sizeof(nr))
- return;
-
- if (ph->needs_swap)
- nr = bswap_32(nr);
+ nr = ph->env.nr_sibling_threads;
+ str = ph->env.sibling_threads;
for (i = 0; i < nr; i++) {
- str = do_read_string(fd, ph);
fprintf(fp, "# sibling threads : %s\n", str);
- free(str);
+ str += strlen(str) + 1;
}
}
-static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
+static void free_event_desc(struct perf_evsel *events)
+{
+ struct perf_evsel *evsel;
+
+ if (!events)
+ return;
+
+ for (evsel = events; evsel->attr.size; evsel++) {
+ if (evsel->name)
+ free(evsel->name);
+ if (evsel->id)
+ free(evsel->id);
+ }
+
+ free(events);
+}
+
+static struct perf_evsel *
+read_event_desc(struct perf_header *ph, int fd)
{
- struct perf_event_attr attr;
- uint64_t id;
+ struct perf_evsel *evsel, *events = NULL;
+ u64 *id;
void *buf = NULL;
- char *str;
u32 nre, sz, nr, i, j;
ssize_t ret;
size_t msz;
@@ -1173,18 +1222,22 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
if (ph->needs_swap)
sz = bswap_32(sz);
- memset(&attr, 0, sizeof(attr));
-
/* buffer to hold on file attr struct */
buf = malloc(sz);
if (!buf)
goto error;
- msz = sizeof(attr);
+ /* the last event terminates with evsel->attr.size == 0: */
+ events = calloc(nre + 1, sizeof(*events));
+ if (!events)
+ goto error;
+
+ msz = sizeof(evsel->attr);
if (sz < msz)
msz = sz;
- for (i = 0 ; i < nre; i++) {
+ for (i = 0, evsel = events; i < nre; evsel++, i++) {
+ evsel->idx = i;
/*
* must read entire on-file attr struct to
@@ -1197,146 +1250,188 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
if (ph->needs_swap)
perf_event__attr_swap(buf);
- memcpy(&attr, buf, msz);
+ memcpy(&evsel->attr, buf, msz);
ret = read(fd, &nr, sizeof(nr));
if (ret != (ssize_t)sizeof(nr))
goto error;
- if (ph->needs_swap)
+ if (ph->needs_swap) {
nr = bswap_32(nr);
+ evsel->needs_swap = true;
+ }
- str = do_read_string(fd, ph);
- fprintf(fp, "# event : name = %s, ", str);
- free(str);
+ evsel->name = do_read_string(fd, ph);
+
+ if (!nr)
+ continue;
+
+ id = calloc(nr, sizeof(*id));
+ if (!id)
+ goto error;
+ evsel->ids = nr;
+ evsel->id = id;
+
+ for (j = 0 ; j < nr; j++) {
+ ret = read(fd, id, sizeof(*id));
+ if (ret != (ssize_t)sizeof(*id))
+ goto error;
+ if (ph->needs_swap)
+ *id = bswap_64(*id);
+ id++;
+ }
+ }
+out:
+ if (buf)
+ free(buf);
+ return events;
+error:
+ if (events)
+ free_event_desc(events);
+ events = NULL;
+ goto out;
+}
+
+static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
+{
+ struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
+ u32 j;
+ u64 *id;
+
+ if (!events) {
+ fprintf(fp, "# event desc: not available or unable to read\n");
+ return;
+ }
+
+ for (evsel = events; evsel->attr.size; evsel++) {
+ fprintf(fp, "# event : name = %s, ", evsel->name);
fprintf(fp, "type = %d, config = 0x%"PRIx64
", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
- attr.type,
- (u64)attr.config,
- (u64)attr.config1,
- (u64)attr.config2);
+ evsel->attr.type,
+ (u64)evsel->attr.config,
+ (u64)evsel->attr.config1,
+ (u64)evsel->attr.config2);
fprintf(fp, ", excl_usr = %d, excl_kern = %d",
- attr.exclude_user,
- attr.exclude_kernel);
+ evsel->attr.exclude_user,
+ evsel->attr.exclude_kernel);
fprintf(fp, ", excl_host = %d, excl_guest = %d",
- attr.exclude_host,
- attr.exclude_guest);
+ evsel->attr.exclude_host,
+ evsel->attr.exclude_guest);
- fprintf(fp, ", precise_ip = %d", attr.precise_ip);
+ fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);
- if (nr)
+ if (evsel->ids) {
fprintf(fp, ", id = {");
-
- for (j = 0 ; j < nr; j++) {
- ret = read(fd, &id, sizeof(id));
- if (ret != (ssize_t)sizeof(id))
- goto error;
-
- if (ph->needs_swap)
- id = bswap_64(id);
-
- if (j)
- fputc(',', fp);
-
- fprintf(fp, " %"PRIu64, id);
- }
- if (nr && j == nr)
+ for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
+ if (j)
+ fputc(',', fp);
+ fprintf(fp, " %"PRIu64, *id);
+ }
fprintf(fp, " }");
+ }
+
fputc('\n', fp);
}
- free(buf);
- return;
-error:
- fprintf(fp, "# event desc: not available or unable to read\n");
+
+ free_event_desc(events);
}
-static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp)
+static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- uint64_t mem;
- ssize_t ret;
-
- ret = read(fd, &mem, sizeof(mem));
- if (ret != sizeof(mem))
- goto error;
-
- if (h->needs_swap)
- mem = bswap_64(mem);
-
- fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);
- return;
-error:
- fprintf(fp, "# total memory : unknown\n");
+ fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
}
-static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp)
+static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
{
- ssize_t ret;
u32 nr, c, i;
- char *str;
+ char *str, *tmp;
uint64_t mem_total, mem_free;
/* nr nodes */
- ret = read(fd, &nr, sizeof(nr));
- if (ret != (ssize_t)sizeof(nr))
- goto error;
-
- if (h->needs_swap)
- nr = bswap_32(nr);
+ nr = ph->env.nr_numa_nodes;
+ str = ph->env.numa_nodes;
for (i = 0; i < nr; i++) {
-
/* node number */
- ret = read(fd, &c, sizeof(c));
- if (ret != (ssize_t)sizeof(c))
+ c = strtoul(str, &tmp, 0);
+ if (*tmp != ':')
goto error;
- if (h->needs_swap)
- c = bswap_32(c);
-
- ret = read(fd, &mem_total, sizeof(u64));
- if (ret != sizeof(u64))
+ str = tmp + 1;
+ mem_total = strtoull(str, &tmp, 0);
+ if (*tmp != ':')
goto error;
- ret = read(fd, &mem_free, sizeof(u64));
- if (ret != sizeof(u64))
+ str = tmp + 1;
+ mem_free = strtoull(str, &tmp, 0);
+ if (*tmp != ':')
goto error;
- if (h->needs_swap) {
- mem_total = bswap_64(mem_total);
- mem_free = bswap_64(mem_free);
- }
-
fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
" free = %"PRIu64" kB\n",
- c,
- mem_total,
- mem_free);
+ c, mem_total, mem_free);
- str = do_read_string(fd, h);
+ str = tmp + 1;
fprintf(fp, "# node%u cpu list : %s\n", c, str);
- free(str);
}
return;
error:
fprintf(fp, "# numa topology : not available\n");
}
-static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
+static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
- char *str = do_read_string(fd, ph);
- fprintf(fp, "# cpuid : %s\n", str);
- free(str);
+ fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
}
-static void print_branch_stack(struct perf_header *ph __used, int fd __used,
- FILE *fp)
+static void print_branch_stack(struct perf_header *ph __maybe_unused,
+ int fd __maybe_unused, FILE *fp)
{
fprintf(fp, "# contains samples with branch stack\n");
}
+static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
+ FILE *fp)
+{
+ const char *delimiter = "# pmu mappings: ";
+ char *str, *tmp;
+ u32 pmu_num;
+ u32 type;
+
+ pmu_num = ph->env.nr_pmu_mappings;
+ if (!pmu_num) {
+ fprintf(fp, "# pmu mappings: not available\n");
+ return;
+ }
+
+ str = ph->env.pmu_mappings;
+
+ while (pmu_num) {
+ type = strtoul(str, &tmp, 0);
+ if (*tmp != ':')
+ goto error;
+
+ str = tmp + 1;
+ fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
+
+ delimiter = ", ";
+ str += strlen(str) + 1;
+ pmu_num--;
+ }
+
+ fprintf(fp, "\n");
+
+ if (!pmu_num)
+ return;
+error:
+ fprintf(fp, "# pmu mappings: unable to read\n");
+}
+
static int __event_process_build_id(struct build_id_event *bev,
char *filename,
struct perf_session *session)
@@ -1398,7 +1493,7 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
struct perf_session *session = container_of(header, struct perf_session, header);
struct {
struct perf_event_header header;
- u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
char filename[0];
} old_bev;
struct build_id_event bev;
@@ -1487,28 +1582,375 @@ out:
return err;
}
-static int process_tracing_data(struct perf_file_section *section __unused,
- struct perf_header *ph __unused,
- int feat __unused, int fd, void *data)
+static int process_tracing_data(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph __maybe_unused,
+ int fd, void *data)
{
trace_report(fd, data, false);
return 0;
}
static int process_build_id(struct perf_file_section *section,
- struct perf_header *ph,
- int feat __unused, int fd, void *data __used)
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
{
if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
pr_debug("Failed to read buildids, continuing...\n");
return 0;
}
+static int process_hostname(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.hostname = do_read_string(fd, ph);
+ return ph->env.hostname ? 0 : -ENOMEM;
+}
+
+static int process_osrelease(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.os_release = do_read_string(fd, ph);
+ return ph->env.os_release ? 0 : -ENOMEM;
+}
+
+static int process_version(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.version = do_read_string(fd, ph);
+ return ph->env.version ? 0 : -ENOMEM;
+}
+
+static int process_arch(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.arch = do_read_string(fd, ph);
+ return ph->env.arch ? 0 : -ENOMEM;
+}
+
+static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ size_t ret;
+ u32 nr;
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cpus_online = nr;
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cpus_avail = nr;
+ return 0;
+}
+
+static int process_cpudesc(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.cpu_desc = do_read_string(fd, ph);
+ return ph->env.cpu_desc ? 0 : -ENOMEM;
+}
+
+static int process_cpuid(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ ph->env.cpuid = do_read_string(fd, ph);
+ return ph->env.cpuid ? 0 : -ENOMEM;
+}
+
+static int process_total_mem(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ uint64_t mem;
+ size_t ret;
+
+ ret = read(fd, &mem, sizeof(mem));
+ if (ret != sizeof(mem))
+ return -1;
+
+ if (ph->needs_swap)
+ mem = bswap_64(mem);
+
+ ph->env.total_mem = mem;
+ return 0;
+}
+
+static struct perf_evsel *
+perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
+{
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (evsel->idx == idx)
+ return evsel;
+ }
+
+ return NULL;
+}
+
+static void
+perf_evlist__set_event_name(struct perf_evlist *evlist,
+ struct perf_evsel *event)
+{
+ struct perf_evsel *evsel;
+
+ if (!event->name)
+ return;
+
+ evsel = perf_evlist__find_by_index(evlist, event->idx);
+ if (!evsel)
+ return;
+
+ if (evsel->name)
+ return;
+
+ evsel->name = strdup(event->name);
+}
+
+static int
+process_event_desc(struct perf_file_section *section __maybe_unused,
+ struct perf_header *header, int fd,
+ void *data __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_evsel *evsel, *events = read_event_desc(header, fd);
+
+ if (!events)
+ return 0;
+
+ session = container_of(header, struct perf_session, header);
+ for (evsel = events; evsel->attr.size; evsel++)
+ perf_evlist__set_event_name(session->evlist, evsel);
+
+ free_event_desc(events);
+
+ return 0;
+}
+
+static int process_cmdline(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ size_t ret;
+ char *str;
+ u32 nr, i;
+ struct strbuf sb;
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_cmdline = nr;
+ strbuf_init(&sb, 128);
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.cmdline = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
+static int process_cpu_topology(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ size_t ret;
+ u32 nr, i;
+ char *str;
+ struct strbuf sb;
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_sibling_cores = nr;
+ strbuf_init(&sb, 128);
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.sibling_cores = strbuf_detach(&sb, NULL);
+
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ return -1;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_sibling_threads = nr;
+
+ for (i = 0; i < nr; i++) {
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.sibling_threads = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
+static int process_numa_topology(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ size_t ret;
+ u32 nr, node, i;
+ char *str;
+ uint64_t mem_total, mem_free;
+ struct strbuf sb;
+
+ /* nr nodes */
+ ret = read(fd, &nr, sizeof(nr));
+ if (ret != sizeof(nr))
+ goto error;
+
+ if (ph->needs_swap)
+ nr = bswap_32(nr);
+
+ ph->env.nr_numa_nodes = nr;
+ strbuf_init(&sb, 256);
+
+ for (i = 0; i < nr; i++) {
+ /* node number */
+ ret = read(fd, &node, sizeof(node));
+ if (ret != sizeof(node))
+ goto error;
+
+ ret = read(fd, &mem_total, sizeof(u64));
+ if (ret != sizeof(u64))
+ goto error;
+
+ ret = read(fd, &mem_free, sizeof(u64));
+ if (ret != sizeof(u64))
+ goto error;
+
+ if (ph->needs_swap) {
+ node = bswap_32(node);
+ mem_total = bswap_64(mem_total);
+ mem_free = bswap_64(mem_free);
+ }
+
+ strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
+ node, mem_total, mem_free);
+
+ str = do_read_string(fd, ph);
+ if (!str)
+ goto error;
+
+ /* include a NULL character at the end */
+ strbuf_add(&sb, str, strlen(str) + 1);
+ free(str);
+ }
+ ph->env.numa_nodes = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
+static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
+ struct perf_header *ph, int fd,
+ void *data __maybe_unused)
+{
+ size_t ret;
+ char *name;
+ u32 pmu_num;
+ u32 type;
+ struct strbuf sb;
+
+ ret = read(fd, &pmu_num, sizeof(pmu_num));
+ if (ret != sizeof(pmu_num))
+ return -1;
+
+ if (ph->needs_swap)
+ pmu_num = bswap_32(pmu_num);
+
+ if (!pmu_num) {
+ pr_debug("pmu mappings not available\n");
+ return 0;
+ }
+
+ ph->env.nr_pmu_mappings = pmu_num;
+ strbuf_init(&sb, 128);
+
+ while (pmu_num) {
+ if (read(fd, &type, sizeof(type)) != sizeof(type))
+ goto error;
+ if (ph->needs_swap)
+ type = bswap_32(type);
+
+ name = do_read_string(fd, ph);
+ if (!name)
+ goto error;
+
+ strbuf_addf(&sb, "%u:%s", type, name);
+ /* include a NULL character at the end */
+ strbuf_add(&sb, "", 1);
+
+ free(name);
+ pmu_num--;
+ }
+ ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
+ return 0;
+
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
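
write_pmu_mappings() and process_pmu_mappings() bracket the on-disk layout described in the "struct pmu_mappings" comment above, and the reader packs it into a single NUL-separated string that print_pmu_mappings() later walks. A small illustration, where the PMU type numbers are examples only (they come from /sys/bus/event_source/devices/*/type):

/*
 * Illustrative contents of ph->env.pmu_mappings for two PMUs:
 *
 *	"4:cpu\0" "5:breakpoint\0"	(nr_pmu_mappings == 2)
 *
 * which print_pmu_mappings() renders as:
 *
 *	# pmu mappings: cpu = 4, breakpoint = 5
 */
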
struct feature_ops {
int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
void (*print)(struct perf_header *h, int fd, FILE *fp);
int (*process)(struct perf_file_section *section,
- struct perf_header *h, int feat, int fd, void *data);
+ struct perf_header *h, int fd, void *data);
const char *name;
bool full_only;
};
@@ -1520,7 +1962,7 @@ struct feature_ops {
.process = process_##func }
#define FEAT_OPF(n, func) \
[n] = { .name = #n, .write = write_##func, .print = print_##func, \
- .full_only = true }
+ .process = process_##func, .full_only = true }
/* feature_ops not implemented: */
#define print_tracing_data NULL
@@ -1529,19 +1971,20 @@ struct feature_ops {
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
FEAT_OPP(HEADER_BUILD_ID, build_id),
- FEAT_OPA(HEADER_HOSTNAME, hostname),
- FEAT_OPA(HEADER_OSRELEASE, osrelease),
- FEAT_OPA(HEADER_VERSION, version),
- FEAT_OPA(HEADER_ARCH, arch),
- FEAT_OPA(HEADER_NRCPUS, nrcpus),
- FEAT_OPA(HEADER_CPUDESC, cpudesc),
- FEAT_OPA(HEADER_CPUID, cpuid),
- FEAT_OPA(HEADER_TOTAL_MEM, total_mem),
- FEAT_OPA(HEADER_EVENT_DESC, event_desc),
- FEAT_OPA(HEADER_CMDLINE, cmdline),
+ FEAT_OPP(HEADER_HOSTNAME, hostname),
+ FEAT_OPP(HEADER_OSRELEASE, osrelease),
+ FEAT_OPP(HEADER_VERSION, version),
+ FEAT_OPP(HEADER_ARCH, arch),
+ FEAT_OPP(HEADER_NRCPUS, nrcpus),
+ FEAT_OPP(HEADER_CPUDESC, cpudesc),
+ FEAT_OPP(HEADER_CPUID, cpuid),
+ FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
+ FEAT_OPP(HEADER_EVENT_DESC, event_desc),
+ FEAT_OPP(HEADER_CMDLINE, cmdline),
FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
+ FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
};
struct header_print_data {
@@ -1683,17 +2126,17 @@ int perf_session__write_header(struct perf_session *session,
struct perf_file_header f_header;
struct perf_file_attr f_attr;
struct perf_header *header = &session->header;
- struct perf_evsel *attr, *pair = NULL;
+ struct perf_evsel *evsel, *pair = NULL;
int err;
lseek(fd, sizeof(f_header), SEEK_SET);
if (session->evlist != evlist)
- pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);
+ pair = perf_evlist__first(session->evlist);
- list_for_each_entry(attr, &evlist->entries, node) {
- attr->id_offset = lseek(fd, 0, SEEK_CUR);
- err = do_write(fd, attr->id, attr->ids * sizeof(u64));
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ evsel->id_offset = lseek(fd, 0, SEEK_CUR);
+ err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
if (err < 0) {
out_err_write:
pr_debug("failed to write perf header\n");
@@ -1703,19 +2146,19 @@ out_err_write:
err = do_write(fd, pair->id, pair->ids * sizeof(u64));
if (err < 0)
goto out_err_write;
- attr->ids += pair->ids;
- pair = list_entry(pair->node.next, struct perf_evsel, node);
+ evsel->ids += pair->ids;
+ pair = perf_evsel__next(pair);
}
}
header->attr_offset = lseek(fd, 0, SEEK_CUR);
- list_for_each_entry(attr, &evlist->entries, node) {
+ list_for_each_entry(evsel, &evlist->entries, node) {
f_attr = (struct perf_file_attr){
- .attr = attr->attr,
+ .attr = evsel->attr,
.ids = {
- .offset = attr->id_offset,
- .size = attr->ids * sizeof(u64),
+ .offset = evsel->id_offset,
+ .size = evsel->ids * sizeof(u64),
}
};
err = do_write(fd, &f_attr, sizeof(f_attr));
@@ -1726,9 +2169,9 @@ out_err_write:
}
header->event_offset = lseek(fd, 0, SEEK_CUR);
- header->event_size = event_count * sizeof(struct perf_trace_event_type);
- if (events) {
- err = do_write(fd, events, header->event_size);
+ header->event_size = trace_event_count * sizeof(struct perf_trace_event_type);
+ if (trace_events) {
+ err = do_write(fd, trace_events, header->event_size);
if (err < 0) {
pr_debug("failed to write perf header events\n");
return err;
@@ -1829,6 +2272,8 @@ out_free:
static const int attr_file_abi_sizes[] = {
[0] = PERF_ATTR_SIZE_VER0,
[1] = PERF_ATTR_SIZE_VER1,
+ [2] = PERF_ATTR_SIZE_VER2,
+ [3] = PERF_ATTR_SIZE_VER3,
0,
};
@@ -2019,7 +2464,7 @@ static int perf_file_section__process(struct perf_file_section *section,
if (!feat_ops[feat].process)
return 0;
- return feat_ops[feat].process(section, ph, feat, fd, data);
+ return feat_ops[feat].process(section, ph, fd, data);
}
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
@@ -2108,32 +2553,39 @@ static int read_attr(int fd, struct perf_header *ph,
return ret <= 0 ? -1 : 0;
}
-static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel,
- struct pevent *pevent)
+static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
+ struct pevent *pevent)
{
- struct event_format *event = pevent_find_event(pevent,
- evsel->attr.config);
+ struct event_format *event;
char bf[128];
+ /* already prepared */
+ if (evsel->tp_format)
+ return 0;
+
+ event = pevent_find_event(pevent, evsel->attr.config);
if (event == NULL)
return -1;
- snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
- evsel->name = strdup(bf);
- if (event->name == NULL)
- return -1;
+ if (!evsel->name) {
+ snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
+ evsel->name = strdup(bf);
+ if (evsel->name == NULL)
+ return -1;
+ }
+ evsel->tp_format = event;
return 0;
}
-static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist,
- struct pevent *pevent)
+static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
+ struct pevent *pevent)
{
struct perf_evsel *pos;
list_for_each_entry(pos, &evlist->entries, node) {
if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
- perf_evsel__set_tracepoint_name(pos, pevent))
+ perf_evsel__prepare_tracepoint_event(pos, pevent))
return -1;
}
@@ -2176,6 +2628,8 @@ int perf_session__read_header(struct perf_session *session, int fd)
if (evsel == NULL)
goto out_delete_evlist;
+
+ evsel->needs_swap = header->needs_swap;
/*
* Do it before so that if perf_evsel__alloc_id fails, this
* entry gets purged too at perf_evlist__delete().
@@ -2207,13 +2661,13 @@ int perf_session__read_header(struct perf_session *session, int fd)
if (f_header.event_types.size) {
lseek(fd, f_header.event_types.offset, SEEK_SET);
- events = malloc(f_header.event_types.size);
- if (events == NULL)
+ trace_events = malloc(f_header.event_types.size);
+ if (trace_events == NULL)
return -ENOMEM;
- if (perf_header__getbuffer64(header, fd, events,
+ if (perf_header__getbuffer64(header, fd, trace_events,
f_header.event_types.size))
goto out_errno;
- event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
+ trace_event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
}
perf_header__process_sections(header, fd, &session->pevent,
@@ -2221,7 +2675,8 @@ int perf_session__read_header(struct perf_session *session, int fd)
lseek(fd, header->data_offset, SEEK_SET);
- if (perf_evlist__set_tracepoint_names(session->evlist, session->pevent))
+ if (perf_evlist__prepare_tracepoint_events(session->evlist,
+ session->pevent))
goto out_delete_evlist;
header->frozen = 1;
@@ -2236,7 +2691,7 @@ out_delete_evlist:
}
int perf_event__synthesize_attr(struct perf_tool *tool,
- struct perf_event_attr *attr, u16 ids, u64 *id,
+ struct perf_event_attr *attr, u32 ids, u64 *id,
perf_event__handler_t process)
{
union perf_event *ev;
@@ -2244,7 +2699,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
int err;
size = sizeof(struct perf_event_attr);
- size = ALIGN(size, sizeof(u64));
+ size = PERF_ALIGN(size, sizeof(u64));
size += sizeof(struct perf_event_header);
size += ids * sizeof(u64);
@@ -2257,9 +2712,12 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
memcpy(ev->attr.id, id, ids * sizeof(u64));
ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
- ev->attr.header.size = size;
+ ev->attr.header.size = (u16)size;
- err = process(tool, ev, NULL, NULL);
+ if (ev->attr.header.size == size)
+ err = process(tool, ev, NULL, NULL);
+ else
+ err = -E2BIG;
free(ev);
@@ -2270,12 +2728,12 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process)
{
- struct perf_evsel *attr;
+ struct perf_evsel *evsel;
int err = 0;
- list_for_each_entry(attr, &session->evlist->entries, node) {
- err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,
- attr->id, process);
+ list_for_each_entry(evsel, &session->evlist->entries, node) {
+ err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
+ evsel->id, process);
if (err) {
pr_debug("failed to create perf header attribute\n");
return err;
@@ -2288,7 +2746,7 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
int perf_event__process_attr(union perf_event *event,
struct perf_evlist **pevlist)
{
- unsigned int i, ids, n_ids;
+ u32 i, ids, n_ids;
struct perf_evsel *evsel;
struct perf_evlist *evlist = *pevlist;
@@ -2339,7 +2797,7 @@ int perf_event__synthesize_event_type(struct perf_tool *tool,
ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
size = strlen(ev.event_type.event_type.name);
- size = ALIGN(size, sizeof(u64));
+ size = PERF_ALIGN(size, sizeof(u64));
ev.event_type.header.size = sizeof(ev.event_type) -
(sizeof(ev.event_type.event_type.name) - size);
@@ -2355,8 +2813,8 @@ int perf_event__synthesize_event_types(struct perf_tool *tool,
struct perf_trace_event_type *type;
int i, err = 0;
- for (i = 0; i < event_count; i++) {
- type = &events[i];
+ for (i = 0; i < trace_event_count; i++) {
+ type = &trace_events[i];
err = perf_event__synthesize_event_type(tool, type->event_id,
type->name, process,
@@ -2370,7 +2828,7 @@ int perf_event__synthesize_event_types(struct perf_tool *tool,
return err;
}
-int perf_event__process_event_type(struct perf_tool *tool __unused,
+int perf_event__process_event_type(struct perf_tool *tool __maybe_unused,
union perf_event *event)
{
if (perf_header__push_event(event->event_type.event_type.event_id,
@@ -2387,7 +2845,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
union perf_event ev;
struct tracing_data *tdata;
ssize_t size = 0, aligned_size = 0, padding;
- int err __used = 0;
+ int err __maybe_unused = 0;
/*
* We are going to store the size of the data followed
@@ -2408,7 +2866,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
size = tdata->size;
- aligned_size = ALIGN(size, sizeof(u64));
+ aligned_size = PERF_ALIGN(size, sizeof(u64));
padding = aligned_size - size;
ev.tracing_data.header.size = sizeof(ev.tracing_data);
ev.tracing_data.size = aligned_size;
@@ -2439,7 +2897,7 @@ int perf_event__process_tracing_data(union perf_event *event,
size_read = trace_report(session->fd, &session->pevent,
session->repipe);
- padding = ALIGN(size_read, sizeof(u64)) - size_read;
+ padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
if (read(session->fd, buf, padding) < 0)
die("reading input file");
@@ -2452,6 +2910,9 @@ int perf_event__process_tracing_data(union perf_event *event,
if (size_read + padding != size)
die("tracing data size mismatch");
+ perf_evlist__prepare_tracepoint_events(session->evlist,
+ session->pevent);
+
return size_read + padding;
}
@@ -2470,7 +2931,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool,
memset(&ev, 0, sizeof(ev));
len = pos->long_name_len + 1;
- len = ALIGN(len, NAME_ALIGN);
+ len = PERF_ALIGN(len, NAME_ALIGN);
memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
ev.build_id.header.misc = misc;
@@ -2483,7 +2944,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool,
return err;
}
-int perf_event__process_build_id(struct perf_tool *tool __used,
+int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_session *session)
{
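
For context on the size check introduced in perf_event__synthesize_attr() above: struct perf_event_header carries its size in a u16, so an attr record with many ids could overflow it. A minimal sketch of the idiom, illustrative only and not code from the tree (ids stands for the id count, as in the function above):

	size_t size = sizeof(struct perf_event_attr) +
		      sizeof(struct perf_event_header) +
		      (size_t)ids * sizeof(u64);
	u16 hdr_size = (u16)size;	/* truncates above 65535 */

	if (hdr_size != size)
		return -E2BIG;		/* record would not round-trip */
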
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 2d42b3e1826f..99bdd3abce59 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -28,6 +28,7 @@ enum {
HEADER_CPU_TOPOLOGY,
HEADER_NUMA_TOPOLOGY,
HEADER_BRANCH_STACK,
+ HEADER_PMU_MAPPINGS,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -57,6 +58,29 @@ struct perf_header;
int perf_file_header__read(struct perf_file_header *header,
struct perf_header *ph, int fd);
+struct perf_session_env {
+ char *hostname;
+ char *os_release;
+ char *version;
+ char *arch;
+ int nr_cpus_online;
+ int nr_cpus_avail;
+ char *cpu_desc;
+ char *cpuid;
+ unsigned long long total_mem;
+
+ int nr_cmdline;
+ char *cmdline;
+ int nr_sibling_cores;
+ char *sibling_cores;
+ int nr_sibling_threads;
+ char *sibling_threads;
+ int nr_numa_nodes;
+ char *numa_nodes;
+ int nr_pmu_mappings;
+ char *pmu_mappings;
+};
+
struct perf_header {
int frozen;
bool needs_swap;
@@ -66,6 +90,7 @@ struct perf_header {
u64 event_offset;
u64 event_size;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+ struct perf_session_env env;
};
struct perf_evlist;
@@ -95,11 +120,11 @@ int perf_header__process_sections(struct perf_header *header, int fd,
int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
- const char *name, bool is_kallsyms);
+ const char *name, bool is_kallsyms, bool is_vdso);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
int perf_event__synthesize_attr(struct perf_tool *tool,
- struct perf_event_attr *attr, u16 ids, u64 *id,
+ struct perf_event_attr *attr, u32 ids, u64 *id,
perf_event__handler_t process);
int perf_event__synthesize_attrs(struct perf_tool *tool,
struct perf_session *session,
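
The new struct perf_session_env above is embedded in struct perf_header, so the recorded environment is reachable through the session once the header has been read. A hedged sketch of that access pattern (the particular fields printed are just an illustration):

	/* assumes a struct perf_session *session whose header was read */
	struct perf_session_env *env = &session->header.env;

	printf("recorded on %s (%s), %d CPUs online\n",
	       env->hostname, env->os_release, env->nr_cpus_online);
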
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
index 6f2975a00358..8b1f6e891b8a 100644
--- a/tools/perf/util/help.c
+++ b/tools/perf/util/help.c
@@ -3,6 +3,7 @@
#include "exec_cmd.h"
#include "levenshtein.h"
#include "help.h"
+#include <termios.h>
void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
{
@@ -331,7 +332,8 @@ const char *help_unknown_cmd(const char *cmd)
exit(1);
}
-int cmd_version(int argc __used, const char **argv __used, const char *prefix __used)
+int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused,
+ const char *prefix __maybe_unused)
{
printf("perf version %s\n", perf_version_string);
return 0;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index f247ef2789a4..236bc9d98ff2 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -45,7 +45,7 @@ bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
return false;
}
-static void hists__reset_col_len(struct hists *hists)
+void hists__reset_col_len(struct hists *hists)
{
enum hist_column col;
@@ -63,7 +63,7 @@ static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
hists__set_col_len(hists, dso, unresolved_col_width);
}
-static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
+void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
u16 len;
@@ -114,6 +114,22 @@ static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
}
}
+void hists__output_recalc_col_len(struct hists *hists, int max_rows)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ struct hist_entry *n;
+ int row = 0;
+
+ hists__reset_col_len(hists);
+
+ while (next && row++ < max_rows) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ if (!n->filtered)
+ hists__calc_col_len(hists, n);
+ next = rb_next(&n->rb_node);
+ }
+}
+
static void hist_entry__add_cpumode_period(struct hist_entry *he,
unsigned int cpumode, u64 period)
{
@@ -378,7 +394,7 @@ void hist_entry__free(struct hist_entry *he)
* collapse the histogram
*/
-static bool hists__collapse_insert_entry(struct hists *hists __used,
+static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
struct rb_root *root,
struct hist_entry *he)
{
@@ -394,8 +410,13 @@ static bool hists__collapse_insert_entry(struct hists *hists __used,
cmp = hist_entry__collapse(iter, he);
if (!cmp) {
- iter->period += he->period;
- iter->nr_events += he->nr_events;
+ iter->period += he->period;
+ iter->period_sys += he->period_sys;
+ iter->period_us += he->period_us;
+ iter->period_guest_sys += he->period_guest_sys;
+ iter->period_guest_us += he->period_guest_us;
+ iter->nr_events += he->nr_events;
+
if (symbol_conf.use_callchain) {
callchain_cursor_reset(&callchain_cursor);
callchain_merge(&callchain_cursor,
@@ -547,674 +568,6 @@ void hists__output_resort_threaded(struct hists *hists)
return __hists__output_resort(hists, true);
}
-static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
-{
- int i;
- int ret = fprintf(fp, " ");
-
- for (i = 0; i < left_margin; i++)
- ret += fprintf(fp, " ");
-
- return ret;
-}
-
-static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
- int left_margin)
-{
- int i;
- size_t ret = callchain__fprintf_left_margin(fp, left_margin);
-
- for (i = 0; i < depth; i++)
- if (depth_mask & (1 << i))
- ret += fprintf(fp, "| ");
- else
- ret += fprintf(fp, " ");
-
- ret += fprintf(fp, "\n");
-
- return ret;
-}
-
-static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
- int depth, int depth_mask, int period,
- u64 total_samples, u64 hits,
- int left_margin)
-{
- int i;
- size_t ret = 0;
-
- ret += callchain__fprintf_left_margin(fp, left_margin);
- for (i = 0; i < depth; i++) {
- if (depth_mask & (1 << i))
- ret += fprintf(fp, "|");
- else
- ret += fprintf(fp, " ");
- if (!period && i == depth - 1) {
- double percent;
-
- percent = hits * 100.0 / total_samples;
- ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
- } else
- ret += fprintf(fp, "%s", " ");
- }
- if (chain->ms.sym)
- ret += fprintf(fp, "%s\n", chain->ms.sym->name);
- else
- ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);
-
- return ret;
-}
-
-static struct symbol *rem_sq_bracket;
-static struct callchain_list rem_hits;
-
-static void init_rem_hits(void)
-{
- rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
- if (!rem_sq_bracket) {
- fprintf(stderr, "Not enough memory to display remaining hits\n");
- return;
- }
-
- strcpy(rem_sq_bracket->name, "[...]");
- rem_hits.ms.sym = rem_sq_bracket;
-}
-
-static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
- u64 total_samples, int depth,
- int depth_mask, int left_margin)
-{
- struct rb_node *node, *next;
- struct callchain_node *child;
- struct callchain_list *chain;
- int new_depth_mask = depth_mask;
- u64 remaining;
- size_t ret = 0;
- int i;
- uint entries_printed = 0;
-
- remaining = total_samples;
-
- node = rb_first(root);
- while (node) {
- u64 new_total;
- u64 cumul;
-
- child = rb_entry(node, struct callchain_node, rb_node);
- cumul = callchain_cumul_hits(child);
- remaining -= cumul;
-
- /*
- * The depth mask manages the output of pipes that show
- * the depth. We don't want to keep the pipes of the current
- * level for the last child of this depth.
- * Except if we have remaining filtered hits. They will
- * supersede the last child
- */
- next = rb_next(node);
- if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
- new_depth_mask &= ~(1 << (depth - 1));
-
- /*
- * But we keep the older depth mask for the line separator
- * to keep the level link until we reach the last child
- */
- ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
- left_margin);
- i = 0;
- list_for_each_entry(chain, &child->val, list) {
- ret += ipchain__fprintf_graph(fp, chain, depth,
- new_depth_mask, i++,
- total_samples,
- cumul,
- left_margin);
- }
-
- if (callchain_param.mode == CHAIN_GRAPH_REL)
- new_total = child->children_hit;
- else
- new_total = total_samples;
-
- ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
- depth + 1,
- new_depth_mask | (1 << depth),
- left_margin);
- node = next;
- if (++entries_printed == callchain_param.print_limit)
- break;
- }
-
- if (callchain_param.mode == CHAIN_GRAPH_REL &&
- remaining && remaining != total_samples) {
-
- if (!rem_sq_bracket)
- return ret;
-
- new_depth_mask &= ~(1 << (depth - 1));
- ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
- new_depth_mask, 0, total_samples,
- remaining, left_margin);
- }
-
- return ret;
-}
-
-static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
- u64 total_samples, int left_margin)
-{
- struct callchain_node *cnode;
- struct callchain_list *chain;
- u32 entries_printed = 0;
- bool printed = false;
- struct rb_node *node;
- int i = 0;
- int ret = 0;
-
- /*
- * If have one single callchain root, don't bother printing
- * its percentage (100 % in fractal mode and the same percentage
- * than the hist in graph mode). This also avoid one level of column.
- */
- node = rb_first(root);
- if (node && !rb_next(node)) {
- cnode = rb_entry(node, struct callchain_node, rb_node);
- list_for_each_entry(chain, &cnode->val, list) {
- /*
- * If we sort by symbol, the first entry is the same than
- * the symbol. No need to print it otherwise it appears as
- * displayed twice.
- */
- if (!i++ && sort__first_dimension == SORT_SYM)
- continue;
- if (!printed) {
- ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "|\n");
- ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "---");
- left_margin += 3;
- printed = true;
- } else
- ret += callchain__fprintf_left_margin(fp, left_margin);
-
- if (chain->ms.sym)
- ret += fprintf(fp, " %s\n", chain->ms.sym->name);
- else
- ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
-
- if (++entries_printed == callchain_param.print_limit)
- break;
- }
- root = &cnode->rb_root;
- }
-
- ret += __callchain__fprintf_graph(fp, root, total_samples,
- 1, 1, left_margin);
- ret += fprintf(fp, "\n");
-
- return ret;
-}
-
-static size_t __callchain__fprintf_flat(FILE *fp,
- struct callchain_node *self,
- u64 total_samples)
-{
- struct callchain_list *chain;
- size_t ret = 0;
-
- if (!self)
- return 0;
-
- ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
-
-
- list_for_each_entry(chain, &self->val, list) {
- if (chain->ip >= PERF_CONTEXT_MAX)
- continue;
- if (chain->ms.sym)
- ret += fprintf(fp, " %s\n", chain->ms.sym->name);
- else
- ret += fprintf(fp, " %p\n",
- (void *)(long)chain->ip);
- }
-
- return ret;
-}
-
-static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
- u64 total_samples)
-{
- size_t ret = 0;
- u32 entries_printed = 0;
- struct rb_node *rb_node;
- struct callchain_node *chain;
-
- rb_node = rb_first(self);
- while (rb_node) {
- double percent;
-
- chain = rb_entry(rb_node, struct callchain_node, rb_node);
- percent = chain->hit * 100.0 / total_samples;
-
- ret = percent_color_fprintf(fp, " %6.2f%%\n", percent);
- ret += __callchain__fprintf_flat(fp, chain, total_samples);
- ret += fprintf(fp, "\n");
- if (++entries_printed == callchain_param.print_limit)
- break;
-
- rb_node = rb_next(rb_node);
- }
-
- return ret;
-}
-
-static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
- u64 total_samples, int left_margin,
- FILE *fp)
-{
- switch (callchain_param.mode) {
- case CHAIN_GRAPH_REL:
- return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
- left_margin);
- break;
- case CHAIN_GRAPH_ABS:
- return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
- left_margin);
- break;
- case CHAIN_FLAT:
- return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
- break;
- case CHAIN_NONE:
- break;
- default:
- pr_err("Bad callchain mode\n");
- }
-
- return 0;
-}
-
-void hists__output_recalc_col_len(struct hists *hists, int max_rows)
-{
- struct rb_node *next = rb_first(&hists->entries);
- struct hist_entry *n;
- int row = 0;
-
- hists__reset_col_len(hists);
-
- while (next && row++ < max_rows) {
- n = rb_entry(next, struct hist_entry, rb_node);
- if (!n->filtered)
- hists__calc_col_len(hists, n);
- next = rb_next(&n->rb_node);
- }
-}
-
-static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
- size_t size, struct hists *pair_hists,
- bool show_displacement, long displacement,
- bool color, u64 total_period)
-{
- u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
- u64 nr_events;
- const char *sep = symbol_conf.field_sep;
- int ret;
-
- if (symbol_conf.exclude_other && !he->parent)
- return 0;
-
- if (pair_hists) {
- period = he->pair ? he->pair->period : 0;
- nr_events = he->pair ? he->pair->nr_events : 0;
- total = pair_hists->stats.total_period;
- period_sys = he->pair ? he->pair->period_sys : 0;
- period_us = he->pair ? he->pair->period_us : 0;
- period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
- period_guest_us = he->pair ? he->pair->period_guest_us : 0;
- } else {
- period = he->period;
- nr_events = he->nr_events;
- total = total_period;
- period_sys = he->period_sys;
- period_us = he->period_us;
- period_guest_sys = he->period_guest_sys;
- period_guest_us = he->period_guest_us;
- }
-
- if (total) {
- if (color)
- ret = percent_color_snprintf(s, size,
- sep ? "%.2f" : " %6.2f%%",
- (period * 100.0) / total);
- else
- ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%",
- (period * 100.0) / total);
- if (symbol_conf.show_cpu_utilization) {
- ret += percent_color_snprintf(s + ret, size - ret,
- sep ? "%.2f" : " %6.2f%%",
- (period_sys * 100.0) / total);
- ret += percent_color_snprintf(s + ret, size - ret,
- sep ? "%.2f" : " %6.2f%%",
- (period_us * 100.0) / total);
- if (perf_guest) {
- ret += percent_color_snprintf(s + ret,
- size - ret,
- sep ? "%.2f" : " %6.2f%%",
- (period_guest_sys * 100.0) /
- total);
- ret += percent_color_snprintf(s + ret,
- size - ret,
- sep ? "%.2f" : " %6.2f%%",
- (period_guest_us * 100.0) /
- total);
- }
- }
- } else
- ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
-
- if (symbol_conf.show_nr_samples) {
- if (sep)
- ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
- else
- ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
- }
-
- if (symbol_conf.show_total_period) {
- if (sep)
- ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
- else
- ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
- }
-
- if (pair_hists) {
- char bf[32];
- double old_percent = 0, new_percent = 0, diff;
-
- if (total > 0)
- old_percent = (period * 100.0) / total;
- if (total_period > 0)
- new_percent = (he->period * 100.0) / total_period;
-
- diff = new_percent - old_percent;
-
- if (fabs(diff) >= 0.01)
- scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
- else
- scnprintf(bf, sizeof(bf), " ");
-
- if (sep)
- ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
- else
- ret += scnprintf(s + ret, size - ret, "%11.11s", bf);
-
- if (show_displacement) {
- if (displacement)
- scnprintf(bf, sizeof(bf), "%+4ld", displacement);
- else
- scnprintf(bf, sizeof(bf), " ");
-
- if (sep)
- ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
- else
- ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
- }
- }
-
- return ret;
-}
-
-int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
- struct hists *hists)
-{
- const char *sep = symbol_conf.field_sep;
- struct sort_entry *se;
- int ret = 0;
-
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- if (se->elide)
- continue;
-
- ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
- ret += se->se_snprintf(he, s + ret, size - ret,
- hists__col_len(hists, se->se_width_idx));
- }
-
- return ret;
-}
-
-static int hist_entry__fprintf(struct hist_entry *he, size_t size,
- struct hists *hists, struct hists *pair_hists,
- bool show_displacement, long displacement,
- u64 total_period, FILE *fp)
-{
- char bf[512];
- int ret;
-
- if (size == 0 || size > sizeof(bf))
- size = sizeof(bf);
-
- ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
- show_displacement, displacement,
- true, total_period);
- hist_entry__snprintf(he, bf + ret, size - ret, hists);
- return fprintf(fp, "%s\n", bf);
-}
-
-static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
- struct hists *hists,
- u64 total_period, FILE *fp)
-{
- int left_margin = 0;
-
- if (sort__first_dimension == SORT_COMM) {
- struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
- typeof(*se), list);
- left_margin = hists__col_len(hists, se->se_width_idx);
- left_margin -= thread__comm_len(he->thread);
- }
-
- return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
-}
-
-size_t hists__fprintf(struct hists *hists, struct hists *pair,
- bool show_displacement, bool show_header, int max_rows,
- int max_cols, FILE *fp)
-{
- struct sort_entry *se;
- struct rb_node *nd;
- size_t ret = 0;
- u64 total_period;
- unsigned long position = 1;
- long displacement = 0;
- unsigned int width;
- const char *sep = symbol_conf.field_sep;
- const char *col_width = symbol_conf.col_width_list_str;
- int nr_rows = 0;
-
- init_rem_hits();
-
- if (!show_header)
- goto print_entries;
-
- fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
-
- if (symbol_conf.show_cpu_utilization) {
- if (sep) {
- ret += fprintf(fp, "%csys", *sep);
- ret += fprintf(fp, "%cus", *sep);
- if (perf_guest) {
- ret += fprintf(fp, "%cguest sys", *sep);
- ret += fprintf(fp, "%cguest us", *sep);
- }
- } else {
- ret += fprintf(fp, " sys ");
- ret += fprintf(fp, " us ");
- if (perf_guest) {
- ret += fprintf(fp, " guest sys ");
- ret += fprintf(fp, " guest us ");
- }
- }
- }
-
- if (symbol_conf.show_nr_samples) {
- if (sep)
- fprintf(fp, "%cSamples", *sep);
- else
- fputs(" Samples ", fp);
- }
-
- if (symbol_conf.show_total_period) {
- if (sep)
- ret += fprintf(fp, "%cPeriod", *sep);
- else
- ret += fprintf(fp, " Period ");
- }
-
- if (pair) {
- if (sep)
- ret += fprintf(fp, "%cDelta", *sep);
- else
- ret += fprintf(fp, " Delta ");
-
- if (show_displacement) {
- if (sep)
- ret += fprintf(fp, "%cDisplacement", *sep);
- else
- ret += fprintf(fp, " Displ");
- }
- }
-
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- if (se->elide)
- continue;
- if (sep) {
- fprintf(fp, "%c%s", *sep, se->se_header);
- continue;
- }
- width = strlen(se->se_header);
- if (symbol_conf.col_width_list_str) {
- if (col_width) {
- hists__set_col_len(hists, se->se_width_idx,
- atoi(col_width));
- col_width = strchr(col_width, ',');
- if (col_width)
- ++col_width;
- }
- }
- if (!hists__new_col_len(hists, se->se_width_idx, width))
- width = hists__col_len(hists, se->se_width_idx);
- fprintf(fp, " %*s", width, se->se_header);
- }
-
- fprintf(fp, "\n");
- if (max_rows && ++nr_rows >= max_rows)
- goto out;
-
- if (sep)
- goto print_entries;
-
- fprintf(fp, "# ........");
- if (symbol_conf.show_cpu_utilization)
- fprintf(fp, " ....... .......");
- if (symbol_conf.show_nr_samples)
- fprintf(fp, " ..........");
- if (symbol_conf.show_total_period)
- fprintf(fp, " ............");
- if (pair) {
- fprintf(fp, " ..........");
- if (show_displacement)
- fprintf(fp, " .....");
- }
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- unsigned int i;
-
- if (se->elide)
- continue;
-
- fprintf(fp, " ");
- width = hists__col_len(hists, se->se_width_idx);
- if (width == 0)
- width = strlen(se->se_header);
- for (i = 0; i < width; i++)
- fprintf(fp, ".");
- }
-
- fprintf(fp, "\n");
- if (max_rows && ++nr_rows >= max_rows)
- goto out;
-
- fprintf(fp, "#\n");
- if (max_rows && ++nr_rows >= max_rows)
- goto out;
-
-print_entries:
- total_period = hists->stats.total_period;
-
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
- if (h->filtered)
- continue;
-
- if (show_displacement) {
- if (h->pair != NULL)
- displacement = ((long)h->pair->position -
- (long)position);
- else
- displacement = 0;
- ++position;
- }
- ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
- displacement, total_period, fp);
-
- if (symbol_conf.use_callchain)
- ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
- if (max_rows && ++nr_rows >= max_rows)
- goto out;
-
- if (h->ms.map == NULL && verbose > 1) {
- __map_groups__fprintf_maps(&h->thread->mg,
- MAP__FUNCTION, verbose, fp);
- fprintf(fp, "%.10s end\n", graph_dotted_line);
- }
- }
-out:
- free(rem_sq_bracket);
-
- return ret;
-}
-
-/*
- * See hists__fprintf to match the column widths
- */
-unsigned int hists__sort_list_width(struct hists *hists)
-{
- struct sort_entry *se;
- int ret = 9; /* total % */
-
- if (symbol_conf.show_cpu_utilization) {
- ret += 7; /* count_sys % */
- ret += 6; /* count_us % */
- if (perf_guest) {
- ret += 13; /* count_guest_sys % */
- ret += 12; /* count_guest_us % */
- }
- }
-
- if (symbol_conf.show_nr_samples)
- ret += 11;
-
- if (symbol_conf.show_total_period)
- ret += 13;
-
- list_for_each_entry(se, &hist_entry__sort_list, list)
- if (!se->elide)
- ret += 2 + hists__col_len(hists, se->se_width_idx);
-
- if (verbose) /* Addr + origin */
- ret += 3 + BITS_PER_LONG / 4;
-
- return ret;
-}
-
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
enum hist_filter filter)
{
@@ -1342,25 +695,3 @@ void hists__inc_nr_events(struct hists *hists, u32 type)
++hists->stats.nr_events[0];
++hists->stats.nr_events[type];
}
-
-size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
-{
- int i;
- size_t ret = 0;
-
- for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
- const char *name;
-
- if (hists->stats.nr_events[i] == 0)
- continue;
-
- name = perf_event__name(i);
- if (!strcmp(name, "UNKNOWN"))
- continue;
-
- ret += fprintf(fp, "%16s events: %10d\n", name,
- hists->stats.nr_events[i]);
- }
-
- return ret;
-}
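
The hist.c changes export hists__reset_col_len()/hists__calc_col_len() and add hists__output_recalc_col_len(), which recomputes column widths from at most max_rows unfiltered entries. A rough sketch of a caller, with a hypothetical name, only to show the intended ordering:

	/* recompute widths for the rows that will actually be printed */
	static void render(struct hists *hists, int max_rows)
	{
		hists__output_recalc_col_len(hists, max_rows);
		/* ... then iterate rb_first(&hists->entries) and print ... */
	}
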
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 0b096c27a419..f011ad4756e8 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -75,8 +75,8 @@ struct hist_entry *__hists__add_entry(struct hists *self,
struct symbol *parent, u64 period);
int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
-int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
- struct hists *hists);
+int hist_entry__sort_snprintf(struct hist_entry *self, char *bf, size_t size,
+ struct hists *hists);
void hist_entry__free(struct hist_entry *);
struct hist_entry *__hists__add_branch_entry(struct hists *self,
@@ -112,25 +112,66 @@ void hists__filter_by_symbol(struct hists *hists);
u16 hists__col_len(struct hists *self, enum hist_column col);
void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
+void hists__reset_col_len(struct hists *hists);
+void hists__calc_col_len(struct hists *hists, struct hist_entry *he);
+
+struct perf_hpp {
+ char *buf;
+ size_t size;
+ u64 total_period;
+ const char *sep;
+ long displacement;
+ void *ptr;
+};
+
+struct perf_hpp_fmt {
+ bool cond;
+ int (*header)(struct perf_hpp *hpp);
+ int (*width)(struct perf_hpp *hpp);
+ int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
+ int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
+};
+
+extern struct perf_hpp_fmt perf_hpp__format[];
+
+enum {
+ PERF_HPP__OVERHEAD,
+ PERF_HPP__OVERHEAD_SYS,
+ PERF_HPP__OVERHEAD_US,
+ PERF_HPP__OVERHEAD_GUEST_SYS,
+ PERF_HPP__OVERHEAD_GUEST_US,
+ PERF_HPP__SAMPLES,
+ PERF_HPP__PERIOD,
+ PERF_HPP__DELTA,
+ PERF_HPP__DISPL,
+
+ PERF_HPP__MAX_INDEX
+};
+
+void perf_hpp__init(bool need_pair, bool show_displacement);
+int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
+ bool color);
struct perf_evlist;
#ifdef NO_NEWT_SUPPORT
static inline
-int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used,
- const char *help __used,
- void(*timer)(void *arg) __used,
- void *arg __used,
- int refresh __used)
+int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
+ const char *help __maybe_unused,
+ void(*timer)(void *arg) __maybe_unused,
+ void *arg __maybe_unused,
+ int refresh __maybe_unused)
{
return 0;
}
-static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
- int evidx __used,
- void(*timer)(void *arg) __used,
- void *arg __used,
- int delay_secs __used)
+static inline int hist_entry__tui_annotate(struct hist_entry *self
+ __maybe_unused,
+ int evidx __maybe_unused,
+ void(*timer)(void *arg)
+ __maybe_unused,
+ void *arg __maybe_unused,
+ int delay_secs __maybe_unused)
{
return 0;
}
@@ -148,11 +189,11 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
#ifdef NO_GTK2_SUPPORT
static inline
-int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used,
- const char *help __used,
- void(*timer)(void *arg) __used,
- void *arg __used,
- int refresh __used)
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
+ const char *help __maybe_unused,
+ void(*timer)(void *arg) __maybe_unused,
+ void *arg __maybe_unused,
+ int refresh __maybe_unused)
{
return 0;
}
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 587a230d2075..a55d8cf083c9 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -5,6 +5,10 @@
#include <linux/compiler.h>
#include <asm/hweight.h>
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+
#define BITS_PER_LONG __WORDSIZE
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
index 547628e97f3d..96b919dae11c 100644
--- a/tools/perf/util/include/linux/compiler.h
+++ b/tools/perf/util/include/linux/compiler.h
@@ -9,6 +9,13 @@
#define __attribute_const__
#endif
-#define __used __attribute__((__unused__))
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((unused))
+#endif
+#define __packed __attribute__((__packed__))
+
+#ifndef __force
+#define __force
+#endif
#endif
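
With the __maybe_unused fallback defined above, stubs that must keep a fixed signature can silence unused-parameter warnings, which is what the many __used -> __maybe_unused conversions in this series rely on. A small illustrative example, not taken from the tree:

	static int stub_browse(struct perf_evlist *evlist __maybe_unused,
			       const char *help __maybe_unused)
	{
		return 0;	/* UI support compiled out */
	}
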
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index b6842c1d02a8..d8c927c868ee 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -8,8 +8,8 @@
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
-#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
@@ -46,9 +46,22 @@
_min1 < _min2 ? _min1 : _min2; })
#endif
+#ifndef roundup
+#define roundup(x, y) ( \
+{ \
+ const typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+#endif
+
#ifndef BUG_ON
+#ifdef NDEBUG
+#define BUG_ON(cond) do { if (cond) {} } while (0)
+#else
#define BUG_ON(cond) assert(!(cond))
#endif
+#endif
/*
* Both need more care to handle endianness
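
The renamed PERF_ALIGN() and the new roundup() above both round an integer up to a multiple of a step; PERF_ALIGN() masks and therefore needs a power-of-two alignment, while roundup() divides and works for any positive step. A worked example with assumed values:

	size_t a = PERF_ALIGN(13, 8);	/* (13 + 7) & ~7      == 16 */
	size_t b = roundup(13, 8);	/* ((13 + 7) / 8) * 8 == 16 */
	size_t c = roundup(13, 6);	/* non-power-of-two step: 18 */
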
diff --git a/tools/perf/util/include/linux/magic.h b/tools/perf/util/include/linux/magic.h
new file mode 100644
index 000000000000..58b64ed4da12
--- /dev/null
+++ b/tools/perf/util/include/linux/magic.h
@@ -0,0 +1,12 @@
+#ifndef _PERF_LINUX_MAGIC_H_
+#define _PERF_LINUX_MAGIC_H_
+
+#ifndef DEBUGFS_MAGIC
+#define DEBUGFS_MAGIC 0x64626720
+#endif
+
+#ifndef SYSFS_MAGIC
+#define SYSFS_MAGIC 0x62656572
+#endif
+
+#endif
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
index 7a243a143037..2a030c5af3aa 100644
--- a/tools/perf/util/include/linux/rbtree.h
+++ b/tools/perf/util/include/linux/rbtree.h
@@ -1 +1,2 @@
+#include <stdbool.h>
#include "../../../../include/linux/rbtree.h"
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h
index 3b2f5900276f..6f19c548ecc0 100644
--- a/tools/perf/util/include/linux/string.h
+++ b/tools/perf/util/include/linux/string.h
@@ -1 +1,3 @@
#include <string.h>
+
+void *memdup(const void *src, size_t len);
diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h
index 12de3b8112f9..eb464786c084 100644
--- a/tools/perf/util/include/linux/types.h
+++ b/tools/perf/util/include/linux/types.h
@@ -3,6 +3,14 @@
#include <asm/types.h>
+#ifndef __bitwise
+#define __bitwise
+#endif
+
+#ifndef __le32
+typedef __u32 __bitwise __le32;
+#endif
+
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
index fd530dced9cb..9d0740024ba8 100644
--- a/tools/perf/util/intlist.c
+++ b/tools/perf/util/intlist.c
@@ -11,7 +11,7 @@
#include "intlist.h"
-static struct rb_node *intlist__node_new(struct rblist *rblist __used,
+static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused,
const void *entry)
{
int i = (int)((long)entry);
@@ -31,7 +31,7 @@ static void int_node__delete(struct int_node *ilist)
free(ilist);
}
-static void intlist__node_delete(struct rblist *rblist __used,
+static void intlist__node_delete(struct rblist *rblist __maybe_unused,
struct rb_node *rb_node)
{
struct int_node *node = container_of(rb_node, struct int_node, rb_node);
@@ -52,9 +52,9 @@ int intlist__add(struct intlist *ilist, int i)
return rblist__add_node(&ilist->rblist, (void *)((long)i));
}
-void intlist__remove(struct intlist *ilist __used, struct int_node *node)
+void intlist__remove(struct intlist *ilist, struct int_node *node)
{
- int_node__delete(node);
+ rblist__remove_node(&ilist->rblist, &node->rb_node);
}
struct int_node *intlist__find(struct intlist *ilist, int i)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index cc33486ad9e2..ead5316b3f89 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -9,6 +9,7 @@
#include "map.h"
#include "thread.h"
#include "strlist.h"
+#include "vdso.h"
const char *map_type__name[MAP__NR_TYPES] = {
[MAP__FUNCTION] = "Functions",
@@ -23,7 +24,6 @@ static inline int is_anon_memory(const char *filename)
static inline int is_no_dso_memory(const char *filename)
{
return !strcmp(filename, "[stack]") ||
- !strcmp(filename, "[vdso]") ||
!strcmp(filename, "[heap]");
}
@@ -52,9 +52,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
if (self != NULL) {
char newfilename[PATH_MAX];
struct dso *dso;
- int anon, no_dso;
+ int anon, no_dso, vdso;
anon = is_anon_memory(filename);
+ vdso = is_vdso_map(filename);
no_dso = is_no_dso_memory(filename);
if (anon) {
@@ -62,7 +63,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
filename = newfilename;
}
- dso = __dsos__findnew(dsos__list, filename);
+ if (vdso) {
+ pgoff = 0;
+ dso = vdso__dso_findnew(dsos__list);
+ } else
+ dso = __dsos__findnew(dsos__list, filename);
+
if (dso == NULL)
goto out_delete;
@@ -86,6 +92,25 @@ out_delete:
return NULL;
}
+/*
+ * Constructor variant for modules (where we know from /proc/modules where
+ * they are loaded) and for vmlinux, where only after we load all the
+ * symbols we'll know where it starts and ends.
+ */
+struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
+{
+ struct map *map = calloc(1, (sizeof(*map) +
+ (dso->kernel ? sizeof(struct kmap) : 0)));
+ if (map != NULL) {
+ /*
+ * ->end will be filled after we load all the symbols
+ */
+ map__init(map, type, start, 0, 0, dso);
+ }
+
+ return map;
+}
+
void map__delete(struct map *self)
{
free(self);
@@ -137,6 +162,7 @@ int map__load(struct map *self, symbol_filter_t filter)
pr_warning(", continuing without symbols\n");
return -1;
} else if (nr == 0) {
+#ifndef NO_LIBELF_SUPPORT
const size_t len = strlen(name);
const size_t real_len = len - sizeof(DSO__DELETED);
@@ -149,7 +175,7 @@ int map__load(struct map *self, symbol_filter_t filter)
pr_warning("no symbols found in %s, maybe install "
"a debug package?\n", name);
}
-
+#endif
return -1;
}
/*
@@ -217,15 +243,14 @@ size_t map__fprintf(struct map *self, FILE *fp)
size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
- const char *dsoname;
+ const char *dsoname = "[unknown]";
if (map && map->dso && (map->dso->name || map->dso->long_name)) {
if (symbol_conf.show_kernel_path && map->dso->long_name)
dsoname = map->dso->long_name;
else if (map->dso->name)
dsoname = map->dso->name;
- } else
- dsoname = "[unknown]";
+ }
return fprintf(fp, "%s", dsoname);
}
@@ -242,14 +267,6 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
return addr;
}
-u64 map__objdump_2ip(struct map *map, u64 addr)
-{
- u64 ip = map->dso->adjust_symbols ?
- addr :
- map->unmap_ip(map, addr); /* RIP -> IP */
- return ip;
-}
-
void map_groups__init(struct map_groups *mg)
{
int i;
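
map__new2() above allocates extra room for a struct kmap when the dso is a kernel one and leaves ->end at zero until the symbols are loaded. A hedged usage sketch for a module map; the wrapper name is hypothetical:

	static struct map *module_map__new(struct dso *dso, u64 start)
	{
		/* ->end is filled in later by the symbol loader */
		return map__new2(start, dso, MAP__FUNCTION);
	}
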
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 03a1e9b08b21..d2250fc97e25 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -96,7 +96,7 @@ static inline u64 map__unmap_ip(struct map *map, u64 ip)
return ip + map->start - map->pgoff;
}
-static inline u64 identity__map_ip(struct map *map __used, u64 ip)
+static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip)
{
return ip;
}
@@ -104,7 +104,6 @@ static inline u64 identity__map_ip(struct map *map __used, u64 ip)
/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
u64 map__rip_2objdump(struct map *map, u64 rip);
-u64 map__objdump_2ip(struct map *map, u64 addr);
struct symbol;
@@ -115,6 +114,7 @@ void map__init(struct map *self, enum map_type type,
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
u64 pgoff, u32 pid, char *filename,
enum map_type type);
+struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
void map__delete(struct map *self);
struct map *map__clone(struct map *self);
int map__overlap(struct map *l, struct map *r);
@@ -157,9 +157,12 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid);
void machine__exit(struct machine *self);
void machine__delete(struct machine *self);
+struct perf_evsel;
+struct perf_sample;
int machine__resolve_callchain(struct machine *machine,
+ struct perf_evsel *evsel,
struct thread *thread,
- struct ip_callchain *chain,
+ struct perf_sample *sample,
struct symbol **parent);
int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
u64 addr);
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 127d648cc548..28c18d1d52c3 100644
--- a/tools/perf/util/parse-events-test.c
+++ b/tools/perf/util/parse-events-test.c
@@ -18,8 +18,7 @@ do { \
static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
@@ -48,8 +47,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
static int test__checkevent_raw(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
@@ -59,8 +57,7 @@ static int test__checkevent_raw(struct perf_evlist *evlist)
static int test__checkevent_numeric(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
@@ -70,8 +67,7 @@ static int test__checkevent_numeric(struct perf_evlist *evlist)
static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
@@ -82,8 +78,7 @@ static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
@@ -100,8 +95,7 @@ static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist)
static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
@@ -112,8 +106,7 @@ static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
static int test__checkevent_genhw(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
@@ -123,8 +116,7 @@ static int test__checkevent_genhw(struct perf_evlist *evlist)
static int test__checkevent_breakpoint(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
@@ -138,8 +130,7 @@ static int test__checkevent_breakpoint(struct perf_evlist *evlist)
static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
@@ -152,8 +143,7 @@ static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type",
@@ -168,8 +158,7 @@ static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type",
@@ -184,8 +173,7 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type",
@@ -200,8 +188,7 @@ static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist)
static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
@@ -232,8 +219,7 @@ test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
@@ -245,8 +231,7 @@ static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
@@ -258,8 +243,7 @@ static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
@@ -271,8 +255,7 @@ static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
@@ -282,8 +265,7 @@ static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist)
static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
@@ -293,8 +275,7 @@ static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist)
static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
@@ -306,8 +287,7 @@ static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
@@ -319,75 +299,71 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
- !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:u"));
+ !strcmp(perf_evsel__name(evsel), "mem:0:u"));
return test__checkevent_breakpoint(evlist);
}
static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
- !strcmp(perf_evsel__name(evsel), "mem:0x0:x:k"));
+ !strcmp(perf_evsel__name(evsel), "mem:0:x:k"));
return test__checkevent_breakpoint_x(evlist);
}
static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
- !strcmp(perf_evsel__name(evsel), "mem:0x0:r:hp"));
+ !strcmp(perf_evsel__name(evsel), "mem:0:r:hp"));
return test__checkevent_breakpoint_r(evlist);
}
static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
- !strcmp(perf_evsel__name(evsel), "mem:0x0:w:up"));
+ !strcmp(perf_evsel__name(evsel), "mem:0:w:up"));
return test__checkevent_breakpoint_w(evlist);
}
static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong name",
- !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:kp"));
+ !strcmp(perf_evsel__name(evsel), "mem:0:rw:kp"));
return test__checkevent_breakpoint_rw(evlist);
}
@@ -395,8 +371,7 @@ static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist)
static int test__checkevent_pmu(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = list_entry(evlist->entries.next,
- struct perf_evsel, node);
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
@@ -410,12 +385,11 @@ static int test__checkevent_pmu(struct perf_evlist *evlist)
static int test__checkevent_list(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
/* r1 */
- evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1);
@@ -426,7 +400,7 @@ static int test__checkevent_list(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
/* syscalls:sys_enter_open:k */
- evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+ evsel = perf_evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
@@ -437,7 +411,7 @@ static int test__checkevent_list(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
/* 1:1:hp */
- evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+ evsel = perf_evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
@@ -450,22 +424,21 @@ static int test__checkevent_list(struct perf_evlist *evlist)
static int test__checkevent_pmu_name(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
/* cpu/config=1,name=krava/u */
- evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava"));
/* cpu/config=2/u" */
- evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+ evsel = perf_evsel__next(evsel);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", 2 == evsel->attr.config);
TEST_ASSERT_VAL("wrong name",
- !strcmp(perf_evsel__name(evsel), "raw 0x2:u"));
+ !strcmp(perf_evsel__name(evsel), "cpu/config=2/u"));
return 0;
}
@@ -513,6 +486,280 @@ static int test__checkterms_simple(struct list_head *terms)
return 0;
}
+static int test__group1(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+
+ /* instructions:k */
+ evsel = leader = perf_evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+
+ /* cycles:upp */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ return 0;
+}
+
+static int test__group2(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
+
+ /* faults + :ku modifier */
+ evsel = leader = perf_evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+
+ /* cache-references + :u modifier */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_REFERENCES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ /* cycles:k */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+
+ return 0;
+}
+
+static int test__group3(struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
+
+ /* group1 syscalls:sys_enter_open:H */
+ evsel = leader = perf_evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong sample_type",
+ PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
+ TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+ TEST_ASSERT_VAL("wrong group name",
+ !strcmp(leader->group_name, "group1"));
+
+ /* group1 cycles:kppp */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+
+ /* group2 cycles + G modifier */
+ evsel = leader = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+ TEST_ASSERT_VAL("wrong group name",
+ !strcmp(leader->group_name, "group2"));
+
+ /* group2 1:3 + G modifier */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 3 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ /* instructions:u */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+
+ return 0;
+}
+
+static int test__group4(struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+
+ /* cycles:u + p */
+ evsel = leader = perf_evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+
+ /* instructions:kp + p */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ return 0;
+}
+
+static int test__group5(struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
+
+ /* cycles + G */
+ evsel = leader = perf_evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+
+ /* instructions + G */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ /* cycles:G */
+ evsel = leader = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+
+ /* instructions:G */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+
+ /* cycles */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL);
+
+ return 0;
+}
+
struct test__event_st {
const char *name;
__u32 type;
@@ -632,6 +879,26 @@ static struct test__event_st test__events[] = {
.name = "mem:0:rw:kp",
.check = test__checkevent_breakpoint_rw_modifier,
},
+ [28] = {
+ .name = "{instructions:k,cycles:upp}",
+ .check = test__group1,
+ },
+ [29] = {
+ .name = "{faults:k,cache-references}:u,cycles:k",
+ .check = test__group2,
+ },
+ [30] = {
+ .name = "group1{syscalls:sys_enter_open:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u",
+ .check = test__group3,
+ },
+ [31] = {
+ .name = "{cycles:u,instructions:kp}:p",
+ .check = test__group4,
+ },
+ [32] = {
+ .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles",
+ .check = test__group5,
+ },
};
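The group specs exercised above are exactly what -e accepts once this lands; a minimal sketch of driving the parser directly (the perf_evlist__new() arguments are an assumption on my part, they are not shown in this patch):

	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);	/* assumed constructor */

	if (!evlist || parse_events(evlist, "{cycles,instructions}:G", 0))
		pr_debug("failed to parse the group spec\n");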
static struct test__event_st test__events_pmu[] = {
@@ -658,9 +925,6 @@ static struct test__term test__terms[] = {
},
};
-#define TEST__TERMS_CNT (sizeof(test__terms) / \
- sizeof(struct test__term))
-
static int test_event(struct test__event_st *e)
{
struct perf_evlist *evlist;
@@ -685,19 +949,19 @@ static int test_event(struct test__event_st *e)
static int test_events(struct test__event_st *events, unsigned cnt)
{
- int ret = 0;
+ int ret1, ret2 = 0;
unsigned i;
for (i = 0; i < cnt; i++) {
struct test__event_st *e = &events[i];
pr_debug("running test %d '%s'\n", i, e->name);
- ret = test_event(e);
- if (ret)
- break;
+ ret1 = test_event(e);
+ if (ret1)
+ ret2 = ret1;
}
- return ret;
+ return ret2;
}
static int test_term(struct test__term *t)
@@ -752,19 +1016,19 @@ static int test_pmu(void)
ret = stat(path, &st);
if (ret)
- pr_debug("ommiting PMU cpu tests\n");
+ pr_debug("omitting PMU cpu tests\n");
return !ret;
}
int parse_events__test(void)
{
- int ret;
+ int ret1, ret2 = 0;
#define TEST_EVENTS(tests) \
do { \
- ret = test_events(tests, ARRAY_SIZE(tests)); \
- if (ret) \
- return ret; \
+ ret1 = test_events(tests, ARRAY_SIZE(tests)); \
+ if (!ret2) \
+ ret2 = ret1; \
} while (0)
TEST_EVENTS(test__events);
@@ -772,5 +1036,9 @@ do { \
if (test_pmu())
TEST_EVENTS(test__events_pmu);
- return test_terms(test__terms, ARRAY_SIZE(test__terms));
+ ret1 = test_terms(test__terms, ARRAY_SIZE(test__terms));
+ if (!ret2)
+ ret2 = ret1;
+
+ return ret2;
}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 74a5af4d33ec..aed38e4b9dfa 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -239,8 +239,11 @@ const char *event_type(int type)
return "unknown";
}
-static int add_event(struct list_head **_list, int *idx,
- struct perf_event_attr *attr, char *name)
+
+
+static int __add_event(struct list_head **_list, int *idx,
+ struct perf_event_attr *attr,
+ char *name, struct cpu_map *cpus)
{
struct perf_evsel *evsel;
struct list_head *list = *_list;
@@ -260,6 +263,7 @@ static int add_event(struct list_head **_list, int *idx,
return -ENOMEM;
}
+ evsel->cpus = cpus;
if (name)
evsel->name = strdup(name);
list_add_tail(&evsel->node, list);
@@ -267,6 +271,12 @@ static int add_event(struct list_head **_list, int *idx,
return 0;
}
+static int add_event(struct list_head **_list, int *idx,
+ struct perf_event_attr *attr, char *name)
+{
+ return __add_event(_list, idx, attr, name, NULL);
+}
+
static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
{
int i, j;
@@ -308,7 +318,7 @@ int parse_events_add_cache(struct list_head **list, int *idx,
for (i = 0; (i < 2) && (op_result[i]); i++) {
char *str = op_result[i];
- snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
+ n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
if (cache_op == -1) {
cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
@@ -346,42 +356,28 @@ int parse_events_add_cache(struct list_head **list, int *idx,
return add_event(list, idx, &attr, name);
}
-static int add_tracepoint(struct list_head **list, int *idx,
+static int add_tracepoint(struct list_head **listp, int *idx,
char *sys_name, char *evt_name)
{
- struct perf_event_attr attr;
- char name[MAX_NAME_LEN];
- char evt_path[MAXPATHLEN];
- char id_buf[4];
- u64 id;
- int fd;
-
- snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
- sys_name, evt_name);
-
- fd = open(evt_path, O_RDONLY);
- if (fd < 0)
- return -1;
+ struct perf_evsel *evsel;
+ struct list_head *list = *listp;
- if (read(fd, id_buf, sizeof(id_buf)) < 0) {
- close(fd);
- return -1;
+ if (!list) {
+ list = malloc(sizeof(*list));
+ if (!list)
+ return -ENOMEM;
+ INIT_LIST_HEAD(list);
}
- close(fd);
- id = atoll(id_buf);
-
- memset(&attr, 0, sizeof(attr));
- attr.config = id;
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type |= PERF_SAMPLE_RAW;
- attr.sample_type |= PERF_SAMPLE_TIME;
- attr.sample_type |= PERF_SAMPLE_CPU;
- attr.sample_type |= PERF_SAMPLE_PERIOD;
- attr.sample_period = 1;
+ evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++);
+ if (!evsel) {
+ free(list);
+ return -ENOMEM;
+ }
- snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
- return add_event(list, idx, &attr, name);
+ list_add_tail(&evsel->node, list);
+ *listp = list;
+ return 0;
}
static int add_tracepoint_multi(struct list_head **list, int *idx,
@@ -551,7 +547,7 @@ static int config_attr(struct perf_event_attr *attr,
}
int parse_events_add_numeric(struct list_head **list, int *idx,
- unsigned long type, unsigned long config,
+ u32 type, u64 config,
struct list_head *head_config)
{
struct perf_event_attr attr;
@@ -607,8 +603,23 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
if (perf_pmu__config(pmu, &attr, head_config))
return -EINVAL;
- return add_event(list, idx, &attr,
- pmu_event_name(head_config));
+ return __add_event(list, idx, &attr, pmu_event_name(head_config),
+ pmu->cpus);
+}
+
+int parse_events__modifier_group(struct list_head *list,
+ char *event_mod)
+{
+ return parse_events__modifier_event(list, event_mod, true);
+}
+
+void parse_events__set_leader(char *name, struct list_head *list)
+{
+ struct perf_evsel *leader;
+
+ __perf_evlist__set_leader(list);
+ leader = list_entry(list->next, struct perf_evsel, node);
+ leader->group_name = name ? strdup(name) : NULL;
}
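Per the group tests added above, after this call the first entry of the list is the leader: its own ->leader stays NULL and every other member points back at it. A sketch of that end state (assert() is purely illustrative):

	struct perf_evsel *leader = list_entry(list->next, struct perf_evsel, node);
	struct perf_evsel *pos;

	assert(leader->leader == NULL);			/* the leader has no leader */
	list_for_each_entry(pos, list, node)
		if (pos != leader)
			assert(pos->leader == leader);	/* members point at the leader */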
void parse_events_update_lists(struct list_head *list_event,
@@ -616,21 +627,45 @@ void parse_events_update_lists(struct list_head *list_event,
{
/*
* Called for single event definition. Update the
- * 'all event' list, and reinit the 'signle event'
+ * 'all event' list, and reinit the 'single event'
* list, for next event definition.
*/
list_splice_tail(list_event, list_all);
free(list_event);
}
-int parse_events_modifier(struct list_head *list, char *str)
+struct event_modifier {
+ int eu;
+ int ek;
+ int eh;
+ int eH;
+ int eG;
+ int precise;
+ int exclude_GH;
+};
+
+static int get_event_modifier(struct event_modifier *mod, char *str,
+ struct perf_evsel *evsel)
{
- struct perf_evsel *evsel;
- int exclude = 0, exclude_GH = 0;
- int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0;
+ int eu = evsel ? evsel->attr.exclude_user : 0;
+ int ek = evsel ? evsel->attr.exclude_kernel : 0;
+ int eh = evsel ? evsel->attr.exclude_hv : 0;
+ int eH = evsel ? evsel->attr.exclude_host : 0;
+ int eG = evsel ? evsel->attr.exclude_guest : 0;
+ int precise = evsel ? evsel->attr.precise_ip : 0;
- if (str == NULL)
- return 0;
+ int exclude = eu | ek | eh;
+ int exclude_GH = evsel ? evsel->exclude_GH : 0;
+
+ /*
+ * We are here for a group member whose own modifier did not set
+ * 'G' or 'H', so whatever the event/group modifier specifies
+ * overrides the default 'GH' setup.
+ */
+ if (evsel && !exclude_GH)
+ eH = eG = 0;
+
+ memset(mod, 0, sizeof(*mod));
while (*str) {
if (*str == 'u') {
@@ -674,13 +709,51 @@ int parse_events_modifier(struct list_head *list, char *str)
if (precise > 3)
return -EINVAL;
+ mod->eu = eu;
+ mod->ek = ek;
+ mod->eh = eh;
+ mod->eH = eH;
+ mod->eG = eG;
+ mod->precise = precise;
+ mod->exclude_GH = exclude_GH;
+ return 0;
+}
+
+int parse_events__modifier_event(struct list_head *list, char *str, bool add)
+{
+ struct perf_evsel *evsel;
+ struct event_modifier mod;
+
+ if (str == NULL)
+ return 0;
+
+ if (!add && get_event_modifier(&mod, str, NULL))
+ return -EINVAL;
+
list_for_each_entry(evsel, list, node) {
- evsel->attr.exclude_user = eu;
- evsel->attr.exclude_kernel = ek;
- evsel->attr.exclude_hv = eh;
- evsel->attr.precise_ip = precise;
- evsel->attr.exclude_host = eH;
- evsel->attr.exclude_guest = eG;
+
+ if (add && get_event_modifier(&mod, str, evsel))
+ return -EINVAL;
+
+ evsel->attr.exclude_user = mod.eu;
+ evsel->attr.exclude_kernel = mod.ek;
+ evsel->attr.exclude_hv = mod.eh;
+ evsel->attr.precise_ip = mod.precise;
+ evsel->attr.exclude_host = mod.eH;
+ evsel->attr.exclude_guest = mod.eG;
+ evsel->exclude_GH = mod.exclude_GH;
+ }
+
+ return 0;
+}
+
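The add flag distinguishes the two callers: a plain event modifier is computed from scratch, while a group modifier is merged per member on top of whatever that member already carries. A minimal sketch of the two entry points, assuming a populated list:

	char kmod[] = "k", gmod[] = "G";

	parse_events__modifier_event(list, kmod, false);	/* "cycles:k" - fresh settings  */
	parse_events__modifier_group(list, gmod);		/* "{...}:G"  - merged per evsel */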
+int parse_events_name(struct list_head *list, char *name)
+{
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, list, node) {
+ if (!evsel->name)
+ evsel->name = strdup(name);
}
return 0;
@@ -730,7 +803,8 @@ int parse_events_terms(struct list_head *terms, const char *str)
return ret;
}
-int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
+int parse_events(struct perf_evlist *evlist, const char *str,
+ int unset __maybe_unused)
{
struct parse_events_data__events data = {
.list = LIST_HEAD_INIT(data.list),
@@ -756,20 +830,20 @@ int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
}
int parse_events_option(const struct option *opt, const char *str,
- int unset __used)
+ int unset __maybe_unused)
{
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
return parse_events(evlist, str, unset);
}
int parse_filter(const struct option *opt, const char *str,
- int unset __used)
+ int unset __maybe_unused)
{
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
struct perf_evsel *last = NULL;
if (evlist->nr_entries > 0)
- last = list_entry(evlist->entries.prev, struct perf_evsel, node);
+ last = perf_evlist__last(evlist);
if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
fprintf(stderr,
@@ -799,7 +873,8 @@ static const char * const event_type_descriptors[] = {
* Print the events from <debugfs_mount_point>/tracing/events
*/
-void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
+ bool name_only)
{
DIR *sys_dir, *evt_dir;
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
@@ -829,6 +904,11 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
!strglobmatch(evt_dirent.d_name, event_glob))
continue;
+ if (name_only) {
+ printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name);
+ continue;
+ }
+
snprintf(evt_path, MAXPATHLEN, "%s:%s",
sys_dirent.d_name, evt_dirent.d_name);
printf(" %-50s [%s]\n", evt_path,
@@ -906,7 +986,7 @@ void print_events_type(u8 type)
__print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX);
}
-int print_hwcache_events(const char *event_glob)
+int print_hwcache_events(const char *event_glob, bool name_only)
{
unsigned int type, op, i, printed = 0;
char name[64];
@@ -923,8 +1003,11 @@ int print_hwcache_events(const char *event_glob)
if (event_glob != NULL && !strglobmatch(name, event_glob))
continue;
- printf(" %-50s [%s]\n", name,
- event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ if (name_only)
+ printf("%s ", name);
+ else
+ printf(" %-50s [%s]\n", name,
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
++printed;
}
}
@@ -934,7 +1017,8 @@ int print_hwcache_events(const char *event_glob)
}
static void print_symbol_events(const char *event_glob, unsigned type,
- struct event_symbol *syms, unsigned max)
+ struct event_symbol *syms, unsigned max,
+ bool name_only)
{
unsigned i, printed = 0;
char name[MAX_NAME_LEN];
@@ -946,6 +1030,11 @@ static void print_symbol_events(const char *event_glob, unsigned type,
(syms->alias && strglobmatch(syms->alias, event_glob))))
continue;
+ if (name_only) {
+ printf("%s ", syms->symbol);
+ continue;
+ }
+
if (strlen(syms->alias))
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
else
@@ -963,39 +1052,42 @@ static void print_symbol_events(const char *event_glob, unsigned type,
/*
* Print the help text for the event symbols:
*/
-void print_events(const char *event_glob)
+void print_events(const char *event_glob, bool name_only)
{
-
- printf("\n");
- printf("List of pre-defined events (to be used in -e):\n");
+ if (!name_only) {
+ printf("\n");
+ printf("List of pre-defined events (to be used in -e):\n");
+ }
print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
- event_symbols_hw, PERF_COUNT_HW_MAX);
+ event_symbols_hw, PERF_COUNT_HW_MAX, name_only);
print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
- event_symbols_sw, PERF_COUNT_SW_MAX);
+ event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
- print_hwcache_events(event_glob);
+ print_hwcache_events(event_glob, name_only);
if (event_glob != NULL)
return;
- printf("\n");
- printf(" %-50s [%s]\n",
- "rNNN",
- event_type_descriptors[PERF_TYPE_RAW]);
- printf(" %-50s [%s]\n",
- "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
- event_type_descriptors[PERF_TYPE_RAW]);
- printf(" (see 'perf list --help' on how to encode it)\n");
- printf("\n");
-
- printf(" %-50s [%s]\n",
- "mem:<addr>[:access]",
+ if (!name_only) {
+ printf("\n");
+ printf(" %-50s [%s]\n",
+ "rNNN",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" %-50s [%s]\n",
+ "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf(" (see 'perf list --help' on how to encode it)\n");
+ printf("\n");
+
+ printf(" %-50s [%s]\n",
+ "mem:<addr>[:access]",
event_type_descriptors[PERF_TYPE_BREAKPOINT]);
- printf("\n");
+ printf("\n");
+ }
- print_tracepoint_events(NULL, NULL);
+ print_tracepoint_events(NULL, NULL, name_only);
}
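With name_only set, the printers above emit bare, space-separated names instead of the aligned listing; both call styles, sketched (the call sites themselves are hypothetical):

	print_events(NULL, false);	/* aligned "name  [type]" listing, as before  */
	print_events(NULL, true);	/* bare names only: "cycles instructions ..." */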
int parse_events__is_hardcoded_term(struct parse_events__term *term)
@@ -1005,7 +1097,7 @@ int parse_events__is_hardcoded_term(struct parse_events__term *term)
static int new_term(struct parse_events__term **_term, int type_val,
int type_term, char *config,
- char *str, long num)
+ char *str, u64 num)
{
struct parse_events__term *term;
@@ -1034,7 +1126,7 @@ static int new_term(struct parse_events__term **_term, int type_val,
}
int parse_events__term_num(struct parse_events__term **term,
- int type_term, char *config, long num)
+ int type_term, char *config, u64 num)
{
return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
config, NULL, num);
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index ee9c218a193c..c356e443448d 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -55,7 +55,7 @@ struct parse_events__term {
char *config;
union {
char *str;
- long num;
+ u64 num;
} val;
int type_val;
int type_term;
@@ -73,17 +73,19 @@ struct parse_events_data__terms {
int parse_events__is_hardcoded_term(struct parse_events__term *term);
int parse_events__term_num(struct parse_events__term **_term,
- int type_term, char *config, long num);
+ int type_term, char *config, u64 num);
int parse_events__term_str(struct parse_events__term **_term,
int type_term, char *config, char *str);
int parse_events__term_clone(struct parse_events__term **new,
struct parse_events__term *term);
void parse_events__free_terms(struct list_head *terms);
-int parse_events_modifier(struct list_head *list, char *str);
+int parse_events__modifier_event(struct list_head *list, char *str, bool add);
+int parse_events__modifier_group(struct list_head *list, char *event_mod);
+int parse_events_name(struct list_head *list, char *name);
int parse_events_add_tracepoint(struct list_head **list, int *idx,
char *sys, char *event);
int parse_events_add_numeric(struct list_head **list, int *idx,
- unsigned long type, unsigned long config,
+ u32 type, u64 config,
struct list_head *head_config);
int parse_events_add_cache(struct list_head **list, int *idx,
char *type, char *op_result1, char *op_result2);
@@ -91,15 +93,17 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
void *ptr, char *type);
int parse_events_add_pmu(struct list_head **list, int *idx,
char *pmu , struct list_head *head_config);
+void parse_events__set_leader(char *name, struct list_head *list);
void parse_events_update_lists(struct list_head *list_event,
struct list_head *list_all);
void parse_events_error(void *data, void *scanner, char const *msg);
int parse_events__test(void);
-void print_events(const char *event_glob);
+void print_events(const char *event_glob, bool name_only);
void print_events_type(u8 type);
-void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
-int print_hwcache_events(const char *event_glob);
+void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
+ bool name_only);
+int print_hwcache_events(const char *event_glob, bool name_only);
extern int is_valid_tracepoint(const char *event_string);
extern int valid_debugfs_mount(const char *debugfs);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 384ca74c6b22..c87efc12579d 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -15,10 +15,10 @@ YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
static int __value(YYSTYPE *yylval, char *str, int base, int token)
{
- long num;
+ u64 num;
errno = 0;
- num = strtoul(str, NULL, base);
+ num = strtoull(str, NULL, base);
if (errno)
return PE_ERROR;
@@ -70,6 +70,12 @@ static int term(yyscan_t scanner, int type)
%}
%x mem
+%s config
+%x event
+
+group [^,{}/]*[{][^}]*[}][^,{}/]*
+event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
+event [^,{}/]+
num_dec [0-9]+
num_hex 0x[a-fA-F0-9]+
@@ -84,7 +90,13 @@ modifier_bp [rwx]{1,3}
{
int start_token;
- start_token = (int) parse_events_get_extra(yyscanner);
+ start_token = parse_events_get_extra(yyscanner);
+
+ if (start_token == PE_START_TERMS)
+ BEGIN(config);
+ else if (start_token == PE_START_EVENTS)
+ BEGIN(event);
+
if (start_token) {
parse_events_set_extra(NULL, yyscanner);
return start_token;
@@ -92,6 +104,26 @@ modifier_bp [rwx]{1,3}
}
%}
+<event>{
+
+{group} {
+ BEGIN(INITIAL); yyless(0);
+ }
+
+{event_pmu} |
+{event} {
+ str(yyscanner, PE_EVENT_NAME);
+ BEGIN(INITIAL); yyless(0);
+ return PE_EVENT_NAME;
+ }
+
+. |
+<<EOF>> {
+ BEGIN(INITIAL); yyless(0);
+ }
+
+}
+
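Roughly (my reading, not spelled out in the patch): a braced spec is only rewound and re-tokenized, while a plain event or PMU spec is additionally reported once, whole, as PE_EVENT_NAME so it can become the evsel's default name (see the "cpu/config=2/u" expectation in the tests above):

	/*
	 *   "group1{cycles,instructions}"  matches {group}     -> yyless(0), rescanned in INITIAL
	 *   "cpu/config=1,name=krava/u"    matches {event_pmu} -> PE_EVENT_NAME, then rescanned
	 *   "instructions:k"               matches {event}     -> PE_EVENT_NAME, then rescanned
	 */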
cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
@@ -127,18 +159,16 @@ speculative-read|speculative-load |
refs|Reference|ops|access |
misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); }
- /*
- * These are event config hardcoded term names to be specified
- * within xxx/.../ syntax. So far we dont clash with other names,
- * so we can put them here directly. In case the we have a conflict
- * in future, this needs to go into '//' condition block.
- */
+<config>{
config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+, { return ','; }
+"/" { BEGIN(INITIAL); return '/'; }
+}
mem: { BEGIN(mem); return PE_PREFIX_MEM; }
r{num_raw_hex} { return raw(yyscanner); }
@@ -147,10 +177,12 @@ r{num_raw_hex} { return raw(yyscanner); }
{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
{name} { return str(yyscanner, PE_NAME); }
-"/" { return '/'; }
+"/" { BEGIN(config); return '/'; }
- { return '-'; }
-, { return ','; }
+, { BEGIN(event); return ','; }
: { return ':'; }
+"{" { BEGIN(event); return '{'; }
+"}" { return '}'; }
= { return '='; }
\n { }
@@ -175,7 +207,7 @@ r{num_raw_hex} { return raw(yyscanner); }
%%
-int parse_events_wrap(void *scanner __used)
+int parse_events_wrap(void *scanner __maybe_unused)
{
return 1;
}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 2bc5fbff2b5d..cd88209e3c58 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -27,10 +27,11 @@ do { \
%token PE_START_EVENTS PE_START_TERMS
%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
+%token PE_EVENT_NAME
%token PE_NAME
%token PE_MODIFIER_EVENT PE_MODIFIER_BP
%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
-%token PE_PREFIX_MEM PE_PREFIX_RAW
+%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP
%token PE_ERROR
%type <num> PE_VALUE
%type <num> PE_VALUE_SYM_HW
@@ -42,6 +43,7 @@ do { \
%type <str> PE_NAME_CACHE_OP_RESULT
%type <str> PE_MODIFIER_EVENT
%type <str> PE_MODIFIER_BP
+%type <str> PE_EVENT_NAME
%type <num> value_sym
%type <head> event_config
%type <term> event_term
@@ -53,44 +55,125 @@ do { \
%type <head> event_legacy_numeric
%type <head> event_legacy_raw
%type <head> event_def
+%type <head> event_mod
+%type <head> event_name
+%type <head> event
+%type <head> events
+%type <head> group_def
+%type <head> group
+%type <head> groups
%union
{
char *str;
- unsigned long num;
+ u64 num;
struct list_head *head;
struct parse_events__term *term;
}
%%
start:
-PE_START_EVENTS events
+PE_START_EVENTS start_events
|
-PE_START_TERMS terms
+PE_START_TERMS start_terms
+
+start_events: groups
+{
+ struct parse_events_data__events *data = _data;
+
+ parse_events_update_lists($1, &data->list);
+}
+
+groups:
+groups ',' group
+{
+ struct list_head *list = $1;
+ struct list_head *group = $3;
+
+ parse_events_update_lists(group, list);
+ $$ = list;
+}
+|
+groups ',' event
+{
+ struct list_head *list = $1;
+ struct list_head *event = $3;
+
+ parse_events_update_lists(event, list);
+ $$ = list;
+}
+|
+group
+|
+event
+
+group:
+group_def ':' PE_MODIFIER_EVENT
+{
+ struct list_head *list = $1;
+
+ ABORT_ON(parse_events__modifier_group(list, $3));
+ $$ = list;
+}
+|
+group_def
+
+group_def:
+PE_NAME '{' events '}'
+{
+ struct list_head *list = $3;
+
+ parse_events__set_leader($1, list);
+ $$ = list;
+}
+|
+'{' events '}'
+{
+ struct list_head *list = $2;
+
+ parse_events__set_leader(NULL, list);
+ $$ = list;
+}
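As a worked reading (mine) of how the test [30] string above decomposes under these rules:

	group1{syscalls:sys_enter_open:H,cycles:kppp}  -> group_def: PE_NAME '{' events '}'
	group2{cycles,1:3}:G                           -> group: group_def ':' PE_MODIFIER_EVENT
	instructions:u                                 -> event, spliced in via "groups ',' event"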
events:
-events ',' event | event
+events ',' event
+{
+ struct list_head *event = $3;
+ struct list_head *list = $1;
-event:
-event_def PE_MODIFIER_EVENT
+ parse_events_update_lists(event, list);
+ $$ = list;
+}
+|
+event
+
+event: event_mod
+
+event_mod:
+event_name PE_MODIFIER_EVENT
{
- struct parse_events_data__events *data = _data;
+ struct list_head *list = $1;
/*
* Apply modifier on all events added by single event definition
* (there could be more events added for multiple tracepoint
* definitions via '*?'.
*/
- ABORT_ON(parse_events_modifier($1, $2));
- parse_events_update_lists($1, &data->list);
+ ABORT_ON(parse_events__modifier_event(list, $2, false));
+ $$ = list;
}
|
-event_def
-{
- struct parse_events_data__events *data = _data;
+event_name
- parse_events_update_lists($1, &data->list);
+event_name:
+PE_EVENT_NAME event_def
+{
+ ABORT_ON(parse_events_name($2, $1));
+ free($1);
+ $$ = $2;
}
+|
+event_def
event_def: event_pmu |
event_legacy_symbol |
@@ -207,7 +290,7 @@ PE_VALUE ':' PE_VALUE
struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx, $1, $3, NULL));
+ ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL));
$$ = list;
}
@@ -222,7 +305,7 @@ PE_RAW
$$ = list;
}
-terms: event_config
+start_terms: event_config
{
struct parse_events_data__terms *data = _data;
data->terms = $1;
@@ -282,7 +365,7 @@ PE_TERM '=' PE_NAME
{
struct parse_events__term *term;
- ABORT_ON(parse_events__term_str(&term, $1, NULL, $3));
+ ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3));
$$ = term;
}
|
@@ -290,7 +373,7 @@ PE_TERM '=' PE_VALUE
{
struct parse_events__term *term;
- ABORT_ON(parse_events__term_num(&term, $1, NULL, $3));
+ ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3));
$$ = term;
}
|
@@ -298,7 +381,7 @@ PE_TERM
{
struct parse_events__term *term;
- ABORT_ON(parse_events__term_num(&term, $1, NULL, 1));
+ ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1));
$$ = term;
}
@@ -308,7 +391,7 @@ sep_slash_dc: '/' | ':' |
%%
-void parse_events_error(void *data __used, void *scanner __used,
- char const *msg __used)
+void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused,
+ char const *msg __maybe_unused)
{
}
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 594f8fad5ecd..443fc116512b 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -557,7 +557,8 @@ int parse_options_usage(const char * const *usagestr,
}
-int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used,
+int parse_opt_verbosity_cb(const struct option *opt,
+ const char *arg __maybe_unused,
int unset)
{
int *target = opt->value;
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
new file mode 100644
index 000000000000..316dbe7f86ed
--- /dev/null
+++ b/tools/perf/util/perf_regs.h
@@ -0,0 +1,14 @@
+#ifndef __PERF_REGS_H
+#define __PERF_REGS_H
+
+#ifndef NO_PERF_REGS
+#include <perf_regs.h>
+#else
+#define PERF_REGS_MASK 0
+
+static inline const char *perf_reg_name(int id __maybe_unused)
+{
+ return NULL;
+}
+#endif /* NO_PERF_REGS */
+#endif /* __PERF_REGS_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 67715a42cd6d..8a2229da594f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -9,6 +9,9 @@
#include "util.h"
#include "pmu.h"
#include "parse-events.h"
+#include "cpumap.h"
+
+#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
int perf_pmu_parse(struct list_head *list, char *name);
extern FILE *perf_pmu_in;
@@ -69,7 +72,7 @@ static int pmu_format(char *name, struct list_head *format)
return -1;
snprintf(path, PATH_MAX,
- "%s/bus/event_source/devices/%s/format", sysfs, name);
+ "%s" EVENT_SOURCE_DEVICE_PATH "%s/format", sysfs, name);
if (stat(path, &st) < 0)
return 0; /* no error if format does not exist */
@@ -206,7 +209,7 @@ static int pmu_type(char *name, __u32 *type)
return -1;
snprintf(path, PATH_MAX,
- "%s/bus/event_source/devices/%s/type", sysfs, name);
+ "%s" EVENT_SOURCE_DEVICE_PATH "%s/type", sysfs, name);
if (stat(path, &st) < 0)
return -1;
@@ -222,6 +225,62 @@ static int pmu_type(char *name, __u32 *type)
return ret;
}
+/* Add all PMUs in sysfs to the pmu list: */
+static void pmu_read_sysfs(void)
+{
+ char path[PATH_MAX];
+ const char *sysfs;
+ DIR *dir;
+ struct dirent *dent;
+
+ sysfs = sysfs_find_mountpoint();
+ if (!sysfs)
+ return;
+
+ snprintf(path, PATH_MAX,
+ "%s" EVENT_SOURCE_DEVICE_PATH, sysfs);
+
+ dir = opendir(path);
+ if (!dir)
+ return;
+
+ while ((dent = readdir(dir))) {
+ if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
+ continue;
+ /* add to static LIST_HEAD(pmus): */
+ perf_pmu__find(dent->d_name);
+ }
+
+ closedir(dir);
+}
+
+static struct cpu_map *pmu_cpumask(char *name)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs;
+ FILE *file;
+ struct cpu_map *cpus;
+
+ sysfs = sysfs_find_mountpoint();
+ if (!sysfs)
+ return NULL;
+
+ snprintf(path, PATH_MAX,
+ "%s/bus/event_source/devices/%s/cpumask", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return NULL;
+
+ file = fopen(path, "r");
+ if (!file)
+ return NULL;
+
+ cpus = cpu_map__read(file);
+ fclose(file);
+ return cpus;
+}
+
static struct perf_pmu *pmu_lookup(char *name)
{
struct perf_pmu *pmu;
@@ -244,6 +303,8 @@ static struct perf_pmu *pmu_lookup(char *name)
if (!pmu)
return NULL;
+ pmu->cpus = pmu_cpumask(name);
+
pmu_aliases(name, &aliases);
INIT_LIST_HEAD(&pmu->format);
@@ -267,6 +328,21 @@ static struct perf_pmu *pmu_find(char *name)
return NULL;
}
+struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu)
+{
+ /*
+ * pmu iterator: if pmu is NULL, start at the beginning;
+ * otherwise return the next pmu. Returns NULL at the end.
+ */
+ if (!pmu) {
+ pmu_read_sysfs();
+ pmu = list_prepare_entry(pmu, &pmus, list);
+ }
+ list_for_each_entry_continue(pmu, &pmus, list)
+ return pmu;
+ return NULL;
+}
+
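A minimal usage sketch for the new iterator (the loop is illustrative; perf_pmu__scan() and the ->name field both appear in this diff):

	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmu__scan(pmu)) != NULL)
		printf("%s\n", pmu->name);	/* visit every PMU found under sysfs */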
struct perf_pmu *perf_pmu__find(char *name)
{
struct perf_pmu *pmu;
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 535f2c5258ab..53c7794fc4be 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -28,6 +28,7 @@ struct perf_pmu__alias {
struct perf_pmu {
char *name;
__u32 type;
+ struct cpu_map *cpus;
struct list_head format;
struct list_head aliases;
struct list_head list;
@@ -46,5 +47,7 @@ int perf_pmu__new_format(struct list_head *list, char *name,
int config, unsigned long *bits);
void perf_pmu__set_format(unsigned long *bits, long from, long to);
+struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
+
int perf_pmu__test(void);
#endif /* __PMU_H */
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
index 20ea77e93169..ec898047ebb9 100644
--- a/tools/perf/util/pmu.y
+++ b/tools/perf/util/pmu.y
@@ -86,8 +86,8 @@ PP_VALUE
%%
-void perf_pmu_error(struct list_head *list __used,
- char *name __used,
- char const *msg __used)
+void perf_pmu_error(struct list_head *list __maybe_unused,
+ char *name __maybe_unused,
+ char const *msg __maybe_unused)
{
}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 0dda25d82d06..49a256e6e0a2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -41,7 +41,7 @@
#include "symbol.h"
#include "thread.h"
#include "debugfs.h"
-#include "trace-event.h" /* For __unused */
+#include "trace-event.h" /* For __maybe_unused */
#include "probe-event.h"
#include "probe-finder.h"
#include "session.h"
@@ -647,8 +647,8 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
}
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
- struct probe_trace_event **tevs __unused,
- int max_tevs __unused, const char *target)
+ struct probe_trace_event **tevs __maybe_unused,
+ int max_tevs __maybe_unused, const char *target)
{
if (perf_probe_event_need_dwarf(pev)) {
pr_warning("Debuginfo-analysis is not supported.\n");
@@ -661,17 +661,18 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
return 0;
}
-int show_line_range(struct line_range *lr __unused, const char *module __unused)
+int show_line_range(struct line_range *lr __maybe_unused,
+ const char *module __maybe_unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
}
-int show_available_vars(struct perf_probe_event *pevs __unused,
- int npevs __unused, int max_vls __unused,
- const char *module __unused,
- struct strfilter *filter __unused,
- bool externs __unused)
+int show_available_vars(struct perf_probe_event *pevs __maybe_unused,
+ int npevs __maybe_unused, int max_vls __maybe_unused,
+ const char *module __maybe_unused,
+ struct strfilter *filter __maybe_unused,
+ bool externs __maybe_unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
@@ -1099,6 +1100,7 @@ static int parse_probe_trace_command(const char *cmd,
struct probe_trace_point *tp = &tev->point;
char pr;
char *p;
+ char *argv0_str = NULL, *fmt, *fmt1_str, *fmt2_str, *fmt3_str;
int ret, i, argc;
char **argv;
@@ -1115,14 +1117,27 @@ static int parse_probe_trace_command(const char *cmd,
}
/* Scan event and group name. */
- ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
- &pr, (float *)(void *)&tev->group,
- (float *)(void *)&tev->event);
- if (ret != 3) {
+ argv0_str = strdup(argv[0]);
+ if (argv0_str == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fmt1_str = strtok_r(argv0_str, ":", &fmt);
+ fmt2_str = strtok_r(NULL, "/", &fmt);
+ fmt3_str = strtok_r(NULL, " \t", &fmt);
+ if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL
+ || fmt3_str == NULL) {
semantic_error("Failed to parse event name: %s\n", argv[0]);
ret = -EINVAL;
goto out;
}
+ pr = fmt1_str[0];
+ tev->group = strdup(fmt2_str);
+ tev->event = strdup(fmt3_str);
+ if (tev->group == NULL || tev->event == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr);
tp->retprobe = (pr == 'r');
@@ -1134,10 +1149,17 @@ static int parse_probe_trace_command(const char *cmd,
p++;
} else
p = argv[1];
- ret = sscanf(p, "%a[^+]+%lu", (float *)(void *)&tp->symbol,
- &tp->offset);
- if (ret == 1)
+ fmt1_str = strtok_r(p, "+", &fmt);
+ tp->symbol = strdup(fmt1_str);
+ if (tp->symbol == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fmt2_str = strtok_r(NULL, "", &fmt);
+ if (fmt2_str == NULL)
tp->offset = 0;
+ else
+ tp->offset = strtoul(fmt2_str, NULL, 10);
tev->nargs = argc - 2;
tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
@@ -1161,6 +1183,7 @@ static int parse_probe_trace_command(const char *cmd,
}
ret = 0;
out:
+ free(argv0_str);
argv_free(argv);
return ret;
}
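For reference, the line being split here follows the usual kprobe_events syntax (the example is mine, not from the patch):

	p:probe/do_sys_open do_sys_open+0 filename=%di

strtok_r() now peels 'p', 'probe' and 'do_sys_open' out of argv[0] and the symbol plus optional '+offset' out of argv[1], replacing the glibc-only %a allocations the removed sscanf() calls depended on.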
@@ -2183,7 +2206,7 @@ static struct strfilter *available_func_filter;
* If a symbol corresponds to a function with global binding and
* matches filter return 0. For all others return 1.
*/
-static int filter_available_functions(struct map *map __unused,
+static int filter_available_functions(struct map *map __maybe_unused,
struct symbol *sym)
{
if (sym->binding == STB_GLOBAL &&
@@ -2307,10 +2330,17 @@ static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec)
function = NULL;
}
if (!pev->group) {
- char *ptr1, *ptr2;
+ char *ptr1, *ptr2, *exec_copy;
pev->group = zalloc(sizeof(char *) * 64);
- ptr1 = strdup(basename(exec));
+ exec_copy = strdup(exec);
+ if (!exec_copy) {
+ ret = -ENOMEM;
+ pr_warning("Failed to copy exec string.\n");
+ goto out;
+ }
+
+ ptr1 = strdup(basename(exec_copy));
if (ptr1) {
ptr2 = strpbrk(ptr1, "-._");
if (ptr2)
@@ -2319,6 +2349,7 @@ static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec)
ptr1);
free(ptr1);
}
+ free(exec_copy);
}
free(pp->function);
pp->function = zalloc(sizeof(char *) * MAX_PROBE_ARGS);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index d448984ed789..1daf5c14e751 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -207,7 +207,7 @@ static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self,
#else
/* With older elfutils, this just support kernel module... */
static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self,
- Dwarf_Addr addr __used)
+ Dwarf_Addr addr __maybe_unused)
{
const char *path = kernel_get_module_path("kernel");
@@ -525,8 +525,10 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
return -ENOENT;
}
/* Verify it is a data structure */
- if (dwarf_tag(&type) != DW_TAG_structure_type) {
- pr_warning("%s is not a data structure.\n", varname);
+ tag = dwarf_tag(&type);
+ if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) {
+ pr_warning("%s is not a data structure nor an union.\n",
+ varname);
return -EINVAL;
}
@@ -539,8 +541,9 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
*ref_ptr = ref;
} else {
/* Verify it is a data structure */
- if (tag != DW_TAG_structure_type) {
- pr_warning("%s is not a data structure.\n", varname);
+ if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) {
+ pr_warning("%s is not a data structure nor an union.\n",
+ varname);
return -EINVAL;
}
if (field->name[0] == '[') {
@@ -567,10 +570,15 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
}
/* Get the offset of the field */
- ret = die_get_data_member_location(die_mem, &offs);
- if (ret < 0) {
- pr_warning("Failed to get the offset of %s.\n", field->name);
- return ret;
+ if (tag == DW_TAG_union_type) {
+ offs = 0;
+ } else {
+ ret = die_get_data_member_location(die_mem, &offs);
+ if (ret < 0) {
+ pr_warning("Failed to get the offset of %s.\n",
+ field->name);
+ return ret;
+ }
}
ref->offset += (long)offs;
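Forcing the offset to zero is the sensible choice for unions, where every member starts at the beginning of the union and DW_AT_data_member_location may simply be absent (my reading of why die_get_data_member_location() is skipped here). Illustration:

	union u { int a; long b; };	/* 'a' and 'b' both live at offset 0 */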
@@ -1419,7 +1427,7 @@ static int line_range_add_line(const char *src, unsigned int lineno,
}
static int line_range_walk_cb(const char *fname, int lineno,
- Dwarf_Addr addr __used,
+ Dwarf_Addr addr __maybe_unused,
void *data)
{
struct line_finder *lf = data;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 213362850abd..c40c2d33199e 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -1,5 +1,5 @@
#
-# List of files needed by perf python extention
+# List of files needed by perf python extension
#
# Each source file must be placed on its own line so that it can be
# processed by Makefile and util/setup.py accordingly.
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 0688bfb6d280..9181bf212fb9 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -627,7 +627,7 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
* This will group just the fds for this single evsel, to group
* multiple events, use evlist.open().
*/
- if (perf_evsel__open(evsel, cpus, threads, group, NULL) < 0) {
+ if (perf_evsel__open(evsel, cpus, threads) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
@@ -672,7 +672,7 @@ struct pyrf_evlist {
};
static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
- PyObject *args, PyObject *kwargs __used)
+ PyObject *args, PyObject *kwargs __maybe_unused)
{
PyObject *pcpus = NULL, *pthreads = NULL;
struct cpu_map *cpus;
@@ -733,7 +733,8 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
}
static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
- PyObject *args __used, PyObject *kwargs __used)
+ PyObject *args __maybe_unused,
+ PyObject *kwargs __maybe_unused)
{
struct perf_evlist *evlist = &pevlist->evlist;
PyObject *list = PyList_New(0);
@@ -765,7 +766,8 @@ free_list:
static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
- PyObject *args, PyObject *kwargs __used)
+ PyObject *args,
+ PyObject *kwargs __maybe_unused)
{
struct perf_evlist *evlist = &pevlist->evlist;
PyObject *pevsel;
@@ -803,7 +805,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
if (pyevent == NULL)
return PyErr_NoMemory();
- err = perf_evlist__parse_sample(evlist, event, &pevent->sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &pevent->sample);
if (err)
return PyErr_Format(PyExc_OSError,
"perf: can't parse sample, err=%d", err);
@@ -824,7 +826,10 @@ static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group))
return NULL;
- if (perf_evlist__open(evlist, group) < 0) {
+ if (group)
+ perf_evlist__set_leader(evlist);
+
+ if (perf_evlist__open(evlist) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 02dfa19a467f..f80605eb1855 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -25,16 +25,16 @@
#include <ctype.h>
#include <errno.h>
-#include "../../perf.h"
#include "../util.h"
+#include <EXTERN.h>
+#include <perl.h>
+
+#include "../../perf.h"
#include "../thread.h"
#include "../event.h"
#include "../trace-event.h"
#include "../evsel.h"
-#include <EXTERN.h>
-#include <perl.h>
-
void boot_Perf__Trace__Context(pTHX_ CV *cv);
void boot_DynaLoader(pTHX_ CV *cv);
typedef PerlInterpreter * INTERP;
@@ -237,16 +237,16 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->next);
}
-static inline
-struct event_format *find_cache_event(struct pevent *pevent, int type)
+static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
{
static char ev_name[256];
struct event_format *event;
+ int type = evsel->attr.config;
if (events[type])
return events[type];
- events[type] = event = pevent_find_event(pevent, type);
+ events[type] = event = evsel->tp_format;
if (!event)
return NULL;
@@ -257,23 +257,22 @@ struct event_format *find_cache_event(struct pevent *pevent, int type)
return event;
}
-static void perl_process_tracepoint(union perf_event *perf_event __unused,
- struct pevent *pevent,
+static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
- struct machine *machine __unused,
- struct thread *thread)
+ struct machine *machine __maybe_unused,
+ struct addr_location *al)
{
struct format_field *field;
static char handler[256];
unsigned long long val;
unsigned long s, ns;
struct event_format *event;
- int type;
int pid;
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
+ struct thread *thread = al->thread;
char *comm = thread->comm;
dSP;
@@ -281,13 +280,11 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused,
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
return;
- type = trace_parse_common_type(pevent, data);
-
- event = find_cache_event(pevent, type);
+ event = find_cache_event(evsel);
if (!event)
- die("ug! no event found for type %d", type);
+ die("ug! no event found for type %" PRIu64, evsel->attr.config);
- pid = trace_parse_common_pid(pevent, data);
+ pid = raw_field_value(event, "common_pid", data);
sprintf(handler, "%s::%s", event->system, event->name);
@@ -320,7 +317,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused,
offset = field->offset;
XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0)));
} else { /* FIELD_IS_NUMERIC */
- val = read_size(pevent, data + field->offset,
+ val = read_size(event, data + field->offset,
field->size);
if (field->flags & FIELD_IS_SIGNED) {
XPUSHs(sv_2mortal(newSViv(val)));
@@ -349,11 +346,11 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused,
LEAVE;
}
-static void perl_process_event_generic(union perf_event *pevent __unused,
+static void perl_process_event_generic(union perf_event *event,
struct perf_sample *sample,
- struct perf_evsel *evsel __unused,
- struct machine *machine __unused,
- struct thread *thread __unused)
+ struct perf_evsel *evsel,
+ struct machine *machine __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
dSP;
@@ -363,7 +360,7 @@ static void perl_process_event_generic(union perf_event *pevent __unused,
ENTER;
SAVETMPS;
PUSHMARK(SP);
- XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size)));
+ XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size)));
XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr))));
XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample))));
XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size)));
@@ -376,14 +373,13 @@ static void perl_process_event_generic(union perf_event *pevent __unused,
}
static void perl_process_event(union perf_event *event,
- struct pevent *pevent,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
- struct thread *thread)
+ struct addr_location *al)
{
- perl_process_tracepoint(event, pevent, sample, evsel, machine, thread);
- perl_process_event_generic(event, sample, evsel, machine, thread);
+ perl_process_tracepoint(event, sample, evsel, machine, al);
+ perl_process_event_generic(event, sample, evsel, machine, al);
}
static void run_start_sub(void)
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index ce4d1b0c3862..730c6630cba5 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -27,10 +27,12 @@
#include <errno.h>
#include "../../perf.h"
+#include "../evsel.h"
#include "../util.h"
#include "../event.h"
#include "../thread.h"
#include "../trace-event.h"
+#include "../evsel.h"
PyMODINIT_FUNC initperf_trace_context(void);
@@ -194,16 +196,21 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->next);
}
-static inline
-struct event_format *find_cache_event(struct pevent *pevent, int type)
+static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
{
static char ev_name[256];
struct event_format *event;
+ int type = evsel->attr.config;
+ /*
+ * XXX: Do we really need to cache this since now we have evsel->tp_format
+	 * cached already? Need to re-read this "cache" routine, which also calls
+ * define_event_symbols() :-\
+ */
if (events[type])
return events[type];
- events[type] = event = pevent_find_event(pevent, type);
+ events[type] = event = evsel->tp_format;
if (!event)
return NULL;
@@ -214,12 +221,12 @@ struct event_format *find_cache_event(struct pevent *pevent, int type)
return event;
}
-static void python_process_event(union perf_event *perf_event __unused,
- struct pevent *pevent,
+static void python_process_tracepoint(union perf_event *perf_event
+ __maybe_unused,
struct perf_sample *sample,
- struct perf_evsel *evsel __unused,
- struct machine *machine __unused,
- struct thread *thread)
+ struct perf_evsel *evsel,
+ struct machine *machine __maybe_unused,
+ struct addr_location *al)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
static char handler_name[256];
@@ -228,24 +235,22 @@ static void python_process_event(union perf_event *perf_event __unused,
unsigned long s, ns;
struct event_format *event;
unsigned n = 0;
- int type;
int pid;
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
+ struct thread *thread = al->thread;
char *comm = thread->comm;
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
- type = trace_parse_common_type(pevent, data);
-
- event = find_cache_event(pevent, type);
+ event = find_cache_event(evsel);
if (!event)
- die("ug! no event found for type %d", type);
+ die("ug! no event found for type %d", (int)evsel->attr.config);
- pid = trace_parse_common_pid(pevent, data);
+ pid = raw_field_value(event, "common_pid", data);
sprintf(handler_name, "%s__%s", event->system, event->name);
@@ -290,7 +295,7 @@ static void python_process_event(union perf_event *perf_event __unused,
offset = field->offset;
obj = PyString_FromString((char *)data + offset);
} else { /* FIELD_IS_NUMERIC */
- val = read_size(pevent, data + field->offset,
+ val = read_size(event, data + field->offset,
field->size);
if (field->flags & FIELD_IS_SIGNED) {
if ((long long)val >= LONG_MIN &&
@@ -335,6 +340,84 @@ static void python_process_event(union perf_event *perf_event __unused,
Py_DECREF(t);
}
+static void python_process_general_event(union perf_event *perf_event
+ __maybe_unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine __maybe_unused,
+ struct addr_location *al)
+{
+ PyObject *handler, *retval, *t, *dict;
+ static char handler_name[64];
+ unsigned n = 0;
+ struct thread *thread = al->thread;
+
+ /*
+ * Use the MAX_FIELDS to make the function expandable, though
+ * currently there is only one item for the tuple.
+ */
+ t = PyTuple_New(MAX_FIELDS);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ dict = PyDict_New();
+ if (!dict)
+ Py_FatalError("couldn't create Python dictionary");
+
+ snprintf(handler_name, sizeof(handler_name), "%s", "process_event");
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (!handler || !PyCallable_Check(handler))
+ goto exit;
+
+ PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
+ PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize(
+ (const char *)&evsel->attr, sizeof(evsel->attr)));
+ PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize(
+ (const char *)sample, sizeof(*sample)));
+ PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize(
+ (const char *)sample->raw_data, sample->raw_size));
+ PyDict_SetItemString(dict, "comm",
+ PyString_FromString(thread->comm));
+ if (al->map) {
+ PyDict_SetItemString(dict, "dso",
+ PyString_FromString(al->map->dso->name));
+ }
+ if (al->sym) {
+ PyDict_SetItemString(dict, "symbol",
+ PyString_FromString(al->sym->name));
+ }
+
+ PyTuple_SetItem(t, n++, dict);
+ if (_PyTuple_Resize(&t, n) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+exit:
+ Py_DECREF(dict);
+ Py_DECREF(t);
+}
+
+static void python_process_event(union perf_event *perf_event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al)
+{
+ switch (evsel->attr.type) {
+ case PERF_TYPE_TRACEPOINT:
+ python_process_tracepoint(perf_event, sample, evsel,
+ machine, al);
+ break;
+ /* Reserve for future process_hw/sw/raw APIs */
+ default:
+ python_process_general_event(perf_event, sample, evsel,
+ machine, al);
+ }
+}
+
static int run_start_sub(void)
{
PyObject *handler, *retval;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 2437fb0b463a..8cdd23239c90 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -15,6 +15,9 @@
#include "util.h"
#include "cpumap.h"
#include "event-parse.h"
+#include "perf_regs.h"
+#include "unwind.h"
+#include "vdso.h"
static int perf_session__open(struct perf_session *self, bool force)
{
@@ -209,6 +212,7 @@ void perf_session__delete(struct perf_session *self)
machine__exit(&self->host_machine);
close(self->fd);
free(self);
+ vdso__exit();
}
void machine__remove_thread(struct machine *self, struct thread *th)
@@ -288,10 +292,11 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
return bi;
}
-int machine__resolve_callchain(struct machine *self,
- struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent)
+static int machine__resolve_callchain_sample(struct machine *machine,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent)
+
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
@@ -316,11 +321,14 @@ int machine__resolve_callchain(struct machine *self,
if (ip >= PERF_CONTEXT_MAX) {
switch (ip) {
case PERF_CONTEXT_HV:
- cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
+ cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ break;
case PERF_CONTEXT_KERNEL:
- cpumode = PERF_RECORD_MISC_KERNEL; break;
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ break;
case PERF_CONTEXT_USER:
- cpumode = PERF_RECORD_MISC_USER; break;
+ cpumode = PERF_RECORD_MISC_USER;
+ break;
default:
pr_debug("invalid callchain context: "
"%"PRId64"\n", (s64) ip);
@@ -335,7 +343,7 @@ int machine__resolve_callchain(struct machine *self,
}
al.filtered = false;
- thread__find_addr_location(thread, self, cpumode,
+ thread__find_addr_location(thread, machine, cpumode,
MAP__FUNCTION, ip, &al, NULL);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
@@ -354,49 +362,92 @@ int machine__resolve_callchain(struct machine *self,
return 0;
}
-static int process_event_synth_tracing_data_stub(union perf_event *event __used,
- struct perf_session *session __used)
+static int unwind_entry(struct unwind_entry *entry, void *arg)
+{
+ struct callchain_cursor *cursor = arg;
+ return callchain_cursor_append(cursor, entry->ip,
+ entry->map, entry->sym);
+}
+
+int machine__resolve_callchain(struct machine *machine,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct perf_sample *sample,
+ struct symbol **parent)
+
+{
+ int ret;
+
+ callchain_cursor_reset(&callchain_cursor);
+
+ ret = machine__resolve_callchain_sample(machine, thread,
+ sample->callchain, parent);
+ if (ret)
+ return ret;
+
+ /* Can we do dwarf post unwind? */
+ if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
+ (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
+ return 0;
+
+ /* Bail out if nothing was captured. */
+ if ((!sample->user_regs.regs) ||
+ (!sample->user_stack.size))
+ return 0;
+
+ return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
+ thread, evsel->attr.sample_regs_user,
+ sample);
+
+}
+
+static int process_event_synth_tracing_data_stub(union perf_event *event
+ __maybe_unused,
+ struct perf_session *session
+ __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_synth_attr_stub(union perf_event *event __used,
- struct perf_evlist **pevlist __used)
+static int process_event_synth_attr_stub(union perf_event *event __maybe_unused,
+ struct perf_evlist **pevlist
+ __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_sample_stub(struct perf_tool *tool __used,
- union perf_event *event __used,
- struct perf_sample *sample __used,
- struct perf_evsel *evsel __used,
- struct machine *machine __used)
+static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct machine *machine __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_stub(struct perf_tool *tool __used,
- union perf_event *event __used,
- struct perf_sample *sample __used,
- struct machine *machine __used)
+static int process_event_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_finished_round_stub(struct perf_tool *tool __used,
- union perf_event *event __used,
- struct perf_session *perf_session __used)
+static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *perf_session
+ __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
-static int process_event_type_stub(struct perf_tool *tool __used,
- union perf_event *event __used)
+static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
@@ -473,7 +524,7 @@ static void swap_sample_id_all(union perf_event *event, void *data)
}
static void perf_event__all64_swap(union perf_event *event,
- bool sample_id_all __used)
+ bool sample_id_all __maybe_unused)
{
struct perf_event_header *hdr = &event->header;
mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
@@ -487,7 +538,7 @@ static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
if (sample_id_all) {
void *data = &event->comm.comm;
- data += ALIGN(strlen(data) + 1, sizeof(u64));
+ data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
@@ -504,7 +555,7 @@ static void perf_event__mmap_swap(union perf_event *event,
if (sample_id_all) {
void *data = &event->mmap.filename;
- data += ALIGN(strlen(data) + 1, sizeof(u64));
+ data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
@@ -584,7 +635,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
}
static void perf_event__hdr_attr_swap(union perf_event *event,
- bool sample_id_all __used)
+ bool sample_id_all __maybe_unused)
{
size_t size;
@@ -596,14 +647,14 @@ static void perf_event__hdr_attr_swap(union perf_event *event,
}
static void perf_event__event_type_swap(union perf_event *event,
- bool sample_id_all __used)
+ bool sample_id_all __maybe_unused)
{
event->event_type.event_type.event_id =
bswap_64(event->event_type.event_type.event_id);
}
static void perf_event__tracing_data_swap(union perf_event *event,
- bool sample_id_all __used)
+ bool sample_id_all __maybe_unused)
{
event->tracing_data.size = bswap_32(event->tracing_data.size);
}
@@ -652,7 +703,7 @@ static int perf_session_deliver_event(struct perf_session *session,
struct perf_tool *tool,
u64 file_offset);
-static void flush_sample_queue(struct perf_session *s,
+static int flush_sample_queue(struct perf_session *s,
struct perf_tool *tool)
{
struct ordered_samples *os = &s->ordered_samples;
@@ -665,19 +716,21 @@ static void flush_sample_queue(struct perf_session *s,
int ret;
if (!tool->ordered_samples || !limit)
- return;
+ return 0;
list_for_each_entry_safe(iter, tmp, head, list) {
if (iter->timestamp > limit)
break;
- ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample,
- s->header.needs_swap);
+ ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
if (ret)
pr_err("Can't parse sample, err = %d\n", ret);
- else
- perf_session_deliver_event(s, iter->event, &sample, tool,
- iter->file_offset);
+ else {
+ ret = perf_session_deliver_event(s, iter->event, &sample, tool,
+ iter->file_offset);
+ if (ret)
+ return ret;
+ }
os->last_flush = iter->timestamp;
list_del(&iter->list);
@@ -697,6 +750,8 @@ static void flush_sample_queue(struct perf_session *s,
}
os->nr_samples = 0;
+
+ return 0;
}
/*
@@ -739,13 +794,14 @@ static void flush_sample_queue(struct perf_session *s,
* etc...
*/
static int process_finished_round(struct perf_tool *tool,
- union perf_event *event __used,
+ union perf_event *event __maybe_unused,
struct perf_session *session)
{
- flush_sample_queue(session, tool);
- session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
+ int ret = flush_sample_queue(session, tool);
+ if (!ret)
+ session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
- return 0;
+ return ret;
}
/* The queue is ordered by time */
@@ -860,6 +916,34 @@ static void branch_stack__printf(struct perf_sample *sample)
sample->branch_stack->entries[i].to);
}
+static void regs_dump__printf(u64 mask, u64 *regs)
+{
+ unsigned rid, i = 0;
+
+ for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
+ u64 val = regs[i++];
+
+ printf(".... %-5s 0x%" PRIx64 "\n",
+ perf_reg_name(rid), val);
+ }
+}
+
+static void regs_user__printf(struct perf_sample *sample, u64 mask)
+{
+ struct regs_dump *user_regs = &sample->user_regs;
+
+ if (user_regs->regs) {
+ printf("... user regs: mask 0x%" PRIx64 "\n", mask);
+ regs_dump__printf(mask, user_regs->regs);
+ }
+}
+
+static void stack_user__printf(struct stack_dump *dump)
+{
+ printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
+ dump->size, dump->offset);
+}
+
static void perf_session__print_tstamp(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
@@ -897,7 +981,7 @@ static void dump_event(struct perf_session *session, union perf_event *event,
event->header.size, perf_event__name(event->header.type));
}
-static void dump_sample(struct perf_session *session, union perf_event *event,
+static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample)
{
u64 sample_type;
@@ -909,13 +993,19 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);
- sample_type = perf_evlist__sample_type(session->evlist);
+ sample_type = evsel->attr.sample_type;
if (sample_type & PERF_SAMPLE_CALLCHAIN)
callchain__printf(sample);
if (sample_type & PERF_SAMPLE_BRANCH_STACK)
branch_stack__printf(sample);
+
+ if (sample_type & PERF_SAMPLE_REGS_USER)
+ regs_user__printf(sample, evsel->attr.sample_regs_user);
+
+ if (sample_type & PERF_SAMPLE_STACK_USER)
+ stack_user__printf(&sample->user_stack);
}
static struct machine *
@@ -973,7 +1063,7 @@ static int perf_session_deliver_event(struct perf_session *session,
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
- dump_sample(session, event, sample);
+ dump_sample(evsel, event, sample);
if (evsel == NULL) {
++session->hists.stats.nr_unknown_id;
return 0;
@@ -1083,8 +1173,7 @@ static int perf_session__process_event(struct perf_session *session,
/*
* For all kernel events we get the sample data
*/
- ret = perf_evlist__parse_sample(session->evlist, event, &sample,
- session->header.needs_swap);
+ ret = perf_evlist__parse_sample(session->evlist, event, &sample);
if (ret)
return ret;
@@ -1369,7 +1458,7 @@ more:
err = 0;
/* do the final flush for ordered samples */
session->ordered_samples.next_flush = ULLONG_MAX;
- flush_sample_queue(session, tool);
+ err = flush_sample_queue(session, tool);
out_err:
perf_session__warn_about_errors(session, tool);
perf_session_free_sample_buffers(session);
@@ -1498,9 +1587,9 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
return NULL;
}
-void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
- struct machine *machine, int print_sym,
- int print_dso, int print_symoffset)
+void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine,
+ int print_sym, int print_dso, int print_symoffset)
{
struct addr_location al;
struct callchain_cursor_node *node;
@@ -1514,8 +1603,9 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
if (symbol_conf.use_callchain && sample->callchain) {
- if (machine__resolve_callchain(machine, al.thread,
- sample->callchain, NULL) != 0) {
+
+ if (machine__resolve_callchain(machine, evsel, al.thread,
+ sample, NULL) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
return;
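
For readers following the new DWARF post-unwind gating above, here is a minimal standalone sketch of the same predicate, kept separate from the patch itself: the sample_type bit values are copied from the perf_event UAPI of this era, and the structs are simplified stand-ins rather than the real perf_sample.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* sample_type bits as defined by the perf_event UAPI of this era */
#define PERF_SAMPLE_REGS_USER	(1U << 12)
#define PERF_SAMPLE_STACK_USER	(1U << 13)

/* Simplified stand-ins for the only members the check looks at. */
struct fake_user_regs  { uint64_t *regs; };
struct fake_user_stack { uint64_t size; };
struct fake_sample {
	struct fake_user_regs  user_regs;
	struct fake_user_stack user_stack;
};

/*
 * DWARF post unwind is only attempted when both user registers and the
 * user stack were requested at record time *and* this particular sample
 * actually captured them.
 */
static bool can_dwarf_unwind(uint64_t sample_type, const struct fake_sample *sample)
{
	if (!(sample_type & PERF_SAMPLE_REGS_USER) ||
	    !(sample_type & PERF_SAMPLE_STACK_USER))
		return false;

	return sample->user_regs.regs != NULL && sample->user_stack.size != 0;
}

int main(void)
{
	struct fake_sample empty = { { NULL }, { 0 } };

	printf("%d\n", can_dwarf_unwind(PERF_SAMPLE_REGS_USER |
					PERF_SAMPLE_STACK_USER, &empty));
	return 0;
}

With an empty sample, the sketch prints 0: even when both sample flags were requested, missing register or stack data still falls back to the frame-pointer walk.
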
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 1f7ec87db7d7..aab414fbb64b 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -36,9 +36,7 @@ struct perf_session {
struct pevent *pevent;
/*
* FIXME: Need to split this up further, we need global
- * stats + per event stats. 'perf diff' also needs
- * to properly support multiple events in a single
- * perf.data file.
+ * stats + per event stats.
*/
struct hists hists;
int fd;
@@ -129,9 +127,9 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
-void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
- struct machine *machine, int print_sym,
- int print_dso, int print_symoffset);
+void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine,
+ int print_sym, int print_dso, int print_symoffset);
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 0f5a0a496bc4..b5b1b9211960 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -8,12 +8,11 @@ const char default_sort_order[] = "comm,dso,symbol";
const char *sort_order = default_sort_order;
int sort__need_collapse = 0;
int sort__has_parent = 0;
+int sort__has_sym = 0;
int sort__branch_mode = -1; /* -1 = means not set */
enum sort_type sort__first_dimension;
-char * field_sep;
-
LIST_HEAD(hist_entry__sort_list);
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
@@ -23,11 +22,11 @@ static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
va_start(ap, fmt);
n = vsnprintf(bf, size, fmt, ap);
- if (field_sep && n > 0) {
+ if (symbol_conf.field_sep && n > 0) {
char *sep = bf;
while (1) {
- sep = strchr(sep, *field_sep);
+ sep = strchr(sep, *symbol_conf.field_sep);
if (sep == NULL)
break;
*sep = '.';
@@ -172,7 +171,7 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
u64 ip, char level, char *bf, size_t size,
- unsigned int width __used)
+ unsigned int width __maybe_unused)
{
size_t ret = 0;
@@ -207,7 +206,8 @@ struct sort_entry sort_dso = {
};
static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width __used)
+ size_t size,
+ unsigned int width __maybe_unused)
{
return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip,
self->level, bf, size, width);
@@ -250,7 +250,8 @@ sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
}
static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width __used)
+ size_t size,
+ unsigned int width __maybe_unused)
{
FILE *fp;
char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
@@ -399,7 +400,8 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
}
static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width __used)
+ size_t size,
+ unsigned int width __maybe_unused)
{
struct addr_map_symbol *from = &self->branch_info->from;
return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
@@ -408,7 +410,8 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
}
static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
- size_t size, unsigned int width __used)
+ size_t size,
+ unsigned int width __maybe_unused)
{
struct addr_map_symbol *to = &self->branch_info->to;
return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
@@ -509,6 +512,10 @@ int sort_dimension__add(const char *tok)
return -EINVAL;
}
sort__has_parent = 1;
+ } else if (sd->entry == &sort_sym ||
+ sd->entry == &sort_sym_from ||
+ sd->entry == &sort_sym_to) {
+ sort__has_sym = 1;
}
if (sd->taken)
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index e724b26acd51..12d634792de5 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -31,8 +31,8 @@ extern const char *parent_pattern;
extern const char default_sort_order[];
extern int sort__need_collapse;
extern int sort__has_parent;
+extern int sort__has_sym;
extern int sort__branch_mode;
-extern char *field_sep;
extern struct sort_entry sort_comm;
extern struct sort_entry sort_dso;
extern struct sort_entry sort_sym;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
new file mode 100644
index 000000000000..23742126f47c
--- /dev/null
+++ b/tools/perf/util/stat.c
@@ -0,0 +1,57 @@
+#include <math.h>
+
+#include "stat.h"
+
+void update_stats(struct stats *stats, u64 val)
+{
+ double delta;
+
+ stats->n++;
+ delta = val - stats->mean;
+ stats->mean += delta / stats->n;
+ stats->M2 += delta*(val - stats->mean);
+}
+
+double avg_stats(struct stats *stats)
+{
+ return stats->mean;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ *
+ * (\Sum n_i^2) - ((\Sum n_i)^2)/n
+ * s^2 = -------------------------------
+ * n - 1
+ *
+ * http://en.wikipedia.org/wiki/Stddev
+ *
+ * The std dev of the mean is related to the std dev by:
+ *
+ * s
+ * s_mean = -------
+ * sqrt(n)
+ *
+ */
+double stddev_stats(struct stats *stats)
+{
+ double variance, variance_mean;
+
+ if (!stats->n)
+ return 0.0;
+
+ variance = stats->M2 / (stats->n - 1);
+ variance_mean = variance / stats->n;
+
+ return sqrt(variance_mean);
+}
+
+double rel_stddev_stats(double stddev, double avg)
+{
+ double pct = 0.0;
+
+ if (avg)
+ pct = 100.0 * stddev/avg;
+
+ return pct;
+}
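
The comment above describes the online (Welford) mean/variance update that update_stats() and stddev_stats() implement. Below is a standalone sketch of the same computation, independent of the perf headers; the struct name and sample values are made up for illustration.

#include <math.h>
#include <stdio.h>

/* Local stand-in for tools/perf's struct stats: count, running mean and
 * sum of squared deviations (M2), updated with Welford's online method. */
struct running_stats {
	double n, mean, M2;
};

static void running_update(struct running_stats *s, double val)
{
	double delta;

	s->n++;
	delta = val - s->mean;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);
}

/* std dev of the mean: sqrt(sample variance / n), as in the comment above */
static double running_stddev_mean(struct running_stats *s)
{
	if (s->n < 2)
		return 0.0;
	return sqrt((s->M2 / (s->n - 1)) / s->n);
}

int main(void)
{
	struct running_stats s = { 0, 0, 0 };
	double samples[] = { 10.0, 12.0, 9.0, 11.0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		running_update(&s, samples[i]);

	printf("mean %.3f +- %.3f\n", s.mean, running_stddev_mean(&s));
	return 0;
}

Built with e.g. "cc welford.c -lm", this should print roughly "mean 10.500 +- 0.645", i.e. the same mean and standard deviation of the mean that perf stat reports for repeated runs.
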
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
new file mode 100644
index 000000000000..588367c3c767
--- /dev/null
+++ b/tools/perf/util/stat.h
@@ -0,0 +1,16 @@
+#ifndef __PERF_STATS_H
+#define __PERF_STATS_H
+
+#include "types.h"
+
+struct stats
+{
+ double n, mean, M2;
+};
+
+void update_stats(struct stats *stats, u64 val);
+double avg_stats(struct stats *stats);
+double stddev_stats(struct stats *stats);
+double rel_stddev_stats(double stddev, double avg);
+
+#endif
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 199bc4d8905d..32170590892d 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -1,5 +1,5 @@
#include "util.h"
-#include "string.h"
+#include "linux/string.h"
#define K 1024LL
/*
@@ -335,3 +335,19 @@ char *rtrim(char *s)
return s;
}
+
+/**
+ * memdup - duplicate region of memory
+ * @src: memory region to duplicate
+ * @len: memory region length
+ */
+void *memdup(const void *src, size_t len)
+{
+ void *p;
+
+ p = malloc(len);
+ if (p)
+ memcpy(p, src, len);
+
+ return p;
+}
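
A small self-contained usage sketch of the memdup() helper added above; the helper is duplicated locally so the snippet builds on its own, and the buffer contents are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same semantics as the helper above: allocate len bytes and copy src. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const unsigned char raw[] = { 0x01, 0x02, 0x03, 0x04 }; /* e.g. raw sample bytes */
	unsigned char *copy = memdup(raw, sizeof(raw));

	if (!copy)
		return 1;
	printf("copied %zu bytes, first byte 0x%02x\n", sizeof(raw), copy[0]);
	free(copy);
	return 0;
}
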
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 95856ff3dda4..155d8b7078a7 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -93,7 +93,7 @@ out:
void strlist__remove(struct strlist *slist, struct str_node *snode)
{
- str_node__delete(snode, slist->dupstr);
+ rblist__remove_node(&slist->rblist, &snode->rb_node);
}
struct str_node *strlist__find(struct strlist *slist, const char *entry)
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
new file mode 100644
index 000000000000..db0cc92cf2ea
--- /dev/null
+++ b/tools/perf/util/symbol-elf.c
@@ -0,0 +1,841 @@
+#include <libelf.h>
+#include <gelf.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include "symbol.h"
+#include "debug.h"
+
+#ifndef NT_GNU_BUILD_ID
+#define NT_GNU_BUILD_ID 3
+#endif
+
+/**
+ * elf_symtab__for_each_symbol - iterate thru all the symbols
+ *
+ * @syms: struct elf_symtab instance to iterate
+ * @idx: uint32_t idx
+ * @sym: GElf_Sym iterator
+ */
+#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
+ for (idx = 0, gelf_getsym(syms, idx, &sym);\
+ idx < nr_syms; \
+ idx++, gelf_getsym(syms, idx, &sym))
+
+static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+{
+ return GELF_ST_TYPE(sym->st_info);
+}
+
+static inline int elf_sym__is_function(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_FUNC &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
+static inline bool elf_sym__is_object(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_OBJECT &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
+static inline int elf_sym__is_label(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_NOTYPE &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF &&
+ sym->st_shndx != SHN_ABS;
+}
+
+static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sym__is_function(sym);
+ case MAP__VARIABLE:
+ return elf_sym__is_object(sym);
+ default:
+ return false;
+ }
+}
+
+static inline const char *elf_sym__name(const GElf_Sym *sym,
+ const Elf_Data *symstrs)
+{
+ return symstrs->d_buf + sym->st_name;
+}
+
+static inline const char *elf_sec__name(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return secstrs->d_buf + shdr->sh_name;
+}
+
+static inline int elf_sec__is_text(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
+}
+
+static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
+}
+
+static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
+ enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sec__is_text(shdr, secstrs);
+ case MAP__VARIABLE:
+ return elf_sec__is_data(shdr, secstrs);
+ default:
+ return false;
+ }
+}
+
+static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
+{
+ Elf_Scn *sec = NULL;
+ GElf_Shdr shdr;
+ size_t cnt = 1;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ gelf_getshdr(sec, &shdr);
+
+ if ((addr >= shdr.sh_addr) &&
+ (addr < (shdr.sh_addr + shdr.sh_size)))
+ return cnt;
+
+ ++cnt;
+ }
+
+ return -1;
+}
+
+static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ GElf_Shdr *shp, const char *name,
+ size_t *idx)
+{
+ Elf_Scn *sec = NULL;
+ size_t cnt = 1;
+
+ /* Elf is corrupted/truncated, avoid calling elf_strptr. */
+ if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
+ return NULL;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ char *str;
+
+ gelf_getshdr(sec, shp);
+ str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
+ if (!strcmp(name, str)) {
+ if (idx)
+ *idx = cnt;
+ break;
+ }
+ ++cnt;
+ }
+
+ return sec;
+}
+
+#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
+ for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
+ idx < nr_entries; \
+ ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
+
+#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
+ for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
+ idx < nr_entries; \
+ ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
+
+/*
+ * We need to check if we have a .dynsym, so that we can handle the
+ * .plt, synthesizing its symbols, that aren't on the symtabs (be it
+ * .dynsym or .symtab).
+ * And always look at the original dso, not at debuginfo packages, that
+ * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
+ */
+int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
+ symbol_filter_t filter)
+{
+ uint32_t nr_rel_entries, idx;
+ GElf_Sym sym;
+ u64 plt_offset;
+ GElf_Shdr shdr_plt;
+ struct symbol *f;
+ GElf_Shdr shdr_rel_plt, shdr_dynsym;
+ Elf_Data *reldata, *syms, *symstrs;
+ Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
+ size_t dynsym_idx;
+ GElf_Ehdr ehdr;
+ char sympltname[1024];
+ Elf *elf;
+ int nr = 0, symidx, err = 0;
+
+ if (!ss->dynsym)
+ return 0;
+
+ elf = ss->elf;
+ ehdr = ss->ehdr;
+
+ scn_dynsym = ss->dynsym;
+ shdr_dynsym = ss->dynshdr;
+ dynsym_idx = ss->dynsym_idx;
+
+ if (scn_dynsym == NULL)
+ goto out_elf_end;
+
+ scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
+ ".rela.plt", NULL);
+ if (scn_plt_rel == NULL) {
+ scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
+ ".rel.plt", NULL);
+ if (scn_plt_rel == NULL)
+ goto out_elf_end;
+ }
+
+ err = -1;
+
+ if (shdr_rel_plt.sh_link != dynsym_idx)
+ goto out_elf_end;
+
+ if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
+ goto out_elf_end;
+
+ /*
+ * Fetch the relocation section to find the idxes to the GOT
+ * and the symbols in the .dynsym they refer to.
+ */
+ reldata = elf_getdata(scn_plt_rel, NULL);
+ if (reldata == NULL)
+ goto out_elf_end;
+
+ syms = elf_getdata(scn_dynsym, NULL);
+ if (syms == NULL)
+ goto out_elf_end;
+
+ scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
+ if (scn_symstrs == NULL)
+ goto out_elf_end;
+
+ symstrs = elf_getdata(scn_symstrs, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
+ if (symstrs->d_size == 0)
+ goto out_elf_end;
+
+ nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
+ plt_offset = shdr_plt.sh_offset;
+
+ if (shdr_rel_plt.sh_type == SHT_RELA) {
+ GElf_Rela pos_mem, *pos;
+
+ elf_section__for_each_rela(reldata, pos, pos_mem, idx,
+ nr_rel_entries) {
+ symidx = GELF_R_SYM(pos->r_info);
+ plt_offset += shdr_plt.sh_entsize;
+ gelf_getsym(syms, symidx, &sym);
+ snprintf(sympltname, sizeof(sympltname),
+ "%s@plt", elf_sym__name(&sym, symstrs));
+
+ f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+ STB_GLOBAL, sympltname);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&dso->symbols[map->type], f);
+ ++nr;
+ }
+ }
+ } else if (shdr_rel_plt.sh_type == SHT_REL) {
+ GElf_Rel pos_mem, *pos;
+ elf_section__for_each_rel(reldata, pos, pos_mem, idx,
+ nr_rel_entries) {
+ symidx = GELF_R_SYM(pos->r_info);
+ plt_offset += shdr_plt.sh_entsize;
+ gelf_getsym(syms, symidx, &sym);
+ snprintf(sympltname, sizeof(sympltname),
+ "%s@plt", elf_sym__name(&sym, symstrs));
+
+ f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+ STB_GLOBAL, sympltname);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&dso->symbols[map->type], f);
+ ++nr;
+ }
+ }
+ }
+
+ err = 0;
+out_elf_end:
+ if (err == 0)
+ return nr;
+ pr_debug("%s: problems reading %s PLT info.\n",
+ __func__, dso->long_name);
+ return 0;
+}
+
+/*
+ * Align offset to 4 bytes as needed for note name and descriptor data.
+ */
+#define NOTE_ALIGN(n) (((n) + 3) & -4U)
+
+static int elf_read_build_id(Elf *elf, void *bf, size_t size)
+{
+ int err = -1;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *data;
+ Elf_Scn *sec;
+ Elf_Kind ek;
+ void *ptr;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ ek = elf_kind(elf);
+ if (ek != ELF_K_ELF)
+ goto out;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_err("%s: cannot get elf header.\n", __func__);
+ goto out;
+ }
+
+ /*
+ * Check following sections for notes:
+ * '.note.gnu.build-id'
+ * '.notes'
+ * '.note' (VDSO specific)
+ */
+ do {
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".note.gnu.build-id", NULL);
+ if (sec)
+ break;
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".notes", NULL);
+ if (sec)
+ break;
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".note", NULL);
+ if (sec)
+ break;
+
+ return err;
+
+ } while (0);
+
+ data = elf_getdata(sec, NULL);
+ if (data == NULL)
+ goto out;
+
+ ptr = data->d_buf;
+ while (ptr < (data->d_buf + data->d_size)) {
+ GElf_Nhdr *nhdr = ptr;
+ size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
+ descsz = NOTE_ALIGN(nhdr->n_descsz);
+ const char *name;
+
+ ptr += sizeof(*nhdr);
+ name = ptr;
+ ptr += namesz;
+ if (nhdr->n_type == NT_GNU_BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU")) {
+ if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
+ size_t sz = min(size, descsz);
+ memcpy(bf, ptr, sz);
+ memset(bf + sz, 0, size - sz);
+ err = descsz;
+ break;
+ }
+ }
+ ptr += descsz;
+ }
+
+out:
+ return err;
+}
+
+int filename__read_build_id(const char *filename, void *bf, size_t size)
+{
+ int fd, err = -1;
+ Elf *elf;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ err = elf_read_build_id(elf, bf, size);
+
+ elf_end(elf);
+out_close:
+ close(fd);
+out:
+ return err;
+}
+
+int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+{
+ int fd, err = -1;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ while (1) {
+ char bf[BUFSIZ];
+ GElf_Nhdr nhdr;
+ size_t namesz, descsz;
+
+ if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
+ break;
+
+ namesz = NOTE_ALIGN(nhdr.n_namesz);
+ descsz = NOTE_ALIGN(nhdr.n_descsz);
+ if (nhdr.n_type == NT_GNU_BUILD_ID &&
+ nhdr.n_namesz == sizeof("GNU")) {
+ if (read(fd, bf, namesz) != (ssize_t)namesz)
+ break;
+ if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
+ size_t sz = min(descsz, size);
+ if (read(fd, build_id, sz) == (ssize_t)sz) {
+ memset(build_id + sz, 0, size - sz);
+ err = 0;
+ break;
+ }
+ } else if (read(fd, bf, descsz) != (ssize_t)descsz)
+ break;
+ } else {
+ int n = namesz + descsz;
+ if (read(fd, bf, n) != n)
+ break;
+ }
+ }
+ close(fd);
+out:
+ return err;
+}
+
+int filename__read_debuglink(const char *filename, char *debuglink,
+ size_t size)
+{
+ int fd, err = -1;
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *data;
+ Elf_Scn *sec;
+ Elf_Kind ek;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ ek = elf_kind(elf);
+ if (ek != ELF_K_ELF)
+ goto out_close;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_err("%s: cannot get elf header.\n", __func__);
+ goto out_close;
+ }
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu_debuglink", NULL);
+ if (sec == NULL)
+ goto out_close;
+
+ data = elf_getdata(sec, NULL);
+ if (data == NULL)
+ goto out_close;
+
+ /* the start of this section is a zero-terminated string */
+ strncpy(debuglink, data->d_buf, size);
+	err = 0;
+ elf_end(elf);
+
+out_close:
+ close(fd);
+out:
+ return err;
+}
+
+static int dso__swap_init(struct dso *dso, unsigned char eidata)
+{
+ static unsigned int const endian = 1;
+
+ dso->needs_swap = DSO_SWAP__NO;
+
+ switch (eidata) {
+ case ELFDATA2LSB:
+ /* We are big endian, DSO is little endian. */
+ if (*(unsigned char const *)&endian != 1)
+ dso->needs_swap = DSO_SWAP__YES;
+ break;
+
+ case ELFDATA2MSB:
+ /* We are little endian, DSO is big endian. */
+ if (*(unsigned char const *)&endian != 0)
+ dso->needs_swap = DSO_SWAP__YES;
+ break;
+
+ default:
+ pr_err("unrecognized DSO data encoding %d\n", eidata);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+bool symsrc__possibly_runtime(struct symsrc *ss)
+{
+ return ss->dynsym || ss->opdsec;
+}
+
+bool symsrc__has_symtab(struct symsrc *ss)
+{
+ return ss->symtab != NULL;
+}
+
+void symsrc__destroy(struct symsrc *ss)
+{
+ free(ss->name);
+ elf_end(ss->elf);
+ close(ss->fd);
+}
+
+int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
+ enum dso_binary_type type)
+{
+ int err = -1;
+ GElf_Ehdr ehdr;
+ Elf *elf;
+ int fd;
+
+ fd = open(name, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
+ goto out_close;
+ }
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_debug("%s: cannot get elf header.\n", __func__);
+ goto out_elf_end;
+ }
+
+ if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
+ goto out_elf_end;
+
+ /* Always reject images with a mismatched build-id: */
+ if (dso->has_build_id) {
+ u8 build_id[BUILD_ID_SIZE];
+
+ if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
+ goto out_elf_end;
+
+ if (!dso__build_id_equal(dso, build_id))
+ goto out_elf_end;
+ }
+
+ ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
+ NULL);
+ if (ss->symshdr.sh_type != SHT_SYMTAB)
+ ss->symtab = NULL;
+
+ ss->dynsym_idx = 0;
+ ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
+ &ss->dynsym_idx);
+ if (ss->dynshdr.sh_type != SHT_DYNSYM)
+ ss->dynsym = NULL;
+
+ ss->opdidx = 0;
+ ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
+ &ss->opdidx);
+ if (ss->opdshdr.sh_type != SHT_PROGBITS)
+ ss->opdsec = NULL;
+
+ if (dso->kernel == DSO_TYPE_USER) {
+ GElf_Shdr shdr;
+ ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+ elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu.prelink_undo",
+ NULL) != NULL);
+ } else {
+ ss->adjust_symbols = 0;
+ }
+
+ ss->name = strdup(name);
+ if (!ss->name)
+ goto out_elf_end;
+
+ ss->elf = elf;
+ ss->fd = fd;
+ ss->ehdr = ehdr;
+ ss->type = type;
+
+ return 0;
+
+out_elf_end:
+ elf_end(elf);
+out_close:
+ close(fd);
+ return err;
+}
+
+int dso__load_sym(struct dso *dso, struct map *map,
+ struct symsrc *syms_ss, struct symsrc *runtime_ss,
+ symbol_filter_t filter, int kmodule)
+{
+ struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
+ struct map *curr_map = map;
+ struct dso *curr_dso = dso;
+ Elf_Data *symstrs, *secstrs;
+ uint32_t nr_syms;
+ int err = -1;
+ uint32_t idx;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *syms, *opddata = NULL;
+ GElf_Sym sym;
+ Elf_Scn *sec, *sec_strndx;
+ Elf *elf;
+ int nr = 0;
+
+ dso->symtab_type = syms_ss->type;
+
+ if (!syms_ss->symtab) {
+ syms_ss->symtab = syms_ss->dynsym;
+ syms_ss->symshdr = syms_ss->dynshdr;
+ }
+
+ elf = syms_ss->elf;
+ ehdr = syms_ss->ehdr;
+ sec = syms_ss->symtab;
+ shdr = syms_ss->symshdr;
+
+ if (runtime_ss->opdsec)
+ opddata = elf_rawdata(runtime_ss->opdsec, NULL);
+
+ syms = elf_getdata(sec, NULL);
+ if (syms == NULL)
+ goto out_elf_end;
+
+ sec = elf_getscn(elf, shdr.sh_link);
+ if (sec == NULL)
+ goto out_elf_end;
+
+ symstrs = elf_getdata(sec, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
+ sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ if (sec_strndx == NULL)
+ goto out_elf_end;
+
+ secstrs = elf_getdata(sec_strndx, NULL);
+ if (secstrs == NULL)
+ goto out_elf_end;
+
+ nr_syms = shdr.sh_size / shdr.sh_entsize;
+
+ memset(&sym, 0, sizeof(sym));
+ dso->adjust_symbols = runtime_ss->adjust_symbols;
+ elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
+ struct symbol *f;
+ const char *elf_name = elf_sym__name(&sym, symstrs);
+ char *demangled = NULL;
+ int is_label = elf_sym__is_label(&sym);
+ const char *section_name;
+ bool used_opd = false;
+
+ if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
+ strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
+ kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+
+ if (!is_label && !elf_sym__is_a(&sym, map->type))
+ continue;
+
+ /* Reject ARM ELF "mapping symbols": these aren't unique and
+ * don't identify functions, so will confuse the profile
+ * output: */
+ if (ehdr.e_machine == EM_ARM) {
+ if (!strcmp(elf_name, "$a") ||
+ !strcmp(elf_name, "$d") ||
+ !strcmp(elf_name, "$t"))
+ continue;
+ }
+
+ if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
+ u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
+ u64 *opd = opddata->d_buf + offset;
+ sym.st_value = DSO__SWAP(dso, u64, *opd);
+ sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
+ sym.st_value);
+ used_opd = true;
+ }
+
+ sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
+ if (!sec)
+ goto out_elf_end;
+
+ gelf_getshdr(sec, &shdr);
+
+ if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
+ continue;
+
+ section_name = elf_sec__name(&shdr, secstrs);
+
+ /* On ARM, symbols for thumb functions have 1 added to
+ * the symbol address as a flag - remove it */
+ if ((ehdr.e_machine == EM_ARM) &&
+ (map->type == MAP__FUNCTION) &&
+ (sym.st_value & 1))
+ --sym.st_value;
+
+ if (dso->kernel != DSO_TYPE_USER || kmodule) {
+ char dso_name[PATH_MAX];
+
+ if (strcmp(section_name,
+ (curr_dso->short_name +
+ dso->short_name_len)) == 0)
+ goto new_symbol;
+
+ if (strcmp(section_name, ".text") == 0) {
+ curr_map = map;
+ curr_dso = dso;
+ goto new_symbol;
+ }
+
+ snprintf(dso_name, sizeof(dso_name),
+ "%s%s", dso->short_name, section_name);
+
+ curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
+ if (curr_map == NULL) {
+ u64 start = sym.st_value;
+
+ if (kmodule)
+ start += map->start + shdr.sh_offset;
+
+ curr_dso = dso__new(dso_name);
+ if (curr_dso == NULL)
+ goto out_elf_end;
+ curr_dso->kernel = dso->kernel;
+ curr_dso->long_name = dso->long_name;
+ curr_dso->long_name_len = dso->long_name_len;
+ curr_map = map__new2(start, curr_dso,
+ map->type);
+ if (curr_map == NULL) {
+ dso__delete(curr_dso);
+ goto out_elf_end;
+ }
+ curr_map->map_ip = identity__map_ip;
+ curr_map->unmap_ip = identity__map_ip;
+ curr_dso->symtab_type = dso->symtab_type;
+ map_groups__insert(kmap->kmaps, curr_map);
+ dsos__add(&dso->node, curr_dso);
+ dso__set_loaded(curr_dso, map->type);
+ } else
+ curr_dso = curr_map->dso;
+
+ goto new_symbol;
+ }
+
+ if ((used_opd && runtime_ss->adjust_symbols)
+ || (!used_opd && syms_ss->adjust_symbols)) {
+ pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+ "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
+ (u64)sym.st_value, (u64)shdr.sh_addr,
+ (u64)shdr.sh_offset);
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ }
+ /*
+ * We need to figure out if the object was created from C++ sources
+ * DWARF DW_compile_unit has this, but we don't always have access
+ * to it...
+ */
+ demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
+ if (demangled != NULL)
+ elf_name = demangled;
+new_symbol:
+ f = symbol__new(sym.st_value, sym.st_size,
+ GELF_ST_BIND(sym.st_info), elf_name);
+ free(demangled);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(curr_map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&curr_dso->symbols[curr_map->type], f);
+ nr++;
+ }
+ }
+
+ /*
+ * For misannotated, zeroed, ASM function sizes.
+ */
+ if (nr > 0) {
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
+ symbols__fixup_end(&dso->symbols[map->type]);
+ if (kmap) {
+ /*
+ * We need to fixup this here too because we create new
+ * maps here, for things like vsyscall sections.
+ */
+ __map_groups__fixup_end(kmap->kmaps, map->type);
+ }
+ }
+ err = nr;
+out_elf_end:
+ return err;
+}
+
+void symbol__elf_init(void)
+{
+ elf_version(EV_CURRENT);
+}
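
Both build-id readers (the libelf one above and the minimal one below) depend on the 4-byte alignment of ELF note name and descriptor fields. A standalone sketch of that note walk over a hand-built PT_NOTE image follows; it assumes a little-endian host and a well-formed buffer, and the payload bytes are invented.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NOTE_ALIGN(n)	(((n) + 3) & ~3U)
#define NT_GNU_BUILD_ID	3

struct note_hdr {		/* matches the Elf{32,64}_Nhdr layout */
	uint32_t n_namesz, n_descsz, n_type;
};

/* Scan a (well-formed) PT_NOTE image for the GNU build-id descriptor. */
static const unsigned char *find_build_id(const void *notes, size_t len,
					  size_t *id_len)
{
	const unsigned char *ptr = notes, *end = ptr + len;

	while (ptr + sizeof(struct note_hdr) <= end) {
		const struct note_hdr *nhdr = (const void *)ptr;
		const char *name = (const char *)(nhdr + 1);
		const unsigned char *desc = (const unsigned char *)name +
					    NOTE_ALIGN(nhdr->n_namesz);

		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    !memcmp(name, "GNU", sizeof("GNU"))) {
			*id_len = nhdr->n_descsz;
			return desc;
		}
		ptr = desc + NOTE_ALIGN(nhdr->n_descsz);
	}
	return NULL;
}

int main(void)
{
	/* hand-built note, little-endian host assumed:
	 * namesz=4 ("GNU\0"), descsz=4, type=NT_GNU_BUILD_ID, then payload */
	unsigned char notes[] __attribute__((aligned(4))) = {
		4, 0, 0, 0,  4, 0, 0, 0,  3, 0, 0, 0,
		'G', 'N', 'U', 0,  0xde, 0xad, 0xbe, 0xef,
	};
	size_t id_len, i;
	const unsigned char *id = find_build_id(notes, sizeof(notes), &id_len);

	if (!id)
		return 1;
	for (i = 0; i < id_len; i++)
		printf("%02x", id[i]);
	putchar('\n');
	return 0;
}

Running it prints "deadbeef", the descriptor payload, which is exactly the blob the real readers copy into the bf/build_id output buffer and zero-pad.
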
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
new file mode 100644
index 000000000000..259f8f2ea9c9
--- /dev/null
+++ b/tools/perf/util/symbol-minimal.c
@@ -0,0 +1,307 @@
+#include "symbol.h"
+
+#include <elf.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <byteswap.h>
+#include <sys/stat.h>
+
+
+static bool check_need_swap(int file_endian)
+{
+ const int data = 1;
+ u8 *check = (u8 *)&data;
+ int host_endian;
+
+ if (check[0] == 1)
+ host_endian = ELFDATA2LSB;
+ else
+ host_endian = ELFDATA2MSB;
+
+ return host_endian != file_endian;
+}
+
+#define NOTE_ALIGN(sz) (((sz) + 3) & ~3)
+
+#define NT_GNU_BUILD_ID 3
+
+static int read_build_id(void *note_data, size_t note_len, void *bf,
+ size_t size, bool need_swap)
+{
+ struct {
+ u32 n_namesz;
+ u32 n_descsz;
+ u32 n_type;
+ } *nhdr;
+ void *ptr;
+
+ ptr = note_data;
+ while (ptr < (note_data + note_len)) {
+ const char *name;
+ size_t namesz, descsz;
+
+ nhdr = ptr;
+ if (need_swap) {
+ nhdr->n_namesz = bswap_32(nhdr->n_namesz);
+ nhdr->n_descsz = bswap_32(nhdr->n_descsz);
+ nhdr->n_type = bswap_32(nhdr->n_type);
+ }
+
+ namesz = NOTE_ALIGN(nhdr->n_namesz);
+ descsz = NOTE_ALIGN(nhdr->n_descsz);
+
+ ptr += sizeof(*nhdr);
+ name = ptr;
+ ptr += namesz;
+ if (nhdr->n_type == NT_GNU_BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU")) {
+ if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
+ size_t sz = min(size, descsz);
+ memcpy(bf, ptr, sz);
+ memset(bf + sz, 0, size - sz);
+ return 0;
+ }
+ }
+ ptr += descsz;
+ }
+
+ return -1;
+}
+
+int filename__read_debuglink(const char *filename __maybe_unused,
+ char *debuglink __maybe_unused,
+ size_t size __maybe_unused)
+{
+ return -1;
+}
+
+/*
+ * Just try the PT_NOTE program headers, otherwise fail
+ */
+int filename__read_build_id(const char *filename, void *bf, size_t size)
+{
+ FILE *fp;
+ int ret = -1;
+ bool need_swap = false;
+ u8 e_ident[EI_NIDENT];
+ size_t buf_size;
+ void *buf;
+ int i;
+
+ fp = fopen(filename, "r");
+ if (fp == NULL)
+ return -1;
+
+ if (fread(e_ident, sizeof(e_ident), 1, fp) != 1)
+ goto out;
+
+ if (memcmp(e_ident, ELFMAG, SELFMAG) ||
+ e_ident[EI_VERSION] != EV_CURRENT)
+ goto out;
+
+ need_swap = check_need_swap(e_ident[EI_DATA]);
+
+ /* for simplicity */
+ fseek(fp, 0, SEEK_SET);
+
+ if (e_ident[EI_CLASS] == ELFCLASS32) {
+ Elf32_Ehdr ehdr;
+ Elf32_Phdr *phdr;
+
+ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+ goto out;
+
+ if (need_swap) {
+ ehdr.e_phoff = bswap_32(ehdr.e_phoff);
+ ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
+ ehdr.e_phnum = bswap_16(ehdr.e_phnum);
+ }
+
+ buf_size = ehdr.e_phentsize * ehdr.e_phnum;
+ buf = malloc(buf_size);
+ if (buf == NULL)
+ goto out;
+
+ fseek(fp, ehdr.e_phoff, SEEK_SET);
+ if (fread(buf, buf_size, 1, fp) != 1)
+ goto out_free;
+
+ for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
+ void *tmp;
+
+ if (need_swap) {
+ phdr->p_type = bswap_32(phdr->p_type);
+ phdr->p_offset = bswap_32(phdr->p_offset);
+ phdr->p_filesz = bswap_32(phdr->p_filesz);
+ }
+
+ if (phdr->p_type != PT_NOTE)
+ continue;
+
+ buf_size = phdr->p_filesz;
+ tmp = realloc(buf, buf_size);
+ if (tmp == NULL)
+ goto out_free;
+
+ buf = tmp;
+ fseek(fp, phdr->p_offset, SEEK_SET);
+ if (fread(buf, buf_size, 1, fp) != 1)
+ goto out_free;
+
+ ret = read_build_id(buf, buf_size, bf, size, need_swap);
+ if (ret == 0)
+ ret = size;
+ break;
+ }
+ } else {
+ Elf64_Ehdr ehdr;
+ Elf64_Phdr *phdr;
+
+ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+ goto out;
+
+ if (need_swap) {
+ ehdr.e_phoff = bswap_64(ehdr.e_phoff);
+ ehdr.e_phentsize = bswap_16(ehdr.e_phentsize);
+ ehdr.e_phnum = bswap_16(ehdr.e_phnum);
+ }
+
+ buf_size = ehdr.e_phentsize * ehdr.e_phnum;
+ buf = malloc(buf_size);
+ if (buf == NULL)
+ goto out;
+
+ fseek(fp, ehdr.e_phoff, SEEK_SET);
+ if (fread(buf, buf_size, 1, fp) != 1)
+ goto out_free;
+
+ for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
+ void *tmp;
+
+ if (need_swap) {
+ phdr->p_type = bswap_32(phdr->p_type);
+ phdr->p_offset = bswap_64(phdr->p_offset);
+ phdr->p_filesz = bswap_64(phdr->p_filesz);
+ }
+
+ if (phdr->p_type != PT_NOTE)
+ continue;
+
+ buf_size = phdr->p_filesz;
+ tmp = realloc(buf, buf_size);
+ if (tmp == NULL)
+ goto out_free;
+
+ buf = tmp;
+ fseek(fp, phdr->p_offset, SEEK_SET);
+ if (fread(buf, buf_size, 1, fp) != 1)
+ goto out_free;
+
+ ret = read_build_id(buf, buf_size, bf, size, need_swap);
+ if (ret == 0)
+ ret = size;
+ break;
+ }
+ }
+out_free:
+ free(buf);
+out:
+ fclose(fp);
+ return ret;
+}
+
+int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+{
+ int fd;
+ int ret = -1;
+ struct stat stbuf;
+ size_t buf_size;
+ void *buf;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ if (fstat(fd, &stbuf) < 0)
+ goto out;
+
+ buf_size = stbuf.st_size;
+ buf = malloc(buf_size);
+ if (buf == NULL)
+ goto out;
+
+ if (read(fd, buf, buf_size) != (ssize_t) buf_size)
+ goto out_free;
+
+ ret = read_build_id(buf, buf_size, build_id, size, false);
+out_free:
+ free(buf);
+out:
+ close(fd);
+ return ret;
+}
+
+int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused,
+ const char *name,
+ enum dso_binary_type type)
+{
+ int fd = open(name, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ ss->name = strdup(name);
+ if (!ss->name)
+ goto out_close;
+
+ ss->type = type;
+	ss->fd = fd;
+ return 0;
+out_close:
+ close(fd);
+ return -1;
+}
+
+bool symsrc__possibly_runtime(struct symsrc *ss __maybe_unused)
+{
+ /* Assume all sym sources could be a runtime image. */
+ return true;
+}
+
+bool symsrc__has_symtab(struct symsrc *ss __maybe_unused)
+{
+ return false;
+}
+
+void symsrc__destroy(struct symsrc *ss)
+{
+ free(ss->name);
+ close(ss->fd);
+}
+
+int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused,
+ struct symsrc *ss __maybe_unused,
+ struct map *map __maybe_unused,
+ symbol_filter_t filter __maybe_unused)
+{
+ return 0;
+}
+
+int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
+ struct symsrc *ss,
+ struct symsrc *runtime_ss __maybe_unused,
+ symbol_filter_t filter __maybe_unused,
+ int kmodule __maybe_unused)
+{
+	unsigned char build_id[BUILD_ID_SIZE];
+
+ if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) {
+ dso__set_build_id(dso, build_id);
+ return 1;
+ }
+ return 0;
+}
+
+void symbol__elf_init(void)
+{
+}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 8b63b678e127..e2e8c697cffe 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -15,8 +15,6 @@
#include "symbol.h"
#include "strlist.h"
-#include <libelf.h>
-#include <gelf.h>
#include <elf.h>
#include <limits.h>
#include <sys/utsname.h>
@@ -25,15 +23,7 @@
#define KSYM_NAME_LEN 256
#endif
-#ifndef NT_GNU_BUILD_ID
-#define NT_GNU_BUILD_ID 3
-#endif
-
static void dso_cache__free(struct rb_root *root);
-static bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
-static int elf_read_build_id(Elf *elf, void *bf, size_t size);
-static void dsos__add(struct list_head *head, struct dso *dso);
-static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
static int dso__load_kernel_sym(struct dso *dso, struct map *map,
symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
@@ -170,7 +160,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
return SYMBOL_B;
}
-static void symbols__fixup_duplicate(struct rb_root *symbols)
+void symbols__fixup_duplicate(struct rb_root *symbols)
{
struct rb_node *nd;
struct symbol *curr, *next;
@@ -199,7 +189,7 @@ again:
}
}
-static void symbols__fixup_end(struct rb_root *symbols)
+void symbols__fixup_end(struct rb_root *symbols)
{
struct rb_node *nd, *prevnd = rb_first(symbols);
struct symbol *curr, *prev;
@@ -222,7 +212,7 @@ static void symbols__fixup_end(struct rb_root *symbols)
curr->end = roundup(curr->start, 4096);
}
-static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
+void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
struct map *prev, *curr;
struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);
@@ -252,8 +242,7 @@ static void map_groups__fixup_end(struct map_groups *mg)
__map_groups__fixup_end(mg, i);
}
-static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
- const char *name)
+struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
size_t namelen = strlen(name) + 1;
struct symbol *sym = calloc(1, (symbol_conf.priv_size +
@@ -390,7 +379,7 @@ void dso__set_build_id(struct dso *dso, void *build_id)
dso->has_build_id = 1;
}
-static void symbols__insert(struct rb_root *symbols, struct symbol *sym)
+void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
struct rb_node **p = &symbols->rb_node;
struct rb_node *parent = NULL;
@@ -574,7 +563,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
- char type, u64 start, u64 end))
+ char type, u64 start))
{
char *line = NULL;
size_t n;
@@ -614,13 +603,8 @@ int kallsyms__parse(const char *filename, void *arg,
break;
}
- /*
- * module symbols are not sorted so we add all
- * symbols with zero length and rely on
- * symbols__fixup_end() to fix it up.
- */
err = process_symbol(arg, symbol_name,
- symbol_type, start, start);
+ symbol_type, start);
if (err)
break;
}
@@ -647,7 +631,7 @@ static u8 kallsyms2elf_type(char type)
}
static int map__process_kallsym_symbol(void *arg, const char *name,
- char type, u64 start, u64 end)
+ char type, u64 start)
{
struct symbol *sym;
struct process_kallsyms_args *a = arg;
@@ -656,8 +640,12 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
if (!symbol_type__is_a(type, a->map->type))
return 0;
- sym = symbol__new(start, end - start + 1,
- kallsyms2elf_type(type), name);
+ /*
+ * module symbols are not sorted so we add all
+ * symbols, setting length to 0, and rely on
+ * symbols__fixup_end() to fix it up.
+ */
+ sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
if (sym == NULL)
return -ENOMEM;
/*
@@ -904,556 +892,7 @@ out_failure:
return -1;
}
-/**
- * elf_symtab__for_each_symbol - iterate thru all the symbols
- *
- * @syms: struct elf_symtab instance to iterate
- * @idx: uint32_t idx
- * @sym: GElf_Sym iterator
- */
-#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
- for (idx = 0, gelf_getsym(syms, idx, &sym);\
- idx < nr_syms; \
- idx++, gelf_getsym(syms, idx, &sym))
-
-static inline uint8_t elf_sym__type(const GElf_Sym *sym)
-{
- return GELF_ST_TYPE(sym->st_info);
-}
-
-static inline int elf_sym__is_function(const GElf_Sym *sym)
-{
- return elf_sym__type(sym) == STT_FUNC &&
- sym->st_name != 0 &&
- sym->st_shndx != SHN_UNDEF;
-}
-
-static inline bool elf_sym__is_object(const GElf_Sym *sym)
-{
- return elf_sym__type(sym) == STT_OBJECT &&
- sym->st_name != 0 &&
- sym->st_shndx != SHN_UNDEF;
-}
-
-static inline int elf_sym__is_label(const GElf_Sym *sym)
-{
- return elf_sym__type(sym) == STT_NOTYPE &&
- sym->st_name != 0 &&
- sym->st_shndx != SHN_UNDEF &&
- sym->st_shndx != SHN_ABS;
-}
-
-static inline const char *elf_sec__name(const GElf_Shdr *shdr,
- const Elf_Data *secstrs)
-{
- return secstrs->d_buf + shdr->sh_name;
-}
-
-static inline int elf_sec__is_text(const GElf_Shdr *shdr,
- const Elf_Data *secstrs)
-{
- return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
-}
-
-static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
- const Elf_Data *secstrs)
-{
- return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
-}
-
-static inline const char *elf_sym__name(const GElf_Sym *sym,
- const Elf_Data *symstrs)
-{
- return symstrs->d_buf + sym->st_name;
-}
-
-static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
- GElf_Shdr *shp, const char *name,
- size_t *idx)
-{
- Elf_Scn *sec = NULL;
- size_t cnt = 1;
-
- while ((sec = elf_nextscn(elf, sec)) != NULL) {
- char *str;
-
- gelf_getshdr(sec, shp);
- str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
- if (!strcmp(name, str)) {
- if (idx)
- *idx = cnt;
- break;
- }
- ++cnt;
- }
-
- return sec;
-}
-
-#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
- for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
- idx < nr_entries; \
- ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
-
-#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
- for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
- idx < nr_entries; \
- ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
-
-/*
- * We need to check if we have a .dynsym, so that we can handle the
- * .plt, synthesizing its symbols, that aren't on the symtabs (be it
- * .dynsym or .symtab).
- * And always look at the original dso, not at debuginfo packages, that
- * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
- */
-static int
-dso__synthesize_plt_symbols(struct dso *dso, char *name, struct map *map,
- symbol_filter_t filter)
-{
- uint32_t nr_rel_entries, idx;
- GElf_Sym sym;
- u64 plt_offset;
- GElf_Shdr shdr_plt;
- struct symbol *f;
- GElf_Shdr shdr_rel_plt, shdr_dynsym;
- Elf_Data *reldata, *syms, *symstrs;
- Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
- size_t dynsym_idx;
- GElf_Ehdr ehdr;
- char sympltname[1024];
- Elf *elf;
- int nr = 0, symidx, fd, err = 0;
-
- fd = open(name, O_RDONLY);
- if (fd < 0)
- goto out;
-
- elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
- if (elf == NULL)
- goto out_close;
-
- if (gelf_getehdr(elf, &ehdr) == NULL)
- goto out_elf_end;
-
- scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
- ".dynsym", &dynsym_idx);
- if (scn_dynsym == NULL)
- goto out_elf_end;
-
- scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
- ".rela.plt", NULL);
- if (scn_plt_rel == NULL) {
- scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
- ".rel.plt", NULL);
- if (scn_plt_rel == NULL)
- goto out_elf_end;
- }
-
- err = -1;
-
- if (shdr_rel_plt.sh_link != dynsym_idx)
- goto out_elf_end;
-
- if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
- goto out_elf_end;
-
- /*
- * Fetch the relocation section to find the idxes to the GOT
- * and the symbols in the .dynsym they refer to.
- */
- reldata = elf_getdata(scn_plt_rel, NULL);
- if (reldata == NULL)
- goto out_elf_end;
-
- syms = elf_getdata(scn_dynsym, NULL);
- if (syms == NULL)
- goto out_elf_end;
-
- scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
- if (scn_symstrs == NULL)
- goto out_elf_end;
-
- symstrs = elf_getdata(scn_symstrs, NULL);
- if (symstrs == NULL)
- goto out_elf_end;
-
- nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
- plt_offset = shdr_plt.sh_offset;
-
- if (shdr_rel_plt.sh_type == SHT_RELA) {
- GElf_Rela pos_mem, *pos;
-
- elf_section__for_each_rela(reldata, pos, pos_mem, idx,
- nr_rel_entries) {
- symidx = GELF_R_SYM(pos->r_info);
- plt_offset += shdr_plt.sh_entsize;
- gelf_getsym(syms, symidx, &sym);
- snprintf(sympltname, sizeof(sympltname),
- "%s@plt", elf_sym__name(&sym, symstrs));
-
- f = symbol__new(plt_offset, shdr_plt.sh_entsize,
- STB_GLOBAL, sympltname);
- if (!f)
- goto out_elf_end;
-
- if (filter && filter(map, f))
- symbol__delete(f);
- else {
- symbols__insert(&dso->symbols[map->type], f);
- ++nr;
- }
- }
- } else if (shdr_rel_plt.sh_type == SHT_REL) {
- GElf_Rel pos_mem, *pos;
- elf_section__for_each_rel(reldata, pos, pos_mem, idx,
- nr_rel_entries) {
- symidx = GELF_R_SYM(pos->r_info);
- plt_offset += shdr_plt.sh_entsize;
- gelf_getsym(syms, symidx, &sym);
- snprintf(sympltname, sizeof(sympltname),
- "%s@plt", elf_sym__name(&sym, symstrs));
-
- f = symbol__new(plt_offset, shdr_plt.sh_entsize,
- STB_GLOBAL, sympltname);
- if (!f)
- goto out_elf_end;
-
- if (filter && filter(map, f))
- symbol__delete(f);
- else {
- symbols__insert(&dso->symbols[map->type], f);
- ++nr;
- }
- }
- }
-
- err = 0;
-out_elf_end:
- elf_end(elf);
-out_close:
- close(fd);
-
- if (err == 0)
- return nr;
-out:
- pr_debug("%s: problems reading %s PLT info.\n",
- __func__, dso->long_name);
- return 0;
-}
-
-static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
-{
- switch (type) {
- case MAP__FUNCTION:
- return elf_sym__is_function(sym);
- case MAP__VARIABLE:
- return elf_sym__is_object(sym);
- default:
- return false;
- }
-}
-
-static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
- enum map_type type)
-{
- switch (type) {
- case MAP__FUNCTION:
- return elf_sec__is_text(shdr, secstrs);
- case MAP__VARIABLE:
- return elf_sec__is_data(shdr, secstrs);
- default:
- return false;
- }
-}
-
-static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
-{
- Elf_Scn *sec = NULL;
- GElf_Shdr shdr;
- size_t cnt = 1;
-
- while ((sec = elf_nextscn(elf, sec)) != NULL) {
- gelf_getshdr(sec, &shdr);
-
- if ((addr >= shdr.sh_addr) &&
- (addr < (shdr.sh_addr + shdr.sh_size)))
- return cnt;
-
- ++cnt;
- }
-
- return -1;
-}
-
-static int dso__swap_init(struct dso *dso, unsigned char eidata)
-{
- static unsigned int const endian = 1;
-
- dso->needs_swap = DSO_SWAP__NO;
-
- switch (eidata) {
- case ELFDATA2LSB:
- /* We are big endian, DSO is little endian. */
- if (*(unsigned char const *)&endian != 1)
- dso->needs_swap = DSO_SWAP__YES;
- break;
-
- case ELFDATA2MSB:
- /* We are little endian, DSO is big endian. */
- if (*(unsigned char const *)&endian != 0)
- dso->needs_swap = DSO_SWAP__YES;
- break;
-
- default:
- pr_err("unrecognized DSO data encoding %d\n", eidata);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
- int fd, symbol_filter_t filter, int kmodule,
- int want_symtab)
-{
- struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
- struct map *curr_map = map;
- struct dso *curr_dso = dso;
- Elf_Data *symstrs, *secstrs;
- uint32_t nr_syms;
- int err = -1;
- uint32_t idx;
- GElf_Ehdr ehdr;
- GElf_Shdr shdr, opdshdr;
- Elf_Data *syms, *opddata = NULL;
- GElf_Sym sym;
- Elf_Scn *sec, *sec_strndx, *opdsec;
- Elf *elf;
- int nr = 0;
- size_t opdidx = 0;
-
- elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
- if (elf == NULL) {
- pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
- goto out_close;
- }
-
- if (gelf_getehdr(elf, &ehdr) == NULL) {
- pr_debug("%s: cannot get elf header.\n", __func__);
- goto out_elf_end;
- }
-
- if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
- goto out_elf_end;
-
- /* Always reject images with a mismatched build-id: */
- if (dso->has_build_id) {
- u8 build_id[BUILD_ID_SIZE];
-
- if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
- goto out_elf_end;
-
- if (!dso__build_id_equal(dso, build_id))
- goto out_elf_end;
- }
-
- sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
- if (sec == NULL) {
- if (want_symtab)
- goto out_elf_end;
-
- sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);
- if (sec == NULL)
- goto out_elf_end;
- }
-
- opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
- if (opdshdr.sh_type != SHT_PROGBITS)
- opdsec = NULL;
- if (opdsec)
- opddata = elf_rawdata(opdsec, NULL);
-
- syms = elf_getdata(sec, NULL);
- if (syms == NULL)
- goto out_elf_end;
-
- sec = elf_getscn(elf, shdr.sh_link);
- if (sec == NULL)
- goto out_elf_end;
-
- symstrs = elf_getdata(sec, NULL);
- if (symstrs == NULL)
- goto out_elf_end;
-
- sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
- if (sec_strndx == NULL)
- goto out_elf_end;
-
- secstrs = elf_getdata(sec_strndx, NULL);
- if (secstrs == NULL)
- goto out_elf_end;
-
- nr_syms = shdr.sh_size / shdr.sh_entsize;
-
- memset(&sym, 0, sizeof(sym));
- if (dso->kernel == DSO_TYPE_USER) {
- dso->adjust_symbols = (ehdr.e_type == ET_EXEC ||
- elf_section_by_name(elf, &ehdr, &shdr,
- ".gnu.prelink_undo",
- NULL) != NULL);
- } else {
- dso->adjust_symbols = 0;
- }
- elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
- struct symbol *f;
- const char *elf_name = elf_sym__name(&sym, symstrs);
- char *demangled = NULL;
- int is_label = elf_sym__is_label(&sym);
- const char *section_name;
-
- if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
- strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
- kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
-
- if (!is_label && !elf_sym__is_a(&sym, map->type))
- continue;
-
- /* Reject ARM ELF "mapping symbols": these aren't unique and
- * don't identify functions, so will confuse the profile
- * output: */
- if (ehdr.e_machine == EM_ARM) {
- if (!strcmp(elf_name, "$a") ||
- !strcmp(elf_name, "$d") ||
- !strcmp(elf_name, "$t"))
- continue;
- }
-
- if (opdsec && sym.st_shndx == opdidx) {
- u32 offset = sym.st_value - opdshdr.sh_addr;
- u64 *opd = opddata->d_buf + offset;
- sym.st_value = DSO__SWAP(dso, u64, *opd);
- sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
- }
-
- sec = elf_getscn(elf, sym.st_shndx);
- if (!sec)
- goto out_elf_end;
-
- gelf_getshdr(sec, &shdr);
-
- if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
- continue;
-
- section_name = elf_sec__name(&shdr, secstrs);
-
- /* On ARM, symbols for thumb functions have 1 added to
- * the symbol address as a flag - remove it */
- if ((ehdr.e_machine == EM_ARM) &&
- (map->type == MAP__FUNCTION) &&
- (sym.st_value & 1))
- --sym.st_value;
-
- if (dso->kernel != DSO_TYPE_USER || kmodule) {
- char dso_name[PATH_MAX];
-
- if (strcmp(section_name,
- (curr_dso->short_name +
- dso->short_name_len)) == 0)
- goto new_symbol;
-
- if (strcmp(section_name, ".text") == 0) {
- curr_map = map;
- curr_dso = dso;
- goto new_symbol;
- }
-
- snprintf(dso_name, sizeof(dso_name),
- "%s%s", dso->short_name, section_name);
-
- curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
- if (curr_map == NULL) {
- u64 start = sym.st_value;
-
- if (kmodule)
- start += map->start + shdr.sh_offset;
-
- curr_dso = dso__new(dso_name);
- if (curr_dso == NULL)
- goto out_elf_end;
- curr_dso->kernel = dso->kernel;
- curr_dso->long_name = dso->long_name;
- curr_dso->long_name_len = dso->long_name_len;
- curr_map = map__new2(start, curr_dso,
- map->type);
- if (curr_map == NULL) {
- dso__delete(curr_dso);
- goto out_elf_end;
- }
- curr_map->map_ip = identity__map_ip;
- curr_map->unmap_ip = identity__map_ip;
- curr_dso->symtab_type = dso->symtab_type;
- map_groups__insert(kmap->kmaps, curr_map);
- dsos__add(&dso->node, curr_dso);
- dso__set_loaded(curr_dso, map->type);
- } else
- curr_dso = curr_map->dso;
-
- goto new_symbol;
- }
-
- if (curr_dso->adjust_symbols) {
- pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
- "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
- (u64)sym.st_value, (u64)shdr.sh_addr,
- (u64)shdr.sh_offset);
- sym.st_value -= shdr.sh_addr - shdr.sh_offset;
- }
- /*
- * We need to figure out if the object was created from C++ sources
- * DWARF DW_compile_unit has this, but we don't always have access
- * to it...
- */
- demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
- if (demangled != NULL)
- elf_name = demangled;
-new_symbol:
- f = symbol__new(sym.st_value, sym.st_size,
- GELF_ST_BIND(sym.st_info), elf_name);
- free(demangled);
- if (!f)
- goto out_elf_end;
-
- if (filter && filter(curr_map, f))
- symbol__delete(f);
- else {
- symbols__insert(&curr_dso->symbols[curr_map->type], f);
- nr++;
- }
- }
-
- /*
- * For misannotated, zeroed, ASM function sizes.
- */
- if (nr > 0) {
- symbols__fixup_duplicate(&dso->symbols[map->type]);
- symbols__fixup_end(&dso->symbols[map->type]);
- if (kmap) {
- /*
- * We need to fixup this here too because we create new
- * maps here, for things like vsyscall sections.
- */
- __map_groups__fixup_end(kmap->kmaps, map->type);
- }
- }
- err = nr;
-out_elf_end:
- elf_end(elf);
-out_close:
- return err;
-}
-
-static bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
+bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}
@@ -1480,216 +919,11 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
return have_build_id;
}
-/*
- * Align offset to 4 bytes as needed for note name and descriptor data.
- */
-#define NOTE_ALIGN(n) (((n) + 3) & -4U)
-
-static int elf_read_build_id(Elf *elf, void *bf, size_t size)
-{
- int err = -1;
- GElf_Ehdr ehdr;
- GElf_Shdr shdr;
- Elf_Data *data;
- Elf_Scn *sec;
- Elf_Kind ek;
- void *ptr;
-
- if (size < BUILD_ID_SIZE)
- goto out;
-
- ek = elf_kind(elf);
- if (ek != ELF_K_ELF)
- goto out;
-
- if (gelf_getehdr(elf, &ehdr) == NULL) {
- pr_err("%s: cannot get elf header.\n", __func__);
- goto out;
- }
-
- /*
- * Check following sections for notes:
- * '.note.gnu.build-id'
- * '.notes'
- * '.note' (VDSO specific)
- */
- do {
- sec = elf_section_by_name(elf, &ehdr, &shdr,
- ".note.gnu.build-id", NULL);
- if (sec)
- break;
-
- sec = elf_section_by_name(elf, &ehdr, &shdr,
- ".notes", NULL);
- if (sec)
- break;
-
- sec = elf_section_by_name(elf, &ehdr, &shdr,
- ".note", NULL);
- if (sec)
- break;
-
- return err;
-
- } while (0);
-
- data = elf_getdata(sec, NULL);
- if (data == NULL)
- goto out;
-
- ptr = data->d_buf;
- while (ptr < (data->d_buf + data->d_size)) {
- GElf_Nhdr *nhdr = ptr;
- size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
- descsz = NOTE_ALIGN(nhdr->n_descsz);
- const char *name;
-
- ptr += sizeof(*nhdr);
- name = ptr;
- ptr += namesz;
- if (nhdr->n_type == NT_GNU_BUILD_ID &&
- nhdr->n_namesz == sizeof("GNU")) {
- if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
- size_t sz = min(size, descsz);
- memcpy(bf, ptr, sz);
- memset(bf + sz, 0, size - sz);
- err = descsz;
- break;
- }
- }
- ptr += descsz;
- }
-
-out:
- return err;
-}
-
-int filename__read_build_id(const char *filename, void *bf, size_t size)
-{
- int fd, err = -1;
- Elf *elf;
-
- if (size < BUILD_ID_SIZE)
- goto out;
-
- fd = open(filename, O_RDONLY);
- if (fd < 0)
- goto out;
-
- elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
- if (elf == NULL) {
- pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
- goto out_close;
- }
-
- err = elf_read_build_id(elf, bf, size);
-
- elf_end(elf);
-out_close:
- close(fd);
-out:
- return err;
-}
-
-int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
-{
- int fd, err = -1;
-
- if (size < BUILD_ID_SIZE)
- goto out;
-
- fd = open(filename, O_RDONLY);
- if (fd < 0)
- goto out;
-
- while (1) {
- char bf[BUFSIZ];
- GElf_Nhdr nhdr;
- size_t namesz, descsz;
-
- if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
- break;
-
- namesz = NOTE_ALIGN(nhdr.n_namesz);
- descsz = NOTE_ALIGN(nhdr.n_descsz);
- if (nhdr.n_type == NT_GNU_BUILD_ID &&
- nhdr.n_namesz == sizeof("GNU")) {
- if (read(fd, bf, namesz) != (ssize_t)namesz)
- break;
- if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
- size_t sz = min(descsz, size);
- if (read(fd, build_id, sz) == (ssize_t)sz) {
- memset(build_id + sz, 0, size - sz);
- err = 0;
- break;
- }
- } else if (read(fd, bf, descsz) != (ssize_t)descsz)
- break;
- } else {
- int n = namesz + descsz;
- if (read(fd, bf, n) != n)
- break;
- }
- }
- close(fd);
-out:
- return err;
-}
-
-static int filename__read_debuglink(const char *filename,
- char *debuglink, size_t size)
-{
- int fd, err = -1;
- Elf *elf;
- GElf_Ehdr ehdr;
- GElf_Shdr shdr;
- Elf_Data *data;
- Elf_Scn *sec;
- Elf_Kind ek;
-
- fd = open(filename, O_RDONLY);
- if (fd < 0)
- goto out;
-
- elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
- if (elf == NULL) {
- pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
- goto out_close;
- }
-
- ek = elf_kind(elf);
- if (ek != ELF_K_ELF)
- goto out_close;
-
- if (gelf_getehdr(elf, &ehdr) == NULL) {
- pr_err("%s: cannot get elf header.\n", __func__);
- goto out_close;
- }
-
- sec = elf_section_by_name(elf, &ehdr, &shdr,
- ".gnu_debuglink", NULL);
- if (sec == NULL)
- goto out_close;
-
- data = elf_getdata(sec, NULL);
- if (data == NULL)
- goto out_close;
-
- /* the start of this section is a zero-terminated string */
- strncpy(debuglink, data->d_buf, size);
-
- elf_end(elf);
-
-out_close:
- close(fd);
-out:
- return err;
-}
-
char dso__symtab_origin(const struct dso *dso)
{
static const char origin[] = {
[DSO_BINARY_TYPE__KALLSYMS] = 'k',
+ [DSO_BINARY_TYPE__VMLINUX] = 'v',
[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
@@ -1700,6 +934,7 @@ char dso__symtab_origin(const struct dso *dso)
[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
+ [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
};
if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
@@ -1775,7 +1010,9 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
default:
case DSO_BINARY_TYPE__KALLSYMS:
+ case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+ case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__JAVA_JIT:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
@@ -1789,11 +1026,12 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
char *name;
int ret = -1;
- int fd;
u_int i;
struct machine *machine;
char *root_dir = (char *) "";
- int want_symtab;
+ int ss_pos = 0;
+ struct symsrc ss_[2];
+ struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
dso__set_loaded(dso, map->type);
@@ -1835,54 +1073,69 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
root_dir = machine->root_dir;
/* Iterate over candidate debug images.
- * On the first pass, only load images if they have a full symtab.
- * Failing that, do a second pass where we accept .dynsym also
+ * Keep track of "interesting" ones (those which have a symtab, dynsym,
+ * and/or opd section) for processing.
*/
- want_symtab = 1;
-restart:
for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
+ struct symsrc *ss = &ss_[ss_pos];
+ bool next_slot = false;
- dso->symtab_type = binary_type_symtab[i];
+ enum dso_binary_type symtab_type = binary_type_symtab[i];
- if (dso__binary_type_file(dso, dso->symtab_type,
+ if (dso__binary_type_file(dso, symtab_type,
root_dir, name, PATH_MAX))
continue;
/* Name is now the name of the next image to try */
- fd = open(name, O_RDONLY);
- if (fd < 0)
+ if (symsrc__init(ss, dso, name, symtab_type) < 0)
continue;
- ret = dso__load_sym(dso, map, name, fd, filter, 0,
- want_symtab);
- close(fd);
+ if (!syms_ss && symsrc__has_symtab(ss)) {
+ syms_ss = ss;
+ next_slot = true;
+ }
- /*
- * Some people seem to have debuginfo files _WITHOUT_ debug
- * info!?!?
- */
- if (!ret)
- continue;
+ if (!runtime_ss && symsrc__possibly_runtime(ss)) {
+ runtime_ss = ss;
+ next_slot = true;
+ }
- if (ret > 0) {
- int nr_plt;
+ if (next_slot) {
+ ss_pos++;
- nr_plt = dso__synthesize_plt_symbols(dso, name, map, filter);
- if (nr_plt > 0)
- ret += nr_plt;
- break;
+ if (syms_ss && runtime_ss)
+ break;
}
+
}
- /*
- * If we wanted a full symtab but no image had one,
- * relax our requirements and repeat the search.
- */
- if (ret <= 0 && want_symtab) {
- want_symtab = 0;
- goto restart;
+ if (!runtime_ss && !syms_ss)
+ goto out_free;
+
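+	/* No image with a full symtab was found; use the runtime image's symbols. */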
+ if (runtime_ss && !syms_ss) {
+ syms_ss = runtime_ss;
+ }
+
+ /* We'll have to hope for the best */
+ if (!runtime_ss && syms_ss)
+ runtime_ss = syms_ss;
+
+ if (syms_ss)
+ ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, 0);
+ else
+ ret = -1;
+
+ if (ret > 0) {
+ int nr_plt;
+
+ nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
+ if (nr_plt > 0)
+ ret += nr_plt;
}
+ for (; ss_pos > 0; ss_pos--)
+ symsrc__destroy(&ss_[ss_pos - 1]);
+out_free:
free(name);
if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
return 0;
@@ -2030,25 +1283,6 @@ static int machine__set_modules_path(struct machine *machine)
return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
}
-/*
- * Constructor variant for modules (where we know from /proc/modules where
- * they are loaded) and for vmlinux, where only after we load all the
- * symbols we'll know where it starts and ends.
- */
-static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
-{
- struct map *map = calloc(1, (sizeof(*map) +
- (dso->kernel ? sizeof(struct kmap) : 0)));
- if (map != NULL) {
- /*
- * ->end will be filled after we load all the symbols
- */
- map__init(map, type, start, 0, 0, dso);
- }
-
- return map;
-}
-
struct map *machine__new_module(struct machine *machine, u64 start,
const char *filename)
{
@@ -2141,22 +1375,30 @@ out_failure:
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, symbol_filter_t filter)
{
- int err = -1, fd;
+ int err = -1;
+ struct symsrc ss;
char symfs_vmlinux[PATH_MAX];
+ enum dso_binary_type symtab_type;
snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
symbol_conf.symfs, vmlinux);
- fd = open(symfs_vmlinux, O_RDONLY);
- if (fd < 0)
+
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
+ else
+ symtab_type = DSO_BINARY_TYPE__VMLINUX;
+
+ if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
return -1;
- dso__set_long_name(dso, (char *)vmlinux);
- dso__set_loaded(dso, map->type);
- err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0);
- close(fd);
+ err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
+ symsrc__destroy(&ss);
- if (err > 0)
+ if (err > 0) {
+ dso__set_long_name(dso, (char *)vmlinux);
+ dso__set_loaded(dso, map->type);
pr_debug("Using %s for symbols\n", symfs_vmlinux);
+ }
return err;
}
@@ -2173,10 +1415,8 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
filename = dso__build_id_filename(dso, NULL, 0);
if (filename != NULL) {
err = dso__load_vmlinux(dso, map, filename, filter);
- if (err > 0) {
- dso__set_long_name(dso, filename);
+ if (err > 0)
goto out;
- }
free(filename);
}
@@ -2291,9 +1531,8 @@ do_kallsyms:
free(kallsyms_allocated_filename);
if (err > 0) {
+ dso__set_long_name(dso, strdup("[kernel.kallsyms]"));
out_fixup:
- if (kallsyms_filename != NULL)
- dso__set_long_name(dso, strdup("[kernel.kallsyms]"));
map__fixup_start(map);
map__fixup_end(map);
}
@@ -2352,12 +1591,12 @@ out_try_fixup:
return err;
}
-static void dsos__add(struct list_head *head, struct dso *dso)
+void dsos__add(struct list_head *head, struct dso *dso)
{
list_add_tail(&dso->node, head);
}
-static struct dso *dsos__find(struct list_head *head, const char *name)
+struct dso *dsos__find(struct list_head *head, const char *name)
{
struct dso *pos;
@@ -2516,7 +1755,7 @@ struct process_args {
};
static int symbol__in_kernel(void *arg, const char *name,
- char type __used, u64 start, u64 end __used)
+ char type __maybe_unused, u64 start)
{
struct process_args *args = arg;
@@ -2752,9 +1991,10 @@ int symbol__init(void)
if (symbol_conf.initialized)
return 0;
- symbol_conf.priv_size = ALIGN(symbol_conf.priv_size, sizeof(u64));
+ symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
+
+ symbol__elf_init();
- elf_version(EV_CURRENT);
if (symbol_conf.sort_by_name)
symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
sizeof(struct symbol));
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 1fe733a1e21f..b441b07172b7 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -10,22 +10,31 @@
#include <linux/rbtree.h>
#include <stdio.h>
#include <byteswap.h>
+#include <libgen.h>
+
+#ifndef NO_LIBELF_SUPPORT
+#include <libelf.h>
+#include <gelf.h>
+#include <elf.h>
+#endif
#ifdef HAVE_CPLUS_DEMANGLE
extern char *cplus_demangle(const char *, int);
-static inline char *bfd_demangle(void __used *v, const char *c, int i)
+static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
-static inline char *bfd_demangle(void __used *v, const char __used *c,
- int __used i)
+static inline char *bfd_demangle(void __maybe_unused *v,
+ const char __maybe_unused *c,
+ int __maybe_unused i)
{
return NULL;
}
#else
+#define PACKAGE "perf"
#include <bfd.h>
#endif
#endif
@@ -158,6 +167,8 @@ struct addr_location {
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
+ DSO_BINARY_TYPE__VMLINUX,
+ DSO_BINARY_TYPE__GUEST_VMLINUX,
DSO_BINARY_TYPE__JAVA_JIT,
DSO_BINARY_TYPE__DEBUGLINK,
DSO_BINARY_TYPE__BUILD_ID_CACHE,
@@ -217,6 +228,36 @@ struct dso {
char name[0];
};
+struct symsrc {
+ char *name;
+ int fd;
+ enum dso_binary_type type;
+
+#ifndef NO_LIBELF_SUPPORT
+ Elf *elf;
+ GElf_Ehdr ehdr;
+
+ Elf_Scn *opdsec;
+ size_t opdidx;
+ GElf_Shdr opdshdr;
+
+ Elf_Scn *symtab;
+ GElf_Shdr symshdr;
+
+ Elf_Scn *dynsym;
+ size_t dynsym_idx;
+ GElf_Shdr dynshdr;
+
+ bool adjust_symbols;
+#endif
+};
+
+void symsrc__destroy(struct symsrc *ss);
+int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
+ enum dso_binary_type type);
+bool symsrc__has_symtab(struct symsrc *ss);
+bool symsrc__possibly_runtime(struct symsrc *ss);
+
#define DSO__SWAP(dso, type, val) \
({ \
type ____r = val; \
@@ -254,6 +295,8 @@ static inline void dso__set_loaded(struct dso *dso, enum map_type type)
void dso__sort_by_name(struct dso *dso, enum map_type type);
+void dsos__add(struct list_head *head, struct dso *dso);
+struct dso *dsos__find(struct list_head *head, const char *name);
struct dso *__dsos__findnew(struct list_head *head, const char *name);
int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter);
@@ -283,6 +326,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
char dso__symtab_origin(const struct dso *dso);
void dso__set_long_name(struct dso *dso, char *name);
void dso__set_build_id(struct dso *dso, void *build_id);
+bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
void dso__read_running_kernel_build_id(struct dso *dso,
struct machine *machine);
struct map *dso__new_map(const char *name);
@@ -297,7 +341,9 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
int build_id__sprintf(const u8 *build_id, int len, char *bf);
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
- char type, u64 start, u64 end));
+ char type, u64 start));
+int filename__read_debuglink(const char *filename, char *debuglink,
+ size_t size);
void machine__destroy_kernel_maps(struct machine *machine);
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
@@ -309,6 +355,8 @@ void machines__destroy_guest_kernel_maps(struct rb_root *machines);
int symbol__init(void);
void symbol__exit(void);
+void symbol__elf_init(void);
+struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
size_t symbol__fprintf_symname_offs(const struct symbol *sym,
const struct addr_location *al, FILE *fp);
size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
@@ -326,4 +374,15 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
struct machine *machine, u64 addr,
u8 *data, ssize_t size);
int dso__test_data(void);
+int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
+ struct symsrc *runtime_ss, symbol_filter_t filter,
+ int kmodule);
+int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss,
+ struct map *map, symbol_filter_t filter);
+
+void symbols__insert(struct rb_root *symbols, struct symbol *sym);
+void symbols__fixup_duplicate(struct rb_root *symbols);
+void symbols__fixup_end(struct rb_root *symbols);
+void __map_groups__fixup_end(struct map_groups *mg, enum map_type type);
+
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 051eaa68095e..065528b7563e 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -117,8 +117,8 @@ int perf_target__strerror(struct perf_target *target, int errnum,
if (err != buf) {
size_t len = strlen(err);
- char *c = mempcpy(buf, err, min(buflen - 1, len));
- *c = '\0';
+ memcpy(buf, err, min(buflen - 1, len));
+ *(buf + min(buflen - 1, len)) = '\0';
}
return 0;
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 70c2c13ff679..f66610b7bacf 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -16,6 +16,8 @@ struct thread {
bool comm_set;
char *comm;
int comm_len;
+
+ void *priv;
};
struct machine;
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 7eeebcee291c..884dde9b9bc1 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -58,8 +58,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
}
if (top->evlist->nr_entries == 1) {
- struct perf_evsel *first;
- first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
+ struct perf_evsel *first = perf_evlist__first(top->evlist);
ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
(uint64_t)first->attr.sample_period,
top->freq ? "Hz" : "");
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 33347ca89ee4..86ff1b15059b 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -5,6 +5,7 @@
#include "types.h"
#include <stddef.h>
#include <stdbool.h>
+#include <termios.h>
struct perf_evlist;
struct perf_evsel;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 0715c843c2e7..3aabcd687cd5 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -162,25 +162,16 @@ int trace_parse_common_pid(struct pevent *pevent, void *data)
return pevent_data_pid(pevent, &record);
}
-unsigned long long read_size(struct pevent *pevent, void *ptr, int size)
+unsigned long long read_size(struct event_format *event, void *ptr, int size)
{
- return pevent_read_number(pevent, ptr, size);
+ return pevent_read_number(event->pevent, ptr, size);
}
-void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
+void event_format__print(struct event_format *event,
+ int cpu, void *data, int size)
{
- struct event_format *event;
struct pevent_record record;
struct trace_seq s;
- int type;
-
- type = trace_parse_common_type(pevent, data);
-
- event = pevent_find_event(pevent, type);
- if (!event) {
- warning("ug! no event found for type %d", type);
- return;
- }
memset(&record, 0, sizeof(record));
record.cpu = cpu;
@@ -192,6 +183,19 @@ void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
trace_seq_do_printf(&s);
}
+void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
+{
+ int type = trace_parse_common_type(pevent, data);
+ struct event_format *event = pevent_find_event(pevent, type);
+
+ if (!event) {
+ warning("ug! no event found for type %d", type);
+ return;
+ }
+
+ event_format__print(event, cpu, data, size);
+}
+
void print_event(struct pevent *pevent, int cpu, void *data, int size,
unsigned long long nsecs, char *comm)
{
@@ -217,7 +221,7 @@ void print_event(struct pevent *pevent, int cpu, void *data, int size,
}
void parse_proc_kallsyms(struct pevent *pevent,
- char *file, unsigned int size __unused)
+ char *file, unsigned int size __maybe_unused)
{
unsigned long long addr;
char *func;
@@ -225,31 +229,29 @@ void parse_proc_kallsyms(struct pevent *pevent,
char *next = NULL;
char *addr_str;
char *mod;
- char ch;
+ char *fmt;
line = strtok_r(file, "\n", &next);
while (line) {
mod = NULL;
- sscanf(line, "%as %c %as\t[%as",
- (float *)(void *)&addr_str, /* workaround gcc warning */
- &ch, (float *)(void *)&func, (float *)(void *)&mod);
+ addr_str = strtok_r(line, " ", &fmt);
addr = strtoull(addr_str, NULL, 16);
- free(addr_str);
-
- /* truncate the extra ']' */
+ /* skip the symbol type character */
+ strtok_r(NULL, " ", &fmt);
+ func = strtok_r(NULL, "\t", &fmt);
+ mod = strtok_r(NULL, "]", &fmt);
+ /* skip the leading '[' of the module name */
if (mod)
- mod[strlen(mod) - 1] = 0;
+ mod = mod + 1;
pevent_register_function(pevent, func, addr, mod);
- free(func);
- free(mod);
line = strtok_r(NULL, "\n", &next);
}
}
void parse_ftrace_printk(struct pevent *pevent,
- char *file, unsigned int size __unused)
+ char *file, unsigned int size __maybe_unused)
{
unsigned long long addr;
char *printk;
@@ -289,7 +291,7 @@ struct event_format *trace_find_next_event(struct pevent *pevent,
{
static int idx;
- if (!pevent->events)
+ if (!pevent || !pevent->events)
return NULL;
if (!event) {
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 474aa7a7df43..8715a1006d00 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -35,12 +35,11 @@ static int stop_script_unsupported(void)
return 0;
}
-static void process_event_unsupported(union perf_event *event __unused,
- struct pevent *pevent __unused,
- struct perf_sample *sample __unused,
- struct perf_evsel *evsel __unused,
- struct machine *machine __unused,
- struct thread *thread __unused)
+static void process_event_unsupported(union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
}
@@ -53,17 +52,19 @@ static void print_python_unsupported_msg(void)
"\n etc.\n");
}
-static int python_start_script_unsupported(const char *script __unused,
- int argc __unused,
- const char **argv __unused)
+static int python_start_script_unsupported(const char *script __maybe_unused,
+ int argc __maybe_unused,
+ const char **argv __maybe_unused)
{
print_python_unsupported_msg();
return -1;
}
-static int python_generate_script_unsupported(struct pevent *pevent __unused,
- const char *outfile __unused)
+static int python_generate_script_unsupported(struct pevent *pevent
+ __maybe_unused,
+ const char *outfile
+ __maybe_unused)
{
print_python_unsupported_msg();
@@ -115,17 +116,18 @@ static void print_perl_unsupported_msg(void)
"\n etc.\n");
}
-static int perl_start_script_unsupported(const char *script __unused,
- int argc __unused,
- const char **argv __unused)
+static int perl_start_script_unsupported(const char *script __maybe_unused,
+ int argc __maybe_unused,
+ const char **argv __maybe_unused)
{
print_perl_unsupported_msg();
return -1;
}
-static int perl_generate_script_unsupported(struct pevent *pevent __unused,
- const char *outfile __unused)
+static int perl_generate_script_unsupported(struct pevent *pevent
+ __maybe_unused,
+ const char *outfile __maybe_unused)
{
print_perl_unsupported_msg();
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 8fef1d6687b7..a55fd37ffea1 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -9,7 +9,6 @@ struct machine;
struct perf_sample;
union perf_event;
struct perf_tool;
-struct thread;
extern int header_page_size_size;
extern int header_page_ts_size;
@@ -32,6 +31,8 @@ int bigendian(void);
struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
void print_trace_event(struct pevent *pevent, int cpu, void *data, int size);
+void event_format__print(struct event_format *event,
+ int cpu, void *data, int size);
void print_event(struct pevent *pevent, int cpu, void *data, int size,
unsigned long long nsecs, char *comm);
@@ -56,7 +57,7 @@ int trace_parse_common_pid(struct pevent *pevent, void *data);
struct event_format *trace_find_next_event(struct pevent *pevent,
struct event_format *event);
-unsigned long long read_size(struct pevent *pevent, void *ptr, int size);
+unsigned long long read_size(struct event_format *event, void *ptr, int size);
unsigned long long eval_flag(const char *flag);
struct pevent_record *trace_read_data(struct pevent *pevent, int cpu);
@@ -74,16 +75,19 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
void tracing_data_put(struct tracing_data *tdata);
+struct addr_location;
+
+struct perf_session;
+
struct scripting_ops {
const char *name;
int (*start_script) (const char *script, int argc, const char **argv);
int (*stop_script) (void);
void (*process_event) (union perf_event *event,
- struct pevent *pevent,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
- struct thread *thread);
+ struct addr_location *al);
int (*generate_script) (struct pevent *pevent, const char *outfile);
};
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c
new file mode 100644
index 000000000000..958723ba3d2e
--- /dev/null
+++ b/tools/perf/util/unwind.c
@@ -0,0 +1,571 @@
+/*
+ * Post mortem Dwarf CFI based unwinding on top of regs and stack dumps.
+ *
+ * Lots of this code has been borrowed from or heavily inspired by parts of
+ * the libunwind 0.99 code which are (amongst other contributors I may have
+ * forgotten):
+ *
+ * Copyright (C) 2002-2007 Hewlett-Packard Co
+ * Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * And the bugs have been added by:
+ *
+ * Copyright (C) 2010, Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2012, Jiri Olsa <jolsa@redhat.com>
+ *
+ */
+
+#include <elf.h>
+#include <gelf.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <linux/list.h>
+#include <libunwind.h>
+#include <libunwind-ptrace.h>
+#include "thread.h"
+#include "session.h"
+#include "perf_regs.h"
+#include "unwind.h"
+#include "util.h"
+
+extern int
+UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+ unw_word_t ip,
+ unw_dyn_info_t *di,
+ unw_proc_info_t *pi,
+ int need_unwind_info, void *arg);
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+#define DW_EH_PE_FORMAT_MASK 0x0f /* format of the encoded value */
+#define DW_EH_PE_APPL_MASK 0x70 /* how the value is to be applied */
+
+/* Pointer-encoding formats: */
+#define DW_EH_PE_omit 0xff
+#define DW_EH_PE_ptr 0x00 /* pointer-sized unsigned value */
+#define DW_EH_PE_udata4 0x03 /* unsigned 32-bit value */
+#define DW_EH_PE_udata8 0x04 /* unsigned 64-bit value */
+#define DW_EH_PE_sdata4 0x0b /* signed 32-bit value */
+#define DW_EH_PE_sdata8 0x0c /* signed 64-bit value */
+
+/* Pointer-encoding application: */
+#define DW_EH_PE_absptr 0x00 /* absolute value */
+#define DW_EH_PE_pcrel 0x10 /* rel. to addr. of encoded value */
+
+/*
+ * The following are not documented by LSB v1.3, yet they are used by
+ * GCC, presumably they aren't documented by LSB since they aren't
+ * used on Linux:
+ */
+#define DW_EH_PE_funcrel 0x40 /* start-of-procedure-relative */
+#define DW_EH_PE_aligned 0x50 /* aligned pointer */
+
+/* Flags intentionally not handled, since they're not needed:
+ * #define DW_EH_PE_indirect 0x80
+ * #define DW_EH_PE_uleb128 0x01
+ * #define DW_EH_PE_udata2 0x02
+ * #define DW_EH_PE_sleb128 0x09
+ * #define DW_EH_PE_sdata2 0x0a
+ * #define DW_EH_PE_textrel 0x20
+ * #define DW_EH_PE_datarel 0x30
+ */
+
+struct unwind_info {
+ struct perf_sample *sample;
+ struct machine *machine;
+ struct thread *thread;
+ u64 sample_uregs;
+};
+
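+/*
+ * Read a 'type'-sized value from 'ptr' and advance it; the early return
+ * bails out of the calling function if the read would run past 'end'.
+ */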
+#define dw_read(ptr, type, end) ({ \
+ type *__p = (type *) ptr; \
+ type __v; \
+ if ((__p + 1) > (type *) end) \
+ return -EINVAL; \
+ __v = *__p++; \
+ ptr = (typeof(ptr)) __p; \
+ __v; \
+ })
+
+static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
+ u8 encoding)
+{
+ u8 *cur = *p;
+ *val = 0;
+
+ switch (encoding) {
+ case DW_EH_PE_omit:
+ *val = 0;
+ goto out;
+ case DW_EH_PE_ptr:
+ *val = dw_read(cur, unsigned long, end);
+ goto out;
+ default:
+ break;
+ }
+
+ switch (encoding & DW_EH_PE_APPL_MASK) {
+ case DW_EH_PE_absptr:
+ break;
+ case DW_EH_PE_pcrel:
+ *val = (unsigned long) cur;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((encoding & 0x07) == 0x00)
+ encoding |= DW_EH_PE_udata4;
+
+ switch (encoding & DW_EH_PE_FORMAT_MASK) {
+ case DW_EH_PE_sdata4:
+ *val += dw_read(cur, s32, end);
+ break;
+ case DW_EH_PE_udata4:
+ *val += dw_read(cur, u32, end);
+ break;
+ case DW_EH_PE_sdata8:
+ *val += dw_read(cur, s64, end);
+ break;
+ case DW_EH_PE_udata8:
+ *val += dw_read(cur, u64, end);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ out:
+ *p = cur;
+ return 0;
+}
+
+#define dw_read_encoded_value(ptr, end, enc) ({ \
+ u64 __v; \
+ if (__dw_read_encoded_value(&ptr, end, &__v, enc)) { \
+ return -EINVAL; \
+ } \
+ __v; \
+ })
+
+static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ GElf_Shdr *shp, const char *name)
+{
+ Elf_Scn *sec = NULL;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ char *str;
+
+ gelf_getshdr(sec, shp);
+ str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
+ if (!strcmp(name, str))
+ break;
+ }
+
+ return sec;
+}
+
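+/* Return the file offset of the named section, or 0 if it is not found. */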
+static u64 elf_section_offset(int fd, const char *name)
+{
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ u64 offset = 0;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return 0;
+
+ do {
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ break;
+
+ if (!elf_section_by_name(elf, &ehdr, &shdr, name))
+ break;
+
+ offset = shdr.sh_offset;
+ } while (0);
+
+ elf_end(elf);
+ return offset;
+}
+
+struct table_entry {
+ u32 start_ip_offset;
+ u32 fde_offset;
+};
+
+struct eh_frame_hdr {
+ unsigned char version;
+ unsigned char eh_frame_ptr_enc;
+ unsigned char fde_count_enc;
+ unsigned char table_enc;
+
+ /*
+ * The rest of the header is variable-length and consists of the
+ * following members:
+ *
+ * encoded_t eh_frame_ptr;
+ * encoded_t fde_count;
+ */
+
+ /* A single encoded pointer should not be more than 8 bytes. */
+ u64 enc[2];
+
+ /*
+ * struct {
+ * encoded_t start_ip;
+ * encoded_t fde_addr;
+ * } binary_search_table[fde_count];
+ */
+ char data[0];
+} __packed;
+
+static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
+ u64 offset, u64 *table_data, u64 *segbase,
+ u64 *fde_count)
+{
+ struct eh_frame_hdr hdr;
+ u8 *enc = (u8 *) &hdr.enc;
+ u8 *end = (u8 *) &hdr.data;
+ ssize_t r;
+
+ r = dso__data_read_offset(dso, machine, offset,
+ (u8 *) &hdr, sizeof(hdr));
+ if (r != sizeof(hdr))
+ return -EINVAL;
+
+ /* We don't need eh_frame_ptr, just skip it. */
+ dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc);
+
+ *fde_count = dw_read_encoded_value(enc, end, hdr.fde_count_enc);
+ *segbase = offset;
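+ /* The binary search table starts right after the two values decoded above. */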
+ *table_data = (enc - (u8 *) &hdr) + offset;
+ return 0;
+}
+
+static int read_unwind_spec(struct dso *dso, struct machine *machine,
+ u64 *table_data, u64 *segbase, u64 *fde_count)
+{
+ int ret = -EINVAL, fd;
+ u64 offset;
+
+ fd = dso__data_fd(dso, machine);
+ if (fd < 0)
+ return -EINVAL;
+
+ offset = elf_section_offset(fd, ".eh_frame_hdr");
+ close(fd);
+
+ if (offset)
+ ret = unwind_spec_ehframe(dso, machine, offset,
+ table_data, segbase,
+ fde_count);
+
+ /* TODO: fall back to .debug_frame if .eh_frame_hdr is not found */
+ return ret;
+}
+
+static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
+{
+ struct addr_location al;
+
+ thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, ip, &al);
+ return al.map;
+}
+
+static int
+find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
+ int need_unwind_info, void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct map *map;
+ unw_dyn_info_t di;
+ u64 table_data, segbase, fde_count;
+
+ map = find_map(ip, ui);
+ if (!map || !map->dso)
+ return -EINVAL;
+
+ pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
+
+ if (read_unwind_spec(map->dso, ui->machine,
+ &table_data, &segbase, &fde_count))
+ return -EINVAL;
+
+ memset(&di, 0, sizeof(di));
+ di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
+ di.start_ip = map->start;
+ di.end_ip = map->end;
+ di.u.rti.segbase = map->start + segbase;
+ di.u.rti.table_data = map->start + table_data;
+ di.u.rti.table_len = fde_count * sizeof(struct table_entry)
+ / sizeof(unw_word_t);
+ return dwarf_search_unwind_table(as, ip, &di, pi,
+ need_unwind_info, arg);
+}
+
+static int access_fpreg(unw_addr_space_t __maybe_unused as,
+ unw_regnum_t __maybe_unused num,
+ unw_fpreg_t __maybe_unused *val,
+ int __maybe_unused __write,
+ void __maybe_unused *arg)
+{
+ pr_err("unwind: access_fpreg unsupported\n");
+ return -UNW_EINVAL;
+}
+
+static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as,
+ unw_word_t __maybe_unused *dil_addr,
+ void __maybe_unused *arg)
+{
+ return -UNW_ENOINFO;
+}
+
+static int resume(unw_addr_space_t __maybe_unused as,
+ unw_cursor_t __maybe_unused *cu,
+ void __maybe_unused *arg)
+{
+ pr_err("unwind: resume unsupported\n");
+ return -UNW_EINVAL;
+}
+
+static int
+get_proc_name(unw_addr_space_t __maybe_unused as,
+ unw_word_t __maybe_unused addr,
+ char __maybe_unused *bufp, size_t __maybe_unused buf_len,
+ unw_word_t __maybe_unused *offp, void __maybe_unused *arg)
+{
+ pr_err("unwind: get_proc_name unsupported\n");
+ return -UNW_EINVAL;
+}
+
+static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
+ unw_word_t *data)
+{
+ struct addr_location al;
+ ssize_t size;
+
+ thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, addr, &al);
+ if (!al.map) {
+ pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
+ return -1;
+ }
+
+ if (!al.map->dso)
+ return -1;
+
+ size = dso__data_read_addr(al.map->dso, al.map, ui->machine,
+ addr, (u8 *) data, sizeof(*data));
+
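+ /* Success only if the full word was read. */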
+ return !(size == sizeof(*data));
+}
+
+static int reg_value(unw_word_t *valp, struct regs_dump *regs, int id,
+ u64 sample_regs)
+{
+ int i, idx = 0;
+
+ if (!(sample_regs & (1 << id)))
+ return -EINVAL;
+
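+ /*
+ * Sampled registers are packed in ascending bit order, so the slot
+ * for 'id' is the number of bits set below it in sample_regs.
+ */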
+ for (i = 0; i < id; i++) {
+ if (sample_regs & (1 << i))
+ idx++;
+ }
+
+ *valp = regs->regs[idx];
+ return 0;
+}
+
+static int access_mem(unw_addr_space_t __maybe_unused as,
+ unw_word_t addr, unw_word_t *valp,
+ int __write, void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct stack_dump *stack = &ui->sample->user_stack;
+ unw_word_t start, end;
+ int offset;
+ int ret;
+
+ /* Don't support write, probably not needed. */
+ if (__write || !stack || !ui->sample->user_regs.regs) {
+ *valp = 0;
+ return 0;
+ }
+
+ ret = reg_value(&start, &ui->sample->user_regs, PERF_REG_SP,
+ ui->sample_uregs);
+ if (ret)
+ return ret;
+
+ end = start + stack->size;
+
+ /* Check overflow. */
+ if (addr + sizeof(unw_word_t) < addr)
+ return -EINVAL;
+
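+ /* Addresses outside the dumped stack window are read from the backing DSO. */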
+ if (addr < start || addr + sizeof(unw_word_t) >= end) {
+ ret = access_dso_mem(ui, addr, valp);
+ if (ret) {
+ pr_debug("unwind: access_mem %p not inside range %p-%p\n",
+ (void *)addr, (void *)start, (void *)end);
+ *valp = 0;
+ return ret;
+ }
+ return 0;
+ }
+
+ offset = addr - start;
+ *valp = *(unw_word_t *)&stack->data[offset];
+ pr_debug("unwind: access_mem addr %p, val %lx, offset %d\n",
+ (void *)addr, (unsigned long)*valp, offset);
+ return 0;
+}
+
+static int access_reg(unw_addr_space_t __maybe_unused as,
+ unw_regnum_t regnum, unw_word_t *valp,
+ int __write, void *arg)
+{
+ struct unwind_info *ui = arg;
+ int id, ret;
+
+ /* Don't support write, I suspect we don't need it. */
+ if (__write) {
+ pr_err("unwind: access_reg w %d\n", regnum);
+ return 0;
+ }
+
+ if (!ui->sample->user_regs.regs) {
+ *valp = 0;
+ return 0;
+ }
+
+ id = unwind__arch_reg_id(regnum);
+ if (id < 0)
+ return -EINVAL;
+
+ ret = reg_value(valp, &ui->sample->user_regs, id, ui->sample_uregs);
+ if (ret) {
+ pr_err("unwind: can't read reg %d\n", regnum);
+ return ret;
+ }
+
+ pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp);
+ return 0;
+}
+
+static void put_unwind_info(unw_addr_space_t __maybe_unused as,
+ unw_proc_info_t *pi __maybe_unused,
+ void *arg __maybe_unused)
+{
+ pr_debug("unwind: put_unwind_info called\n");
+}
+
+static int entry(u64 ip, struct thread *thread, struct machine *machine,
+ unwind_entry_cb_t cb, void *arg)
+{
+ struct unwind_entry e;
+ struct addr_location al;
+
+ thread__find_addr_location(thread, machine,
+ PERF_RECORD_MISC_USER,
+ MAP__FUNCTION, ip, &al, NULL);
+
+ e.ip = ip;
+ e.map = al.map;
+ e.sym = al.sym;
+
+ pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
+ al.sym ? al.sym->name : "''",
+ ip,
+ al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
+
+ return cb(&e, arg);
+}
+
+static void display_error(int err)
+{
+ switch (err) {
+ case UNW_EINVAL:
+ pr_err("unwind: Only supports local.\n");
+ break;
+ case UNW_EUNSPEC:
+ pr_err("unwind: Unspecified error.\n");
+ break;
+ case UNW_EBADREG:
+ pr_err("unwind: Register unavailable.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static unw_accessors_t accessors = {
+ .find_proc_info = find_proc_info,
+ .put_unwind_info = put_unwind_info,
+ .get_dyn_info_list_addr = get_dyn_info_list_addr,
+ .access_mem = access_mem,
+ .access_reg = access_reg,
+ .access_fpreg = access_fpreg,
+ .resume = resume,
+ .get_proc_name = get_proc_name,
+};
+
+static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+ void *arg)
+{
+ unw_addr_space_t addr_space;
+ unw_cursor_t c;
+ int ret;
+
+ addr_space = unw_create_addr_space(&accessors, 0);
+ if (!addr_space) {
+ pr_err("unwind: Can't create unwind address space.\n");
+ return -ENOMEM;
+ }
+
+ ret = unw_init_remote(&c, addr_space, ui);
+ if (ret)
+ display_error(ret);
+
+ while (!ret && (unw_step(&c) > 0)) {
+ unw_word_t ip;
+
+ unw_get_reg(&c, UNW_REG_IP, &ip);
+ ret = entry(ip, ui->thread, ui->machine, cb, arg);
+ }
+
+ unw_destroy_addr_space(addr_space);
+ return ret;
+}
+
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct machine *machine, struct thread *thread,
+ u64 sample_uregs, struct perf_sample *data)
+{
+ unw_word_t ip;
+ struct unwind_info ui = {
+ .sample = data,
+ .sample_uregs = sample_uregs,
+ .thread = thread,
+ .machine = machine,
+ };
+ int ret;
+
+ if (!data->user_regs.regs)
+ return -EINVAL;
+
+ ret = reg_value(&ip, &data->user_regs, PERF_REG_IP, sample_uregs);
+ if (ret)
+ return ret;
+
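+ /* The sampled IP gives the innermost frame; the rest come from unw_step(). */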
+ ret = entry(ip, thread, machine, cb, arg);
+ if (ret)
+ return -ENOMEM;
+
+ return get_entries(&ui, cb, arg);
+}
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
new file mode 100644
index 000000000000..a78c8b303bb5
--- /dev/null
+++ b/tools/perf/util/unwind.h
@@ -0,0 +1,35 @@
+#ifndef __UNWIND_H
+#define __UNWIND_H
+
+#include "types.h"
+#include "event.h"
+#include "symbol.h"
+
+struct unwind_entry {
+ struct map *map;
+ struct symbol *sym;
+ u64 ip;
+};
+
+typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
+
+#ifndef NO_LIBUNWIND_SUPPORT
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct machine *machine,
+ struct thread *thread,
+ u64 sample_uregs,
+ struct perf_sample *data);
+int unwind__arch_reg_id(int regnum);
+#else
+static inline int
+unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
+ void *arg __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct thread *thread __maybe_unused,
+ u64 sample_uregs __maybe_unused,
+ struct perf_sample *data __maybe_unused)
+{
+ return 0;
+}
+#endif /* NO_LIBUNWIND_SUPPORT */
+#endif /* __UNWIND_H */
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index d03599fbe78b..2055cf38041c 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -1,6 +1,11 @@
#include "../perf.h"
#include "util.h"
#include <sys/mman.h>
+#ifndef NO_BACKTRACE
+#include <execinfo.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
/*
* XXX We need to find a better place for these things...
@@ -158,3 +163,23 @@ size_t hex_width(u64 v)
return n;
}
+
+/* Obtain a backtrace and print it to stdout. */
+#ifndef NO_BACKTRACE
+void dump_stack(void)
+{
+ void *array[16];
+ size_t size = backtrace(array, ARRAY_SIZE(array));
+ char **strings = backtrace_symbols(array, size);
+ size_t i;
+
+ printf("Obtained %zd stack frames.\n", size);
+
+ for (i = 0; i < size; i++)
+ printf("%s\n", strings[i]);
+
+ free(strings);
+}
+#else
+void dump_stack(void) {}
+#endif
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index b13c7331eaf8..70fa70b535b2 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -69,13 +69,8 @@
#include <sys/poll.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
-#include <sys/select.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <arpa/inet.h>
-#include <netdb.h>
#include <inttypes.h>
-#include "../../../include/linux/magic.h"
+#include <linux/magic.h>
#include "types.h"
#include <sys/ttydefaults.h>
@@ -266,4 +261,6 @@ size_t hex_width(u64 v);
char *rtrim(char *s);
+void dump_stack(void);
+
#endif
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
new file mode 100644
index 000000000000..e60951fcdb12
--- /dev/null
+++ b/tools/perf/util/vdso.c
@@ -0,0 +1,111 @@
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <linux/kernel.h>
+
+#include "vdso.h"
+#include "util.h"
+#include "symbol.h"
+#include "linux/string.h"
+
+static bool vdso_found;
+static char vdso_file[] = "/tmp/perf-vdso.so-XXXXXX";
+
+static int find_vdso_map(void **start, void **end)
+{
+ FILE *maps;
+ char line[128];
+ int found = 0;
+
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps) {
+ pr_err("vdso: cannot open maps\n");
+ return -1;
+ }
+
+ while (!found && fgets(line, sizeof(line), maps)) {
+ int m = -1;
+
+ /* We care only about private r-x mappings. */
+ if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n",
+ start, end, &m))
+ continue;
+ if (m < 0)
+ continue;
+
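+ /* %n recorded where the mapping name starts; compare it against "[vdso]". */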
+ if (!strncmp(&line[m], VDSO__MAP_NAME,
+ sizeof(VDSO__MAP_NAME) - 1))
+ found = 1;
+ }
+
+ fclose(maps);
+ return !found;
+}
+
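+/*
+ * Copy the in-memory [vdso] image into a temporary file so the rest of
+ * the symbol code can treat it like any other DSO on disk.
+ */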
+static char *get_file(void)
+{
+ char *vdso = NULL;
+ char *buf = NULL;
+ void *start, *end;
+ size_t size;
+ int fd;
+
+ if (vdso_found)
+ return vdso_file;
+
+ if (find_vdso_map(&start, &end))
+ return NULL;
+
+ size = end - start;
+
+ buf = memdup(start, size);
+ if (!buf)
+ return NULL;
+
+ fd = mkstemp(vdso_file);
+ if (fd < 0)
+ goto out;
+
+ if (size == (size_t) write(fd, buf, size))
+ vdso = vdso_file;
+
+ close(fd);
+
+ out:
+ free(buf);
+
+ vdso_found = (vdso != NULL);
+ return vdso;
+}
+
+void vdso__exit(void)
+{
+ if (vdso_found)
+ unlink(vdso_file);
+}
+
+struct dso *vdso__dso_findnew(struct list_head *head)
+{
+ struct dso *dso = dsos__find(head, VDSO__MAP_NAME);
+
+ if (!dso) {
+ char *file;
+
+ file = get_file();
+ if (!file)
+ return NULL;
+
+ dso = dso__new(VDSO__MAP_NAME);
+ if (dso != NULL) {
+ dsos__add(head, dso);
+ dso__set_long_name(dso, file);
+ }
+ }
+
+ return dso;
+}
diff --git a/tools/perf/util/vdso.h b/tools/perf/util/vdso.h
new file mode 100644
index 000000000000..0f76e7caf6f8
--- /dev/null
+++ b/tools/perf/util/vdso.h
@@ -0,0 +1,18 @@
+#ifndef __PERF_VDSO__
+#define __PERF_VDSO__
+
+#include <linux/types.h>
+#include <string.h>
+#include <stdbool.h>
+
+#define VDSO__MAP_NAME "[vdso]"
+
+static inline bool is_vdso_map(const char *filename)
+{
+ return !strcmp(filename, VDSO__MAP_NAME);
+}
+
+struct dso *vdso__dso_findnew(struct list_head *head);
+void vdso__exit(void);
+
+#endif /* __PERF_VDSO__ */
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c
index 73e900edb5a2..19f15b650703 100644
--- a/tools/perf/util/wrapper.c
+++ b/tools/perf/util/wrapper.c
@@ -7,7 +7,8 @@
* There's no pack memory to release - but stay close to the Git
* version so wrap this away:
*/
-static inline void release_pack_memory(size_t size __used, int flag __used)
+static inline void release_pack_memory(size_t size __maybe_unused,
+ int flag __maybe_unused)
{
}