author      Arnaldo Carvalho de Melo <acme@redhat.com>   2011-01-12 13:52:47 +0100
committer   Arnaldo Carvalho de Melo <acme@redhat.com>   2011-01-22 22:56:29 +0100
commit      72cb7013e08dec29631e0447f9496b7bacd3e14b (patch)
tree        a26ccf04710cdc06f03fffafe5a09f4f6503abf4
parent      perf evsel: Allow specifying if the inherit bit should be set (diff)
perf top: Use perf_evsel__open
Now that perf_evsel__open() handles group_fd and inherit, 'perf top' can use it
too, sharing the counter-opening code with 'perf stat'.
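
For context, a minimal, self-contained sketch (illustrative only, not part of this
patch) of what perf_evsel__open() wraps for each monitored (cpu, thread) pair: a
sys_perf_event_open() call, with the same hardware-cycles to cpu-clock fallback
that 'perf top' performs when the PMU event is rejected.

/*
 * Illustrative sketch only -- not from this patch.  Opens a single counter
 * the way the perf tools do: via the perf_event_open syscall, falling back
 * from hardware cycles to the software cpu-clock event if the PMU refuses.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size	 = sizeof(attr);
	attr.type	 = PERF_TYPE_HARDWARE;
	attr.config	 = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/* current task, any cpu, no group leader (-1), no flags */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		/* the same fallback 'perf top' uses: sw cpu-clock counter */
		attr.type   = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CPU_CLOCK;
		fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	}
	if (fd < 0) {
		perror("sys_perf_event_open");
		return 1;
	}
	printf("counter opened, fd=%d\n", fd);
	close(fd);
	return 0;
}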
Next step: 'perf record' should use it as well, then the mmap_array can move out
of ->priv and into perf_evsel, with top and record sharing that code, and, at the
same time, a 'perf test' stress test can be written.
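
Purely as a sketch of that next step (struct and field names here are assumptions
made up for illustration, not code from this or any follow-up patch), the
per-(cpu, thread) mmap bookkeeping that builtin-top currently reaches through
evsel->priv could live next to the fd array inside the evsel itself, so top and
record would consume the ring buffers through the same structure:

/* Hypothetical layout, for illustration only. */
#include <linux/perf_event.h>

struct xyarray;				/* 2-D container indexed by (cpu, thread) */

struct mmap_data {
	void		*base;		/* mmap()ed ring buffer */
	unsigned int	 mask;		/* mmap_pages * page_size - 1 */
	unsigned long	 prev;		/* last head position consumed */
};

struct perf_evsel_sketch {
	struct perf_event_attr	 attr;
	struct xyarray		*fd;	/* one fd per (cpu, thread) */
	struct xyarray		*mmap;	/* one struct mmap_data per (cpu, thread),
					 * rather than hiding it behind ->priv */
};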
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--   tools/perf/builtin-top.c   92
1 file changed, 42 insertions, 50 deletions
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1bc465215fc6..15d89bede2fb 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1210,39 +1210,50 @@ static void perf_session__mmap_read(struct perf_session *self)
 	}
 }
 
-int group_fd;
-
 static void start_counter(int i, struct perf_evlist *evlist,
 			  struct perf_evsel *evsel)
 {
 	struct xyarray *mmap_array = evsel->priv;
 	struct mmap_data *mm;
-	struct perf_event_attr *attr;
-	int cpu = -1;
 	int thread_index;
 
-	if (target_tid == -1)
-		cpu = cpus->map[i];
-
-	attr = &evsel->attr;
+	for (thread_index = 0; thread_index < threads->nr; thread_index++) {
+		assert(FD(evsel, i, thread_index) >= 0);
+		fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
 
-	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+		evlist->pollfd[evlist->nr_fds].fd = FD(evsel, i, thread_index);
+		evlist->pollfd[evlist->nr_fds].events = POLLIN;
+		evlist->nr_fds++;
 
-	if (freq) {
-		attr->sample_type |= PERF_SAMPLE_PERIOD;
-		attr->freq = 1;
-		attr->sample_freq = freq;
+		mm = xyarray__entry(mmap_array, i, thread_index);
+		mm->prev = 0;
+		mm->mask = mmap_pages*page_size - 1;
+		mm->base = mmap(NULL, (mmap_pages+1)*page_size,
+				PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
+		if (mm->base == MAP_FAILED)
+			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
 	}
+}
+
+static void start_counters(struct perf_evlist *evlist)
+{
+	struct perf_evsel *counter;
+	int i;
 
-	attr->inherit = (cpu < 0) && inherit;
-	attr->mmap = 1;
+	list_for_each_entry(counter, &evlist->entries, node) {
+		struct perf_event_attr *attr = &counter->attr;
 
-	for (thread_index = 0; thread_index < threads->nr; thread_index++) {
-try_again:
-		FD(evsel, i, thread_index) = sys_perf_event_open(attr,
-				threads->map[thread_index], cpu, group_fd, 0);
+		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+		if (freq) {
+			attr->sample_type |= PERF_SAMPLE_PERIOD;
+			attr->freq = 1;
+			attr->sample_freq = freq;
+		}
 
-		if (FD(evsel, i, thread_index) < 0) {
+		attr->mmap = 1;
+try_again:
+		if (perf_evsel__open(counter, cpus, threads, group, inherit) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES)
@@ -1254,8 +1265,8 @@ try_again:
 			 * based cpu-clock-tick sw counter, which
 			 * is always available even if no PMU support:
 			 */
-			if (attr->type == PERF_TYPE_HARDWARE
-			    && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+			if (attr->type == PERF_TYPE_HARDWARE &&
+			    attr->config == PERF_COUNT_HW_CPU_CYCLES) {
 				if (verbose)
 					warning(" ... trying to fall back to cpu-clock-ticks\n");
 
@@ -1265,39 +1276,24 @@ try_again:
 				goto try_again;
 			}
 			printf("\n");
-			error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
-			      FD(evsel, i, thread_index), strerror(err));
+			error("sys_perf_event_open() syscall returned with %d "
+			      "(%s).  /bin/dmesg may provide additional information.\n",
+			      err, strerror(err));
 			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
 			exit(-1);
 		}
-		assert(FD(evsel, i, thread_index) >= 0);
-		fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
-
-		/*
-		 * First counter acts as the group leader:
-		 */
-		if (group && group_fd == -1)
-			group_fd = FD(evsel, i, thread_index);
-
-		evlist->pollfd[evlist->nr_fds].fd = FD(evsel, i, thread_index);
-		evlist->pollfd[evlist->nr_fds].events = POLLIN;
-		evlist->nr_fds++;
+	}
 
-		mm = xyarray__entry(mmap_array, i, thread_index);
-		mm->prev = 0;
-		mm->mask = mmap_pages*page_size - 1;
-		mm->base = mmap(NULL, (mmap_pages+1)*page_size,
-				PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
-		if (mm->base == MAP_FAILED)
-			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+	for (i = 0; i < cpus->nr; i++) {
+		list_for_each_entry(counter, &evlist->entries, node)
+			start_counter(i, evsel_list, counter);
 	}
 }
 
 static int __cmd_top(void)
 {
 	pthread_t thread;
-	struct perf_evsel *counter;
-	int i, ret;
+	int ret;
 	/*
 	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
 	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
@@ -1311,11 +1307,7 @@ static int __cmd_top(void)
 	else
 		event__synthesize_threads(event__process, session);
 
-	for (i = 0; i < cpus->nr; i++) {
-		group_fd = -1;
-		list_for_each_entry(counter, &evsel_list->entries, node)
-			start_counter(i, evsel_list, counter);
-	}
+	start_counters(evsel_list);
 
 	/* Wait for a minimal set of events before starting the snapshot */
 	poll(evsel_list->pollfd, evsel_list->nr_fds, 100);