author		Kan Liang <kan.liang@intel.com>			2018-01-18 22:26:23 +0100
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2018-02-15 13:53:40 +0100
commit		7bb45972952db9298fe5cc440160dcad1a66bfbc
tree		c7642dbb2f3e4c5a9be33ac4c891c102b3424928
parent		perf mmap: Introduce perf_mmap__read_done()
perf mmap: Introduce perf_mmap__read_event()
Except for 'perf record', the other perf tools read events one by one
from the ring buffer using perf_mmap__read_forward(), which only
supports non-overwrite mode.
Introduce perf_mmap__read_event() to support both non-overwrite and
overwrite modes.
Usage:
perf_mmap__read_init()
while(event = perf_mmap__read_event()) {
//process the event
perf_mmap__consume()
}
perf_mmap__read_done()
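To make the pattern above concrete, here is a minimal caller-side sketch. It assumes the perf_mmap__read_init()/perf_mmap__consume()/perf_mmap__read_done() interfaces as introduced earlier in this series (start/end passed by pointer, an 'overwrite' flag on consume); those signatures changed in later kernels, and the drain_mmap() helper itself is illustrative, not part of this patch.

```c
/* Hypothetical caller, not part of this patch. */
#include "util/mmap.h"	/* perf_mmap__read_*() declarations */

static void drain_mmap(struct perf_mmap *map, bool overwrite)
{
	union perf_event *event;
	u64 start, end;

	/* Snapshot the readable region; in overwrite mode this pauses the ring buffer. */
	if (perf_mmap__read_init(map, overwrite, &start, &end) < 0)
		return;	/* nothing to read */

	while ((event = perf_mmap__read_event(map, overwrite, &start, end)) != NULL) {
		/* ... process the event here ... */
		perf_mmap__consume(map, overwrite);
	}

	/* In overwrite mode, unpause the ring buffer. */
	perf_mmap__read_done(map);
}
```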
It cannot use perf_mmap__read_backward(), because that always re-reads
the stale buffer which has already been processed. Furthermore, the
forward and backward concepts have been removed; perf_mmap__read_backward()
will be replaced and discarded later.
Signed-off-by: Kan Liang <kan.liang@intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1516310792-208685-9-git-send-email-kan.liang@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--	tools/perf/util/mmap.c	| 39
-rw-r--r--	tools/perf/util/mmap.h	|  4
2 files changed, 43 insertions, 0 deletions
```diff
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 4f59eaefc706..f804926778b7 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -113,6 +113,45 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
 	return perf_mmap__read(map, &map->prev, end);
 }
 
+/*
+ * Read event from ring buffer one by one.
+ * Return one event for each call.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while(event = perf_mmap__read_event()) {
+ *	//process the event
+ *	perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map,
+					bool overwrite,
+					u64 *startp, u64 end)
+{
+	union perf_event *event;
+
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!refcount_read(&map->refcnt))
+		return NULL;
+
+	if (startp == NULL)
+		return NULL;
+
+	/* non-overwirte doesn't pause the ringbuffer */
+	if (!overwrite)
+		end = perf_mmap__read_head(map);
+
+	event = perf_mmap__read(map, startp, end);
+
+	if (!overwrite)
+		map->prev = *startp;
+
+	return event;
+}
+
 void perf_mmap__read_catchup(struct perf_mmap *map)
 {
 	u64 head;
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 95549d4af943..28718543dd42 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -89,6 +89,10 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
 union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
 union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
 
+union perf_event *perf_mmap__read_event(struct perf_mmap *map,
+					bool overwrite,
+					u64 *startp, u64 end);
+
 int perf_mmap__push(struct perf_mmap *md, bool backward,
 		    void *to, int push(void *to, void *buf, size_t size));
```
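One design point worth noting in the hunk above: in non-overwrite mode the readable end is refreshed from the ring-buffer head on every call, so events that arrive while the caller is looping are still returned, whereas in overwrite mode the end captured by perf_mmap__read_init() stays fixed because the buffer is paused. The sketch below restates just that branch in isolation; resolve_end() is a hypothetical name used for illustration, not a function in tools/perf.

```c
/*
 * Illustrative restatement of the end-selection logic inside
 * perf_mmap__read_event(); resolve_end() is hypothetical.
 */
static u64 resolve_end(struct perf_mmap *map, bool overwrite, u64 init_end)
{
	if (!overwrite) {
		/* Non-overwrite: the kernel keeps appending, so re-read the head. */
		return perf_mmap__read_head(map);
	}

	/* Overwrite: the buffer was paused by perf_mmap__read_init(); keep the snapshot. */
	return init_end;
}
```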