author | Arnaldo Carvalho de Melo <acme@redhat.com> | 2017-10-06 15:31:47 +0200 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2017-10-23 16:20:53 +0200 |
commit | 1695849735752d2ace22d8c424ba579e33df691c (patch) | |
tree | 83133ee193eafdaf8802fc0465c85964adf47974 /tools/perf/util/mmap.h | |
parent | perf vendor events: Update JSON metrics for Skylake Server (diff) | |
download | linux-1695849735752d2ace22d8c424ba579e33df691c.tar.xz linux-1695849735752d2ace22d8c424ba579e33df691c.zip |
perf mmap: Move perf_mmap and methods to separate mmap.[ch] files
To better organize the sources, and because we may end up using it
directly, without evlists and evsels.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-oiqrm7grflurnnzo2ovfnslg@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util/mmap.h')
-rw-r--r-- | tools/perf/util/mmap.h | 94 |
1 file changed, 94 insertions, 0 deletions
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
new file mode 100644
index 000000000000..f37ff45c8ec1
--- /dev/null
+++ b/tools/perf/util/mmap.h
@@ -0,0 +1,94 @@
+#ifndef __PERF_MMAP_H
+#define __PERF_MMAP_H 1
+
+#include <linux/compiler.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <stdbool.h>
+#include "auxtrace.h"
+#include "event.h"
+
+/**
+ * struct perf_mmap - perf's ring buffer mmap details
+ *
+ * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
+ */
+struct perf_mmap {
+	void		 *base;
+	int		 mask;
+	int		 fd;
+	refcount_t	 refcnt;
+	u64		 prev;
+	struct auxtrace_mmap auxtrace_mmap;
+	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+};
+
+/*
+ * State machine of bkw_mmap_state:
+ *
+ *                     .________________(forbid)_____________.
+ *                     |                                      V
+ *  NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
+ *                     ^  ^              |   ^               |
+ *                     |  |__(forbid)____/   |___(forbid)___/|
+ *                     |                                     |
+ *                      \_________________(3)_______________/
+ *
+ * NOTREADY     : Backward ring buffers are not ready
+ * RUNNING      : Backward ring buffers are recording
+ * DATA_PENDING : We are required to collect data from backward ring buffers
+ * EMPTY        : We have collected data from backward ring buffers.
+ *
+ * (0): Setup backward ring buffer
+ * (1): Pause ring buffers for reading
+ * (2): Read from ring buffers
+ * (3): Resume ring buffers for recording
+ */
+enum bkw_mmap_state {
+	BKW_MMAP_NOTREADY,
+	BKW_MMAP_RUNNING,
+	BKW_MMAP_DATA_PENDING,
+	BKW_MMAP_EMPTY,
+};
+
+struct mmap_params {
+	int			    prot, mask;
+	struct auxtrace_mmap_params auxtrace_mp;
+};
+
+int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd);
+void perf_mmap__munmap(struct perf_mmap *map);
+
+void perf_mmap__get(struct perf_mmap *map);
+void perf_mmap__put(struct perf_mmap *map);
+
+void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
+
+void perf_mmap__read_catchup(struct perf_mmap *md);
+
+static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
+{
+	struct perf_event_mmap_page *pc = mm->base;
+	u64 head = ACCESS_ONCE(pc->data_head);
+	rmb();
+	return head;
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+{
+	struct perf_event_mmap_page *pc = md->base;
+
+	/*
+	 * ensure all reads are done before we write the tail out.
+	 */
+	mb();
+	pc->data_tail = tail;
+}
+
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
+union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
+
+size_t perf_mmap__mmap_len(struct perf_mmap *map);
+
+#endif /*__PERF_MMAP_H */
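
For readers unfamiliar with the protocol that perf_mmap__read_head() and perf_mmap__write_tail() encapsulate, below is a minimal user-space sketch of a forward ring-buffer drain loop. It is not part of the patch: handle_record() is a hypothetical callback, the code works on the raw mmap'ed perf_event_mmap_page rather than on struct perf_mmap, and it substitutes GCC __atomic builtins for the kernel's ACCESS_ONCE()/rmb()/mb() helpers.

```c
#include <linux/perf_event.h>
#include <stdint.h>
#include <stddef.h>
#include <unistd.h>

/* Hypothetical consumer callback; stands in for whatever processes records. */
static void handle_record(struct perf_event_header *hdr)
{
	(void)hdr;
}

/*
 * base points at the mmap'ed region: one metadata page followed by
 * (mask + 1) bytes of data, where mask + 1 is a power of two.
 * For brevity this sketch assumes no record straddles the end of the
 * buffer; handling that wrap-around is what event_copy in struct
 * perf_mmap exists for.
 */
static void drain_ring_buffer(void *base, size_t mask)
{
	struct perf_event_mmap_page *pc = base;
	unsigned char *data = (unsigned char *)base + sysconf(_SC_PAGESIZE);
	uint64_t head, tail;

	/* Read data_head once and order subsequent data reads after it,
	 * like ACCESS_ONCE() + rmb() in perf_mmap__read_head() above. */
	head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
	tail = pc->data_tail;

	while (tail < head) {
		struct perf_event_header *hdr =
			(struct perf_event_header *)&data[tail & mask];

		handle_record(hdr);
		tail += hdr->size;
	}

	/* Publish the new tail only after all reads of the data are done,
	 * mirroring the mb() before the store in perf_mmap__write_tail(). */
	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}
```

The acquire load on data_head pairs with the kernel publishing new records, and the release store on data_tail tells the kernel the consumed space may be reused; this is the same ordering the rmb()/mb() calls in the header enforce.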