author | Masami Hiramatsu <mhiramat@kernel.org> | 2018-04-25 14:19:59 +0200 |
---|---|---|
committer | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2018-10-11 04:19:09 +0200 |
commit | 9b960a38835fcaf977f20dcc34ce9e54ff9563bd (patch) | |
tree | 667aad6f436e5228009e5eedeb5600d43148088a /kernel/trace/trace_probe_tmpl.h | |
parent | tracing: probeevent: Append traceprobe_ for exported function (diff) | |
download | linux-9b960a38835fcaf977f20dcc34ce9e54ff9563bd.tar.xz linux-9b960a38835fcaf977f20dcc34ce9e54ff9563bd.zip |
tracing: probeevent: Unify fetch_insn processing common part
Unify the bottom half of fetch_insn processing (from stage 2: dereferencing
indirect data) between kprobe and uprobe events, since the two are mostly
the same.
Link: http://lkml.kernel.org/r/152465879965.26224.8547240824606804815.stgit@devbox
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_probe_tmpl.h')
-rw-r--r-- | kernel/trace/trace_probe_tmpl.h | 55 |
1 file changed, 54 insertions, 1 deletion
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
index 3b4aba6f84cc..b4075f3e3a29 100644
--- a/kernel/trace/trace_probe_tmpl.h
+++ b/kernel/trace/trace_probe_tmpl.h
@@ -49,13 +49,66 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf)
 }
 
 /*
- * This must be defined for each callsite.
+ * These functions must be defined for each callsite.
  * Return consumed dynamic data size (>= 0), or error (< 0).
  * If dest is NULL, don't store result and return required dynamic data size.
  */
 static int
 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
 		   void *dest, void *base);
+static nokprobe_inline int fetch_store_strlen(unsigned long addr);
+static nokprobe_inline int
+fetch_store_string(unsigned long addr, void *dest, void *base);
+static nokprobe_inline int
+probe_mem_read(void *dest, void *src, size_t size);
+
+/* From the 2nd stage, routine is same */
+static nokprobe_inline int
+process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
+			   void *dest, void *base)
+{
+	int ret = 0;
+
+	/* 2nd stage: dereference memory if needed */
+	while (code->op == FETCH_OP_DEREF) {
+		ret = probe_mem_read(&val, (void *)val + code->offset,
+					sizeof(val));
+		if (ret)
+			return ret;
+		code++;
+	}
+
+	/* 3rd stage: store value to buffer */
+	if (unlikely(!dest)) {
+		if (code->op == FETCH_OP_ST_STRING)
+			return fetch_store_strlen(val + code->offset);
+		else
+			return -EILSEQ;
+	}
+
+	switch (code->op) {
+	case FETCH_OP_ST_RAW:
+		fetch_store_raw(val, code, dest);
+		break;
+	case FETCH_OP_ST_MEM:
+		probe_mem_read(dest, (void *)val + code->offset, code->size);
+		break;
+	case FETCH_OP_ST_STRING:
+		ret = fetch_store_string(val + code->offset, dest, base);
+		break;
+	default:
+		return -EILSEQ;
+	}
+	code++;
+
+	/* 4th stage: modify stored value if needed */
+	if (code->op == FETCH_OP_MOD_BF) {
+		fetch_apply_bitfield(code, dest);
+		code++;
+	}
+
+	return code->op == FETCH_OP_END ? ret : -EILSEQ;
+}
 
 /* Sum up total data length for dynamic arraies (strings) */
 static nokprobe_inline int
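
For context, a minimal sketch of how a per-callsite process_fetch_insn() pairs with the unified helper after this change: stage 1 (reading the raw value out of the probed context) stays event-specific, and everything from stage 2 onward is delegated to process_fetch_insn_bottom(). This is loosely modeled on the kprobe-side callsite; the particular FETCH_OP_* cases and accessors shown here (regs_get_register(), regs_get_kernel_stack_nth(), regs_return_value()) are illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch only -- not part of this patch.  Stage 1 resolves
 * the raw value from the probe context; stages 2-4 (dereference, store,
 * bitfield) are handled by the shared process_fetch_insn_bottom() above.
 */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
		   void *dest, void *base)
{
	unsigned long val;

	/* 1st stage: get the value from the probed context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 2nd stage onward is now common to kprobe and uprobe events */
	return process_fetch_insn_bottom(code, val, dest, base);
}

The uprobe-side callsite keeps its own stage 1 and its own fetch_store_strlen()/fetch_store_string()/probe_mem_read() helpers (declared per callsite in the hunk above), which is what lets the control flow of stages 2-4 live in a single shared routine.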