author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2015-09-29 21:38:55 +0200
committer	Steven Rostedt <rostedt@goodmis.org>		2015-09-29 21:38:55 +0200
commit		73dddbb57bb08d465dd0ecab93db0c5209e50cfe
tree		63f5adc345c08a15d42b9dff1d9d394ceddb1740 /kernel/trace
parent		tracing: Do not create function tracer options when not compiled in
tracing: Only create stacktrace option when STACKTRACE is configured
Only create the stacktrace trace option when CONFIG_STACKTRACE is
configured.
Cleaned up the ftrace_trace_stack() function call a little to allow better
encapsulation of the stacktrace trace flag.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
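For context, the change relies on the X-macro pattern used for the trace options: a flag entry is only emitted into the option list when its CONFIG_* symbol is defined, so a kernel built without CONFIG_STACKTRACE never exposes a "stacktrace" option. Below is a minimal, self-contained sketch of that pattern, not part of the patch; DEMO_FLAGS and demo_flag_names are invented names for illustration, while the kernel's real list is TRACE_FLAGS in kernel/trace/trace.h.

/*
 * Illustrative sketch only -- not part of the patch.  DEMO_FLAGS and
 * demo_flag_names are made-up names; the kernel's real list is TRACE_FLAGS.
 */
#include <stdio.h>

#define CONFIG_STACKTRACE	/* pretend the kernel config option is set */

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS	C(STACKTRACE, "stacktrace"),
#else
# define STACK_FLAGS
#endif

/* The option list: STACK_FLAGS expands to an entry or to nothing. */
#define DEMO_FLAGS			\
	C(PRINTK, "trace_printk"),	\
	STACK_FLAGS			\
	C(MARKERS, "markers"),

/* First expansion of C(): enum of bit positions. */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT
enum demo_iterator_bits { DEMO_FLAGS TRACE_ITER_LAST_BIT };

/* Second expansion of C(): user-visible option names. */
#undef C
#define C(a, b) b
static const char *demo_flag_names[] = { DEMO_FLAGS NULL };

int main(void)
{
	/* Only options whose CONFIG_* symbol is set show up here. */
	for (int i = 0; demo_flag_names[i]; i++)
		printf("option %d: %s\n", i, demo_flag_names[i]);
	return 0;
}

The same trick is what lets the STACK_FLAGS macro added by this patch collapse to nothing when CONFIG_STACKTRACE is not set, so the option never appears in the trace_options interface.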
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace.c	28
-rw-r--r--	kernel/trace/trace.h	 9
2 files changed, 23 insertions, 14 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index cb223ad51cdf..865f3fad9ff0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -472,8 +472,9 @@ static inline void trace_access_lock_init(void)
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs);
-static void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
-			       int skip, int pc);
+static inline void ftrace_trace_stack(struct ring_buffer *buffer,
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs);
 
 #else
 
@@ -482,7 +483,8 @@ static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
 {
 }
 static inline void ftrace_trace_stack(struct ring_buffer *buffer,
-				      unsigned long flags, int skip, int pc)
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs)
 {
 }
 
@@ -571,7 +573,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
 		entry->buf[size] = '\0';
 
 	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(buffer, irq_flags, 4, pc);
+	ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);
 
 	return size;
 }
@@ -611,7 +613,7 @@ int __trace_bputs(unsigned long ip, const char *str)
 	entry->str			= str;
 
 	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(buffer, irq_flags, 4, pc);
+	ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);
 
 	return 1;
 }
@@ -1685,7 +1687,7 @@ void trace_buffer_unlock_commit(struct trace_array *tr,
 {
 	__buffer_unlock_commit(buffer, event);
 
-	ftrace_trace_stack(buffer, flags, 6, pc);
+	ftrace_trace_stack(buffer, flags, 6, pc, NULL);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
@@ -1737,8 +1739,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 {
 	__buffer_unlock_commit(buffer, event);
 
-	if (trace_flags & TRACE_ITER_STACKTRACE)
-		__ftrace_trace_stack(buffer, flags, 0, pc, regs);
+	ftrace_trace_stack(buffer, flags, 6, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
@@ -1867,13 +1868,14 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 
 }
 
-static void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
-			       int skip, int pc)
+static inline void ftrace_trace_stack(struct ring_buffer *buffer,
+				      unsigned long flags,
+				      int skip, int pc, struct pt_regs *regs)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
 }
 
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
@@ -2158,7 +2160,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(buffer, flags, 6, pc);
+		ftrace_trace_stack(buffer, flags, 6, pc, NULL);
 	}
 
 out:
@@ -2210,7 +2212,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 	memcpy(&entry->buf, tbuffer, len + 1);
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
 		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(buffer, flags, 6, pc);
+		ftrace_trace_stack(buffer, flags, 6, pc, NULL);
 	}
 out:
 	preempt_enable_notrace();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b389d409b952..af34e1822dad 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -911,6 +911,13 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 # define FUNCTION_DEFAULT_FLAGS		0UL
 #endif
 
+#ifdef CONFIG_STACKTRACE
+# define STACK_FLAGS				\
+		C(STACKTRACE,		"stacktrace"),
+#else
+# define STACK_FLAGS
+#endif
+
 /*
  * trace_iterator_flags is an enumeration that defines bit
  * positions into trace_flags that controls the output.
@@ -927,7 +934,6 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 		C(HEX,			"hex"),			\
 		C(BIN,			"bin"),			\
 		C(BLOCK,		"block"),		\
-		C(STACKTRACE,		"stacktrace"),		\
 		C(PRINTK,		"trace_printk"),	\
 		C(ANNOTATE,		"annotate"),		\
 		C(USERSTACKTRACE,	"userstacktrace"),	\
@@ -942,6 +948,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 		C(MARKERS,		"markers"),		\
 		FUNCTION_FLAGS					\
 		FGRAPH_FLAGS					\
+		STACK_FLAGS					\
 		BRANCH_FLAGS
 
 /*