author:    Steven Rostedt (VMware) <rostedt@goodmis.org>  2020-11-06 03:32:44 +0100
committer: Steven Rostedt (VMware) <rostedt@goodmis.org>  2020-11-06 14:42:05 +0100
commit:    5d029b035bf112466541b844ee1b86197936db65 (patch)
tree:      40070ca454969abf0464abcbaed1ea1cb946e131 /kernel/trace/trace_event_perf.c
parent:    perf/ftrace: Add recursion protection to the ftrace callback (diff)
perf/ftrace: Check for rcu_is_watching() in callback function
If an ftrace callback requires RCU to be watching, it normally sets the
FTRACE_OPS_FL_RCU flag so that it is not called when RCU is not "watching".
But that flag forces the callback to be invoked through a trampoline, which
slows function tracing down a tad. By checking rcu_is_watching() from within
the callback itself, the RCU flag is no longer needed in the ftrace_ops and
the callback can safely be called directly.

Link: https://lkml.kernel.org/r/20201028115613.591878956@goodmis.org
Link: https://lkml.kernel.org/r/20201106023547.711035826@goodmis.org

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
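For illustration, a minimal sketch of the pattern this commit adopts, not
code from the commit itself: the names sample_callback and sample_ops are
hypothetical, and the pt_regs-based callback signature assumes the kernel
era of this patch.

	#include <linux/ftrace.h>
	#include <linux/rcupdate.h>

	/* Hypothetical example: bail out of the callback when RCU is not
	 * watching, instead of relying on the FTRACE_OPS_FL_RCU flag.
	 */
	static void sample_callback(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *ops, struct pt_regs *regs)
	{
		if (!rcu_is_watching())
			return;		/* RCU read-side use would be unsafe here */

		/* ... tracing work that may enter RCU read-side sections ... */
	}

	static struct ftrace_ops sample_ops = {
		.func = sample_callback,
		/* No FTRACE_OPS_FL_RCU: with the explicit check above, ftrace
		 * can call sample_callback directly instead of routing it
		 * through an RCU-checking trampoline.
		 */
	};

The trade-off is a cheap per-call check in the callback in exchange for
avoiding the trampoline on every traced function entry.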
Diffstat (limited to 'kernel/trace/trace_event_perf.c')
-rw-r--r--  kernel/trace/trace_event_perf.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index fd58d83861d8..a2b9fddb8148 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -441,6 +441,9 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 	int rctx;
 	int bit;
 
+	if (!rcu_is_watching())
+		return;
+
 	if ((unsigned long)ops->private != smp_processor_id())
 		return;
 
@@ -484,7 +487,6 @@ static int perf_ftrace_function_register(struct perf_event *event)
 {
 	struct ftrace_ops *ops = &event->ftrace_ops;
 
-	ops->flags = FTRACE_OPS_FL_RCU;
 	ops->func = perf_ftrace_function_call;
 	ops->private = (void *)(unsigned long)nr_cpu_ids;
 