author	Steven Rostedt (VMware) <rostedt@goodmis.org>	2020-03-20 04:40:40 +0100
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2020-03-27 21:39:02 +0100
commit	717e3f5ebc823e5ecfdd15155b0fd81af9fc58d6 (patch)
tree	4a6dee32b382d562589d9e87de05f58f06db2ebf /kernel/trace/ftrace.c
parent	ftrace/kprobe: Show the maxactive number on kprobe_events (diff)
ftrace: Make function trace pid filtering a bit more exact
The set_ftrace_pid file is used to filter function tracing to only trace tasks that are listed in that file. Instead of testing the pids listed in that file (it's a bitmask) at each function trace event, the logic is done via a sched_switch hook. A flag is set when the next task to run is in the list of pids in the set_ftrace_pid file. But the sched_switch hook is not at the exact location of when the task switches, and the flag gets set before the task to be traced actually runs. This leaves a residue of traced functions that do not belong to the pid that should be filtered on.

Change the logic slightly: instead of having a boolean flag to test, record the pid that should be traced, with special values for "never trace" and "always trace". Then at each function call, check whether tracing should be ignored outright, or whether the current pid matches the recorded one, and trace only on a match (or when the special "always trace" value is set).

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
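To make the scheme concrete, here is a minimal user-space C sketch of the sentinel-value check the patch introduces. The FTRACE_PID_IGNORE and FTRACE_PID_TRACE names come from the patch itself; should_trace(), sched_switch_probe(), and the plain global standing in for the per-cpu ftrace_ignore_pid slot are illustrative assumptions, not the kernel implementation.

#include <stdio.h>
#include <stdbool.h>

#define FTRACE_PID_IGNORE	-1	/* never trace on this CPU */
#define FTRACE_PID_TRACE	-2	/* always trace: no pid filter active */

/* Stand-in for the per-cpu tr->array_buffer.data->ftrace_ignore_pid slot. */
static int ftrace_ignore_pid = FTRACE_PID_TRACE;

/* The per-function-call check, mirroring the new ftrace_pid_func() logic. */
static bool should_trace(int current_pid)
{
	int pid = ftrace_ignore_pid;

	if (pid == FTRACE_PID_IGNORE)
		return false;			/* next task was filtered out */
	if (pid != FTRACE_PID_TRACE && pid != current_pid)
		return false;			/* residue: still the old task's pid */
	return true;
}

/* Stand-in for the sched_switch hook: record the decision for "next". */
static void sched_switch_probe(int next_pid, bool next_is_ignored)
{
	ftrace_ignore_pid = next_is_ignored ? FTRACE_PID_IGNORE : next_pid;
}

int main(void)
{
	sched_switch_probe(42, false);	/* pid 42 is listed in set_ftrace_pid */
	printf("pid 42 traced? %d\n", should_trace(42));	/* 1 */
	printf("pid 17 traced? %d\n", should_trace(17));	/* 0: residue rejected */
	return 0;
}

Because -1 and -2 can never be valid pids, the two sentinels fit in the same per-cpu integer that records the pid to trace, so the exact-match check costs no extra storage over the old boolean flag.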
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	32
1 file changed, 25 insertions, 7 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3f7ee102868a..34ae736cb1f8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -139,13 +139,23 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 #endif
 }
 
+#define FTRACE_PID_IGNORE	-1
+#define FTRACE_PID_TRACE	-2
+
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 			    struct ftrace_ops *op, struct pt_regs *regs)
 {
 	struct trace_array *tr = op->private;
+	int pid;
 
-	if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid))
-		return;
+	if (tr) {
+		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
+		if (pid == FTRACE_PID_IGNORE)
+			return;
+		if (pid != FTRACE_PID_TRACE &&
+		    pid != current->pid)
+			return;
+	}
 
 	op->saved_func(ip, parent_ip, op, regs);
 }
@@ -6924,8 +6934,12 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
 
 	pid_list = rcu_dereference_sched(tr->function_pids);
 
-	this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
-		       trace_ignore_this_task(pid_list, next));
+	if (trace_ignore_this_task(pid_list, next))
+		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+			       FTRACE_PID_IGNORE);
+	else
+		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+			       next->pid);
 }
 
 static void
@@ -6978,7 +6992,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
 	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
 
 	for_each_possible_cpu(cpu)
-		per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false;
+		per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
 
 	rcu_assign_pointer(tr->function_pids, NULL);
 
@@ -7103,8 +7117,12 @@ static void ignore_task_cpu(void *data)
 	pid_list = rcu_dereference_protected(tr->function_pids,
 					     mutex_is_locked(&ftrace_lock));
 
-	this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
-		       trace_ignore_this_task(pid_list, current));
+	if (trace_ignore_this_task(pid_list, current))
+		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+			       FTRACE_PID_IGNORE);
+	else
+		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+			       current->pid);
 }
 
 static ssize_t