author     Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-08-09 21:31:48 +0200
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>  2018-08-10 21:12:01 +0200
commit     e0a568dcd18bdaa77877d558700ce4d3bbbb12b8 (patch)
tree       98962d8b1862aa1422e2e79021a6019a5a232a10 /kernel/trace
parent     ftrace: Remove unused pointer ftrace_swapper_pid (diff)
tracing: Fix synchronizing to event changes with tracepoint_synchronize_unregister()
Now that some trace events can be protected by srcu_read_lock(tracepoint_srcu), we need to make sure that all locations depending on this are also protected. There were many places that did a synchronize_sched() thinking that it was enough to protect against access to trace events. This used to be the case, but now that we use SRCU for the _rcuidle() trace events, they may not be protected by synchronize_sched(), as they may be called in paths where RCU is not watching, even with preemption disabled.

Fixes: e6753f23d961d ("tracepoint: Make rcuidle tracepoint callers use SRCU")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
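For context, tracepoint_synchronize_unregister() waits for both grace periods that tracepoint callers can be under, which is why it is a safe drop-in replacement for the synchronize_sched() calls changed below. A rough sketch of its definition after commit e6753f23d961d (a paraphrase under that assumption, not a verbatim copy of kernel/tracepoint.c):

void tracepoint_synchronize_unregister(void)
{
	/* Wait out readers under srcu_read_lock(&tracepoint_srcu),
	 * i.e. the _rcuidle() tracepoint callers. */
	synchronize_srcu(&tracepoint_srcu);

	/* Wait out classic preempt-disabled tracepoint callers. */
	synchronize_sched();
}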
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_events.c          8
-rw-r--r--  kernel/trace/trace_events_filter.c  15
-rw-r--r--  kernel/trace/trace_events_hist.c     2
-rw-r--r--  kernel/trace/trace_events_trigger.c  6
4 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7b508ce8ac44..808cf29febe2 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -636,7 +636,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
rcu_assign_pointer(tr->filtered_pids, NULL);
/* Wait till all users are no longer using pid filtering */
- synchronize_sched();
+ tracepoint_synchronize_unregister();
trace_free_pid_list(pid_list);
}
@@ -1622,7 +1622,7 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
}
if (filtered_pids) {
- synchronize_sched();
+ tracepoint_synchronize_unregister();
trace_free_pid_list(filtered_pids);
} else if (pid_list) {
/*
@@ -3036,8 +3036,8 @@ int event_trace_del_tracer(struct trace_array *tr)
/* Disable any running events */
__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
- /* Access to events are within rcu_read_lock_sched() */
- synchronize_sched();
+ /* Make sure no more events are being executed */
+ tracepoint_synchronize_unregister();
down_write(&trace_event_sem);
__trace_remove_event_dirs(tr);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 893a206bcba4..184c7685d5ea 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -899,7 +899,8 @@ int filter_match_preds(struct event_filter *filter, void *rec)
if (!filter)
return 1;
- prog = rcu_dereference_sched(filter->prog);
+ /* Protected by either SRCU(tracepoint_srcu) or preempt_disable */
+ prog = rcu_dereference_raw(filter->prog);
if (!prog)
return 1;
@@ -1626,10 +1627,10 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
/*
* The calls can still be using the old filters.
- * Do a synchronize_sched() to ensure all calls are
+ * Do a synchronize_sched() and to ensure all calls are
* done with them before we free them.
*/
- synchronize_sched();
+ tracepoint_synchronize_unregister();
list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
__free_filter(filter_item->filter);
list_del(&filter_item->list);
@@ -1648,7 +1649,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
kfree(filter);
/* If any call succeeded, we still need to sync */
if (!fail)
- synchronize_sched();
+ tracepoint_synchronize_unregister();
list_for_each_entry_safe(filter_item, tmp, &filter_list, list) {
__free_filter(filter_item->filter);
list_del(&filter_item->list);
@@ -1790,7 +1791,7 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
event_clear_filter(file);
/* Make sure the filter is not being used */
- synchronize_sched();
+ tracepoint_synchronize_unregister();
__free_filter(filter);
return 0;
@@ -1817,7 +1818,7 @@ int apply_event_filter(struct trace_event_file *file, char *filter_string)
if (tmp) {
/* Make sure the call is done with the filter */
- synchronize_sched();
+ tracepoint_synchronize_unregister();
__free_filter(tmp);
}
}
@@ -1847,7 +1848,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
filter = system->filter;
system->filter = NULL;
/* Ensure all filters are no longer used */
- synchronize_sched();
+ tracepoint_synchronize_unregister();
filter_free_subsystem_filters(dir, tr);
__free_filter(filter);
goto out_unlock;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index aae18af94c94..c522b51d9909 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -5141,7 +5141,7 @@ static void hist_clear(struct event_trigger_data *data)
if (data->name)
pause_named_trigger(data);
- synchronize_sched();
+ tracepoint_synchronize_unregister();
tracing_map_clear(hist_data->map);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 58d21fd52932..750044ef15e8 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -34,7 +34,9 @@ void trigger_data_free(struct event_trigger_data *data)
if (data->cmd_ops->set_filter)
data->cmd_ops->set_filter(NULL, data, NULL);
- synchronize_sched(); /* make sure current triggers exit before free */
+ /* make sure current triggers exit before free */
+ tracepoint_synchronize_unregister();
+
kfree(data);
}
@@ -752,7 +754,7 @@ int set_trigger_filter(char *filter_str,
if (tmp) {
/* Make sure the call is done with the filter */
- synchronize_sched();
+ tracepoint_synchronize_unregister();
free_event_filter(tmp);
}
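Every hunk above follows the same retract-then-synchronize-then-free pattern: unpublish the shared pointer, wait for all tracepoint readers, then free the old object. A minimal hypothetical sketch of that pattern (replace_filter, slot, and new are illustrative names, not from this patch; __free_filter is the static helper in trace_events_filter.c):

static void replace_filter(struct event_filter **slot,
			   struct event_filter *new)
{
	struct event_filter *old = *slot;

	/* Publish the new filter (or NULL) to readers. */
	rcu_assign_pointer(*slot, new);

	/* Wait for every tracepoint reader, whether it runs under
	 * preempt_disable or under srcu_read_lock(tracepoint_srcu). */
	tracepoint_synchronize_unregister();

	/* No reader can still see the old filter; safe to free. */
	__free_filter(old);
}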