author	Masami Hiramatsu <mhiramat@kernel.org>	2018-11-05 10:00:43 +0100
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2018-12-09 02:54:09 +0100
commit	fc800a10be26017f8f338bc8e500d48e3e6429d9
tree	2a64e2470385b62d91461a843bb94c7acaddc2b3 /kernel/trace/trace_events.c
parent	tracing/uprobes: Add busy check when cleanup all uprobes
tracing: Lock event_mutex before synth_event_mutex
The synthetic event code uses synth_event_mutex to protect
synth_event_list, and the event_trigger_write() path acquires its
locks in the following order:
event_trigger_write(event_mutex)
->trigger_process_regex(trigger_cmd_mutex)
->event_hist_trigger_func(synth_event_mutex)
On the other hand, the synthetic event creation and deletion paths
call trace_add_event_call() and trace_remove_event_call(), which
acquire event_mutex. So if synth_event_mutex were held while
registering/unregistering a synthetic event, the lock dependency
would be inverted.
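In that case lockdep would see the same two locks taken in the
opposite order, e.g. (an illustrative sketch in the notation above,
not the literal call sites):

create_synth_event(synth_event_mutex)
 ->trace_add_event_call(event_mutex)

which is an ABBA inversion against the event_trigger_write() chain
and can deadlock.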
To avoid this issue, the current synthetic event code creates and
deletes events in two phases. For example, it searches the existing
events under synth_event_mutex to check for an event-name conflict,
unlocks synth_event_mutex, registers the new event with event_mutex
held, and finally takes synth_event_mutex again to add the new event
to the list. This adds complexity and leaves a window in which a
name conflict can still occur (see the sketch below).
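Roughly, the old create path looked like this (a simplified sketch
based on the description above; names such as find_synth_event() and
the error code are illustrative, and error handling is trimmed):

	mutex_lock(&synth_event_mutex);
	if (find_synth_event(name)) {		/* name-conflict check */
		mutex_unlock(&synth_event_mutex);
		return -EEXIST;
	}
	mutex_unlock(&synth_event_mutex);

	/* window: another creator can register the same name here */
	ret = trace_add_event_call(&event->call); /* takes event_mutex */

	mutex_lock(&synth_event_mutex);
	list_add(&event->list, &synth_event_list);
	mutex_unlock(&synth_event_mutex);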
To solve this more simply, introduce trace_add_event_call_nolock()
and trace_remove_event_call_nolock(), which do not acquire
event_mutex internally. The synthetic event code can then take
event_mutex before synth_event_mutex, which resolves the lock
dependency issue.
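A caller can then nest the locks in the same order as the trigger
path (a simplified sketch of the intended usage; the actual
conversion of the synthetic event code lives in trace_events_hist.c
and is outside this diffstat):

	mutex_lock(&event_mutex);
	mutex_lock(&synth_event_mutex);
	/* conflict check, registration and list insertion all happen
	 * in one critical section, with no window in between */
	ret = trace_add_event_call_nolock(&event->call);
	if (!ret)
		list_add(&event->list, &synth_event_list);
	mutex_unlock(&synth_event_mutex);
	mutex_unlock(&event_mutex);

event_mutex is now always taken first, so the ordering agrees with
the event_trigger_write() path and the inversion disappears.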
Link: http://lkml.kernel.org/r/154140844377.17322.13781091165954002713.stgit@devbox
Reviewed-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Tested-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--	kernel/trace/trace_events.c	| 34 ++++++++++++++++++++++++++++------
1 file changed, 28 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f94be0c2827b..a3b157f689ee 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2305,11 +2305,11 @@ __trace_early_add_new_event(struct trace_event_call *call,
 struct ftrace_module_file_ops;
 static void __add_event_to_tracers(struct trace_event_call *call);
 
-/* Add an additional event_call dynamically */
-int trace_add_event_call(struct trace_event_call *call)
+int trace_add_event_call_nolock(struct trace_event_call *call)
 {
 	int ret;
-	mutex_lock(&event_mutex);
+	lockdep_assert_held(&event_mutex);
+
 	mutex_lock(&trace_types_lock);
 
 	ret = __register_event(call, NULL);
@@ -2317,6 +2317,16 @@ int trace_add_event_call(struct trace_event_call *call)
 		__add_event_to_tracers(call);
 
 	mutex_unlock(&trace_types_lock);
+	return ret;
+}
+
+/* Add an additional event_call dynamically */
+int trace_add_event_call(struct trace_event_call *call)
+{
+	int ret;
+
+	mutex_lock(&event_mutex);
+	ret = trace_add_event_call_nolock(call);
 	mutex_unlock(&event_mutex);
 	return ret;
 }
@@ -2366,17 +2376,29 @@ static int probe_remove_event_call(struct trace_event_call *call)
 	return 0;
 }
 
-/* Remove an event_call */
-int trace_remove_event_call(struct trace_event_call *call)
+/* no event_mutex version */
+int trace_remove_event_call_nolock(struct trace_event_call *call)
 {
 	int ret;
 
-	mutex_lock(&event_mutex);
+	lockdep_assert_held(&event_mutex);
+
 	mutex_lock(&trace_types_lock);
 	down_write(&trace_event_sem);
 	ret = probe_remove_event_call(call);
 	up_write(&trace_event_sem);
 	mutex_unlock(&trace_types_lock);
+
+	return ret;
+}
+
+/* Remove an event_call */
+int trace_remove_event_call(struct trace_event_call *call)
+{
+	int ret;
+
+	mutex_lock(&event_mutex);
+	ret = trace_remove_event_call_nolock(call);
 	mutex_unlock(&event_mutex);
 	return ret;
 }