author		Steven Rostedt <srostedt@redhat.com>	2009-09-02 20:17:06 +0200
committer	Steven Rostedt <rostedt@goodmis.org>	2009-09-05 00:59:39 +0200
commit		e77405ad80f53966524b5c31244e13fbbbecbd84 (patch)
tree		65c05f9e1573e9958e52bb72655e00c8592aacd2 /kernel/trace/trace_functions_graph.c
parent		tracing: make tracing_reset safe for external use (diff)
tracing: pass around ring buffer instead of tracer
The latency tracers (irqsoff and wakeup) can swap trace buffers on the fly. If an event is being recorded and has reserved space on one of the buffers while the latency tracer swaps the global buffer with the max buffer, the event may commit its data to the wrong buffer. This patch changes the trace recording API so that the caller receives the buffer that was used for the reservation; that same buffer can then be passed in to the commit.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
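To illustrate the pattern this change establishes, here is a minimal sketch of an event writer after the API change; example_event_writer is a hypothetical helper, not part of the patch, but the reserve/commit calls and entry type are the ones used in the diff below. The point is that the ring buffer is read from the trace_array exactly once and that same buffer is used for both the reservation and the commit.

static void example_event_writer(struct trace_array *tr,
                                 unsigned long flags, int pc)
{
        /* Capture the buffer once; tr->buffer may be swapped afterwards. */
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        /* ... fill in *entry ... */

        /* Commit to the buffer the reservation came from, not tr->buffer. */
        ring_buffer_unlock_commit(buffer, event);
}

Had the commit gone through tr->buffer directly, a buffer swap by irqsoff or wakeup between the reserve and the commit could land the event in the max buffer rather than the buffer it was reserved on.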
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--	kernel/trace/trace_functions_graph.c	14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3f4a251b7d16..b3749a2c3132 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -173,19 +173,20 @@ static int __trace_graph_entry(struct trace_array *tr,
 {
        struct ftrace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
+       struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ent_entry *entry;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return 0;

-       event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
+       event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
-       if (!filter_current_check_discard(call, entry, event))
-               ring_buffer_unlock_commit(tr->buffer, event);
+       if (!filter_current_check_discard(buffer, call, entry, event))
+               ring_buffer_unlock_commit(buffer, event);

        return 1;
 }
@@ -236,19 +237,20 @@ static void __trace_graph_return(struct trace_array *tr,
 {
        struct ftrace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
+       struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ret_entry *entry;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

-       event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
+       event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
-       if (!filter_current_check_discard(call, entry, event))
-               ring_buffer_unlock_commit(tr->buffer, event);
+       if (!filter_current_check_discard(buffer, call, entry, event))
+               ring_buffer_unlock_commit(buffer, event);
 }

 void trace_graph_return(struct ftrace_graph_ret *trace)