author | Steven Rostedt <srostedt@redhat.com> | 2009-10-08 03:53:41 +0200
committer | Steven Rostedt <rostedt@goodmis.org> | 2009-10-08 03:53:41 +0200
commit | 8f6e8a314ab37cadd72da5ace9027f2d04aba854 (patch)
tree | 2ce9adc1a11cd6d31742d07a557b787d6f0ceb74 /kernel
parent | tracing: fix warning on kernel/trace/trace_branch.c and trace_hw_branches.c (diff)
tracing: use local buffer variable for trace branch tracer
Just passing tr->buffer to trace_buffer_lock_reserve() is not good enough,
because tr->buffer may change between the reserve and the commit, and we do
not want to commit to a different buffer than the one we reserved from.
This patch uses a local variable to hold the buffer that is used for both
the reserve and the commit.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
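To make the race described above concrete, here is a minimal user-space sketch (not the kernel code itself) of the pattern the patch enforces: read the shared, swappable buffer pointer into a local exactly once, and pass that same local to both the reserve and the commit. All names in the sketch (sample_buffer, sample_reserve, sample_commit, current_buf, swap_buffers, record_event) are hypothetical stand-ins for tr->buffer, trace_buffer_lock_reserve() and ring_buffer_unlock_commit().

/*
 * Minimal user-space sketch of the pattern the patch enforces: load the
 * shared, swappable buffer pointer into a local once, and use that same
 * local for both the reserve and the commit.  All names here are
 * hypothetical stand-ins, not kernel API.
 */
#include <stdio.h>

struct sample_buffer {
	const char *name;
};

static struct sample_buffer buf_a = { "A" };
static struct sample_buffer buf_b = { "B" };

/* Shared state: another context may repoint this at any time. */
static struct sample_buffer *current_buf = &buf_a;

/* Stand-in for what a concurrent buffer swap would do. */
static void swap_buffers(void)
{
	current_buf = (current_buf == &buf_a) ? &buf_b : &buf_a;
}

static int sample_reserve(struct sample_buffer *b)
{
	printf("reserved slot in buffer %s\n", b->name);
	return 0;
}

static void sample_commit(struct sample_buffer *b)
{
	printf("committed to buffer %s\n", b->name);
}

static void record_event(void)
{
	/* Load the shared pointer exactly once ... */
	struct sample_buffer *buffer = current_buf;

	if (sample_reserve(buffer))
		return;

	/*
	 * ... so the commit goes to the buffer we reserved from, even if
	 * current_buf was swapped in between.  Re-reading current_buf here
	 * is the kind of mismatch the patch removes.
	 */
	sample_commit(buffer);
}

int main(void)
{
	record_event();
	swap_buffers();	/* the shared pointer changes between events */
	record_event();
	return 0;
}

In the kernel code the same idea is a single "buffer = tr->buffer;" assignment, with that local passed to both trace_buffer_lock_reserve() and ring_buffer_unlock_commit(), as the diff below shows.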
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/trace_branch.c | 8
1 file changed, 5 insertions, 3 deletions
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 216e2dd302a0..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	struct trace_array *tr = branch_tracer;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
+	struct ring_buffer *buffer;
 	unsigned long flags;
 	int cpu, pc;
 	const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 		goto out;
 
 	pc = preempt_count();
-	event = trace_buffer_lock_reserve(tr->buffer, TRACE_BRANCH,
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);