author:    Steven Rostedt <srostedt@redhat.com>    2009-10-07 21:53:41 -0400
committer: Steven Rostedt <rostedt@goodmis.org>    2009-10-07 21:53:41 -0400
commit:    8f6e8a314ab37cadd72da5ace9027f2d04aba854 (patch)
tree:      2ce9adc1a11cd6d31742d07a557b787d6f0ceb74 /kernel/trace/trace_branch.c
parent:    c8647b28726b09b087155417bb698e7b3789f8a0 (diff)
download:  talos-obmc-linux-8f6e8a314ab37cadd72da5ace9027f2d04aba854.tar.gz
           talos-obmc-linux-8f6e8a314ab37cadd72da5ace9027f2d04aba854.zip
tracing: use local buffer variable for trace branch tracer
Just passing tr->buffer to trace_buffer_lock_reserve() is not good enough, because tr->buffer may change, and we do not want to commit to a different buffer than the one we reserved from. This patch uses a local variable to hold the buffer that is used for both the reserve and the commit.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
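The pattern generalizes beyond this tracer: whenever a reserve/commit pair must operate on the same object but the shared pointer can be swapped underneath it, snapshot the pointer into a local variable once and use that local for both calls. Below is a minimal user-space sketch of that pattern; struct buf, buf_reserve() and buf_commit() are hypothetical stand-ins for illustration, not the kernel ring-buffer API.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	char data[64];
	size_t used;
};

/* Shared pointer that another thread may swap out at any time. */
static _Atomic(struct buf *) current_buf;

/* Hypothetical reserve/commit pair standing in for the ring-buffer calls. */
static char *buf_reserve(struct buf *b, size_t len)
{
	return (b->used + len <= sizeof(b->data)) ? b->data + b->used : NULL;
}

static void buf_commit(struct buf *b, size_t len)
{
	b->used += len;
}

static void record_event(const char *msg, size_t len)
{
	/*
	 * Snapshot once so that reserve and commit target the same buffer,
	 * mirroring the "buffer = tr->buffer" assignment in the patch.
	 */
	struct buf *b = atomic_load(&current_buf);
	char *slot = buf_reserve(b, len);

	if (!slot)
		return;
	memcpy(slot, msg, len);
	buf_commit(b, len);	/* same 'b' as the reserve above */
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	atomic_store(&current_buf, b);
	record_event("hit", 3);
	printf("used=%zu\n", b->used);
	free(b);
	return 0;
}

Reloading the shared pointer a second time for the commit would reintroduce exactly the race the patch closes: a concurrent swap between the two loads would commit into a buffer that was never reserved from.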
Diffstat (limited to 'kernel/trace/trace_branch.c')
-rw-r--r--  kernel/trace/trace_branch.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 216e2dd302a0..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	struct trace_array *tr = branch_tracer;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
+	struct ring_buffer *buffer;
 	unsigned long flags;
 	int cpu, pc;
 	const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 		goto out;
 
 	pc = preempt_count();
-	event = trace_buffer_lock_reserve(tr->buffer, TRACE_BRANCH,
+	buffer = tr->buffer;
+	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
 					  sizeof(*entry), flags, pc);
 	if (!event)
 		goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->line = f->line;
 	entry->correct = val == expect;
 
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		ring_buffer_unlock_commit(tr->buffer, event);
+	if (!filter_check_discard(call, entry, buffer, event))
+		ring_buffer_unlock_commit(buffer, event);
 
  out:
 	atomic_dec(&tr->data[cpu]->disabled);