summaryrefslogtreecommitdiffstats
path: root/kernel/trace/ftrace.c
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2012-11-02 17:47:21 -0400
committerSteven Rostedt <rostedt@goodmis.org>2013-01-22 23:38:01 -0500
commitedc15cafcbfa3d73f819cae99885a2e35e4cbce5 (patch)
tree964e0de8816e6d4b602318d90718770921e301d2 /kernel/trace/ftrace.c
parente46cbf75c621725964fe1f6e7013e8bcd86a0e3d (diff)
downloadblackbird-op-linux-edc15cafcbfa3d73f819cae99885a2e35e4cbce5.tar.gz
blackbird-op-linux-edc15cafcbfa3d73f819cae99885a2e35e4cbce5.zip
tracing: Avoid unnecessary multiple recursion checks
When function tracing occurs, the following steps are made: If arch does not support an ftrace feature: call internal function (uses INTERNAL bits) which calls... If callback is registered to the "global" list, the list function is called and recursion checks the GLOBAL bits. then this function calls... The function callback, which can use the FTRACE bits to check for recursion. Now if the arch does not support a feature, and it calls the global list function which calls the ftrace callback all three of these steps will do a recursion protection. There's no reason to do one if the previous caller already did. The recursion that we are protecting against will go through the same steps again. To prevent the multiple recursion checks, if a recursion bit is set that is higher than the MAX bit of the current check, then we know that the check was made by the previous caller, and we can skip the current check. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- kernel/trace/ftrace.c | 40
1 file changed, 9 insertions, 31 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 639b6ab1f04c..ce8c3d68292f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -158,25 +158,15 @@ ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
{
int bit;
- if (in_interrupt()) {
- if (in_nmi())
- bit = TRACE_GLOBAL_NMI_BIT;
-
- else if (in_irq())
- bit = TRACE_GLOBAL_IRQ_BIT;
- else
- bit = TRACE_GLOBAL_SIRQ_BIT;
- } else
- bit = TRACE_GLOBAL_BIT;
-
- if (unlikely(trace_recursion_test(bit)))
+ bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
+ if (bit < 0)
return;
- trace_recursion_set(bit);
do_for_each_ftrace_op(op, ftrace_global_list) {
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
- trace_recursion_clear(bit);
+
+ trace_clear_recursion(bit);
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
@@ -4145,26 +4135,14 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ignored, struct pt_regs *regs)
{
struct ftrace_ops *op;
- unsigned int bit;
+ int bit;
if (function_trace_stop)
return;
- if (in_interrupt()) {
- if (in_nmi())
- bit = TRACE_INTERNAL_NMI_BIT;
-
- else if (in_irq())
- bit = TRACE_INTERNAL_IRQ_BIT;
- else
- bit = TRACE_INTERNAL_SIRQ_BIT;
- } else
- bit = TRACE_INTERNAL_BIT;
-
- if (unlikely(trace_recursion_test(bit)))
- return;
-
- trace_recursion_set(bit);
+ bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+ if (bit < 0)
+ return;
/*
* Some of the ops may be dynamically allocated,
@@ -4176,7 +4154,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
preempt_enable_notrace();
- trace_recursion_clear(bit);
+ trace_clear_recursion(bit);
}
/*
OpenPOWER on IntegriCloud