-rw-r--r--  arch/x86/kernel/Makefile              |  6
-rw-r--r--  arch/x86/kernel/ftrace.c              |  5
-rw-r--r--  arch/x86/kernel/process_32.c          |  4
-rw-r--r--  arch/x86/kernel/process_64.c          |  4
-rw-r--r--  include/linux/ftrace.h                | 24
-rw-r--r--  include/linux/sched.h                 |  2
-rw-r--r--  kernel/Makefile                       |  4
-rw-r--r--  kernel/extable.c                      |  5
-rw-r--r--  kernel/module.c                       |  2
-rw-r--r--  kernel/trace/ftrace.c                 |  2
-rw-r--r--  kernel/trace/trace.c                  | 36
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 33
12 files changed, 86 insertions(+), 41 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index a3049da61985..1cad9318d217 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,12 +14,6 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
endif
-ifdef CONFIG_FUNCTION_GRAPH_TRACER
-# Don't trace __switch_to() but let it for function tracer
-CFLAGS_REMOVE_process_32.o = -pg
-CFLAGS_REMOVE_process_64.o = -pg
-endif
-
#
# vsyscalls (which work on the user stack) should have
# no stack-protector checks:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index f98c4076a170..1b43086b097a 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -476,7 +476,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
&return_to_handler;
/* Nmi's are currently unsupported */
- if (atomic_read(&in_nmi))
+ if (unlikely(atomic_read(&in_nmi)))
+ return;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
/*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0a1302fe6d45..24c2276aa453 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -38,6 +38,7 @@
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/dmi.h>
+#include <linux/ftrace.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -548,7 +549,8 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
* the task-switch, and shows up in ret_from_fork in entry.S,
* for example.
*/
-struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+__notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c958120fb1b6..fbb321d53d34 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -39,6 +39,7 @@
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/ftrace.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -551,8 +552,9 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
* - could test fs/gs bitsliced
*
* Kprobes not supported here. Set the probe on schedule instead.
+ * The function graph tracer is not supported here either.
*/
-struct task_struct *
+__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread;
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b9b4d0a22d10..11cac81eed08 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -369,6 +369,14 @@ struct ftrace_graph_ret {
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Sometimes we don't want to trace a function with the function
+ * graph tracer, but we still want it to be traced by the usual
+ * function tracer if the function graph tracer is not configured.
+ */
+#define __notrace_funcgraph notrace
+
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
/* Type of the callback handlers for tracing function graph*/
@@ -393,7 +401,20 @@ static inline int task_curr_ret_stack(struct task_struct *t)
{
return t->curr_ret_stack;
}
+
+static inline void pause_graph_tracing(void)
+{
+ atomic_inc(&current->tracing_graph_pause);
+}
+
+static inline void unpause_graph_tracing(void)
+{
+ atomic_dec(&current->tracing_graph_pause);
+}
#else
+
+#define __notrace_funcgraph
+
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -401,6 +422,9 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
{
return -1;
}
+
+static inline void pause_graph_tracing(void) { }
+static inline void unpause_graph_tracing(void) { }
#endif
#ifdef CONFIG_TRACING
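
[Editor's note: the snippet below is a minimal usage sketch, not part of the commit; the my_* identifiers are made up for illustration. It shows how the two additions above are intended to be used: __notrace_funcgraph marks a function so the function graph tracer skips it while the plain function tracer still sees it, and pause_graph_tracing()/unpause_graph_tracing() bracket a region so the current task records no graph entries inside it (prepare_ftrace_return() checks current->tracing_graph_pause).]

#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * Hypothetical helper: still traced by the function tracer, but when
 * CONFIG_FUNCTION_GRAPH_TRACER is set, __notrace_funcgraph expands to
 * 'notrace' and the graph tracer leaves it alone.
 */
static __notrace_funcgraph int my_text_lookup(unsigned long addr)
{
	return addr != 0;
}

static DEFINE_SPINLOCK(my_buf_lock);

/*
 * Hypothetical caller: pause graph tracing for the current task so the
 * locking below does not show up as nested entries in its own trace.
 */
static void my_emit_record(void)
{
	unsigned long flags;

	pause_graph_tracing();
	spin_lock_irqsave(&my_buf_lock, flags);
	/* ... fill a buffer ... */
	spin_unlock_irqrestore(&my_buf_lock, flags);
	unpause_graph_tracing();
}
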
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4c152e0acc9e..4b81fc5f7731 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1379,6 +1379,8 @@ struct task_struct {
* because of depth overrun.
*/
atomic_t trace_overrun;
+ /* Pause function graph tracing for this task */
+ atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
diff --git a/kernel/Makefile b/kernel/Makefile
index 703cf3b7389c..19fad003b19d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -21,10 +21,6 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
CFLAGS_REMOVE_sched_clock.o = -pg
CFLAGS_REMOVE_sched.o = -pg
endif
-ifdef CONFIG_FUNCTION_GRAPH_TRACER
-CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
-CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
-endif
obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
diff --git a/kernel/extable.c b/kernel/extable.c
index a26cb2e17023..feb0317cf09a 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -17,6 +17,7 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/ftrace.h>
#include <asm/uaccess.h>
#include <asm/sections.h>
@@ -40,7 +41,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
return e;
}
-int core_kernel_text(unsigned long addr)
+__notrace_funcgraph int core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr <= (unsigned long)_etext)
@@ -53,7 +54,7 @@ int core_kernel_text(unsigned long addr)
return 0;
}
-int __kernel_text_address(unsigned long addr)
+__notrace_funcgraph int __kernel_text_address(unsigned long addr)
{
if (core_kernel_text(addr))
return 1;
diff --git a/kernel/module.c b/kernel/module.c
index 89bcf7c1327d..dd2a54155b54 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2704,7 +2704,7 @@ int is_module_address(unsigned long addr)
/* Is this a valid kernel address? */
-struct module *__module_text_address(unsigned long addr)
+__notrace_funcgraph struct module *__module_text_address(unsigned long addr)
{
struct module *mod;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2971fe48f55e..a12f80efceaa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1998,6 +1998,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
/* Make sure IRQs see the -1 first: */
barrier();
t->ret_stack = ret_stack_list[start++];
+ atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
}
} while_each_thread(g, t);
@@ -2077,6 +2078,7 @@ void ftrace_graph_init_task(struct task_struct *t)
if (!t->ret_stack)
return;
t->curr_ret_stack = -1;
+ atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
} else
t->ret_stack = NULL;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7a93c663e52a..8ebe0070c47a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -44,13 +44,14 @@
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
-/* We need to change this state when a selftest is running.
+/*
+ * We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
* entries inserted during the selftest although some concurrent
* insertions into the ring-buffer such as ftrace_printk could occurred
* at the same time, giving false positive or negative results.
*/
-static atomic_t tracing_selftest_running = ATOMIC_INIT(0);
+static bool __read_mostly tracing_selftest_running;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
@@ -574,6 +575,8 @@ int register_tracer(struct tracer *type)
unlock_kernel();
mutex_lock(&trace_types_lock);
+ tracing_selftest_running = true;
+
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
@@ -598,7 +601,6 @@ int register_tracer(struct tracer *type)
struct trace_array *tr = &global_trace;
int i;
- atomic_set(&tracing_selftest_running, 1);
/*
* Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current
@@ -613,7 +615,6 @@ int register_tracer(struct tracer *type)
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
ret = type->selftest(type, tr);
- atomic_set(&tracing_selftest_running, 0);
/* the test is responsible for resetting too */
current_trace = saved_tracer;
if (ret) {
@@ -635,6 +636,7 @@ int register_tracer(struct tracer *type)
max_tracer_type_len = len;
out:
+ tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
lock_kernel();
@@ -3588,24 +3590,17 @@ static __init int tracer_init_debugfs(void)
int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
{
- /*
- * Raw Spinlock because a normal spinlock would be traced here
- * and append an irrelevant couple spin_lock_irqsave/
- * spin_unlock_irqrestore traced by ftrace around this
- * TRACE_PRINTK trace.
- */
- static raw_spinlock_t trace_buf_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(trace_buf_lock);
static char trace_buf[TRACE_BUF_SIZE];
struct ring_buffer_event *event;
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
- struct print_entry *entry;
- unsigned long flags, irq_flags;
int cpu, len = 0, size, pc;
+ struct print_entry *entry;
+ unsigned long irq_flags;
- if (tracing_disabled || atomic_read(&tracing_selftest_running))
+ if (tracing_disabled || tracing_selftest_running)
return 0;
pc = preempt_count();
@@ -3616,8 +3611,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
if (unlikely(atomic_read(&data->disabled)))
goto out;
- local_irq_save(flags);
- __raw_spin_lock(&trace_buf_lock);
+ pause_graph_tracing();
+ spin_lock_irqsave(&trace_buf_lock, irq_flags);
len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
len = min(len, TRACE_BUF_SIZE-1);
@@ -3628,7 +3623,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
if (!event)
goto out_unlock;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
+ tracing_generic_entry_update(&entry->ent, irq_flags, pc);
entry->ent.type = TRACE_PRINT;
entry->ip = ip;
entry->depth = depth;
@@ -3638,9 +3633,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
out_unlock:
- __raw_spin_unlock(&trace_buf_lock);
- local_irq_restore(flags);
-
+ spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+ unpause_graph_tracing();
out:
preempt_enable_notrace();
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 32b7fb9a19df..af60eef4cbcc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -570,11 +570,36 @@ print_graph_function(struct trace_iterator *iter)
}
}
+static void print_graph_headers(struct seq_file *s)
+{
+ /* 1st line */
+ seq_printf(s, "# ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+ seq_printf(s, "CPU ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+ seq_printf(s, "TASK/PID ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
+ seq_printf(s, "OVERHEAD/");
+ seq_printf(s, "DURATION FUNCTION CALLS\n");
+
+ /* 2nd line */
+ seq_printf(s, "# ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+ seq_printf(s, "| ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+ seq_printf(s, "| | ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+ seq_printf(s, "| ");
+ seq_printf(s, "| | | | |\n");
+ } else
+ seq_printf(s, " | | | | |\n");
+}
static struct tracer graph_trace __read_mostly = {
- .name = "function_graph",
- .init = graph_trace_init,
- .reset = graph_trace_reset,
- .print_line = print_graph_function,
+ .name = "function_graph",
+ .init = graph_trace_init,
+ .reset = graph_trace_reset,
+ .print_line = print_graph_function,
+ .print_header = print_graph_headers,
.flags = &tracer_flags,
};
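
[Editor's note: for reference, with all three options enabled (TRACE_GRAPH_PRINT_CPU, TRACE_GRAPH_PRINT_PROC and TRACE_GRAPH_PRINT_OVERHEAD), the new print_graph_headers() callback emits two header lines of roughly the following shape; the exact column padding inside the seq_printf() format strings is not preserved in this rendering, so the spacing is approximate.]

# CPU  TASK/PID  OVERHEAD/DURATION  FUNCTION CALLS
# |    |    |    |        |         |   |   |   |
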