Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/trace/Kconfig      |  7
 -rw-r--r--  kernel/trace/ftrace.c     |  5
 -rw-r--r--  kernel/trace/trace.c      | 65
 -rw-r--r--  kernel/trace/trace_boot.c | 11
 4 files changed, 70 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index dde1d46f77e5..28f2644484d9 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -164,9 +164,8 @@ config BOOT_TRACER
representation of the delays during initcalls - but the raw
/debug/tracing/trace text output is readable too.
- ( Note that tracing self tests can't be enabled if this tracer is
- selected, because the self-tests are an initcall as well and that
- would invalidate the boot trace. )
+ You must pass in ftrace=initcall to the kernel command line
+ to enable this on bootup.
config TRACE_BRANCH_PROFILING
bool "Trace likely/unlikely profiler"
@@ -326,7 +325,7 @@ config FTRACE_SELFTEST
config FTRACE_STARTUP_TEST
bool "Perform a startup test on ftrace"
- depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER
+ depends on TRACING && DEBUG_KERNEL
select FTRACE_SELFTEST
help
This option performs a series of startup tests on ftrace. On bootup
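
The Kconfig hunks above drop the old "self-tests conflict with the boot tracer" note: the boot tracer is now enabled by booting with ftrace=initcall on the kernel command line, and FTRACE_STARTUP_TEST no longer excludes BOOT_TRACER. As a rough, illustrative sketch (the "foo" names are invented, not part of this patch), this is the usual pattern for capturing such a command-line value into an __initdata buffer, which is what the trace.c hunk further down does for "ftrace=":

#include <linux/init.h>
#include <linux/string.h>

/*
 * Illustrative only -- "foo" is a made-up option, not from this patch.
 * A __setup() handler copies the value into an __initdata buffer so the
 * choice can be acted on later during boot.
 */
#define FOO_BUF_SIZE 100
static char foo_buf[FOO_BUF_SIZE] __initdata;
static char *foo_choice;

static int __init set_foo(char *str)
{
	strncpy(foo_buf, str, FOO_BUF_SIZE);
	foo_choice = foo_buf;
	return 1;	/* non-zero: the option was handled */
}
__setup("foo=", set_foo);
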
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7e9a20b69939..68610031780b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1729,9 +1729,12 @@ static void clear_ftrace_pid(struct pid *pid)
{
struct task_struct *p;
+ rcu_read_lock();
do_each_pid_task(pid, PIDTYPE_PID, p) {
clear_tsk_trace_trace(p);
} while_each_pid_task(pid, PIDTYPE_PID, p);
+ rcu_read_unlock();
+
put_pid(pid);
}
@@ -1739,9 +1742,11 @@ static void set_ftrace_pid(struct pid *pid)
{
struct task_struct *p;
+ rcu_read_lock();
do_each_pid_task(pid, PIDTYPE_PID, p) {
set_tsk_trace_trace(p);
} while_each_pid_task(pid, PIDTYPE_PID, p);
+ rcu_read_unlock();
}
static void clear_ftrace_pid_task(struct pid **pid)
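
The two ftrace.c hunks wrap the pid-task walks in rcu_read_lock()/rcu_read_unlock(): do_each_pid_task() iterates an RCU-protected list of tasks attached to a struct pid, so the walk has to sit inside an RCU read-side critical section. A minimal sketch of the pattern (the helper name is invented for illustration):

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Illustrative only -- the helper name is invented.  The pid-to-task
 * links are RCU protected, hence the rcu_read_lock() around the walk,
 * matching the ftrace.c change above.
 */
static void visit_tasks_of_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		/* operate on p here, e.g. set or clear a per-task flag */
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}
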
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 152d0969adf8..bbdfaa2cbdb9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -53,6 +53,11 @@ unsigned long __read_mostly tracing_thresh;
*/
static bool __read_mostly tracing_selftest_running;
+/*
+ * If a tracer is running, we do not want to run SELFTEST.
+ */
+static bool __read_mostly tracing_selftest_disabled;
+
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
{ }
@@ -110,14 +115,19 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
*/
int ftrace_dump_on_oops;
-static int tracing_set_tracer(char *buf);
+static int tracing_set_tracer(const char *buf);
+
+#define BOOTUP_TRACER_SIZE 100
+static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+static char *default_bootup_tracer;
static int __init set_ftrace(char *str)
{
- tracing_set_tracer(str);
+ strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+ default_bootup_tracer = bootup_tracer_buf;
return 1;
}
-__setup("ftrace", set_ftrace);
+__setup("ftrace=", set_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
@@ -469,7 +479,7 @@ int register_tracer(struct tracer *type)
type->flags->opts = dummy_tracer_opt;
#ifdef CONFIG_FTRACE_STARTUP_TEST
- if (type->selftest) {
+ if (type->selftest && !tracing_selftest_disabled) {
struct tracer *saved_tracer = current_trace;
struct trace_array *tr = &global_trace;
int i;
@@ -511,8 +521,25 @@ int register_tracer(struct tracer *type)
out:
tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
- lock_kernel();
+ if (!ret && default_bootup_tracer) {
+ if (!strncmp(default_bootup_tracer, type->name,
+ BOOTUP_TRACER_SIZE)) {
+ printk(KERN_INFO "Starting tracer '%s'\n",
+ type->name);
+ /* Do we want this tracer to start on bootup? */
+ tracing_set_tracer(type->name);
+ default_bootup_tracer = NULL;
+ /* disable other selftests, since this will break it. */
+ tracing_selftest_disabled = 1;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+ printk(KERN_INFO "Disabling FTRACE selftests due"
+ " to running tracer '%s'\n", type->name);
+#endif
+ }
+ }
+
+ lock_kernel();
return ret;
}
@@ -2166,7 +2193,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
-static int tracing_set_tracer(char *buf)
+static int tracing_set_tracer(const char *buf)
{
struct trace_array *tr = &global_trace;
struct tracer *t;
@@ -3061,12 +3088,9 @@ __init static int tracer_alloc_buffers(void)
trace_init_cmdlines();
register_tracer(&nop_trace);
+ current_trace = &nop_trace;
#ifdef CONFIG_BOOT_TRACER
register_tracer(&boot_tracer);
- current_trace = &boot_tracer;
- current_trace->init(&global_trace);
-#else
- current_trace = &nop_trace;
#endif
/* All seems OK, enable tracing */
tracing_disabled = 0;
@@ -3084,5 +3108,26 @@ out_free_buffer_mask:
out:
return ret;
}
+
+__init static int clear_boot_tracer(void)
+{
+ /*
+ * The default tracer at boot buffer is an init section.
+ * This function is called in lateinit. If we did not
+ * find the boot tracer, then clear it out, to prevent
+ * later registration from accessing the buffer that is
+ * about to be freed.
+ */
+ if (!default_bootup_tracer)
+ return 0;
+
+ printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
+ default_bootup_tracer);
+ default_bootup_tracer = NULL;
+
+ return 0;
+}
+
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
+late_initcall(clear_boot_tracer);
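
In trace.c, set_ftrace() now only records the requested tracer name; register_tracer() compares each newly registered tracer against default_bootup_tracer and, on a match, starts it and disables the remaining self-tests so they cannot clobber the live trace. Because bootup_tracer_buf is __initdata, clear_boot_tracer() runs as a late_initcall to drop any leftover pointer before init memory is freed. A hedged sketch of that cleanup pattern, with invented names:

#include <linux/init.h>
#include <linux/kernel.h>

/*
 * Illustrative only -- names are invented.  Any pointer that still
 * refers to an __initdata buffer must be cleared before init memory is
 * freed, which is what clear_boot_tracer() above does for
 * default_bootup_tracer at late_initcall time.
 */
static char *foo_choice;	/* may point into an __initdata buffer */

static int __init clear_foo_choice(void)
{
	if (foo_choice) {
		printk(KERN_INFO "foo option '%s' was never consumed\n",
		       foo_choice);
		foo_choice = NULL;	/* the buffer is about to go away */
	}
	return 0;
}
late_initcall(clear_foo_choice);
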
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 0e94b3d091f7..1f07895977a0 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -28,13 +28,13 @@ void start_boot_trace(void)
void enable_boot_trace(void)
{
- if (pre_initcalls_finished)
+ if (boot_trace && pre_initcalls_finished)
tracing_start_sched_switch_record();
}
void disable_boot_trace(void)
{
- if (pre_initcalls_finished)
+ if (boot_trace && pre_initcalls_finished)
tracing_stop_sched_switch_record();
}
@@ -43,6 +43,9 @@ static int boot_trace_init(struct trace_array *tr)
int cpu;
boot_trace = tr;
+ if (!tr)
+ return 0;
+
for_each_cpu(cpu, cpu_possible_mask)
tracing_reset(tr, cpu);
@@ -132,7 +135,7 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
unsigned long irq_flags;
struct trace_array *tr = boot_trace;
- if (!pre_initcalls_finished)
+ if (!tr || !pre_initcalls_finished)
return;
/* Get its name now since this function could
@@ -164,7 +167,7 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
unsigned long irq_flags;
struct trace_array *tr = boot_trace;
- if (!pre_initcalls_finished)
+ if (!tr || !pre_initcalls_finished)
return;
sprint_symbol(bt->func, (unsigned long)fn);