author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-31 11:52:01 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-31 11:52:01 -0800
commit     b21c07040304b8716e38a4a0e4ab60f386357e61 (patch)
tree       eb2cf03ec35b9a5090c0adaab659a766e02c3b10 /kernel
parent     4e58fb7305449cf8c5a86dd97dfc1812221be77c (diff)
parent     fb7ae981cb9fe8665b9da97e8734745e030c151d (diff)
download   talos-obmc-linux-b21c07040304b8716e38a4a0e4ab60f386357e61.tar.gz
           talos-obmc-linux-b21c07040304b8716e38a4a0e4ab60f386357e61.zip
Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: Fix sign fields in ftrace_define_fields_##call()
  tracing/syscalls: Fix typo in SYSCALL_DEFINE0
  tracing/kprobe: Show sign of fields in trace_kprobe format files
  ksym_tracer: Remove trace_stat
  ksym_tracer: Fix race when incrementing count
  ksym_tracer: Fix to allow writing newline to ksym_trace_filter
  ksym_tracer: Fix to make the tracer work
  tracing: Kconfig spelling fixes and cleanups
  tracing: Fix setting tracer specific options
  Documentation: Update ftrace-design.txt
  Documentation: Update tracepoint-analysis.txt
  Documentation: Update mmiotrace.txt
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hw_breakpoint.c      |  10
-rw-r--r--  kernel/trace/Kconfig        | 112
-rw-r--r--  kernel/trace/trace.c        |   2
-rw-r--r--  kernel/trace/trace_export.c |   7
-rw-r--r--  kernel/trace/trace_kprobe.c |   7
-rw-r--r--  kernel/trace/trace_ksym.c   | 140
6 files changed, 129 insertions(+), 149 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index dbcbf6a33a08..50dbd5999588 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -40,6 +40,7 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
+#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/hw_breakpoint.h>
@@ -388,7 +389,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
if (!cpu_events)
return ERR_PTR(-ENOMEM);
- for_each_possible_cpu(cpu) {
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
@@ -399,18 +401,20 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
goto fail;
}
}
+ put_online_cpus();
return cpu_events;
fail:
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
if (IS_ERR(*pevent))
break;
unregister_hw_breakpoint(*pevent);
}
+ put_online_cpus();
+
free_percpu(cpu_events);
- /* return the error if any */
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
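The hunk above swaps for_each_possible_cpu() for get_online_cpus() plus
for_each_online_cpu(): counter creation can fail on a CPU that is not
online, and holding the hotplug lock guarantees the failure path walks
the same set of CPUs that the setup path did. A minimal sketch of the
pattern, with hypothetical setup_one()/teardown_one() helpers standing
in for the real breakpoint calls:

#include <linux/cpu.h>

static int setup_on_each_online_cpu(int (*setup_one)(int cpu),
				    void (*teardown_one)(int cpu))
{
	int cpu, err = 0, last_ok = -1;

	get_online_cpus();		/* pin the set of online CPUs */
	for_each_online_cpu(cpu) {
		err = setup_one(cpu);
		if (err)
			break;
		last_ok = cpu;		/* remember how far we got */
	}
	if (err) {
		/* unwind exactly the CPUs that were set up */
		for_each_online_cpu(cpu) {
			if (last_ok < 0 || cpu > last_ok)
				break;
			teardown_one(cpu);
		}
	}
	put_online_cpus();		/* re-enable CPU hotplug */
	return err;
}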
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d006554888dc..6c22d8a2f289 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -12,17 +12,17 @@ config NOP_TRACER
config HAVE_FTRACE_NMI_ENTER
bool
help
- See Documentation/trace/ftrace-implementation.txt
+ See Documentation/trace/ftrace-design.txt
config HAVE_FUNCTION_TRACER
bool
help
- See Documentation/trace/ftrace-implementation.txt
+ See Documentation/trace/ftrace-design.txt
config HAVE_FUNCTION_GRAPH_TRACER
bool
help
- See Documentation/trace/ftrace-implementation.txt
+ See Documentation/trace/ftrace-design.txt
config HAVE_FUNCTION_GRAPH_FP_TEST
bool
@@ -34,17 +34,17 @@ config HAVE_FUNCTION_GRAPH_FP_TEST
config HAVE_FUNCTION_TRACE_MCOUNT_TEST
bool
help
- See Documentation/trace/ftrace-implementation.txt
+ See Documentation/trace/ftrace-design.txt
config HAVE_DYNAMIC_FTRACE
bool
help
- See Documentation/trace/ftrace-implementation.txt
+ See Documentation/trace/ftrace-design.txt
config HAVE_FTRACE_MCOUNT_RECORD
bool
help
- See Documentation/trace/ftrace-implementation.txt
+ See Documentation/trace/ftrace-design.txt
config HAVE_HW_BRANCH_TRACER
bool
@@ -52,7 +52,7 @@ config HAVE_HW_BRANCH_TRACER
config HAVE_SYSCALL_TRACEPOINTS
bool
help
- See Documentation/trace/ftrace-implementation.txt
+ See Documentation/trace/ftrace-design.txt
config TRACER_MAX_TRACE
bool
@@ -83,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
-# hidding of the automatic options.
+# hiding of the automatic options.
config TRACING
bool
@@ -119,7 +119,7 @@ menuconfig FTRACE
bool "Tracers"
default y if DEBUG_KERNEL
help
- Enable the kernel tracing infrastructure.
+ Enable the kernel tracing infrastructure.
if FTRACE
@@ -133,7 +133,7 @@ config FUNCTION_TRACER
help
Enable the kernel to trace every kernel function. This is done
by using a compiler feature to insert a small, 5-byte No-Operation
- instruction to the beginning of every kernel function, which NOP
+ instruction at the beginning of every kernel function, which NOP
sequence is then dynamically patched into a tracer call when
tracing is enabled by the administrator. If it's runtime disabled
(the bootup default), then the overhead of the instructions is very
@@ -150,7 +150,7 @@ config FUNCTION_GRAPH_TRACER
and its entry.
Its first purpose is to trace the duration of functions and
draw a call graph for each thread with some information like
- the return value. This is done by setting the current return
+ the return value. This is done by setting the current return
address on the current task structure into a stack of calls.
@@ -173,7 +173,7 @@ config IRQSOFF_TRACER
echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
- (Note that kernel size and overhead increases with this option
+ (Note that kernel size and overhead increase with this option
enabled. This option and the preempt-off timing option can be
used together or separately.)
@@ -186,7 +186,7 @@ config PREEMPT_TRACER
select TRACER_MAX_TRACE
select RING_BUFFER_ALLOW_SWAP
help
- This option measures the time spent in preemption off critical
+ This option measures the time spent in preemption-off critical
sections, with microsecond accuracy.
The default measurement method is a maximum search, which is
@@ -195,7 +195,7 @@ config PREEMPT_TRACER
echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
- (Note that kernel size and overhead increases with this option
+ (Note that kernel size and overhead increase with this option
enabled. This option and the irqs-off timing option can be
used together or separately.)
@@ -222,7 +222,7 @@ config ENABLE_DEFAULT_TRACERS
depends on !GENERIC_TRACER
select TRACING
help
- This tracer hooks to various trace points in the kernel
+ This tracer hooks to various trace points in the kernel,
allowing the user to pick and choose which trace point they
want to trace. It also includes the sched_switch tracer plugin.
@@ -265,19 +265,19 @@ choice
The likely/unlikely profiler only looks at the conditions that
are annotated with a likely or unlikely macro.
- The "all branch" profiler will profile every if statement in the
+ The "all branch" profiler will profile every if-statement in the
kernel. This profiler will also enable the likely/unlikely
- profiler as well.
+ profiler.
- Either of the above profilers add a bit of overhead to the system.
- If unsure choose "No branch profiling".
+ Either of the above profilers adds a bit of overhead to the system.
+ If unsure, choose "No branch profiling".
config BRANCH_PROFILE_NONE
bool "No branch profiling"
help
- No branch profiling. Branch profiling adds a bit of overhead.
- Only enable it if you want to analyse the branching behavior.
- Otherwise keep it disabled.
+ No branch profiling. Branch profiling adds a bit of overhead.
+ Only enable it if you want to analyse the branching behavior.
+ Otherwise keep it disabled.
config PROFILE_ANNOTATED_BRANCHES
bool "Trace likely/unlikely profiler"
@@ -288,7 +288,7 @@ config PROFILE_ANNOTATED_BRANCHES
/sys/kernel/debug/tracing/profile_annotated_branch
- Note: this will add a significant overhead, only turn this
+ Note: this will add a significant overhead; only turn this
on if you need to profile the system's use of these macros.
config PROFILE_ALL_BRANCHES
@@ -305,7 +305,7 @@ config PROFILE_ALL_BRANCHES
This configuration, when enabled, will impose a great overhead
on the system. This should only be enabled when the system
- is to be analyzed
+ is to be analyzed in much detail.
endchoice
config TRACING_BRANCHES
@@ -335,7 +335,7 @@ config POWER_TRACER
depends on X86
select GENERIC_TRACER
help
- This tracer helps developers to analyze and optimize the kernels
+ This tracer helps developers to analyze and optimize the kernel's
power management decisions, specifically the C-state and P-state
behavior.
@@ -391,14 +391,14 @@ config HW_BRANCH_TRACER
select GENERIC_TRACER
help
This tracer records all branches on the system in a circular
- buffer giving access to the last N branches for each cpu.
+ buffer, giving access to the last N branches for each cpu.
config KMEMTRACE
bool "Trace SLAB allocations"
select GENERIC_TRACER
help
kmemtrace provides tracing for slab allocator functions, such as
- kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
+ kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
data is then fed to the userspace application in order to analyse
allocation hotspots, internal fragmentation and so on, making it
possible to see how well an allocator performs, as well as debug
@@ -417,15 +417,15 @@ config WORKQUEUE_TRACER
bool "Trace workqueues"
select GENERIC_TRACER
help
- The workqueue tracer provides some statistical informations
+ The workqueue tracer provides some statistical information
about each cpu workqueue thread such as the number of the
works inserted and executed since their creation. It can help
- to evaluate the amount of work each of them have to perform.
+ to evaluate the amount of work each of them has to perform.
For example it can help a developer to decide whether he should
- choose a per cpu workqueue instead of a singlethreaded one.
+ choose a per-cpu workqueue instead of a singlethreaded one.
config BLK_DEV_IO_TRACE
- bool "Support for tracing block io actions"
+ bool "Support for tracing block IO actions"
depends on SYSFS
depends on BLOCK
select RELAY
@@ -456,15 +456,15 @@ config KPROBE_EVENT
select TRACING
default y
help
- This allows the user to add tracing events (similar to tracepoints) on the fly
- via the ftrace interface. See Documentation/trace/kprobetrace.txt
- for more details.
+ This allows the user to add tracing events (similar to tracepoints)
+ on the fly via the ftrace interface. See
+ Documentation/trace/kprobetrace.txt for more details.
Those events can be inserted wherever kprobes can probe, and record
various register and memory values.
- This option is also required by perf-probe subcommand of perf tools. If
- you want to use perf tools, this option is strongly recommended.
+ This option is also required by perf-probe subcommand of perf tools.
+ If you want to use perf tools, this option is strongly recommended.
config DYNAMIC_FTRACE
bool "enable/disable ftrace tracepoints dynamically"
@@ -472,32 +472,32 @@ config DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE
default y
help
- This option will modify all the calls to ftrace dynamically
- (will patch them out of the binary image and replaces them
- with a No-Op instruction) as they are called. A table is
- created to dynamically enable them again.
+ This option will modify all the calls to ftrace dynamically
+ (will patch them out of the binary image and replace them
+ with a No-Op instruction) as they are called. A table is
+ created to dynamically enable them again.
- This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
- has native performance as long as no tracing is active.
+ This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
+ otherwise has native performance as long as no tracing is active.
- The changes to the code are done by a kernel thread that
- wakes up once a second and checks to see if any ftrace calls
- were made. If so, it runs stop_machine (stops all CPUS)
- and modifies the code to jump over the call to ftrace.
+ The changes to the code are done by a kernel thread that
+ wakes up once a second and checks to see if any ftrace calls
+ were made. If so, it runs stop_machine (stops all CPUS)
+ and modifies the code to jump over the call to ftrace.
config FUNCTION_PROFILER
bool "Kernel function profiler"
depends on FUNCTION_TRACER
default n
help
- This option enables the kernel function profiler. A file is created
- in debugfs called function_profile_enabled which defaults to zero.
- When a 1 is echoed into this file profiling begins, and when a
- zero is entered, profiling stops. A file in the trace_stats
- directory called functions, that show the list of functions that
- have been hit and their counters.
+ This option enables the kernel function profiler. A file is created
+ in debugfs called function_profile_enabled which defaults to zero.
+ When a 1 is echoed into this file profiling begins, and when a
+ zero is entered, profiling stops. A "functions" file is created in
+ the trace_stats directory; this file shows the list of functions that
+ have been hit and their counters.
- If in doubt, say N
+ If in doubt, say N.
config FTRACE_MCOUNT_RECORD
def_bool y
@@ -556,8 +556,8 @@ config RING_BUFFER_BENCHMARK
tristate "Ring buffer benchmark stress tester"
depends on RING_BUFFER
help
- This option creates a test to stress the ring buffer and bench mark it.
- It creates its own ring buffer such that it will not interfer with
+ This option creates a test to stress the ring buffer and benchmark it.
+ It creates its own ring buffer such that it will not interfere with
any other users of the ring buffer (such as ftrace). It then creates
a producer and consumer that will run for 10 seconds and sleep for
10 seconds. Each interval it will print out the number of events
@@ -566,7 +566,7 @@ config RING_BUFFER_BENCHMARK
It does not disable interrupts or raise its priority, so it may be
affected by processes that are running.
- If unsure, say N
+ If unsure, say N.
endif # FTRACE
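For context on the branch-profiler choices above: the annotated profiler
only instruments sites marked with the kernel's likely()/unlikely()
hints, as in this illustrative fragment (the function and values are
made up):

#include <linux/compiler.h>
#include <linux/errno.h>

static int parse_header(const unsigned char *buf, unsigned int len)
{
	if (unlikely(len < 4))		/* rare error path */
		return -EINVAL;
	if (likely(buf[0] == 0x7e))	/* expected common case */
		return buf[1];
	return 0;
}

The "all branch" profiler, by contrast, instruments every if-statement,
annotated or not, which is why its help text warns about the overhead.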
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8b9f20ab8eed..0df1b0f2cb9e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3949,7 +3949,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (!!(topt->flags->val & topt->opt->bit) != val) {
mutex_lock(&trace_types_lock);
ret = __set_tracer_option(current_trace, topt->flags,
- topt->opt, val);
+ topt->opt, !val);
mutex_unlock(&trace_types_lock);
if (ret)
return ret;
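The one-character fix above hinges on a calling convention: the last
argument of __set_tracer_option() is a "neg" (clear-the-bit) flag rather
than the desired value, so a request to enable (val == 1) must be passed
as neg == 0. A simplified model of the convention (struct layouts
abbreviated, not the kernel's exact code):

struct tracer_flags { unsigned int val; };
struct tracer_opt   { unsigned int bit; };

static void set_option(struct tracer_flags *flags, struct tracer_opt *opt,
		       int neg)
{
	if (neg)
		flags->val &= ~opt->bit;	/* clear the option */
	else
		flags->val |= opt->bit;		/* set the option */
}

static void apply_user_value(struct tracer_flags *flags,
			     struct tracer_opt *opt, unsigned long val)
{
	/* only touch the flag when the current state differs */
	if (!!(flags->val & opt->bit) != val)
		set_option(flags, opt, !val);	/* the fix: pass !val */
}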
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 458e5bfe26d0..d4fa5dc1ee4e 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -158,7 +158,8 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
ret = trace_define_field(event_call, #type "[" #len "]", #item, \
offsetof(typeof(field), item), \
- sizeof(field.item), 0, FILTER_OTHER); \
+ sizeof(field.item), \
+ is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret;
@@ -168,8 +169,8 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
ret = trace_define_field(event_call, #type "[" #len "]", #item, \
offsetof(typeof(field), \
container.item), \
- sizeof(field.container.item), 0, \
- FILTER_OTHER); \
+ sizeof(field.container.item), \
+ is_signed_type(type), FILTER_OTHER); \
if (ret) \
return ret;
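Both hunks above replace a hard-coded 0 with is_signed_type(type), so
the filter code knows whether a field compares as signed. The macro
boils down to a compile-time cast trick; a userspace sketch of the idea
(the exact kernel definition may differ):

#include <stdio.h>

#define is_signed_type(type)	(((type)(-1)) < (type)1)

int main(void)
{
	printf("int:          %d\n", is_signed_type(int));		/* 1 */
	printf("unsigned int: %d\n", is_signed_type(unsigned int));	/* 0 */
	printf("long:         %d\n", is_signed_type(long));		/* 1 */
	return 0;
}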
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 375f81a568dc..6ea90c0e2c96 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1201,10 +1201,11 @@ static int __probe_event_show_format(struct trace_seq *s,
#undef SHOW_FIELD
#define SHOW_FIELD(type, item, name) \
do { \
- ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \
- "offset:%u;\tsize:%u;\n", name, \
+ ret = trace_seq_printf(s, "\tfield:" #type " %s;\t" \
+ "offset:%u;\tsize:%u;\tsigned:%d;\n", name,\
(unsigned int)offsetof(typeof(field), item),\
- (unsigned int)sizeof(type)); \
+ (unsigned int)sizeof(type), \
+ is_signed_type(type)); \
if (!ret) \
return 0; \
} while (0)
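The revised SHOW_FIELD() brings kprobe format files in line with the
generic event formats by appending a signed: attribute to each field
description. A standalone mock of the expanded macro (trace_seq_printf()
stubbed with printf(); the struct and field names are made up):

#include <stdio.h>
#include <stddef.h>

#define is_signed_type(type)	(((type)(-1)) < (type)1)

struct sample { unsigned long ip; int nargs; };

#define SHOW_FIELD(type, item, name)					\
	printf("\tfield:" #type " %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", \
	       name, (unsigned int)offsetof(struct sample, item),	\
	       (unsigned int)sizeof(type), is_signed_type(type))

int main(void)
{
	SHOW_FIELD(unsigned long, ip, "ip");	/* ... signed:0; */
	SHOW_FIELD(int, nargs, "nargs");	/* ... signed:1; */
	return 0;
}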
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index faf37fa4408c..94103cdcf9d8 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -26,12 +26,13 @@
#include <linux/fs.h>
#include "trace_output.h"
-#include "trace_stat.h"
#include "trace.h"
#include <linux/hw_breakpoint.h>
#include <asm/hw_breakpoint.h>
+#include <asm/atomic.h>
+
/*
* For now, let us restrict the no. of symbols traced simultaneously to number
* of available hardware breakpoint registers.
@@ -44,7 +45,7 @@ struct trace_ksym {
struct perf_event **ksym_hbp;
struct perf_event_attr attr;
#ifdef CONFIG_PROFILE_KSYM_TRACER
- unsigned long counter;
+ atomic64_t counter;
#endif
struct hlist_node ksym_hlist;
};
@@ -69,9 +70,8 @@ void ksym_collect_stats(unsigned long hbp_hit_addr)
rcu_read_lock();
hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) {
- if ((entry->attr.bp_addr == hbp_hit_addr) &&
- (entry->counter <= MAX_UL_INT)) {
- entry->counter++;
+ if (entry->attr.bp_addr == hbp_hit_addr) {
+ atomic64_inc(&entry->counter);
break;
}
}
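This hunk fixes the counting race: a plain counter++ under only
rcu_read_lock() can lose updates when breakpoint handlers fire on
several CPUs at once, and the old MAX_UL_INT clamp goes away with it.
A minimal sketch of the atomic64_t pattern (struct and helpers
hypothetical):

#include <asm/atomic.h>

struct hit_stat {
	atomic64_t counter;		/* was: unsigned long counter */
};

static void record_hit(struct hit_stat *s)
{
	atomic64_inc(&s->counter);	/* lock-free, safe on any CPU */
}

static unsigned long long read_hits(struct hit_stat *s)
{
	return (unsigned long long)atomic64_read(&s->counter);
}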
@@ -197,7 +197,6 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
entry->attr.bp_addr = addr;
entry->attr.bp_len = HW_BREAKPOINT_LEN_4;
- ret = -EAGAIN;
entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr,
ksym_hbp_handler);
@@ -300,8 +299,8 @@ static ssize_t ksym_trace_filter_write(struct file *file,
* 2: echo 0 > ksym_trace_filter
* 3: echo "*:---" > ksym_trace_filter
*/
- if (!buf[0] || !strcmp(buf, "0") ||
- !strcmp(buf, "*:---")) {
+ if (!input_string[0] || !strcmp(input_string, "0") ||
+ !strcmp(input_string, "*:---")) {
__ksym_trace_reset();
ret = 0;
goto out;
@@ -444,102 +443,77 @@ struct tracer ksym_tracer __read_mostly =
.print_line = ksym_trace_output
};
-__init static int init_ksym_trace(void)
-{
- struct dentry *d_tracer;
- struct dentry *entry;
-
- d_tracer = tracing_init_dentry();
- ksym_filter_entry_count = 0;
-
- entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer,
- NULL, &ksym_tracing_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'ksym_trace_filter' file\n");
-
- return register_tracer(&ksym_tracer);
-}
-device_initcall(init_ksym_trace);
-
-
#ifdef CONFIG_PROFILE_KSYM_TRACER
-static int ksym_tracer_stat_headers(struct seq_file *m)
+static int ksym_profile_show(struct seq_file *m, void *v)
{
+ struct hlist_node *node;
+ struct trace_ksym *entry;
+ int access_type = 0;
+ char fn_name[KSYM_NAME_LEN];
+
seq_puts(m, " Access Type ");
seq_puts(m, " Symbol Counter\n");
seq_puts(m, " ----------- ");
seq_puts(m, " ------ -------\n");
- return 0;
-}
-static int ksym_tracer_stat_show(struct seq_file *m, void *v)
-{
- struct hlist_node *stat = v;
- struct trace_ksym *entry;
- int access_type = 0;
- char fn_name[KSYM_NAME_LEN];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) {
- entry = hlist_entry(stat, struct trace_ksym, ksym_hlist);
+ access_type = entry->attr.bp_type;
- access_type = entry->attr.bp_type;
+ switch (access_type) {
+ case HW_BREAKPOINT_R:
+ seq_puts(m, " R ");
+ break;
+ case HW_BREAKPOINT_W:
+ seq_puts(m, " W ");
+ break;
+ case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
+ seq_puts(m, " RW ");
+ break;
+ default:
+ seq_puts(m, " NA ");
+ }
- switch (access_type) {
- case HW_BREAKPOINT_R:
- seq_puts(m, " R ");
- break;
- case HW_BREAKPOINT_W:
- seq_puts(m, " W ");
- break;
- case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
- seq_puts(m, " RW ");
- break;
- default:
- seq_puts(m, " NA ");
+ if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0)
+ seq_printf(m, " %-36s", fn_name);
+ else
+ seq_printf(m, " %-36s", "<NA>");
+ seq_printf(m, " %15llu\n",
+ (unsigned long long)atomic64_read(&entry->counter));
}
-
- if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0)
- seq_printf(m, " %-36s", fn_name);
- else
- seq_printf(m, " %-36s", "<NA>");
- seq_printf(m, " %15lu\n", entry->counter);
+ rcu_read_unlock();
return 0;
}
-static void *ksym_tracer_stat_start(struct tracer_stat *trace)
+static int ksym_profile_open(struct inode *node, struct file *file)
{
- return ksym_filter_head.first;
-}
-
-static void *
-ksym_tracer_stat_next(void *v, int idx)
-{
- struct hlist_node *stat = v;
-
- return stat->next;
+ return single_open(file, ksym_profile_show, NULL);
}
-static struct tracer_stat ksym_tracer_stats = {
- .name = "ksym_tracer",
- .stat_start = ksym_tracer_stat_start,
- .stat_next = ksym_tracer_stat_next,
- .stat_headers = ksym_tracer_stat_headers,
- .stat_show = ksym_tracer_stat_show
+static const struct file_operations ksym_profile_fops = {
+ .open = ksym_profile_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
+#endif /* CONFIG_PROFILE_KSYM_TRACER */
-__init static int ksym_tracer_stat_init(void)
+__init static int init_ksym_trace(void)
{
- int ret;
+ struct dentry *d_tracer;
- ret = register_stat_tracer(&ksym_tracer_stats);
- if (ret) {
- printk(KERN_WARNING "Warning: could not register "
- "ksym tracer stats\n");
- return 1;
- }
+ d_tracer = tracing_init_dentry();
- return 0;
+ trace_create_file("ksym_trace_filter", 0644, d_tracer,
+ NULL, &ksym_tracing_fops);
+
+#ifdef CONFIG_PROFILE_KSYM_TRACER
+ trace_create_file("ksym_profile", 0444, d_tracer,
+ NULL, &ksym_profile_fops);
+#endif
+
+ return register_tracer(&ksym_tracer);
}
-fs_initcall(ksym_tracer_stat_init);
-#endif /* CONFIG_PROFILE_KSYM_TRACER */
+device_initcall(init_ksym_trace);
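The rewrite above drops the trace_stat iterator (stat_start/stat_next/
stat_show) in favour of a plain seq_file: single_open() arranges for one
show() callback to render the whole ksym_profile file under
rcu_read_lock(). A minimal sketch of that pattern (file name and
callbacks hypothetical):

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/fs.h>

static int my_show(struct seq_file *m, void *v)
{
	seq_puts(m, "whole report rendered in a single pass\n");
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, inode->i_private);
}

static const struct file_operations my_fops = {
	.open		= my_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init my_init(void)
{
	/* "my_report" is a made-up file name for illustration */
	debugfs_create_file("my_report", 0444, NULL, NULL, &my_fops);
	return 0;
}
device_initcall(my_init);

Because single_open() renders the report in one pass, the incremental
start/next callbacks disappear, which accounts for most of this file's
net deletion in the diffstat.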