author     Ingo Molnar <mingo@elte.hu>  2010-05-20 14:38:55 +0200
committer  Ingo Molnar <mingo@elte.hu>  2010-05-20 14:38:55 +0200
commit     dfacc4d6c98b89609250269f518c1f54c30454ef (patch)
tree       e7effbee7bdc85d18f7b26ab9cb5c9f700d1481a /include/trace
parent     f869097e884d8cb65b2bb7831ca57b7dffb66fdd (diff)
parent     85cb68b27c428d477169f3aa46c72dba103a17bd (diff)
Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/define_trace.h  |  5
-rw-r--r--  include/trace/events/module.h | 18
-rw-r--r--  include/trace/events/napi.h   | 10
-rw-r--r--  include/trace/events/sched.h  | 32
-rw-r--r--  include/trace/events/signal.h | 52
-rw-r--r--  include/trace/ftrace.h        | 33
6 files changed, 75 insertions, 75 deletions
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 5acfb1eb4df9..1dfab5401511 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -65,6 +65,10 @@
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/* Make all open coded DECLARE_TRACE nops */
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args)
+
#ifdef CONFIG_EVENT_TRACING
#include <trace/ftrace.h>
#endif
@@ -75,6 +79,7 @@
#undef DEFINE_EVENT
#undef DEFINE_EVENT_PRINT
#undef TRACE_HEADER_MULTI_READ
+#undef DECLARE_TRACE
/* Only undef what we defined in this file */
#ifdef UNDEF_TRACE_INCLUDE_FILE
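
The two hunks above let an open coded DECLARE_TRACE coexist with the event-tracing machinery: define_trace.h re-reads the trace header several times under TRACE_HEADER_MULTI_READ, and the new empty definition keeps a plain tracepoint declaration from being expanded again during the <trace/ftrace.h> passes (the trailing #undef restores the normal definition for the next header). As an illustration only (hypothetical tracepoint, not from this patch), a header processed by define_trace.h can now carry a line like:

	/*
	 * Declared alongside TRACE_EVENT()s in the same header; this expands to
	 * the usual trace_bar_tick() stub on the first read of the header and to
	 * nothing while <trace/ftrace.h> re-reads it for the event stages.
	 */
	DECLARE_TRACE(bar_tick,
		TP_PROTO(int cpu),
		TP_ARGS(cpu));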
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 4b0f48ba16a6..c7bb2f0482fe 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free,
TP_printk("%s", __get_str(name))
);
+#ifdef CONFIG_MODULE_UNLOAD
+/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
+
DECLARE_EVENT_CLASS(module_refcnt,
- TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+ TP_PROTO(struct module *mod, unsigned long ip),
- TP_ARGS(mod, ip, refcnt),
+ TP_ARGS(mod, ip),
TP_STRUCT__entry(
__field( unsigned long, ip )
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
TP_fast_assign(
__entry->ip = ip;
- __entry->refcnt = refcnt;
+ __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
__assign_str(name, mod->name);
),
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt,
DEFINE_EVENT(module_refcnt, module_get,
- TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+ TP_PROTO(struct module *mod, unsigned long ip),
- TP_ARGS(mod, ip, refcnt)
+ TP_ARGS(mod, ip)
);
DEFINE_EVENT(module_refcnt, module_put,
- TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+ TP_PROTO(struct module *mod, unsigned long ip),
- TP_ARGS(mod, ip, refcnt)
+ TP_ARGS(mod, ip)
);
+#endif /* CONFIG_MODULE_UNLOAD */
TRACE_EVENT(module_request,
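
Since the event class now computes the reference count itself from the module's per-cpu incs/decs counters, callers only pass the module and a return address. A simplified sketch of the caller side after this change (hedged, not the exact kernel/module.c code):

	#include <linux/module.h>
	#include <trace/events/module.h>

	static void example_grab_module(struct module *mod)
	{
		__this_cpu_inc(mod->refptr->incs);	/* refcount bookkeeping */
		trace_module_get(mod, _RET_IP_);	/* refcnt no longer passed in */
	}

The new #ifdef CONFIG_MODULE_UNLOAD guard matches this: mod->refptr (and the incs/decs counters the event now reads) only exist when module unloading is configured.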
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index a8989c4547e7..188deca2f3c7 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -1,4 +1,7 @@
-#ifndef _TRACE_NAPI_H_
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM napi
+
+#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NAPI_H_
#include <linux/netdevice.h>
@@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll,
TP_PROTO(struct napi_struct *napi),
TP_ARGS(napi));
-#endif
+#endif /* _TRACE_NAPI_H_ */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
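
napi.h keeps its open coded DECLARE_TRACE (napi_poll stays a bare tracepoint rather than a full TRACE_EVENT), but the header now follows the standard layout: a TRACE_SYSTEM name, the TRACE_HEADER_MULTI_READ guard, and a trailing include of define_trace.h outside the protection. Together with the define_trace.h change above, that lets the tracepoint be instantiated the usual way; a sketch of the defining translation unit, assuming the networking core keeps doing this in net/core/net-traces.c:

	/* Exactly one .c file defines the tracepoint body: */
	#define CREATE_TRACE_POINTS
	#include <trace/events/napi.h>	/* DEFINE_TRACE(napi_poll) happens here */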
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cfceb0b73e20..4f733ecea46e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret,
/*
* Tracepoint for waiting on task to unschedule:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
TRACE_EVENT(sched_wait_task,
- TP_PROTO(struct rq *rq, struct task_struct *p),
+ TP_PROTO(struct task_struct *p),
- TP_ARGS(rq, p),
+ TP_ARGS(p),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task,
/*
* Tracepoint for waking up a task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+ TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(rq, p, success),
+ TP_ARGS(p, success),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
);
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
- TP_ARGS(rq, p, success));
+ TP_PROTO(struct task_struct *p, int success),
+ TP_ARGS(p, success));
/*
* Tracepoint for waking up a new task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
- TP_PROTO(struct rq *rq, struct task_struct *p, int success),
- TP_ARGS(rq, p, success));
+ TP_PROTO(struct task_struct *p, int success),
+ TP_ARGS(p, success));
/*
* Tracepoint for task switches, performed by the scheduler:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- * but used by the latency tracer plugin. )
*/
TRACE_EVENT(sched_switch,
- TP_PROTO(struct rq *rq, struct task_struct *prev,
+ TP_PROTO(struct task_struct *prev,
struct task_struct *next),
- TP_ARGS(rq, prev, next),
+ TP_ARGS(prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
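
The rq argument, which the removed NOTE comments say existed only for the latency tracer plugin, is dropped from the prototypes, so every scheduler call site shrinks accordingly. A hedged sketch of the call shapes after the change (not a verbatim kernel/sched.c excerpt):

	/* wait, wake-up and context-switch call sites with the new signatures */
	trace_sched_wait_task(p);
	trace_sched_wakeup(p, success);
	trace_sched_wakeup_new(p, success);
	trace_sched_switch(prev, next);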
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index a510b75ac304..814566c99d29 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver,
__entry->sa_handler, __entry->sa_flags)
);
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-TRACE_EVENT(signal_overflow_fail,
+DECLARE_EVENT_CLASS(signal_queue_overflow,
TP_PROTO(int sig, int group, struct siginfo *info),
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail,
);
/**
+ * signal_overflow_fail - called when signal queue is overflow
+ * @sig: signal number
+ * @group: signal to process group or not (bool)
+ * @info: pointer to struct siginfo
+ *
+ * Kernel fails to generate 'sig' signal with 'info' siginfo, because
+ * siginfo queue is overflow, and the signal is dropped.
+ * 'group' is not 0 if the signal will be sent to a process group.
+ * 'sig' is always one of RT signals.
+ */
+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
+
+ TP_PROTO(int sig, int group, struct siginfo *info),
+
+ TP_ARGS(sig, group, info)
+);
+
+/**
* signal_lose_info - called when siginfo is lost
* @sig: signal number
* @group: signal to process group or not (bool)
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail,
* 'group' is not 0 if the signal will be sent to a process group.
* 'sig' is always one of non-RT signals.
*/
-TRACE_EVENT(signal_lose_info,
+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
TP_PROTO(int sig, int group, struct siginfo *info),
- TP_ARGS(sig, group, info),
-
- TP_STRUCT__entry(
- __field( int, sig )
- __field( int, group )
- __field( int, errno )
- __field( int, code )
- ),
-
- TP_fast_assign(
- __entry->sig = sig;
- __entry->group = group;
- TP_STORE_SIGINFO(__entry, info);
- ),
-
- TP_printk("sig=%d group=%d errno=%d code=%d",
- __entry->sig, __entry->group, __entry->errno, __entry->code)
+ TP_ARGS(sig, group, info)
);
+
#endif /* _TRACE_SIGNAL_H */
/* This part must be outside protection */
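
Folding the two events into a shared DECLARE_EVENT_CLASS removes the duplicated TP_STRUCT__entry/TP_fast_assign/TP_printk blocks while keeping the tracepoint names and signatures intact, so the call sites in kernel/signal.c stay as they are. Sketch of the (unchanged) call shape:

	/* both events keep the same three-argument signature */
	trace_signal_overflow_fail(sig, group, info);
	trace_signal_lose_info(sig, group, info);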
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0a29df092922..1016b2162935 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -154,9 +154,11 @@
*
* field = (typeof(field))entry;
*
- * p = get_cpu_var(ftrace_event_seq);
+ * p = &get_cpu_var(ftrace_event_seq);
* trace_seq_init(p);
- * ret = trace_seq_printf(s, <TP_printk> "\n");
+ * ret = trace_seq_printf(s, "%s: ", <call>);
+ * if (ret)
+ * ret = trace_seq_printf(s, <TP_printk> "\n");
* put_cpu();
* if (!ret)
* return TRACE_TYPE_PARTIAL_LINE;
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
*
* static void ftrace_raw_event_<call>(proto)
* {
+ * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
* struct ring_buffer *buffer;
* unsigned long irq_flags;
+ * int __data_size;
* int pc;
*
* local_save_flags(irq_flags);
* pc = preempt_count();
*
+ * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
* event = trace_current_buffer_lock_reserve(&buffer,
* event_<call>.id,
- * sizeof(struct ftrace_raw_<call>),
+ * sizeof(*entry) + __data_size,
* irq_flags, pc);
* if (!event)
* return;
* entry = ring_buffer_event_data(event);
*
- * <assign>; <-- Here we assign the entries by the __field and
- * __array macros.
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
*
- * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ * if (!filter_current_check_discard(buffer, event_call, entry, event))
+ * trace_current_buffer_unlock_commit(buffer,
+ * event, irq_flags, pc);
* }
*
* static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
* {
- * int ret;
- *
- * ret = register_trace_<call>(ftrace_raw_event_<call>);
- * if (!ret)
- * pr_info("event trace: Could not activate trace point "
- * "probe to <call>");
- * return ret;
+ * return register_trace_<call>(ftrace_raw_event_<call>);
* }
*
* static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
* .trace = ftrace_raw_output_<call>, <-- stage 2
* };
*
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
* static struct ftrace_event_call __used
* __attribute__((__aligned__(4)))
* __attribute__((section("_ftrace_events"))) event_<call> = {
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
* .raw_init = trace_event_raw_init,
* .regfunc = ftrace_reg_event_<call>,
* .unregfunc = ftrace_unreg_event_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * .define_fields = ftrace_define_fields_<call>,
* }
*
*/
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
return; \
entry = ring_buffer_event_data(event); \
\
- \
tstruct \
\
{ assign; } \
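
The ftrace.h hunks mostly update the long "stage" comments so they match the code that is actually generated: the per-event print_fmt string, the dynamic __data_size added to the ring-buffer reservation, the filter_current_check_discard() call before the commit, and the slimmed-down register function. For readers mapping the placeholders (<call>, <assign>, <TP_printk>) to something concrete, a minimal, hypothetical event definition looks like this:

	TRACE_EVENT(foo_bar,			/* <call> = foo_bar */

		TP_PROTO(int value),		/* <proto> */

		TP_ARGS(value),

		TP_STRUCT__entry(
			__field(int, value)	/* becomes struct ftrace_raw_foo_bar */
		),

		TP_fast_assign(
			__entry->value = value;	/* <assign> */
		),

		TP_printk("value=%d", __entry->value)	/* <TP_printk> */
	);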