Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/power.h   |  3
-rw-r--r--  include/trace/events/printk.h  | 41
-rw-r--r--  include/trace/events/rcu.h     | 63
-rw-r--r--  include/trace/events/sched.h   | 77
-rw-r--r--  include/trace/events/signal.h  | 85
5 files changed, 131 insertions, 138 deletions
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 1bcc2a8c00e2..cae9a94f025d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -65,7 +65,6 @@ TRACE_EVENT(machine_suspend,
TP_printk("state=%lu", (unsigned long)__entry->state)
);
-/* This code will be removed after deprecation time exceeded (2.6.41) */
#ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED
/*
@@ -151,6 +150,8 @@ enum {
events get removed */
static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
static inline void trace_power_end(u64 cpuid) {};
+static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
+static inline void trace_power_end_rcuidle(u64 cpuid) {};
static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
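The two added stubs mirror the existing no-op definitions so callers of the deprecated power events keep compiling when those events are not built in. The _rcuidle variants matter because the idle path runs with RCU not watching, where the plain trace_power_start()/trace_power_end() calls must not be used. A minimal hedged sketch of such a call site; the function name and the way state/cpu are obtained are illustrative and not part of this hunk:

#include <trace/events/power.h>

/*
 * Hedged sketch only: assumes the deprecated POWER_CSTATE event type
 * and a cpuidle-style helper; neither appears in this patch.
 */
static void example_enter_idle_state(u64 state, u64 cpu)
{
	/* RCU is not watching here, so use the _rcuidle variants. */
	trace_power_start_rcuidle(POWER_CSTATE, state, cpu);

	/* ... architecture-specific low-power entry ... */

	trace_power_end_rcuidle(cpu);
}

With CONFIG_EVENT_POWER_TRACING_DEPRECATED unset, the new stubs make both calls compile away to nothing.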
diff --git a/include/trace/events/printk.h b/include/trace/events/printk.h
new file mode 100644
index 000000000000..94ec79cc011a
--- /dev/null
+++ b/include/trace/events/printk.h
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PRINTK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(console,
+ TP_PROTO(const char *log_buf, unsigned start, unsigned end,
+ unsigned log_buf_len),
+
+ TP_ARGS(log_buf, start, end, log_buf_len),
+
+ TP_CONDITION(start != end),
+
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, end - start + 1)
+ ),
+
+ TP_fast_assign(
+ if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
+ memcpy(__get_dynamic_array(msg),
+ log_buf + (start & (log_buf_len - 1)),
+ log_buf_len - (start & (log_buf_len - 1)));
+ memcpy((char *)__get_dynamic_array(msg) +
+ log_buf_len - (start & (log_buf_len - 1)),
+ log_buf, end & (log_buf_len - 1));
+ } else
+ memcpy(__get_dynamic_array(msg),
+ log_buf + (start & (log_buf_len - 1)),
+ end - start);
+ ((char *)__get_dynamic_array(msg))[end - start] = 0;
+ ),
+
+ TP_printk("%s", __get_str(msg))
+);
+#endif /* _TRACE_PRINTK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
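The interesting part of the new console event is the TP_fast_assign body: start and end are free-running indices into the circular printk log buffer, so they are masked with (log_buf_len - 1), which assumes log_buf_len is a power of two, and the copy is split in two when the range wraps past the end of the buffer. A user-space sketch of the same copy, with hypothetical names, purely to illustrate the wrap handling:

#include <string.h>

/*
 * Hedged sketch: mirrors the wrap-around copy in TP_fast_assign above.
 * dst must have room for end - start + 1 bytes, matching the
 * __dynamic_array(char, msg, end - start + 1) reservation.
 */
static void copy_console_range(char *dst, const char *log_buf,
			       unsigned start, unsigned end,
			       unsigned log_buf_len)
{
	unsigned s = start & (log_buf_len - 1);
	unsigned e = end & (log_buf_len - 1);

	if (s > e) {
		/* Range wraps: copy the tail of the buffer, then the head. */
		memcpy(dst, log_buf + s, log_buf_len - s);
		memcpy(dst + (log_buf_len - s), log_buf, e);
	} else {
		memcpy(dst, log_buf + s, end - start);
	}
	dst[end - start] = '\0';
}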
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d2d88bed891b..337099783f37 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -313,19 +313,22 @@ TRACE_EVENT(rcu_prep_idle,
/*
* Tracepoint for the registration of a single RCU callback function.
* The first argument is the type of RCU, the second argument is
- * a pointer to the RCU callback itself, and the third element is the
- * new RCU callback queue length for the current CPU.
+ * a pointer to the RCU callback itself, the third element is the
+ * number of lazy callbacks queued, and the fourth element is the
+ * total number of callbacks queued.
*/
TRACE_EVENT(rcu_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+ long qlen),
- TP_ARGS(rcuname, rhp, qlen),
+ TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
TP_STRUCT__entry(
__field(char *, rcuname)
__field(void *, rhp)
__field(void *, func)
+ __field(long, qlen_lazy)
__field(long, qlen)
),
@@ -333,11 +336,13 @@ TRACE_EVENT(rcu_callback,
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->func = rhp->func;
+ __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
),
- TP_printk("%s rhp=%p func=%pf %ld",
- __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen)
+ TP_printk("%s rhp=%p func=%pf %ld/%ld",
+ __entry->rcuname, __entry->rhp, __entry->func,
+ __entry->qlen_lazy, __entry->qlen)
);
/*
@@ -345,20 +350,21 @@ TRACE_EVENT(rcu_callback,
* kfree() form. The first argument is the RCU type, the second argument
* is a pointer to the RCU callback, the third argument is the offset
* of the callback within the enclosing RCU-protected data structure,
- * and the fourth argument is the new RCU callback queue length for the
- * current CPU.
+ * the fourth argument is the number of lazy callbacks queued, and the
+ * fifth argument is the total number of callbacks queued.
*/
TRACE_EVENT(rcu_kfree_callback,
TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
- long qlen),
+ long qlen_lazy, long qlen),
- TP_ARGS(rcuname, rhp, offset, qlen),
+ TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
TP_STRUCT__entry(
__field(char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
+ __field(long, qlen_lazy)
__field(long, qlen)
),
@@ -366,41 +372,45 @@ TRACE_EVENT(rcu_kfree_callback,
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->offset = offset;
+ __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
),
- TP_printk("%s rhp=%p func=%ld %ld",
+ TP_printk("%s rhp=%p func=%ld %ld/%ld",
__entry->rcuname, __entry->rhp, __entry->offset,
- __entry->qlen)
+ __entry->qlen_lazy, __entry->qlen)
);
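With the extra qlen_lazy argument, callers report how many of the queued callbacks merely free memory (and so can be deferred) alongside the total. A hedged sketch of what an updated __call_rcu()-style call site looks like; the rsp->name, rdp->qlen_lazy and rdp->qlen names are assumptions about the tree-RCU implementation and are not shown in this header diff:

/* Hedged sketch of an updated callback-registration call site. */
if (__is_kfree_rcu_offset((unsigned long)func))
	trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
				 rdp->qlen_lazy, rdp->qlen);
else
	trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);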
/*
* Tracepoint for marking the beginning rcu_do_batch, performed to start
* RCU callback invocation. The first argument is the RCU flavor,
- * the second is the total number of callbacks (including those that
- * are not yet ready to be invoked), and the third argument is the
- * current RCU-callback batch limit.
+ * the second is the number of lazy callbacks queued, the third is
+ * the total number of callbacks queued, and the fourth argument is
+ * the current RCU-callback batch limit.
*/
TRACE_EVENT(rcu_batch_start,
- TP_PROTO(char *rcuname, long qlen, int blimit),
+ TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
- TP_ARGS(rcuname, qlen, blimit),
+ TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
TP_STRUCT__entry(
__field(char *, rcuname)
+ __field(long, qlen_lazy)
__field(long, qlen)
__field(int, blimit)
),
TP_fast_assign(
__entry->rcuname = rcuname;
+ __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
__entry->blimit = blimit;
),
- TP_printk("%s CBs=%ld bl=%d",
- __entry->rcuname, __entry->qlen, __entry->blimit)
+ TP_printk("%s CBs=%ld/%ld bl=%d",
+ __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
+ __entry->blimit)
);
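A matching rcu_do_batch()-style caller now passes the lazy count ahead of the total, so the trace line reads "<rcuname> CBs=<lazy>/<total> bl=<limit>". Sketch only; the rsp, rdp and bl names are assumed from the tree-RCU implementation and are not part of this diff:

/* Hedged sketch of the updated batch-start call. */
trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);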
/*
@@ -531,16 +541,21 @@ TRACE_EVENT(rcu_torture_read,
#else /* #ifdef CONFIG_RCU_TRACE */
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+ qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+ grplo, grphi, gp_tasks) do { } \
+ while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
#define trace_rcu_prep_idle(reason) do { } while (0)
-#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
-#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
-#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
+#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
+ do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
+ do { } while (0)
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6ba596b07a72..fbc7b1ad929b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/tracepoint.h>
+#include <linux/binfmts.h>
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
@@ -276,6 +277,32 @@ TRACE_EVENT(sched_process_fork,
);
/*
+ * Tracepoint for exec:
+ */
+TRACE_EVENT(sched_process_exec,
+
+ TP_PROTO(struct task_struct *p, pid_t old_pid,
+ struct linux_binprm *bprm),
+
+ TP_ARGS(p, old_pid, bprm),
+
+ TP_STRUCT__entry(
+ __string( filename, bprm->filename )
+ __field( pid_t, pid )
+ __field( pid_t, old_pid )
+ ),
+
+ TP_fast_assign(
+ __assign_str(filename, bprm->filename);
+ __entry->pid = p->pid;
+	__entry->old_pid = old_pid;
+ ),
+
+ TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
+ __entry->pid, __entry->old_pid)
+);
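The old_pid argument lets the event record the PID the task had before exec, which can differ from p->pid when a non-leader thread execs and takes over the thread group. A hedged sketch of an exec-path caller; the surrounding function and the point where old_pid is captured are assumptions, not part of this header diff:

/* Hedged sketch of an exec-path caller (fs/exec.c style). */
pid_t old_pid = current->pid;	/* capture before de_thread() may change it */

retval = search_binary_handler(bprm, regs);
if (retval >= 0)
	trace_sched_process_exec(current, old_pid, bprm);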
+
+/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
@@ -370,56 +397,6 @@ TRACE_EVENT(sched_stat_runtime,
(unsigned long long)__entry->vruntime)
);
-#ifdef CREATE_TRACE_POINTS
-static inline u64 trace_get_sleeptime(struct task_struct *tsk)
-{
-#ifdef CONFIG_SCHEDSTATS
- u64 block, sleep;
-
- block = tsk->se.statistics.block_start;
- sleep = tsk->se.statistics.sleep_start;
- tsk->se.statistics.block_start = 0;
- tsk->se.statistics.sleep_start = 0;
-
- return block ? block : sleep ? sleep : 0;
-#else
- return 0;
-#endif
-}
-#endif
-
-/*
- * Tracepoint for accounting sleeptime (time the task is sleeping
- * or waiting for I/O).
- */
-TRACE_EVENT(sched_stat_sleeptime,
-
- TP_PROTO(struct task_struct *tsk, u64 now),
-
- TP_ARGS(tsk, now),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( u64, sleeptime )
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->sleeptime = trace_get_sleeptime(tsk);
- __entry->sleeptime = __entry->sleeptime ?
- now - __entry->sleeptime : 0;
- )
- TP_perf_assign(
- __perf_count(__entry->sleeptime);
- ),
-
- TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
- __entry->comm, __entry->pid,
- (unsigned long long)__entry->sleeptime)
-);
-
/*
* Tracepoint for showing priority inheritance modifying a tasks
* priority.
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index 17df43464df0..39a8a430d90f 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -23,11 +23,23 @@
} \
} while (0)
+#ifndef TRACE_HEADER_MULTI_READ
+enum {
+ TRACE_SIGNAL_DELIVERED,
+ TRACE_SIGNAL_IGNORED,
+ TRACE_SIGNAL_ALREADY_PENDING,
+ TRACE_SIGNAL_OVERFLOW_FAIL,
+ TRACE_SIGNAL_LOSE_INFO,
+};
+#endif
+
/**
* signal_generate - called when a signal is generated
* @sig: signal number
* @info: pointer to struct siginfo
* @task: pointer to struct task_struct
+ * @group: shared or private
+ * @result: TRACE_SIGNAL_*
*
* Current process sends a 'sig' signal to 'task' process with
* 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
@@ -37,9 +49,10 @@
*/
TRACE_EVENT(signal_generate,
- TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
+ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
+ int group, int result),
- TP_ARGS(sig, info, task),
+ TP_ARGS(sig, info, task, group, result),
TP_STRUCT__entry(
__field( int, sig )
@@ -47,6 +60,8 @@ TRACE_EVENT(signal_generate,
__field( int, code )
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
+ __field( int, group )
+ __field( int, result )
),
TP_fast_assign(
@@ -54,11 +69,14 @@ TRACE_EVENT(signal_generate,
TP_STORE_SIGINFO(__entry, info);
memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
__entry->pid = task->pid;
+ __entry->group = group;
+ __entry->result = result;
),
- TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
+ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
__entry->sig, __entry->errno, __entry->code,
- __entry->comm, __entry->pid)
+ __entry->comm, __entry->pid, __entry->group,
+ __entry->result)
);
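Folding group and result into signal_generate means one event now covers every outcome of signal generation. A hedged sketch of an updated __send_signal()-style caller; the helper names (prepare_signal, legacy_queue) and the locals sig, info, t, group, pending and from_ancestor_ns are assumptions about kernel/signal.c, not part of this header diff:

/* Hedged sketch: every exit path reports its outcome once. */
int ret = 0, result;

result = TRACE_SIGNAL_IGNORED;
if (!prepare_signal(sig, t, from_ancestor_ns))
	goto out;

result = TRACE_SIGNAL_ALREADY_PENDING;
if (legacy_queue(pending, sig))
	goto out;

result = TRACE_SIGNAL_DELIVERED;
/* ... allocate the siginfo queue entry and wake the target ... */
out:
	trace_signal_generate(sig, info, t, group, result);
	return ret;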
/**
@@ -101,65 +119,6 @@ TRACE_EVENT(signal_deliver,
__entry->sa_handler, __entry->sa_flags)
);
-DECLARE_EVENT_CLASS(signal_queue_overflow,
-
- TP_PROTO(int sig, int group, struct siginfo *info),
-
- TP_ARGS(sig, group, info),
-
- TP_STRUCT__entry(
- __field( int, sig )
- __field( int, group )
- __field( int, errno )
- __field( int, code )
- ),
-
- TP_fast_assign(
- __entry->sig = sig;
- __entry->group = group;
- TP_STORE_SIGINFO(__entry, info);
- ),
-
- TP_printk("sig=%d group=%d errno=%d code=%d",
- __entry->sig, __entry->group, __entry->errno, __entry->code)
-);
-
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
-
- TP_PROTO(int sig, int group, struct siginfo *info),
-
- TP_ARGS(sig, group, info)
-);
-
-/**
- * signal_lose_info - called when siginfo is lost
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo
- * queue is overflow.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of non-RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
-
- TP_PROTO(int sig, int group, struct siginfo *info),
-
- TP_ARGS(sig, group, info)
-);
-
#endif /* _TRACE_SIGNAL_H */
/* This part must be outside protection */
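The signal_overflow_fail and signal_lose_info events removed above are not lost information: both conditions are now reported through the result field of signal_generate, in the same __send_signal()-style path sketched earlier. A hedged sketch of how the two cases map onto result codes in the queue-allocation step; the exact conditions are assumptions about kernel/signal.c, not part of this diff:

/* Hedged sketch of the queue-allocation failure handling. */
struct sigqueue *q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);

if (!q && !is_si_special(info)) {
	if (sig >= SIGRTMIN && info->si_code != SI_USER) {
		/* RT signal, queue full: previously signal_overflow_fail. */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto out;
	}
	/* Signal still sent, siginfo dropped: previously signal_lose_info. */
	result = TRACE_SIGNAL_LOSE_INFO;
}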