Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_sysprof.c | 80
1 file changed, 76 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index ba55b871b3d9..b1137c11ef8b 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
- *
+ * Copyright (C) 2004, 2005, Soeren Sandmann
*/
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
@@ -11,13 +11,17 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
+#include <linux/irq.h>
#include <linux/fs.h>
#include "trace.h"
-static struct trace_array *ctx_trace;
+static struct trace_array *sysprof_trace;
static int __read_mostly tracer_enabled;
+/*
+ * 10 msecs for now:
+ */
static const unsigned long sample_period = 1000000;
/*
@@ -25,10 +29,78 @@ static const unsigned long sample_period = 1000000;
*/
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
+struct stack_frame {
+ const void __user *next_fp;
+ unsigned long return_address;
+};
+
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+ if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+ return 0;
+
+ if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+ return 0;
+
+ return 1;
+}
+
+#define SYSPROF_MAX_ADDRESSES 512
+
+static void timer_notify(struct pt_regs *regs, int cpu)
+{
+ const void __user *frame_pointer;
+ struct trace_array_cpu *data;
+ struct stack_frame frame;
+ struct trace_array *tr;
+ int is_user;
+ int i;
+
+ if (!regs)
+ return;
+
+ tr = sysprof_trace;
+ data = tr->data[cpu];
+ is_user = user_mode(regs);
+
+ if (!current || current->pid == 0)
+ return;
+
+ if (is_user && current->state != TASK_RUNNING)
+ return;
+
+ if (!is_user) {
+ /* kernel */
+ ftrace(tr, data, current->pid, 1, 0);
+ return;
+
+ }
+
+ trace_special(tr, data, 0, current->pid, regs->ip);
+
+ frame_pointer = (void __user *)regs->bp;
+
+ for (i = 0; i < SYSPROF_MAX_ADDRESSES; i++) {
+ if (!copy_stack_frame(frame_pointer, &frame))
+ break;
+ if ((unsigned long)frame_pointer < regs->sp)
+ break;
+
+ trace_special(tr, data, 1, frame.return_address,
+ (unsigned long)frame_pointer);
+ frame_pointer = frame.next_fp;
+ }
+
+ trace_special(tr, data, 2, current->pid, i);
+
+ if (i == SYSPROF_MAX_ADDRESSES)
+ trace_special(tr, data, -1, -1, -1);
+}
+
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
/* trace here */
- panic_timeout++;
+ timer_notify(get_irq_regs(), smp_processor_id());
hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
@@ -100,7 +172,7 @@ static notrace void stop_stack_trace(struct trace_array *tr)
static notrace void stack_trace_init(struct trace_array *tr)
{
- ctx_trace = tr;
+ sysprof_trace = tr;
if (tr->ctrl)
start_stack_trace(tr);
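
A note on the new unwinder (illustrative, not part of the commit): timer_notify()
samples the user stack by following saved frame pointers, copying one
struct stack_frame (next_fp, return_address) per step with
__copy_from_user_inatomic() and stopping once the chain drops below regs->sp or
SYSPROF_MAX_ADDRESSES frames have been recorded. The sketch below performs the
same kind of frame-pointer walk in user space on its own stack; walk_frames(),
MAX_FRAMES and the level*() helpers are hypothetical names used only for this
example, and it assumes an x86-64 binary built with frame pointers kept.

/*
 * User-space sketch of the frame-pointer walk that timer_notify() performs
 * on the sampled task's stack.  Assumes an x86-64 binary built with
 * -fno-omit-frame-pointer, so every frame begins with the saved caller
 * frame pointer followed by the return address -- the same layout as the
 * patch's struct stack_frame.
 */
#include <stdio.h>

struct stack_frame {
	const struct stack_frame *next_fp;	/* saved caller frame pointer */
	unsigned long return_address;		/* saved return address */
};

#define MAX_FRAMES 16	/* illustrative; the patch uses SYSPROF_MAX_ADDRESSES (512) */

static void __attribute__((noinline)) walk_frames(void)
{
	const struct stack_frame *fp = __builtin_frame_address(0);
	int i;

	for (i = 0; i < MAX_FRAMES && fp; i++) {
		/* Like the "frame_pointer < regs->sp" test in timer_notify(),
		 * stop once the chain stops moving toward older frames. */
		if (fp->next_fp && fp->next_fp <= fp)
			break;
		printf("frame %2d: return address %#lx\n", i, fp->return_address);
		fp = fp->next_fp;
	}
}

static void __attribute__((noinline)) level2(void) { walk_frames(); }
static void __attribute__((noinline)) level1(void) { level2(); }

int main(void)
{
	level1();
	return 0;
}

Build with "gcc -O0 -fno-omit-frame-pointer" so the compiler keeps the frame
pointer chain the walk relies on; without frame pointers, next_fp no longer
points at the caller's frame and the walk terminates early or reads junk,
which is why the kernel side guards every read with access_ok() and
__copy_from_user_inatomic().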