path: root/kernel/perf_event.c
author	Frederic Weisbecker <fweisbec@gmail.com>	2010-06-30 23:03:51 +0200
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-08-19 01:30:59 +0200
commit	56962b4449af34070bb1994621ef4f0265eed4d8 (patch)
tree	b4c5dfee35d272c71cba80e75a51cb3e7070e430 /kernel/perf_event.c
parent	70791ce9ba68a5921c9905ef05d23f62a90bc10c (diff)
download	blackbird-op-linux-56962b4449af34070bb1994621ef4f0265eed4d8.tar.gz
	blackbird-op-linux-56962b4449af34070bb1994621ef4f0265eed4d8.zip
perf: Generalize some arch callchain code
- Most archs use one callchain buffer per cpu, except x86, which needs to deal with NMIs. Provide a default perf_callchain_buffer() implementation that x86 overrides.

- Centralize all the kernel/user regs handling and invoke the new arch handlers from there: perf_callchain_user() / perf_callchain_kernel(). This avoids duplicating the user_mode() and current->mm checks in every arch.

- Invert some parameters in the perf_callchain_*() helpers: entry to the left, regs to the right, following the traditional (dst, src) order.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Borislav Petkov <bp@amd64.org>
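For illustration, here is a minimal sketch of how an architecture could plug into the hooks described above. The file name, the helper foo_callchain_store(), and the unwinding comments are hypothetical; only the weak symbols perf_callchain_kernel() and perf_callchain_user() and the (entry, regs) argument order come from this patch, and instruction_pointer() is assumed to be provided by the arch's <asm/ptrace.h>.

/* Hypothetical arch/foo/kernel/perf_callchain.c -- illustration only. */
#include <linux/perf_event.h>
#include <asm/ptrace.h>

/* Record one address into the callchain buffer, bounded by the entry size. */
static void foo_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

/* Kernel-side unwind: entry first, regs second, per the new convention. */
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	foo_callchain_store(entry, instruction_pointer(regs));
	/* ... walk the kernel stack with the arch unwinder here ... */
}

/* User-side unwind: the core only calls this when user regs are available. */
void perf_callchain_user(struct perf_callchain_entry *entry,
			 struct pt_regs *regs)
{
	foo_callchain_store(entry, instruction_pointer(regs));
	/* ... copy return addresses from the user stack here ... */
}

Because the arch definitions are strong symbols, they override the __weak stubs in kernel/perf_event.c at link time; no registration call is needed.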
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c | 40
1 file changed, 38 insertions(+), 2 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c772a3d4000d..02efde6c8798 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2937,13 +2937,49 @@ void perf_event_do_pending(void)
 	__perf_pending_run();
 }
 
+DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
 /*
  * Callchain support -- arch specific
  */
 
-__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+__weak struct perf_callchain_entry *perf_callchain_buffer(void)
 {
-	return NULL;
+	return &__get_cpu_var(perf_callchain_entry);
+}
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+				  struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+				struct pt_regs *regs)
+{
+}
+
+static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+	struct perf_callchain_entry *entry;
+
+	entry = perf_callchain_buffer();
+	if (!entry)
+		return NULL;
+
+	entry->nr = 0;
+
+	if (!user_mode(regs)) {
+		perf_callchain_kernel(entry, regs);
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+
+	if (regs)
+		perf_callchain_user(entry, regs);
+
+	return entry;
 }
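The weak perf_callchain_buffer() above hands out a single per-cpu buffer, which is enough for most architectures; x86 is expected to override it because an NMI can fire while the interrupt-context buffer is already in use. A simplified, hypothetical sketch of such an override follows; the per-cpu variable names are illustrative, not the actual x86 ones.

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

/* Separate per-cpu buffers so an NMI never clobbers an in-flight capture. */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain_nmi_entry);

struct perf_callchain_entry *perf_callchain_buffer(void)
{
	if (in_nmi())
		return &__get_cpu_var(callchain_nmi_entry);

	return &__get_cpu_var(callchain_irq_entry);
}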