author | Mike Frysinger <vapier@gentoo.org> | 2009-06-09 07:25:09 -0400 |
---|---|---|
committer | Mike Frysinger <vapier@gentoo.org> | 2009-06-13 07:20:15 -0400 |
commit | 1c873be744410e26fb91ee9228c90adff6eabe15 (patch) | |
tree | ee326bbd1399e4562e4584b3c1b800c2cfdea285 /arch/blackfin/kernel/ftrace-entry.S | |
parent | 6fa68e7a7f0641e8b7f263141d600877cdc2b5f2 (diff) | |
Blackfin: initial support for ftrace
Just the basic ftrace support here -- mcount and the ftrace stub.
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
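This is the classic mcount-style hook: building with profiling enabled makes GCC emit a call to `__mcount` in every instrumented function, and `__mcount` dispatches through the global `ftrace_trace_function` pointer, which is left aimed at `ftrace_stub` (a bare return) while tracing is off. A minimal C sketch of that dispatch, using made-up `example_*` names rather than the kernel's real symbols, might look like this:

```c
/*
 * Illustrative sketch only -- not part of the patch.  It mirrors the
 * dispatch that __mcount performs: compare the installed hook against
 * the stub and skip the call entirely when tracing is disabled.
 */
typedef void (*example_trace_func_t)(unsigned long frompc, unsigned long selfpc);

static void example_stub(unsigned long frompc, unsigned long selfpc)
{
	/* tracing disabled: do nothing, mirroring _ftrace_stub's bare rts */
}

static example_trace_func_t example_trace_function = example_stub;

static void example_mcount(unsigned long frompc, unsigned long selfpc)
{
	/* cheap path when no tracer is installed */
	if (example_trace_function != example_stub)
		example_trace_function(frompc, selfpc);
}
```

The assembly below performs this comparison before spilling the remaining argument registers, so an instrumented kernel with no tracer installed pays only a few instructions per function call.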
Diffstat (limited to 'arch/blackfin/kernel/ftrace-entry.S')
-rw-r--r-- | arch/blackfin/kernel/ftrace-entry.S | 72 |
1 file changed, 72 insertions, 0 deletions
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
new file mode 100644
index 000000000000..ce71487b515f
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -0,0 +1,72 @@
+/*
+ * mcount and friends -- ftrace stuff
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+.text
+
+/* GCC will have called us before setting up the function prologue, so we
+ * can clobber the normal scratch registers, but we need to make sure to
+ * save/restore the registers used for argument passing (R0-R2) in case
+ * the profiled function is using them. With data registers, R3 is the
+ * only one we can blow away. With pointer registers, we have P0-P2.
+ *
+ * Upon entry, the RETS will point to the top of the current profiled
+ * function. And since GCC setup the frame for us, the previous function
+ * will be waiting there. mmmm pie.
+ */
+ENTRY(__mcount)
+	/* save third function arg early so we can do testing below */
+	[--sp] = r2;
+
+	/* load the function pointer to the tracer */
+	p0.l = _ftrace_trace_function;
+	p0.h = _ftrace_trace_function;
+	r3 = [p0];
+
+	/* optional micro optimization: don't call the stub tracer */
+	r2.l = _ftrace_stub;
+	r2.h = _ftrace_stub;
+	cc = r2 == r3;
+	if ! cc jump .Ldo_trace;
+
+	r2 = [sp++];
+	rts;
+
+.Ldo_trace:
+
+	/* save first/second function arg and the return register */
+	[--sp] = r0;
+	[--sp] = r1;
+	[--sp] = rets;
+
+	/* setup the tracer function */
+	p0 = r3;
+
+	/* tracer(ulong frompc, ulong selfpc):
+	 *  frompc: the pc that did the call to ...
+	 *  selfpc: ... this location
+	 * the selfpc itself will need adjusting for the mcount call
+	 */
+	r1 = rets;
+	r0 = [fp + 4];
+	r1 += -MCOUNT_INSN_SIZE;
+
+	/* call the tracer */
+	call (p0);
+
+	/* restore state and get out of dodge */
+	rets = [sp++];
+	r1 = [sp++];
+	r0 = [sp++];
+	r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+	rts;
+ENDPROC(__mcount)
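The comment around `call (p0)` documents the argument convention: `frompc` is loaded from the profiled function's frame (`[fp + 4]`, where the caller's return address sits in the frame GCC already set up) and `selfpc` is `RETS` adjusted down by `MCOUNT_INSN_SIZE` to compensate for the mcount call itself. A hedged sketch of a callback written against that convention (the name and the printout are illustrative, not part of the patch) could be:

```c
#include <linux/kernel.h>

/*
 * Hypothetical callback following the tracer(frompc, selfpc) convention
 * documented in ftrace-entry.S: selfpc identifies the instrumented
 * function (already adjusted by MCOUNT_INSN_SIZE), frompc its caller.
 */
static void example_callback(unsigned long frompc, unsigned long selfpc)
{
	pr_debug("%pS called from %pS\n", (void *)selfpc, (void *)frompc);
}
```

Preserving R0-R2 around the call matters because the profiled function has not yet consumed its own arguments; the stub therefore saves exactly the registers named in the comment at the top of the file and nothing more.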