author | Petri Gynther <pgynther@google.com> | 2014-07-23 22:55:02 -0700
committer | Ralf Baechle <ralf@linux-mips.org> | 2014-07-30 20:37:42 +0200
commit | dce0e7d54a710400c0056d86d0f0ed972133b25d (patch)
tree | 09c4e4a9ff99b1d11150e62c21f519264513254c /arch/mips/kernel/ftrace.c
parent | b1442d39fac2fcfbe6a4814979020e993ca59c9e (diff)
MIPS: ftrace: Fix dynamic tracing of kernel modules
Dynamic tracing of kernel modules is broken on 32-bit MIPS. With modules
loaded, the kernel crashes when dynamic tracing is enabled with:
cd /sys/kernel/debug/tracing
echo > set_ftrace_filter
echo function > current_tracer
1) arch/mips/kernel/ftrace.c
When the kernel boots, or when a module is initialized, ftrace_make_nop()
modifies every _mcount call site to eliminate the ftrace overhead.
However, when ftrace is later enabled for a call site, ftrace_make_call()
does not currently restore the _mcount call correctly for module call sites.
Added ftrace_modify_code_2r() and modified ftrace_make_call() to fix this.
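For reference, the dispatch that the patch introduces in ftrace_make_call() can be
sketched as below (a simplified rendering of the corresponding hunk in the diff at the
end of this page; all names come from the patch, and fault handling plus icache
flushing live inside the ftrace_modify_code* helpers). A built-in call site only needs
the single jal restored, while a 32-bit module call site must have both words of its
"la v1, _mcount" pair rewritten.

```c
/*
 * Simplified sketch of the reworked ftrace_make_call() -- see the diff
 * below for the actual change; error handling is inside the helpers.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/* Built-in code: re-insert "jal ftrace_caller".
	 * Module code: first word of the "la v1, _mcount" pair. */
	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	/* 64-bit keeps the one-word update used before this patch. */
	return ftrace_modify_code(ip, new);
#else
	/* 32-bit: also rewrite the second word -- a nop for built-in code,
	 * the low-half addiu of the la pair for module code. */
	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
				     INSN_NOP : insn_la_mcount[1]);
#endif
}
```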
2) arch/mips/kernel/mcount.S
The _mcount assembly routine is supposed to have the caller's _mcount call site
address in register a0. However, a0 is currently not calculated correctly for
module call sites. a0 should be (ra - 20) or (ra - 24), depending on whether
the kernel was built with KBUILD_MCOUNT_RA_ADDRESS or not.
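To make the offsets concrete: the call site address is that of the lui at the start of
the 32-bit module stub, while ra points just past the jalr's branch delay slot, so the
distance is five or six 4-byte instructions depending on KBUILD_MCOUNT_RA_ADDRESS. The
layout in the comment below is taken from the comment block in the diff; the helper
function itself is purely illustrative (the real fix is assembly in mcount.S).

```c
/*
 * Illustrative only -- the actual fix is in arch/mips/kernel/mcount.S.
 * Layout of a 32-bit module call site with KBUILD_MCOUNT_RA_ADDRESS:
 *
 *   site+0   lui   v1, hi16(_mcount)      <-- address wanted in a0
 *   site+4   addiu v1, v1, lo16(_mcount)
 *   site+8   move  at, ra
 *   site+12  move  $12, ra_address
 *   site+16  jalr  v1
 *   site+20  sub   sp, sp, 8              (delay slot)
 *   site+24                               <-- ra points here
 *
 * Without KBUILD_MCOUNT_RA_ADDRESS the "move $12, ra_address" word is
 * absent and ra lands at site+20 instead.
 */
static unsigned long mcount_call_site(unsigned long ra)	/* hypothetical helper */
{
#ifdef KBUILD_MCOUNT_RA_ADDRESS
	return ra - 24;		/* six instructions x 4 bytes */
#else
	return ra - 20;		/* five instructions x 4 bytes */
#endif
}
```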
This fix has been tested on a Broadcom BMIPS5000 processor. Dynamic tracing
now works for both built-in functions and module functions.
Signed-off-by: Petri Gynther <pgynther@google.com>
Cc: linux-mips@linux-mips.org
Cc: rostedt@goodmis.org
Cc: alcooperx@gmail.com
Cc: cminyard@mvista.com
Patchwork: https://patchwork.linux-mips.org/patch/7476/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel/ftrace.c')
-rw-r--r-- | arch/mips/kernel/ftrace.c | 56
1 file changed, 47 insertions, 9 deletions
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 60e7e5e45af1..2a72208e319c 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -63,7 +63,7 @@ static inline int in_kernel_space(unsigned long ip)
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
 
 static unsigned int insn_jal_ftrace_caller __read_mostly;
-static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_la_mcount[2] __read_mostly;
 static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
 
 static inline void ftrace_dyn_arch_init_insns(void)
@@ -71,10 +71,10 @@ static inline void ftrace_dyn_arch_init_insns(void)
 	u32 *buf;
 	unsigned int v1;
 
-	/* lui v1, hi16_mcount */
+	/* la v1, _mcount */
 	v1 = 3;
-	buf = (u32 *)&insn_lui_v1_hi16_mcount;
-	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+	buf = (u32 *)&insn_la_mcount[0];
+	UASM_i_LA(&buf, v1, MCOUNT_ADDR);
 
 	/* jal (ftrace_caller + 8), jump over the first two instruction */
 	buf = (u32 *)&insn_jal_ftrace_caller;
@@ -111,14 +111,47 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
 				unsigned int new_code2)
 {
 	int faulted;
+	mm_segment_t old_fs;
 
 	safe_store_code(new_code1, ip, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
-	safe_store_code(new_code2, ip + 4, faulted);
+
+	ip += 4;
+	safe_store_code(new_code2, ip, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
+
+	ip -= 4;
+	old_fs = get_fs();
+	set_fs(get_ds());
 	flush_icache_range(ip, ip + 8);
+	set_fs(old_fs);
+
+	return 0;
+}
+
+static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
+				 unsigned int new_code2)
+{
+	int faulted;
+	mm_segment_t old_fs;
+
+	ip += 4;
+	safe_store_code(new_code2, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+
+	ip -= 4;
+	safe_store_code(new_code1, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	flush_icache_range(ip, ip + 8);
+	set_fs(old_fs);
+
 	return 0;
 }
 #endif
@@ -130,13 +163,14 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
  *
  * move at, ra
  * jal _mcount		--> nop
+ *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
  *
  * 2. For modules:
  *
  * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
  *
  * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
- * addiu v1, v1, low_16bit_of_mcount
+ * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
  * move at, ra
  * move $12, ra_address
  * jalr v1
@@ -145,7 +179,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
  * 2.2 For the Other situations
  *
  * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
- * addiu v1, v1, low_16bit_of_mcount
+ * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
  * move at, ra
  * jalr v1
  *  nop | move $12, ra_address | sub sp, sp, 8
@@ -184,10 +218,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
-		insn_lui_v1_hi16_mcount;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 
+#ifdef CONFIG_64BIT
 	return ftrace_modify_code(ip, new);
+#else
+	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+						INSN_NOP : insn_la_mcount[1]);
+#endif
 }
 
 #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))