Diffstat (limited to 'arch/mips/kernel')
-rw-r--r-- | arch/mips/kernel/Makefile | 3
-rw-r--r-- | arch/mips/kernel/asm-offsets.c | 1
-rw-r--r-- | arch/mips/kernel/cevt-r4k.c | 5
-rw-r--r-- | arch/mips/kernel/cpu-bugs64.c | 2
-rw-r--r-- | arch/mips/kernel/cpu-probe.c | 23
-rw-r--r-- | arch/mips/kernel/ftrace.c | 184
-rw-r--r-- | arch/mips/kernel/kgdb.c | 211
-rw-r--r-- | arch/mips/kernel/kprobes.c | 557
-rw-r--r-- | arch/mips/kernel/mcount.S | 59
-rw-r--r-- | arch/mips/kernel/mips-mt-fpaff.c | 87
-rw-r--r-- | arch/mips/kernel/scall64-n32.S | 9
-rw-r--r-- | arch/mips/kernel/smp.c | 4
-rw-r--r-- | arch/mips/kernel/smtc.c | 3
-rw-r--r-- | arch/mips/kernel/syscall.c | 51
-rw-r--r-- | arch/mips/kernel/traps.c | 52
-rw-r--r-- | arch/mips/kernel/vdso.c | 4
16 files changed, 1035 insertions, 220 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 7a6ac501cbb5..06f848299785 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o obj-$(CONFIG_IRQ_GIC) += irq-gic.o +obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_32BIT) += scall32-o32.o obj-$(CONFIG_64BIT) += scall64-64.o obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o @@ -101,6 +102,4 @@ obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/ -EXTRA_CFLAGS += -Werror - CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index ca6c83218caa..6b30fb2caa67 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -126,7 +126,6 @@ void output_thread_defines(void) thread.cp0_baduaddr); OFFSET(THREAD_ECODE, task_struct, \ thread.error_code); - OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no); OFFSET(THREAD_TRAMP, task_struct, \ thread.irix_trampoline); OFFSET(THREAD_OLDCTX, task_struct, \ diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 0b2450ceb13f..2a4d50ff5e2c 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -163,7 +163,6 @@ int c0_compare_int_usable(void) int __cpuinit r4k_clockevent_init(void) { - uint64_t mips_freq = mips_hpt_frequency; unsigned int cpu = smp_processor_id(); struct clock_event_device *cd; unsigned int irq; @@ -188,9 +187,9 @@ int __cpuinit r4k_clockevent_init(void) cd->name = "MIPS"; cd->features = CLOCK_EVT_FEAT_ONESHOT; + clockevent_set_clock(cd, mips_hpt_frequency); + /* Calculate the min / max delta */ - cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); - cd->shift = 32; cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); cd->min_delta_ns = clockevent_delta2ns(0x300, cd); diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index 408d0a07b3a3..b8bb8ba60869 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -239,7 +239,7 @@ static inline void check_daddi(void) panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); } -int daddiu_bug __cpuinitdata = -1; +int daddiu_bug = -1; static inline void check_daddiu(void) { diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 3562b854f2cd..b1b304ea2128 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -187,6 +187,7 @@ void __init check_wait(void) case CPU_BCM6358: case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: + case CPU_JZRISC: cpu_wait = r4k_wait; break; @@ -760,6 +761,9 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c) ok = decode_config4(c); mips_probe_watch_registers(c); + + if (cpu_has_mips_r2) + c->core = read_c0_ebase() & 0x3ff; } static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) @@ -956,6 +960,22 @@ platform: } } +static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) +{ + decode_configs(c); + /* JZRISC does not implement the CP0 counter. 
*/ + c->options &= ~MIPS_CPU_COUNTER; + switch (c->processor_id & 0xff00) { + case PRID_IMP_JZRISC: + c->cputype = CPU_JZRISC; + __cpu_name[cpu] = "Ingenic JZRISC"; + break; + default: + panic("Unknown Ingenic Processor ID!"); + break; + } +} + const char *__cpu_name[NR_CPUS]; const char *__elf_platform; @@ -994,6 +1014,9 @@ __cpuinit void cpu_probe(void) case PRID_COMP_CAVIUM: cpu_probe_cavium(c, cpu); break; + case PRID_COMP_INGENIC: + cpu_probe_ingenic(c, cpu); + break; } BUG_ON(!__cpu_name[cpu]); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index e9e64e0ff7aa..5a84a1f11231 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -2,7 +2,7 @@ * Code for replacing ftrace calls with jumps. * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> - * Copyright (C) 2009 DSLab, Lanzhou University, China + * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China * Author: Wu Zhangjin <wuzhangjin@gmail.com> * * Thanks goes to Steven Rostedt for writing the original x86 version. @@ -12,18 +12,62 @@ #include <linux/init.h> #include <linux/ftrace.h> -#include <asm/cacheflush.h> #include <asm/asm.h> #include <asm/asm-offsets.h> +#include <asm/cacheflush.h> +#include <asm/uasm.h> + +/* + * If the Instruction Pointer is in module space (0xc0000000), return true; + * otherwise, it is in kernel space (0x80000000), return false. + * + * FIXME: This will not work when the kernel space and module space are the + * same. If they are the same, we need to modify scripts/recordmcount.pl, + * ftrace_make_nop/call() and the other related parts to ensure the + * enabling/disabling of the calling site to _mcount is right for both kernel + * and module. + */ + +static inline int in_module(unsigned long ip) +{ + return ip & 0x40000000; +} #ifdef CONFIG_DYNAMIC_FTRACE #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ -#define jump_insn_encode(op_code, addr) \ - ((unsigned int)((op_code) | (((addr) >> 2) & ADDR_MASK))) -static unsigned int ftrace_nop = 0x00000000; +#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */ +#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */ +#define INSN_NOP 0x00000000 /* nop */ +#define INSN_JAL(addr) \ + ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) + +static unsigned int insn_jal_ftrace_caller __read_mostly; +static unsigned int insn_lui_v1_hi16_mcount __read_mostly; +static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly; + +static inline void ftrace_dyn_arch_init_insns(void) +{ + u32 *buf; + unsigned int v1; + + /* lui v1, hi16_mcount */ + v1 = 3; + buf = (u32 *)&insn_lui_v1_hi16_mcount; + UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR); + + /* jal (ftrace_caller + 8), jump over the first two instruction */ + buf = (u32 *)&insn_jal_ftrace_caller; + uasm_i_jal(&buf, (FTRACE_ADDR + 8)); + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* j ftrace_graph_caller */ + buf = (u32 *)&insn_j_ftrace_graph_caller; + uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); +#endif +} static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { @@ -40,67 +84,56 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return 0; } -static int lui_v1; -static int jal_mcount; - int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned int new; - int faulted; unsigned long ip = rec->ip; - /* We have compiled module with -mlong-calls, but compiled the kernel - * without it, we need to cope with 
them respectively. */ - if (ip & 0x40000000) { - /* record it for ftrace_make_call */ - if (lui_v1 == 0) { - /* lui_v1 = *(unsigned int *)ip; */ - safe_load_code(lui_v1, ip, faulted); - - if (unlikely(faulted)) - return -EFAULT; - } - - /* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) + /* + * We have compiled module with -mlong-calls, but compiled the kernel + * without it, we need to cope with them respectively. + */ + if (in_module(ip)) { +#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) + /* + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * move $12, ra_address + * jalr v1 + * sub sp, sp, 8 + * 1: offset = 5 instructions + */ + new = INSN_B_1F_5; +#else + /* + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) * addiu v1, v1, low_16bit_of_mcount * move at, ra * jalr v1 - * nop - * 1f: (ip + 12) + * nop | move $12, ra_address | sub sp, sp, 8 + * 1: offset = 4 instructions */ - new = 0x10000004; + new = INSN_B_1F_4; +#endif } else { - /* record/calculate it for ftrace_make_call */ - if (jal_mcount == 0) { - /* We can record it directly like this: - * jal_mcount = *(unsigned int *)ip; - * Herein, jump over the first two nop instructions */ - jal_mcount = jump_insn_encode(JAL, (MCOUNT_ADDR + 8)); - } - - /* move at, ra - * jalr v1 --> nop + /* + * move at, ra + * jal _mcount --> nop */ - new = ftrace_nop; + new = INSN_NOP; } return ftrace_modify_code(ip, new); } -static int modified; /* initialized as 0 by default */ - int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned int new; unsigned long ip = rec->ip; - /* We just need to remove the "b ftrace_stub" at the fist time! */ - if (modified == 0) { - modified = 1; - ftrace_modify_code(addr, ftrace_nop); - } /* ip, module: 0xc0000000, kernel: 0x80000000 */ - new = (ip & 0x40000000) ? lui_v1 : jal_mcount; + new = in_module(ip) ? 
insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; return ftrace_modify_code(ip, new); } @@ -111,44 +144,48 @@ int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned int new; - new = jump_insn_encode(JAL, (unsigned long)func); + new = INSN_JAL((unsigned long)func); return ftrace_modify_code(FTRACE_CALL_IP, new); } int __init ftrace_dyn_arch_init(void *data) { + /* Encode the instructions when booting */ + ftrace_dyn_arch_init_insns(); + + /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */ + ftrace_modify_code(MCOUNT_ADDR, INSN_NOP); + /* The return code is retured via data */ *(unsigned long *)data = 0; return 0; } -#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); -#define JMP 0x08000000 /* jump to target directly */ -#define CALL_FTRACE_GRAPH_CALLER \ - jump_insn_encode(JMP, (unsigned long)(&ftrace_graph_caller)) #define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call)) int ftrace_enable_ftrace_graph_caller(void) { return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, - CALL_FTRACE_GRAPH_CALLER); + insn_j_ftrace_graph_caller); } int ftrace_disable_ftrace_graph_caller(void) { - return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, ftrace_nop); + return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP); } -#endif /* !CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_DYNAMIC_FTRACE */ #ifndef KBUILD_MCOUNT_RA_ADDRESS + #define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */ #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ @@ -162,17 +199,17 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, unsigned int code; int faulted; - /* in module or kernel? */ - if (self_addr & 0x40000000) { - /* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */ - ip = self_addr - 20; - } else { - /* kernel: move to the instruction "move ra, at" */ - ip = self_addr - 12; - } + /* + * For module, move the ip from calling site of mcount to the + * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for + * kernel, move to the instruction "move ra, at"(offset is 12) + */ + ip = self_addr - (in_module(self_addr) ? 20 : 12); - /* search the text until finding the non-store instruction or "s{d,w} - * ra, offset(sp)" instruction */ + /* + * search the text until finding the non-store instruction or "s{d,w} + * ra, offset(sp)" instruction + */ do { ip -= 4; @@ -181,10 +218,11 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, if (unlikely(faulted)) return 0; - - /* If we hit the non-store instruction before finding where the + /* + * If we hit the non-store instruction before finding where the * ra is stored, then this is a leaf function and it does not - * store the ra on the stack. */ + * store the ra on the stack + */ if ((code & S_R_SP) != S_R_SP) return parent_addr; @@ -202,7 +240,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, return 0; } -#endif +#endif /* !KBUILD_MCOUNT_RA_ADDRESS */ /* * Hook the return address and push it in the stack of return addrs @@ -220,7 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; - /* "parent" is the stack address saved the return address of the caller + /* + * "parent" is the stack address saved the return address of the caller * of _mcount. 
* * if the gcc < 4.5, a leaf function does not save the return address @@ -242,10 +281,11 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, goto out; #ifndef KBUILD_MCOUNT_RA_ADDRESS parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, - (unsigned long)parent, - fp); - /* If fails when getting the stack address of the non-leaf function's - * ra, stop function graph tracer and return */ + (unsigned long)parent, fp); + /* + * If fails when getting the stack address of the non-leaf function's + * ra, stop function graph tracer and return + */ if (parent == 0) goto out; #endif @@ -272,4 +312,4 @@ out: ftrace_graph_stop(); WARN_ON(1); } -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 9b78ff6e9b84..1f4e2fa64140 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -50,6 +50,151 @@ static struct hard_trap_info { { 0, 0} /* Must be last */ }; +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = +{ + { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) }, + { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) }, + { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) }, + { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) }, + { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) }, + { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) }, + { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) }, + { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) }, + { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) }, + { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) }, + { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) }, + { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) }, + { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) }, + { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) }, + { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) }, + { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) }, + { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) }, + { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) }, + { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) }, + { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) }, + { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) }, + { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) }, + { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) }, + { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) }, + { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) }, + { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) }, + { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) }, + { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) }, + { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) }, + { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) }, + { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) }, + { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) }, + { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) }, + { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) }, + { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) }, + { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) }, + { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) }, + { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) }, + { "f0", GDB_SIZEOF_REG, 0 }, + { "f1", GDB_SIZEOF_REG, 1 }, + { 
"f2", GDB_SIZEOF_REG, 2 }, + { "f3", GDB_SIZEOF_REG, 3 }, + { "f4", GDB_SIZEOF_REG, 4 }, + { "f5", GDB_SIZEOF_REG, 5 }, + { "f6", GDB_SIZEOF_REG, 6 }, + { "f7", GDB_SIZEOF_REG, 7 }, + { "f8", GDB_SIZEOF_REG, 8 }, + { "f9", GDB_SIZEOF_REG, 9 }, + { "f10", GDB_SIZEOF_REG, 10 }, + { "f11", GDB_SIZEOF_REG, 11 }, + { "f12", GDB_SIZEOF_REG, 12 }, + { "f13", GDB_SIZEOF_REG, 13 }, + { "f14", GDB_SIZEOF_REG, 14 }, + { "f15", GDB_SIZEOF_REG, 15 }, + { "f16", GDB_SIZEOF_REG, 16 }, + { "f17", GDB_SIZEOF_REG, 17 }, + { "f18", GDB_SIZEOF_REG, 18 }, + { "f19", GDB_SIZEOF_REG, 19 }, + { "f20", GDB_SIZEOF_REG, 20 }, + { "f21", GDB_SIZEOF_REG, 21 }, + { "f22", GDB_SIZEOF_REG, 22 }, + { "f23", GDB_SIZEOF_REG, 23 }, + { "f24", GDB_SIZEOF_REG, 24 }, + { "f25", GDB_SIZEOF_REG, 25 }, + { "f26", GDB_SIZEOF_REG, 26 }, + { "f27", GDB_SIZEOF_REG, 27 }, + { "f28", GDB_SIZEOF_REG, 28 }, + { "f29", GDB_SIZEOF_REG, 29 }, + { "f30", GDB_SIZEOF_REG, 30 }, + { "f31", GDB_SIZEOF_REG, 31 }, + { "fsr", GDB_SIZEOF_REG, 0 }, + { "fir", GDB_SIZEOF_REG, 0 }, +}; + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + int fp_reg; + + if (regno < 0 || regno >= DBG_MAX_REG_NUM) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1 && regno < 38) { + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) { + /* FP registers 38 -> 69 */ + if (!(regs->cp0_status & ST0_CU1)) + return 0; + if (regno == 70) { + /* Process the fcr31/fsr (register 70) */ + memcpy((void *)¤t->thread.fpu.fcr31, mem, + dbg_reg_def[regno].size); + goto out_save; + } else if (regno == 71) { + /* Ignore the fir (register 71) */ + goto out_save; + } + fp_reg = dbg_reg_def[regno].offset; + memcpy((void *)¤t->thread.fpu.fpr[fp_reg], mem, + dbg_reg_def[regno].size); +out_save: + restore_fp(current); + } + + return 0; +} + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + int fp_reg; + + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1 && regno < 38) { + /* First 38 registers */ + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) { + /* FP registers 38 -> 69 */ + if (!(regs->cp0_status & ST0_CU1)) + goto out; + save_fp(current); + if (regno == 70) { + /* Process the fcr31/fsr (register 70) */ + memcpy(mem, (void *)¤t->thread.fpu.fcr31, + dbg_reg_def[regno].size); + goto out; + } else if (regno == 71) { + /* Ignore the fir (register 71) */ + memset(mem, 0, dbg_reg_def[regno].size); + goto out; + } + fp_reg = dbg_reg_def[regno].offset; + memcpy(mem, (void *)¤t->thread.fpu.fpr[fp_reg], + dbg_reg_def[regno].size); + } + +out: + return dbg_reg_def[regno].name; + +} + void arch_kgdb_breakpoint(void) { __asm__ __volatile__( @@ -84,64 +229,6 @@ static int compute_signal(int tt) return SIGHUP; /* default for things we don't know about */ } -void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) -{ - int reg; - -#if (KGDB_GDB_REG_SIZE == 32) - u32 *ptr = (u32 *)gdb_regs; -#else - u64 *ptr = (u64 *)gdb_regs; -#endif - - for (reg = 0; reg < 32; reg++) - *(ptr++) = regs->regs[reg]; - - *(ptr++) = regs->cp0_status; - *(ptr++) = regs->lo; - *(ptr++) = regs->hi; - *(ptr++) = regs->cp0_badvaddr; - *(ptr++) = regs->cp0_cause; - *(ptr++) = regs->cp0_epc; - - /* FP REGS */ - if (!(current && (regs->cp0_status & ST0_CU1))) - return; - - save_fp(current); - for (reg = 0; reg < 32; reg++) - 
*(ptr++) = current->thread.fpu.fpr[reg]; -} - -void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) -{ - int reg; - -#if (KGDB_GDB_REG_SIZE == 32) - const u32 *ptr = (u32 *)gdb_regs; -#else - const u64 *ptr = (u64 *)gdb_regs; -#endif - - for (reg = 0; reg < 32; reg++) - regs->regs[reg] = *(ptr++); - - regs->cp0_status = *(ptr++); - regs->lo = *(ptr++); - regs->hi = *(ptr++); - regs->cp0_badvaddr = *(ptr++); - regs->cp0_cause = *(ptr++); - regs->cp0_epc = *(ptr++); - - /* FP REGS from current */ - if (!(current && (regs->cp0_status & ST0_CU1))) - return; - - for (reg = 0; reg < 32; reg++) - current->thread.fpu.fpr[reg] = *(ptr++); - restore_fp(current); -} - /* * Similar to regs_to_gdb_regs() except that process is sleeping and so * we may not be able to get all the info. @@ -242,7 +329,7 @@ static struct notifier_block kgdb_notifier = { }; /* - * Handle the 's' and 'c' commands + * Handle the 'c' command */ int kgdb_arch_handle_exception(int vector, int signo, int err_code, char *remcom_in_buffer, char *remcom_out_buffer, @@ -250,20 +337,14 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, { char *ptr; unsigned long address; - int cpu = smp_processor_id(); switch (remcom_in_buffer[0]) { - case 's': case 'c': /* handle the optional parameter */ ptr = &remcom_in_buffer[1]; if (kgdb_hex2long(&ptr, &address)) regs->cp0_epc = address; - atomic_set(&kgdb_cpu_doing_single_step, -1); - if (remcom_in_buffer[0] == 's') - atomic_set(&kgdb_cpu_doing_single_step, cpu); - return 0; } diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c new file mode 100644 index 000000000000..ee28683fc2ac --- /dev/null +++ b/arch/mips/kernel/kprobes.c @@ -0,0 +1,557 @@ +/* + * Kernel Probes (KProbes) + * arch/mips/kernel/kprobes.c + * + * Copyright 2006 Sony Corp. + * Copyright 2010 Cavium Networks + * + * Some portions copied from the powerpc version. + * + * Copyright (C) IBM Corporation, 2002, 2004 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kprobes.h> +#include <linux/preempt.h> +#include <linux/kdebug.h> +#include <linux/slab.h> + +#include <asm/ptrace.h> +#include <asm/break.h> +#include <asm/inst.h> + +static const union mips_instruction breakpoint_insn = { + .b_format = { + .opcode = spec_op, + .code = BRK_KPROBE_BP, + .func = break_op + } +}; + +static const union mips_instruction breakpoint2_insn = { + .b_format = { + .opcode = spec_op, + .code = BRK_KPROBE_SSTEPBP, + .func = break_op + } +}; + +DEFINE_PER_CPU(struct kprobe *, current_kprobe); +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static int __kprobes insn_has_delayslot(union mips_instruction insn) +{ + switch (insn.i_format.opcode) { + + /* + * This group contains: + * jr and jalr are in r_format format. 
+ */ + case spec_op: + switch (insn.r_format.func) { + case jr_op: + case jalr_op: + break; + default: + goto insn_ok; + } + + /* + * This group contains: + * bltz_op, bgez_op, bltzl_op, bgezl_op, + * bltzal_op, bgezal_op, bltzall_op, bgezall_op. + */ + case bcond_op: + + /* + * These are unconditional and in j_format. + */ + case jal_op: + case j_op: + + /* + * These are conditional and in i_format. + */ + case beq_op: + case beql_op: + case bne_op: + case bnel_op: + case blez_op: + case blezl_op: + case bgtz_op: + case bgtzl_op: + + /* + * These are the FPA/cp1 branch instructions. + */ + case cop1_op: + +#ifdef CONFIG_CPU_CAVIUM_OCTEON + case lwc2_op: /* This is bbit0 on Octeon */ + case ldc2_op: /* This is bbit032 on Octeon */ + case swc2_op: /* This is bbit1 on Octeon */ + case sdc2_op: /* This is bbit132 on Octeon */ +#endif + return 1; + default: + break; + } +insn_ok: + return 0; +} + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + union mips_instruction insn; + union mips_instruction prev_insn; + int ret = 0; + + prev_insn = p->addr[-1]; + insn = p->addr[0]; + + if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) { + pr_notice("Kprobes for branch and jump instructions are not supported\n"); + ret = -EINVAL; + goto out; + } + + /* insn: must be on special executable page on mips. */ + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) { + ret = -ENOMEM; + goto out; + } + + /* + * In the kprobe->ainsn.insn[] array we store the original + * instruction at index zero and a break trap instruction at + * index one. + */ + + memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); + p->ainsn.insn[1] = breakpoint2_insn; + p->opcode = *p->addr; + +out: + return ret; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + *p->addr = breakpoint_insn; + flush_insn_slot(p); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + *p->addr = p->opcode; + flush_insn_slot(p); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + free_insn_slot(p->ainsn.insn, 0); +} + +static void save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; + kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR; + kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR; + kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc; +} + +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; + kcb->kprobe_status = kcb->prev_kprobe.status; + kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR; + kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR; + kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc; +} + +static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + __get_cpu_var(current_kprobe) = p; + kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE); + kcb->kprobe_saved_epc = regs->cp0_epc; +} + +static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + regs->cp0_status &= ~ST0_IE; + + /* single step inline if the instruction is a break */ + if (p->opcode.word == breakpoint_insn.word || + p->opcode.word == breakpoint2_insn.word) + regs->cp0_epc = (unsigned long)p->addr; + else + regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; +} + +static int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + int ret = 0; + kprobe_opcode_t *addr; + struct kprobe_ctlblk *kcb; + + addr = (kprobe_opcode_t *) regs->cp0_epc; + + /* + * We don't want to be preempted for the 
entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + + /* Check we're not actually recursing */ + if (kprobe_running()) { + p = get_kprobe(addr); + if (p) { + if (kcb->kprobe_status == KPROBE_HIT_SS && + p->ainsn.insn->word == breakpoint_insn.word) { + regs->cp0_status &= ~ST0_IE; + regs->cp0_status |= kcb->kprobe_saved_SR; + goto no_kprobe; + } + /* + * We have reentered the kprobe_handler(), since + * another probe was hit while within the handler. + * We here save the original kprobes variables and + * just single step on the instruction of the new probe + * without calling any user handlers. + */ + save_previous_kprobe(kcb); + set_current_kprobe(p, regs, kcb); + kprobes_inc_nmissed_count(p); + prepare_singlestep(p, regs); + kcb->kprobe_status = KPROBE_REENTER; + return 1; + } else { + if (addr->word != breakpoint_insn.word) { + /* + * The breakpoint instruction was removed by + * another cpu right after we hit, no further + * handling of this interrupt is appropriate + */ + ret = 1; + goto no_kprobe; + } + p = __get_cpu_var(current_kprobe); + if (p->break_handler && p->break_handler(p, regs)) + goto ss_probe; + } + goto no_kprobe; + } + + p = get_kprobe(addr); + if (!p) { + if (addr->word != breakpoint_insn.word) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. + */ + ret = 1; + } + /* Not one of ours: let kernel handle it */ + goto no_kprobe; + } + + set_current_kprobe(p, regs, kcb); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + if (p->pre_handler && p->pre_handler(p, regs)) { + /* handler has already set things up, so skip ss setup */ + return 1; + } + +ss_probe: + prepare_singlestep(p, regs); + kcb->kprobe_status = KPROBE_HIT_SS; + return 1; + +no_kprobe: + preempt_enable_no_resched(); + return ret; + +} + +/* + * Called after single-stepping. p->addr is the address of the + * instruction whose first byte has been replaced by the "break 0" + * instruction. To avoid the SMP problems that can occur when we + * temporarily put back the original opcode to single-step, we + * single-stepped a copy of the instruction. The address of this + * copy is p->ainsn.insn. + * + * This function prepares to return from the post-single-step + * breakpoint trap. + */ +static void __kprobes resume_execution(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + unsigned long orig_epc = kcb->kprobe_saved_epc; + regs->cp0_epc = orig_epc + 4; +} + +static inline int post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + resume_execution(cur, regs, kcb); + + regs->cp0_status |= kcb->kprobe_saved_SR; + + /* Restore back the original saved kprobes variables and continue. 
*/ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + return 1; +} + +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + if (kcb->kprobe_status & KPROBE_HIT_SS) { + resume_execution(cur, regs, kcb); + regs->cp0_status |= kcb->kprobe_old_SR; + + reset_current_kprobe(); + preempt_enable_no_resched(); + } + return 0; +} + +/* + * Wrapper routine for handling exceptions. + */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + switch (val) { + case DIE_BREAK: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_SSTEPBP: + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + + case DIE_PAGE_FAULT: + /* kprobe_running() needs smp_processor_id() */ + preempt_disable(); + + if (kprobe_running() + && kprobe_fault_handler(args->regs, args->trapnr)) + ret = NOTIFY_STOP; + preempt_enable(); + break; + default: + break; + } + return ret; +} + +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct jprobe *jp = container_of(p, struct jprobe, kp); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + kcb->jprobe_saved_regs = *regs; + kcb->jprobe_saved_sp = regs->regs[29]; + + memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp, + MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); + + regs->cp0_epc = (unsigned long)(jp->entry); + + return 1; +} + +/* Defined in the inline asm below. */ +void jprobe_return_end(void); + +void __kprobes jprobe_return(void) +{ + /* Assembler quirk necessitates this '0,code' business. */ + asm volatile( + "break 0,%0\n\t" + ".globl jprobe_return_end\n" + "jprobe_return_end:\n" + : : "n" (BRK_KPROBE_BP) : "memory"); +} + +int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (regs->cp0_epc >= (unsigned long)jprobe_return && + regs->cp0_epc <= (unsigned long)jprobe_return_end) { + *regs = kcb->jprobe_saved_regs; + memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack, + MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); + preempt_enable_no_resched(); + + return 1; + } + return 0; +} + +/* + * Function return probe trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe causes the + * handlers to fire + */ +static void __used kretprobe_trampoline_holder(void) +{ + asm volatile( + ".set push\n\t" + /* Keep the assembler from reordering and placing JR here. 
*/ + ".set noreorder\n\t" + "nop\n\t" + ".global kretprobe_trampoline\n" + "kretprobe_trampoline:\n\t" + "nop\n\t" + ".set pop" + : : : "memory"); +} + +void kretprobe_trampoline(void); + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) regs->regs[31]; + + /* Replace the return addr with trampoline addr */ + regs->regs[31] = (unsigned long)kretprobe_trampoline; +} + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + struct kretprobe_instance *ri = NULL; + struct hlist_head *head, empty_rp; + struct hlist_node *node, *tmp; + unsigned long flags, orig_ret_address = 0; + unsigned long trampoline_address = (unsigned long)kretprobe_trampoline; + + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); + + /* + * It is possible to have multiple instances associated with a given + * task either because an multiple functions in the call path + * have a return probe installed on them, and/or more than one return + * return probe was registered for a target function. + * + * We can handle this because: + * - instances are always inserted at the head of the list + * - when multiple return probes are registered for the same + * function, the first instance's ret_addr will point to the + * real return address, and all the rest will point to + * kretprobe_trampoline + */ + hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + if (ri->rp && ri->rp->handler) + ri->rp->handler(ri, regs); + + orig_ret_address = (unsigned long)ri->ret_addr; + recycle_rp_inst(ri, &empty_rp); + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_assert(ri, orig_ret_address, trampoline_address); + instruction_pointer(regs) = orig_ret_address; + + reset_current_kprobe(); + kretprobe_hash_unlock(current, &flags); + preempt_enable_no_resched(); + + hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); + } + /* + * By returning a non-zero value, we are telling + * kprobe_handler() that we don't want the post_handler + * to run (and have re-enabled preemption) + */ + return 1; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) + return 1; + + return 0; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *)kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 6851fc97a511..4c968e7efb74 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -6,6 +6,7 @@ * more details. * * Copyright (C) 2009 Lemote Inc. 
& DSLab, Lanzhou University, China + * Copyright (C) 2010 DSLab, Lanzhou University, China * Author: Wu Zhangjin <wuzhangjin@gmail.com> */ @@ -45,8 +46,6 @@ PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) -#endif -#ifdef CONFIG_64BIT PTR_ADDIU sp, PT_SIZE #else PTR_ADDIU sp, (PT_SIZE + 8) @@ -58,6 +57,12 @@ move ra, AT .endm +/* + * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass + * the location of the parent's return address. + */ +#define MCOUNT_RA_ADDRESS_REG $12 + #ifdef CONFIG_DYNAMIC_FTRACE NESTED(ftrace_caller, PT_SIZE, ra) @@ -71,14 +76,14 @@ _mcount: MCOUNT_SAVE_REGS #ifdef KBUILD_MCOUNT_RA_ADDRESS - PTR_S t0, PT_R12(sp) /* t0 saved the location of the return address(at) by -mmcount-ra-address */ + PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) #endif - move a0, ra /* arg1: next ip, selfaddr */ + move a0, ra /* arg1: self return address */ .globl ftrace_call ftrace_call: nop /* a placeholder for the call to a real tracing function */ - move a1, AT /* arg2: the caller's next ip, parent */ + move a1, AT /* arg2: parent's return address */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call @@ -119,9 +124,9 @@ NESTED(_mcount, PT_SIZE, ra) static_trace: MCOUNT_SAVE_REGS - move a0, ra /* arg1: next ip, selfaddr */ + move a0, ra /* arg1: self return address */ jalr t2 /* (1) call *ftrace_trace_function */ - move a1, AT /* arg2: the caller's next ip, parent */ + move a1, AT /* arg2: parent's return address */ MCOUNT_RESTORE_REGS .globl ftrace_stub @@ -134,35 +139,43 @@ ftrace_stub: #ifdef CONFIG_FUNCTION_GRAPH_TRACER NESTED(ftrace_graph_caller, PT_SIZE, ra) -#ifdef CONFIG_DYNAMIC_FTRACE - PTR_L a1, PT_R31(sp) /* load the original ra from the stack */ -#ifdef KBUILD_MCOUNT_RA_ADDRESS - PTR_L t0, PT_R12(sp) /* load the original t0 from the stack */ -#endif -#else +#ifndef CONFIG_DYNAMIC_FTRACE MCOUNT_SAVE_REGS - move a1, ra /* arg2: next ip, selfaddr */ #endif + /* arg1: Get the location of the parent's return address */ #ifdef KBUILD_MCOUNT_RA_ADDRESS - bnez t0, 1f /* non-leaf func: t0 saved the location of the return address */ +#ifdef CONFIG_DYNAMIC_FTRACE + PTR_L a0, PT_R12(sp) +#else + move a0, MCOUNT_RA_ADDRESS_REG +#endif + bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */ nop - PTR_LA t0, PT_R1(sp) /* leaf func: get the location of at(old ra) from our own stack */ -1: move a0, t0 /* arg1: the location of the return address */ +#endif + PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */ +1: + + /* arg2: Get self return address */ +#ifdef CONFIG_DYNAMIC_FTRACE + PTR_L a1, PT_R31(sp) #else - PTR_LA a0, PT_R1(sp) /* arg1: &AT -> a0 */ + move a1, ra #endif - jal prepare_ftrace_return + + /* arg3: Get frame pointer of current stack */ #ifdef CONFIG_FRAME_POINTER - move a2, fp /* arg3: frame pointer */ -#else + move a2, fp +#else /* ! 
CONFIG_FRAME_POINTER */ #ifdef CONFIG_64BIT - PTR_LA a2, PT_SIZE(sp) + PTR_LA a2, PT_SIZE(sp) #else - PTR_LA a2, (PT_SIZE+8)(sp) + PTR_LA a2, (PT_SIZE+8)(sp) #endif #endif + jal prepare_ftrace_return + nop MCOUNT_RESTORE_REGS RETURN_BACK END(ftrace_graph_caller) diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index f5981c499109..2340f11dc29c 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -3,6 +3,7 @@ * Copyright (C) 2005 Mips Technologies, Inc */ #include <linux/cpu.h> +#include <linux/cpuset.h> #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/kernel.h> @@ -39,6 +40,21 @@ static inline struct task_struct *find_process_by_pid(pid_t pid) return pid ? find_task_by_vpid(pid) : current; } +/* + * check the target process has a UID that matches the current process's + */ +static bool check_same_owner(struct task_struct *p) +{ + const struct cred *cred = current_cred(), *pcred; + bool match; + + rcu_read_lock(); + pcred = __task_cred(p); + match = (cred->euid == pcred->euid || + cred->euid == pcred->uid); + rcu_read_unlock(); + return match; +} /* * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process @@ -46,12 +62,10 @@ static inline struct task_struct *find_process_by_pid(pid_t pid) asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { - cpumask_t new_mask; - cpumask_t effective_mask; - int retval; - struct task_struct *p; + cpumask_var_t cpus_allowed, new_mask, effective_mask; struct thread_info *ti; - uid_t euid; + struct task_struct *p; + int retval; if (len < sizeof(new_mask)) return -EINVAL; @@ -60,53 +74,74 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, return -EFAULT; get_online_cpus(); - read_lock(&tasklist_lock); + rcu_read_lock(); p = find_process_by_pid(pid); if (!p) { - read_unlock(&tasklist_lock); + rcu_read_unlock(); put_online_cpus(); return -ESRCH; } - /* - * It is not safe to call set_cpus_allowed with the - * tasklist_lock held. We will bump the task_struct's - * usage count and drop tasklist_lock before invoking - * set_cpus_allowed. 
- */ + /* Prevent p going away */ get_task_struct(p); + rcu_read_unlock(); - euid = current_euid(); + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { + retval = -ENOMEM; + goto out_put_task; + } + if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { + retval = -ENOMEM; + goto out_free_cpus_allowed; + } + if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) { + retval = -ENOMEM; + goto out_free_new_mask; + } retval = -EPERM; - if (euid != p->cred->euid && euid != p->cred->uid && - !capable(CAP_SYS_NICE)) { - read_unlock(&tasklist_lock); + if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) goto out_unlock; - } retval = security_task_setscheduler(p, 0, NULL); if (retval) goto out_unlock; /* Record new user-specified CPU set for future reference */ - p->thread.user_cpus_allowed = new_mask; - - /* Unlock the task list */ - read_unlock(&tasklist_lock); + cpumask_copy(&p->thread.user_cpus_allowed, new_mask); + again: /* Compute new global allowed CPU set if necessary */ ti = task_thread_info(p); if (test_ti_thread_flag(ti, TIF_FPUBOUND) && - cpus_intersects(new_mask, mt_fpu_cpumask)) { - cpus_and(effective_mask, new_mask, mt_fpu_cpumask); - retval = set_cpus_allowed_ptr(p, &effective_mask); + cpus_intersects(*new_mask, mt_fpu_cpumask)) { + cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask); + retval = set_cpus_allowed_ptr(p, effective_mask); } else { + cpumask_copy(effective_mask, new_mask); clear_ti_thread_flag(ti, TIF_FPUBOUND); - retval = set_cpus_allowed_ptr(p, &new_mask); + retval = set_cpus_allowed_ptr(p, new_mask); } + if (!retval) { + cpuset_cpus_allowed(p, cpus_allowed); + if (!cpumask_subset(effective_mask, cpus_allowed)) { + /* + * We must have raced with a concurrent cpuset + * update. Just reset the cpus_allowed to the + * cpuset's cpus_allowed + */ + cpumask_copy(new_mask, cpus_allowed); + goto again; + } + } out_unlock: + free_cpumask_var(effective_mask); +out_free_new_mask: + free_cpumask_var(new_mask); +out_free_cpus_allowed: + free_cpumask_var(cpus_allowed); +out_put_task: put_task_struct(p); put_online_cpus(); return retval; diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index a5297e2a353a..a3d66137731a 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -400,23 +400,24 @@ EXPORT(sysn32_call_table) PTR sys_ioprio_set PTR sys_ioprio_get PTR compat_sys_utimensat - PTR compat_sys_signalfd /* 5280 */ + PTR compat_sys_signalfd /* 6280 */ PTR sys_ni_syscall PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create - PTR compat_sys_timerfd_gettime /* 5285 */ + PTR compat_sys_timerfd_gettime /* 6285 */ PTR compat_sys_timerfd_settime PTR sys_signalfd4 PTR sys_eventfd2 PTR sys_epoll_create1 - PTR sys_dup3 /* 5290 */ + PTR sys_dup3 /* 6290 */ PTR sys_pipe2 PTR sys_inotify_init1 PTR sys_preadv PTR sys_pwritev - PTR compat_sys_rt_tgsigqueueinfo /* 5295 */ + PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ PTR sys_perf_event_open PTR sys_accept4 PTR compat_sys_recvmmsg + PTR sys_getdents .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 6cdca1956b77..383aeb95cb49 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -47,8 +47,12 @@ #endif /* CONFIG_MIPS_MT_SMTC */ volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ + int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ +EXPORT_SYMBOL(__cpu_number_map); + int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ +EXPORT_SYMBOL(__cpu_logical_map); /* Number of TCs (or siblings in 
Intel speak) per CPU core */ int smp_num_siblings = 1; diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index a95dea5459c4..cfeb2c155896 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -975,8 +975,7 @@ void ipi_decode(struct smtc_ipi *pipi) ipi_call_interrupt(); break; default: - printk("Impossible SMTC IPI Argument 0x%x\n", - (int)arg_copy); + printk("Impossible SMTC IPI Argument %p\n", arg_copy); break; } break; diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index dd81b0f87518..bddce0bca195 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -29,6 +29,8 @@ #include <linux/ipc.h> #include <linux/uaccess.h> #include <linux/slab.h> +#include <linux/random.h> +#include <linux/elf.h> #include <asm/asm.h> #include <asm/branch.h> @@ -116,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, (!vmm || addr + len <= vmm->vm_start)) return addr; } - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else @@ -134,6 +136,51 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, } } +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) { + random_factor = get_random_int(); + random_factor = random_factor << PAGE_SHIFT; + if (TASK_IS_32BIT_ADDR) + random_factor &= 0xfffffful; + else + random_factor &= 0xffffffful; + } + + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; +} + +static inline unsigned long brk_rnd(void) +{ + unsigned long rnd = get_random_int(); + + rnd = rnd << PAGE_SHIFT; + /* 8MB for 32bit, 256MB for 64bit */ + if (TASK_IS_32BIT_ADDR) + rnd = rnd & 0x7ffffful; + else + rnd = rnd & 0xffffffful; + + return rnd; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long base = mm->brk; + unsigned long ret; + + ret = PAGE_ALIGN(base + brk_rnd()); + + if (ret < mm->brk) + return mm->brk; + + return ret; +} + SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) @@ -207,7 +254,7 @@ asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs) int error; char * filename; - filename = getname((char __user *) (long)regs.regs[4]); + filename = getname((const char __user *) (long)regs.regs[4]); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 8bdd6a663c7f..03ec0019032b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -25,6 +25,7 @@ #include <linux/ptrace.h> #include <linux/kgdb.h> #include <linux/kdebug.h> +#include <linux/kprobes.h> #include <linux/notifier.h> #include <linux/kdb.h> @@ -334,7 +335,7 @@ void show_regs(struct pt_regs *regs) __show_regs((struct pt_regs *)regs); } -void show_registers(const struct pt_regs *regs) +void show_registers(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); @@ -356,9 +357,14 @@ void show_registers(const struct pt_regs *regs) printk("\n"); } +static int regs_to_trapnr(struct pt_regs *regs) +{ + return (regs->cp0_cause >> 2) & 0x1f; +} + static DEFINE_SPINLOCK(die_lock); -void __noreturn die(const char * str, struct pt_regs * regs) +void __noreturn die(const char *str, struct pt_regs *regs) { static int die_counter; int sig = SIGSEGV; @@ -366,7 +372,7 @@ void 
__noreturn die(const char * str, struct pt_regs * regs) unsigned long dvpret = dvpe(); #endif /* CONFIG_MIPS_MT_SMTC */ - notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0); + notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV); console_verbose(); spin_lock_irq(&die_lock); @@ -375,7 +381,7 @@ void __noreturn die(const char * str, struct pt_regs * regs) mips_mt_regdump(dvpret); #endif /* CONFIG_MIPS_MT_SMTC */ - if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) + if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) sig = 0; printk("%s[#%d]:\n", str, ++die_counter); @@ -449,7 +455,7 @@ asmlinkage void do_be(struct pt_regs *regs) printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", data ? "Data" : "Instruction", field, regs->cp0_epc, field, regs->regs[31]); - if (notify_die(DIE_OOPS, "bus error", regs, SIGBUS, 0, 0) + if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS) == NOTIFY_STOP) return; @@ -650,7 +656,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) { siginfo_t info; - if (notify_die(DIE_FP, "FP exception", regs, SIGFPE, 0, 0) + if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP) return; die_if_kernel("FP exception in kernel code", regs); @@ -713,11 +719,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, char b[40]; #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP - if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP) + if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) return; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ - if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP) + if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) return; /* @@ -783,6 +789,25 @@ asmlinkage void do_bp(struct pt_regs *regs) if (bcode >= (1 << 10)) bcode >>= 10; + /* + * notify the kprobe handlers, if instruction is likely to + * pertain to them. 
+ */ + switch (bcode) { + case BRK_KPROBE_BP: + if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) + return; + else + break; + case BRK_KPROBE_SSTEPBP: + if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) + return; + else + break; + default: + break; + } + do_trap_or_bp(regs, bcode, "Break"); return; @@ -815,7 +840,7 @@ asmlinkage void do_ri(struct pt_regs *regs) unsigned int opcode = 0; int status = -1; - if (notify_die(DIE_RI, "RI Fault", regs, SIGSEGV, 0, 0) + if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) == NOTIFY_STOP) return; @@ -907,11 +932,6 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, return NOTIFY_OK; } -static struct notifier_block default_cu2_notifier = { - .notifier_call = default_cu2_call, - .priority = 0x80000000, /* Run last */ -}; - asmlinkage void do_cpu(struct pt_regs *regs) { unsigned int __user *epc; @@ -976,7 +996,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) case 2: raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); - break; + return; case 3: break; @@ -1734,5 +1754,5 @@ void __init trap_init(void) sort_extable(__start___dbe_table, __stop___dbe_table); - register_cu2_notifier(&default_cu2_notifier); + cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ } diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index b773c1112b14..e5cdfd603f8f 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -61,11 +61,9 @@ static int __init init_vdso(void) vunmap(vdso); - pr_notice("init_vdso successfull\n"); - return 0; } -device_initcall(init_vdso); +subsys_initcall(init_vdso); static unsigned long vdso_addr(unsigned long start) { |
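For reference, the MIPS kprobes port added above is consumed entirely through the generic kprobes API; a client module needs nothing architecture-specific beyond reading cp0_epc from pt_regs. A minimal sketch follows — the probed symbol "do_fork" and the module boilerplate are illustrative assumptions, not part of this commit; note that arch_prepare_kprobe() above rejects probes placed on branch/jump instructions or in their delay slots:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative target; any non-branch, non-delay-slot kernel
 * instruction is acceptable to the MIPS port above. */
static struct kprobe kp = {
	.symbol_name = "do_fork",
};

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* On MIPS the probed address is the saved EPC. */
	pr_info("kprobe hit at epc=0x%lx\n", regs->cp0_epc);
	return 0;	/* let the single-step of the copied insn proceed */
}

static int __init kprobe_test_init(void)
{
	kp.pre_handler = handler_pre;
	return register_kprobe(&kp);
}

static void __exit kprobe_test_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_test_init);
module_exit(kprobe_test_exit);
MODULE_LICENSE("GPL");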