author | Paul Mundt <lethal@linux-sh.org> | 2010-06-14 17:06:10 +0900 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2010-06-14 17:06:10 +0900 |
commit | 57fcfdf9b2bbe8ea47771ffc16c418a20e4173c6 (patch) | |
tree | 23709004b53a40fab173437f398a87801e56da43 | |
parent | da28c597996a964a195529595c37f7aacd6dad09 (diff) | |
sh: kprobes SMP support.
Presently kprobes support relies on several saved opcode variables for
saving and restoring state, without any specific locking. This is
inherently racy on SMP, and given that we already use per-CPU variables
for everything else, convert these over too.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
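
For readers unfamiliar with the pattern, here is a minimal sketch of the before/after access model, using the same DEFINE_PER_CPU()/__get_cpu_var() accessors the patch relies on; the struct and variable names below are hypothetical and only illustrate the idea, they are not taken from the kernel source:

```c
#include <linux/percpu.h>

/* Hypothetical saved-state slot, standing in for struct kprobe here. */
struct saved_op {
	void *addr;
	unsigned short opcode;
};

/* Before: one instance shared by every CPU -- two CPUs hitting probes
 * concurrently overwrite each other's saved state. */
static struct saved_op saved_slot;

/* After: one instance per CPU; each CPU only ever touches its own copy. */
static DEFINE_PER_CPU(struct saved_op, saved_slot_pcpu);

static void save_op(void *addr, unsigned short opcode)
{
	/* __get_cpu_var() resolves to the current CPU's copy (the accessor
	 * used in kernels of this vintage); kprobes handlers run with
	 * preemption disabled, so the CPU cannot change underneath us. */
	struct saved_op *s = &__get_cpu_var(saved_slot_pcpu);

	s->addr = addr;
	s->opcode = opcode;
}
```

The per-CPU split removes the SMP race the commit message describes without introducing a lock, since each CPU only reads and writes its own slots.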
-rw-r--r-- | arch/sh/kernel/kprobes.c | 100 |
1 file changed, 50 insertions(+), 50 deletions(-)
```diff
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 4049d99f76e1..1208b09e95c3 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -20,9 +20,9 @@
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
-static struct kprobe saved_current_opcode;
-static struct kprobe saved_next_opcode;
-static struct kprobe saved_next_opcode2;
+static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
+static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
+static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
 
 #define OPCODE_JMP(x)	(((x) & 0xF0FF) == 0x402b)
 #define OPCODE_JSR(x)	(((x) & 0xF0FF) == 0x400b)
@@ -102,16 +102,21 @@ int __kprobes kprobe_handle_illslot(unsigned long pc)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	if (saved_next_opcode.addr != 0x0) {
+	struct kprobe *saved = &__get_cpu_var(saved_next_opcode);
+
+	if (saved->addr) {
 		arch_disarm_kprobe(p);
-		arch_disarm_kprobe(&saved_next_opcode);
-		saved_next_opcode.addr = 0x0;
-		saved_next_opcode.opcode = 0x0;
-
-		if (saved_next_opcode2.addr != 0x0) {
-			arch_disarm_kprobe(&saved_next_opcode2);
-			saved_next_opcode2.addr = 0x0;
-			saved_next_opcode2.opcode = 0x0;
+		arch_disarm_kprobe(saved);
+
+		saved->addr = NULL;
+		saved->opcode = 0;
+
+		saved = &__get_cpu_var(saved_next_opcode2);
+		if (saved->addr) {
+			arch_disarm_kprobe(saved);
+
+			saved->addr = NULL;
+			saved->opcode = 0;
 		}
 	}
 }
@@ -141,57 +146,59 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
  */
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
-	kprobe_opcode_t *addr = NULL;
-	saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc);
-	addr = saved_current_opcode.addr;
+	__get_cpu_var(saved_current_opcode).addr = (kprobe_opcode_t *)regs->pc;
 
 	if (p != NULL) {
+		struct kprobe *op1, *op2;
+
 		arch_disarm_kprobe(p);
 
+		op1 = &__get_cpu_var(saved_next_opcode);
+		op2 = &__get_cpu_var(saved_next_opcode2);
+
 		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
 			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
-			saved_next_opcode.addr =
-			    (kprobe_opcode_t *) regs->regs[reg_nr];
+			op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
 		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
 			unsigned long disp = (p->opcode & 0x0FFF);
-			saved_next_opcode.addr =
+			op1->addr =
 			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
 		} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
 			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
-			saved_next_opcode.addr =
+			op1->addr =
 			    (kprobe_opcode_t *) (regs->pc + 4 + regs->regs[reg_nr]);
 		} else if (OPCODE_RTS(p->opcode)) {
-			saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr;
+			op1->addr = (kprobe_opcode_t *) regs->pr;
 		} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
 			unsigned long disp = (p->opcode & 0x00FF);
 			/* case 1 */
-			saved_next_opcode.addr = p->addr + 1;
+			op1->addr = p->addr + 1;
 			/* case 2 */
-			saved_next_opcode2.addr =
+			op2->addr =
 			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
-			saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
-			arch_arm_kprobe(&saved_next_opcode2);
+			op2->opcode = *(op2->addr);
+			arch_arm_kprobe(op2);
 		} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
 			unsigned long disp = (p->opcode & 0x00FF);
 			/* case 1 */
-			saved_next_opcode.addr = p->addr + 2;
+			op1->addr = p->addr + 2;
 			/* case 2 */
-			saved_next_opcode2.addr =
+			op2->addr =
 			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
-			saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
-			arch_arm_kprobe(&saved_next_opcode2);
+			op2->opcode = *(op2->addr);
+			arch_arm_kprobe(op2);
 		} else {
-			saved_next_opcode.addr = p->addr + 1;
+			op1->addr = p->addr + 1;
 		}
 
-		saved_next_opcode.opcode = *(saved_next_opcode.addr);
-		arch_arm_kprobe(&saved_next_opcode);
+		op1->opcode = *(op1->addr);
+		arch_arm_kprobe(op1);
 	}
 }
@@ -376,21 +383,23 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 		cur->post_handler(cur, regs, 0);
 	}
 
-	if (saved_next_opcode.addr != 0x0) {
-		arch_disarm_kprobe(&saved_next_opcode);
-		saved_next_opcode.addr = 0x0;
-		saved_next_opcode.opcode = 0x0;
+	p = &__get_cpu_var(saved_next_opcode);
+	if (p->addr) {
+		arch_disarm_kprobe(p);
+		p->addr = NULL;
+		p->opcode = 0;
 
-		addr = saved_current_opcode.addr;
-		saved_current_opcode.addr = 0x0;
+		addr = __get_cpu_var(saved_current_opcode).addr;
+		__get_cpu_var(saved_current_opcode).addr = NULL;
 
 		p = get_kprobe(addr);
 		arch_arm_kprobe(p);
 
-		if (saved_next_opcode2.addr != 0x0) {
-			arch_disarm_kprobe(&saved_next_opcode2);
-			saved_next_opcode2.addr = 0x0;
-			saved_next_opcode2.opcode = 0x0;
+		p = &__get_cpu_var(saved_next_opcode2);
+		if (p->addr) {
+			arch_disarm_kprobe(p);
+			p->addr = NULL;
+			p->opcode = 0;
 		}
 	}
 
@@ -572,14 +581,5 @@ static struct kprobe trampoline_p = {
 
 int __init arch_init_kprobes(void)
 {
-	saved_next_opcode.addr = 0x0;
-	saved_next_opcode.opcode = 0x0;
-
-	saved_current_opcode.addr = 0x0;
-	saved_current_opcode.opcode = 0x0;
-
-	saved_next_opcode2.addr = 0x0;
-	saved_next_opcode2.opcode = 0x0;
-
 	return register_kprobe(&trampoline_p);
 }
```
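
As a side note on the arithmetic in prepare_singlestep() above: for PC-relative branches the address of the next instruction to be executed is computed as pc + 4 + disp * 2, with the displacement taken from the low bits of the opcode. Below is a small user-space sketch of that same computation; the opcode and pc values are made up for illustration, and the sketch deliberately mirrors the kernel code above in not sign-extending the displacement:

```c
#include <stdio.h>

/* Mirror of the masks used in the hunk above. */
#define BRA_DISP(op)	((unsigned long)((op) & 0x0FFF))	/* BRA/BSR: 12-bit disp */
#define BT_DISP(op)	((unsigned long)((op) & 0x00FF))	/* BT/BF:    8-bit disp */

static unsigned long branch_target(unsigned long pc, unsigned long disp)
{
	/* Same formula as prepare_singlestep(): SH instructions are
	 * 2 bytes wide, hence disp * 2, plus the fixed +4 offset. */
	return pc + 4 + disp * 2;
}

int main(void)
{
	unsigned long pc = 0x8c001000UL;	/* made-up probe address */

	/* 0xA010: BRA with disp = 0x010 -> 0x8c001024 */
	printf("BRA target: 0x%lx\n", branch_target(pc, BRA_DISP(0xA010)));
	/* 0x8908: BT with disp = 0x08   -> 0x8c001014 */
	printf("BT  target: 0x%lx\n", branch_target(pc, BT_DISP(0x8908)));

	return 0;
}
```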