author     Ananth N Mavinakayanahalli <ananth@in.ibm.com>  2005-11-07 01:00:07 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-07 07:53:45 -0800
commit     e65845235c8120be63001fc1a4ac00c819194bbe
tree       209a081cc07375290743ceb45f52dc474f45382a
parent     66ff2d0691e00e1e7bfdf398a970310c9a0fe671
[PATCH] Kprobes: Track kprobe on a per_cpu basis - base changes
Changes to the base kprobe infrastructure to track kprobe execution on a
per-cpu basis.
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
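
The patch below replaces the single global curr_kprobe pointer with a per-CPU slot (kprobe_instance) that the aggregate handlers set on entry and clear on exit, and exposes per-CPU accessors (kprobe_running(), reset_current_kprobe(), get_kprobe_ctlblk()) in the header. As a rough illustration of that bookkeeping pattern only -- not the kernel code itself -- here is a minimal standalone user-space sketch; the NR_CPUS value, the fake_cpu() helper, the stubbed struct kprobe, and the probed-symbol name are all invented for the example, and a real per-CPU variable is modeled with a plain array.

#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 4                       /* arbitrary for this sketch */

/* Stub of struct kprobe -- only what the illustration needs. */
struct kprobe {
        const char *symbol_name;
};

/*
 * Models DEFINE_PER_CPU(struct kprobe *, kprobe_instance): one
 * "currently executing kprobe" slot per CPU instead of one global
 * curr_kprobe pointer shared by all CPUs.
 */
static struct kprobe *kprobe_instance[NR_CPUS];

/* Stand-in for smp_processor_id(); pinned to CPU 0 here. */
static int fake_cpu(void)
{
        return 0;
}

/* Mirrors set_kprobe_instance(): record the probe this CPU is handling. */
static void set_kprobe_instance(struct kprobe *kp)
{
        kprobe_instance[fake_cpu()] = kp;
}

/* Mirrors reset_kprobe_instance(): clear the slot once handlers return. */
static void reset_kprobe_instance(void)
{
        kprobe_instance[fake_cpu()] = NULL;
}

/* Mirrors kprobe_running(): non-NULL means a probe is active on this CPU. */
static struct kprobe *kprobe_running(void)
{
        return kprobe_instance[fake_cpu()];
}

int main(void)
{
        struct kprobe kp = { .symbol_name = "some_probed_function" };

        set_kprobe_instance(&kp);
        if (kprobe_running())
                printf("cpu %d: handling kprobe at %s\n",
                       fake_cpu(), kprobe_running()->symbol_name);

        reset_kprobe_instance();
        printf("cpu %d: %s\n", fake_cpu(),
               kprobe_running() ? "probe still active" : "no active kprobe");
        return 0;
}

In the kernel the slot is only touched with preemption disabled, which is why the patch can use the lock-free __get_cpu_var() accessors; the sketch side-steps that concern by pinning everything to one fake CPU.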
-rw-r--r--   include/linux/kprobes.h   31
-rw-r--r--   kernel/kprobes.c          43
2 files changed, 50 insertions, 24 deletions
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e30afdca7917..6720305a31e8 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/kprobes.h>
 
@@ -106,6 +107,9 @@ struct jprobe {
 	kprobe_opcode_t *entry;	/* probe handling code to jump to */
 };
 
+DECLARE_PER_CPU(struct kprobe *, current_kprobe);
+DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
 #ifdef ARCH_SUPPORTS_KRETPROBES
 extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs);
 #else /* ARCH_SUPPORTS_KRETPROBES */
@@ -146,13 +150,6 @@ struct kretprobe_instance {
 void lock_kprobes(void);
 void unlock_kprobes(void);
 
-/* kprobe running now on this CPU? */
-static inline int kprobe_running(void)
-{
-	extern unsigned int kprobe_cpu;
-	return kprobe_cpu == smp_processor_id();
-}
-
 extern int arch_prepare_kprobe(struct kprobe *p);
 extern void arch_copy_kprobe(struct kprobe *p);
 extern void arch_arm_kprobe(struct kprobe *p);
@@ -167,6 +164,22 @@ extern void free_insn_slot(kprobe_opcode_t *slot);
 struct kprobe *get_kprobe(void *addr);
 struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 
+/* kprobe_running() will just return the current_kprobe on this CPU */
+static inline struct kprobe *kprobe_running(void)
+{
+	return (__get_cpu_var(current_kprobe));
+}
+
+static inline void reset_current_kprobe(void)
+{
+	__get_cpu_var(current_kprobe) = NULL;
+}
+
+static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
+{
+	return (&__get_cpu_var(kprobe_ctlblk));
+}
+
 int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
 int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
@@ -183,9 +196,9 @@ void add_rp_inst(struct kretprobe_instance *ri);
 void kprobe_flush_task(struct task_struct *tk);
 void recycle_rp_inst(struct kretprobe_instance *ri);
 #else /* CONFIG_KPROBES */
-static inline int kprobe_running(void)
+static inline struct kprobe *kprobe_running(void)
 {
-	return 0;
+	return NULL;
 }
 static inline int register_kprobe(struct kprobe *p)
 {
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ce4915dd683a..6da8f9b33d1e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -51,7 +51,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
-static struct kprobe *curr_kprobe;
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -188,6 +188,17 @@ void __kprobes unlock_kprobes(void)
 	local_irq_restore(flags);
 }
 
+/* We have preemption disabled.. so it is safe to use __ versions */
+static inline void set_kprobe_instance(struct kprobe *kp)
+{
+	__get_cpu_var(kprobe_instance) = kp;
+}
+
+static inline void reset_kprobe_instance(void)
+{
+	__get_cpu_var(kprobe_instance) = NULL;
+}
+
 /* You have to be holding the kprobe_lock */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
@@ -213,11 +224,11 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->pre_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
 				return 1;
 		}
-		curr_kprobe = NULL;
+		reset_kprobe_instance();
 	}
 	return 0;
 }
@@ -229,9 +240,9 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->post_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
-			curr_kprobe = NULL;
+			reset_kprobe_instance();
 		}
 	}
 	return;
@@ -240,12 +251,14 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+
 	/*
 	 * if we faulted "during" the execution of a user specified
 	 * probe handler, invoke just that probe's fault handler
 	 */
-	if (curr_kprobe && curr_kprobe->fault_handler) {
-		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+	if (cur && cur->fault_handler) {
+		if (cur->fault_handler(cur, regs, trapnr))
 			return 1;
 	}
 	return 0;
@@ -253,15 +266,15 @@
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *kp = curr_kprobe;
-	if (curr_kprobe && kp->break_handler) {
-		if (kp->break_handler(kp, regs)) {
-			curr_kprobe = NULL;
-			return 1;
-		}
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	int ret = 0;
+
+	if (cur && cur->break_handler) {
+		if (cur->break_handler(cur, regs))
+			ret = 1;
 	}
-	curr_kprobe = NULL;
-	return 0;
+	reset_kprobe_instance();
+	return ret;
 }
 
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)