Diffstat (limited to 'arch/ppc64')
-rw-r--r--   arch/ppc64/Kconfig                     4
-rw-r--r--   arch/ppc64/Makefile                    2
-rw-r--r--   arch/ppc64/kernel/kprobes.c           40
-rw-r--r--   arch/ppc64/kernel/misc.S               6
-rw-r--r--   arch/ppc64/kernel/pSeries_reconfig.c   2
-rw-r--r--   arch/ppc64/kernel/sys_ppc32.c         31
-rw-r--r--   arch/ppc64/kernel/time.c               7
-rw-r--r--   arch/ppc64/kernel/traps.c              5
-rw-r--r--   arch/ppc64/kernel/vmlinux.lds.S        1
-rw-r--r--   arch/ppc64/mm/fault.c                  5
10 files changed, 39 insertions, 64 deletions
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 13b262f10216..deca68ad644a 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -44,6 +44,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
 	bool
 	default y
 
+config ARCH_MAY_HAVE_PC_FDC
+	bool
+	default y
+
 # We optimistically allocate largepages from the VM, so make the limit
 # large enough (16MB). This badly named config option is actually
 # max order + 1
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 6350cce82efb..8189953a372c 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -49,7 +49,7 @@ NM		:= $(NM) --synthetic
 
 endif
 
-CHECKFLAGS	+= -m64 -D__powerpc__
+CHECKFLAGS	+= -m64 -D__powerpc__ -D__powerpc64__
 
 LDFLAGS		:= -m elf64ppc
 LDFLAGS_vmlinux	:= -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index a3d519518fb8..7e80d49c589a 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -44,7 +44,7 @@ static struct kprobe *kprobe_prev;
 static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
 static struct pt_regs jprobe_saved_regs;
 
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	int ret = 0;
 	kprobe_opcode_t insn = *p->addr;
@@ -68,27 +68,27 @@ int arch_prepare_kprobe(struct kprobe *p)
 	return ret;
 }
 
-void arch_copy_kprobe(struct kprobe *p)
+void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 	p->opcode = *p->addr;
 }
 
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
 	*p->addr = BREAKPOINT_INSTRUCTION;
 	flush_icache_range((unsigned long) p->addr,
 			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
 	*p->addr = p->opcode;
 	flush_icache_range((unsigned long) p->addr,
 			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	up(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
@@ -102,7 +102,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->msr |= MSR_SE;
 
 	/* single step inline if it is a trap variant */
-	if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn))
+	if (is_trap(insn))
 		regs->nip = (unsigned long)p->addr;
 	else
 		regs->nip = (unsigned long)p->ainsn.insn;
@@ -122,7 +122,8 @@ static inline void restore_previous_kprobe(void)
 	kprobe_saved_msr = kprobe_saved_msr_prev;
 }
 
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+				      struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri;
 
@@ -151,7 +152,9 @@ static inline int kprobe_handler(struct pt_regs *regs)
 		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
-			if (kprobe_status == KPROBE_HIT_SS) {
+			kprobe_opcode_t insn = *p->ainsn.insn;
+			if (kprobe_status == KPROBE_HIT_SS &&
+					is_trap(insn)) {
 				regs->msr &= ~MSR_SE;
 				regs->msr |= kprobe_saved_msr;
 				unlock_kprobes();
@@ -191,8 +194,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
 		 * trap variant, it could belong to someone else
 		 */
 		kprobe_opcode_t cur_insn = *addr;
-		if (IS_TW(cur_insn) || IS_TD(cur_insn) ||
-				IS_TWI(cur_insn) || IS_TDI(cur_insn))
+		if (is_trap(cur_insn))
 			goto no_kprobe;
 		/*
 		 * The breakpoint instruction was removed right
@@ -244,7 +246,7 @@ void kretprobe_trampoline_holder(void)
 /*
  * Called when the probe at kretprobe trampoline is hit
  */
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
@@ -308,7 +310,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
  * single-stepped a copy of the instruction. The address of this
  * copy is p->ainsn.insn.
  */
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	int ret;
 	unsigned int insn = *p->ainsn.insn;
@@ -373,8 +375,8 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 /*
  * Wrapper routine to for handling exceptions.
  */
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
-			     void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
 {
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
@@ -402,11 +404,11 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 	default:
 		break;
 	}
-	preempt_enable();
+	preempt_enable_no_resched();
 	return ret;
 }
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 
@@ -419,16 +421,16 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
-void jprobe_return(void)
+void __kprobes jprobe_return(void)
 {
 	asm volatile("trap" ::: "memory");
 }
 
-void jprobe_return_end(void)
+void __kprobes jprobe_return_end(void)
 {
 };
 
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	/*
 	 * FIXME - we should ideally be validating that we got here 'cos
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 474df0a862bf..6d860c1d9fa0 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -183,7 +183,7 @@ PPC64_CACHES:
  * flush all bytes from start through stop-1 inclusive
  */
 
-_GLOBAL(__flush_icache_range)
+_KPROBE(__flush_icache_range)
 
 /*
  * Flush the data cache to memory
@@ -223,7 +223,7 @@ _GLOBAL(__flush_icache_range)
 	bdnz	2b
 	isync
 	blr
-	
+	.previous .text
 /*
  * Like above, but only do the D-cache.
  *
@@ -957,7 +957,7 @@ _GLOBAL(sys_call_table32)
 	.llong .ppc_fork
 	.llong .sys_read
 	.llong .sys_write
-	.llong .sys32_open		/* 5 */
+	.llong .compat_sys_open		/* 5 */
 	.llong .sys_close
 	.llong .sys32_waitpid
 	.llong .sys32_creat
diff --git a/arch/ppc64/kernel/pSeries_reconfig.c b/arch/ppc64/kernel/pSeries_reconfig.c
index dc2a69d412a2..58c61219d08e 100644
--- a/arch/ppc64/kernel/pSeries_reconfig.c
+++ b/arch/ppc64/kernel/pSeries_reconfig.c
@@ -111,7 +111,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 	struct device_node *np;
 	int err = -ENOMEM;
 
-	np = kcalloc(1, sizeof(*np), GFP_KERNEL);
+	np = kzalloc(sizeof(*np), GFP_KERNEL);
 	if (!np)
 		goto out_err;
 
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index 206619080e66..214914a95a50 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -867,37 +867,6 @@ off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
 	return sys_lseek(fd, (int)offset, origin);
 }
 
-/*
- * This is just a version for 32-bit applications which does
- * not force O_LARGEFILE on.
- */
-asmlinkage long sys32_open(const char __user * filename, int flags, int mode)
-{
-	char * tmp;
-	int fd, error;
-
-	tmp = getname(filename);
-	fd = PTR_ERR(tmp);
-	if (!IS_ERR(tmp)) {
-		fd = get_unused_fd();
-		if (fd >= 0) {
-			struct file * f = filp_open(tmp, flags, mode);
-			error = PTR_ERR(f);
-			if (IS_ERR(f))
-				goto out_error;
-			fd_install(fd, f);
-		}
-out:
-		putname(tmp);
-	}
-	return fd;
-
-out_error:
-	put_unused_fd(fd);
-	fd = error;
-	goto out;
-}
-
 /* Note: it is necessary to treat bufsiz as an unsigned int,
  * with the corresponding cast to a signed int to insure that the
  * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 91ef95ccda4f..9939c206afa4 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -128,7 +128,7 @@ static __inline__ void timer_check_rtc(void)
 	 * We should have an rtc call that only sets the minutes and
 	 * seconds like on Intel to avoid problems with non UTC clocks.
 	 */
-	if ( (time_status & STA_UNSYNC) == 0 &&
+	if (ntp_synced() &&
 	    xtime.tv_sec - last_rtc_update >= 659 &&
 	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
 	    jiffies - wall_jiffies == 1) {
@@ -435,10 +435,7 @@ int do_settimeofday(struct timespec *tv)
 	 */
 	last_rtc_update = new_sec - 658;
 
-	time_adjust = 0;		/* stop active adjtime() */
-	time_status |= STA_UNSYNC;
-	time_maxerror = NTP_PHASE_LIMIT;
-	time_esterror = NTP_PHASE_LIMIT;
+	ntp_clear();
 
 	delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp),
 			     do_gtod.varp->tb_to_xs );
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
index a8d5e83ee89f..7467ae508e6e 100644
--- a/arch/ppc64/kernel/traps.c
+++ b/arch/ppc64/kernel/traps.c
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/kprobes.h>
 
 #include <asm/kdebug.h>
 #include <asm/pgtable.h>
@@ -220,7 +221,7 @@ void instruction_breakpoint_exception(struct pt_regs *regs)
 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
 }
 
-void single_step_exception(struct pt_regs *regs)
+void __kprobes single_step_exception(struct pt_regs *regs)
 {
 	regs->msr &= ~MSR_SE;		/* Turn off 'trace' bit */
 
@@ -398,7 +399,7 @@ check_bug_trap(struct pt_regs *regs)
 	return 0;
 }
 
-void program_check_exception(struct pt_regs *regs)
+void __kprobes program_check_exception(struct pt_regs *regs)
 {
 	if (debugger_fault_handler(regs))
 		return;
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 4103cc13f8d6..0306510bc4ff 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -15,6 +15,7 @@ SECTIONS
 		*(.text .text.*)
 		SCHED_TEXT
 		LOCK_TEXT
+		KPROBES_TEXT
 		*(.fixup)
 		. = ALIGN(4096);
 		_etext = .;
diff --git a/arch/ppc64/mm/fault.c b/arch/ppc64/mm/fault.c
index 20b0f37e8bf8..772f0714a5b7 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/ppc64/mm/fault.c
@@ -29,6 +29,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp_lock.h>
 #include <linux/module.h>
+#include <linux/kprobes.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -84,8 +85,8 @@ static int store_updates_sp(struct pt_regs *regs)
  * The return value is 0 if the fault was handled, or the signal
  * number if this is a kernel fault that can't be handled here.
  */
-int do_page_fault(struct pt_regs *regs, unsigned long address,
-		  unsigned long error_code)
+int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+			    unsigned long error_code)
 {
 	struct vm_area_struct * vma;
 	struct mm_struct *mm = current->mm;
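
The recurring change above is the __kprobes function annotation, paired with the new KPROBES_TEXT entry in vmlinux.lds.S. As a rough orientation only (this is a paraphrased sketch of the generic kprobes plumbing of this kernel generation, not code from this patch; the exact definitions live in include/linux/kprobes.h and include/asm-generic/vmlinux.lds.h):

	/*
	 * Sketch: how __kprobes and KPROBES_TEXT cooperate.
	 *
	 * Tagging a function with __kprobes makes the compiler emit it into
	 * the .kprobes.text section instead of ordinary .text (roughly):
	 */
	#define __kprobes	__attribute__((__section__(".kprobes.text")))

	/*
	 * The KPROBES_TEXT macro used by the linker script collects that
	 * section into the image and brackets it with two symbols:
	 */
	extern char __kprobes_text_start[], __kprobes_text_end[];

	/*
	 * The kprobes core can then refuse to place a probe on any address
	 * inside that window, so the breakpoint/single-step machinery never
	 * probes its own infrastructure and recurses (the in-kernel check is
	 * equivalent to this range test):
	 */
	static int addr_in_kprobes_text(void *addr)
	{
		return addr >= (void *)__kprobes_text_start &&
		       addr <  (void *)__kprobes_text_end;
	}

That is why the functions the probe machinery depends on — do_page_fault, single_step_exception, program_check_exception and the arch kprobes handlers — gain the annotation, and why misc.S switches __flush_icache_range from _GLOBAL to _KPROBE, the assembler-side counterpart that (presumably) emits the routine into .kprobes.text and then restores the normal section with ".previous .text".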