Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--   arch/ppc/kernel/Makefile          |   1
-rw-r--r--   arch/ppc/kernel/align.c           |  12
-rw-r--r--   arch/ppc/kernel/cpu_setup_6xx.S   |  42
-rw-r--r--   arch/ppc/kernel/entry.S           |  59
-rw-r--r--   arch/ppc/kernel/fpu.S             | 133
-rw-r--r--   arch/ppc/kernel/head.S            | 163
-rw-r--r--   arch/ppc/kernel/head_44x.S        |   6
-rw-r--r--   arch/ppc/kernel/head_booke.h      |   7
-rw-r--r--   arch/ppc/kernel/head_fsl_booke.S  |   8
-rw-r--r--   arch/ppc/kernel/misc.S            |  12
-rw-r--r--   arch/ppc/kernel/ptrace.c          |   5
-rw-r--r--   arch/ppc/kernel/traps.c           |   2
12 files changed, 266 insertions, 184 deletions
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 86bc878cb3ee..b284451802c9 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -9,6 +9,7 @@ extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-$(CONFIG_6xx)		+= idle_6xx.o
 extra-$(CONFIG_POWER4)		+= idle_power4.o
+extra-$(CONFIG_PPC_FPU)		+= fpu.o
 extra-y				+= vmlinux.lds
 
 obj-y				:= entry.o traps.o irq.o idle.o time.o misc.o \
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index 79c929475037..ff81da9598d8 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -290,6 +290,10 @@ fix_alignment(struct pt_regs *regs)
 		/* lwm, stmw */
 		nb = (32 - reg) * 4;
 	}
+
+	if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0))
+		return -EFAULT;	/* bad address */
+
 	rptr = (unsigned char *) &regs->gpr[reg];
 	if (flags & LD) {
 		for (i = 0; i < nb; ++i)
@@ -368,16 +372,24 @@ fix_alignment(struct pt_regs *regs)
 
 	/* Single-precision FP load and store require conversions... */
 	case LD+F+S:
+#ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_fd(&data.f, &data.d, &current->thread.fpscr);
 		preempt_enable();
+#else
+		return 0;
+#endif
 		break;
 	case ST+F+S:
+#ifdef CONFIG_PPC_FPU
 		preempt_disable();
 		enable_kernel_fp();
 		cvt_df(&data.d, &data.f, &current->thread.fpscr);
 		preempt_enable();
+#else
+		return 0;
+#endif
 		break;
 	}
 
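For reference, a minimal user-space sketch of the guard logic the align.c hunk adds: the whole span (nb + nb0 bytes) is validated for the correct access direction before the unaligned load or store is emulated. The LD/ST bit values and the stub access_ok() below are illustrative stand-ins, not the kernel's definitions.

    #include <stddef.h>
    #include <stdio.h>

    #define LD 1                    /* load: the fixup reads user memory */
    #define ST 2                    /* store: the fixup writes user memory */

    enum verify { VERIFY_READ, VERIFY_WRITE };

    /* stand-in: the kernel checks the range against the user segment limit */
    static int access_ok(enum verify dir, const void *addr, size_t len)
    {
            (void)dir;
            (void)len;
            return addr != NULL;
    }

    /* returns 1 when the access can be fixed up, -1 (-EFAULT) on a bad address */
    static int fix_alignment(int flags, const void *addr, size_t nb, size_t nb0)
    {
            if (!access_ok((flags & ST) ? VERIFY_WRITE : VERIFY_READ, addr, nb + nb0))
                    return -1;
            /* ... byte-by-byte emulation of the load or store would follow ... */
            return 1;
    }

    int main(void)
    {
            char buf[8];
            printf("%d\n", fix_alignment(ST, buf, 4, 0));   /* 1: span accepted */
            printf("%d\n", fix_alignment(LD, NULL, 4, 0));  /* -1: bad address */
            return 0;
    }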
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
index 74f781b486a3..468721d9ebd2 100644
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ b/arch/ppc/kernel/cpu_setup_6xx.S
@@ -30,12 +30,14 @@ _GLOBAL(__setup_cpu_604)
 	blr
 _GLOBAL(__setup_cpu_750)
 	mflr	r4
+	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	mtlr	r4
 	blr
 _GLOBAL(__setup_cpu_750cx)
 	mflr	r4
+	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750cx
@@ -43,6 +45,7 @@ _GLOBAL(__setup_cpu_750cx)
 	blr
 _GLOBAL(__setup_cpu_750fx)
 	mflr	r4
+	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750fx
@@ -50,6 +53,7 @@ _GLOBAL(__setup_cpu_750fx)
 	blr
_GLOBAL(__setup_cpu_7400)
 	mflr	r4
+	bl	__init_fpu_registers
 	bl	setup_7400_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
@@ -57,6 +61,7 @@ _GLOBAL(__setup_cpu_7400)
 	blr
 _GLOBAL(__setup_cpu_7410)
 	mflr	r4
+	bl	__init_fpu_registers
 	bl	setup_7410_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
@@ -80,7 +85,7 @@ setup_common_caches:
 	bne	1f			/* don't invalidate the D-cache */
 	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
 1:	sync
-	mtspr	SPRN_HID0,r8		/* enable and invalidate caches */
+	mtspr	SPRN_HID0,r8	/* enable and invalidate caches */
 	sync
 	mtspr	SPRN_HID0,r11		/* enable caches */
 	sync
@@ -152,9 +157,13 @@ setup_7410_workarounds:
 setup_750_7400_hid0:
 	mfspr	r11,SPRN_HID0
 	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
+	oris	r11,r11,HID0_DPM@h
 BEGIN_FTR_SECTION
-	oris	r11,r11,HID0_DPM@h	/* enable dynamic power mgmt */
-END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
+	xori	r11,r11,HID0_BTIC
+END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
+BEGIN_FTR_SECTION
+	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
+END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
 	li	r3,HID0_SPD
 	andc	r11,r11,r3		/* clear SPD: enable speculative */
 	li	r3,0
@@ -218,13 +227,15 @@ setup_745x_specifics:
 	/* All of the bits we have to set.....
 	 */
-	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_LRSTK | HID0_BTIC
+	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
+	ori	r11,r11,HID0_LRSTK | HID0_BTIC
+	oris	r11,r11,HID0_DPM@h
 BEGIN_FTR_SECTION
 	xori	r11,r11,HID0_BTIC
 END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
 BEGIN_FTR_SECTION
-	oris	r11,r11,HID0_DPM@h	/* enable dynamic power mgmt */
-END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
+	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
+END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
 
 	/* All of the bits we have to clear....
 	 */
 
@@ -248,6 +259,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
 	isync
 	blr
 
+/*
+ * Initialize the FPU registers. This is needed to work around an errata
+ * in some 750 cpus where using a not yet initialized FPU register after
+ * power on reset may hang the CPU
+ */
+_GLOBAL(__init_fpu_registers)
+	mfmsr	r10
+	ori	r11,r10,MSR_FP
+	mtmsr	r11
+	isync
+	addis	r9,r3,empty_zero_page@ha
+	addi	r9,r9,empty_zero_page@l
+	REST_32FPRS(0,r9)
+	sync
+	mtmsr	r10
+	isync
+	blr
+
+
 /* Definitions for the table use to save CPU states */
 #define CS_HID0	0
 #define CS_HID1	4
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 035217d6c0f1..5f075dbc4ee7 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -563,6 +563,65 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	addi	r1,r1,INT_FRAME_SIZE
 	blr
 
+	.globl	fast_exception_return
+fast_exception_return:
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
+	beq	1f			/* if not, we've got problems */
+#endif
+
+2:	REST_4GPRS(3, r11)
+	lwz	r10,_CCR(r11)
+	REST_GPR(1, r11)
+	mtcr	r10
+	lwz	r10,_LINK(r11)
+	mtlr	r10
+	REST_GPR(10, r11)
+	mtspr	SPRN_SRR1,r9
+	mtspr	SPRN_SRR0,r12
+	REST_GPR(9, r11)
+	REST_GPR(12, r11)
+	lwz	r11,GPR11(r11)
+	SYNC
+	RFI
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+/* check if the exception happened in a restartable section */
+1:	lis	r3,exc_exit_restart_end@ha
+	addi	r3,r3,exc_exit_restart_end@l
+	cmplw	r12,r3
+	bge	3f
+	lis	r4,exc_exit_restart@ha
+	addi	r4,r4,exc_exit_restart@l
+	cmplw	r12,r4
+	blt	3f
+	lis	r3,fee_restarts@ha
+	tophys(r3,r3)
+	lwz	r5,fee_restarts@l(r3)
+	addi	r5,r5,1
+	stw	r5,fee_restarts@l(r3)
+	mr	r12,r4		/* restart at exc_exit_restart */
+	b	2b
+
+	.comm	fee_restarts,4
+
+/* aargh, a nonrecoverable interrupt, panic */
+/* aargh, we don't know which trap this is */
+/* but the 601 doesn't implement the RI bit, so assume it's OK */
+3:
+BEGIN_FTR_SECTION
+	b	2b
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+	li	r10,-1
+	stw	r10,TRAP(r11)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	lis	r10,MSR_KERNEL@h
+	ori	r10,r10,MSR_KERNEL@l
+	bl	transfer_to_handler_full
+	.long	nonrecoverable_exception
+	.long	ret_from_except
+#endif
+
 	.globl	sigreturn_exit
 sigreturn_exit:
 	subi	r1,r3,STACK_FRAME_OVERHEAD
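The HID0 rework above inverts the feature-section logic: DPM (and BTIC on the 745x path) is now set unconditionally and XORed back out in an IFSET section when the CPU lacks the feature, rather than being set inside an IFCLR section. A small C sketch of that set-then-conditionally-clear pattern; the bit positions are illustrative, not the real HID0 layout.

    #include <stdint.h>
    #include <stdio.h>

    #define HID0_BTIC (1u << 5)     /* illustrative bit positions only */
    #define HID0_DPM  (1u << 20)

    #define CPU_FTR_NO_BTIC (1u << 0)
    #define CPU_FTR_NO_DPM  (1u << 1)

    static uint32_t setup_hid0(uint32_t hid0, uint32_t features)
    {
            hid0 |= HID0_BTIC | HID0_DPM;   /* set everything first */
            if (features & CPU_FTR_NO_BTIC)
                    hid0 ^= HID0_BTIC;      /* bit is known set, so XOR clears it */
            if (features & CPU_FTR_NO_DPM)
                    hid0 ^= HID0_DPM;       /* disable dynamic power mgmt */
            return hid0;
    }

    int main(void)
    {
            printf("%08x\n", setup_hid0(0, 0));                 /* both bits on */
            printf("%08x\n", setup_hid0(0, CPU_FTR_NO_DPM));    /* DPM cleared */
            return 0;
    }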
diff --git a/arch/ppc/kernel/fpu.S b/arch/ppc/kernel/fpu.S
new file mode 100644
index 000000000000..6189b26f640f
--- /dev/null
+++ b/arch/ppc/kernel/fpu.S
@@ -0,0 +1,133 @@
+/*
+ *  FPU support code, moved here from head.S so that it can be used
+ *  by chips which use other head-whatever.S files.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/offsets.h>
+
+/*
+ * This task wants to use the FPU now.
+ * On UP, disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Load up this task's FP registers from its thread_struct,
+ * enable the FPU for the current task and return to the task.
+ */
+	.globl	load_up_fpu
+load_up_fpu:
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+#ifdef CONFIG_PPC64BRIDGE
+	clrldi	r5,r5,1		/* turn off 64-bit mode */
+#endif /* CONFIG_PPC64BRIDGE */
+	SYNC
+	MTMSRD(r5)			/* enable use of fpu now */
+	isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_fpu in switch_to.
+ */
+#ifndef CONFIG_SMP
+	tophys(r6,0)			/* get __pa constant */
+	addis	r3,r6,last_task_used_math@ha
+	lwz	r4,last_task_used_math@l(r3)
+	cmpwi	0,r4,0
+	beq	1f
+	add	r4,r4,r6
+	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
+	SAVE_32FPRS(0, r4)
+	mffs	fr0
+	stfd	fr0,THREAD_FPSCR-4(r4)
+	lwz	r5,PT_REGS(r4)
+	add	r5,r5,r6
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li	r10,MSR_FP|MSR_FE0|MSR_FE1
+	andc	r4,r4,r10		/* disable FP for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	/* enable use of FP after return */
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	lwz	r4,THREAD_FPEXC_MODE(r5)
+	ori	r9,r9,MSR_FP		/* enable FP for current */
+	or	r9,r9,r4
+	lfd	fr0,THREAD_FPSCR-4(r5)
+	mtfsf	0xff,fr0
+	REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+	subi	r4,r5,THREAD
+	sub	r4,r4,r6
+	stw	r4,last_task_used_math@l(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	/* we haven't used ctr or xer or lr */
+	b	fast_exception_return
+
+/*
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
+ */
+	.globl	KernelFP
+KernelFP:
+	lwz	r3,_MSR(r1)
+	ori	r3,r3,MSR_FP
+	stw	r3,_MSR(r1)		/* enable use of FP after return */
+	lis	r3,86f@h
+	ori	r3,r3,86f@l
+	mr	r4,r2			/* current */
+	lwz	r5,_NIP(r1)
+	bl	printk
+	b	ret_from_except
+86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
+	.align	4,0
+
+/*
+ * giveup_fpu(tsk)
+ * Disable FP for the task given as the argument,
+ * and save the floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ */
+	.globl	giveup_fpu
+giveup_fpu:
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+	SYNC_601
+	ISYNC_601
+	MTMSRD(r5)			/* enable use of fpu now */
+	SYNC_601
+	isync
+	cmpwi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	lwz	r5,PT_REGS(r3)
+	cmpwi	0,r5,0
+	SAVE_32FPRS(0, r3)
+	mffs	fr0
+	stfd	fr0,THREAD_FPSCR-4(r3)
+	beq	1f
+	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	li	r3,MSR_FP|MSR_FE0|MSR_FE1
+	andc	r4,r4,r3		/* disable FP for previous task */
+	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	lis	r4,last_task_used_math@ha
+	stw	r5,last_task_used_math@l(r4)
+#endif /* CONFIG_SMP */
+	blr
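The load_up_fpu/giveup_fpu pair in fpu.S implements lazy FPU context switching on UP: the hardware registers keep the state of last_task_used_math until some other task takes an FP-unavailable trap. A rough C model of that ownership hand-off; the types and register-save helpers are illustrative stand-ins, not kernel code.

    #include <stdio.h>

    #define MSR_FP 0x2000           /* illustrative: FP-available bit in the MSR */

    struct task {
            const char *name;
            unsigned long msr;      /* clearing MSR_FP makes the task trap on FP use */
            double fpr[32];         /* FP state saved in the thread_struct */
    };

    static struct task *last_task_used_math;        /* current FPU owner (UP only) */

    static void save_fpu_regs(struct task *t)    { (void)t; /* models SAVE_32FPRS */ }
    static void restore_fpu_regs(struct task *t) { (void)t; /* models REST_32FPRS */ }

    /* FP-unavailable trap handler: make `next` the FPU owner */
    static void load_up_fpu(struct task *next)
    {
            if (last_task_used_math && last_task_used_math != next) {
                    save_fpu_regs(last_task_used_math);     /* flush the old owner */
                    last_task_used_math->msr &= ~MSR_FP;    /* it must trap to get FP back */
            }
            restore_fpu_regs(next);
            next->msr |= MSR_FP;            /* no more traps until ownership moves */
            last_task_used_math = next;
    }

    int main(void)
    {
            struct task a = { "a", 0, {0} }, b = { "b", 0, {0} };
            load_up_fpu(&a);
            load_up_fpu(&b);        /* a's state is saved and a loses MSR_FP */
            printf("owner=%s a.msr=%lx b.msr=%lx\n",
                   last_task_used_math->name, a.msr, b.msr);
            return 0;
    }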
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1a89a71e0acc..a931d773715f 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -775,133 +775,6 @@ InstructionSegment:
 	EXC_XFER_STD(0x480, UnknownException)
 #endif /* CONFIG_PPC64BRIDGE */
 
-/*
- * This task wants to use the FPU now.
- * On UP, disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Load up this task's FP registers from its thread_struct,
- * enable the FPU for the current task and return to the task.
- */
-load_up_fpu:
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
-	clrldi	r5,r5,1		/* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
-	SYNC
-	MTMSRD(r5)			/* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
-	tophys(r6,0)			/* get __pa constant */
-	addis	r3,r6,last_task_used_math@ha
-	lwz	r4,last_task_used_math@l(r3)
-	cmpwi	0,r4,0
-	beq	1f
-	add	r4,r4,r6
-	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
-	SAVE_32FPRS(0, r4)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r4)
-	lwz	r5,PT_REGS(r4)
-	add	r5,r5,r6
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r10,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r10		/* disable FP for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
-	lwz	r4,THREAD_FPEXC_MODE(r5)
-	ori	r9,r9,MSR_FP		/* enable FP for current */
-	or	r9,r9,r4
-	lfd	fr0,THREAD_FPSCR-4(r5)
-	mtfsf	0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	subi	r4,r5,THREAD
-	sub	r4,r4,r6
-	stw	r4,last_task_used_math@l(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	/* we haven't used ctr or xer or lr */
-	/* fall through to fast_exception_return */
-
-	.globl	fast_exception_return
-fast_exception_return:
-	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
-	beq	1f			/* if not, we've got problems */
-2:	REST_4GPRS(3, r11)
-	lwz	r10,_CCR(r11)
-	REST_GPR(1, r11)
-	mtcr	r10
-	lwz	r10,_LINK(r11)
-	mtlr	r10
-	REST_GPR(10, r11)
-	mtspr	SPRN_SRR1,r9
-	mtspr	SPRN_SRR0,r12
-	REST_GPR(9, r11)
-	REST_GPR(12, r11)
-	lwz	r11,GPR11(r11)
-	SYNC
-	RFI
-
-/* check if the exception happened in a restartable section */
-1:	lis	r3,exc_exit_restart_end@ha
-	addi	r3,r3,exc_exit_restart_end@l
-	cmplw	r12,r3
-	bge	3f
-	lis	r4,exc_exit_restart@ha
-	addi	r4,r4,exc_exit_restart@l
-	cmplw	r12,r4
-	blt	3f
-	lis	r3,fee_restarts@ha
-	tophys(r3,r3)
-	lwz	r5,fee_restarts@l(r3)
-	addi	r5,r5,1
-	stw	r5,fee_restarts@l(r3)
-	mr	r12,r4		/* restart at exc_exit_restart */
-	b	2b
-
-	.comm	fee_restarts,4
-
-/* aargh, a nonrecoverable interrupt, panic */
-/* aargh, we don't know which trap this is */
-/* but the 601 doesn't implement the RI bit, so assume it's OK */
-3:
-BEGIN_FTR_SECTION
-	b	2b
-END_FTR_SECTION_IFSET(CPU_FTR_601)
-	li	r10,-1
-	stw	r10,TRAP(r11)
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	li	r10,MSR_KERNEL
-	bl	transfer_to_handler_full
-	.long	nonrecoverable_exception
-	.long	ret_from_except
-
-/*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
-KernelFP:
-	lwz	r3,_MSR(r1)
-	ori	r3,r3,MSR_FP
-	stw	r3,_MSR(r1)		/* enable use of FP after return */
-	lis	r3,86f@h
-	ori	r3,r3,86f@l
-	mr	r4,r2			/* current */
-	lwz	r5,_NIP(r1)
-	bl	printk
-	b	ret_from_except
-86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
-	.align	4,0
-
 #ifdef CONFIG_ALTIVEC
 /* Note that the AltiVec support is closely modeled after the FP
  * support. Changes to one are likely to be applicable to the
@@ -1016,42 +889,6 @@ giveup_altivec:
 #endif /* CONFIG_ALTIVEC */
 
 /*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
-	.globl	giveup_fpu
-giveup_fpu:
-	mfmsr	r5
-	ori	r5,r5,MSR_FP
-	SYNC_601
-	ISYNC_601
-	MTMSRD(r5)			/* enable use of fpu now */
-	SYNC_601
-	isync
-	cmpwi	0,r3,0
-	beqlr-				/* if no previous owner, done */
-	addi	r3,r3,THREAD		/* want THREAD of task */
-	lwz	r5,PT_REGS(r3)
-	cmpwi	0,r5,0
-	SAVE_32FPRS(0, r3)
-	mffs	fr0
-	stfd	fr0,THREAD_FPSCR-4(r3)
-	beq	1f
-	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li	r3,MSR_FP|MSR_FE0|MSR_FE1
-	andc	r4,r4,r3		/* disable FP for previous task */
-	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
-	li	r5,0
-	lis	r4,last_task_used_math@ha
-	stw	r5,last_task_used_math@l(r4)
-#endif /* CONFIG_SMP */
-	blr
-
-/*
  * This code is jumped to from the startup code to copy
  * the kernel image to physical address 0.
  */
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 9ed8165a3d6c..9b6a8e513657 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -426,7 +426,11 @@ interrupt_base:
 	PROGRAM_EXCEPTION
 
 	/* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+	FP_UNAVAILABLE_EXCEPTION
+#else
 	EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+#endif
 
 	/* System Call Interrupt */
 	START_EXCEPTION(SystemCall)
@@ -686,8 +690,10 @@ _GLOBAL(giveup_altivec)
 *
 * The 44x core does not have an FPU.
 */
+#ifndef CONFIG_PPC_FPU
 _GLOBAL(giveup_fpu)
 	blr
+#endif
 
 /*
 * extern void abort(void)
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index 884dac916bce..f213d12eec08 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -337,4 +337,11 @@ label:
 	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
 	EXC_XFER_LITE(0x0900, timer_interrupt)
 
+#define FP_UNAVAILABLE_EXCEPTION					      \
+	START_EXCEPTION(FloatingPointUnavailable)			      \
+	NORMAL_EXCEPTION_PROLOG;					      \
+	bne	load_up_fpu;		/* if from user, just load it up */   \
+	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
+	EXC_XFER_EE_LITE(0x800, KernelFP)
+
 #endif /* __HEAD_BOOKE_H__ */
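FP_UNAVAILABLE_EXCEPTION above dispatches on the user/kernel test done by the prolog (the bne): user-mode traps go to the lazy loader, kernel-mode traps are reported by KernelFP. A C sketch of that dispatch, with the two handlers as stand-ins:

    #include <stdio.h>

    static void load_up_fpu(void) { puts("load FP state, return to task"); }
    static void KernelFP(void)    { puts("floating point used in kernel"); }

    /* models the bne taken on the prolog's MSR_PR (user-mode) test */
    static void fp_unavailable_exception(int from_user_mode)
    {
            if (from_user_mode)
                    load_up_fpu();  /* if from user, just load it up */
            else
                    KernelFP();
    }

    int main(void)
    {
            fp_unavailable_exception(1);
            fp_unavailable_exception(0);
            return 0;
    }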
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
index d64bf61d2b1f..f22ddce36135 100644
--- a/arch/ppc/kernel/head_fsl_booke.S
+++ b/arch/ppc/kernel/head_fsl_booke.S
@@ -504,7 +504,11 @@ interrupt_base:
 	PROGRAM_EXCEPTION
 
 	/* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+	FP_UNAVAILABLE_EXCEPTION
+#else
 	EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+#endif
 
 	/* System Call Interrupt */
 	START_EXCEPTION(SystemCall)
@@ -916,10 +920,12 @@ _GLOBAL(giveup_spe)
 /*
 * extern void giveup_fpu(struct task_struct *prev)
 *
- * The e500 core does not have an FPU.
+ * Not all FSL Book-E cores have an FPU
 */
+#ifndef CONFIG_PPC_FPU
 _GLOBAL(giveup_fpu)
 	blr
+#endif
 
 /*
 * extern void abort(void)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 73f7c23b0dd4..e4f1615ec13f 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -1096,17 +1096,7 @@ _GLOBAL(_get_SP)
 * and exceptions as if the cpu had performed the load or store.
 */
 
-#if defined(CONFIG_4xx) || defined(CONFIG_E500)
-_GLOBAL(cvt_fd)
-	lfs	0,0(r3)
-	stfd	0,0(r4)
-	blr
-
-_GLOBAL(cvt_df)
-	lfd	0,0(r3)
-	stfs	0,0(r4)
-	blr
-#else
+#ifdef CONFIG_PPC_FPU
 _GLOBAL(cvt_fd)
 	lfd	0,-4(r5)	/* load up fpscr value */
 	mtfsf	0xff,0
diff --git a/arch/ppc/kernel/ptrace.c b/arch/ppc/kernel/ptrace.c
index 426b6f7d9de3..59d59a8dc249 100644
--- a/arch/ppc/kernel/ptrace.c
+++ b/arch/ppc/kernel/ptrace.c
@@ -26,6 +26,7 @@
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/security.h>
+#include <linux/signal.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -356,7 +357,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
 	case PTRACE_CONT: { /* restart after signal. */
 		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
+		if (!valid_signal(data))
 			break;
 		if (request == PTRACE_SYSCALL) {
 			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
@@ -389,7 +390,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 
 	case PTRACE_SINGLESTEP: {  /* set the trap flag. */
 		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
+		if (!valid_signal(data))
 			break;
 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 		set_single_step(child);
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 361865c4bc84..f8e7e324a173 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -176,7 +176,7 @@ static inline int check_io_access(struct pt_regs *regs)
 #else
 #define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
 #endif
-#define REASON_FP	0
+#define REASON_FP	ESR_FP
 #define REASON_ILLEGAL	ESR_PIL
 #define REASON_PRIVILEGED	ESR_PPR
 #define REASON_TRAP	ESR_PTR
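The ptrace.c hunks above replace the open-coded `(unsigned long) data > _NSIG` bound check with valid_signal() from <linux/signal.h>. A stand-alone sketch mirroring its semantics (0 is accepted, meaning "continue without delivering a signal"):

    #include <stdio.h>

    #define _NSIG 64

    /* mirrors the kernel helper: any value from 0 to _NSIG is valid */
    static inline int valid_signal(unsigned long sig)
    {
            return sig <= _NSIG ? 1 : 0;
    }

    int main(void)
    {
            printf("%d\n", valid_signal(0));    /* 1: continue, no signal */
            printf("%d\n", valid_signal(9));    /* 1: SIGKILL */
            printf("%d\n", valid_signal(65));   /* 0: out of range, ptrace returns -EIO */
            return 0;
    }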