Diffstat (limited to 'arch')
 -rw-r--r--   arch/ppc/Kconfig                   |    4
 -rw-r--r--   arch/ppc/Makefile                  |    1
 -rw-r--r--   arch/ppc/kernel/Makefile           |    1
 -rw-r--r--   arch/ppc/kernel/align.c            |    8
 -rw-r--r--   arch/ppc/kernel/entry.S            |   59
 -rw-r--r--   arch/ppc/kernel/fpu.S              |  133
 -rw-r--r--   arch/ppc/kernel/head.S             |  163
 -rw-r--r--   arch/ppc/kernel/head_44x.S         |    6
 -rw-r--r--   arch/ppc/kernel/head_booke.h       |    7
 -rw-r--r--   arch/ppc/kernel/head_fsl_booke.S   |    8
 -rw-r--r--   arch/ppc/kernel/misc.S             |   12
 -rw-r--r--   arch/ppc/kernel/traps.c            |    2
 12 files changed, 228 insertions(+), 176 deletions(-)
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 74aa1e92a395..c3d941345e3d 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -53,6 +53,7 @@ choice
config 6xx
bool "6xx/7xx/74xx/52xx/82xx/83xx"
+ select PPC_FPU
help
There are four types of PowerPC chips supported. The more common
types (601, 603, 604, 740, 750, 7400), the Motorola embedded
@@ -86,6 +87,9 @@ config E500
endchoice
+config PPC_FPU
+ bool
+
config BOOKE
bool
depends on E500
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 73cbdda5b597..0432a25b4735 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -53,6 +53,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o
head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o
head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
+head-$(CONFIG_PPC_FPU) += arch/ppc/kernel/fpu.o
core-y += arch/ppc/kernel/ arch/ppc/platforms/ \
arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 86bc878cb3ee..b284451802c9 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -9,6 +9,7 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-$(CONFIG_6xx) += idle_6xx.o
extra-$(CONFIG_POWER4) += idle_power4.o
+extra-$(CONFIG_PPC_FPU) += fpu.o
extra-y += vmlinux.lds
obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index 79c929475037..40d356c66c41 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -368,16 +368,24 @@ fix_alignment(struct pt_regs *regs)
/* Single-precision FP load and store require conversions... */
case LD+F+S:
+#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_fd(&data.f, &data.d, &current->thread.fpscr);
preempt_enable();
+#else
+ return 0;
+#endif
break;
case ST+F+S:
+#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
cvt_df(&data.d, &data.f, &current->thread.fpscr);
preempt_enable();
+#else
+ return 0;
+#endif
break;
}
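Background on the align.c hunk above: PowerPC floating-point registers always hold double precision, so emulating a misaligned single-precision load or store means widening or narrowing the value on its way through the FPU, which is what the kernel's cvt_fd()/cvt_df() helpers do under the task's FPSCR. On a core without an FPU there is no way to perform that conversion, so the fixup now returns 0 (unhandled). The sketch below is a plain-C userspace analogue of that data flow only, not the in-kernel implementation; the function names emulate_lfs/emulate_stfs are invented for illustration.

/*
 * Illustrative analogue of what the alignment fixup needs for
 * single-precision accesses: a misaligned "lfs"/"stfs" has to be
 * widened/narrowed because the FPRs hold doubles.  The real
 * cvt_fd()/cvt_df() do this with FPU instructions and the task's FPSCR;
 * this version only shows the data movement.
 */
#include <stdio.h>
#include <string.h>

/* emulate a single-precision load: memory word -> double-width register */
static double emulate_lfs(const unsigned char *mem)
{
	float f;

	memcpy(&f, mem, sizeof(f));	/* byte-wise fetch, alignment-safe */
	return (double)f;		/* the widening cvt_fd() performs */
}

/* emulate a single-precision store: double-width register -> memory word */
static void emulate_stfs(double reg, unsigned char *mem)
{
	float f = (float)reg;		/* the narrowing cvt_df() performs */

	memcpy(mem, &f, sizeof(f));
}

int main(void)
{
	unsigned char buf[8];
	double reg = 1.5;

	emulate_stfs(reg, buf + 1);		/* deliberately misaligned */
	printf("read back: %f\n", emulate_lfs(buf + 1));
	return 0;
}

Without an FPU there is no FPSCR and no hardware conversion path, so returning 0 simply lets the caller treat the access as unhandled.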
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 035217d6c0f1..5f075dbc4ee7 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -563,6 +563,65 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
addi r1,r1,INT_FRAME_SIZE
blr
+ .globl fast_exception_return
+fast_exception_return:
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+ andi. r10,r9,MSR_RI /* check for recoverable interrupt */
+ beq 1f /* if not, we've got problems */
+#endif
+
+2: REST_4GPRS(3, r11)
+ lwz r10,_CCR(r11)
+ REST_GPR(1, r11)
+ mtcr r10
+ lwz r10,_LINK(r11)
+ mtlr r10
+ REST_GPR(10, r11)
+ mtspr SPRN_SRR1,r9
+ mtspr SPRN_SRR0,r12
+ REST_GPR(9, r11)
+ REST_GPR(12, r11)
+ lwz r11,GPR11(r11)
+ SYNC
+ RFI
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+/* check if the exception happened in a restartable section */
+1: lis r3,exc_exit_restart_end@ha
+ addi r3,r3,exc_exit_restart_end@l
+ cmplw r12,r3
+ bge 3f
+ lis r4,exc_exit_restart@ha
+ addi r4,r4,exc_exit_restart@l
+ cmplw r12,r4
+ blt 3f
+ lis r3,fee_restarts@ha
+ tophys(r3,r3)
+ lwz r5,fee_restarts@l(r3)
+ addi r5,r5,1
+ stw r5,fee_restarts@l(r3)
+ mr r12,r4 /* restart at exc_exit_restart */
+ b 2b
+
+ .comm fee_restarts,4
+
+/* aargh, a nonrecoverable interrupt, panic */
+/* aargh, we don't know which trap this is */
+/* but the 601 doesn't implement the RI bit, so assume it's OK */
+3:
+BEGIN_FTR_SECTION
+ b 2b
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+ li r10,-1
+ stw r10,TRAP(r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lis r10,MSR_KERNEL@h
+ ori r10,r10,MSR_KERNEL@l
+ bl transfer_to_handler_full
+ .long nonrecoverable_exception
+ .long ret_from_except
+#endif
+
.globl sigreturn_exit
sigreturn_exit:
subi r1,r3,STACK_FRAME_OVERHEAD
diff --git a/arch/ppc/kernel/fpu.S b/arch/ppc/kernel/fpu.S
new file mode 100644
index 000000000000..6189b26f640f
--- /dev/null
+++ b/arch/ppc/kernel/fpu.S
@@ -0,0 +1,133 @@
+/*
+ * FPU support code, moved here from head.S so that it can be used
+ * by chips which use other head-whatever.S files.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cputable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/offsets.h>
+
+/*
+ * This task wants to use the FPU now.
+ * On UP, disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Load up this task's FP registers from its thread_struct,
+ * enable the FPU for the current task and return to the task.
+ */
+ .globl load_up_fpu
+load_up_fpu:
+ mfmsr r5
+ ori r5,r5,MSR_FP
+#ifdef CONFIG_PPC64BRIDGE
+ clrldi r5,r5,1 /* turn off 64-bit mode */
+#endif /* CONFIG_PPC64BRIDGE */
+ SYNC
+ MTMSRD(r5) /* enable use of fpu now */
+ isync
+/*
+ * For SMP, we don't do lazy FPU switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_fpu in switch_to.
+ */
+#ifndef CONFIG_SMP
+ tophys(r6,0) /* get __pa constant */
+ addis r3,r6,last_task_used_math@ha
+ lwz r4,last_task_used_math@l(r3)
+ cmpwi 0,r4,0
+ beq 1f
+ add r4,r4,r6
+ addi r4,r4,THREAD /* want last_task_used_math->thread */
+ SAVE_32FPRS(0, r4)
+ mffs fr0
+ stfd fr0,THREAD_FPSCR-4(r4)
+ lwz r5,PT_REGS(r4)
+ add r5,r5,r6
+ lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ li r10,MSR_FP|MSR_FE0|MSR_FE1
+ andc r4,r4,r10 /* disable FP for previous task */
+ stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+ /* enable use of FP after return */
+ mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
+ lwz r4,THREAD_FPEXC_MODE(r5)
+ ori r9,r9,MSR_FP /* enable FP for current */
+ or r9,r9,r4
+ lfd fr0,THREAD_FPSCR-4(r5)
+ mtfsf 0xff,fr0
+ REST_32FPRS(0, r5)
+#ifndef CONFIG_SMP
+ subi r4,r5,THREAD
+ sub r4,r4,r6
+ stw r4,last_task_used_math@l(r3)
+#endif /* CONFIG_SMP */
+ /* restore registers and return */
+ /* we haven't used ctr or xer or lr */
+ b fast_exception_return
+
+/*
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
+ */
+ .globl KernelFP
+KernelFP:
+ lwz r3,_MSR(r1)
+ ori r3,r3,MSR_FP
+ stw r3,_MSR(r1) /* enable use of FP after return */
+ lis r3,86f@h
+ ori r3,r3,86f@l
+ mr r4,r2 /* current */
+ lwz r5,_NIP(r1)
+ bl printk
+ b ret_from_except
+86: .string "floating point used in kernel (task=%p, pc=%x)\n"
+ .align 4,0
+
+/*
+ * giveup_fpu(tsk)
+ * Disable FP for the task given as the argument,
+ * and save the floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ */
+ .globl giveup_fpu
+giveup_fpu:
+ mfmsr r5
+ ori r5,r5,MSR_FP
+ SYNC_601
+ ISYNC_601
+ MTMSRD(r5) /* enable use of fpu now */
+ SYNC_601
+ isync
+ cmpwi 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ lwz r5,PT_REGS(r3)
+ cmpwi 0,r5,0
+ SAVE_32FPRS(0, r3)
+ mffs fr0
+ stfd fr0,THREAD_FPSCR-4(r3)
+ beq 1f
+ lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ li r3,MSR_FP|MSR_FE0|MSR_FE1
+ andc r4,r4,r3 /* disable FP for previous task */
+ stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+ li r5,0
+ lis r4,last_task_used_math@ha
+ stw r5,last_task_used_math@l(r4)
+#endif /* CONFIG_SMP */
+ blr
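The comments in the new fpu.S describe the UP lazy-FPU scheme: FP state is only saved and restored when a task actually takes an FP-unavailable trap, and last_task_used_math records whose registers currently live in the FPU. The following C sketch models that bookkeeping only; the kernel does all of this in assembly with MSR bits and physical addresses, and the helper names here are stand-ins, not kernel functions.

/*
 * C-level sketch of the UP lazy-FPU policy implemented by load_up_fpu
 * and giveup_fpu above.  Only the last_task_used_math bookkeeping is
 * modeled; msr_fp stands in for the MSR_FP bit in each task's saved MSR.
 */
#include <stdio.h>

struct task {
	const char *name;
	double fpr[32];		/* saved FP register image (thread_struct) */
	int msr_fp;		/* is MSR_FP set in this task's saved MSR? */
};

static struct task *last_task_used_math;	/* whose state is live in the FPU */
static double fpu[32];				/* the one physical register file */

static void save_fp_state(struct task *t)
{
	for (int i = 0; i < 32; i++)
		t->fpr[i] = fpu[i];
	t->msr_fp = 0;		/* previous owner will trap again if it uses FP */
}

static void restore_fp_state(struct task *t)
{
	for (int i = 0; i < 32; i++)
		fpu[i] = t->fpr[i];
	t->msr_fp = 1;		/* return to this task with FP enabled */
}

/* Called when a task takes an FP-unavailable trap from user mode. */
static void fp_unavailable(struct task *current)
{
	if (last_task_used_math && last_task_used_math != current)
		save_fp_state(last_task_used_math);
	restore_fp_state(current);
	last_task_used_math = current;
}

int main(void)
{
	struct task a = { .name = "a" }, b = { .name = "b" };

	fp_unavailable(&a);	/* a owns the FPU */
	fp_unavailable(&b);	/* a's state is spilled, b owns the FPU */
	printf("owner: %s\n", last_task_used_math->name);
	return 0;
}

On SMP this shortcut is abandoned, as the fpu.S comment notes: giveup_fpu is called from switch_to instead, so state never stays lazily parked in another CPU's FPU.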
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1a89a71e0acc..a931d773715f 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -775,133 +775,6 @@ InstructionSegment:
EXC_XFER_STD(0x480, UnknownException)
#endif /* CONFIG_PPC64BRIDGE */
-/*
- * This task wants to use the FPU now.
- * On UP, disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Load up this task's FP registers from its thread_struct,
- * enable the FPU for the current task and return to the task.
- */
-load_up_fpu:
- mfmsr r5
- ori r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
- clrldi r5,r5,1 /* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
- SYNC
- MTMSRD(r5) /* enable use of fpu now */
- isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
- tophys(r6,0) /* get __pa constant */
- addis r3,r6,last_task_used_math@ha
- lwz r4,last_task_used_math@l(r3)
- cmpwi 0,r4,0
- beq 1f
- add r4,r4,r6
- addi r4,r4,THREAD /* want last_task_used_math->thread */
- SAVE_32FPRS(0, r4)
- mffs fr0
- stfd fr0,THREAD_FPSCR-4(r4)
- lwz r5,PT_REGS(r4)
- add r5,r5,r6
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r10,MSR_FP|MSR_FE0|MSR_FE1
- andc r4,r4,r10 /* disable FP for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- /* enable use of FP after return */
- mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
- lwz r4,THREAD_FPEXC_MODE(r5)
- ori r9,r9,MSR_FP /* enable FP for current */
- or r9,r9,r4
- lfd fr0,THREAD_FPSCR-4(r5)
- mtfsf 0xff,fr0
- REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
- subi r4,r5,THREAD
- sub r4,r4,r6
- stw r4,last_task_used_math@l(r3)
-#endif /* CONFIG_SMP */
- /* restore registers and return */
- /* we haven't used ctr or xer or lr */
- /* fall through to fast_exception_return */
-
- .globl fast_exception_return
-fast_exception_return:
- andi. r10,r9,MSR_RI /* check for recoverable interrupt */
- beq 1f /* if not, we've got problems */
-2: REST_4GPRS(3, r11)
- lwz r10,_CCR(r11)
- REST_GPR(1, r11)
- mtcr r10
- lwz r10,_LINK(r11)
- mtlr r10
- REST_GPR(10, r11)
- mtspr SPRN_SRR1,r9
- mtspr SPRN_SRR0,r12
- REST_GPR(9, r11)
- REST_GPR(12, r11)
- lwz r11,GPR11(r11)
- SYNC
- RFI
-
-/* check if the exception happened in a restartable section */
-1: lis r3,exc_exit_restart_end@ha
- addi r3,r3,exc_exit_restart_end@l
- cmplw r12,r3
- bge 3f
- lis r4,exc_exit_restart@ha
- addi r4,r4,exc_exit_restart@l
- cmplw r12,r4
- blt 3f
- lis r3,fee_restarts@ha
- tophys(r3,r3)
- lwz r5,fee_restarts@l(r3)
- addi r5,r5,1
- stw r5,fee_restarts@l(r3)
- mr r12,r4 /* restart at exc_exit_restart */
- b 2b
-
- .comm fee_restarts,4
-
-/* aargh, a nonrecoverable interrupt, panic */
-/* aargh, we don't know which trap this is */
-/* but the 601 doesn't implement the RI bit, so assume it's OK */
-3:
-BEGIN_FTR_SECTION
- b 2b
-END_FTR_SECTION_IFSET(CPU_FTR_601)
- li r10,-1
- stw r10,TRAP(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- li r10,MSR_KERNEL
- bl transfer_to_handler_full
- .long nonrecoverable_exception
- .long ret_from_except
-
-/*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
-KernelFP:
- lwz r3,_MSR(r1)
- ori r3,r3,MSR_FP
- stw r3,_MSR(r1) /* enable use of FP after return */
- lis r3,86f@h
- ori r3,r3,86f@l
- mr r4,r2 /* current */
- lwz r5,_NIP(r1)
- bl printk
- b ret_from_except
-86: .string "floating point used in kernel (task=%p, pc=%x)\n"
- .align 4,0
-
#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
* support. Changes to one are likely to be applicable to the
@@ -1016,42 +889,6 @@ giveup_altivec:
#endif /* CONFIG_ALTIVEC */
/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
- .globl giveup_fpu
-giveup_fpu:
- mfmsr r5
- ori r5,r5,MSR_FP
- SYNC_601
- ISYNC_601
- MTMSRD(r5) /* enable use of fpu now */
- SYNC_601
- isync
- cmpwi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- lwz r5,PT_REGS(r3)
- cmpwi 0,r5,0
- SAVE_32FPRS(0, r3)
- mffs fr0
- stfd fr0,THREAD_FPSCR-4(r3)
- beq 1f
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r3,MSR_FP|MSR_FE0|MSR_FE1
- andc r4,r4,r3 /* disable FP for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- lis r4,last_task_used_math@ha
- stw r5,last_task_used_math@l(r4)
-#endif /* CONFIG_SMP */
- blr
-
-/*
* This code is jumped to from the startup code to copy
* the kernel image to physical address 0.
*/
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 9ed8165a3d6c..9b6a8e513657 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -426,7 +426,11 @@ interrupt_base:
PROGRAM_EXCEPTION
/* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+ FP_UNAVAILABLE_EXCEPTION
+#else
EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
@@ -686,8 +690,10 @@ _GLOBAL(giveup_altivec)
*
* The 44x core does not have an FPU.
*/
+#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
blr
+#endif
/*
* extern void abort(void)
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index 884dac916bce..f213d12eec08 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -337,4 +337,11 @@ label:
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_LITE(0x0900, timer_interrupt)
+#define FP_UNAVAILABLE_EXCEPTION \
+ START_EXCEPTION(FloatingPointUnavailable) \
+ NORMAL_EXCEPTION_PROLOG; \
+ bne load_up_fpu; /* if from user, just load it up */ \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ EXC_XFER_EE_LITE(0x800, KernelFP)
+
#endif /* __HEAD_BOOKE_H__ */
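The new FP_UNAVAILABLE_EXCEPTION macro mirrors the classic-PPC handler: the prolog's condition-register test (the "bne load_up_fpu") distinguishes a trap taken from user mode, which gets the lazy load_up_fpu path, from one taken in the kernel, which goes to KernelFP to log a message and re-enable MSR_FP for the return. The C below is only an illustration of that branch; the struct and helper names are invented for the sketch, and the MSR bit values are the standard PowerPC ones.

/*
 * Sketch of the decision FP_UNAVAILABLE_EXCEPTION encodes: user-mode
 * traps restore FP state lazily, kernel-mode traps warn and turn FP
 * back on.  Illustrative only, not kernel source.
 */
#include <stdio.h>

#define MSR_PR 0x4000	/* problem state: 1 = user mode */
#define MSR_FP 0x2000	/* floating-point available */

struct pt_regs_sketch {
	unsigned long msr;	/* MSR at the time of the trap */
	unsigned long nip;	/* faulting instruction address */
};

static void load_up_fpu_sketch(void)
{
	/* lazy restore: give this task the FPU and return via
	 * fast_exception_return (see fpu.S above) */
	puts("user task: restoring its FP state");
}

static void kernel_fp_sketch(struct pt_regs_sketch *regs)
{
	/* KernelFP: warn, then let the kernel code keep using FP */
	printf("floating point used in kernel (pc=%lx)\n", regs->nip);
	regs->msr |= MSR_FP;
}

static void fp_unavailable_trap(struct pt_regs_sketch *regs)
{
	if (regs->msr & MSR_PR)
		load_up_fpu_sketch();
	else
		kernel_fp_sketch(regs);
}

int main(void)
{
	struct pt_regs_sketch user = { .msr = MSR_PR, .nip = 0x10001234 };
	struct pt_regs_sketch kern = { .msr = 0,      .nip = 0xc0001234 };

	fp_unavailable_trap(&user);
	fp_unavailable_trap(&kern);
	return 0;
}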
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
index d64bf61d2b1f..f22ddce36135 100644
--- a/arch/ppc/kernel/head_fsl_booke.S
+++ b/arch/ppc/kernel/head_fsl_booke.S
@@ -504,7 +504,11 @@ interrupt_base:
PROGRAM_EXCEPTION
/* Floating Point Unavailable Interrupt */
+#ifdef CONFIG_PPC_FPU
+ FP_UNAVAILABLE_EXCEPTION
+#else
EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
+#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
@@ -916,10 +920,12 @@ _GLOBAL(giveup_spe)
/*
* extern void giveup_fpu(struct task_struct *prev)
*
- * The e500 core does not have an FPU.
+ * Not all FSL Book-E cores have an FPU
*/
+#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
blr
+#endif
/*
* extern void abort(void)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 73f7c23b0dd4..e4f1615ec13f 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -1096,17 +1096,7 @@ _GLOBAL(_get_SP)
* and exceptions as if the cpu had performed the load or store.
*/
-#if defined(CONFIG_4xx) || defined(CONFIG_E500)
-_GLOBAL(cvt_fd)
- lfs 0,0(r3)
- stfd 0,0(r4)
- blr
-
-_GLOBAL(cvt_df)
- lfd 0,0(r3)
- stfs 0,0(r4)
- blr
-#else
+#ifdef CONFIG_PPC_FPU
_GLOBAL(cvt_fd)
lfd 0,-4(r5) /* load up fpscr value */
mtfsf 0xff,0
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 361865c4bc84..f8e7e324a173 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -176,7 +176,7 @@ static inline int check_io_access(struct pt_regs *regs)
#else
#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
#endif
-#define REASON_FP 0
+#define REASON_FP ESR_FP
#define REASON_ILLEGAL ESR_PIL
#define REASON_PRIVILEGED ESR_PPR
#define REASON_TRAP ESR_PTR
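The one-line traps.c change matters because the REASON_* codes are tested with a bitwise AND against the exception syndrome, and a REASON_FP of 0 can never match, so Book-E floating-point exceptions were never classified as FP. The sketch below demonstrates only that masking logic; the ESR bit values are placeholders and the dispatch is a simplification, not the real program_check_exception.

/*
 * Why REASON_FP must be a real ESR bit: reason codes are tested with a
 * bitwise AND, so a definition of 0 never matches.  Bit values here are
 * placeholders; only the masking logic is the point.
 */
#include <stdio.h>

#define ESR_FP		0x00100000	/* placeholder bit values */
#define ESR_PIL		0x08000000
#define REASON_FP	ESR_FP		/* was: 0 -- could never match */
#define REASON_ILLEGAL	ESR_PIL

static void classify(unsigned long reason)
{
	if (reason & REASON_FP)
		puts("floating-point exception");
	else if (reason & REASON_ILLEGAL)
		puts("illegal instruction");
	else
		puts("unknown");
}

int main(void)
{
	classify(ESR_FP);	/* now reported as FP; with REASON_FP == 0 it
				   would have fallen through to "unknown" */
	return 0;
}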