Diffstat (limited to 'asm')
-rw-r--r--  asm/head.S  135
-rw-r--r--  asm/misc.S  138
2 files changed, 139 insertions(+), 134 deletions(-)
diff --git a/asm/head.S b/asm/head.S
index 5e7dc896..0ed1acdd 100644
--- a/asm/head.S
+++ b/asm/head.S
@@ -23,13 +23,6 @@
#define EPAPR_MAGIC 0x65504150
-/* Power management instructions */
-#define PPC_INST_NAP .long 0x4c000364
-#define PPC_INST_SLEEP .long 0x4c0003a4
-#define PPC_INST_RVWINKLE .long 0x4c0003e4
-
-#define PPC_INST_STOP .long 0x4c0002e4
-
#define GET_STACK(stack_reg,pir_reg) \
sldi stack_reg,pir_reg,STACK_SHIFT; \
addis stack_reg,stack_reg,CPU_STACKS_OFFSET@ha; \
addi stack_reg,stack_reg,CPU_STACKS_OFFSET@l;
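For readers not fluent in the @ha/@l idiom: GET_STACK derives a thread's stack address from its PIR. A minimal C sketch of the same arithmetic, with illustrative values assumed for the two constants (the real ones live in skiboot's headers):

	#include <stdint.h>

	#define STACK_SHIFT       14             /* assumed: 16KB stack per thread */
	#define CPU_STACKS_OFFSET 0x01000000UL   /* assumed base of the stack area */

	/* C equivalent of GET_STACK: base + (pir << STACK_SHIFT). */
	static inline uintptr_t cpu_stack_base(uint32_t pir)
	{
		return CPU_STACKS_OFFSET + ((uintptr_t)pir << STACK_SHIFT);
	}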
@@ -565,133 +558,6 @@ call_relocate:
1: /* Fatal relocate failure */
attn
-.global enable_machine_check
-enable_machine_check:
- mflr %r0
- bcl 20,31,$+4
-0: mflr %r3
- addi %r3,%r3,(1f - 0b)
- mtspr SPR_HSRR0,%r3
- mfmsr %r3
- ori %r3,%r3,MSR_ME
- mtspr SPR_HSRR1,%r3
- hrfid
-1: mtlr %r0
- blr
-
-.global disable_machine_check
-disable_machine_check:
- mflr %r0
- bcl 20,31,$+4
-0: mflr %r3
- addi %r3,%r3,(1f - 0b)
- mtspr SPR_HSRR0,%r3
- mfmsr %r3
- li %r4,MSR_ME
- andc %r3,%r3,%r4
- mtspr SPR_HSRR1,%r3
- hrfid
-1: mtlr %r0
- blr
-
-pm_save_regs:
- SAVE_GPR(2,%r1)
- SAVE_GPR(14,%r1)
- SAVE_GPR(15,%r1)
- SAVE_GPR(16,%r1)
- SAVE_GPR(17,%r1)
- SAVE_GPR(18,%r1)
- SAVE_GPR(19,%r1)
- SAVE_GPR(20,%r1)
- SAVE_GPR(21,%r1)
- SAVE_GPR(22,%r1)
- SAVE_GPR(23,%r1)
- SAVE_GPR(24,%r1)
- SAVE_GPR(25,%r1)
- SAVE_GPR(26,%r1)
- SAVE_GPR(27,%r1)
- SAVE_GPR(28,%r1)
- SAVE_GPR(29,%r1)
- SAVE_GPR(30,%r1)
- SAVE_GPR(31,%r1)
- mfcr %r4
- mfxer %r5
- mfspr %r6,SPR_HSPRG0
- mfspr %r7,SPR_HSPRG1
- stw %r4,STACK_CR(%r1)
- stw %r5,STACK_XER(%r1)
- std %r6,STACK_GPR0(%r1)
- std %r7,STACK_GPR1(%r1)
- blr
-
-.global enter_p8_pm_state
-enter_p8_pm_state:
- /* Before entering nap or rvwinkle, we create a stack frame
- * and save our non-volatile registers.
- *
- * We also save these SPRs:
- *
- * - HSPRG0 in GPR0 slot
- * - HSPRG1 in GPR1 slot
- *
- * - xxx TODO: HIDs
- * - TODO: Mask MSR:ME during the process
- *
- * On entry, r3 indicates:
- *
- * 0 = nap
- * 1 = rvwinkle
- */
- mflr %r0
- std %r0,16(%r1)
- stdu %r1,-STACK_FRAMESIZE(%r1)
-
- bl pm_save_regs
-
- /* Save stack pointer in struct cpu_thread */
- std %r1,CPUTHREAD_SAVE_R1(%r13)
-
- /* Winkle or nap? */
- cmpli %cr0,0,%r3,0
- bne 1f
-
- /* nap sequence */
- ptesync
-0: ld %r0,CPUTHREAD_SAVE_R1(%r13)
- cmpd cr0,%r0,%r0
- bne 0b
- PPC_INST_NAP
- b .
-
- /* rvwinkle sequence */
-1: ptesync
-0: ld %r0,CPUTHREAD_SAVE_R1(%r13)
- cmpd cr0,%r0,%r0
- bne 0b
- PPC_INST_RVWINKLE
- b .
-
-.global enter_p9_pm_lite_state
-enter_p9_pm_lite_state:
- mtspr SPR_PSSCR,%r3
- PPC_INST_STOP
- blr
-
-.global enter_p9_pm_state
-enter_p9_pm_state:
- mflr %r0
- std %r0,16(%r1)
- stdu %r1,-STACK_FRAMESIZE(%r1)
-
- bl pm_save_regs
-
- /* Save stack pointer in struct cpu_thread */
- std %r1,CPUTHREAD_SAVE_R1(%r13)
-
- mtspr SPR_PSSCR,%r3
- PPC_INST_STOP
- b .
-
/* This is a little piece of code that is copied down to
* 0x100 for handling sresets and power management wakeups.
* This matches the 0x200 handler closely.
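A hedged sketch of how such a patch is typically installed from C. Only reset_patch_start/reset_patch_end and the 0x100 target come from this file; the installer function itself is an assumption:

	#include <string.h>

	extern char reset_patch_start[], reset_patch_end[];

	/* Copy the patch blob over the 0x100 sreset vector (assumed helper). */
	static void install_reset_patch(void)
	{
		memcpy((void *)0x100, reset_patch_start,
		       reset_patch_end - reset_patch_start);
		/* The icache must then be made coherent with the new
		 * instructions (icbi;sync sequence), omitted here. */
	}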
@@ -725,6 +591,7 @@ reset_patch_start:
reset_patch_end:
/* Wakeup vector in r3 */
+.global reset_wakeup
reset_wakeup:
/* Get PIR */
mfspr %r31,SPR_PIR
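The only addition on the head.S side is exporting reset_wakeup, presumably so the wakeup entry point can be referenced by name from other objects. A minimal sketch of such a reference (the usage is an assumption, not taken from this diff):

	/* reset_wakeup is asm, not a C-callable function; an opaque array
	 * type lets C take its address without implying an ABI. */
	extern unsigned char reset_wakeup[];

	static inline unsigned long reset_wakeup_addr(void)
	{
		return (unsigned long)reset_wakeup;
	}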
diff --git a/asm/misc.S b/asm/misc.S
index 6ef11a35..223fd3d5 100644
--- a/asm/misc.S
+++ b/asm/misc.S
@@ -24,6 +24,35 @@
.section ".text","ax"
.balign 0x10
+.global enable_machine_check
+enable_machine_check:
+ mflr %r0
+ bcl 20,31,$+4
+0: mflr %r3
+ addi %r3,%r3,(1f - 0b)
+ mtspr SPR_HSRR0,%r3
+ mfmsr %r3
+ ori %r3,%r3,MSR_ME
+ mtspr SPR_HSRR1,%r3
+ hrfid
+1: mtlr %r0
+ blr
+
+.global disable_machine_check
+disable_machine_check:
+ mflr %r0
+ bcl 20,31,$+4
+0: mflr %r3
+ addi %r3,%r3,(1f - 0b)
+ mtspr SPR_HSRR0,%r3
+ mfmsr %r3
+ li %r4,MSR_ME
+ andc %r3,%r3,%r4
+ mtspr SPR_HSRR1,%r3
+ hrfid
+1: mtlr %r0
+ blr
+
/* void set_hid0(unsigned long hid0) */
.global set_hid0
set_hid0:
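The two routines moved in above toggle MSR[ME]. Because the MSR and the next instruction address must change atomically, the update goes through HSRR0/HSRR1 and hrfid; the bcl 20,31,$+4 / mflr pair merely reads the current address so the code stays position-independent. In C terms, the values loaded into HSRR1 are simply (bit encoding assumed for illustration):

	#include <stdint.h>

	#define MSR_ME 0x1000ull   /* assumed: MSR[ME], machine check enable */

	static inline uint64_t msr_enable_mc(uint64_t msr)  { return msr | MSR_ME;  }
	static inline uint64_t msr_disable_mc(uint64_t msr) { return msr & ~MSR_ME; }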
@@ -126,3 +155,112 @@ cleanup_global_tlb:
ptesync
blr
+
+
+/* Power management instructions */
+#define PPC_INST_NAP .long 0x4c000364
+#define PPC_INST_SLEEP .long 0x4c0003a4
+#define PPC_INST_RVWINKLE .long 0x4c0003e4
+
+#define PPC_INST_STOP .long 0x4c0002e4
+
+#define SAVE_GPR(reg,sp) std %r##reg,STACK_GPR##reg(sp)
+#define REST_GPR(reg,sp) ld %r##reg,STACK_GPR##reg(sp)
+
+pm_save_regs:
+ SAVE_GPR(2,%r1)
+ SAVE_GPR(14,%r1)
+ SAVE_GPR(15,%r1)
+ SAVE_GPR(16,%r1)
+ SAVE_GPR(17,%r1)
+ SAVE_GPR(18,%r1)
+ SAVE_GPR(19,%r1)
+ SAVE_GPR(20,%r1)
+ SAVE_GPR(21,%r1)
+ SAVE_GPR(22,%r1)
+ SAVE_GPR(23,%r1)
+ SAVE_GPR(24,%r1)
+ SAVE_GPR(25,%r1)
+ SAVE_GPR(26,%r1)
+ SAVE_GPR(27,%r1)
+ SAVE_GPR(28,%r1)
+ SAVE_GPR(29,%r1)
+ SAVE_GPR(30,%r1)
+ SAVE_GPR(31,%r1)
+ mfcr %r4
+ mfxer %r5
+ mfspr %r6,SPR_HSPRG0
+ mfspr %r7,SPR_HSPRG1
+ stw %r4,STACK_CR(%r1)
+ stw %r5,STACK_XER(%r1)
+ std %r6,STACK_GPR0(%r1)
+ std %r7,STACK_GPR1(%r1)
+ blr
+
+.global enter_p8_pm_state
+enter_p8_pm_state:
+ /* Before entering nap or rvwinkle, we create a stack frame
+ * and save our non-volatile registers.
+ *
+ * We also save these SPRs:
+ *
+ * - HSPRG0 in GPR0 slot
+ * - HSPRG1 in GPR1 slot
+ *
+ * - xxx TODO: HIDs
+ * - TODO: Mask MSR:ME during the process
+ *
+ * On entry, r3 indicates:
+ *
+ * 0 = nap
+ * 1 = rvwinkle
+ */
+ mflr %r0
+ std %r0,16(%r1)
+ stdu %r1,-STACK_FRAMESIZE(%r1)
+
+ bl pm_save_regs
+
+ /* Save stack pointer in struct cpu_thread */
+ std %r1,CPUTHREAD_SAVE_R1(%r13)
+
+ /* Winkle or nap? */
+ cmpli %cr0,0,%r3,0
+ bne 1f
+
+ /* nap sequence */
+ ptesync
+0: ld %r0,CPUTHREAD_SAVE_R1(%r13)
+ cmpd cr0,%r0,%r0
+ bne 0b
+ PPC_INST_NAP
+ b .
+
+ /* rvwinkle sequence */
+1: ptesync
+0: ld %r0,CPUTHREAD_SAVE_R1(%r13)
+ cmpd cr0,%r0,%r0
+ bne 0b
+ PPC_INST_RVWINKLE
+ b .
+
+.global enter_p9_pm_lite_state
+enter_p9_pm_lite_state:
+ mtspr SPR_PSSCR,%r3
+ PPC_INST_STOP
+ blr
+
+.global enter_p9_pm_state
+enter_p9_pm_state:
+ mflr %r0
+ std %r0,16(%r1)
+ stdu %r1,-STACK_FRAMESIZE(%r1)
+
+ bl pm_save_regs
+
+ /* Save stack pointer in struct cpu_thread */
+ std %r1,CPUTHREAD_SAVE_R1(%r13)
+
+ mtspr SPR_PSSCR,%r3
+ PPC_INST_STOP
+ b .
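Taken together, the moved entry points imply a small C-side contract, reconstructed here as a hedged sketch (the prototypes are assumptions; skiboot's real declarations may differ):

	#include <stdint.h>

	extern void enter_p8_pm_state(uint64_t winkle);     /* r3: 0 = nap, 1 = rvwinkle */
	extern void enter_p9_pm_lite_state(uint64_t psscr); /* lite stop: returns via blr */
	extern void enter_p9_pm_state(uint64_t psscr);      /* deep stop: wakes at 0x100 */

	/* Hypothetical caller: pick a P9 idle entry based on state depth. */
	static void idle_thread_p9(uint64_t psscr_val, int deep)
	{
		if (deep)
			enter_p9_pm_state(psscr_val);   /* resumes via reset_wakeup */
		else
			enter_p9_pm_lite_state(psscr_val);
	}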