Diffstat (limited to 'arch/arm/mach-omap2/sleep34xx.S')
-rw-r--r--  arch/arm/mach-omap2/sleep34xx.S  282
1 file changed, 133 insertions(+), 149 deletions(-)
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 951a0be66cf7..63f10669571a 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -64,6 +64,11 @@
#define SDRC_DLLA_STATUS_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
+/*
+ * This file needs to be built unconditionally as ARM to interoperate correctly
+ * with non-Thumb-2-capable firmware.
+ */
+ .arm
/*
* API functions
@@ -82,6 +87,8 @@ ENTRY(get_restore_pointer)
stmfd sp!, {lr} @ save registers on stack
adr r0, restore
ldmfd sp!, {pc} @ restore regs and return
+ENDPROC(get_restore_pointer)
+ .align
ENTRY(get_restore_pointer_sz)
.word . - get_restore_pointer
@@ -91,6 +98,8 @@ ENTRY(get_omap3630_restore_pointer)
stmfd sp!, {lr} @ save registers on stack
adr r0, restore_3630
ldmfd sp!, {pc} @ restore regs and return
+ENDPROC(get_omap3630_restore_pointer)
+ .align
ENTRY(get_omap3630_restore_pointer_sz)
.word . - get_omap3630_restore_pointer
@@ -100,6 +109,8 @@ ENTRY(get_es3_restore_pointer)
stmfd sp!, {lr} @ save registers on stack
adr r0, restore_es3
ldmfd sp!, {pc} @ restore regs and return
+ENDPROC(get_es3_restore_pointer)
+ .align
ENTRY(get_es3_restore_pointer_sz)
.word . - get_es3_restore_pointer
@@ -113,8 +124,10 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
stmfd sp!, {lr} @ save registers on stack
/* Setup so that we will disable and enable l2 */
mov r1, #0x1
- str r1, l2dis_3630
+ adrl r2, l2dis_3630 @ may be too distant for plain adr
+ str r1, [r2]
ldmfd sp!, {pc} @ restore regs and return
+ENDPROC(enable_omap3630_toggle_l2_on_restore)
.text
/* Function to call rom code to save secure ram context */
@@ -132,20 +145,22 @@ ENTRY(save_secure_ram_context)
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
- mcr p15, 0, r0, c7, c10, 4 @ data write barrier
- mcr p15, 0, r0, c7, c10, 5 @ data memory barrier
- .word 0xE1600071 @ call SMI monitor (smi #1)
+ dsb @ data write barrier
+ dmb @ data memory barrier
+ smc #1 @ call SMI monitor (smi #1)
nop
nop
nop
nop
ldmfd sp!, {r1-r12, pc}
+ .align
sram_phy_addr_mask:
.word SRAM_BASE_P
high_mask:
.word 0xffff
api_params:
.word 0x4, 0x0, 0x0, 0x1, 0x1
+ENDPROC(save_secure_ram_context)
ENTRY(save_secure_ram_context_sz)
.word . - save_secure_ram_context
@@ -175,12 +190,12 @@ ENTRY(omap34xx_cpu_suspend)
stmfd sp!, {r0-r12, lr} @ save registers on stack
/*
- * r0 contains restore pointer in sdram
+ * r0 contains CPU context save/restore pointer in sdram
* r1 contains information about saving context:
* 0 - No context lost
* 1 - Only L1 and logic lost
- * 2 - Only L2 lost
- * 3 - Both L1 and L2 lost
+ 2 - Only L2 lost (Even if L1 is retained, we clean it along with L2)
+ 3 - Both L1 and L2 lost, and logic lost
*/
/* Directly jump to WFI if the context save is not required */
@@ -201,89 +216,74 @@ save_context_wfi:
beq clean_caches
l1_logic_lost:
- /* Store sp and spsr to SDRAM */
- mov r4, sp
- mrs r5, spsr
- mov r6, lr
+ mov r4, sp @ Store sp
+ mrs r5, spsr @ Store spsr
+ mov r6, lr @ Store lr
stmia r8!, {r4-r6}
- /* Save all ARM registers */
- /* Coprocessor access control register */
- mrc p15, 0, r6, c1, c0, 2
- stmia r8!, {r6}
- /* TTBR0, TTBR1 and Translation table base control */
- mrc p15, 0, r4, c2, c0, 0
- mrc p15, 0, r5, c2, c0, 1
- mrc p15, 0, r6, c2, c0, 2
- stmia r8!, {r4-r6}
- /*
- * Domain access control register, data fault status register,
- * and instruction fault status register
- */
- mrc p15, 0, r4, c3, c0, 0
- mrc p15, 0, r5, c5, c0, 0
- mrc p15, 0, r6, c5, c0, 1
- stmia r8!, {r4-r6}
- /*
- * Data aux fault status register, instruction aux fault status,
- * data fault address register and instruction fault address register
- */
- mrc p15, 0, r4, c5, c1, 0
- mrc p15, 0, r5, c5, c1, 1
- mrc p15, 0, r6, c6, c0, 0
- mrc p15, 0, r7, c6, c0, 2
- stmia r8!, {r4-r7}
- /*
- * user r/w thread and process ID, user r/o thread and process ID,
- * priv only thread and process ID, cache size selection
- */
- mrc p15, 0, r4, c13, c0, 2
- mrc p15, 0, r5, c13, c0, 3
- mrc p15, 0, r6, c13, c0, 4
- mrc p15, 2, r7, c0, c0, 0
+
+ mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
+ mrc p15, 0, r5, c2, c0, 0 @ TTBR0
+ mrc p15, 0, r6, c2, c0, 1 @ TTBR1
+ mrc p15, 0, r7, c2, c0, 2 @ TTBCR
stmia r8!, {r4-r7}
- /* Data TLB lockdown, instruction TLB lockdown registers */
- mrc p15, 0, r5, c10, c0, 0
- mrc p15, 0, r6, c10, c0, 1
- stmia r8!, {r5-r6}
- /* Secure or non secure vector base address, FCSE PID, Context PID*/
- mrc p15, 0, r4, c12, c0, 0
- mrc p15, 0, r5, c13, c0, 0
- mrc p15, 0, r6, c13, c0, 1
- stmia r8!, {r4-r6}
- /* Primary remap, normal remap registers */
- mrc p15, 0, r4, c10, c2, 0
- mrc p15, 0, r5, c10, c2, 1
- stmia r8!,{r4-r5}
- /* Store current cpsr*/
- mrs r2, cpsr
- stmia r8!, {r2}
+ mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
+ mrc p15, 0, r5, c10, c2, 0 @ PRRR
+ mrc p15, 0, r6, c10, c2, 1 @ NMRR
+ stmia r8!,{r4-r6}
- mrc p15, 0, r4, c1, c0, 0
- /* save control register */
+ mrc p15, 0, r4, c13, c0, 1 @ Context ID
+ mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
+ mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
+ mrs r7, cpsr @ Store current cpsr
+ stmia r8!, {r4-r7}
+
+ mrc p15, 0, r4, c1, c0, 0 @ save control register
stmia r8!, {r4}
clean_caches:
/*
- * Clean Data or unified cache to POU
- * How to invalidate only L1 cache???? - #FIX_ME#
- * mcr p15, 0, r11, c7, c11, 1
- */
- cmp r1, #0x1 @ Check whether L2 inval is required
- beq omap3_do_wfi
-
-clean_l2:
- /*
* jump out to kernel flush routine
* - reuse that code is better
* - it executes in a cached space so is faster than refetch per-block
* - should be faster and will change with kernel
* - 'might' have to copy address, load and jump to it
+ * Flush all data from the L1 data cache before disabling
+ * SCTLR.C bit.
*/
ldr r1, kernel_flush
mov lr, pc
bx r1
+ /*
+ * Clear the SCTLR.C bit to prevent further data cache
+ * allocation. Clearing SCTLR.C makes all data accesses
+ * strongly ordered, so they will not hit the cache.
+ */
+ mrc p15, 0, r0, c1, c0, 0
+ bic r0, r0, #(1 << 2) @ Disable the C bit
+ mcr p15, 0, r0, c1, c0, 0
+ isb
+
+ /*
+ * Invalidate L1 data cache. Even though only an invalidate is
+ * necessary, the exported flush API is used here. Doing a clean
+ * on an already-clean cache is almost a NOP.
+ */
+ ldr r1, kernel_flush
+ blx r1
+ /*
+ * The kernel doesn't interwork: v7_flush_dcache_all in particular will
+ * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
+ * This sequence switches back to ARM. Note that .align may insert a
+ * nop: bx pc needs to be word-aligned in order to work.
+ */
+ THUMB( .thumb )
+ THUMB( .align )
+ THUMB( bx pc )
+ THUMB( nop )
+ .arm
+
omap3_do_wfi:
ldr r4, sdrc_power @ read the SDRC_POWER register
ldr r5, [r4] @ read the contents of SDRC_POWER
@@ -291,9 +291,8 @@ omap3_do_wfi:
str r5, [r4] @ write back to SDRC_POWER register
/* Data memory barrier and Data sync barrier */
- mov r1, #0
- mcr p15, 0, r1, c7, c10, 4
- mcr p15, 0, r1, c7, c10, 5
+ dsb
+ dmb
/*
* ===================================
@@ -319,6 +318,12 @@ omap3_do_wfi:
nop
bl wait_sdrc_ok
+ mrc p15, 0, r0, c1, c0, 0
+ tst r0, #(1 << 2) @ Check C bit enabled?
+ orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
+ mcreq p15, 0, r0, c1, c0, 0
+ isb
+
/*
* ===================================
* == Exit point from non-OFF modes ==
@@ -408,9 +413,9 @@ skipl2dis:
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
adr r3, l2_inv_api_params @ r3 points to dummy parameters
- mcr p15, 0, r0, c7, c10, 4 @ data write barrier
- mcr p15, 0, r0, c7, c10, 5 @ data memory barrier
- .word 0xE1600071 @ call SMI monitor (smi #1)
+ dsb @ data write barrier
+ dmb @ data memory barrier
+ smc #1 @ call SMI monitor (smi #1)
/* Write to Aux control register to set some bits */
mov r0, #42 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12
@@ -419,9 +424,9 @@ skipl2dis:
mov r6, #0xff
ldr r4, scratchpad_base
ldr r3, [r4, #0xBC] @ r3 points to parameters
- mcr p15, 0, r0, c7, c10, 4 @ data write barrier
- mcr p15, 0, r0, c7, c10, 5 @ data memory barrier
- .word 0xE1600071 @ call SMI monitor (smi #1)
+ dsb @ data write barrier
+ dmb @ data memory barrier
+ smc #1 @ call SMI monitor (smi #1)
#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
/* Restore L2 aux control register */
@@ -434,29 +439,30 @@ skipl2dis:
ldr r4, scratchpad_base
ldr r3, [r4, #0xBC]
adds r3, r3, #8 @ r3 points to parameters
- mcr p15, 0, r0, c7, c10, 4 @ data write barrier
- mcr p15, 0, r0, c7, c10, 5 @ data memory barrier
- .word 0xE1600071 @ call SMI monitor (smi #1)
+ dsb @ data write barrier
+ dmb @ data memory barrier
+ smc #1 @ call SMI monitor (smi #1)
#endif
b logic_l1_restore
+ .align
l2_inv_api_params:
.word 0x1, 0x00
l2_inv_gp:
/* Execute smi to invalidate L2 cache */
mov r12, #0x1 @ set up to invalidate L2
- .word 0xE1600070 @ Call SMI monitor (smieq)
+ smc #0 @ Call SMI monitor (smieq)
/* Write to Aux control register to set some bits */
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
ldr r0, [r3,#4]
mov r12, #0x3
- .word 0xE1600070 @ Call SMI monitor (smieq)
+ smc #0 @ Call SMI monitor (smieq)
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
ldr r0, [r3,#12]
mov r12, #0x2
- .word 0xE1600070 @ Call SMI monitor (smieq)
+ smc #0 @ Call SMI monitor (smieq)
logic_l1_restore:
ldr r1, l2dis_3630
cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
@@ -475,68 +481,29 @@ skipl2reen:
ldr r4, scratchpad_base
ldr r3, [r4,#0xBC]
adds r3, r3, #16
+
ldmia r3!, {r4-r6}
- mov sp, r4
- msr spsr_cxsf, r5
- mov lr, r6
-
- ldmia r3!, {r4-r9}
- /* Coprocessor access Control Register */
- mcr p15, 0, r4, c1, c0, 2
-
- /* TTBR0 */
- MCR p15, 0, r5, c2, c0, 0
- /* TTBR1 */
- MCR p15, 0, r6, c2, c0, 1
- /* Translation table base control register */
- MCR p15, 0, r7, c2, c0, 2
- /* Domain access Control Register */
- MCR p15, 0, r8, c3, c0, 0
- /* Data fault status Register */
- MCR p15, 0, r9, c5, c0, 0
-
- ldmia r3!,{r4-r8}
- /* Instruction fault status Register */
- MCR p15, 0, r4, c5, c0, 1
- /* Data Auxiliary Fault Status Register */
- MCR p15, 0, r5, c5, c1, 0
- /* Instruction Auxiliary Fault Status Register*/
- MCR p15, 0, r6, c5, c1, 1
- /* Data Fault Address Register */
- MCR p15, 0, r7, c6, c0, 0
- /* Instruction Fault Address Register*/
- MCR p15, 0, r8, c6, c0, 2
- ldmia r3!,{r4-r7}
+ mov sp, r4 @ Restore sp
+ msr spsr_cxsf, r5 @ Restore spsr
+ mov lr, r6 @ Restore lr
+
+ ldmia r3!, {r4-r7}
+ mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
+ mcr p15, 0, r5, c2, c0, 0 @ TTBR0
+ mcr p15, 0, r6, c2, c0, 1 @ TTBR1
+ mcr p15, 0, r7, c2, c0, 2 @ TTBCR
- /* User r/w thread and process ID */
- MCR p15, 0, r4, c13, c0, 2
- /* User ro thread and process ID */
- MCR p15, 0, r5, c13, c0, 3
- /* Privileged only thread and process ID */
- MCR p15, 0, r6, c13, c0, 4
- /* Cache size selection */
- MCR p15, 2, r7, c0, c0, 0
- ldmia r3!,{r4-r8}
- /* Data TLB lockdown registers */
- MCR p15, 0, r4, c10, c0, 0
- /* Instruction TLB lockdown registers */
- MCR p15, 0, r5, c10, c0, 1
- /* Secure or Nonsecure Vector Base Address */
- MCR p15, 0, r6, c12, c0, 0
- /* FCSE PID */
- MCR p15, 0, r7, c13, c0, 0
- /* Context PID */
- MCR p15, 0, r8, c13, c0, 1
-
- ldmia r3!,{r4-r5}
- /* Primary memory remap register */
- MCR p15, 0, r4, c10, c2, 0
- /* Normal memory remap register */
- MCR p15, 0, r5, c10, c2, 1
-
- /* Restore cpsr */
- ldmia r3!,{r4} @ load CPSR from SDRAM
- msr cpsr, r4 @ store cpsr
+ ldmia r3!,{r4-r6}
+ mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
+ mcr p15, 0, r5, c10, c2, 0 @ PRRR
+ mcr p15, 0, r6, c10, c2, 1 @ NMRR
+
+
+ ldmia r3!,{r4-r7}
+ mcr p15, 0, r4, c13, c0, 1 @ Context ID
+ mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
+ mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
+ msr cpsr, r7 @ store cpsr
/* Enabling MMU here */
mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
@@ -594,12 +561,17 @@ usettbr0:
ldr r2, cache_pred_disable_mask
and r4, r2
mcr p15, 0, r4, c1, c0, 0
+ dsb
+ isb
+ ldr r0, =restoremmu_on
+ bx r0
/*
* ==============================
* == Exit point from OFF mode ==
* ==============================
*/
+restoremmu_on:
ldmfd sp!, {r0-r12, pc} @ restore regs and return
@@ -609,6 +581,7 @@ usettbr0:
/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
.text
+ .align 3
ENTRY(es3_sdrc_fix)
ldr r4, sdrc_syscfg @ get config addr
ldr r5, [r4] @ get value
@@ -636,6 +609,7 @@ ENTRY(es3_sdrc_fix)
str r5, [r4] @ kick off refreshes
bx lr
+ .align
sdrc_syscfg:
.word SDRC_SYSCONFIG_P
sdrc_mr_0:
@@ -650,6 +624,7 @@ sdrc_emr2_1:
.word SDRC_EMR2_1_P
sdrc_manual_1:
.word SDRC_MANUAL_1_P
+ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
.word . - es3_sdrc_fix
@@ -684,6 +659,12 @@ wait_sdrc_ready:
bic r5, r5, #0x40
str r5, [r4]
+/*
+ * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
+ * base instead.
+ * Be careful not to clobber r7 when maintaining this code.
+ */
+
is_dll_in_lock_mode:
/* Is dll in lock mode? */
ldr r4, sdrc_dlla_ctrl
@@ -691,10 +672,11 @@ is_dll_in_lock_mode:
tst r5, #0x4
bxne lr @ Return if locked
/* wait till dll locks */
+ adr r7, kick_counter
wait_dll_lock_timed:
ldr r4, wait_dll_lock_counter
add r4, r4, #1
- str r4, wait_dll_lock_counter
+ str r4, [r7, #wait_dll_lock_counter - kick_counter]
ldr r4, sdrc_dlla_status
/* Wait 20uS for lock */
mov r6, #8
@@ -720,9 +702,10 @@ kick_dll:
dsb
ldr r4, kick_counter
add r4, r4, #1
- str r4, kick_counter
+ str r4, [r7] @ kick_counter
b wait_dll_lock_timed
+ .align
cm_idlest1_core:
.word CM_IDLEST1_CORE_V
cm_idlest_ckgen:
@@ -765,6 +748,7 @@ kick_counter:
.word 0
wait_dll_lock_counter:
.word 0
+ENDPROC(omap34xx_cpu_suspend)
ENTRY(omap34xx_cpu_suspend_sz)
.word . - omap34xx_cpu_suspend
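
Note (not part of the patch): a minimal, standalone sketch of the Thumb-to-ARM
switch used after the kernel cache-flush calls above. The symbols 'caller' and
'callee' are hypothetical; 'callee' stands in for a non-interworking Thumb-2
kernel routine such as v7_flush_dcache_all. The kernel wraps the fix-up lines
in the THUMB() macro from <asm/unified.h> so they are only assembled on
CONFIG_THUMB2_KERNEL builds; here they are written out unconditionally and
should assemble with arm-none-eabi-as.

	.syntax	unified
	.arch	armv7-a
	.text
	.arm
caller:
	ldr	r1, =callee		@ .thumb_func symbol: bit 0 is set
	blx	r1			@ callee returns without interworking,
					@ leaving the CPU in Thumb state
	.thumb				@ assemble the fix-up sequence as Thumb
	.align				@ 'bx pc' must sit on a word boundary
	bx	pc			@ pc reads as '. + 4', i.e. the ARM code below
	nop				@ padding so the ARM code is word-aligned
	.arm				@ execution continues here in ARM state
	bx	lr

	.thumb
	.thumb_func
callee:					@ stand-in for a non-interworking callee
	mov	pc, lr			@ 'mov pc, lr' does not switch state
	.ltorg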