Diffstat (limited to 'arch/arm/kernel/entry-armv.S')
-rw-r--r--  arch/arm/kernel/entry-armv.S  |  50
1 file changed, 45 insertions(+), 5 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2a8d27e18fa7..ec48d70c6d8b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -3,6 +3,7 @@
*
* Copyright (C) 1996,1997,1998 Russell King.
* ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
+ * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,8 +19,6 @@
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
-#include <asm/hardware.h> /* should be moved into entry-macro.S */
-#include <asm/arch/irqs.h> /* should be moved into entry-macro.S */
#include <asm/arch/entry-macro.S>
#include "entry-header.S"
@@ -106,14 +105,24 @@ common_invalid:
/*
* SVC mode handlers
*/
+
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define SPFIX(code...) code
+#else
+#define SPFIX(code...)
+#endif
+
.macro svc_entry
sub sp, sp, #S_FRAME_SIZE
+ SPFIX( tst sp, #4 )
+ SPFIX( bicne sp, sp, #4 )
stmib sp, {r1 - r12}
ldmia r0, {r1 - r3}
add r5, sp, #S_SP @ here for interlock avoidance
mov r4, #-1 @ "" "" "" ""
add r0, sp, #S_FRAME_SIZE @ "" "" "" ""
+ SPFIX( addne r0, r0, #4 )
str r1, [sp] @ save the "real" r0 copied
@ from the exception stack
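The SPFIX() lines above implement the EABI requirement that the stack pointer stay 64-bit aligned: after reserving S_FRAME_SIZE bytes, SP is dropped by a further 4 bytes if it landed on an odd word, and the saved copy of the original SP is bumped by the same amount so it can be restored on exception return. A rough C sketch of the adjustment follows; the function and parameter names are illustrative only, not from the kernel.

    /* Illustrative sketch of what svc_entry's SPFIX() lines compute.
     * "frame_size" stands in for S_FRAME_SIZE (sizeof(struct pt_regs)). */
    static unsigned long spfix_adjust(unsigned long sp, unsigned long frame_size,
                                      int *adjusted)
    {
            sp -= frame_size;               /* sub   sp, sp, #S_FRAME_SIZE */
            *adjusted = (sp & 4) != 0;      /* tst   sp, #4                */
            if (*adjusted)
                    sp &= ~4UL;             /* bicne sp, sp, #4            */
            return sp;                      /* caller re-adds 4 on exit    */
    }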
@@ -304,7 +313,14 @@ __pabt_svc:
/*
* User mode handlers
+ *
+ * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
*/
+
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
+#error "sizeof(struct pt_regs) must be a multiple of 8"
+#endif
+
.macro usr_entry
sub sp, sp, #S_FRAME_SIZE
stmib sp, {r1 - r12}
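The #error above is a build-time assertion: under EABI the user-mode exception frame must keep the stack 64-bit aligned, so struct pt_regs has to be a multiple of 8 bytes. Since assembly cannot use the kernel's C helpers, the check is done with the preprocessor; an equivalent sketch in C, assuming the usual BUILD_BUG_ON macro, would be:

    /* Sketch of the same invariant expressed in C.  BUILD_BUG_ON() breaks
     * the build when its condition is true, so this rejects any pt_regs
     * whose size is not a multiple of 8 bytes.  Not part of the patch. */
    #include <linux/kernel.h>       /* BUILD_BUG_ON */
    #include <asm/ptrace.h>         /* struct pt_regs */

    static inline void assert_pt_regs_eabi_aligned(void)
    {
            BUILD_BUG_ON(sizeof(struct pt_regs) & 7);
    }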
@@ -317,10 +333,14 @@ __pabt_svc:
@ from the exception stack
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#ifndef CONFIG_MMU
+#warning "NPTL on non MMU needs fixing"
+#else
@ make sure our user space atomic helper is aborted
cmp r2, #TASK_SIZE
bichs r3, r3, #PSR_Z_BIT
#endif
+#endif
@
@ We are now ready to fill in the remaining blanks on the stack:
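The cmp/bichs pair above aborts an interrupted __kuser_cmpxchg: if the exception return address lies in the kuser helper page (at or above TASK_SIZE), the saved Z flag is cleared so the helper reports failure and user space retries. On !MMU kernels TASK_SIZE does not delimit the helper page in the same way, hence the #warning. A loose C rendering of the test, with the register roles mapped onto pt_regs fields purely for illustration:

    /* Sketch: force an interrupted user cmpxchg to report failure.  At this
     * point r2 held the exception return address and r3 the saved PSR,
     * mirrored here as regs->ARM_pc and regs->ARM_cpsr. */
    #include <asm/ptrace.h>         /* struct pt_regs, PSR_Z_BIT */
    #include <asm/memory.h>         /* TASK_SIZE */

    static void abort_interrupted_kuser_cmpxchg(struct pt_regs *regs)
    {
            if (regs->ARM_pc >= TASK_SIZE)          /* cmp   r2, #TASK_SIZE     */
                    regs->ARM_cpsr &= ~PSR_Z_BIT;   /* bichs r3, r3, #PSR_Z_BIT */
    }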
@@ -540,9 +560,13 @@ ENTRY(__switch_to)
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
+#ifndef CONFIG_MMU
+ add r2, r2, #TI_CPU_DOMAIN
+#else
ldr r6, [r2, #TI_CPU_DOMAIN]!
+#endif
#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_MPCORE
+#ifdef CONFIG_CPU_32v6K
clrex
#else
strex r5, r4, [ip] @ Clear exclusive monitor
@@ -558,7 +582,9 @@ ENTRY(__switch_to)
mov r4, #0xffff0fff
str r3, [r4, #-15] @ TLS val at 0xffff0ff0
#endif
+#ifdef CONFIG_MMU
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
+#endif
#ifdef CONFIG_VFP
@ Always disable VFP so we can lazily save/restore the old
@ state. This occurs in the context of the previous thread.
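Two things change in __switch_to above: !MMU builds have no domain register to read or write, and the exclusive-monitor clearing is now keyed on CONFIG_CPU_32v6K rather than CONFIG_CPU_MPCORE, since CLREX is an ARMv6K instruction rather than something MPCore-specific. The monitor must be cleared at context switch so a stale LDREX reservation taken by the outgoing task cannot satisfy a STREX in the incoming one. A hedged inline-assembly sketch of that choice (not the kernel's actual helper):

    /* Sketch: clear any outstanding exclusive reservation at context switch.
     * ARMv6K and later provide CLREX; plain ARMv6 falls back to a dummy
     * STREX to a scratch word. */
    static inline void clear_exclusive_monitor(unsigned long *scratch)
    {
    #ifdef CONFIG_CPU_32v6K
            asm volatile("clrex" : : : "memory");
    #else
            unsigned long tmp;

            asm volatile("strex %0, %1, [%2]"
                         : "=&r" (tmp)
                         : "r" (0UL), "r" (scratch)
                         : "memory");
    #endif
    }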
@@ -683,7 +709,12 @@ __kuser_memory_barrier: @ 0xffff0fa0
* The C flag is also set if *ptr was changed to allow for assembly
* optimization in the calling code.
*
- * Note: this routine already includes memory barriers as needed.
+ * Notes:
+ *
+ * - This routine already includes memory barriers as needed.
+ *
+ * - A failure might be transient, i.e. it is possible, although unlikely,
+ * that "failure" be returned even if *ptr == oldval.
*
* For example, a user space atomic_add implementation could look like this:
*
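The comment block above documents __kuser_cmpxchg at 0xffff0fc0 and now makes explicit that a failure may be transient, so callers must retry in a loop. The atomic_add example referred to by the comment sits between hunks and is not shown here; the sketch below is written against the documented calling convention (oldval in r0, newval in r1, ptr in r2, zero return on success) and is only an illustration, not the example from the source file.

    /* Sketch of a user-space caller of the cmpxchg kuser helper.  A non-zero
     * return means failure, possibly spurious, so the operation is retried. */
    typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

    static inline void atomic_add(volatile int *ptr, int val)
    {
            int old;

            do {
                    old = *ptr;
            } while (kuser_cmpxchg(old, old + val, ptr) != 0);
    }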
@@ -713,8 +744,11 @@ __kuser_cmpxchg: @ 0xffff0fc0
* The kernel itself must perform the operation.
* A special ghost syscall is used for that (see traps.c).
*/
+ stmfd sp!, {r7, lr}
+ mov r7, #0xff00 @ 0xfff0 into r7 for EABI
+ orr r7, r7, #0xf0
swi #0x9ffff0
- mov pc, lr
+ ldmfd sp!, {r7, pc}
#elif __LINUX_ARM_ARCH__ < 6
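The hunk above covers kernels that need the cmpxchg ghost syscall (CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG). Under OABI the syscall number is encoded in the SWI immediate, but under EABI the immediate is ignored and the number is passed in r7, which is why the helper now saves the caller's r7 and loads the number (0xfff0, per the comment) before the swi, then restores r7 on return. A generic sketch of the EABI convention being assumed, with __NR_getpid standing in as an arbitrary syscall:

    /* Sketch of an EABI syscall: the number goes in r7 and the SWI/SVC
     * immediate is ignored, so any helper issuing a syscall must preserve
     * and reload the caller's r7. */
    #include <asm/unistd.h>

    static inline long eabi_syscall0(long nr)
    {
            register long r7 asm("r7") = nr;
            register long r0 asm("r0");

            asm volatile("swi #0"
                         : "=r" (r0)
                         : "r" (r7)
                         : "memory");
            return r0;
    }

    /* usage: long pid = eabi_syscall0(__NR_getpid); */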
@@ -731,12 +765,18 @@ __kuser_cmpxchg: @ 0xffff0fc0
* exception happening just after the str instruction which would
* clear the Z flag although the exchange was done.
*/
+#ifdef CONFIG_MMU
teq ip, ip @ set Z flag
ldr ip, [r2] @ load current val
add r3, r2, #1 @ prepare store ptr
teqeq ip, r0 @ compare with oldval if still allowed
streq r1, [r3, #-1]! @ store newval if still allowed
subs r0, r2, r3 @ if r2 == r3 the str occurred
+#else
+#warning "NPTL on non MMU needs fixing"
+ mov r0, #-1
+ adds r0, r0, #0
+#endif
mov pc, lr
#else