-rw-r--r-- arch/arm/Kconfig | 26
-rw-r--r-- arch/arm/Kconfig.debug | 1
-rw-r--r-- arch/arm/Makefile | 11
-rw-r--r-- arch/arm/boot/Makefile | 9
-rw-r--r-- arch/arm/boot/compressed/head.S | 184
-rw-r--r-- arch/arm/include/asm/assembler.h | 84
-rw-r--r-- arch/arm/include/asm/elf.h | 3
-rw-r--r-- arch/arm/include/asm/futex.h | 1
-rw-r--r-- arch/arm/include/asm/memory.h | 6
-rw-r--r-- arch/arm/include/asm/mmu_context.h | 2
-rw-r--r-- arch/arm/include/asm/page-nommu.h | 3
-rw-r--r-- arch/arm/include/asm/ptrace.h | 8
-rw-r--r-- arch/arm/include/asm/uaccess.h | 7
-rw-r--r-- arch/arm/include/asm/unified.h | 126
-rw-r--r-- arch/arm/kernel/entry-armv.S | 169
-rw-r--r-- arch/arm/kernel/entry-common.S | 28
-rw-r--r-- arch/arm/kernel/entry-header.S | 92
-rw-r--r-- arch/arm/kernel/head-common.S | 15
-rw-r--r-- arch/arm/kernel/head-nommu.S | 16
-rw-r--r-- arch/arm/kernel/head.S | 28
-rw-r--r-- arch/arm/kernel/module.c | 53
-rw-r--r-- arch/arm/kernel/process.c | 2
-rw-r--r-- arch/arm/kernel/ptrace.c | 8
-rw-r--r-- arch/arm/kernel/setup.c | 28
-rw-r--r-- arch/arm/kernel/unwind.c | 4
-rw-r--r-- arch/arm/lib/ashldi3.S | 4
-rw-r--r-- arch/arm/lib/ashrdi3.S | 4
-rw-r--r-- arch/arm/lib/backtrace.S | 8
-rw-r--r-- arch/arm/lib/clear_user.S | 15
-rw-r--r-- arch/arm/lib/copy_from_user.S | 19
-rw-r--r-- arch/arm/lib/copy_template.S | 24
-rw-r--r-- arch/arm/lib/copy_to_user.S | 19
-rw-r--r-- arch/arm/lib/csumpartialcopyuser.S | 48
-rw-r--r-- arch/arm/lib/div64.S | 4
-rw-r--r-- arch/arm/lib/findbit.S | 34
-rw-r--r-- arch/arm/lib/getuser.S | 5
-rw-r--r-- arch/arm/lib/io-writesw-armv4.S | 5
-rw-r--r-- arch/arm/lib/lshrdi3.S | 4
-rw-r--r-- arch/arm/lib/memcpy.S | 7
-rw-r--r-- arch/arm/lib/memmove.S | 28
-rw-r--r-- arch/arm/lib/putuser.S | 15
-rw-r--r-- arch/arm/lib/sha1.S | 2
-rw-r--r-- arch/arm/lib/strncpy_from_user.S | 2
-rw-r--r-- arch/arm/lib/strnlen_user.S | 2
-rw-r--r-- arch/arm/mach-integrator/include/mach/hardware.h | 4
-rw-r--r-- arch/arm/mach-integrator/integrator_cp.c | 16
-rw-r--r-- arch/arm/mach-omap2/board-4430sdp.c | 2
-rw-r--r-- arch/arm/mach-omap2/mcbsp.c | 41
-rw-r--r-- arch/arm/mach-omap2/serial.c | 10
-rw-r--r-- arch/arm/mach-realview/Kconfig | 2
-rw-r--r-- arch/arm/mach-realview/include/mach/hardware.h | 4
-rw-r--r-- arch/arm/mach-realview/platsmp.c | 18
-rw-r--r-- arch/arm/mm/alignment.c | 20
-rw-r--r-- arch/arm/mm/cache-v7.S | 16
-rw-r--r-- arch/arm/mm/dma-mapping.c | 94
-rw-r--r-- arch/arm/mm/fault.c | 23
-rw-r--r-- arch/arm/mm/nommu.c | 1
-rw-r--r-- arch/arm/mm/proc-macros.S | 2
-rw-r--r-- arch/arm/mm/proc-v7.S | 7
-rw-r--r-- arch/arm/plat-omap/gpio.c | 249
-rw-r--r-- arch/arm/plat-omap/include/mach/dma.h | 88
-rw-r--r-- arch/arm/plat-omap/include/mach/mcbsp.h | 8
-rw-r--r-- arch/arm/plat-omap/mcbsp.c | 2
-rw-r--r-- arch/arm/vfp/entry.S | 2
-rw-r--r-- arch/arm/vfp/vfphw.S | 48
65 files changed, 1397 insertions, 423 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index aef63c8e3d2d..dc70660fe6bd 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -46,10 +46,6 @@ config GENERIC_CLOCKEVENTS_BROADCAST
depends on GENERIC_CLOCKEVENTS
default y if SMP && !LOCAL_TIMERS
-config MMU
- bool
- default y
-
config NO_IOPORT
bool
@@ -188,6 +184,13 @@ source "kernel/Kconfig.freezer"
menu "System Type"
+config MMU
+ bool "MMU-based Paged Memory Management Support"
+ default y
+ help
+ Select if you want MMU-based virtualised addressing space
+ support by paged memory management. If unsure, say 'Y'.
+
choice
prompt "ARM system type"
default ARCH_VERSATILE
@@ -983,6 +986,21 @@ config HZ
default AT91_TIMER_HZ if ARCH_AT91
default 100
+config THUMB2_KERNEL
+ bool "Compile the kernel in Thumb-2 mode"
+ depends on CPU_V7 && EXPERIMENTAL
+ select AEABI
+ select ARM_ASM_UNIFIED
+ help
+ By enabling this option, the kernel will be compiled in
+ Thumb-2 mode. A compiler/assembler that understands the unified
+ ARM-Thumb syntax is needed.
+
+ If unsure, say N.
+
+config ARM_ASM_UNIFIED
+ bool
+
config AEABI
bool "Use the ARM EABI to compile the kernel"
help
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a89e4734b8f0..1a6f70e52921 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -8,6 +8,7 @@ source "lib/Kconfig.debug"
# n, but then RMK will have to kill you ;).
config FRAME_POINTER
bool
+ depends on !THUMB2_KERNEL
default y if !ARM_UNWIND
help
If you say N here, the resulting kernel will be slightly smaller and
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c877d6df23d1..e150f232458d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -93,9 +93,16 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
CFLAGS_ABI +=-funwind-tables
endif
+ifeq ($(CONFIG_THUMB2_KERNEL),y)
+AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=thumb,-Wa$(comma)-mauto-it)
+AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
+CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
+AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
+endif
+
# Need -Uarm for gcc < 3.x
-KBUILD_CFLAGS +=$(CFLAGS_ABI) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
-KBUILD_AFLAGS +=$(CFLAGS_ABI) $(arch-y) $(tune-y) -msoft-float
+KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_THUMB2) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
+KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_THUMB2) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
CHECKFLAGS += -D__arm__
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index da226abce2d0..4a590f4113e2 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -61,7 +61,7 @@ endif
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \
- -C none -a $(LOADADDR) -e $(LOADADDR) \
+ -C none -a $(LOADADDR) -e $(STARTADDR) \
-n 'Linux-$(KERNELRELEASE)' -d $< $@
ifeq ($(CONFIG_ZBOOT_ROM),y)
@@ -70,6 +70,13 @@ else
$(obj)/uImage: LOADADDR=$(ZRELADDR)
endif
+ifeq ($(CONFIG_THUMB2_KERNEL),y)
+# Set bit 0 to 1 so that "mov pc, rx" switches to Thumb-2 mode
+$(obj)/uImage: STARTADDR=$(shell echo $(LOADADDR) | sed -e "s/.$$/1/")
+else
+$(obj)/uImage: STARTADDR=$(LOADADDR)
+endif
+
$(obj)/uImage: $(obj)/zImage FORCE
$(call if_changed,uimage)
@echo ' Image $@ is ready'
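The sed rule above forces the last hex digit of LOADADDR to 1 so that bit 0 of the mkimage entry point is set; on ARMv7 a register branch such as "mov pc, rx" interworks, and an odd entry address therefore starts the kernel in Thumb state. A minimal C sketch of that convention, assuming a word-aligned LOADADDR (the 0x60008000 value is purely illustrative):

#include <stdio.h>
#include <stdint.h>

/* For a word-aligned load address, rewriting the last digit to 1 is the
 * same as setting bit 0: the interworking branch then selects Thumb. */
static uint32_t thumb_entry(uint32_t loadaddr)
{
	return loadaddr | 1;
}

int main(void)
{
	printf("entry = %#x\n", (unsigned)thumb_entry(0x60008000));
	return 0;
}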
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 4515728c5345..fa6fbf45cf3b 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -140,7 +140,8 @@ start:
tst r2, #3 @ not user?
bne not_angel
mov r0, #0x17 @ angel_SWIreason_EnterSVC
- swi 0x123456 @ angel_SWI_ARM
+ ARM( swi 0x123456 ) @ angel_SWI_ARM
+ THUMB( svc 0xab ) @ angel_SWI_THUMB
not_angel:
mrs r2, cpsr @ turn off interrupts to
orr r2, r2, #0xc0 @ prevent angel from running
@@ -161,7 +162,9 @@ not_angel:
.text
adr r0, LC0
- ldmia r0, {r1, r2, r3, r4, r5, r6, ip, sp}
+ ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, ip, sp} )
+ THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, ip} )
+ THUMB( ldr sp, [r0, #28] )
subs r0, r0, r1 @ calculate the delta offset
@ if delta is zero, we are
@@ -263,22 +266,25 @@ not_relocated: mov r0, #0
* r6 = processor ID
* r7 = architecture ID
* r8 = atags pointer
- * r9-r14 = corrupted
+ * r9-r12,r14 = corrupted
*/
add r1, r5, r0 @ end of decompressed kernel
adr r2, reloc_start
ldr r3, LC1
add r3, r2, r3
-1: ldmia r2!, {r9 - r14} @ copy relocation code
- stmia r1!, {r9 - r14}
- ldmia r2!, {r9 - r14}
- stmia r1!, {r9 - r14}
+1: ldmia r2!, {r9 - r12, r14} @ copy relocation code
+ stmia r1!, {r9 - r12, r14}
+ ldmia r2!, {r9 - r12, r14}
+ stmia r1!, {r9 - r12, r14}
cmp r2, r3
blo 1b
- add sp, r1, #128 @ relocate the stack
+ mov sp, r1
+ add sp, sp, #128 @ relocate the stack
bl cache_clean_flush
- add pc, r5, r0 @ call relocation code
+ ARM( add pc, r5, r0 ) @ call relocation code
+ THUMB( add r12, r5, r0 )
+ THUMB( mov pc, r12 ) @ call relocation code
/*
* We're not in danger of overwriting ourselves. Do this the simple way.
@@ -291,6 +297,7 @@ wont_overwrite: mov r0, r4
bl decompress_kernel
b call_kernel
+ .align 2
.type LC0, #object
LC0: .word LC0 @ r1
.word __bss_start @ r2
@@ -431,6 +438,7 @@ ENDPROC(__setup_mmu)
__armv4_mmu_cache_on:
mov r12, lr
+#ifdef CONFIG_MMU
bl __setup_mmu
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -444,10 +452,12 @@ __armv4_mmu_cache_on:
bl __common_mmu_cache_on
mov r0, #0
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+#endif
mov pc, r12
__armv7_mmu_cache_on:
mov r12, lr
+#ifdef CONFIG_MMU
mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
tst r11, #0xf @ VMSA
blne __setup_mmu
@@ -455,9 +465,11 @@ __armv7_mmu_cache_on:
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
tst r11, #0xf @ VMSA
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
+#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
@@ -465,6 +477,7 @@ __armv7_mmu_cache_on:
movne r1, #-1
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+#endif
mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back
mov r0, #0
@@ -498,6 +511,7 @@ __arm6_mmu_cache_on:
mov pc, r12
__common_mmu_cache_on:
+#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
orr r0, r0, #0x000d @ Write buffer, mmu
#endif
@@ -509,6 +523,7 @@ __common_mmu_cache_on:
1: mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back to
sub pc, lr, r0, lsr #32 @ properly flush pipeline
+#endif
/*
* All code following this line is relocatable. It is relocated by
@@ -522,7 +537,7 @@ __common_mmu_cache_on:
* r6 = processor ID
* r7 = architecture ID
* r8 = atags pointer
- * r9-r14 = corrupted
+ * r9-r12,r14 = corrupted
*/
.align 5
reloc_start: add r9, r5, r0
@@ -531,13 +546,14 @@ reloc_start: add r9, r5, r0
mov r1, r4
1:
.rept 4
- ldmia r5!, {r0, r2, r3, r10 - r14} @ relocate kernel
- stmia r1!, {r0, r2, r3, r10 - r14}
+ ldmia r5!, {r0, r2, r3, r10 - r12, r14} @ relocate kernel
+ stmia r1!, {r0, r2, r3, r10 - r12, r14}
.endr
cmp r5, r9
blo 1b
- add sp, r1, #128 @ relocate the stack
+ mov sp, r1
+ add sp, sp, #128 @ relocate the stack
debug_reloc_end
call_kernel: bl cache_clean_flush
@@ -571,7 +587,9 @@ call_cache_fn: adr r12, proc_types
ldr r2, [r12, #4] @ get mask
eor r1, r1, r6 @ (real ^ match)
tst r1, r2 @ & mask
- addeq pc, r12, r3 @ call cache function
+ ARM( addeq pc, r12, r3 ) @ call cache function
+ THUMB( addeq r12, r3 )
+ THUMB( moveq pc, r12 ) @ call cache function
add r12, r12, #4*5
b 1b
@@ -589,13 +607,15 @@ call_cache_fn: adr r12, proc_types
* methods. Writeback caches _must_ have the flush method
* defined.
*/
+ .align 2
.type proc_types,#object
proc_types:
.word 0x41560600 @ ARM6/610
.word 0xffffffe0
- b __arm6_mmu_cache_off @ works, but slow
- b __arm6_mmu_cache_off
+ W(b) __arm6_mmu_cache_off @ works, but slow
+ W(b) __arm6_mmu_cache_off
mov pc, lr
+ THUMB( nop )
@ b __arm6_mmu_cache_on @ untested
@ b __arm6_mmu_cache_off
@ b __armv3_mmu_cache_flush
@@ -603,76 +623,84 @@ proc_types:
.word 0x00000000 @ old ARM ID
.word 0x0000f000
mov pc, lr
+ THUMB( nop )
mov pc, lr
+ THUMB( nop )
mov pc, lr
+ THUMB( nop )
.word 0x41007000 @ ARM7/710
.word 0xfff8fe00
- b __arm7_mmu_cache_off
- b __arm7_mmu_cache_off
+ W(b) __arm7_mmu_cache_off
+ W(b) __arm7_mmu_cache_off
mov pc, lr
+ THUMB( nop )
.word 0x41807200 @ ARM720T (writethrough)
.word 0xffffff00
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
mov pc, lr
+ THUMB( nop )
.word 0x41007400 @ ARM74x
.word 0xff00ff00
- b __armv3_mpu_cache_on
- b __armv3_mpu_cache_off
- b __armv3_mpu_cache_flush
+ W(b) __armv3_mpu_cache_on
+ W(b) __armv3_mpu_cache_off
+ W(b) __armv3_mpu_cache_flush
.word 0x41009400 @ ARM94x
.word 0xff00ff00
- b __armv4_mpu_cache_on
- b __armv4_mpu_cache_off
- b __armv4_mpu_cache_flush
+ W(b) __armv4_mpu_cache_on
+ W(b) __armv4_mpu_cache_off
+ W(b) __armv4_mpu_cache_flush
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000
mov pc, lr
+ THUMB( nop )
mov pc, lr
+ THUMB( nop )
mov pc, lr
+ THUMB( nop )
@ Everything from here on will be the new ID system.
.word 0x4401a100 @ sa110 / sa1100
.word 0xffffffe0
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv4_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
.word 0x6901b110 @ sa1110
.word 0xfffffff0
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv4_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
.word 0x56056930
.word 0xff0ffff0 @ PXA935
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv4_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
.word 0x56158000 @ PXA168
.word 0xfffff000
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv5tej_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
.word 0x56056930
.word 0xff0ffff0 @ PXA935
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv4_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
.word 0x56050000 @ Feroceon
.word 0xff0f0000
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv5tej_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
/* this conflicts with the standard ARMv5TE entry */
@@ -685,47 +713,50 @@ proc_types:
.word 0x66015261 @ FA526
.word 0xff01fff1
- b __fa526_cache_on
- b __armv4_mmu_cache_off
- b __fa526_cache_flush
+ W(b) __fa526_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __fa526_cache_flush
@ These match on the architecture ID
.word 0x00020000 @ ARMv4T
.word 0x000f0000
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv4_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
.word 0x00050000 @ ARMv5TE
.word 0x000f0000
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv4_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
.word 0x00060000 @ ARMv5TEJ
.word 0x000f0000
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv5tej_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
.word 0x0007b000 @ ARMv6
.word 0x000ff000
- b __armv4_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv6_mmu_cache_flush
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv6_mmu_cache_flush
.word 0x000f0000 @ new CPU Id
.word 0x000f0000
- b __armv7_mmu_cache_on
- b __armv7_mmu_cache_off
- b __armv7_mmu_cache_flush
+ W(b) __armv7_mmu_cache_on
+ W(b) __armv7_mmu_cache_off
+ W(b) __armv7_mmu_cache_flush
.word 0 @ unrecognised type
.word 0
mov pc, lr
+ THUMB( nop )
mov pc, lr
+ THUMB( nop )
mov pc, lr
+ THUMB( nop )
.size proc_types, . - proc_types
@@ -760,22 +791,30 @@ __armv3_mpu_cache_off:
mov pc, lr
__armv4_mmu_cache_off:
+#ifdef CONFIG_MMU
mrc p15, 0, r0, c1, c0
bic r0, r0, #0x000d
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
+#endif
mov pc, lr
__armv7_mmu_cache_off:
mrc p15, 0, r0, c1, c0
+#ifdef CONFIG_MMU
bic r0, r0, #0x000d
+#else
+ bic r0, r0, #0x000c
+#endif
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r12, lr
bl __armv7_mmu_cache_flush
mov r0, #0
+#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
+#endif
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC
mcr p15, 0, r0, c7, c10, 4 @ DSB
mcr p15, 0, r0, c7, c5, 4 @ ISB
@@ -852,7 +891,7 @@ __armv7_mmu_cache_flush:
b iflush
hierarchical:
mcr p15, 0, r10, c7, c10, 5 @ DMB
- stmfd sp!, {r0-r5, r7, r9, r11}
+ stmfd sp!, {r0-r7, r9-r11}
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ left align loc bit field
@@ -877,8 +916,12 @@ loop1:
loop2:
mov r9, r4 @ create working copy of max way size
loop3:
- orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
- orr r11, r11, r7, lsl r2 @ factor index number into r11
+ ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
+ ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
+ THUMB( lsl r6, r9, r5 )
+ THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
+ THUMB( lsl r6, r7, r2 )
+ THUMB( orr r11, r11, r6 ) @ factor index number into r11
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
subs r9, r9, #1 @ decrement the way
bge loop3
@@ -889,7 +932,7 @@ skip:
cmp r3, r10
bgt loop1
finished:
- ldmfd sp!, {r0-r5, r7, r9, r11}
+ ldmfd sp!, {r0-r7, r9-r11}
mov r10, #0 @ switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
iflush:
@@ -923,9 +966,13 @@ __armv4_mmu_cache_flush:
mov r11, #8
mov r11, r11, lsl r3 @ cache line size in bytes
no_cache_id:
- bic r1, pc, #63 @ align to longest cache line
+ mov r1, pc
+ bic r1, r1, #63 @ align to longest cache line
add r2, r1, r2
-1: ldr r3, [r1], r11 @ s/w flush D cache
+1:
+ ARM( ldr r3, [r1], r11 ) @ s/w flush D cache
+ THUMB( ldr r3, [r1] ) @ s/w flush D cache
+ THUMB( add r1, r1, r11 )
teq r1, r2
bne 1b
@@ -945,6 +992,7 @@ __armv3_mpu_cache_flush:
* memory, which again must be relocatable.
*/
#ifdef DEBUG
+ .align 2
.type phexbuf,#object
phexbuf: .space 12
.size phexbuf, . - phexbuf
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 15f8a092b700..2b60c7d05770 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -127,3 +127,87 @@
#endif
#endif
.endm
+
+#ifdef CONFIG_THUMB2_KERNEL
+ .macro setmode, mode, reg
+ mov \reg, #\mode
+ msr cpsr_c, \reg
+ .endm
+#else
+ .macro setmode, mode, reg
+ msr cpsr_c, #\mode
+ .endm
+#endif
+
+/*
+ * STRT/LDRT access macros with ARM and Thumb-2 variants
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+
+ .macro usraccoff, instr, reg, ptr, inc, off, cond, abort
+9999:
+ .if \inc == 1
+ \instr\cond\()bt \reg, [\ptr, #\off]
+ .elseif \inc == 4
+ \instr\cond\()t \reg, [\ptr, #\off]
+ .else
+ .error "Unsupported inc macro argument"
+ .endif
+
+ .section __ex_table,"a"
+ .align 3
+ .long 9999b, \abort
+ .previous
+ .endm
+
+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort
+ @ explicit IT instruction needed because of the label
+ @ introduced by the USER macro
+ .ifnc \cond,al
+ .if \rept == 1
+ itt \cond
+ .elseif \rept == 2
+ ittt \cond
+ .else
+ .error "Unsupported rept macro argument"
+ .endif
+ .endif
+
+ @ Slightly optimised to avoid incrementing the pointer twice
+ usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
+ .if \rept == 2
+ usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+ .endif
+
+ add\cond \ptr, #\rept * \inc
+ .endm
+
+#else /* !CONFIG_THUMB2_KERNEL */
+
+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort
+ .rept \rept
+9999:
+ .if \inc == 1
+ \instr\cond\()bt \reg, [\ptr], #\inc
+ .elseif \inc == 4
+ \instr\cond\()t \reg, [\ptr], #\inc
+ .else
+ .error "Unsupported inc macro argument"
+ .endif
+
+ .section __ex_table,"a"
+ .align 3
+ .long 9999b, \abort
+ .previous
+ .endr
+ .endm
+
+#endif /* CONFIG_THUMB2_KERNEL */
+
+ .macro strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+ usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+ .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
+ usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
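Taken together, strusr/ldrusr emit a user-mode access plus a matching __ex_table entry pointing at the abort handler. A rough C-level equivalent of a single expansion, written as inline assembly; this is only a sketch assuming ARM state and a GNU toolchain, with the fixup labels and error flag chosen for illustration:

/* One user-mode byte store, with its faulting address paired to a fixup
 * stub through the __ex_table section. */
static inline int put_user_byte(unsigned char val, unsigned char *p)
{
	int err = 0;

	asm volatile(
	"9999:	strbt	%1, [%2]\n"
	"9001:\n"
	"	.pushsection .fixup, \"ax\"\n"
	"9002:	mov	%0, #1\n"		/* flag the fault */
	"	b	9001b\n"
	"	.popsection\n"
	"	.pushsection __ex_table, \"a\"\n"
	"	.align	3\n"
	"	.long	9999b, 9002b\n"
	"	.popsection\n"
	: "+r" (err)
	: "r" (val), "r" (p)
	: "memory");

	return err;
}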
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c207504de84d..c3b911ee9151 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -55,6 +55,9 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_MOVW_ABS_NC 43
#define R_ARM_MOVT_ABS 44
+#define R_ARM_THM_CALL 10
+#define R_ARM_THM_JUMP24 30
+
/*
* These are used to set parameters in the core dumps.
*/
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 9ee743b95de8..bfcc15929a7f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -99,6 +99,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrt %0, [%3]\n"
" teq %0, %1\n"
+ " it eq @ explicit IT needed for the 2b label\n"
"2: streqt %2, [%3]\n"
"3:\n"
" .section __ex_table,\"a\"\n"
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 85763db87449..376be1a62866 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,7 +44,13 @@
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
+#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)
+#else
+/* Smaller range for Thumb-2 symbol relocation (2^24 bytes) */
+#define MODULES_VADDR (PAGE_OFFSET - 8*1024*1024)
+#endif
+
#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif
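The halved module area tracks the halved branch reach: an ARM BL carries a 26-bit signed, word-aligned offset (plus or minus 32MB), while a Thumb-2 BL/B.W carries a 25-bit signed, halfword-aligned offset (plus or minus 16MB), the same encoding the module loader decodes later in this series. A trivial check of the arithmetic:

#include <stdio.h>

int main(void)
{
	long arm_reach   = 1L << 25;	/* 24-bit imm << 2, signed: +/-32MB */
	long thumb_reach = 1L << 24;	/* S:I1:I2:imm10:imm11:0:   +/-16MB */

	printf("ARM BL reach:     +/-%ldMB\n", arm_reach >> 20);
	printf("Thumb-2 BL reach: +/-%ldMB\n", thumb_reach >> 20);
	return 0;
}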
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 263fed05ea33..bcdb9291ef0c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -62,8 +62,10 @@ static inline void check_context(struct mm_struct *mm)
static inline void check_context(struct mm_struct *mm)
{
+#ifdef CONFIG_MMU
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
+#endif
}
#define init_new_context(tsk,mm) 0
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 3574c0deb37f..d1b162a18dcb 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -43,7 +43,4 @@ typedef unsigned long pgprot_t;
#define __pmd(x) (x)
#define __pgprot(x) (x)
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
#endif
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 67b833c9b6b9..bbecccda76d0 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -82,6 +82,14 @@
#define PSR_ENDSTATE 0
#endif
+/*
+ * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
+ * process is located in memory.
+ */
+#define PT_TEXT_ADDR 0x10000
+#define PT_DATA_ADDR 0x10004
+#define PT_TEXT_END_ADDR 0x10008
+
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 0da9bc9b3b1d..1d6bd40a4322 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -17,6 +17,7 @@
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/system.h>
+#include <asm/unified.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -365,8 +366,10 @@ do { \
#define __put_user_asm_dword(x,__pu_addr,err) \
__asm__ __volatile__( \
- "1: strt " __reg_oper1 ", [%1], #4\n" \
- "2: strt " __reg_oper0 ", [%1]\n" \
+ ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \
+ ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \
+ THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \
+ THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
new file mode 100644
index 000000000000..073e85b9b961
--- /dev/null
+++ b/arch/arm/include/asm/unified.h
@@ -0,0 +1,126 @@
+/*
+ * include/asm-arm/unified.h - Unified Assembler Syntax helper macros
+ *
+ * Copyright (C) 2008 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_UNIFIED_H
+#define __ASM_UNIFIED_H
+
+#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
+ .syntax unified
+#endif
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+#if __GNUC__ < 4
+#error Thumb-2 kernel requires gcc >= 4
+#endif
+
+/* The CPSR bit describing the instruction set (Thumb) */
+#define PSR_ISETSTATE PSR_T_BIT
+
+#define ARM(x...)
+#define THUMB(x...) x
+#define W(instr) instr.w
+#define BSYM(sym) sym + 1
+
+#else /* !CONFIG_THUMB2_KERNEL */
+
+/* The CPSR bit describing the instruction set (ARM) */
+#define PSR_ISETSTATE 0
+
+#define ARM(x...) x
+#define THUMB(x...)
+#define W(instr) instr
+#define BSYM(sym) sym
+
+#endif /* CONFIG_THUMB2_KERNEL */
+
+#ifndef CONFIG_ARM_ASM_UNIFIED
+
+/*
+ * If the unified assembly syntax isn't used (in ARM mode), these
+ * macros expand to an empty string
+ */
+#ifdef __ASSEMBLY__
+ .macro it, cond
+ .endm
+ .macro itt, cond
+ .endm
+ .macro ite, cond
+ .endm
+ .macro ittt, cond
+ .endm
+ .macro itte, cond
+ .endm
+ .macro itet, cond
+ .endm
+ .macro itee, cond
+ .endm
+ .macro itttt, cond
+ .endm
+ .macro ittte, cond
+ .endm
+ .macro ittet, cond
+ .endm
+ .macro ittee, cond
+ .endm
+ .macro itett, cond
+ .endm
+ .macro itete, cond
+ .endm
+ .macro iteet, cond
+ .endm
+ .macro iteee, cond
+ .endm
+#else /* !__ASSEMBLY__ */
+__asm__(
+" .macro it, cond\n"
+" .endm\n"
+" .macro itt, cond\n"
+" .endm\n"
+" .macro ite, cond\n"
+" .endm\n"
+" .macro ittt, cond\n"
+" .endm\n"
+" .macro itte, cond\n"
+" .endm\n"
+" .macro itet, cond\n"
+" .endm\n"
+" .macro itee, cond\n"
+" .endm\n"
+" .macro itttt, cond\n"
+" .endm\n"
+" .macro ittte, cond\n"
+" .endm\n"
+" .macro ittet, cond\n"
+" .endm\n"
+" .macro ittee, cond\n"
+" .endm\n"
+" .macro itett, cond\n"
+" .endm\n"
+" .macro itete, cond\n"
+" .endm\n"
+" .macro iteet, cond\n"
+" .endm\n"
+" .macro iteee, cond\n"
+" .endm\n");
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_ARM_ASM_UNIFIED */
+
+#endif /* !__ASM_UNIFIED_H */
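BSYM() exists because a symbol used as data, for example loaded into lr as a return address, must carry bit 0 set to keep the CPU in Thumb state, whereas branch instructions get this fixed up by the linker automatically. The same convention is visible from C, as this small sketch shows (it prints 1 only when the code is compiled as Thumb):

#include <stdio.h>
#include <stdint.h>

static void callee(void)
{
}

int main(void)
{
	/* On ARM, a pointer to a Thumb function has bit 0 set; BSYM(sym)
	 * reproduces this for hand-written "adr lr, BSYM(1f)" sequences. */
	uintptr_t addr = (uintptr_t)callee;

	printf("bit 0 of &callee = %lu\n", (unsigned long)(addr & 1));
	return 0;
}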
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fc8af43c5000..468425f937dd 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -34,7 +34,7 @@
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
- adrne lr, 1b
+ adrne lr, BSYM(1b)
bne asm_do_IRQ
#ifdef CONFIG_SMP
@@ -46,13 +46,13 @@
*/
test_for_ipi r0, r6, r5, lr
movne r0, sp
- adrne lr, 1b
+ adrne lr, BSYM(1b)
bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r6, r5, lr
movne r0, sp
- adrne lr, 1b
+ adrne lr, BSYM(1b)
bne do_local_timer
#endif
#endif
@@ -70,7 +70,10 @@
*/
.macro inv_entry, reason
sub sp, sp, #S_FRAME_SIZE
- stmib sp, {r1 - lr}
+ ARM( stmib sp, {r1 - lr} )
+ THUMB( stmia sp, {r0 - r12} )
+ THUMB( str sp, [sp, #S_SP] )
+ THUMB( str lr, [sp, #S_LR] )
mov r1, #\reason
.endm
@@ -126,17 +129,24 @@ ENDPROC(__und_invalid)
.macro svc_entry, stack_hole=0
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
- sub sp, sp, #(S_FRAME_SIZE + \stack_hole)
+ sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX( str r0, [sp] ) @ temporarily saved
+ SPFIX( mov r0, sp )
+ SPFIX( tst r0, #4 ) @ test original stack alignment
+ SPFIX( ldr r0, [sp] ) @ restored
+#else
SPFIX( tst sp, #4 )
- SPFIX( bicne sp, sp, #4 )
- stmib sp, {r1 - r12}
+#endif
+ SPFIX( subeq sp, sp, #4 )
+ stmia sp, {r1 - r12}
ldmia r0, {r1 - r3}
- add r5, sp, #S_SP @ here for interlock avoidance
+ add r5, sp, #S_SP - 4 @ here for interlock avoidance
mov r4, #-1 @ "" "" "" ""
- add r0, sp, #(S_FRAME_SIZE + \stack_hole)
- SPFIX( addne r0, r0, #4 )
- str r1, [sp] @ save the "real" r0 copied
+ add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX( addeq r0, r0, #4 )
+ str r1, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
mov r1, lr
@@ -196,9 +206,8 @@ __dabt_svc:
@
@ restore SPSR and restart the instruction
@
- ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ ldr r2, [sp, #S_PSR]
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
@@ -225,13 +234,12 @@ __irq_svc:
tst r0, #_TIF_NEED_RESCHED
blne svc_preempt
#endif
- ldr r0, [sp, #S_PSR] @ irqs are already disabled
- msr spsr_cxsf, r0
+ ldr r4, [sp, #S_PSR] @ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
- tst r0, #PSR_I_BIT
+ tst r4, #PSR_I_BIT
bleq trace_hardirqs_on
#endif
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ svc_exit r4 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
@@ -266,7 +274,7 @@ __und_svc:
@ r0 - instruction
@
ldr r0, [r2, #-4]
- adr r9, 1f
+ adr r9, BSYM(1f)
bl call_fpe
mov r0, sp @ struct pt_regs *regs
@@ -280,9 +288,8 @@ __und_svc:
@
@ restore SPSR and restart the instruction
@
- ldr lr, [sp, #S_PSR] @ Get SVC cpsr
- msr spsr_cxsf, lr
- ldmia sp, {r0 - pc}^ @ Restore SVC registers
+ ldr r2, [sp, #S_PSR] @ Get SVC cpsr
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__und_svc)
@@ -323,9 +330,8 @@ __pabt_svc:
@
@ restore SPSR and restart the instruction
@
- ldr r0, [sp, #S_PSR]
- msr spsr_cxsf, r0
- ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ ldr r2, [sp, #S_PSR]
+ svc_exit r2 @ return from exception
UNWIND(.fnend )
ENDPROC(__pabt_svc)
@@ -353,7 +359,8 @@ ENDPROC(__pabt_svc)
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
- stmib sp, {r1 - r12}
+ ARM( stmib sp, {r1 - r12} )
+ THUMB( stmia sp, {r0 - r12} )
ldmia r0, {r1 - r3}
add r0, sp, #S_PC @ here for interlock avoidance
@@ -372,7 +379,8 @@ ENDPROC(__pabt_svc)
@ Also, separately save sp_usr and lr_usr
@
stmia r0, {r2 - r4}
- stmdb r0, {sp, lr}^
+ ARM( stmdb r0, {sp, lr}^ )
+ THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
@
@ Enable the alignment trap while in kernel mode
@@ -427,7 +435,7 @@ __dabt_usr:
@
enable_irq
mov r2, sp
- adr lr, ret_from_exception
+ adr lr, BSYM(ret_from_exception)
b do_DataAbort
UNWIND(.fnend )
ENDPROC(__dabt_usr)
@@ -452,7 +460,9 @@ __irq_usr:
ldr r0, [tsk, #TI_PREEMPT]
str r8, [tsk, #TI_PREEMPT]
teq r0, r7
- strne r0, [r0, -r0]
+ ARM( strne r0, [r0, -r0] )
+ THUMB( movne r0, #0 )
+ THUMB( strne r0, [r0] )
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
@@ -476,9 +486,10 @@ __und_usr:
@
@ r0 - instruction
@
- adr r9, ret_from_exception
- adr lr, __und_usr_unknown
+ adr r9, BSYM(ret_from_exception)
+ adr lr, BSYM(__und_usr_unknown)
tst r3, #PSR_T_BIT @ Thumb mode?
+ itet eq @ explicit IT needed for the 1f label
subeq r4, r2, #4 @ ARM instr at LR - 4
subne r4, r2, #2 @ Thumb instr at LR - 2
1: ldreqt r0, [r4]
@@ -488,7 +499,10 @@ __und_usr:
beq call_fpe
@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
-2: ldrht r5, [r4], #2
+2:
+ ARM( ldrht r5, [r4], #2 )
+ THUMB( ldrht r5, [r4] )
+ THUMB( add r4, r4, #2 )
and r0, r5, #0xf800 @ mask bits 111x x... .... ....
cmp r0, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_unknown
@@ -577,9 +591,11 @@ call_fpe:
moveq pc, lr
get_thread_info r10 @ get current thread
and r8, r0, #0x00000f00 @ mask out CP number
+ THUMB( lsr r8, r8, #8 )
mov r7, #1
add r6, r10, #TI_USED_CP
- strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[]
+ ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
+ THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
@@ -587,36 +603,38 @@ call_fpe:
movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
bcs iwmmxt_task_enable
#endif
- add pc, pc, r8, lsr #6
- mov r0, r0
-
- mov pc, lr @ CP#0
- b do_fpe @ CP#1 (FPE)
- b do_fpe @ CP#2 (FPE)
- mov pc, lr @ CP#3
+ ARM( add pc, pc, r8, lsr #6 )
+ THUMB( lsl r8, r8, #2 )
+ THUMB( add pc, r8 )
+ nop
+
+ W(mov) pc, lr @ CP#0
+ W(b) do_fpe @ CP#1 (FPE)
+ W(b) do_fpe @ CP#2 (FPE)
+ W(mov) pc, lr @ CP#3
#ifdef CONFIG_CRUNCH
b crunch_task_enable @ CP#4 (MaverickCrunch)
b crunch_task_enable @ CP#5 (MaverickCrunch)
b crunch_task_enable @ CP#6 (MaverickCrunch)
#else
- mov pc, lr @ CP#4
- mov pc, lr @ CP#5
- mov pc, lr @ CP#6
+ W(mov) pc, lr @ CP#4
+ W(mov) pc, lr @ CP#5
+ W(mov) pc, lr @ CP#6
#endif
- mov pc, lr @ CP#7
- mov pc, lr @ CP#8
- mov pc, lr @ CP#9
+ W(mov) pc, lr @ CP#7
+ W(mov) pc, lr @ CP#8
+ W(mov) pc, lr @ CP#9
#ifdef CONFIG_VFP
- b do_vfp @ CP#10 (VFP)
- b do_vfp @ CP#11 (VFP)
+ W(b) do_vfp @ CP#10 (VFP)
+ W(b) do_vfp @ CP#11 (VFP)
#else
- mov pc, lr @ CP#10 (VFP)
- mov pc, lr @ CP#11 (VFP)
+ W(mov) pc, lr @ CP#10 (VFP)
+ W(mov) pc, lr @ CP#11 (VFP)
#endif
- mov pc, lr @ CP#12
- mov pc, lr @ CP#13
- mov pc, lr @ CP#14 (Debug)
- mov pc, lr @ CP#15 (Control)
+ W(mov) pc, lr @ CP#12
+ W(mov) pc, lr @ CP#13
+ W(mov) pc, lr @ CP#14 (Debug)
+ W(mov) pc, lr @ CP#15 (Control)
#ifdef CONFIG_NEON
.align 6
@@ -667,7 +685,7 @@ no_fp: mov pc, lr
__und_usr_unknown:
enable_irq
mov r0, sp
- adr lr, ret_from_exception
+ adr lr, BSYM(ret_from_exception)
b do_undefinstr
ENDPROC(__und_usr_unknown)
@@ -711,7 +729,10 @@ ENTRY(__switch_to)
UNWIND(.cantunwind )
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
- stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
+ ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
+ THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
+ THUMB( str sp, [ip], #4 )
+ THUMB( str lr, [ip], #4 )
#ifdef CONFIG_MMU
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
@@ -736,8 +757,12 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
+ THUMB( mov ip, r4 )
mov r0, r5
- ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
+ THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
+ THUMB( ldr sp, [ip], #4 )
+ THUMB( ldr pc, [ip] )
UNWIND(.fnend )
ENDPROC(__switch_to)
@@ -772,6 +797,7 @@ ENDPROC(__switch_to)
* if your compiled code is not going to use the new instructions for other
* purpose.
*/
+ THUMB( .arm )
.macro usr_ret, reg
#ifdef CONFIG_ARM_THUMB
@@ -1020,6 +1046,7 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+ THUMB( .thumb )
/*
* Vector stubs.
@@ -1054,17 +1081,23 @@ vector_\name:
@ Prepare for SVC32 mode. IRQs remain disabled.
@
mrs r0, cpsr
- eor r0, r0, #(\mode ^ SVC_MODE)
+ eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
msr spsr_cxsf, r0
@
@ the branch table must immediately follow this code
@
and lr, lr, #0x0f
+ THUMB( adr r0, 1f )
+ THUMB( ldr lr, [r0, lr, lsl #2] )
mov r0, sp
- ldr lr, [pc, lr, lsl #2]
+ ARM( ldr lr, [pc, lr, lsl #2] )
movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
+
+ .align 2
+ @ handler addresses follow this label
+1:
.endm
.globl __stubs_start
@@ -1202,14 +1235,16 @@ __stubs_end:
.globl __vectors_start
__vectors_start:
- swi SYS_ERROR0
- b vector_und + stubs_offset
- ldr pc, .LCvswi + stubs_offset
- b vector_pabt + stubs_offset
- b vector_dabt + stubs_offset
- b vector_addrexcptn + stubs_offset
- b vector_irq + stubs_offset
- b vector_fiq + stubs_offset
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ W(b) vector_und + stubs_offset
+ W(ldr) pc, .LCvswi + stubs_offset
+ W(b) vector_pabt + stubs_offset
+ W(b) vector_dabt + stubs_offset
+ W(b) vector_addrexcptn + stubs_offset
+ W(b) vector_irq + stubs_offset
+ W(b) vector_fiq + stubs_offset
.globl __vectors_end
__vectors_end:
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 366e5097a41a..a0540c9f1f0c 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -33,14 +33,7 @@ ret_fast_syscall:
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- @ fast_restore_user_regs
- ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
- ldr lr, [sp, #S_OFF + S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
+ restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
/*
@@ -73,14 +66,7 @@ no_work_pending:
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- @ slow_restore_user_regs
- ldr r1, [sp, #S_PSR] @ get calling cpsr
- ldr lr, [sp, #S_PC]! @ get pc
- msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
- mov r0, r0
- add sp, sp, #S_FRAME_SIZE - S_PC
- movs pc, lr @ return & move spsr_svc into cpsr
+ restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)
/*
@@ -182,8 +168,10 @@ ftrace_stub:
ENTRY(vector_swi)
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
- add r8, sp, #S_PC
- stmdb r8, {sp, lr}^ @ Calling sp, lr
+ ARM( add r8, sp, #S_PC )
+ ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
+ THUMB( mov r8, sp )
+ THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
mrs r8, spsr @ called from non-FIQ mode, so ok.
str lr, [sp, #S_PC] @ Save calling PC
str r8, [sp, #S_PSR] @ Save CPSR
@@ -272,7 +260,7 @@ ENTRY(vector_swi)
bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit
- adr lr, ret_fast_syscall @ return address
+ adr lr, BSYM(ret_fast_syscall) @ return address
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
@@ -293,7 +281,7 @@ __sys_trace:
mov r0, #0 @ trace entry [IP = 0]
bl syscall_trace
- adr lr, __sys_trace_return @ return address
+ adr lr, BSYM(__sys_trace_return) @ return address
mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 87ab4e157997..a4eaf4f920c5 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -36,11 +36,6 @@
#endif
.endm
- .macro get_thread_info, rd
- mov \rd, sp, lsr #13
- mov \rd, \rd, lsl #13
- .endm
-
.macro alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
ldr \rtemp, .LCcralign
@@ -49,6 +44,93 @@
#endif
.endm
+ @
+ @ Store/load the USER SP and LR registers by switching to the SYS
+ @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
+ @ available. Should only be called from SVC mode
+ @
+ .macro store_user_sp_lr, rd, rtemp, offset = 0
+ mrs \rtemp, cpsr
+ eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+ msr cpsr_c, \rtemp @ switch to the SYS mode
+
+ str sp, [\rd, #\offset] @ save sp_usr
+ str lr, [\rd, #\offset + 4] @ save lr_usr
+
+ eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+ msr cpsr_c, \rtemp @ switch back to the SVC mode
+ .endm
+
+ .macro load_user_sp_lr, rd, rtemp, offset = 0
+ mrs \rtemp, cpsr
+ eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+ msr cpsr_c, \rtemp @ switch to the SYS mode
+
+ ldr sp, [\rd, #\offset] @ load sp_usr
+ ldr lr, [\rd, #\offset + 4] @ load lr_usr
+
+ eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+ msr cpsr_c, \rtemp @ switch back to the SVC mode
+ .endm
+
+#ifndef CONFIG_THUMB2_KERNEL
+ .macro svc_exit, rpsr
+ msr spsr_cxsf, \rpsr
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ .endm
+
+ .macro restore_user_regs, fast = 0, offset = 0
+ ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
+ ldr lr, [sp, #\offset + S_PC]! @ get pc
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ .if \fast
+ ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
+ .else
+ ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
+ .endif
+ add sp, sp, #S_FRAME_SIZE - S_PC
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+ .macro get_thread_info, rd
+ mov \rd, sp, lsr #13
+ mov \rd, \rd, lsl #13
+ .endm
+#else /* CONFIG_THUMB2_KERNEL */
+ .macro svc_exit, rpsr
+ ldr r0, [sp, #S_SP] @ top of the stack
+ ldr r1, [sp, #S_PC] @ return address
+ tst r0, #4 @ orig stack 8-byte aligned?
+ stmdb r0, {r1, \rpsr} @ rfe context
+ ldmia sp, {r0 - r12}
+ ldr lr, [sp, #S_LR]
+ addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned
+ addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned
+ rfeia sp!
+ .endm
+
+ .macro restore_user_regs, fast = 0, offset = 0
+ mov r2, sp
+ load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
+ ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
+ ldr lr, [sp, #\offset + S_PC] @ get pc
+ add sp, sp, #\offset + S_SP
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ .if \fast
+ ldmdb sp, {r1 - r12} @ get calling r1 - r12
+ .else
+ ldmdb sp, {r0 - r12} @ get calling r0 - r12
+ .endif
+ add sp, sp, #S_FRAME_SIZE - S_SP
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+ .macro get_thread_info, rd
+ mov \rd, sp
+ lsr \rd, \rd, #13
+ mov \rd, \rd, lsl #13
+ .endm
+#endif /* !CONFIG_THUMB2_KERNEL */
/*
* These are the registers used in the syscall handler, and allow us to
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 991952c644d1..93ad576b2d74 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -14,6 +14,7 @@
#define ATAG_CORE 0x54410001
#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
+ .align 2
.type __switch_data, %object
__switch_data:
.long __mmap_switched
@@ -51,7 +52,9 @@ __mmap_switched:
strcc fp, [r6],#4
bcc 1b
- ldmia r3, {r4, r5, r6, r7, sp}
+ ARM( ldmia r3, {r4, r5, r6, r7, sp})
+ THUMB( ldmia r3, {r4, r5, r6, r7} )
+ THUMB( ldr sp, [r3, #16] )
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
@@ -155,7 +158,8 @@ ENDPROC(__error)
*/
__lookup_processor_type:
adr r3, 3f
- ldmda r3, {r5 - r7}
+ ldmia r3, {r5 - r7}
+ add r3, r3, #8
sub r3, r3, r7 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
@@ -185,9 +189,10 @@ ENDPROC(lookup_processor_type)
* Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
* more information about the __proc_info and __arch_info structures.
*/
- .long __proc_info_begin
+ .align 2
+3: .long __proc_info_begin
.long __proc_info_end
-3: .long .
+4: .long .
.long __arch_info_begin
.long __arch_info_end
@@ -203,7 +208,7 @@ ENDPROC(lookup_processor_type)
* r5 = mach_info pointer in physical address space
*/
__lookup_machine_type:
- adr r3, 3b
+ adr r3, 4b
ldmia r3, {r4, r5, r6}
sub r3, r3, r4 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index cc87e1765ed2..e5dfc2895e24 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -34,7 +34,7 @@
*/
.section ".text.head", "ax"
ENTRY(stext)
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
@@ -50,8 +50,10 @@ ENTRY(stext)
ldr r13, __switch_data @ address to jump to after
@ the initialization is done
- adr lr, __after_proc_init @ return (PIC) address
- add pc, r10, #PROCINFO_INITFUNC
+ adr lr, BSYM(__after_proc_init) @ return (PIC) address
+ ARM( add pc, r10, #PROCINFO_INITFUNC )
+ THUMB( add r12, r10, #PROCINFO_INITFUNC )
+ THUMB( mov pc, r12 )
ENDPROC(stext)
/*
@@ -59,7 +61,10 @@ ENDPROC(stext)
*/
__after_proc_init:
#ifdef CONFIG_CPU_CP15
- mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ /*
+ * CP15 system control register value returned in r0 from
+ * the CPU init function.
+ */
#ifdef CONFIG_ALIGNMENT_TRAP
orr r0, r0, #CR_A
#else
@@ -82,7 +87,8 @@ __after_proc_init:
mcr p15, 0, r0, c1, c0, 0 @ write control reg
#endif /* CONFIG_CPU_CP15 */
- mov pc, r13 @ clear the BSS and jump
+ mov r3, r13
+ mov pc, r3 @ clear the BSS and jump
@ to start_kernel
ENDPROC(__after_proc_init)
.ltorg
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 21e17dc94cb5..38ccbe1d3b2c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -76,7 +76,7 @@
*/
.section ".text.head", "ax"
ENTRY(stext)
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
@@ -97,8 +97,10 @@ ENTRY(stext)
*/
ldr r13, __switch_data @ address to jump to after
@ mmu has been enabled
- adr lr, __enable_mmu @ return (PIC) address
- add pc, r10, #PROCINFO_INITFUNC
+ adr lr, BSYM(__enable_mmu) @ return (PIC) address
+ ARM( add pc, r10, #PROCINFO_INITFUNC )
+ THUMB( add r12, r10, #PROCINFO_INITFUNC )
+ THUMB( mov pc, r12 )
ENDPROC(stext)
#if defined(CONFIG_SMP)
@@ -110,7 +112,7 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+ setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
@@ -121,12 +123,15 @@ ENTRY(secondary_startup)
* Use the page tables supplied from __cpu_up.
*/
adr r4, __secondary_data
- ldmia r4, {r5, r7, r13} @ address to jump to after
+ ldmia r4, {r5, r7, r12} @ address to jump to after
sub r4, r4, r5 @ mmu has been enabled
ldr r4, [r7, r4] @ get secondary_data.pgdir
- adr lr, __enable_mmu @ return address
- add pc, r10, #PROCINFO_INITFUNC @ initialise processor
- @ (return control reg)
+ adr lr, BSYM(__enable_mmu) @ return address
+ mov r13, r12 @ __secondary_switched address
+ ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
+ @ (return control reg)
+ THUMB( add r12, r10, #PROCINFO_INITFUNC )
+ THUMB( mov pc, r12 )
ENDPROC(secondary_startup)
/*
@@ -193,8 +198,8 @@ __turn_mmu_on:
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
mov r3, r3
- mov r3, r3
- mov pc, r13
+ mov r3, r13
+ mov pc, r3
ENDPROC(__turn_mmu_on)
@@ -235,7 +240,8 @@ __create_page_tables:
* will be removed by paging_init(). We use our current program
* counter to determine corresponding section base address.
*/
- mov r6, pc, lsr #20 @ start of kernel section
+ mov r6, pc
+ mov r6, r6, lsr #20 @ start of kernel section
orr r3, r7, r6, lsl #20 @ flags + kernel base
str r3, [r4, r6, lsl #2] @ identity mapping
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index bac03c81489d..f28c5e9c51ea 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -102,6 +102,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned long loc;
Elf32_Sym *sym;
s32 offset;
+ u32 upper, lower, sign, j1, j2;
offset = ELF32_R_SYM(rel->r_info);
if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
@@ -184,6 +185,58 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
(offset & 0x0fff);
break;
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ upper = *(u16 *)loc;
+ lower = *(u16 *)(loc + 2);
+
+ /*
+ * 25 bit signed address range (Thumb-2 BL and B.W
+ * instructions):
+ * S:I1:I2:imm10:imm11:0
+ * where:
+ * S = upper[10] = offset[24]
+ * I1 = ~(J1 ^ S) = offset[23]
+ * I2 = ~(J2 ^ S) = offset[22]
+ * imm10 = upper[9:0] = offset[21:12]
+ * imm11 = lower[10:0] = offset[11:1]
+ * J1 = lower[13]
+ * J2 = lower[11]
+ */
+ sign = (upper >> 10) & 1;
+ j1 = (lower >> 13) & 1;
+ j2 = (lower >> 11) & 1;
+ offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
+ ((~(j2 ^ sign) & 1) << 22) |
+ ((upper & 0x03ff) << 12) |
+ ((lower & 0x07ff) << 1);
+ if (offset & 0x01000000)
+ offset -= 0x02000000;
+ offset += sym->st_value - loc;
+
+ /* only Thumb addresses allowed (no interworking) */
+ if (!(offset & 1) ||
+ offset <= (s32)0xff000000 ||
+ offset >= (s32)0x01000000) {
+ printk(KERN_ERR
+ "%s: relocation out of range, section "
+ "%d reloc %d sym '%s'\n", module->name,
+ relindex, i, strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+
+ sign = (offset >> 24) & 1;
+ j1 = sign ^ (~(offset >> 23) & 1);
+ j2 = sign ^ (~(offset >> 22) & 1);
+ *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+ ((offset >> 12) & 0x03ff));
+ *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
+ (j1 << 13) | (j2 << 11) |
+ ((offset >> 1) & 0x07ff));
+ upper = *(u16 *)loc;
+ lower = *(u16 *)(loc + 2);
+ break;
+
default:
printk(KERN_ERR "%s: unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel->r_info));
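A worked example of the decode half of this relocation, extracted into a standalone sketch; the function name is illustrative, and the 0xf000/0xf800 halfword pair below is a hand-assembled BL with a zero branch offset, chosen purely for demonstration:

#include <stdio.h>
#include <stdint.h>

/* Rebuild the signed 25-bit offset from the two halfwords of a Thumb-2
 * BL/B.W instruction, mirroring the kernel code above. */
static int32_t thm_branch_offset(uint16_t upper, uint16_t lower)
{
	uint32_t sign = (upper >> 10) & 1;
	uint32_t j1 = (lower >> 13) & 1;
	uint32_t j2 = (lower >> 11) & 1;
	int32_t offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
			 ((~(j2 ^ sign) & 1) << 22) |
			 ((upper & 0x03ff) << 12) |
			 ((lower & 0x07ff) << 1);

	if (offset & 0x01000000)
		offset -= 0x02000000;	/* sign extend */
	return offset;
}

int main(void)
{
	/* 0xf000 0xf800 encodes "bl" with a zero branch offset */
	printf("offset = %ld\n", (long)thm_branch_offset(0xf000, 0xf800));
	return 0;
}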
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 39196dff478c..790fbee92ec5 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -388,7 +388,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.ARM_r2 = (unsigned long)fn;
regs.ARM_r3 = (unsigned long)kernel_thread_exit;
regs.ARM_pc = (unsigned long)kernel_thread_helper;
- regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
+ regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 89882a1d0187..a2ea3854cb3c 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -521,7 +521,13 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
return -EIO;
tmp = 0;
- if (off < sizeof(struct pt_regs))
+ if (off == PT_TEXT_ADDR)
+ tmp = tsk->mm->start_code;
+ else if (off == PT_DATA_ADDR)
+ tmp = tsk->mm->start_data;
+ else if (off == PT_TEXT_END_ADDR)
+ tmp = tsk->mm->end_code;
+ else if (off < sizeof(struct pt_regs))
tmp = get_user_reg(tsk, off >> 2);
return put_user(tmp, ret);
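From user space the new offsets are reached through PTRACE_PEEKUSER; a minimal sketch of a hypothetical debugger fragment using them, with the PT_* values copied from ptrace.h above and all error handling omitted (the target must already be traced and stopped):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#define PT_TEXT_ADDR	0x10000
#define PT_DATA_ADDR	0x10004

static void show_layout(pid_t pid)
{
	long text = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_TEXT_ADDR, 0);
	long data = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_DATA_ADDR, 0);

	printf("text at %#lx, data at %#lx\n", text, data);
}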
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index bc5e4128f9f3..d4d4f77c91b2 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -25,6 +25,7 @@
#include <linux/smp.h>
#include <linux/fs.h>
+#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
@@ -327,25 +328,38 @@ void cpu_init(void)
}
/*
+ * Define the placement constraint for the inline asm directive below.
+ * In Thumb-2, msr with an immediate value is not allowed.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+#define PLC "r"
+#else
+#define PLC "I"
+#endif
+
+ /*
* setup stacks for re-entrant exception handlers
*/
__asm__ (
"msr cpsr_c, %1\n\t"
- "add sp, %0, %2\n\t"
+ "add r14, %0, %2\n\t"
+ "mov sp, r14\n\t"
"msr cpsr_c, %3\n\t"
- "add sp, %0, %4\n\t"
+ "add r14, %0, %4\n\t"
+ "mov sp, r14\n\t"
"msr cpsr_c, %5\n\t"
- "add sp, %0, %6\n\t"
+ "add r14, %0, %6\n\t"
+ "mov sp, r14\n\t"
"msr cpsr_c, %7"
:
: "r" (stk),
- "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
}
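The constraint switch matters because the Thumb-2 MSR encoding only takes a register operand, while ARM state also accepts an immediate. In isolation the idea looks like this sketch, assuming an ARM EABI compiler (__thumb2__ is the compiler's own predefine, and the argument must be a compile-time constant for the "I" case):

#ifdef __thumb2__
#define PLC "r"	/* Thumb-2: MSR only takes a register operand */
#else
#define PLC "I"	/* ARM: an immediate encoding exists */
#endif

/* Must be invoked with a constant expression when PLC is "I". */
#define set_cpsr_c(psr) \
	__asm__ volatile("msr cpsr_c, %0" : : PLC ((psr)) : "memory")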
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index dd56e11f339a..39baf1128bfa 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -62,7 +62,11 @@ struct unwind_ctrl_block {
};
enum regs {
+#ifdef CONFIG_THUMB2_KERNEL
+ FP = 7,
+#else
FP = 11,
+#endif
SP = 13,
LR = 14,
PC = 15
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index 1154d924080b..638deb13da1c 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -43,7 +43,9 @@ ENTRY(__aeabi_llsl)
rsb ip, r2, #32
movmi ah, ah, lsl r2
movpl ah, al, lsl r3
- orrmi ah, ah, al, lsr ip
+ ARM( orrmi ah, ah, al, lsr ip )
+ THUMB( lsrmi r3, al, ip )
+ THUMB( orrmi ah, ah, r3 )
mov al, al, lsl r2
mov pc, lr
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 9f8b35572f8c..015e8aa5a1d1 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -43,7 +43,9 @@ ENTRY(__aeabi_lasr)
rsb ip, r2, #32
movmi al, al, lsr r2
movpl al, ah, asr r3
- orrmi al, al, ah, lsl ip
+ ARM( orrmi al, al, ah, lsl ip )
+ THUMB( lslmi r3, ah, ip )
+ THUMB( orrmi al, al, r3 )
mov ah, ah, asr r2
mov pc, lr
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index b0951d0e8b2c..aaf7220d9e30 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -38,7 +38,9 @@ ENDPROC(c_backtrace)
beq no_frame @ we have no stack frames
tst r1, #0x10 @ 26 or 32-bit mode?
- moveq mask, #0xfc000003 @ mask for 26-bit
+ ARM( moveq mask, #0xfc000003 )
+ THUMB( moveq mask, #0xfc000000 )
+ THUMB( orreq mask, #0x03 )
movne mask, #0 @ mask for 32-bit
1: stmfd sp!, {pc} @ calculate offset of PC stored
@@ -126,7 +128,9 @@ ENDPROC(c_backtrace)
mov reg, #10
mov r7, #0
1: mov r3, #1
- tst instr, r3, lsl reg
+ ARM( tst instr, r3, lsl reg )
+ THUMB( lsl r3, reg )
+ THUMB( tst instr, r3 )
beq 2f
add r7, r7, #1
teq r7, #6
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 844f56785ebc..1279abd8b886 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -27,21 +27,20 @@ WEAK(__clear_user)
ands ip, r0, #3
beq 1f
cmp ip, #2
-USER( strbt r2, [r0], #1)
-USER( strlebt r2, [r0], #1)
-USER( strltbt r2, [r0], #1)
+ strusr r2, r0, 1
+ strusr r2, r0, 1, le
+ strusr r2, r0, 1, lt
rsb ip, ip, #4
sub r1, r1, ip @ 7 6 5 4 3 2 1
1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
-USER( strplt r2, [r0], #4)
-USER( strplt r2, [r0], #4)
+ strusr r2, r0, 4, pl, rept=2
bpl 1b
adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3
-USER( strplt r2, [r0], #4)
+ strusr r2, r0, 4, pl
2: tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
-USER( strnebt r2, [r0], #1)
-USER( strnebt r2, [r0], #1)
+ strusr r2, r0, 1, ne, rept=2
tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
+ it ne @ explicit IT needed for the label
USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 56799a165cc4..e4fe124acedc 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -33,11 +33,15 @@
* Number of bytes NOT copied.
*/
+#ifndef CONFIG_THUMB2_KERNEL
+#define LDR1W_SHIFT 0
+#else
+#define LDR1W_SHIFT 1
+#endif
+#define STR1W_SHIFT 0
+
.macro ldr1w ptr reg abort
-100: ldrt \reg, [\ptr], #4
- .section __ex_table, "a"
- .long 100b, \abort
- .previous
+ ldrusr \reg, \ptr, 4, abort=\abort
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
@@ -53,14 +57,11 @@
.endm
.macro ldr1b ptr reg cond=al abort
-100: ldr\cond\()bt \reg, [\ptr], #1
- .section __ex_table, "a"
- .long 100b, \abort
- .previous
+ ldrusr \reg, \ptr, 1, \cond, abort=\abort
.endm
.macro str1w ptr reg abort
- str \reg, [\ptr], #4
+ W(str) \reg, [\ptr], #4
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S
index 139cce646055..805e3f8fb007 100644
--- a/arch/arm/lib/copy_template.S
+++ b/arch/arm/lib/copy_template.S
@@ -57,6 +57,13 @@
*
* Restore registers with the values previously saved with the
* 'preserv' macro. Called upon code termination.
+ *
+ * LDR1W_SHIFT
+ * STR1W_SHIFT
+ *
+ * Correction to be applied to the "ip" register when branching into
+ * the ldr1w or str1w instructions (some of these macros may expand to
+ * more than one 32-bit instruction in Thumb-2)
*/
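The correction described above can be modelled in C: the copy loop branches into a table of ldr1w/str1w expansions, and each table slot doubles in size when the macro emits two 32-bit instructions. A sketch of the offset computation, with illustrative names:

/* "remaining" is the byte count left to copy; the ARM version skips
 * (32 - (remaining & 28)) bytes of the table, one word per slot.  With
 * LDR1W_SHIFT == 1 each slot is two words, so the offset is doubled. */
static unsigned int table_offset(unsigned int remaining, unsigned int shift)
{
	unsigned int ip = 32 - (remaining & 28);

	return ip << shift;
}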
@@ -99,9 +106,15 @@
5: ands ip, r2, #28
rsb ip, ip, #32
+#if LDR1W_SHIFT > 0
+ lsl ip, ip, #LDR1W_SHIFT
+#endif
addne pc, pc, ip @ C is always clear here
b 7f
-6: nop
+6:
+ .rept (1 << LDR1W_SHIFT)
+ W(nop)
+ .endr
ldr1w r1, r3, abort=20f
ldr1w r1, r4, abort=20f
ldr1w r1, r5, abort=20f
@@ -110,9 +123,16 @@
ldr1w r1, r8, abort=20f
ldr1w r1, lr, abort=20f
+#if LDR1W_SHIFT < STR1W_SHIFT
+ lsl ip, ip, #STR1W_SHIFT - LDR1W_SHIFT
+#elif LDR1W_SHIFT > STR1W_SHIFT
+ lsr ip, ip, #LDR1W_SHIFT - STR1W_SHIFT
+#endif
add pc, pc, ip
nop
- nop
+ .rept (1 << STR1W_SHIFT)
+ W(nop)
+ .endr
str1w r0, r3, abort=20f
str1w r0, r4, abort=20f
str1w r0, r5, abort=20f
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 878820f0a320..1a71e1584442 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -33,8 +33,15 @@
* Number of bytes NOT copied.
*/
+#define LDR1W_SHIFT 0
+#ifndef CONFIG_THUMB2_KERNEL
+#define STR1W_SHIFT 0
+#else
+#define STR1W_SHIFT 1
+#endif
+
.macro ldr1w ptr reg abort
- ldr \reg, [\ptr], #4
+ W(ldr) \reg, [\ptr], #4
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
@@ -50,10 +57,7 @@
.endm
.macro str1w ptr reg abort
-100: strt \reg, [\ptr], #4
- .section __ex_table, "a"
- .long 100b, \abort
- .previous
+ strusr \reg, \ptr, 4, abort=\abort
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
@@ -68,10 +72,7 @@
.endm
.macro str1b ptr reg cond=al abort
-100: str\cond\()bt \reg, [\ptr], #1
- .section __ex_table, "a"
- .long 100b, \abort
- .previous
+ strusr \reg, \ptr, 1, \cond, abort=\abort
.endm
.macro enter reg1 reg2
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 14677fb4b0c4..fd0e9dcd9fdc 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -26,50 +26,28 @@
.endm
.macro load1b, reg1
-9999: ldrbt \reg1, [r0], $1
- .section __ex_table, "a"
- .align 3
- .long 9999b, 6001f
- .previous
+ ldrusr \reg1, r0, 1
.endm
.macro load2b, reg1, reg2
-9999: ldrbt \reg1, [r0], $1
-9998: ldrbt \reg2, [r0], $1
- .section __ex_table, "a"
- .long 9999b, 6001f
- .long 9998b, 6001f
- .previous
+ ldrusr \reg1, r0, 1
+ ldrusr \reg2, r0, 1
.endm
.macro load1l, reg1
-9999: ldrt \reg1, [r0], $4
- .section __ex_table, "a"
- .align 3
- .long 9999b, 6001f
- .previous
+ ldrusr \reg1, r0, 4
.endm
.macro load2l, reg1, reg2
-9999: ldrt \reg1, [r0], $4
-9998: ldrt \reg2, [r0], $4
- .section __ex_table, "a"
- .long 9999b, 6001f
- .long 9998b, 6001f
- .previous
+ ldrusr \reg1, r0, 4
+ ldrusr \reg2, r0, 4
.endm
.macro load4l, reg1, reg2, reg3, reg4
-9999: ldrt \reg1, [r0], $4
-9998: ldrt \reg2, [r0], $4
-9997: ldrt \reg3, [r0], $4
-9996: ldrt \reg4, [r0], $4
- .section __ex_table, "a"
- .long 9999b, 6001f
- .long 9998b, 6001f
- .long 9997b, 6001f
- .long 9996b, 6001f
- .previous
+ ldrusr \reg1, r0, 4
+ ldrusr \reg2, r0, 4
+ ldrusr \reg3, r0, 4
+ ldrusr \reg4, r0, 4
.endm
/*
@@ -92,14 +70,14 @@
*/
.section .fixup,"ax"
.align 4
-6001: mov r4, #-EFAULT
+9001: mov r4, #-EFAULT
ldr r5, [fp, #4] @ *err_ptr
str r4, [r5]
ldmia sp, {r1, r2} @ retrieve dst, len
add r2, r2, r1
mov r0, #0 @ zero the buffer
-6002: teq r2, r1
+9002: teq r2, r1
strneb r0, [r1], #1
- bne 6002b
+ bne 9002b
load_regs
.previous
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index 1425e789ba86..faa7748142da 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -177,7 +177,9 @@ ENTRY(__do_div64)
mov yh, xh, lsr ip
mov yl, xl, lsr ip
rsb ip, ip, #32
- orr yl, yl, xh, lsl ip
+ ARM( orr yl, yl, xh, lsl ip )
+ THUMB( lsl xh, xh, ip )
+ THUMB( orr yl, yl, xh )
mov xh, xl, lsl ip
mov xh, xh, lsr ip
mov pc, lr
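The split exists because Thumb-2 data-processing instructions cannot take a register-controlled shift as their second operand, so the single ARM orr becomes an lsl into a scratch value followed by a plain orr. In C terms, the affected step of __do_div64 is the low-word combine of a 64-bit right shift; a sketch assuming 0 < ip < 32:

#include <stdint.h>

static uint64_t shift_right_64(uint32_t xh, uint32_t xl, unsigned int ip)
{
	uint32_t yh = xh >> ip;			/* mov yh, xh, lsr ip        */
	uint32_t hi = xh << (32 - ip);		/* THUMB: lsl xh, xh, ip'    */
	uint32_t yl = (xl >> ip) | hi;		/* THUMB: orr yl, yl, xh     */

	return ((uint64_t)yh << 32) | yl;
}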
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 8c4defc4f3c4..1e4cbd4e7be9 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -25,7 +25,10 @@ ENTRY(_find_first_zero_bit_le)
teq r1, #0
beq 3f
mov r2, #0
-1: ldrb r3, [r0, r2, lsr #3]
+1:
+ ARM( ldrb r3, [r0, r2, lsr #3] )
+ THUMB( lsr r3, r2, #3 )
+ THUMB( ldrb r3, [r0, r3] )
eors r3, r3, #0xff @ invert bits
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
@@ -44,7 +47,9 @@ ENTRY(_find_next_zero_bit_le)
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
- ldrb r3, [r0, r2, lsr #3]
+ ARM( ldrb r3, [r0, r2, lsr #3] )
+ THUMB( lsr r3, r2, #3 )
+ THUMB( ldrb r3, [r0, r3] )
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
@@ -61,7 +66,10 @@ ENTRY(_find_first_bit_le)
teq r1, #0
beq 3f
mov r2, #0
-1: ldrb r3, [r0, r2, lsr #3]
+1:
+ ARM( ldrb r3, [r0, r2, lsr #3] )
+ THUMB( lsr r3, r2, #3 )
+ THUMB( ldrb r3, [r0, r3] )
movs r3, r3
bne .L_found @ any now set - found a bit
add r2, r2, #8 @ next bit pointer
@@ -80,7 +88,9 @@ ENTRY(_find_next_bit_le)
beq 3b
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
- ldrb r3, [r0, r2, lsr #3]
+ ARM( ldrb r3, [r0, r2, lsr #3] )
+ THUMB( lsr r3, r2, #3 )
+ THUMB( ldrb r3, [r0, r3] )
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
@@ -95,7 +105,9 @@ ENTRY(_find_first_zero_bit_be)
beq 3f
mov r2, #0
1: eor r3, r2, #0x18 @ big endian byte ordering
- ldrb r3, [r0, r3, lsr #3]
+ ARM( ldrb r3, [r0, r3, lsr #3] )
+ THUMB( lsr r3, #3 )
+ THUMB( ldrb r3, [r0, r3] )
eors r3, r3, #0xff @ invert bits
bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
@@ -111,7 +123,9 @@ ENTRY(_find_next_zero_bit_be)
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
- ldrb r3, [r0, r3, lsr #3]
+ ARM( ldrb r3, [r0, r3, lsr #3] )
+ THUMB( lsr r3, #3 )
+ THUMB( ldrb r3, [r0, r3] )
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
@@ -125,7 +139,9 @@ ENTRY(_find_first_bit_be)
beq 3f
mov r2, #0
1: eor r3, r2, #0x18 @ big endian byte ordering
- ldrb r3, [r0, r3, lsr #3]
+ ARM( ldrb r3, [r0, r3, lsr #3] )
+ THUMB( lsr r3, #3 )
+ THUMB( ldrb r3, [r0, r3] )
movs r3, r3
bne .L_found @ any now set - found a bit
add r2, r2, #8 @ next bit pointer
@@ -141,7 +157,9 @@ ENTRY(_find_next_bit_be)
ands ip, r2, #7
beq 1b @ If new byte, goto old routine
eor r3, r2, #0x18 @ big endian byte ordering
- ldrb r3, [r0, r3, lsr #3]
+ ARM( ldrb r3, [r0, r3, lsr #3] )
+ THUMB( lsr r3, #3 )
+ THUMB( ldrb r3, [r0, r3] )
movs r3, r3, lsr ip @ shift off unused bits
bne .L_found
orr r2, r2, #7 @ if zero, then no bits here
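All eight variants get the same treatment because Thumb-2's ldrb can fold only a left shift into its register-offset addressing mode, never the lsr #3 used here, so the byte index must be computed in a separate instruction. A C rendition of the loop being patched (sketch: little-endian first-zero-bit variant, with __builtin_ctz standing in for the bit search done at .L_found):

static unsigned int find_first_zero_bit_le_model(const unsigned char *p,
						 unsigned int maxbit)
{
	unsigned int r2;

	for (r2 = 0; r2 < maxbit; r2 += 8) {
		unsigned char r3 = p[r2 >> 3] ^ 0xff;	/* invert bits */

		if (r3)					/* any set: found */
			return r2 + (unsigned int)__builtin_ctz(r3);
	}
	return maxbit;
}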
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 6763088b7607..a1814d927122 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -36,8 +36,13 @@ ENTRY(__get_user_1)
ENDPROC(__get_user_1)
ENTRY(__get_user_2)
+#ifdef CONFIG_THUMB2_KERNEL
+2: ldrbt r2, [r0]
+3: ldrbt r3, [r0, #1]
+#else
2: ldrbt r2, [r0], #1
3: ldrbt r3, [r0]
+#endif
#ifndef __ARMEB__
orr r2, r2, r3, lsl #8
#else
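The Thumb-2 branch differs because ldrbt there supports only an immediate offset, with no post-indexed writeback form, so the two byte loads address r0 and r0 + 1 directly instead of stepping the pointer. What the pair computes, as a sketch with the __ex_table fault handling omitted (the big-endian case swaps which byte is shifted):

static unsigned short get_user_2_model(const unsigned char *p)
{
	unsigned int r2 = p[0];		/* 2: ldrbt r2, [r0]     */
	unsigned int r3 = p[1];		/* 3: ldrbt r3, [r0, #1] */

	return (unsigned short)(r2 | (r3 << 8));
}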
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index d6585612c86b..ff4f71b579ee 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -75,7 +75,10 @@ ENTRY(__raw_writesw)
#endif
.Loutsw_noalign:
- ldr r3, [r1, -r3]!
+ ARM( ldr r3, [r1, -r3]! )
+ THUMB( rsb r3, r3, #0 )
+ THUMB( ldr r3, [r1, r3] )
+ THUMB( sub r1, r3 )
subcs r2, r2, #1
bcs 2f
subs r2, r2, #2
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index 99ea338bf87c..f83d449141f7 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -43,7 +43,9 @@ ENTRY(__aeabi_llsr)
rsb ip, r2, #32
movmi al, al, lsr r2
movpl al, ah, lsr r3
- orrmi al, al, ah, lsl ip
+ ARM( orrmi al, al, ah, lsl ip )
+ THUMB( lslmi r3, ah, ip )
+ THUMB( orrmi al, al, r3 )
mov ah, ah, lsr r2
mov pc, lr
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index e0d002641d3f..a9b9e2287a09 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -13,8 +13,11 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#define LDR1W_SHIFT 0
+#define STR1W_SHIFT 0
+
.macro ldr1w ptr reg abort
- ldr \reg, [\ptr], #4
+ W(ldr) \reg, [\ptr], #4
.endm
.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
@@ -30,7 +33,7 @@
.endm
.macro str1w ptr reg abort
- str \reg, [\ptr], #4
+ W(str) \reg, [\ptr], #4
.endm
.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 12549187088c..5025c863713d 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -75,24 +75,24 @@ ENTRY(memmove)
addne pc, pc, ip @ C is always clear here
b 7f
6: nop
- ldr r3, [r1, #-4]!
- ldr r4, [r1, #-4]!
- ldr r5, [r1, #-4]!
- ldr r6, [r1, #-4]!
- ldr r7, [r1, #-4]!
- ldr r8, [r1, #-4]!
- ldr lr, [r1, #-4]!
+ W(ldr) r3, [r1, #-4]!
+ W(ldr) r4, [r1, #-4]!
+ W(ldr) r5, [r1, #-4]!
+ W(ldr) r6, [r1, #-4]!
+ W(ldr) r7, [r1, #-4]!
+ W(ldr) r8, [r1, #-4]!
+ W(ldr) lr, [r1, #-4]!
add pc, pc, ip
nop
nop
- str r3, [r0, #-4]!
- str r4, [r0, #-4]!
- str r5, [r0, #-4]!
- str r6, [r0, #-4]!
- str r7, [r0, #-4]!
- str r8, [r0, #-4]!
- str lr, [r0, #-4]!
+ W(str) r3, [r0, #-4]!
+ W(str) r4, [r0, #-4]!
+ W(str) r5, [r0, #-4]!
+ W(str) r6, [r0, #-4]!
+ W(str) r7, [r0, #-4]!
+ W(str) r8, [r0, #-4]!
+ W(str) lr, [r0, #-4]!
CALGN( bcs 2b )
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 864f3c1c4f18..02fedbf07c0d 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -37,6 +37,15 @@ ENDPROC(__put_user_1)
ENTRY(__put_user_2)
mov ip, r2, lsr #8
+#ifdef CONFIG_THUMB2_KERNEL
+#ifndef __ARMEB__
+2: strbt r2, [r0]
+3: strbt ip, [r0, #1]
+#else
+2: strbt ip, [r0]
+3: strbt r2, [r0, #1]
+#endif
+#else /* !CONFIG_THUMB2_KERNEL */
#ifndef __ARMEB__
2: strbt r2, [r0], #1
3: strbt ip, [r0]
@@ -44,6 +53,7 @@ ENTRY(__put_user_2)
2: strbt ip, [r0], #1
3: strbt r2, [r0]
#endif
+#endif /* CONFIG_THUMB2_KERNEL */
mov r0, #0
mov pc, lr
ENDPROC(__put_user_2)
@@ -55,8 +65,13 @@ ENTRY(__put_user_4)
ENDPROC(__put_user_4)
ENTRY(__put_user_8)
+#ifdef CONFIG_THUMB2_KERNEL
+5: strt r2, [r0]
+6: strt r3, [r0, #4]
+#else
5: strt r2, [r0], #4
6: strt r3, [r0]
+#endif
mov r0, #0
mov pc, lr
ENDPROC(__put_user_8)
diff --git a/arch/arm/lib/sha1.S b/arch/arm/lib/sha1.S
index a16fb208c841..09b548cac1a4 100644
--- a/arch/arm/lib/sha1.S
+++ b/arch/arm/lib/sha1.S
@@ -187,6 +187,7 @@ ENTRY(sha_transform)
ENDPROC(sha_transform)
+ .align 2
.L_sha_K:
.word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
@@ -195,6 +196,7 @@ ENDPROC(sha_transform)
* void sha_init(__u32 *buf)
*/
+ .align 2
.L_sha_initial_digest:
.word 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0
diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S
index 330373c26dd9..1c9814f346c6 100644
--- a/arch/arm/lib/strncpy_from_user.S
+++ b/arch/arm/lib/strncpy_from_user.S
@@ -23,7 +23,7 @@
ENTRY(__strncpy_from_user)
mov ip, r1
1: subs r2, r2, #1
-USER( ldrplbt r3, [r1], #1)
+ ldrusr r3, r1, 1, pl
bmi 2f
strb r3, [r0], #1
teq r3, #0
diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S
index 90bb9d020836..7855b2906659 100644
--- a/arch/arm/lib/strnlen_user.S
+++ b/arch/arm/lib/strnlen_user.S
@@ -23,7 +23,7 @@
ENTRY(__strnlen_user)
mov r2, r0
1:
-USER( ldrbt r3, [r0], #1)
+ ldrusr r3, r0, 1
teq r3, #0
beq 2f
subs r1, r1, #1
diff --git a/arch/arm/mach-integrator/include/mach/hardware.h b/arch/arm/mach-integrator/include/mach/hardware.h
index 1251319ef9ae..d795642fad22 100644
--- a/arch/arm/mach-integrator/include/mach/hardware.h
+++ b/arch/arm/mach-integrator/include/mach/hardware.h
@@ -36,8 +36,12 @@
#define PCIO_BASE PCI_IO_VADDR
#define PCIMEM_BASE PCI_MEMORY_VADDR
+#ifdef CONFIG_MMU
/* macro to get at IO space when running virtually */
#define IO_ADDRESS(x) (((x) >> 4) + IO_BASE)
+#else
+#define IO_ADDRESS(x) (x)
+#endif
#define pcibios_assign_all_busses() 1
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 4ac04055c2ea..452931b2690e 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -49,14 +49,14 @@
#define INTCP_PA_CLCD_BASE 0xc0000000
-#define INTCP_VA_CIC_BASE 0xf1000040
-#define INTCP_VA_PIC_BASE 0xf1400000
-#define INTCP_VA_SIC_BASE 0xfca00000
+#define INTCP_VA_CIC_BASE IO_ADDRESS(INTEGRATOR_HDR_BASE) + 0x40
+#define INTCP_VA_PIC_BASE IO_ADDRESS(INTEGRATOR_IC_BASE)
+#define INTCP_VA_SIC_BASE IO_ADDRESS(0xca000000)
#define INTCP_PA_ETH_BASE 0xc8000000
#define INTCP_ETH_SIZE 0x10
-#define INTCP_VA_CTRL_BASE 0xfcb00000
+#define INTCP_VA_CTRL_BASE IO_ADDRESS(0xcb000000)
#define INTCP_FLASHPROG 0x04
#define CINTEGRATOR_FLASHPROG_FLVPPEN (1 << 0)
#define CINTEGRATOR_FLASHPROG_FLWREN (1 << 1)
@@ -121,12 +121,12 @@ static struct map_desc intcp_io_desc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = 0xfca00000,
+ .virtual = IO_ADDRESS(0xca000000),
.pfn = __phys_to_pfn(0xca000000),
.length = SZ_4K,
.type = MT_DEVICE
}, {
- .virtual = 0xfcb00000,
+ .virtual = IO_ADDRESS(0xcb000000),
.pfn = __phys_to_pfn(0xcb000000),
.length = SZ_4K,
.type = MT_DEVICE
@@ -394,8 +394,8 @@ static struct platform_device *intcp_devs[] __initdata = {
*/
static unsigned int mmc_status(struct device *dev)
{
- unsigned int status = readl(0xfca00004);
- writel(8, 0xfcb00008);
+ unsigned int status = readl(IO_ADDRESS(0xca000000) + 4);
+ writel(8, IO_ADDRESS(0xcb000000) + 8);
return status & 8;
}
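These IO_ADDRESS() forms are intended to reproduce the old magic constants exactly. A quick hosted-C check, assuming Integrator's IO_BASE of 0xf0000000:

#include <stdio.h>

#define IO_BASE		0xf0000000UL		/* assumed Integrator value */
#define IO_ADDRESS(x)	(((x) >> 4) + IO_BASE)

int main(void)
{
	/* 0xfca00000: the old INTCP_VA_SIC_BASE literal */
	printf("SIC:  %#lx\n", IO_ADDRESS(0xca000000UL));
	/* 0xfcb00000: the old INTCP_VA_CTRL_BASE literal */
	printf("CTRL: %#lx\n", IO_ADDRESS(0xcb000000UL));
	return 0;
}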
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 57e477bd89c6..7e1e721f0324 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -39,7 +39,7 @@ static struct platform_device *sdp4430_devices[] __initdata = {
};
static struct omap_uart_config sdp4430_uart_config __initdata = {
- .enabled_uarts = (1 << 0) | (1 << 1) | (1 << 2),
+ .enabled_uarts = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3),
};
static struct omap_lcd_config sdp4430_lcd_config __initdata = {
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index a5c0f0435cd6..d49dfb5e931f 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -169,6 +169,42 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
#define OMAP34XX_MCBSP_PDATA_SZ 0
#endif
+static struct omap_mcbsp_platform_data omap44xx_mcbsp_pdata[] = {
+ {
+ .phys_base = OMAP44XX_MCBSP1_BASE,
+ .dma_rx_sync = OMAP44XX_DMA_MCBSP1_RX,
+ .dma_tx_sync = OMAP44XX_DMA_MCBSP1_TX,
+ .rx_irq = INT_24XX_MCBSP1_IRQ_RX,
+ .tx_irq = INT_24XX_MCBSP1_IRQ_TX,
+ .ops = &omap2_mcbsp_ops,
+ },
+ {
+ .phys_base = OMAP44XX_MCBSP2_BASE,
+ .dma_rx_sync = OMAP44XX_DMA_MCBSP2_RX,
+ .dma_tx_sync = OMAP44XX_DMA_MCBSP2_TX,
+ .rx_irq = INT_24XX_MCBSP2_IRQ_RX,
+ .tx_irq = INT_24XX_MCBSP2_IRQ_TX,
+ .ops = &omap2_mcbsp_ops,
+ },
+ {
+ .phys_base = OMAP44XX_MCBSP3_BASE,
+ .dma_rx_sync = OMAP44XX_DMA_MCBSP3_RX,
+ .dma_tx_sync = OMAP44XX_DMA_MCBSP3_TX,
+ .rx_irq = INT_24XX_MCBSP3_IRQ_RX,
+ .tx_irq = INT_24XX_MCBSP3_IRQ_TX,
+ .ops = &omap2_mcbsp_ops,
+ },
+ {
+ .phys_base = OMAP44XX_MCBSP4_BASE,
+ .dma_rx_sync = OMAP44XX_DMA_MCBSP4_RX,
+ .dma_tx_sync = OMAP44XX_DMA_MCBSP4_TX,
+ .rx_irq = INT_24XX_MCBSP4_IRQ_RX,
+ .tx_irq = INT_24XX_MCBSP4_IRQ_TX,
+ .ops = &omap2_mcbsp_ops,
+ },
+};
+#define OMAP44XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap44xx_mcbsp_pdata)
+
static int __init omap2_mcbsp_init(void)
{
if (cpu_is_omap2420())
@@ -177,6 +213,8 @@ static int __init omap2_mcbsp_init(void)
omap_mcbsp_count = OMAP2430_MCBSP_PDATA_SZ;
if (cpu_is_omap34xx())
omap_mcbsp_count = OMAP34XX_MCBSP_PDATA_SZ;
+ if (cpu_is_omap44xx())
+ omap_mcbsp_count = OMAP44XX_MCBSP_PDATA_SZ;
mcbsp_ptr = kzalloc(omap_mcbsp_count * sizeof(struct omap_mcbsp *),
GFP_KERNEL);
@@ -192,6 +230,9 @@ static int __init omap2_mcbsp_init(void)
if (cpu_is_omap34xx())
omap_mcbsp_register_board_cfg(omap34xx_mcbsp_pdata,
OMAP34XX_MCBSP_PDATA_SZ);
+ if (cpu_is_omap44xx())
+ omap_mcbsp_register_board_cfg(omap44xx_mcbsp_pdata,
+ OMAP44XX_MCBSP_PDATA_SZ);
return omap_mcbsp_init();
}
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index a7421a50410b..ce22344b94e7 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -109,6 +109,16 @@ static struct plat_serial8250_port serial_platform_data2[] = {
.regshift = 2,
.uartclk = OMAP24XX_BASE_BAUD * 16,
}, {
+#ifdef CONFIG_ARCH_OMAP4
+ .membase = IO_ADDRESS(OMAP_UART4_BASE),
+ .mapbase = OMAP_UART4_BASE,
+ .irq = 70,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = OMAP24XX_BASE_BAUD * 16,
+ }, {
+#endif
.flags = 0
}
};
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index d4cfa2145386..dfc9b0bc6eb2 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -75,7 +75,7 @@ config MACH_REALVIEW_PBX
config REALVIEW_HIGH_PHYS_OFFSET
bool "High physical base address for the RealView platform"
- depends on !MACH_REALVIEW_PB1176
+ depends on MMU && !MACH_REALVIEW_PB1176
default y
help
RealView boards other than PB1176 have the RAM available at
diff --git a/arch/arm/mach-realview/include/mach/hardware.h b/arch/arm/mach-realview/include/mach/hardware.h
index b42c14f89acb..8a638d15797f 100644
--- a/arch/arm/mach-realview/include/mach/hardware.h
+++ b/arch/arm/mach-realview/include/mach/hardware.h
@@ -25,6 +25,7 @@
#include <asm/sizes.h>
/* macro to get at IO space when running virtually */
+#ifdef CONFIG_MMU
/*
* Statically mapped addresses:
*
@@ -33,6 +34,9 @@
* 1fxx xxxx -> fexx xxxx
*/
#define IO_ADDRESS(x) (((x) & 0x03ffffff) + 0xfb000000)
+#else
+#define IO_ADDRESS(x) (x)
+#endif
#define __io_address(n) __io(IO_ADDRESS(n))
#endif
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index ac0e83f1cc3a..a88458b4799d 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -20,6 +20,7 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/localtimer.h>
+#include <asm/unified.h>
#include <mach/board-eb.h>
#include <mach/board-pb11mp.h>
@@ -137,26 +138,19 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
static void __init poke_milo(void)
{
- extern void secondary_startup(void);
-
/* nobody is to be released from the pen yet */
pen_release = -1;
/*
- * write the address of secondary startup into the system-wide
- * flags register, then clear the bottom two bits, which is what
- * BootMonitor is waiting for
+ * Write the address of secondary startup into the system-wide flags
+ * register. The BootMonitor waits for this register to become
+ * non-zero.
*/
-#if 1
#define REALVIEW_SYS_FLAGSS_OFFSET 0x30
- __raw_writel(virt_to_phys(realview_secondary_startup),
- __io_address(REALVIEW_SYS_BASE) +
- REALVIEW_SYS_FLAGSS_OFFSET);
#define REALVIEW_SYS_FLAGSC_OFFSET 0x34
- __raw_writel(3,
+ __raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
__io_address(REALVIEW_SYS_BASE) +
- REALVIEW_SYS_FLAGSC_OFFSET);
-#endif
+ REALVIEW_SYS_FLAGSS_OFFSET);
mb();
}
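BSYM() comes from the newly included <asm/unified.h>: bit 0 of a branch target selects Thumb state on a BX-style jump, so a Thumb-2 kernel must publish its secondary entry point with that bit set, or the woken CPU would execute Thumb code as ARM. The helper's rough shape (the authoritative definition lives in that header):

#ifdef CONFIG_THUMB2_KERNEL
#define BSYM(sym)	((sym) + 1)	/* bit 0 set: enter in Thumb state */
#else
#define BSYM(sym)	(sym)		/* plain ARM-state entry address */
#endif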
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 03cd27d917b9..b270d6228fe2 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -159,7 +159,9 @@ union offset_union {
#define __get8_unaligned_check(ins,val,addr,err) \
__asm__( \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -215,7 +217,9 @@ union offset_union {
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_16 \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"2: "ins" %1, [%2]\n" \
"3:\n" \
@@ -245,11 +249,17 @@ union offset_union {
do { \
unsigned int err = 0, v = val, a = addr; \
__asm__( FIRST_BYTE_32 \
- "1: "ins" %1, [%2], #1\n" \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
- "2: "ins" %1, [%2], #1\n" \
+ ARM( "2: "ins" %1, [%2], #1\n" ) \
+ THUMB( "2: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
- "3: "ins" %1, [%2], #1\n" \
+ ARM( "3: "ins" %1, [%2], #1\n" ) \
+ THUMB( "3: "ins" %1, [%2]\n" ) \
+ THUMB( " add %2, %2, #1\n" ) \
" mov %1, %1, "NEXT_BYTE"\n" \
"4: "ins" %1, [%2]\n" \
"5:\n" \
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index be93ff02a98d..bda0ec31a4e2 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -21,7 +21,7 @@
*
* Flush the whole D-cache.
*
- * Corrupted registers: r0-r5, r7, r9-r11
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
*
* - mm - mm_struct describing address space
*/
@@ -51,8 +51,12 @@ loop1:
loop2:
mov r9, r4 @ create working copy of max way size
loop3:
- orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
- orr r11, r11, r7, lsl r2 @ factor index number into r11
+ ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
+ THUMB( lsl r6, r9, r5 )
+ THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
+ ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
+ THUMB( lsl r6, r7, r2 )
+ THUMB( orr r11, r11, r6 ) @ factor index number into r11
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
subs r9, r9, #1 @ decrement the way
bge loop3
@@ -82,11 +86,13 @@ ENDPROC(v7_flush_dcache_all)
*
*/
ENTRY(v7_flush_kern_cache_all)
- stmfd sp!, {r4-r5, r7, r9-r11, lr}
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
- ldmfd sp!, {r4-r5, r7, r9-r11, lr}
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
ENDPROC(v7_flush_kern_cache_all)
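The extra THUMB() steps stage each shifted field through r6 because, as in the library changes earlier in this diff, Thumb-2 cannot fold a register-controlled shift into orr; that is also why r6 joins the registers saved by v7_flush_kern_cache_all and the corrupted-register comment. A hypothetical helper showing the set/way operand that loop3 assembles:

static inline unsigned int dccisw_operand(unsigned int level,
					  unsigned int way, unsigned int way_shift,
					  unsigned int set, unsigned int set_shift)
{
	/* way and set fields shifted into place, cache level in bits [3:1] */
	return (way << way_shift) | (set << set_shift) | (level << 1);
}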
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 510c179b0ac8..b30925fcbcdc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -36,7 +36,34 @@
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+static u64 get_coherent_dma_mask(struct device *dev)
+{
+ u64 mask = ISA_DMA_THRESHOLD;
+
+ if (dev) {
+ mask = dev->coherent_dma_mask;
+
+ /*
+ * Sanity check the DMA mask - it must be non-zero, and
+ * must be able to be satisfied by a DMA allocation.
+ */
+ if (mask == 0) {
+ dev_warn(dev, "coherent DMA mask is unset\n");
+ return 0;
+ }
+
+ if ((~mask) & ISA_DMA_THRESHOLD) {
+ dev_warn(dev, "coherent DMA mask %#llx is smaller "
+ "than system GFP_DMA mask %#llx\n",
+ mask, (unsigned long long)ISA_DMA_THRESHOLD);
+ return 0;
+ }
+ }
+ return mask;
+}
+
+#ifdef CONFIG_MMU
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
*/
@@ -152,7 +179,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
struct page *page;
struct arm_vm_region *c;
unsigned long order;
- u64 mask = ISA_DMA_THRESHOLD, limit;
+ u64 mask = get_coherent_dma_mask(dev);
+ u64 limit;
if (!consistent_pte[0]) {
printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -160,25 +188,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
return NULL;
}
- if (dev) {
- mask = dev->coherent_dma_mask;
-
- /*
- * Sanity check the DMA mask - it must be non-zero, and
- * must be able to be satisfied by a DMA allocation.
- */
- if (mask == 0) {
- dev_warn(dev, "coherent DMA mask is unset\n");
- goto no_page;
- }
-
- if ((~mask) & ISA_DMA_THRESHOLD) {
- dev_warn(dev, "coherent DMA mask %#llx is smaller "
- "than system GFP_DMA mask %#llx\n",
- mask, (unsigned long long)ISA_DMA_THRESHOLD);
- goto no_page;
- }
- }
+ if (!mask)
+ goto no_page;
/*
* Sanity check the allocation size.
@@ -267,6 +278,31 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
*handle = ~0;
return NULL;
}
+#else /* !CONFIG_MMU */
+static void *
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
+ pgprot_t prot)
+{
+ void *virt;
+ u64 mask = get_coherent_dma_mask(dev);
+
+ if (!mask)
+ goto error;
+
+ if (mask != 0xffffffff)
+ gfp |= GFP_DMA;
+ virt = kmalloc(size, gfp);
+ if (!virt)
+ goto error;
+
+ *handle = virt_to_dma(dev, virt);
+ return virt;
+
+error:
+ *handle = ~0;
+ return NULL;
+}
+#endif /* CONFIG_MMU */
/*
* Allocate DMA-coherent memory space and return both the kernel remapped
@@ -311,9 +347,10 @@ EXPORT_SYMBOL(dma_alloc_writecombine);
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
+ int ret = -ENXIO;
+#ifdef CONFIG_MMU
unsigned long flags, user_size, kern_size;
struct arm_vm_region *c;
- int ret = -ENXIO;
user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -334,6 +371,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot);
}
}
+#endif /* CONFIG_MMU */
return ret;
}
@@ -358,6 +396,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
+#ifdef CONFIG_MMU
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
struct arm_vm_region *c;
@@ -444,6 +483,14 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
__func__, cpu_addr);
dump_stack();
}
+#else /* !CONFIG_MMU */
+void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+{
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+ kfree(cpu_addr);
+}
+#endif /* CONFIG_MMU */
EXPORT_SYMBOL(dma_free_coherent);
/*
@@ -451,10 +498,12 @@ EXPORT_SYMBOL(dma_free_coherent);
*/
static int __init consistent_init(void)
{
+ int ret = 0;
+#ifdef CONFIG_MMU
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int ret = 0, i = 0;
+ int i = 0;
u32 base = CONSISTENT_BASE;
do {
@@ -477,6 +526,7 @@ static int __init consistent_init(void)
consistent_pte[i++] = pte;
base += (1 << PGDIR_SHIFT);
} while (base < CONSISTENT_END);
+#endif /* CONFIG_MMU */
return ret;
}
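With the mask check factored out, both the MMU allocator and the new nommu kmalloc-based one fail early for drivers that never set a coherent mask. A hypothetical probe fragment showing the contract from the driver side (names illustrative, not from this patch):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_probe(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	dev->coherent_dma_mask = DMA_BIT_MASK(32);	/* required up front */

	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;		/* an unset mask now fails here */

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}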
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 6fdcbb709827..556c8daf087d 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -16,6 +16,7 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
+#include <linux/sched.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -23,6 +24,7 @@
#include "fault.h"
+#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
@@ -97,6 +99,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
printk("\n");
}
+#else /* CONFIG_MMU */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{ }
+#endif /* CONFIG_MMU */
/*
* Oops. The kernel tried to access some page that wasn't present.
@@ -171,6 +177,7 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
__do_kernel_fault(mm, addr, fsr, regs);
}
+#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
@@ -322,6 +329,13 @@ no_context:
__do_kernel_fault(mm, addr, fsr, regs);
return 0;
}
+#else /* CONFIG_MMU */
+static int
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
/*
* First Level Translation Fault Handler
@@ -340,6 +354,7 @@ no_context:
* interrupt or a critical region, and should only copy the information
* from the master page table, nothing more.
*/
+#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
@@ -378,6 +393,14 @@ bad_area:
do_bad_area(addr, fsr, regs);
return 0;
}
+#else /* CONFIG_MMU */
+static int
+do_translation_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+#endif /* CONFIG_MMU */
/*
* Some section permission faults need to be handled gracefully.
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index ad7bacc693b2..900811cc9130 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -12,6 +12,7 @@
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/page.h>
+#include <asm/setup.h>
#include <asm/mach/arch.h>
#include "mm.h"
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 54b1f721dec8..f1559c227784 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -77,6 +77,7 @@
* Sanity check the PTE configuration for the code below - which makes
* certain assumptions about how these bits are laid out.
*/
+#ifdef CONFIG_MMU
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
@@ -90,6 +91,7 @@
L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
+#endif /* CONFIG_MMU */
/*
* The ARMv6 and ARMv7 set_pte_ext translation function.
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 180a08d03a03..f3fa1c32fe92 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -127,7 +127,9 @@ ENDPROC(cpu_v7_switch_mm)
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
+ ARM( str r1, [r0], #-2048 ) @ linux version
+ THUMB( str r1, [r0] ) @ linux version
+ THUMB( sub r0, r0, #2048 )
bic r3, r1, #0x000003f0
bic r3, r3, #PTE_TYPE_MASK
@@ -232,7 +234,6 @@ __v7_setup:
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
-#endif
/*
* Memory region attributes with SCTLR.TRE=1
*
@@ -265,6 +266,7 @@ __v7_setup:
ldr r6, =0x40e040e0 @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
+#endif
adr r5, v7_crval
ldmia r5, {r5, r6}
#ifdef CONFIG_CPU_ENDIAN_BE8
@@ -273,6 +275,7 @@ __v7_setup:
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear unwanted bits
orr r0, r0, r6 @ set them
+ THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
mov pc, lr @ return to head.S:__ret
ENDPROC(__v7_setup)
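Moving the #endif also keeps the PRRR/NMRR memory-attribute writes out of nommu builds, and the new THUMB() line sets SCTLR.TE so exceptions are taken in Thumb state; without it a Thumb-2 kernel's vectors would be entered as ARM code. A sketch of the control-register fixup with the bit named for clarity:

#define SCTLR_TE	(1u << 30)	/* Thumb exception enable */

static inline unsigned int v7_ctrl_fixup(unsigned int ctrl,
					 unsigned int clear, unsigned int set)
{
	ctrl &= ~clear;			/* bic r0, r0, r5 */
	ctrl |= set;			/* orr r0, r0, r6 */
#ifdef CONFIG_THUMB2_KERNEL
	ctrl |= SCTLR_TE;		/* THUMB( orr r0, r0, #1 << 30 ) */
#endif
	return ctrl;
}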
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 77bad14633e1..00940dc6bb50 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -138,6 +138,32 @@
#define OMAP24XX_GPIO_CLEARDATAOUT 0x0090
#define OMAP24XX_GPIO_SETDATAOUT 0x0094
+#define OMAP4_GPIO_REVISION 0x0000
+#define OMAP4_GPIO_SYSCONFIG 0x0010
+#define OMAP4_GPIO_EOI 0x0020
+#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
+#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
+#define OMAP4_GPIO_IRQSTATUS0 0x002c
+#define OMAP4_GPIO_IRQSTATUS1 0x0030
+#define OMAP4_GPIO_IRQSTATUSSET0 0x0034
+#define OMAP4_GPIO_IRQSTATUSSET1 0x0038
+#define OMAP4_GPIO_IRQSTATUSCLR0 0x003c
+#define OMAP4_GPIO_IRQSTATUSCLR1 0x0040
+#define OMAP4_GPIO_IRQWAKEN0 0x0044
+#define OMAP4_GPIO_IRQWAKEN1 0x0048
+#define OMAP4_GPIO_SYSSTATUS 0x0104
+#define OMAP4_GPIO_CTRL 0x0130
+#define OMAP4_GPIO_OE 0x0134
+#define OMAP4_GPIO_DATAIN 0x0138
+#define OMAP4_GPIO_DATAOUT 0x013c
+#define OMAP4_GPIO_LEVELDETECT0 0x0140
+#define OMAP4_GPIO_LEVELDETECT1 0x0144
+#define OMAP4_GPIO_RISINGDETECT 0x0148
+#define OMAP4_GPIO_FALLINGDETECT 0x014c
+#define OMAP4_GPIO_DEBOUNCENABLE 0x0150
+#define OMAP4_GPIO_DEBOUNCINGTIME 0x0154
+#define OMAP4_GPIO_CLEARDATAOUT 0x0190
+#define OMAP4_GPIO_SETDATAOUT 0x0194
/*
* omap34xx specific GPIO registers
*/
@@ -386,12 +412,16 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
reg += OMAP850_GPIO_DIR_CONTROL;
break;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
reg += OMAP24XX_GPIO_OE;
break;
#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_24XX:
+ reg += OMAP4_GPIO_OE;
+ break;
+#endif
default:
WARN_ON(1);
return;
@@ -459,8 +489,7 @@ static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
l &= ~(1 << gpio);
break;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
if (enable)
reg += OMAP24XX_GPIO_SETDATAOUT;
@@ -469,6 +498,15 @@ static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
l = 1 << gpio;
break;
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_24XX:
+ if (enable)
+ reg += OMAP4_GPIO_SETDATAOUT;
+ else
+ reg += OMAP4_GPIO_CLEARDATAOUT;
+ l = 1 << gpio;
+ break;
+#endif
default:
WARN_ON(1);
return;
@@ -511,12 +549,16 @@ static int __omap_get_gpio_datain(int gpio)
reg += OMAP850_GPIO_DATA_INPUT;
break;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
reg += OMAP24XX_GPIO_DATAIN;
break;
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_24XX:
+ reg += OMAP4_GPIO_DATAIN;
+ break;
+#endif
default:
return -EINVAL;
}
@@ -544,7 +586,11 @@ void omap_set_gpio_debounce(int gpio, int enable)
bank = get_gpio_bank(gpio);
reg = bank->base;
+#ifdef CONFIG_ARCH_OMAP4
+ reg += OMAP4_GPIO_DEBOUNCENABLE;
+#else
reg += OMAP24XX_GPIO_DEBOUNCE_EN;
+#endif
spin_lock_irqsave(&bank->lock, flags);
val = __raw_readl(reg);
@@ -581,7 +627,11 @@ void omap_set_gpio_debounce_time(int gpio, int enc_time)
reg = bank->base;
enc_time &= 0xff;
+#ifdef CONFIG_ARCH_OMAP4
+ reg += OMAP4_GPIO_DEBOUNCINGTIME;
+#else
reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
+#endif
__raw_writel(enc_time, reg);
}
EXPORT_SYMBOL(omap_set_gpio_debounce_time);
@@ -593,23 +643,46 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
{
void __iomem *base = bank->base;
u32 gpio_bit = 1 << gpio;
+ u32 val;
- MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_LOW);
- MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_HIGH);
- MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
- MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
-
+ if (cpu_is_omap44xx()) {
+ MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
+ } else {
+ MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
+ }
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
- if (trigger != 0)
- __raw_writel(1 << gpio, bank->base
+ if (cpu_is_omap44xx()) {
+ if (trigger != 0)
+ __raw_writel(1 << gpio, bank->base+
+ OMAP4_GPIO_IRQWAKEN0);
+ else {
+ val = __raw_readl(bank->base +
+ OMAP4_GPIO_IRQWAKEN0);
+ __raw_writel(val & (~(1 << gpio)), bank->base +
+ OMAP4_GPIO_IRQWAKEN0);
+ }
+ } else {
+			if (trigger != 0)
+				__raw_writel(1 << gpio, bank->base
					+ OMAP24XX_GPIO_SETWKUENA);
-		else
-			__raw_writel(1 << gpio, bank->base
+			else
+				__raw_writel(1 << gpio, bank->base
					+ OMAP24XX_GPIO_CLEARWKUENA);
+		}
} else {
if (trigger != 0)
bank->enabled_non_wakeup_gpios |= gpio_bit;
@@ -617,9 +690,15 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
- bank->level_mask =
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
- __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
+ if (cpu_is_omap44xx()) {
+ bank->level_mask =
+ __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
+ __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
+ } else {
+ bank->level_mask =
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
+ }
}
#endif
@@ -783,12 +862,16 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
reg += OMAP850_GPIO_INT_STATUS;
break;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
reg += OMAP24XX_GPIO_IRQSTATUS1;
break;
#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_24XX:
+ reg += OMAP4_GPIO_IRQSTATUS0;
+ break;
+#endif
default:
WARN_ON(1);
return;
@@ -798,12 +881,16 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
/* Workaround for clearing DSP GPIO interrupts to allow retention */
#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
reg = bank->base + OMAP24XX_GPIO_IRQSTATUS2;
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ reg = bank->base + OMAP4_GPIO_IRQSTATUS1;
+#endif
+ if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
__raw_writel(gpio_mask, reg);
/* Flush posted write for the irq status to avoid spurious interrupts */
__raw_readl(reg);
-#endif
+ }
}
static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
@@ -853,13 +940,18 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
inv = 1;
break;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
reg += OMAP24XX_GPIO_IRQENABLE1;
mask = 0xffffffff;
break;
#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_24XX:
+ reg += OMAP4_GPIO_IRQSTATUSSET0;
+ mask = 0xffffffff;
+ break;
+#endif
default:
WARN_ON(1);
return 0;
@@ -927,8 +1019,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enab
l |= gpio_mask;
break;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
if (enable)
reg += OMAP24XX_GPIO_SETIRQENABLE1;
@@ -937,6 +1028,15 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enab
l = gpio_mask;
break;
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_24XX:
+ if (enable)
+ reg += OMAP4_GPIO_IRQSTATUSSET0;
+ else
+ reg += OMAP4_GPIO_IRQSTATUSCLR0;
+ l = gpio_mask;
+ break;
+#endif
default:
WARN_ON(1);
return;
@@ -1112,11 +1212,14 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
if (bank->method == METHOD_GPIO_850)
isr_reg = bank->base + OMAP850_GPIO_INT_STATUS;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
if (bank->method == METHOD_GPIO_24XX)
isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ if (bank->method == METHOD_GPIO_24XX)
+ isr_reg = bank->base + OMAP4_GPIO_IRQSTATUS0;
+#endif
while(1) {
u32 isr_saved, level_mask = 0;
u32 enabled;
@@ -1553,7 +1656,7 @@ static int __init _omap_gpio_init(void)
gpio_bank_count = OMAP34XX_NR_GPIOS;
gpio_bank = gpio_bank_44xx;
- rev = __raw_readl(gpio_bank[0].base + OMAP24XX_GPIO_REVISION);
+ rev = __raw_readl(gpio_bank[0].base + OMAP4_GPIO_REVISION);
printk(KERN_INFO "OMAP44xx GPIO hardware version %d.%d\n",
(rev >> 4) & 0x0f, rev & 0x0f);
}
@@ -1587,7 +1690,16 @@ static int __init _omap_gpio_init(void)
static const u32 non_wakeup_gpios[] = {
0xe203ffc0, 0x08700040
};
-
+ if (cpu_is_omap44xx()) {
+ __raw_writel(0xffffffff, bank->base +
+ OMAP4_GPIO_IRQSTATUSCLR0);
+ __raw_writew(0x0015, bank->base +
+ OMAP4_GPIO_SYSCONFIG);
+ __raw_writel(0x00000000, bank->base +
+ OMAP4_GPIO_DEBOUNCENABLE);
+ /* Initialize interface clock ungated, module enabled */
+ __raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
+ } else {
__raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_IRQENABLE1);
__raw_writel(0xffffffff, bank->base + OMAP24XX_GPIO_IRQSTATUS1);
__raw_writew(0x0015, bank->base + OMAP24XX_GPIO_SYSCONFIG);
@@ -1595,12 +1707,12 @@ static int __init _omap_gpio_init(void)
/* Initialize interface clock ungated, module enabled */
__raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
+ }
if (i < ARRAY_SIZE(non_wakeup_gpios))
bank->non_wakeup_gpios = non_wakeup_gpios[i];
gpio_count = 32;
}
#endif
-
/* REVISIT eventually switch from OMAP-specific gpio structs
* over to the generic ones
*/
@@ -1686,14 +1798,20 @@ static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg)
wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
break;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
break;
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_24XX:
+ wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ break;
+#endif
default:
continue;
}
@@ -1728,13 +1846,18 @@ static int omap_gpio_resume(struct sys_device *dev)
wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
break;
#endif
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
break;
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_24XX:
+ wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ break;
+#endif
default:
continue;
}
@@ -1778,21 +1901,29 @@ void omap2_gpio_prepare_for_retention(void)
if (!(bank->enabled_non_wakeup_gpios))
continue;
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
bank->saved_datain = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
l1 = __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
l2 = __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ bank->saved_datain = __raw_readl(bank->base +
+ OMAP4_GPIO_DATAIN);
+ l1 = __raw_readl(bank->base + OMAP4_GPIO_FALLINGDETECT);
+ l2 = __raw_readl(bank->base + OMAP4_GPIO_RISINGDETECT);
+#endif
bank->saved_fallingdetect = l1;
bank->saved_risingdetect = l2;
l1 &= ~bank->enabled_non_wakeup_gpios;
l2 &= ~bank->enabled_non_wakeup_gpios;
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
__raw_writel(l1, bank->base + OMAP24XX_GPIO_FALLINGDETECT);
__raw_writel(l2, bank->base + OMAP24XX_GPIO_RISINGDETECT);
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ __raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
+ __raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
+#endif
c++;
}
if (!c) {
@@ -1814,27 +1945,29 @@ void omap2_gpio_resume_after_retention(void)
if (!(bank->enabled_non_wakeup_gpios))
continue;
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
__raw_writel(bank->saved_fallingdetect,
bank->base + OMAP24XX_GPIO_FALLINGDETECT);
__raw_writel(bank->saved_risingdetect,
bank->base + OMAP24XX_GPIO_RISINGDETECT);
+ l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + OMAP4_GPIO_FALLINGDETECT);
+ __raw_writel(bank->saved_risingdetect,
+ bank->base + OMAP4_GPIO_RISINGDETECT);
+ l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
#endif
/* Check if any of the non-wakeup interrupt GPIOs have changed
* state. If so, generate an IRQ by software. This is
* horribly racy, but it's the best we can do to work around
* this silicon bug. */
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
- l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
-#endif
l ^= bank->saved_datain;
l &= bank->non_wakeup_gpios;
if (l) {
u32 old0, old1;
-#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
- defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
old0 = __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
old1 = __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
__raw_writel(old0 | l, bank->base + OMAP24XX_GPIO_LEVELDETECT0);
@@ -1842,6 +1975,20 @@ void omap2_gpio_resume_after_retention(void)
__raw_writel(old0, bank->base + OMAP24XX_GPIO_LEVELDETECT0);
__raw_writel(old1, bank->base + OMAP24XX_GPIO_LEVELDETECT1);
#endif
+#ifdef CONFIG_ARCH_OMAP4
+ old0 = __raw_readl(bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ old1 = __raw_readl(bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+ __raw_writel(old0 | l, bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ __raw_writel(old1 | l, bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+ __raw_writel(old0, bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ __raw_writel(old1, bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+#endif
}
}
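OMAP4 drops the dedicated set/clear-style wake-enable registers, which is why the 44xx path above open-codes a read-modify-write on IRQWAKEN0, and why the suspend/resume hunks point wake_set and wake_clear at that same register. The operation, as a hypothetical helper in the style of this file (locking assumed held by the caller, as in set_24xx_gpio_triggering):

static void omap4_set_gpio_wakeen(struct gpio_bank *bank, int gpio, int enable)
{
	u32 val = __raw_readl(bank->base + OMAP4_GPIO_IRQWAKEN0);

	if (enable)
		val |= 1 << gpio;		/* arm wake on this line */
	else
		val &= ~(1 << gpio);		/* disarm it */
	__raw_writel(val, bank->base + OMAP4_GPIO_IRQWAKEN0);
}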
diff --git a/arch/arm/plat-omap/include/mach/dma.h b/arch/arm/plat-omap/include/mach/dma.h
index 7b939cc01962..72f680b7180d 100644
--- a/arch/arm/plat-omap/include/mach/dma.h
+++ b/arch/arm/plat-omap/include/mach/dma.h
@@ -122,6 +122,11 @@
#define OMAP_DMA4_CCFN(n) (0x60 * (n) + 0xc0)
#define OMAP_DMA4_COLOR(n) (0x60 * (n) + 0xc4)
+/* Additional registers available on OMAP4 */
+#define OMAP_DMA4_CDP(n) (0x60 * (n) + 0xd0)
+#define OMAP_DMA4_CNDP(n) (0x60 * (n) + 0xd4)
+#define OMAP_DMA4_CCDN(n) (0x60 * (n) + 0xd8)
+
/* Dummy defines to keep multi-omap compiles happy */
#define OMAP1_DMA_REVISION 0
#define OMAP1_DMA_IRQSTATUS_L0 0
@@ -311,6 +316,89 @@
#define OMAP34XX_DMA_USIM_TX 79 /* S_DMA_78 */
#define OMAP34XX_DMA_USIM_RX 80 /* S_DMA_79 */
+/* DMA request lines for 44xx */
+#define OMAP44XX_DMA_DSS_DISPC_REQ 6 /* S_DMA_5 */
+#define OMAP44XX_DMA_SYS_REQ2 7 /* S_DMA_6 */
+#define OMAP44XX_DMA_ISS_REQ1 9 /* S_DMA_8 */
+#define OMAP44XX_DMA_ISS_REQ2 10 /* S_DMA_9 */
+#define OMAP44XX_DMA_ISS_REQ3 12 /* S_DMA_11 */
+#define OMAP44XX_DMA_ISS_REQ4 13 /* S_DMA_12 */
+#define OMAP44XX_DMA_DSS_RFBI_REQ 14 /* S_DMA_13 */
+#define OMAP44XX_DMA_SPI3_TX0 15 /* S_DMA_14 */
+#define OMAP44XX_DMA_SPI3_RX0 16 /* S_DMA_15 */
+#define OMAP44XX_DMA_MCBSP2_TX 17 /* S_DMA_16 */
+#define OMAP44XX_DMA_MCBSP2_RX 18 /* S_DMA_17 */
+#define OMAP44XX_DMA_MCBSP3_TX 19 /* S_DMA_18 */
+#define OMAP44XX_DMA_MCBSP3_RX 20 /* S_DMA_19 */
+#define OMAP44XX_DMA_SPI3_TX1 23 /* S_DMA_22 */
+#define OMAP44XX_DMA_SPI3_RX1 24 /* S_DMA_23 */
+#define OMAP44XX_DMA_I2C3_TX 25 /* S_DMA_24 */
+#define OMAP44XX_DMA_I2C3_RX 26 /* S_DMA_25 */
+#define OMAP44XX_DMA_I2C1_TX 27 /* S_DMA_26 */
+#define OMAP44XX_DMA_I2C1_RX 28 /* S_DMA_27 */
+#define OMAP44XX_DMA_I2C2_TX 29 /* S_DMA_28 */
+#define OMAP44XX_DMA_I2C2_RX 30 /* S_DMA_29 */
+#define OMAP44XX_DMA_MCBSP4_TX 31 /* S_DMA_30 */
+#define OMAP44XX_DMA_MCBSP4_RX 32 /* S_DMA_31 */
+#define OMAP44XX_DMA_MCBSP1_TX 33 /* S_DMA_32 */
+#define OMAP44XX_DMA_MCBSP1_RX 34 /* S_DMA_33 */
+#define OMAP44XX_DMA_SPI1_TX0 35 /* S_DMA_34 */
+#define OMAP44XX_DMA_SPI1_RX0 36 /* S_DMA_35 */
+#define OMAP44XX_DMA_SPI1_TX1 37 /* S_DMA_36 */
+#define OMAP44XX_DMA_SPI1_RX1 38 /* S_DMA_37 */
+#define OMAP44XX_DMA_SPI1_TX2 39 /* S_DMA_38 */
+#define OMAP44XX_DMA_SPI1_RX2 40 /* S_DMA_39 */
+#define OMAP44XX_DMA_SPI1_TX3 41 /* S_DMA_40 */
+#define OMAP44XX_DMA_SPI1_RX3 42 /* S_DMA_41 */
+#define OMAP44XX_DMA_SPI2_TX0 43 /* S_DMA_42 */
+#define OMAP44XX_DMA_SPI2_RX0 44 /* S_DMA_43 */
+#define OMAP44XX_DMA_SPI2_TX1 45 /* S_DMA_44 */
+#define OMAP44XX_DMA_SPI2_RX1 46 /* S_DMA_45 */
+#define OMAP44XX_DMA_MMC2_TX 47 /* S_DMA_46 */
+#define OMAP44XX_DMA_MMC2_RX 48 /* S_DMA_47 */
+#define OMAP44XX_DMA_UART1_TX 49 /* S_DMA_48 */
+#define OMAP44XX_DMA_UART1_RX 50 /* S_DMA_49 */
+#define OMAP44XX_DMA_UART2_TX 51 /* S_DMA_50 */
+#define OMAP44XX_DMA_UART2_RX 52 /* S_DMA_51 */
+#define OMAP44XX_DMA_UART3_TX 53 /* S_DMA_52 */
+#define OMAP44XX_DMA_UART3_RX 54 /* S_DMA_53 */
+#define OMAP44XX_DMA_UART4_TX 55 /* S_DMA_54 */
+#define OMAP44XX_DMA_UART4_RX 56 /* S_DMA_55 */
+#define OMAP44XX_DMA_MMC4_TX 57 /* S_DMA_56 */
+#define OMAP44XX_DMA_MMC4_RX 58 /* S_DMA_57 */
+#define OMAP44XX_DMA_MMC5_TX 59 /* S_DMA_58 */
+#define OMAP44XX_DMA_MMC5_RX 60 /* S_DMA_59 */
+#define OMAP44XX_DMA_MMC1_TX 61 /* S_DMA_60 */
+#define OMAP44XX_DMA_MMC1_RX 62 /* S_DMA_61 */
+#define OMAP44XX_DMA_SYS_REQ3 64 /* S_DMA_63 */
+#define OMAP44XX_DMA_MCPDM_UP 65 /* S_DMA_64 */
+#define OMAP44XX_DMA_MCPDM_DL 66 /* S_DMA_65 */
+#define OMAP44XX_DMA_SPI4_TX0 70 /* S_DMA_69 */
+#define OMAP44XX_DMA_SPI4_RX0 71 /* S_DMA_70 */
+#define OMAP44XX_DMA_DSS_DSI1_REQ0 72 /* S_DMA_71 */
+#define OMAP44XX_DMA_DSS_DSI1_REQ1 73 /* S_DMA_72 */
+#define OMAP44XX_DMA_DSS_DSI1_REQ2 74 /* S_DMA_73 */
+#define OMAP44XX_DMA_DSS_DSI1_REQ3 75 /* S_DMA_74 */
+#define OMAP44XX_DMA_DSS_HDMI_REQ 76 /* S_DMA_75 */
+#define OMAP44XX_DMA_MMC3_TX 77 /* S_DMA_76 */
+#define OMAP44XX_DMA_MMC3_RX 78 /* S_DMA_77 */
+#define OMAP44XX_DMA_USIM_TX 79 /* S_DMA_78 */
+#define OMAP44XX_DMA_USIM_RX 80 /* S_DMA_79 */
+#define OMAP44XX_DMA_DSS_DSI2_REQ0 81 /* S_DMA_80 */
+#define OMAP44XX_DMA_DSS_DSI2_REQ1 82 /* S_DMA_81 */
+#define OMAP44XX_DMA_DSS_DSI2_REQ2 83 /* S_DMA_82 */
+#define OMAP44XX_DMA_DSS_DSI2_REQ3 84 /* S_DMA_83 */
+#define OMAP44XX_DMA_ABE_REQ0 101 /* S_DMA_100 */
+#define OMAP44XX_DMA_ABE_REQ1 102 /* S_DMA_101 */
+#define OMAP44XX_DMA_ABE_REQ2 103 /* S_DMA_102 */
+#define OMAP44XX_DMA_ABE_REQ3 104 /* S_DMA_103 */
+#define OMAP44XX_DMA_ABE_REQ4 105 /* S_DMA_104 */
+#define OMAP44XX_DMA_ABE_REQ5 106 /* S_DMA_105 */
+#define OMAP44XX_DMA_ABE_REQ6 107 /* S_DMA_106 */
+#define OMAP44XX_DMA_ABE_REQ7 108 /* S_DMA_107 */
+#define OMAP44XX_DMA_I2C4_TX 124 /* S_DMA_123 */
+#define OMAP44XX_DMA_I2C4_RX 125 /* S_DMA_124 */
+
/*----------------------------------------------------------------------------*/
/* Hardware registers for LCD DMA */
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index bb154ea76769..ec6f81e06d39 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -53,6 +53,11 @@
#define OMAP34XX_MCBSP4_BASE 0x49026000
#define OMAP34XX_MCBSP5_BASE 0x48096000
+#define OMAP44XX_MCBSP1_BASE 0x49022000
+#define OMAP44XX_MCBSP2_BASE 0x49024000
+#define OMAP44XX_MCBSP3_BASE 0x49026000
+#define OMAP44XX_MCBSP4_BASE 0x48074000
+
#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP730)
#define OMAP_MCBSP_REG_DRR2 0x00
@@ -98,7 +103,8 @@
#define AUDIO_DMA_TX OMAP_DMA_MCBSP1_TX
#define AUDIO_DMA_RX OMAP_DMA_MCBSP1_RX
-#elif defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
+#elif defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
+ defined(CONFIG_ARCH_OMAP4)
#define OMAP_MCBSP_REG_DRR2 0x00
#define OMAP_MCBSP_REG_DRR1 0x04
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index efa0e0111f38..e42fa7cfc795 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -191,7 +191,7 @@ void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg *config)
OMAP_MCBSP_WRITE(io_base, MCR2, config->mcr2);
OMAP_MCBSP_WRITE(io_base, MCR1, config->mcr1);
OMAP_MCBSP_WRITE(io_base, PCR0, config->pcr0);
- if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+ if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
OMAP_MCBSP_WRITE(io_base, XCCR, config->xccr);
OMAP_MCBSP_WRITE(io_base, RCCR, config->rccr);
}
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index a2bed62aec21..4fa9903b83cf 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -42,6 +42,7 @@ ENTRY(vfp_null_entry)
mov pc, lr
ENDPROC(vfp_null_entry)
+ .align 2
.LCvfp:
.word vfp_vector
@@ -61,6 +62,7 @@ ENTRY(vfp_testing_entry)
mov pc, r9 @ we have handled the fault
ENDPROC(vfp_testing_entry)
+ .align 2
VFP_arch_address:
.word VFP_arch
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 1aeae38725dd..66dc2d03b7fc 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -209,40 +209,55 @@ ENDPROC(vfp_save_state)
last_VFP_context_address:
.word last_VFP_context
-ENTRY(vfp_get_float)
- add pc, pc, r0, lsl #3
+ .macro tbl_branch, base, tmp, shift
+#ifdef CONFIG_THUMB2_KERNEL
+ adr \tmp, 1f
+ add \tmp, \tmp, \base, lsl \shift
+ mov pc, \tmp
+#else
+ add pc, pc, \base, lsl \shift
mov r0, r0
+#endif
+1:
+ .endm
+
+ENTRY(vfp_get_float)
+ tbl_branch r0, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0
+1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0
mov pc, lr
- mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
+ .org 1b + 8
+1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
mov pc, lr
+ .org 1b + 8
.endr
ENDPROC(vfp_get_float)
ENTRY(vfp_put_float)
- add pc, pc, r1, lsl #3
- mov r0, r0
+ tbl_branch r1, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
+1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
mov pc, lr
- mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
+ .org 1b + 8
+1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
mov pc, lr
+ .org 1b + 8
.endr
ENDPROC(vfp_put_float)
ENTRY(vfp_get_double)
- add pc, pc, r0, lsl #3
- mov r0, r0
+ tbl_branch r0, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- fmrrd r0, r1, d\dr
+1: fmrrd r0, r1, d\dr
mov pc, lr
+ .org 1b + 8
.endr
#ifdef CONFIG_VFPv3
@ d16 - d31 registers
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr
+1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr
mov pc, lr
+ .org 1b + 8
.endr
#endif
@@ -253,17 +268,18 @@ ENTRY(vfp_get_double)
ENDPROC(vfp_get_double)
ENTRY(vfp_put_double)
- add pc, pc, r2, lsl #3
- mov r0, r0
+ tbl_branch r2, r3, #3
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- fmdrr d\dr, r0, r1
+1: fmdrr d\dr, r0, r1
mov pc, lr
+ .org 1b + 8
.endr
#ifdef CONFIG_VFPv3
@ d16 - d31 registers
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
- mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr
+1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr
mov pc, lr
+ .org 1b + 8
.endr
#endif
ENDPROC(vfp_put_double)
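tbl_branch exists because ARM state reads pc as the current instruction plus 8, letting "add pc, pc, r0, lsl #3" index a table of 8-byte slots directly, while Thumb state both reads pc differently and forbids shifted-register writes to pc. The .org 1b + 8 directives pin every .irp slot to exactly 8 bytes so the same index arithmetic works in both states; a sketch of the dispatch arithmetic:

static inline unsigned long vfp_table_slot(unsigned long table,
					   unsigned int index)
{
	/* each slot is 8 bytes: one access instruction plus "mov pc, lr" */
	return table + ((unsigned long)index << 3);
}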