Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                 |  2
-rw-r--r--  arch/arm/kernel/head.S           | 38
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c  | 44
-rw-r--r--  arch/arm/kernel/module.c         | 22
-rw-r--r--  arch/arm/kernel/perf_event.c     |  2
-rw-r--r--  arch/arm/mach-sa1100/collie.c    |  3
-rw-r--r--  arch/arm/mm/Kconfig              |  6
-rw-r--r--  arch/arm/oprofile/common.c       |  2
8 files changed, 82 insertions, 37 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5cff165b7eb0..26d45e5b636b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1391,7 +1391,7 @@ config AEABI
config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
- depends on AEABI && EXPERIMENTAL
+ depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
default y
help
This option preserves the old syscall interface along with the
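For readers unfamiliar with what OABI_COMPAT preserves: the two ABIs differ in how a binary issues a system call. The sketch below is illustrative only (user-space GCC inline assembly, ARM state; getpid, syscall number 20, is just an example) and is not code from this patch.

/*
 * EABI convention: syscall number in r7, always "svc #0".
 */
static long getpid_eabi(void)
{
	register long r7 asm("r7") = 20;	/* __NR_getpid */
	register long r0 asm("r0");

	asm volatile("svc #0" : "=r"(r0) : "r"(r7) : "memory");
	return r0;
}

/*
 * The old ABI instead encodes the number in the instruction itself:
 *
 *	swi	#(0x900000 + 20)	@ no r7 involved
 *
 * OABI_COMPAT keeps the kernel entry path that decodes such
 * instructions, so old binaries keep running on an EABI kernel.
 */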
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c0225da3fb21..f06ff9feb0db 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on)
#ifdef CONFIG_SMP_ON_UP
+ __INIT
__fixup_smp:
and r3, r9, #0x000f0000 @ architecture version
teq r3, #0x000f0000 @ CPU ID supported?
@@ -415,18 +416,7 @@ __fixup_smp_on_up:
sub r3, r0, r3
add r4, r4, r3
add r5, r5, r3
-2: cmp r4, r5
- movhs pc, lr
- ldmia r4!, {r0, r6}
- ARM( str r6, [r0, r3] )
- THUMB( add r0, r0, r3 )
-#ifdef __ARMEB__
- THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
-#endif
- THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
- THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
- THUMB( strh r6, [r0] )
- b 2b
+ b __do_fixup_smp_on_up
ENDPROC(__fixup_smp)
.align
@@ -440,7 +430,31 @@ smp_on_up:
ALT_SMP(.long 1)
ALT_UP(.long 0)
.popsection
+#endif
+ .text
+__do_fixup_smp_on_up:
+ cmp r4, r5
+ movhs pc, lr
+ ldmia r4!, {r0, r6}
+ ARM( str r6, [r0, r3] )
+ THUMB( add r0, r0, r3 )
+#ifdef __ARMEB__
+ THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
+ THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
+ THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
+ THUMB( strh r6, [r0] )
+ b __do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+ stmfd sp!, {r4 - r6, lr}
+ mov r4, r0
+ add r5, r0, r1
+ mov r3, #0
+ bl __do_fixup_smp_on_up
+ ldmfd sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
#include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index c9f3f0467570..d600bd350704 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void)
u32 didr;
/* Do we implement the extended CPUID interface? */
- if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
- pr_warning("CPUID feature registers not supported. "
- "Assuming v6 debug is present.\n");
+ if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+ "CPUID feature registers not supported. "
+ "Assuming v6 debug is present.\n"))
return ARM_DEBUG_ARCH_V6;
- }
ARM_DBG_READ(c0, 0, didr);
return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void)
return debug_arch;
}
+static int debug_arch_supported(void)
+{
+ u8 arch = get_debug_arch();
+ return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
/* Determine number of BRP register available. */
static int get_num_brp_resources(void)
{
@@ -268,6 +273,9 @@ out:
int hw_breakpoint_slots(int type)
{
+ if (!debug_arch_supported())
+ return 0;
+
/*
* We can be called early, so don't rely on
* our static variables being initialised.
@@ -834,11 +842,11 @@ static void reset_ctrl_regs(void *unused)
/*
* v7 debug contains save and restore registers so that debug state
- * can be maintained across low-power modes without leaving
- * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
- * we can write to the debug registers out of reset, so we must
- * unlock the OS Lock Access Register to avoid taking undefined
- * instruction exceptions later on.
+ * can be maintained across low-power modes without leaving the debug
+ * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+ * the debug registers out of reset, so we must unlock the OS Lock
+ * Access Register to avoid taking undefined instruction exceptions
+ * later on.
*/
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
/*
@@ -882,7 +890,7 @@ static int __init arch_hw_breakpoint_init(void)
debug_arch = get_debug_arch();
- if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+ if (!debug_arch_supported()) {
pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
return 0;
}
@@ -899,18 +907,18 @@ static int __init arch_hw_breakpoint_init(void)
pr_info("%d breakpoint(s) reserved for watchpoint "
"single-step.\n", core_num_reserved_brps);
+ /*
+ * Reset the breakpoint resources. We assume that a halting
+ * debugger will leave the world in a nice state for us.
+ */
+ on_each_cpu(reset_ctrl_regs, NULL, 1);
+
ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {
+ max_watchpoint_len = 4;
pr_warning("halting debug mode enabled. Assuming maximum "
- "watchpoint size of 4 bytes.");
+ "watchpoint size of %u bytes.", max_watchpoint_len);
} else {
- /*
- * Reset the breakpoint resources. We assume that a halting
- * debugger will leave the world in a nice state for us.
- */
- smp_call_function(reset_ctrl_regs, NULL, 1);
- reset_ctrl_regs(NULL);
-
/* Work out the maximum supported watchpoint length. */
max_watchpoint_len = get_max_wp_len();
pr_info("maximum watchpoint size is %u bytes.\n",
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2cfe8161b478..6d4105e6872f 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
+#include <asm/smp_plat.h>
#include <asm/unwind.h>
#ifdef CONFIG_XIP_KERNEL
@@ -268,12 +269,28 @@ struct mod_unwind_map {
const Elf_Shdr *txt_sec;
};
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+ const Elf_Shdr *sechdrs, const char *name)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+
+ return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
+ const Elf_Shdr * __maybe_unused s = NULL;
#ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+ const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
struct mod_unwind_map maps[ARM_SEC_MAX];
int i;
@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size);
#endif
+ s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+ if (s && !is_smp())
+ fixup_smp((void *)s->sh_addr, s->sh_size);
return 0;
}
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5efa2647a2fb..d150ad1ccb5d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail,
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
- if (tail >= buftail.fp)
+ if (tail + 1 >= buftail.fp)
return NULL;
return buftail.fp - 1;
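On the one-line perf_event.c change (mirrored in oprofile below): the walker reads a whole frame record at tail, so the saved frame pointer must land strictly beyond the end of that record, not merely beyond its start, for the walk to make progress. Roughly, with the frame layout used there:

struct frame_tail {
	struct frame_tail *fp;		/* caller's frame record */
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/* Sketch of the progress check.  The record just read spans
 * [tail, tail + 1); the old test (tail >= fp) still accepted frame
 * pointers landing inside that record, so the walk could revisit
 * overlapping records.  Requiring fp > tail + 1 guarantees each step
 * moves strictly up the stack. */
static struct frame_tail *next_tail_sketch(struct frame_tail *tail,
					   struct frame_tail *record)
{
	if (tail + 1 >= record->fp)
		return NULL;
	return record->fp - 1;
}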
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index d43c5ef58eb6..bd3e1bfdd6aa 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -241,6 +241,9 @@ static struct locomo_platform_data locomo_info = {
struct platform_device collie_locomo_device = {
.name = "locomo",
.id = 0,
+ .dev = {
+ .platform_data = &locomo_info,
+ },
.num_resources = ARRAY_SIZE(locomo_resources),
.resource = locomo_resources,
};
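The collie.c hunk attaches the LoCoMo platform data that was previously declared but never passed to the device. A hypothetical consumer on the driver side (function name invented) would retrieve it as follows:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <asm/hardware/locomo.h>

/* Hypothetical probe: with .dev.platform_data wired up above,
 * dev_get_platdata() returns &locomo_info instead of NULL. */
static int locomo_probe_sketch(struct platform_device *pdev)
{
	struct locomo_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata)
		return -EINVAL;		/* no platform data supplied */

	/* ... use pdata to set up IRQs etc. ... */
	return 0;
}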
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9d30c6f804b9..e4509bae8fc4 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -405,7 +405,7 @@ config CPU_V6
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
depends on CPU_V6 || CPU_V7
- default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
+ default y if SMP
help
Say Y here if your ARMv6 processor supports the 'K' extension.
This enables the kernel to use some instructions not present
@@ -416,7 +416,7 @@ config CPU_32v6K
# ARMv7
config CPU_V7
bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
- select CPU_32v6K if !ARCH_OMAP2
+ select CPU_32v6K
select CPU_32v7
select CPU_ABRT_EV7
select CPU_PABRT_V7
@@ -644,7 +644,7 @@ config ARM_THUMBEE
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
- depends on CPU_V7 && !CPU_V6
+ depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
select HAVE_PROC_CPU if PROC_FS
default y if SMP
help
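For context on SWP_EMULATE: SWP/SWPB perform an atomic exchange, and the emulation traps the deprecated instruction and redoes the exchange with an exclusive-monitor loop. A conceptual C equivalent, not the kernel's actual swp_emulate.c code:

/* Conceptual sketch: "SWP Rd, Rm, [Rn]" atomically swaps *Rn with Rm
 * and returns the old value.  The emulator gets the same effect with
 * an LDREX/STREX retry loop; the builtin below expands to one on
 * ARMv6K and later. */
static inline unsigned long swp_sketch(unsigned long newval,
				       unsigned long *addr)
{
	return __atomic_exchange_n(addr, newval, __ATOMIC_SEQ_CST);
}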
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 8aa974491dfc..6adda2b5fa31 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -85,7 +85,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail)
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
- if (tail >= buftail[0].fp)
+ if (tail + 1 >= buftail[0].fp)
return NULL;
return buftail[0].fp-1;