Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/asm-offsets.c    |    2
-rw-r--r--   arch/arm/kernel/calls.S          |    2
-rw-r--r--   arch/arm/kernel/early_printk.c   |   17
-rw-r--r--   arch/arm/kernel/entry-common.S   |   12
-rw-r--r--   arch/arm/kernel/head.S           |   26
-rw-r--r--   arch/arm/kernel/hw_breakpoint.c  |   10
-rw-r--r--   arch/arm/kernel/perf_event.c     |    9
-rw-r--r--   arch/arm/kernel/perf_event_v7.c  |    2
-rw-r--r--   arch/arm/kernel/process.c        |  100
-rw-r--r--   arch/arm/kernel/sched_clock.c    |    4
-rw-r--r--   arch/arm/kernel/setup.c          |   27
-rw-r--r--   arch/arm/kernel/smp.c            |    8
-rw-r--r--   arch/arm/kernel/smp_tlb.c        |   78
-rw-r--r--   arch/arm/kernel/smp_twd.c        |    4
-rw-r--r--   arch/arm/kernel/suspend.c        |    1
-rw-r--r--   arch/arm/kernel/tcm.c            |    1
-rw-r--r--   arch/arm/kernel/tcm.h            |   17
17 files changed, 187 insertions, 133 deletions
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 5ce738b43508..923eec7105cf 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -110,7 +110,7 @@ int main(void)
   BLANK();
 #endif
 #ifdef CONFIG_CPU_HAS_ASID
-  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
   BLANK();
 #endif
   DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 0cc57611fc4f..c6ca7e376773 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,7 +387,7 @@
 /* 375 */	CALL(sys_setns)
 		CALL(sys_process_vm_readv)
 		CALL(sys_process_vm_writev)
-		CALL(sys_ni_syscall)	/* reserved for sys_kcmp */
+		CALL(sys_kcmp)
 		CALL(sys_finit_module)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
index 85aa2b292692..43076536965c 100644
--- a/arch/arm/kernel/early_printk.c
+++ b/arch/arm/kernel/early_printk.c
@@ -29,28 +29,17 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
 	early_write(s, n);
 }
 
-static struct console early_console = {
+static struct console early_console_dev = {
 	.name =		"earlycon",
 	.write =	early_console_write,
 	.flags =	CON_PRINTBUFFER | CON_BOOT,
 	.index =	-1,
 };
 
-asmlinkage void early_printk(const char *fmt, ...)
-{
-	char buf[512];
-	int n;
-	va_list ap;
-
-	va_start(ap, fmt);
-	n = vscnprintf(buf, sizeof(buf), fmt, ap);
-	early_write(buf, n);
-	va_end(ap);
-}
-
 static int __init setup_early_printk(char *buf)
 {
-	register_console(&early_console);
+	early_console = &early_console_dev;
+	register_console(&early_console_dev);
 	return 0;
 }
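Note on the early_printk.c change above: the arch-private early_printk() is deleted because generic code can format the message once the architecture publishes its boot console through the shared early_console pointer, which setup_early_printk() now assigns. A minimal, userspace-compilable sketch of that pattern follows; the struct layout and names are illustrative stand-ins, not the kernel's exact definitions.

#include <stdarg.h>
#include <stdio.h>

struct console {
	void (*write)(struct console *con, const char *s, unsigned n);
};

static struct console *early_console;	/* the arch assigns this at boot */

static void early_printk(const char *fmt, ...)
{
	char buf[512];
	int n;
	va_list ap;

	if (!early_console)
		return;

	va_start(ap, fmt);
	n = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	/* vsnprintf reports the untruncated length; clamp before writing
	 * (the kernel's vscnprintf does this clamping itself) */
	if (n > (int)sizeof(buf) - 1)
		n = (int)sizeof(buf) - 1;
	early_console->write(early_console, buf, (unsigned)n);
}

/* Stand-in for a boot UART so the sketch runs in userspace. */
static void demo_write(struct console *con, const char *s, unsigned n)
{
	(void)con;
	fwrite(s, 1, n, stdout);
}

static struct console demo_console = { .write = demo_write };

int main(void)
{
	early_console = &demo_console;
	early_printk("hello from %s\n", "earlycon");
	return 0;
}

The design point is that only the console hookup stays per-architecture; the formatting loop lives in one place instead of being duplicated per arch.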
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3248cde504ed..fefd7f971437 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
  */
 
 .macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad #4)
 	stmdb	sp!, {r0-r3, lr}
+ UNWIND(.save {r0-r3, lr})
 .endm
 
 .macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
 .endm
 
 ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
 #ifdef CONFIG_DYNAMIC_FTRACE
 	mov	ip, lr
 	ldmia	sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
 #else
 	__mcount
 #endif
+UNWIND(.fnend)
 ENDPROC(__gnu_mcount_nc)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller)
+UNWIND(.fnstart)
 	__ftrace_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_caller)
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
 	__ftrace_graph_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
 #endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 486a15ae9011..8bac553fe213 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -184,13 +184,22 @@ __create_page_tables:
 	orr	r3, r3, #3			@ PGD block type
 	mov	r6, #4				@ PTRS_PER_PGD
 	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
-1:	str	r3, [r0], #4			@ set bottom PGD entry bits
+1:
+#ifdef CONFIG_CPU_ENDIAN_BE8
 	str	r7, [r0], #4			@ set top PGD entry bits
+	str	r3, [r0], #4			@ set bottom PGD entry bits
+#else
+	str	r3, [r0], #4			@ set bottom PGD entry bits
+	str	r7, [r0], #4			@ set top PGD entry bits
+#endif
 	add	r3, r3, #0x1000			@ next PMD table
 	subs	r6, r6, #1
 	bne	1b
 
 	add	r4, r4, #0x1000			@ point to the PMD tables
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	add	r4, r4, #4			@ we only write the bottom word
+#endif
 #endif
 	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
@@ -258,6 +267,11 @@ __create_page_tables:
 	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+	sub	r4, r4, #4			@ Fixup page table pointer
+						@ for 64-bit descriptors
+#endif
+
 #ifdef CONFIG_DEBUG_LL
 #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
 	/*
@@ -276,13 +290,17 @@ __create_page_tables:
 	orr	r3, r7, r3, lsl #SECTION_SHIFT
 #ifdef CONFIG_ARM_LPAE
 	mov	r7, #1 << (54 - 32)		@ XN
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r7, [r0], #4
+	str	r3, [r0], #4
 #else
-	orr	r3, r3, #PMD_SECT_XN
-#endif
 	str	r3, [r0], #4
-#ifdef CONFIG_ARM_LPAE
 	str	r7, [r0], #4
 #endif
+#else
+	orr	r3, r3, #PMD_SECT_XN
+	str	r3, [r0], #4
+#endif
 
 #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
 	/* we don't need any serial debugging mappings */
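Note on the head.S hunks above: an LPAE descriptor is 64 bits wide, but head.S builds it with two 32-bit str instructions, so which word must land at the lower address depends on endianness; on BE8 the high word (r7) has to be stored first. A small host-side C sketch of the same layout question (values purely illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Illustrative LPAE-style descriptor: type/attribute bits live in
	 * the low 32-bit word, address bits spill into the high word. */
	uint64_t desc = 0x0000000100000703ULL;
	uint32_t words[2];

	memcpy(words, &desc, sizeof(desc));

	/* On a little-endian host words[0] is the LOW half of desc; on a
	 * big-endian host it is the HIGH half. Since head.S emits the
	 * descriptor as two fixed-order 32-bit stores, BE8 must swap the
	 * store order for the bytes to land in the same places. */
	printf("word at lower address:  0x%08x\n", (unsigned)words[0]);
	printf("word at higher address: 0x%08x\n", (unsigned)words[1]);
	return 0;
}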
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5eae53e7a2e1..1fd749ee4a1b 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
 	}
 
 	if (err) {
-		pr_warning("CPU %d debug is powered down!\n", cpu);
+		pr_warn_once("CPU %d debug is powered down!\n", cpu);
 		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 		return;
 	}
@@ -987,7 +987,7 @@ clear_vcr:
 	isb();
 
 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-		pr_warning("CPU %d failed to disable vector catch\n", cpu);
+		pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
 		return;
 	}
@@ -1007,7 +1007,7 @@ clear_vcr:
 	}
 
 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-		pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+		pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
 		return;
 	}
@@ -1023,7 +1023,7 @@ out_mdbgen:
 static int __cpuinit dbg_reset_notify(struct notifier_block *self,
 				      unsigned long action, void *cpu)
 {
-	if (action == CPU_ONLINE)
+	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
 		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
 
 	return NOTIFY_OK;
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+static struct notifier_block dbg_cpu_pm_nb = {
 	.notifier_call = dbg_cpu_pm_notify,
 };
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 31e0eb353cd8..8c3094d0f7b7 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
 	return armpmu->get_event_idx(hw_events, event) >= 0;
@@ -400,7 +403,7 @@ __hw_perf_event_init(struct perf_event *event)
 	}
 
 	if (event->group_leader != event) {
-		if (validate_group(event) != 0);
+		if (validate_group(event) != 0)
 			return -EINVAL;
 	}
 
@@ -484,7 +487,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
 };
 
-static void __init armpmu_init(struct arm_pmu *armpmu)
+static void armpmu_init(struct arm_pmu *armpmu)
 {
 	atomic_set(&armpmu->active_events, 0);
 	mutex_init(&armpmu->reserve_mutex);
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8c79a9e70b83..039cffb053a7 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define	ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 #define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 
 /*
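Note on the perf_event.c validation change above: events in the OFF state were previously rejected outright, which broke groups containing events that are off now but will be enabled on exec. A standalone model of the new check; the state values mirror the ordering of the kernel's PERF_EVENT_STATE_* constants (ERROR < OFF < INACTIVE) but are local to this sketch.

#include <stdio.h>

enum state { STATE_ERROR = -2, STATE_OFF = -1, STATE_INACTIVE = 0 };

static int event_rejected(enum state state, int enable_on_exec)
{
	if (state < STATE_OFF)		/* ERROR and below: never schedulable */
		return 1;
	/* OFF events are now let through when exec will enable them */
	if (state == STATE_OFF && !enable_on_exec)
		return 1;
	return 0;
}

int main(void)
{
	printf("ERROR:              %d\n", event_rejected(STATE_ERROR, 0));	/* 1 */
	printf("OFF:                %d\n", event_rejected(STATE_OFF, 0));	/* 1 */
	printf("OFF+enable_on_exec: %d\n", event_rejected(STATE_OFF, 1));	/* 0 */
	return 0;
}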
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 047d3e40e470..c9a5e2ce8aa9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -57,38 +57,6 @@ static const char *isa_modes[] = {
   "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
-static volatile int hlt_counter;
-
-void disable_hlt(void)
-{
-	hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-	hlt_counter--;
-	BUG_ON(hlt_counter < 0);
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-static int __init nohlt_setup(char *__unused)
-{
-	hlt_counter = 1;
-	return 1;
-}
-
-static int __init hlt_setup(char *__unused)
-{
-	hlt_counter = 0;
-	return 1;
-}
-
-__setup("nohlt", nohlt_setup);
-__setup("hlt", hlt_setup);
-
 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 typedef void (*phys_reset_t)(unsigned long);
 
@@ -172,54 +140,38 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-/*
- * The idle thread.
- * We always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	local_fiq_enable();
+}
 
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		ledtrig_cpu(CPU_LED_IDLE_START);
-		while (!need_resched()) {
-#ifdef CONFIG_HOTPLUG_CPU
-			if (cpu_is_offline(smp_processor_id()))
-				cpu_die();
+void arch_cpu_idle_enter(void)
+{
+	ledtrig_cpu(CPU_LED_IDLE_START);
+#ifdef CONFIG_PL310_ERRATA_769419
+	wmb();
 #endif
+}
 
-			/*
-			 * We need to disable interrupts here
-			 * to ensure we don't miss a wakeup call.
-			 */
-			local_irq_disable();
-#ifdef CONFIG_PL310_ERRATA_769419
-			wmb();
+void arch_cpu_idle_exit(void)
+{
+	ledtrig_cpu(CPU_LED_IDLE_END);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
 #endif
-			if (hlt_counter) {
-				local_irq_enable();
-				cpu_relax();
-			} else if (!need_resched()) {
-				stop_critical_timings();
-				if (cpuidle_idle_call())
-					default_idle();
-				start_critical_timings();
-				/*
-				 * default_idle functions must always
-				 * return with IRQs enabled.
-				 */
-				WARN_ON(irqs_disabled());
-			} else
-				local_irq_enable();
-		}
-		ledtrig_cpu(CPU_LED_IDLE_END);
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+
+/*
+ * Called from the core idle loop.
+ */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		default_idle();
 }
 
 static char reboot_mode = 'h';
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index bd6f56b9ec21..59d2adb764a9 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
 
 static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }
 
-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 {
 	u64 epoch_ns;
 	u32 epoch_cyc;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3f6cbb2e3eda..234e339196c0 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -56,7 +56,6 @@
 #include <asm/virt.h>
 
 #include "atags.h"
-#include "tcm.h"
 
 
 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -353,6 +352,23 @@ void __init early_print(const char *str, ...)
 	printk("%s", buf);
 }
 
+static void __init cpuid_init_hwcaps(void)
+{
+	unsigned int divide_instrs;
+
+	if (cpu_architecture() < CPU_ARCH_ARMv7)
+		return;
+
+	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+	switch (divide_instrs) {
+	case 2:
+		elf_hwcap |= HWCAP_IDIVA;
+	case 1:
+		elf_hwcap |= HWCAP_IDIVT;
+	}
+}
+
 static void __init feat_v6_fixup(void)
 {
 	int id = read_cpuid_id();
@@ -483,8 +499,11 @@ static void __init setup_processor(void)
 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
 		 list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
+
+	cpuid_init_hwcaps();
+
 #ifndef CONFIG_ARM_THUMB
-	elf_hwcap &= ~HWCAP_THUMB;
+	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
 	feat_v6_fixup();
@@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 	size -= start & ~PAGE_MASK;
 	bank->start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
 	if (bank->start + size < bank->start) {
 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 			"32-bit physical address space\n", (long long)start);
@@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p)
 
 	reserve_crashkernel();
 
-	tcm_init();
-
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 #endif
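Note on cpuid_init_hwcaps() above: ID_ISAR0 bits [27:24] encode hardware-divide support (1 = SDIV/UDIV in Thumb only, 2 = in both ARM and Thumb), and the switch deliberately falls through so that ARM-mode divide implies Thumb-mode divide. A standalone model of the decode; the HWCAP bit values match the ARM uapi hwcap header, shown here for illustration only.

#include <stdio.h>

#define HWCAP_IDIVA	(1 << 17)
#define HWCAP_IDIVT	(1 << 18)

static unsigned int decode_divide_hwcaps(unsigned int isar0)
{
	unsigned int hwcap = 0;
	unsigned int divide_instrs = (isar0 & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:			/* SDIV/UDIV in both ARM and Thumb */
		hwcap |= HWCAP_IDIVA;
		/* fall through: ARM-mode divide implies Thumb-mode divide */
	case 1:			/* SDIV/UDIV in Thumb only */
		hwcap |= HWCAP_IDIVT;
	}
	return hwcap;
}

int main(void)
{
	printf("field=2 -> 0x%x\n", decode_divide_hwcaps(0x02101110U)); /* both bits */
	printf("field=1 -> 0x%x\n", decode_divide_hwcaps(0x01101110U)); /* IDIVT only */
	printf("field=0 -> 0x%x\n", decode_divide_hwcaps(0x00101110U)); /* none */
	return 0;
}

This also explains the companion hunk: a kernel built without CONFIG_ARM_THUMB must clear HWCAP_IDIVT again, since Thumb userspace is not supported there.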
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1bdfd87c8e41..4619177bcfe6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	 * switch away from it before attempting any exclusive accesses.
 	 */
 	cpu_switch_mm(mm->pgd, mm);
+	local_flush_bp_all();
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
 
@@ -335,7 +336,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, it's off to the idle thread for us
 	 */
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -479,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
 	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
 			  CLOCK_EVT_FEAT_PERIODIC |
 			  CLOCK_EVT_FEAT_DUMMY;
-	evt->rating	= 400;
+	evt->rating	= 100;
 	evt->mult	= 1;
 	evt->set_mode	= broadcast_timer_set_mode;
 
@@ -672,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
-	if (arm_delay_ops.const_clock)
-		return NOTIFY_OK;
-
 	if (!per_cpu(l_p_j_ref, cpu)) {
 		per_cpu(l_p_j_ref, cpu) =
 			per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 02c5d2ce23bf..e82e1d248772 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -12,6 +12,7 @@
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /**********************************************************************/
 
@@ -64,12 +65,77 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
 }
 
+static inline void ipi_flush_bp_all(void *ignored)
+{
+	local_flush_bp_all();
+}
+
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+	unsigned int midr = read_cpuid_id();
+
+	/* Cortex-A15 r0p0..r3p2 affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+		return 0;
+	return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+	return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+	dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+	if (!erratum_a15_798181())
+		return;
+
+	dummy_flush_tlb_a15_erratum();
+	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+			       NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+	int cpu;
+	cpumask_t mask = { CPU_BITS_NONE };
+
+	if (!erratum_a15_798181())
+		return;
+
+	dummy_flush_tlb_a15_erratum();
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are running
+		 * the same ASID as the one being invalidated. There is no
+		 * need for locking around the active_asids check since the
+		 * switch_mm() function has at least one dmb() (as required by
+		 * this workaround) in case a context switch happens on
+		 * another CPU after the condition below.
+		 */
+		if (atomic64_read(&mm->context.id) ==
+		    atomic64_read(&per_cpu(active_asids, cpu)))
+			cpumask_set_cpu(cpu, &mask);
+	}
+	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
 void flush_tlb_all(void)
 {
 	if (tlb_ops_need_broadcast())
 		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 	else
 		local_flush_tlb_all();
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -78,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
 	else
 		local_flush_tlb_mm(mm);
+	broadcast_tlb_mm_a15_erratum(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -90,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 					&ta, 1);
 	} else
 		local_flush_tlb_page(vma, uaddr);
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -100,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 	} else
 		local_flush_tlb_kernel_page(kaddr);
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -114,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 					&ta, 1);
 	} else
 		local_flush_tlb_range(vma, start, end);
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -125,5 +195,13 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 	} else
 		local_flush_tlb_kernel_range(start, end);
+	broadcast_tlb_a15_erratum();
 }
 
+void flush_bp_all(void)
+{
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_bp_all, NULL, 1);
+	else
+		local_flush_bp_all();
+}
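Note on erratum_a15_798181() above: the mask 0xff0ffff0 keeps the MIDR implementer (bits 31:24), architecture (19:16) and primary part number (15:4) fields while clearing variant (23:20) and revision (3:0), so the first comparison matches any Cortex-A15; the second compares the full value against r3p2 to bound the affected revisions. A standalone check with sample values:

#include <stdio.h>
#include <stdint.h>

static int erratum_a15_798181(uint32_t midr)
{
	/* Cortex-A15 r0p0..r3p2 affected, same test as the kernel code above */
	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
		return 0;
	return 1;
}

int main(void)
{
	printf("A15 r0p0: %d\n", erratum_a15_798181(0x410fc0f0));	/* 1 */
	printf("A15 r3p2: %d\n", erratum_a15_798181(0x413fc0f2));	/* 1 */
	printf("A15 r3p3: %d\n", erratum_a15_798181(0x413fc0f3));	/* 0 */
	printf("A15 r4p0: %d\n", erratum_a15_798181(0x414fc0f0));	/* 0 */
	return 0;
}

The unmasked greater-than works because variant sits above revision in the MIDR encoding, so "midr > r3p2's MIDR" is exactly "revision newer than r3p2" once the part match has succeeded.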
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index c092115d903a..3f2565037480 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -22,6 +22,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 
+#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/localtimer.h>
 
@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
 	struct device_node *np;
 	int err;
 
+	if (!is_smp() || !setup_max_cpus)
+		return;
+
 	np = of_find_matching_node(NULL, twd_of_match);
 	if (!np)
 		return;
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 358bca3a995e..c59c97ea8268 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	ret = __cpu_suspend(arg, fn);
 	if (ret == 0) {
 		cpu_switch_mm(mm->pgd, mm);
+		local_flush_bp_all();
 		local_flush_tlb_all();
 	}
 
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 30ae6bb4a310..f50f19e5c138 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -17,7 +17,6 @@
 #include <asm/mach/map.h>
 #include <asm/memory.h>
 #include <asm/system_info.h>
-#include "tcm.h"
 
 static struct gen_pool *tcm_pool;
 static bool dtcm_present;
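Note on the tcm.h removal (the file is deleted below): the header's only job was the usual optional-subsystem stub, declaring the real tcm_init() under CONFIG_HAVE_TCM and an empty inline otherwise; the setup.c hunk further up drops both the include and the tcm_init() call, with the initialization presumably moving elsewhere in the same series (not shown in this section). A sketch of that stub pattern follows; the Kconfig switch is faked with a plain macro, and since the kernel's plain "inline" relies on gnu89 inline semantics, the portable "static inline" spelling is used here.

#include <stdio.h>

/* Illustrative stand-in for the real CONFIG_HAVE_TCM Kconfig symbol. */
/* #define HAVE_TCM */

#ifdef HAVE_TCM
void tcm_init(void);			/* real implementation elsewhere */
#else
static inline void tcm_init(void) { }	/* compiles away when TCM is absent */
#endif

int main(void)
{
	tcm_init();
	puts("boot continues with or without TCM");
	return 0;
}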
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/kernel/tcm.h
deleted file mode 100644
index 8015ad434a40..000000000000
--- a/arch/arm/kernel/tcm.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2008-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * TCM memory handling for ARM systems
- *
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Rickard Andersson <rickard.andersson@stericsson.com>
- */
-
-#ifdef CONFIG_HAVE_TCM
-void __init tcm_init(void);
-#else
-/* No TCM support, just blank inlines to be optimized out */
-inline void tcm_init(void)
-{
-}
-#endif