author    | Mark Brown <broonie@opensource.wolfsonmicro.com> | 2010-08-16 18:42:58 +0100
committer | Mark Brown <broonie@opensource.wolfsonmicro.com> | 2010-08-16 18:42:58 +0100
commit    | e4862f2f6f5653dfb67f3ba2b6f0bc74516ed51a (patch)
tree      | 1db5a0540a4eecfad9b7daee476b985e82ddc810 /arch/arm/kernel
parent    | ec62dbd7eb8e3dddb221da89ecbcea0fc3dee8c1 (diff)
parent    | b2c1e07b81a126e5846dfc3d36f559d861df59f4 (diff)
download  | blackbird-op-linux-e4862f2f6f5653dfb67f3ba2b6f0bc74516ed51a.tar.gz
          | blackbird-op-linux-e4862f2f6f5653dfb67f3ba2b6f0bc74516ed51a.zip
Merge branch 'for-2.6.36' into for-2.6.37
Fairly simple conflicts; the most serious ones are the i.MX ones, which I
suspect now need another rename.
Conflicts:
arch/arm/mach-mx2/clock_imx27.c
arch/arm/mach-mx2/devices.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/board-zoom2.c
sound/soc/fsl/mpc5200_dma.c
sound/soc/fsl/mpc5200_dma.h
sound/soc/fsl/mpc8610_hpcd.c
sound/soc/pxa/spitz.c
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r-- | arch/arm/kernel/Makefile           |   5
-rw-r--r-- | arch/arm/kernel/asm-offsets.c      |   3
-rw-r--r-- | arch/arm/kernel/compat.c           |   7
-rw-r--r-- | arch/arm/kernel/compat.h           |   2
-rw-r--r-- | arch/arm/kernel/crash_dump.c       |  60
-rw-r--r-- | arch/arm/kernel/entry-armv.S       |  45
-rw-r--r-- | arch/arm/kernel/entry-common.S     | 124
-rw-r--r-- | arch/arm/kernel/etm.c              |   4
-rw-r--r-- | arch/arm/kernel/irq.c              |  41
-rw-r--r-- | arch/arm/kernel/kgdb.c             | 124
-rw-r--r-- | arch/arm/kernel/kprobes-decode.c   |   5
-rw-r--r-- | arch/arm/kernel/machine_kexec.c    |  14
-rw-r--r-- | arch/arm/kernel/module.c           |  34
-rw-r--r-- | arch/arm/kernel/perf_event.c       |  20
-rw-r--r-- | arch/arm/kernel/process.c          |  65
-rw-r--r-- | arch/arm/kernel/ptrace.c           |  96
-rw-r--r-- | arch/arm/kernel/relocate_kernel.S  |   6
-rw-r--r-- | arch/arm/kernel/setup.c            | 111
-rw-r--r-- | arch/arm/kernel/smp.c              |  17
-rw-r--r-- | arch/arm/kernel/smp_twd.c          |   3
-rw-r--r-- | arch/arm/kernel/sys_arm.c          |   4
-rw-r--r-- | arch/arm/kernel/sys_oabi-compat.c  |   6
-rw-r--r-- | arch/arm/kernel/tcm.c              | 118
-rw-r--r-- | arch/arm/kernel/traps.c            |  41
24 files changed, 718 insertions, 237 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 26d302c28e13..980b78e31328 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -13,10 +13,12 @@ CFLAGS_REMOVE_return_address.o = -pg # Object file lists. -obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \ +obj-y := elf.o entry-armv.o entry-common.o irq.o \ process.o ptrace.o return_address.o setup.o signal.o \ sys_arm.o stacktrace.o time.o traps.o +obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o + obj-$(CONFIG_LEDS) += leds.o obj-$(CONFIG_OC_ETM) += etm.o @@ -39,6 +41,7 @@ obj-$(CONFIG_ARM_THUMBEE) += thumbee.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_ARM_UNWIND) += unwind.o obj-$(CONFIG_HAVE_TCM) += tcm.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 883511522fca..85f2a019f77b 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -40,6 +40,9 @@ int main(void) { DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); +#ifdef CONFIG_CC_STACKPROTECTOR + DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); +#endif BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); diff --git a/arch/arm/kernel/compat.c b/arch/arm/kernel/compat.c index 0a1385442f43..925652318b8b 100644 --- a/arch/arm/kernel/compat.c +++ b/arch/arm/kernel/compat.c @@ -217,10 +217,3 @@ void __init convert_to_tag_list(struct tag *tags) struct param_struct *params = (struct param_struct *)tags; build_tag_list(params, ¶ms->u2); } - -void __init squash_mem_tags(struct tag *tag) -{ - for (; tag->hdr.size; tag = tag_next(tag)) - if (tag->hdr.tag == ATAG_MEM) - tag->hdr.tag = ATAG_NONE; -} diff --git a/arch/arm/kernel/compat.h b/arch/arm/kernel/compat.h index 27e61a68bd1c..39264ab1b9c6 100644 --- a/arch/arm/kernel/compat.h +++ b/arch/arm/kernel/compat.h @@ -9,5 +9,3 @@ */ extern void convert_to_tag_list(struct tag *tags); - -extern void squash_mem_tags(struct tag *tag); diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c new file mode 100644 index 000000000000..cd3b853a8a6d --- /dev/null +++ b/arch/arm/kernel/crash_dump.c @@ -0,0 +1,60 @@ +/* + * arch/arm/kernel/crash_dump.c + * + * Copyright (C) 2010 Nokia Corporation. + * Author: Mika Westerberg + * + * This code is taken from arch/x86/kernel/crash_dump_64.c + * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) + * Copyright (C) IBM Corporation, 2004. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/errno.h> +#include <linux/crash_dump.h> +#include <linux/uaccess.h> +#include <linux/io.h> + +/* stores the physical address of elf header of crash image */ +unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; + +/** + * copy_oldmem_page() - copy one page from old kernel memory + * @pfn: page frame number to be copied + * @buf: buffer where the copied page is placed + * @csize: number of bytes to copy + * @offset: offset in bytes into the page + * @userbuf: if set, @buf is int he user address space + * + * This function copies one page from old kernel memory into buffer pointed by + * @buf. If @buf is in userspace, set @userbuf to %1. 
Returns number of bytes + * copied or negative error in case of failure. + */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user(buf, vaddr + offset, csize)) { + iounmap(vaddr); + return -EFAULT; + } + } else { + memcpy(buf, vaddr + offset, csize); + } + + iounmap(vaddr); + return csize; +} diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 7ee48e7f8f31..bb8e93a76407 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -22,6 +22,7 @@ #include <asm/thread_notify.h> #include <asm/unwind.h> #include <asm/unistd.h> +#include <asm/tls.h> #include "entry-header.S" @@ -162,8 +163,6 @@ ENDPROC(__und_invalid) @ r4 - orig_r0 (see pt_regs definition in ptrace.h) @ stmia r5, {r0 - r4} - - asm_trace_hardirqs_off .endm .align 5 @@ -204,7 +203,7 @@ __dabt_svc: @ @ IRQs off again before pulling preserved data off the stack @ - disable_irq + disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -218,6 +217,9 @@ ENDPROC(__dabt_svc) __irq_svc: svc_entry +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif #ifdef CONFIG_PREEMPT get_thread_info tsk ldr r8, [tsk, #TI_PREEMPT] @ get preempt count @@ -291,7 +293,7 @@ __und_svc: @ @ IRQs off again before pulling preserved data off the stack @ -1: disable_irq +1: disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -327,7 +329,7 @@ __pabt_svc: @ @ IRQs off again before pulling preserved data off the stack @ - disable_irq + disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -393,8 +395,6 @@ ENDPROC(__pabt_svc) @ Clear FP to mark the first stack frame @ zero_fp - - asm_trace_hardirqs_off .endm .macro kuser_cmpxchg_check @@ -465,9 +465,6 @@ __irq_usr: THUMB( movne r0, #0 ) THUMB( strne r0, [r0] ) #endif -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on -#endif mov why, #0 b ret_to_user @@ -739,11 +736,11 @@ ENTRY(__switch_to) #ifdef CONFIG_MMU ldr r6, [r2, #TI_CPU_DOMAIN] #endif -#if defined(CONFIG_HAS_TLS_REG) - mcr p15, 0, r3, c13, c0, 3 @ set TLS register -#elif !defined(CONFIG_TLS_REG_EMUL) - mov r4, #0xffff0fff - str r3, [r4, #-15] @ TLS val at 0xffff0ff0 + set_tls r3, r4, r5 +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + ldr r7, [r2, #TI_TASK] + ldr r8, =__stack_chk_guard + ldr r7, [r7, #TSK_STACK_CANARY] #endif #ifdef CONFIG_MMU mcr p15, 0, r6, c3, c0, 0 @ Set domain register @@ -753,6 +750,9 @@ ENTRY(__switch_to) ldr r0, =thread_notify_head mov r1, #THREAD_NOTIFY_SWITCH bl atomic_notifier_call_chain +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + str r7, [r8] +#endif THUMB( mov ip, r4 ) mov r0, r5 ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously @@ -1009,17 +1009,12 @@ kuser_cmpxchg_fixup: */ __kuser_get_tls: @ 0xffff0fe0 - -#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL) - ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 -#else - mrc p15, 0, r0, c13, c0, 3 @ read TLS register -#endif + ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init usr_ret lr - - .rep 5 - .word 0 @ pad up to __kuser_helper_version - .endr + mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code + .rep 4 + .word 0 @ 0xffff0ff0 software TLS value, then + .endr @ pad up to __kuser_helper_version /* * Reference declaration: diff --git a/arch/arm/kernel/entry-common.S 
b/arch/arm/kernel/entry-common.S index 2c1db77d7848..f05a35a59694 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -92,75 +92,111 @@ ENDPROC(ret_from_fork) #define CALL(x) .long x #ifdef CONFIG_FUNCTION_TRACER +/* + * When compiling with -pg, gcc inserts a call to the mcount routine at the + * start of every function. In mcount, apart from the function's address (in + * lr), we need to get hold of the function's caller's address. + * + * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this: + * + * bl mcount + * + * These versions have the limitation that in order for the mcount routine to + * be able to determine the function's caller's address, an APCS-style frame + * pointer (which is set up with something like the code below) is required. + * + * mov ip, sp + * push {fp, ip, lr, pc} + * sub fp, ip, #4 + * + * With EABI, these frame pointers are not available unless -mapcs-frame is + * specified, and if building as Thumb-2, not even then. + * + * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount, + * with call sites like: + * + * push {lr} + * bl __gnu_mcount_nc + * + * With these compilers, frame pointers are not necessary. + * + * mcount can be thought of as a function called in the middle of a subroutine + * call. As such, it needs to be transparent for both the caller and the + * callee: the original lr needs to be restored when leaving mcount, and no + * registers should be clobbered. (In the __gnu_mcount_nc implementation, we + * clobber the ip register. This is OK because the ARM calling convention + * allows it to be clobbered in subroutines and doesn't use it to hold + * parameters.) + */ #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) - stmdb sp!, {r0-r3, lr} - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE + stmdb sp!, {r0-r3, lr} + mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE .globl mcount_call mcount_call: - bl ftrace_stub - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + bl ftrace_stub + ldr lr, [fp, #-4] @ restore lr + ldmia sp!, {r0-r3, pc} ENTRY(ftrace_caller) - stmdb sp!, {r0-r3, lr} - ldr r1, [fp, #-4] - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE + stmdb sp!, {r0-r3, lr} + ldr r1, [fp, #-4] + mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE .globl ftrace_call ftrace_call: - bl ftrace_stub - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + bl ftrace_stub + ldr lr, [fp, #-4] @ restore lr + ldmia sp!, {r0-r3, pc} #else ENTRY(__gnu_mcount_nc) - stmdb sp!, {r0-r3, lr} - ldr r0, =ftrace_trace_function - ldr r2, [r0] - adr r0, ftrace_stub - cmp r0, r2 - bne gnu_trace - ldmia sp!, {r0-r3, ip, lr} - mov pc, ip + stmdb sp!, {r0-r3, lr} + ldr r0, =ftrace_trace_function + ldr r2, [r0] + adr r0, ftrace_stub + cmp r0, r2 + bne gnu_trace + ldmia sp!, {r0-r3, ip, lr} + mov pc, ip gnu_trace: - ldr r1, [sp, #20] @ lr of instrumented routine - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE - mov lr, pc - mov pc, r2 - ldmia sp!, {r0-r3, ip, lr} - mov pc, ip + ldr r1, [sp, #20] @ lr of instrumented routine + mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE + mov lr, pc + mov pc, r2 + ldmia sp!, {r0-r3, ip, lr} + mov pc, ip ENTRY(mcount) - stmdb sp!, {r0-r3, lr} - ldr r0, =ftrace_trace_function - ldr r2, [r0] - adr r0, ftrace_stub - cmp r0, r2 - bne trace - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + stmdb sp!, {r0-r3, lr} + ldr r0, =ftrace_trace_function + ldr r2, [r0] + adr r0, ftrace_stub + cmp r0, r2 + bne trace + ldr lr, [fp, #-4] @ restore lr + ldmia sp!, {r0-r3, pc} trace: - ldr r1, [fp, #-4] @ 
lr of instrumented routine - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE - mov lr, pc - mov pc, r2 - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + ldr r1, [fp, #-4] @ lr of instrumented routine + mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE + mov lr, pc + mov pc, r2 + ldr lr, [fp, #-4] @ restore lr + ldmia sp!, {r0-r3, pc} #endif /* CONFIG_DYNAMIC_FTRACE */ .globl ftrace_stub ftrace_stub: - mov pc, lr + mov pc, lr #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c index 827753966301..56418f98cd01 100644 --- a/arch/arm/kernel/etm.c +++ b/arch/arm/kernel/etm.c @@ -543,7 +543,9 @@ static int __init etm_probe(struct amba_device *dev, struct amba_id *id) t->etm_portsz = 1; etm_unlock(t); - ret = etm_readl(t, CSCR_PRSR); + (void)etm_readl(t, ETMMR_PDSR); + /* dummy first read */ + (void)etm_readl(&tracer, ETMMR_OSSRR); t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf; etm_writel(t, 0x440, ETMR_CTRL); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 3b3d2c80509c..c0d5c3b3a760 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -47,12 +47,14 @@ #define irq_finish(irq) do { } while (0) #endif +unsigned int arch_nr_irqs; void (*init_arch_irq)(void) __initdata = NULL; unsigned long irq_err_count; int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, cpu; + struct irq_desc *desc; struct irqaction * action; unsigned long flags; @@ -67,24 +69,25 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - if (i < NR_IRQS) { - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); - action = irq_desc[i].action; + if (i < nr_irqs) { + desc = irq_to_desc(i); + raw_spin_lock_irqsave(&desc->lock, flags); + action = desc->action; if (!action) goto unlock; seq_printf(p, "%3d: ", i); for_each_present_cpu(cpu) seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); - seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-"); + seq_printf(p, " %10s", desc->chip->name ? : "-"); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) seq_printf(p, ", %s", action->name); seq_putc(p, '\n'); unlock: - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); - } else if (i == NR_IRQS) { + raw_spin_unlock_irqrestore(&desc->lock, flags); + } else if (i == nr_irqs) { #ifdef CONFIG_FIQ show_fiq_list(p, v); #endif @@ -112,7 +115,7 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs) * Some hardware gives randomly wrong interrupts. Rather * than crashing, do something sensible. 
*/ - if (unlikely(irq >= NR_IRQS)) { + if (unlikely(irq >= nr_irqs)) { if (printk_ratelimit()) printk(KERN_WARNING "Bad IRQ%u\n", irq); ack_bad_irq(irq); @@ -132,12 +135,12 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) struct irq_desc *desc; unsigned long flags; - if (irq >= NR_IRQS) { + if (irq >= nr_irqs) { printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); return; } - desc = irq_desc + irq; + desc = irq_to_desc(irq); raw_spin_lock_irqsave(&desc->lock, flags); desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (iflags & IRQF_VALID) @@ -151,14 +154,25 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) void __init init_IRQ(void) { + struct irq_desc *desc; int irq; - for (irq = 0; irq < NR_IRQS; irq++) - irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE; + for (irq = 0; irq < nr_irqs; irq++) { + desc = irq_to_desc_alloc_node(irq, 0); + desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; + } init_arch_irq(); } +#ifdef CONFIG_SPARSE_IRQ +int __init arch_probe_nr_irqs(void) +{ + nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS; + return 0; +} +#endif + #ifdef CONFIG_HOTPLUG_CPU static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) @@ -178,10 +192,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) void migrate_irqs(void) { unsigned int i, cpu = smp_processor_id(); + struct irq_desc *desc; - for (i = 0; i < NR_IRQS; i++) { - struct irq_desc *desc = irq_desc + i; - + for_each_irq_desc(i, desc) { if (desc->node == cpu) { unsigned int newcpu = cpumask_any_and(desc->affinity, cpu_online_mask); diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index c868a8864117..778c2f7024ff 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -10,57 +10,62 @@ * Deepak Saxena <dsaxena@plexity.net> */ #include <linux/irq.h> +#include <linux/kdebug.h> #include <linux/kgdb.h> #include <asm/traps.h> -/* Make a local copy of the registers passed into the handler (bletch) */ -void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs) +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { - int regno; - - /* Initialize all to zero. 
*/ - for (regno = 0; regno < GDB_MAX_REGS; regno++) - gdb_regs[regno] = 0; + { "r0", 4, offsetof(struct pt_regs, ARM_r0)}, + { "r1", 4, offsetof(struct pt_regs, ARM_r1)}, + { "r2", 4, offsetof(struct pt_regs, ARM_r2)}, + { "r3", 4, offsetof(struct pt_regs, ARM_r3)}, + { "r4", 4, offsetof(struct pt_regs, ARM_r4)}, + { "r5", 4, offsetof(struct pt_regs, ARM_r5)}, + { "r6", 4, offsetof(struct pt_regs, ARM_r6)}, + { "r7", 4, offsetof(struct pt_regs, ARM_r7)}, + { "r8", 4, offsetof(struct pt_regs, ARM_r8)}, + { "r9", 4, offsetof(struct pt_regs, ARM_r9)}, + { "r10", 4, offsetof(struct pt_regs, ARM_r10)}, + { "fp", 4, offsetof(struct pt_regs, ARM_fp)}, + { "ip", 4, offsetof(struct pt_regs, ARM_ip)}, + { "sp", 4, offsetof(struct pt_regs, ARM_sp)}, + { "lr", 4, offsetof(struct pt_regs, ARM_lr)}, + { "pc", 4, offsetof(struct pt_regs, ARM_pc)}, + { "f0", 12, -1 }, + { "f1", 12, -1 }, + { "f2", 12, -1 }, + { "f3", 12, -1 }, + { "f4", 12, -1 }, + { "f5", 12, -1 }, + { "f6", 12, -1 }, + { "f7", 12, -1 }, + { "fps", 4, -1 }, + { "cpsr", 4, offsetof(struct pt_regs, ARM_cpsr)}, +}; - gdb_regs[_R0] = kernel_regs->ARM_r0; - gdb_regs[_R1] = kernel_regs->ARM_r1; - gdb_regs[_R2] = kernel_regs->ARM_r2; - gdb_regs[_R3] = kernel_regs->ARM_r3; - gdb_regs[_R4] = kernel_regs->ARM_r4; - gdb_regs[_R5] = kernel_regs->ARM_r5; - gdb_regs[_R6] = kernel_regs->ARM_r6; - gdb_regs[_R7] = kernel_regs->ARM_r7; - gdb_regs[_R8] = kernel_regs->ARM_r8; - gdb_regs[_R9] = kernel_regs->ARM_r9; - gdb_regs[_R10] = kernel_regs->ARM_r10; - gdb_regs[_FP] = kernel_regs->ARM_fp; - gdb_regs[_IP] = kernel_regs->ARM_ip; - gdb_regs[_SPT] = kernel_regs->ARM_sp; - gdb_regs[_LR] = kernel_regs->ARM_lr; - gdb_regs[_PC] = kernel_regs->ARM_pc; - gdb_regs[_CPSR] = kernel_regs->ARM_cpsr; +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + else + memset(mem, 0, dbg_reg_def[regno].size); + return dbg_reg_def[regno].name; } -/* Copy local gdb registers back to kgdb regs, for later copy to kernel */ -void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs) +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) { - kernel_regs->ARM_r0 = gdb_regs[_R0]; - kernel_regs->ARM_r1 = gdb_regs[_R1]; - kernel_regs->ARM_r2 = gdb_regs[_R2]; - kernel_regs->ARM_r3 = gdb_regs[_R3]; - kernel_regs->ARM_r4 = gdb_regs[_R4]; - kernel_regs->ARM_r5 = gdb_regs[_R5]; - kernel_regs->ARM_r6 = gdb_regs[_R6]; - kernel_regs->ARM_r7 = gdb_regs[_R7]; - kernel_regs->ARM_r8 = gdb_regs[_R8]; - kernel_regs->ARM_r9 = gdb_regs[_R9]; - kernel_regs->ARM_r10 = gdb_regs[_R10]; - kernel_regs->ARM_fp = gdb_regs[_FP]; - kernel_regs->ARM_ip = gdb_regs[_IP]; - kernel_regs->ARM_sp = gdb_regs[_SPT]; - kernel_regs->ARM_lr = gdb_regs[_LR]; - kernel_regs->ARM_pc = gdb_regs[_PC]; - kernel_regs->ARM_cpsr = gdb_regs[_CPSR]; + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + return 0; } void @@ -176,6 +181,33 @@ void kgdb_roundup_cpus(unsigned long flags) local_irq_disable(); } +static int __kgdb_notify(struct die_args *args, unsigned long cmd) +{ + struct pt_regs *regs = args->regs; + + if (kgdb_handle_exception(1, args->signr, cmd, regs)) + return NOTIFY_DONE; + return NOTIFY_STOP; +} +static int +kgdb_notify(struct notifier_block *self, unsigned long 
cmd, void *ptr) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __kgdb_notify(ptr, cmd); + local_irq_restore(flags); + + return ret; +} + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_notify, + .priority = -INT_MAX, +}; + + /** * kgdb_arch_init - Perform any architecture specific initalization. * @@ -184,6 +216,11 @@ void kgdb_roundup_cpus(unsigned long flags) */ int kgdb_arch_init(void) { + int ret = register_die_notifier(&kgdb_notifier); + + if (ret != 0) + return ret; + register_undef_hook(&kgdb_brkpt_hook); register_undef_hook(&kgdb_compiled_brkpt_hook); @@ -200,6 +237,7 @@ void kgdb_arch_exit(void) { unregister_undef_hook(&kgdb_brkpt_hook); unregister_undef_hook(&kgdb_compiled_brkpt_hook); + unregister_die_notifier(&kgdb_notifier); } /* diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index da1f94906a4e..8bccbfa693ff 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c @@ -583,13 +583,14 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) { insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; + long ppc = (long)p->addr + 8; union reg_pair fnr; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; long rdv; - long rnv = regs->uregs[rn]; - long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ + long rnv = (rn == 15) ? ppc : regs->uregs[rn]; + long rmv = (rm == 15) ? ppc : regs->uregs[rm]; long cpsr = regs->ARM_cpsr; fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn); diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 598ca61e7bca..1fc74cbd1a19 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -37,12 +37,12 @@ void machine_kexec_cleanup(struct kimage *image) { } -void machine_shutdown(void) -{ -} - void machine_crash_shutdown(struct pt_regs *regs) { + local_irq_disable(); + crash_save_cpu(regs, smp_processor_id()); + + printk(KERN_INFO "Loading crashdump kernel...\n"); } void machine_kexec(struct kimage *image) @@ -74,7 +74,11 @@ void machine_kexec(struct kimage *image) (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); - cpu_proc_fin(); + local_irq_disable(); + local_fiq_disable(); setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/ + flush_cache_all(); + cpu_proc_fin(); + flush_cache_all(); cpu_reset(reboot_code_buffer_phys); } diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index c628bdf6c430..6b4605893f1e 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -102,7 +102,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned long loc; Elf32_Sym *sym; s32 offset; +#ifdef CONFIG_THUMB2_KERNEL u32 upper, lower, sign, j1, j2; +#endif offset = ELF32_R_SYM(rel->r_info); if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) { @@ -185,6 +187,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, (offset & 0x0fff); break; +#ifdef CONFIG_THUMB2_KERNEL case R_ARM_THM_CALL: case R_ARM_THM_JUMP24: upper = *(u16 *)loc; @@ -233,9 +236,40 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, *(u16 *)(loc + 2) = (u16)((lower & 0xd000) | (j1 << 13) | (j2 << 11) | ((offset >> 1) & 0x07ff)); + break; + + case R_ARM_THM_MOVW_ABS_NC: + case R_ARM_THM_MOVT_ABS: upper = *(u16 *)loc; lower = *(u16 *)(loc + 2); + + 
/* + * MOVT/MOVW instructions encoding in Thumb-2: + * + * i = upper[10] + * imm4 = upper[3:0] + * imm3 = lower[14:12] + * imm8 = lower[7:0] + * + * imm16 = imm4:i:imm3:imm8 + */ + offset = ((upper & 0x000f) << 12) | + ((upper & 0x0400) << 1) | + ((lower & 0x7000) >> 4) | (lower & 0x00ff); + offset = (offset ^ 0x8000) - 0x8000; + offset += sym->st_value; + + if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS) + offset >>= 16; + + *(u16 *)loc = (u16)((upper & 0xfbf0) | + ((offset & 0xf000) >> 12) | + ((offset & 0x0800) >> 1)); + *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) | + ((offset & 0x0700) << 4) | + (offset & 0x00ff)); break; +#endif default: printk(KERN_ERR "%s: unknown relocation: %u\n", diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index c45768614c8a..417c392ddf1c 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -164,20 +164,20 @@ armpmu_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { - s64 left = atomic64_read(&hwc->period_left); + s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; if (unlikely(left <= -period)) { left = period; - atomic64_set(&hwc->period_left, left); + local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; - atomic64_set(&hwc->period_left, left); + local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } @@ -185,7 +185,7 @@ armpmu_event_set_period(struct perf_event *event, if (left > (s64)armpmu->max_period) left = armpmu->max_period; - atomic64_set(&hwc->prev_count, (u64)-left); + local64_set(&hwc->prev_count, (u64)-left); armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); @@ -201,21 +201,21 @@ armpmu_event_update(struct perf_event *event, { int shift = 64 - 32; s64 prev_raw_count, new_raw_count; - s64 delta; + u64 delta; again: - prev_raw_count = atomic64_read(&hwc->prev_count); + prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = armpmu->read_counter(idx); - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count << shift) - (prev_raw_count << shift); delta >>= shift; - atomic64_add(delta, &event->count); - atomic64_sub(delta, &hwc->period_left); + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); return new_raw_count; } @@ -478,7 +478,7 @@ __hw_perf_event_init(struct perf_event *event) if (!hwc->sample_period) { hwc->sample_period = armpmu->max_period; hwc->last_period = hwc->sample_period; - atomic64_set(&hwc->period_left, hwc->sample_period); + local64_set(&hwc->period_left, hwc->sample_period); } err = 0; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index acf5e6fdb6dc..401e38be1f78 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -28,7 +28,9 @@ #include <linux/tick.h> #include <linux/utsname.h> #include <linux/uaccess.h> +#include <linux/random.h> +#include <asm/cacheflush.h> #include <asm/leds.h> #include <asm/processor.h> #include <asm/system.h> @@ -36,6 +38,12 @@ #include <asm/stacktrace.h> #include <asm/mach/time.h> +#ifdef CONFIG_CC_STACKPROTECTOR +#include <linux/stackprotector.h> +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + static const char *processor_modes[] = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , "UK8_26" , "UK9_26" , "UK10_26", 
"UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", @@ -84,10 +92,9 @@ __setup("hlt", hlt_setup); void arm_machine_restart(char mode, const char *cmd) { - /* - * Clean and disable cache, and turn off interrupts - */ - cpu_proc_fin(); + /* Disable interrupts first */ + local_irq_disable(); + local_fiq_disable(); /* * Tell the mm system that we are going to reboot - @@ -96,6 +103,15 @@ void arm_machine_restart(char mode, const char *cmd) */ setup_mm_for_reboot(mode); + /* Clean and invalidate caches */ + flush_cache_all(); + + /* Turn off caching */ + cpu_proc_fin(); + + /* Push out any further dirty data, and ensure cache is empty */ + flush_cache_all(); + /* * Now call the architecture specific reboot code. */ @@ -189,19 +205,29 @@ int __init reboot_setup(char *str) __setup("reboot=", reboot_setup); -void machine_halt(void) +void machine_shutdown(void) { +#ifdef CONFIG_SMP + smp_send_stop(); +#endif } +void machine_halt(void) +{ + machine_shutdown(); + while (1); +} void machine_power_off(void) { + machine_shutdown(); if (pm_power_off) pm_power_off(); } void machine_restart(char *cmd) { + machine_shutdown(); arm_pm_restart(reboot_mode, cmd); } @@ -351,17 +377,21 @@ EXPORT_SYMBOL(dump_fpu); /* * Shuffle the argument into the correct register before calling the - * thread function. r1 is the thread argument, r2 is the pointer to - * the thread function, and r3 points to the exit function. + * thread function. r4 is the thread argument, r5 is the pointer to + * the thread function, and r6 points to the exit function. */ extern void kernel_thread_helper(void); asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_helper, #function\n" "kernel_thread_helper:\n" -" mov r0, r1\n" -" mov lr, r3\n" -" mov pc, r2\n" +#ifdef CONFIG_TRACE_IRQFLAGS +" bl trace_hardirqs_on\n" +#endif +" msr cpsr_c, r7\n" +" mov r0, r4\n" +" mov lr, r6\n" +" mov pc, r5\n" " .size kernel_thread_helper, . - kernel_thread_helper\n" " .popsection"); @@ -391,11 +421,12 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) memset(®s, 0, sizeof(regs)); - regs.ARM_r1 = (unsigned long)arg; - regs.ARM_r2 = (unsigned long)fn; - regs.ARM_r3 = (unsigned long)kernel_thread_exit; + regs.ARM_r4 = (unsigned long)arg; + regs.ARM_r5 = (unsigned long)fn; + regs.ARM_r6 = (unsigned long)kernel_thread_exit; + regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE; regs.ARM_pc = (unsigned long)kernel_thread_helper; - regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE; + regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT; return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); } @@ -421,3 +452,9 @@ unsigned long get_wchan(struct task_struct *p) } while (count ++ < 16); return 0; } + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long range_end = mm->brk + 0x02000000; + return randomize_range(mm->brk, range_end, 0) ? 
: mm->brk; +} diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 3f562a7c0a99..f99d489822d5 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -52,6 +52,102 @@ #define BREAKINST_THUMB 0xde01 #endif +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) \ + {.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + REG_OFFSET_NAME(r0), + REG_OFFSET_NAME(r1), + REG_OFFSET_NAME(r2), + REG_OFFSET_NAME(r3), + REG_OFFSET_NAME(r4), + REG_OFFSET_NAME(r5), + REG_OFFSET_NAME(r6), + REG_OFFSET_NAME(r7), + REG_OFFSET_NAME(r8), + REG_OFFSET_NAME(r9), + REG_OFFSET_NAME(r10), + REG_OFFSET_NAME(fp), + REG_OFFSET_NAME(ip), + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(lr), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(cpsr), + REG_OFFSET_NAME(ORIG_r0), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_query_register_name() - query register name from its offset + * @offset: the offset of a register in struct pt_regs. + * + * regs_query_register_name() returns the name of a register from its + * offset in struct pt_regs. If the @offset is invalid, this returns NULL; + */ +const char *regs_query_register_name(unsigned int offset) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (roff->offset == offset) + return roff->name; + return NULL; +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + /* * this routine will get a word off of the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. 
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S index 61930eb09029..fd26f8d65151 100644 --- a/arch/arm/kernel/relocate_kernel.S +++ b/arch/arm/kernel/relocate_kernel.S @@ -10,6 +10,12 @@ relocate_new_kernel: ldr r0,kexec_indirection_page ldr r1,kexec_start_address + /* + * If there is no indirection page (we are doing crashdumps) + * skip any relocation. + */ + cmp r0, #0 + beq 2f 0: /* top, read another word for the indirection page */ ldr r3, [r0],#4 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 122d999bdc7c..d5231ae7355a 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -19,12 +19,15 @@ #include <linux/seq_file.h> #include <linux/screen_info.h> #include <linux/init.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> #include <linux/root_dev.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/fs.h> #include <linux/proc_fs.h> +#include <linux/memblock.h> #include <asm/unified.h> #include <asm/cpu.h> @@ -44,7 +47,9 @@ #include <asm/traps.h> #include <asm/unwind.h> +#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) #include "compat.h" +#endif #include "atags.h" #include "tcm.h" @@ -269,6 +274,21 @@ static void __init cacheid_init(void) extern struct proc_info_list *lookup_processor_type(unsigned int); extern struct machine_desc *lookup_machine_type(unsigned int); +static void __init feat_v6_fixup(void) +{ + int id = read_cpuid_id(); + + if ((id & 0xff0f0000) != 0x41070000) + return; + + /* + * HWCAP_TLS is available only on 1136 r1p0 and later, + * see also kuser_get_tls_init. + */ + if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0)) + elf_hwcap &= ~HWCAP_TLS; +} + static void __init setup_processor(void) { struct proc_info_list *list; @@ -311,6 +331,8 @@ static void __init setup_processor(void) elf_hwcap &= ~HWCAP_THUMB; #endif + feat_v6_fixup(); + cacheid_init(); cpu_proc_init(); } @@ -402,13 +424,12 @@ static int __init arm_add_memory(unsigned long start, unsigned long size) size -= start & ~PAGE_MASK; bank->start = PAGE_ALIGN(start); bank->size = size & PAGE_MASK; - bank->node = PHYS_TO_NID(start); /* * Check whether this memory region has non-zero size or * invalid node number. */ - if (bank->size == 0 || bank->node >= MAX_NUMNODES) + if (bank->size == 0) return -EINVAL; meminfo.nr_banks++; @@ -663,6 +684,86 @@ static int __init customize_machine(void) } arch_initcall(customize_machine); +#ifdef CONFIG_KEXEC +static inline unsigned long long get_total_mem(void) +{ + unsigned long total; + + total = max_low_pfn - min_low_pfn; + return total << PAGE_SHIFT; +} + +/** + * reserve_crashkernel() - reserves memory are for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by a dump capture kernel when + * primary kernel is crashing. 
+ */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_size, crash_base; + unsigned long long total_mem; + int ret; + + total_mem = get_total_mem(); + ret = parse_crashkernel(boot_command_line, total_mem, + &crash_size, &crash_base); + if (ret) + return; + + ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE); + if (ret < 0) { + printk(KERN_WARNING "crashkernel reservation failed - " + "memory is in use (0x%lx)\n", (unsigned long)crash_base); + return; + } + + printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(total_mem >> 20)); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); +} +#else +static inline void reserve_crashkernel(void) {} +#endif /* CONFIG_KEXEC */ + +/* + * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by + * is_kdump_kernel() to determine if we are booting after a panic. Hence + * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. + */ + +#ifdef CONFIG_CRASH_DUMP +/* + * elfcorehdr= specifies the location of elf core header stored by the crashed + * kernel. This option will be passed by kexec loader to the capture kernel. + */ +static int __init setup_elfcorehdr(char *arg) +{ + char *end; + + if (!arg) + return -EINVAL; + + elfcorehdr_addr = memparse(arg, &end); + return end > arg ? 0 : -EINVAL; +} +early_param("elfcorehdr", setup_elfcorehdr); +#endif /* CONFIG_CRASH_DUMP */ + +static void __init squash_mem_tags(struct tag *tag) +{ + for (; tag->hdr.size; tag = tag_next(tag)) + if (tag->hdr.tag == ATAG_MEM) + tag->hdr.tag = ATAG_NONE; +} + void __init setup_arch(char **cmdline_p) { struct tag *tags = (struct tag *)&init_tags; @@ -683,12 +784,14 @@ void __init setup_arch(char **cmdline_p) else if (mdesc->boot_params) tags = phys_to_virt(mdesc->boot_params); +#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) /* * If we have the old style parameters, convert them to * a tag list. 
*/ if (tags->hdr.tag != ATAG_CORE) convert_to_tag_list(tags); +#endif if (tags->hdr.tag != ATAG_CORE) tags = (struct tag *)&init_tags; @@ -716,12 +819,15 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + arm_memblock_init(&meminfo, mdesc); + paging_init(mdesc); request_standard_resources(&meminfo, mdesc); #ifdef CONFIG_SMP smp_init_cpus(); #endif + reserve_crashkernel(); cpu_init(); tcm_init(); @@ -729,6 +835,7 @@ void __init setup_arch(char **cmdline_p) /* * Set up various architecture-specific pointers */ + arch_nr_irqs = mdesc->nr_irqs; init_arch_irq = mdesc->init_irq; system_timer = mdesc->timer; init_machine = mdesc->init_machine; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index b8c3d0f689d9..40dc74f2b27f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -429,7 +429,11 @@ static void smp_timer_broadcast(const struct cpumask *mask) { send_ipi_message(mask, IPI_TIMER); } +#else +#define smp_timer_broadcast NULL +#endif +#ifndef CONFIG_LOCAL_TIMERS static void broadcast_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { @@ -444,7 +448,6 @@ static void local_timer_setup(struct clock_event_device *evt) evt->rating = 400; evt->mult = 1; evt->set_mode = broadcast_timer_set_mode; - evt->broadcast = smp_timer_broadcast; clockevents_register_device(evt); } @@ -456,6 +459,7 @@ void __cpuinit percpu_timer_setup(void) struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); evt->cpumask = cpumask_of(cpu); + evt->broadcast = smp_timer_broadcast; local_timer_setup(evt); } @@ -467,10 +471,13 @@ static DEFINE_SPINLOCK(stop_lock); */ static void ipi_cpu_stop(unsigned int cpu) { - spin_lock(&stop_lock); - printk(KERN_CRIT "CPU%u: stopping\n", cpu); - dump_stack(); - spin_unlock(&stop_lock); + if (system_state == SYSTEM_BOOTING || + system_state == SYSTEM_RUNNING) { + spin_lock(&stop_lock); + printk(KERN_CRIT "CPU%u: stopping\n", cpu); + dump_stack(); + spin_unlock(&stop_lock); + } set_cpu_online(cpu, false); diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 7c5f0c024db7..35882fbf37f9 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -132,7 +132,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk) twd_calibrate_rate(); clk->name = "local_timer"; - clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP; clk->rating = 350; clk->set_mode = twd_set_mode; clk->set_next_event = twd_set_next_event; diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index c23501842b98..5b7c541a4c63 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -62,7 +62,7 @@ asmlinkage int sys_vfork(struct pt_regs *regs) /* sys_execve() executes a new program. 
* This is called indirectly via a small wrapper */ -asmlinkage int sys_execve(char __user *filenamei, char __user * __user *argv, +asmlinkage int sys_execve(const char __user *filenamei, char __user * __user *argv, char __user * __user *envp, struct pt_regs *regs) { int error; @@ -84,7 +84,7 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]) int ret; memset(®s, 0, sizeof(struct pt_regs)); - ret = do_execve((char *)filename, (char __user * __user *)argv, + ret = do_execve(filename, (char __user * __user *)argv, (char __user * __user *)envp, ®s); if (ret < 0) goto out; diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 33ff678e32f2..4ad8da15ef2b 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -141,7 +141,7 @@ static long cp_oldabi_stat64(struct kstat *stat, return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; } -asmlinkage long sys_oabi_stat64(char __user * filename, +asmlinkage long sys_oabi_stat64(const char __user * filename, struct oldabi_stat64 __user * statbuf) { struct kstat stat; @@ -151,7 +151,7 @@ asmlinkage long sys_oabi_stat64(char __user * filename, return error; } -asmlinkage long sys_oabi_lstat64(char __user * filename, +asmlinkage long sys_oabi_lstat64(const char __user * filename, struct oldabi_stat64 __user * statbuf) { struct kstat stat; @@ -172,7 +172,7 @@ asmlinkage long sys_oabi_fstat64(unsigned long fd, } asmlinkage long sys_oabi_fstatat64(int dfd, - char __user *filename, + const char __user *filename, struct oldabi_stat64 __user *statbuf, int flag) { diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index e50303868f1b..26685c2f7a49 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -13,38 +13,35 @@ #include <linux/ioport.h> #include <linux/genalloc.h> #include <linux/string.h> /* memcpy */ -#include <asm/page.h> /* PAGE_SHIFT */ #include <asm/cputype.h> #include <asm/mach/map.h> #include <mach/memory.h> #include "tcm.h" -/* Scream and warn about misuse */ -#if !defined(ITCM_OFFSET) || !defined(ITCM_END) || \ - !defined(DTCM_OFFSET) || !defined(DTCM_END) -#error "TCM support selected but offsets not defined!" 
-#endif - static struct gen_pool *tcm_pool; /* TCM section definitions from the linker */ extern char __itcm_start, __sitcm_text, __eitcm_text; extern char __dtcm_start, __sdtcm_data, __edtcm_data; +/* These will be increased as we run */ +u32 dtcm_end = DTCM_OFFSET; +u32 itcm_end = ITCM_OFFSET; + /* * TCM memory resources */ static struct resource dtcm_res = { .name = "DTCM RAM", .start = DTCM_OFFSET, - .end = DTCM_END, + .end = DTCM_OFFSET, .flags = IORESOURCE_MEM }; static struct resource itcm_res = { .name = "ITCM RAM", .start = ITCM_OFFSET, - .end = ITCM_END, + .end = ITCM_OFFSET, .flags = IORESOURCE_MEM }; @@ -52,8 +49,8 @@ static struct map_desc dtcm_iomap[] __initdata = { { .virtual = DTCM_OFFSET, .pfn = __phys_to_pfn(DTCM_OFFSET), - .length = (DTCM_END - DTCM_OFFSET + 1), - .type = MT_UNCACHED + .length = 0, + .type = MT_MEMORY_DTCM } }; @@ -61,8 +58,8 @@ static struct map_desc itcm_iomap[] __initdata = { { .virtual = ITCM_OFFSET, .pfn = __phys_to_pfn(ITCM_OFFSET), - .length = (ITCM_END - ITCM_OFFSET + 1), - .type = MT_UNCACHED + .length = 0, + .type = MT_MEMORY_ITCM } }; @@ -93,14 +90,24 @@ void tcm_free(void *addr, size_t len) } EXPORT_SYMBOL(tcm_free); - -static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size) +static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, + u32 *offset) { const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, -1, -1, -1, -1 }; u32 tcm_region; int tcm_size; + /* + * If there are more than one TCM bank of this type, + * select the TCM bank to operate on in the TCM selection + * register. + */ + if (banks > 1) + asm("mcr p15, 0, %0, c9, c2, 0" + : /* No output operands */ + : "r" (bank)); + /* Read the special TCM region register c9, 0 */ if (!type) asm("mrc p15, 0, %0, c9, c1, 0" @@ -111,26 +118,24 @@ static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size) tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f]; if (tcm_size < 0) { - pr_err("CPU: %sTCM of unknown size!\n", - type ? "I" : "D"); + pr_err("CPU: %sTCM%d of unknown size\n", + type ? "I" : "D", bank); + return -EINVAL; + } else if (tcm_size > 32) { + pr_err("CPU: %sTCM%d larger than 32k found\n", + type ? "I" : "D", bank); + return -EINVAL; } else { - pr_info("CPU: found %sTCM %dk @ %08x, %senabled\n", + pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n", type ? "I" : "D", + bank, tcm_size, (tcm_region & 0xfffff000U), (tcm_region & 1) ? "" : "not "); } - if (tcm_size != expected_size) { - pr_crit("CPU: %sTCM was detected %dk but expected %dk!\n", - type ? "I" : "D", - tcm_size, - expected_size); - /* Adjust to the expected size? what can we do... */ - } - /* Force move the TCM bank to where we want it, enable */ - tcm_region = offset | (tcm_region & 0x00000ffeU) | 1; + tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; if (!type) asm("mcr p15, 0, %0, c9, c1, 0" @@ -141,10 +146,15 @@ static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size) : /* No output operands */ : "r" (tcm_region)); - pr_debug("CPU: moved %sTCM %dk to %08x, enabled\n", - type ? "I" : "D", - tcm_size, - (tcm_region & 0xfffff000U)); + /* Increase offset */ + *offset += (tcm_size << 10); + + pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n", + type ? 
"I" : "D", + bank, + tcm_size, + (tcm_region & 0xfffff000U)); + return 0; } /* @@ -153,34 +163,52 @@ static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size) void __init tcm_init(void) { u32 tcm_status = read_cpuid_tcmstatus(); + u8 dtcm_banks = (tcm_status >> 16) & 0x03; + u8 itcm_banks = (tcm_status & 0x03); char *start; char *end; char *ram; + int ret; + int i; /* Setup DTCM if present */ - if (tcm_status & (1 << 16)) { - setup_tcm_bank(0, DTCM_OFFSET, - (DTCM_END - DTCM_OFFSET + 1) >> 10); + if (dtcm_banks > 0) { + for (i = 0; i < dtcm_banks; i++) { + ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end); + if (ret) + return; + } + dtcm_res.end = dtcm_end - 1; request_resource(&iomem_resource, &dtcm_res); + dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; iotable_init(dtcm_iomap, 1); /* Copy data from RAM to DTCM */ start = &__sdtcm_data; end = &__edtcm_data; ram = &__dtcm_start; + /* This means you compiled more code than fits into DTCM */ + BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); memcpy(start, ram, (end-start)); pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); } /* Setup ITCM if present */ - if (tcm_status & 1) { - setup_tcm_bank(1, ITCM_OFFSET, - (ITCM_END - ITCM_OFFSET + 1) >> 10); + if (itcm_banks > 0) { + for (i = 0; i < itcm_banks; i++) { + ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end); + if (ret) + return; + } + itcm_res.end = itcm_end - 1; request_resource(&iomem_resource, &itcm_res); + itcm_iomap[0].length = itcm_end - ITCM_OFFSET; iotable_init(itcm_iomap, 1); /* Copy code from RAM to ITCM */ start = &__sitcm_text; end = &__eitcm_text; ram = &__itcm_start; + /* This means you compiled more code than fits into ITCM */ + BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); memcpy(start, ram, (end-start)); pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); } @@ -208,10 +236,10 @@ static int __init setup_tcm_pool(void) pr_debug("Setting up TCM memory pool\n"); /* Add the rest of DTCM to the TCM pool */ - if (tcm_status & (1 << 16)) { - if (dtcm_pool_start < DTCM_END) { + if (tcm_status & (0x03 << 16)) { + if (dtcm_pool_start < dtcm_end) { ret = gen_pool_add(tcm_pool, dtcm_pool_start, - DTCM_END - dtcm_pool_start + 1, -1); + dtcm_end - dtcm_pool_start, -1); if (ret) { pr_err("CPU DTCM: could not add DTCM " \ "remainder to pool!\n"); @@ -219,16 +247,16 @@ static int __init setup_tcm_pool(void) } pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \ "the TCM memory pool\n", - DTCM_END - dtcm_pool_start + 1, + dtcm_end - dtcm_pool_start, dtcm_pool_start); } } /* Add the rest of ITCM to the TCM pool */ - if (tcm_status & 1) { - if (itcm_pool_start < ITCM_END) { + if (tcm_status & 0x03) { + if (itcm_pool_start < itcm_end) { ret = gen_pool_add(tcm_pool, itcm_pool_start, - ITCM_END - itcm_pool_start + 1, -1); + itcm_end - itcm_pool_start, -1); if (ret) { pr_err("CPU ITCM: could not add ITCM " \ "remainder to pool!\n"); @@ -236,7 +264,7 @@ static int __init setup_tcm_pool(void) } pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \ "the TCM memory pool\n", - ITCM_END - itcm_pool_start + 1, + itcm_end - itcm_pool_start, itcm_pool_start); } } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 1621e5327b2a..cda78d59aa31 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -30,6 +30,7 @@ #include <asm/unistd.h> #include <asm/traps.h> #include <asm/unwind.h> +#include <asm/tls.h> #include "ptrace.h" #include "signal.h" @@ -518,17 +519,20 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) case NR(set_tls): 
thread->tp_value = regs->ARM_r0; -#if defined(CONFIG_HAS_TLS_REG) - asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) ); -#elif !defined(CONFIG_TLS_REG_EMUL) - /* - * User space must never try to access this directly. - * Expect your app to break eventually if you do so. - * The user helper at 0xffff0fe0 must be used instead. - * (see entry-armv.S for details) - */ - *((unsigned int *)0xffff0ff0) = regs->ARM_r0; -#endif + if (tls_emu) + return 0; + if (has_tls_reg) { + asm ("mcr p15, 0, %0, c13, c0, 3" + : : "r" (regs->ARM_r0)); + } else { + /* + * User space must never try to access this directly. + * Expect your app to break eventually if you do so. + * The user helper at 0xffff0fe0 must be used instead. + * (see entry-armv.S for details) + */ + *((unsigned int *)0xffff0ff0) = regs->ARM_r0; + } return 0; #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG @@ -743,6 +747,16 @@ void __init trap_init(void) return; } +static void __init kuser_get_tls_init(unsigned long vectors) +{ + /* + * vectors + 0xfe0 = __kuser_get_tls + * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 + */ + if (tls_emu || has_tls_reg) + memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); +} + void __init early_trap_init(void) { unsigned long vectors = CONFIG_VECTORS_BASE; @@ -761,6 +775,11 @@ void __init early_trap_init(void) memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); /* + * Do processor specific fixups for the kuser helpers + */ + kuser_get_tls_init(vectors); + + /* * Copy signal return handlers into the vector page, and * set sigreturn to be a pointer to these. */ |
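
The traps.c hunk above keeps the long-standing rule that user space must not read the TLS word at 0xffff0ff0 directly and must go through the kuser helper at 0xffff0fe0 (which this series patches at boot via kuser_get_tls_init() to use the hardware TLS register when available). As an illustration only — not part of this commit — here is a minimal user-space C sketch, assuming the documented ARM kuser helper ABI (fixed vector-page address 0xffff0fe0, TLS value returned in r0, no other registers clobbered); it only runs on ARM Linux with the vector page mapped.

    /* Sketch: read the current thread's TLS value via the ARM kuser helper.
     * Assumes the documented kuser ABI; the address and calling convention
     * are fixed by the kernel's vector page, not by this commit. */
    #include <stdio.h>

    typedef unsigned int (*kuser_get_tls_fn)(void);
    #define kuser_get_tls ((kuser_get_tls_fn)0xffff0fe0)  /* fixed vector-page entry */

    int main(void)
    {
            unsigned int tls = kuser_get_tls();  /* helper returns the TLS value in r0 */

            printf("TLS value: 0x%08x\n", tls);
            return 0;
    }

Because the helper hides whether the value comes from the CP15 TLS register or from the software word at 0xffff0ff0, the same binary keeps working across the CPUs this merge distinguishes in feat_v6_fixup().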