Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/ecard.c             |   1
-rw-r--r--  arch/arm/kernel/perf_event.c        |  45
-rw-r--r--  arch/arm/kernel/perf_event_v6.c     |  22
-rw-r--r--  arch/arm/kernel/perf_event_v7.c     |  11
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c |  20
-rw-r--r--  arch/arm/kernel/ptrace.c            |   9
-rw-r--r--  arch/arm/kernel/smp.c               |  22
-rw-r--r--  arch/arm/kernel/smp_twd.c           | 125
-rw-r--r--  arch/arm/kernel/traps.c             |   5
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S       |   1

10 files changed, 197 insertions(+), 64 deletions(-)
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 4dd0edab6a65..1651d4950744 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+	vma.vm_flags = VM_EXEC;
 	vma.vm_mm = mm;
 
 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47f..b2abfa18f137 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -680,6 +680,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 }
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
+/*
  * CPU PMU identification and registration.
  */
 static int __init
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
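The single masked subtraction in armpmu_event_update() above works because the counters wrap modulo a power of two: (new - prev) & max_period recovers the elapsed count whether or not the counter wrapped, provided it wrapped at most once — which is exactly why the same patch caps the default sample_period at half the counter width. A minimal user-space sketch of the arithmetic (illustration only, not part of the commit):

#include <stdio.h>

int main(void)
{
	/* A 32-bit counter, so max_period = 2^32 - 1 as in struct arm_pmu. */
	unsigned long long max_period = 0xffffffffULL;
	unsigned long long prev      = 0xfffffff0ULL;	/* just before the wrap */
	unsigned long long new_count = 0x10ULL;		/* just after the wrap */

	/* One masked subtraction covers both the wrapped and unwrapped cases. */
	unsigned long long delta = (new_count - prev) & max_period;

	printf("delta = %llu\n", delta);		/* prints 32 */
	return 0;
}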
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be9930ec2..b78af0cc6ef3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;
 
 		/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244c68f9..4d7095af2ab3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
 
 	counter = ARMV7_IDX_TO_COUNTER(idx);
 	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+	isb();
+	/* Clear the overflow flag in case an interrupt is pending. */
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+	isb();
+
 	return idx;
 }
 
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d8269829..71a21e6712f5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+		if (!event)
+			continue;
+
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	unsigned long flags, ien, evtsel;
+	unsigned long flags, ien, evtsel, of_flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
 		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		of_flags = XSCALE2_CCOUNT_OVERFLOW;
 		break;
 	case XSCALE_COUNTER0:
 		ien &= ~XSCALE2_COUNT0_INT_EN;
 		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		of_flags = XSCALE2_COUNT0_OVERFLOW;
 		break;
 	case XSCALE_COUNTER1:
 		ien &= ~XSCALE2_COUNT1_INT_EN;
 		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		of_flags = XSCALE2_COUNT1_OVERFLOW;
 		break;
 	case XSCALE_COUNTER2:
 		ien &= ~XSCALE2_COUNT2_INT_EN;
 		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		of_flags = XSCALE2_COUNT2_OVERFLOW;
 		break;
 	case XSCALE_COUNTER3:
 		ien &= ~XSCALE2_COUNT3_INT_EN;
 		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		of_flags = XSCALE2_COUNT3_OVERFLOW;
 		break;
 	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
+	xscale2pmu_write_overflow_flags(of_flags);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
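The ARMv7 and XScale2 hunks above apply the same disable-side rule: first mask the counter's interrupt, then clear any overflow flag it may already have latched, so a stale flag cannot steer the shared PMU interrupt at a counter whose event has been freed. A compilable sketch of that ordering (not part of the commit; pmu_mask_irq() and pmu_ack_overflow() are hypothetical stand-ins for the real register writes — the mcr instructions on ARMv7, the xscale2pmu_write_*() helpers on XScale2):

#include <stdio.h>

/* Hypothetical stand-ins for the per-architecture register writes. */
static void pmu_mask_irq(int counter)     { printf("mask irq, counter %d\n", counter); }
static void pmu_ack_overflow(int counter) { printf("ack overflow, counter %d\n", counter); }

static void pmu_release_counter(int counter)
{
	pmu_mask_irq(counter);		/* no further overflow interrupts */
	pmu_ack_overflow(counter);	/* drop any flag already latched  */
}

int main(void)
{
	pmu_release_counter(0);
	return 0;
}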
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e33870ff0ac0..ede6443c34d9 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -23,6 +23,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/regset.h>
+#include <linux/audit.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -904,6 +905,12 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ret;
 }
 
+#ifdef __ARMEB__
+#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
+#else
+#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
+#endif
+
 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 {
 	unsigned long ip;
@@ -918,7 +925,7 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 	if (!ip)
 		audit_syscall_exit(regs);
 	else
-		audit_syscall_entry(AUDIT_ARCH_ARMEB, scno, regs->ARM_r0,
+		audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
 				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index cdeb727527d3..1ad84a6c9bfb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -246,6 +246,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 	store_cpu_topology(cpuid);
 }
 
+static void percpu_timer_setup(void);
+
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -459,7 +461,20 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
 	clockevents_register_device(evt);
 }
 
-void __cpuinit percpu_timer_setup(void)
+static struct local_timer_ops *lt_ops;
+
+#ifdef CONFIG_LOCAL_TIMERS
+int local_timer_register(struct local_timer_ops *ops)
+{
+	if (lt_ops)
+		return -EBUSY;
+
+	lt_ops = ops;
+	return 0;
+}
+#endif
+
+static void __cpuinit percpu_timer_setup(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
@@ -467,7 +482,7 @@ void __cpuinit percpu_timer_setup(void)
 	evt->cpumask = cpumask_of(cpu);
 	evt->broadcast = smp_timer_broadcast;
 
-	if (local_timer_setup(evt))
+	if (!lt_ops || lt_ops->setup(evt))
 		broadcast_timer_setup(evt);
 }
 
@@ -482,7 +497,8 @@ static void percpu_timer_stop(void)
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
-	local_timer_stop(evt);
+	if (lt_ops)
+		lt_ops->stop(evt);
 }
 #endif
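With the smp.c change, percpu_timer_setup() no longer calls a hard-wired local_timer_setup(); whichever timer driver registers a struct local_timer_ops first provides the per-CPU timer, and the broadcast timer remains the fallback when no driver registered or its setup() fails for a CPU. A hypothetical driver-side sketch of the pattern (not part of the commit; the my_lt_* names are invented — the real TWD registration follows in smp_twd.c below):

#include <asm/localtimer.h>

static int __cpuinit my_lt_setup(struct clock_event_device *evt)
{
	/* Program this CPU's private timer and fill in evt here. */
	return 0;	/* non-zero sends this CPU to the broadcast timer */
}

static void my_lt_stop(struct clock_event_device *evt)
{
	/* Quiesce this CPU's private timer here. */
}

static struct local_timer_ops my_lt_ops __cpuinitdata = {
	.setup	= my_lt_setup,
	.stop	= my_lt_stop,
};

static int __init my_lt_init(void)
{
	/* Only one provider may register; a second gets -EBUSY. */
	return local_timer_register(&my_lt_ops);
}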
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 4285daa077b0..fef42b21cecb 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -18,20 +18,23 @@
 #include <linux/smp.h>
 #include <linux/jiffies.h>
 #include <linux/clockchips.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
 
 #include <asm/smp_twd.h>
 #include <asm/localtimer.h>
 #include <asm/hardware/gic.h>
 
 /* set up by the platform code */
-void __iomem *twd_base;
+static void __iomem *twd_base;
 
 static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
 
 static struct clock_event_device __percpu **twd_evt;
+static int twd_ppi;
 
 static void twd_set_mode(enum clock_event_mode mode,
 			struct clock_event_device *clk)
@@ -77,7 +80,7 @@ static int twd_set_next_event(unsigned long evt,
  * If a local timer interrupt has occurred, acknowledge and return 1.
  * Otherwise, return 0.
  */
-int twd_timer_ack(void)
+static int twd_timer_ack(void)
 {
 	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
 		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
@@ -87,7 +90,7 @@ int twd_timer_ack(void)
 	return 0;
 }
 
-void twd_timer_stop(struct clock_event_device *clk)
+static void twd_timer_stop(struct clock_event_device *clk)
 {
 	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
 	disable_percpu_irq(clk->irq);
@@ -129,7 +132,7 @@ static struct notifier_block twd_cpufreq_nb = {
 
 static int twd_cpufreq_init(void)
 {
-	if (!IS_ERR(twd_clk))
+	if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
 		return cpufreq_register_notifier(&twd_cpufreq_nb,
 			CPUFREQ_TRANSITION_NOTIFIER);
 
@@ -222,28 +225,10 @@ static struct clk *twd_get_clock(void)
 /*
  * Setup the local clock events for a CPU.
  */
-void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 {
 	struct clock_event_device **this_cpu_clk;
 
-	if (!twd_evt) {
-		int err;
-
-		twd_evt = alloc_percpu(struct clock_event_device *);
-		if (!twd_evt) {
-			pr_err("twd: can't allocate memory\n");
-			return;
-		}
-
-		err = request_percpu_irq(clk->irq, twd_handler,
-					 "twd", twd_evt);
-		if (err) {
-			pr_err("twd: can't register interrupt %d (%d)\n",
-			       clk->irq, err);
-			return;
-		}
-	}
-
 	if (!twd_clk)
 		twd_clk = twd_get_clock();
 
@@ -260,6 +245,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clk->rating = 350;
 	clk->set_mode = twd_set_mode;
 	clk->set_next_event = twd_set_next_event;
+	clk->irq = twd_ppi;
 
 	this_cpu_clk = __this_cpu_ptr(twd_evt);
 	*this_cpu_clk = clk;
@@ -267,4 +253,95 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clockevents_config_and_register(clk, twd_timer_rate,
 					0xf, 0xffffffff);
 	enable_percpu_irq(clk->irq, 0);
+
+	return 0;
+}
+
+static struct local_timer_ops twd_lt_ops __cpuinitdata = {
+	.setup	= twd_timer_setup,
+	.stop	= twd_timer_stop,
+};
+
+static int __init twd_local_timer_common_register(void)
+{
+	int err;
+
+	twd_evt = alloc_percpu(struct clock_event_device *);
+	if (!twd_evt) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
+	if (err) {
+		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
+		goto out_free;
+	}
+
+	err = local_timer_register(&twd_lt_ops);
+	if (err)
+		goto out_irq;
+
+	return 0;
+
+out_irq:
+	free_percpu_irq(twd_ppi, twd_evt);
+out_free:
+	iounmap(twd_base);
+	twd_base = NULL;
+	free_percpu(twd_evt);
+
+	return err;
 }
+
+int __init twd_local_timer_register(struct twd_local_timer *tlt)
+{
+	if (twd_base || twd_evt)
+		return -EBUSY;
+
+	twd_ppi = tlt->res[1].start;
+
+	twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
+	if (!twd_base)
+		return -ENOMEM;
+
+	return twd_local_timer_common_register();
+}
+
+#ifdef CONFIG_OF
+const static struct of_device_id twd_of_match[] __initconst = {
+	{ .compatible = "arm,cortex-a9-twd-timer", },
+	{ .compatible = "arm,cortex-a5-twd-timer", },
+	{ .compatible = "arm,arm11mp-twd-timer", },
+	{ },
+};
+
+void __init twd_local_timer_of_register(void)
+{
+	struct device_node *np;
+	int err;
+
+	np = of_find_matching_node(NULL, twd_of_match);
+	if (!np) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	twd_ppi = irq_of_parse_and_map(np, 0);
+	if (!twd_ppi) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	twd_base = of_iomap(np, 0);
+	if (!twd_base) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = twd_local_timer_common_register();
+
+out:
+	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
+}
+#endif
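For non-DT platforms, the new entry point takes a struct twd_local_timer whose res[0] is the TWD's MMIO window and res[1] its PPI, exactly as twd_local_timer_register() above reads them back. A hypothetical board file could wire it up as follows (not part of the commit; the base address 0x48240600 and PPI 29 are made-up example values):

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <asm/smp_twd.h>

static struct twd_local_timer board_twd __initdata = {
	.res = {
		DEFINE_RES_MEM(0x48240600, 0x10),	/* example TWD base */
		DEFINE_RES_IRQ(29),			/* example TWD PPI  */
	},
};

static void __init board_timer_init(void)
{
	if (twd_local_timer_register(&board_twd))
		pr_warn("twd: local timer registration failed\n");

	/* ... then register the global clocksource as usual ... */
}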
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 99a572702509..f84dfe67724f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -266,6 +266,7 @@ void die(const char *str, struct pt_regs *regs, int err)
 {
 	struct thread_info *thread = current_thread_info();
 	int ret;
+	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
 
 	oops_enter();
 
@@ -273,7 +274,9 @@ void die(const char *str, struct pt_regs *regs, int err)
 	console_verbose();
 	bust_spinlocks(1);
 	if (!user_mode(regs))
-		report_bug(regs->ARM_pc, regs);
+		bug_type = report_bug(regs->ARM_pc, regs);
+	if (bug_type != BUG_TRAP_TYPE_NONE)
+		str = "Oops - BUG";
 	ret = __die(str, err, thread, regs);
 
 	if (regs && kexec_should_crash(thread->task))
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 1e19691e0406..43a31fb06318 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -10,6 +10,7 @@
 #include <asm/page.h>
 
 #define PROC_INFO						\
+	. = ALIGN(4);						\
 	VMLINUX_SYMBOL(__proc_info_begin) = .;			\
 	*(.proc.info.init)					\
 	VMLINUX_SYMBOL(__proc_info_end) = .;