Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                        |  2
-rw-r--r--  arch/arm/boot/.gitignore                |  1
-rw-r--r--  arch/arm/include/asm/pmu.h              |  2
-rw-r--r--  arch/arm/kernel/ecard.c                 |  1
-rw-r--r--  arch/arm/kernel/perf_event.c            | 45
-rw-r--r--  arch/arm/kernel/perf_event_v6.c         | 22
-rw-r--r--  arch/arm/kernel/perf_event_v7.c         | 11
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c     | 20
-rw-r--r--  arch/arm/mach-omap2/id.c                |  1
-rw-r--r--  arch/arm/mach-omap2/mailbox.c           |  3
-rw-r--r--  arch/arm/mach-omap2/omap-iommu.c        |  3
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c      |  2
-rw-r--r--  arch/arm/mach-omap2/twl-common.c        |  1
-rw-r--r--  arch/arm/mach-s3c2440/s3c244x.c         |  1
-rw-r--r--  arch/arm/mach-ux500/Kconfig             |  2
-rw-r--r--  arch/arm/mach-vexpress/Kconfig          |  2
-rw-r--r--  arch/arm/mm/proc-v7.S                   |  4
-rw-r--r--  arch/arm/plat-omap/include/plat/irqs.h  | 10
-rw-r--r--  arch/arm/plat-spear/time.c              |  6
-rw-r--r--  arch/c6x/include/asm/processor.h        |  4
-rw-r--r--  arch/x86/lib/delay.c                    |  4
21 files changed, 94 insertions, 53 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a48aecc17eac..dfb0312f4e73 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1280,7 +1280,7 @@ config ARM_ERRATA_743622
depends on CPU_V7
help
This option enables the workaround for the 743622 Cortex-A9
- (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+ (r2p*) erratum. Under very rare conditions, a faulty
optimisation in the Cortex-A9 Store Buffer may lead to data
corruption. This workaround sets a specific bit in the diagnostic
register of the Cortex-A9 which disables the Store Buffer
diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore
index ce1c5ff746e7..3c79f85975aa 100644
--- a/arch/arm/boot/.gitignore
+++ b/arch/arm/boot/.gitignore
@@ -3,3 +3,4 @@ zImage
xipImage
bootpImage
uImage
+*.dtb
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b5a5be2536c1..90114faa9f3c 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -134,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
u64 armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow);
+ int idx);
int armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 4dd0edab6a65..1651d4950744 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
+ vma.vm_flags = VM_EXEC;
vma.vm_mm = mm;
flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47f..b2abfa18f137 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow)
+ int idx)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- new_raw_count &= armpmu->max_period;
- prev_raw_count &= armpmu->max_period;
-
- if (overflow)
- delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
- else
- delta = new_raw_count - prev_raw_count;
+ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
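The new delta line leans on unsigned modular arithmetic: subtracting the previous raw count from the new one and masking with max_period yields the elapsed events even when the counter wrapped once in between, which is what makes the explicit overflow argument unnecessary. A minimal standalone sketch of the idea, assuming max_period is a mask covering the counter width (helper name is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* Elapsed count on a hardware counter that wraps at (mask + 1). */
    static uint64_t counter_delta(uint64_t prev, uint64_t now, uint64_t mask)
    {
            /* Unsigned subtraction wraps modulo 2^64; masking folds the
             * result back into the counter width, so one hardware wrap
             * between reads is handled without an overflow flag. */
            return (now - prev) & mask;
    }

    int main(void)
    {
            uint64_t mask = 0xffffffff;     /* 32-bit counter */

            assert(counter_delta(100, 150, mask) == 50);
            /* Counter wrapped: 0xfffffff0 -> 0x10 is 0x20 events. */
            assert(counter_delta(0xfffffff0, 0x10, mask) == 0x20);
            return 0;
    }

This only stays unambiguous while the counter wraps at most once between reads, which is why the __hw_perf_event_init hunk below halves the default sample_period.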
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
}
static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
- hwc->sample_period = armpmu->max_period;
+ /*
+ * For non-sampling runs, limit the sample_period to half
+ * of the counter width. That way, the new counter value
+ * is far less likely to overtake the previous one unless
+ * you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
@@ -680,6 +680,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
}
/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (cpu_pmu && cpu_pmu->reset)
+ cpu_pmu->reset(NULL);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+ .notifier_call = pmu_cpu_notify,
+};
+
+/*
* CPU PMU identification and registration.
*/
static int __init
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
pr_info("enabled with %s PMU driver, %d counters available\n",
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
+ register_cpu_notifier(&pmu_cpu_notifier);
armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be9930ec2..b78af0cc6ef3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static int counter_is_active(unsigned long pmcr, int idx)
-{
- unsigned long mask = 0;
- if (idx == ARMV6_CYCLE_COUNTER)
- mask = ARMV6_PMCR_CCOUNT_IEN;
- else if (idx == ARMV6_COUNTER0)
- mask = ARMV6_PMCR_COUNT0_IEN;
- else if (idx == ARMV6_COUNTER1)
- mask = ARMV6_PMCR_COUNT1_IEN;
-
- if (mask)
- return pmcr & mask;
-
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return 0;
-}
-
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!counter_is_active(pmcr, idx))
+ /* Ignore if we don't have an event. */
+ if (!event)
continue;
/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6933244c68f9..4d7095af2ab3 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+ isb();
+
return idx;
}
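Clearing the overflow flag when the interrupt is disabled matters because the flag is latched: if an overflow was recorded just before the disable, the stale bit could later be taken for a fresh overflow once the counter index is reused. A toy model of the two registers, one bit per counter, with names loosely following PMINTEN/PMOVSR (not the real register API):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t inten;  /* interrupt enable, like PMINTENSET/CLR */
    static uint32_t ovsr;   /* latched overflow status, like PMOVSR  */

    static void disable_intens(int counter)
    {
            inten &= ~(1u << counter);
            /* Without this, a stale latched overflow could be
             * misread as a new event when the slot is reused. */
            ovsr &= ~(1u << counter);
    }

    int main(void)
    {
            inten = 1u << 3;
            ovsr  = 1u << 3;        /* overflow latched while enabled */
            disable_intens(3);
            assert(!(ovsr & (1u << 3)));
            return 0;
    }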
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d8269829..71a21e6712f5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ if (!event)
+ continue;
+
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+ if (!event)
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- unsigned long flags, ien, evtsel;
+ unsigned long flags, ien, evtsel, of_flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
+ of_flags = XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ of_flags = XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ of_flags = XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ of_flags = XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ of_flags = XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
+ xscale2pmu_write_overflow_flags(of_flags);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 6c5826605eae..719ee423abe2 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -343,6 +343,7 @@ static void __init omap3_check_revision(const char **cpu_rev)
case 0xb944:
omap_revision = AM335X_REV_ES1_0;
*cpu_rev = "1.0";
+ break;
case 0xb8f2:
switch (rev) {
case 0:
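The added break fixes a silent switch fallthrough: without it, a 0xb944 (AM335x) part would fall straight into the 0xb8f2 case and have its revision overwritten. A standalone illustration of the hazard (revision constants are hypothetical):

    #include <assert.h>

    enum { REV_AM335X = 1, REV_OTHER = 2 };     /* illustrative */

    static int classify(int hawkeye)
    {
            int rev = 0;

            switch (hawkeye) {
            case 0xb944:
                    rev = REV_AM335X;
                    break;  /* without this, control falls through and
                             * the next case clobbers the result */
            case 0xb8f2:
                    rev = REV_OTHER;
                    break;
            }
            return rev;
    }

    int main(void)
    {
            assert(classify(0xb944) == REV_AM335X);
            return 0;
    }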
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 2cc1aa004b94..415a6f1cf419 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -420,8 +420,7 @@ static void __exit omap2_mbox_exit(void)
platform_driver_unregister(&omap2_mbox_driver);
}
-/* must be ready before omap3isp is probed */
-subsys_initcall(omap2_mbox_init);
+module_init(omap2_mbox_init);
module_exit(omap2_mbox_exit);
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index b8822048e409..ac49384d0285 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -150,7 +150,8 @@ err_out:
platform_device_put(omap_iommu_pdev[i]);
return err;
}
-module_init(omap_iommu_init);
+/* must be ready before omap3isp is probed */
+subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index ebc595091312..70de277f5c15 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -31,6 +31,7 @@
#include "common.h"
#include "omap4-sar-layout.h"
+#include <linux/export.h>
#ifdef CONFIG_CACHE_L2X0
static void __iomem *l2cache_base;
@@ -55,6 +56,7 @@ void omap_bus_sync(void)
isb();
}
}
+EXPORT_SYMBOL(omap_bus_sync);
/* Steal one page physical memory for barrier implementation */
int __init omap_barrier_reserve_memblock(void)
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 10b20c652e5d..4b57757bf9d1 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -270,7 +270,6 @@ static struct regulator_init_data omap4_vusb_idata = {
.constraints = {
.min_uV = 3300000,
.max_uV = 3300000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
diff --git a/arch/arm/mach-s3c2440/s3c244x.c b/arch/arm/mach-s3c2440/s3c244x.c
index 744930a870eb..d15852f642b7 100644
--- a/arch/arm/mach-s3c2440/s3c244x.c
+++ b/arch/arm/mach-s3c2440/s3c244x.c
@@ -207,3 +207,4 @@ void s3c244x_restart(char mode, const char *cmd)
/* we'll take a jump through zero as a poor second */
soft_restart(0);
+}
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 52af00446a63..c59e8b892d6b 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -5,7 +5,7 @@ config UX500_SOC_COMMON
default y
select ARM_GIC
select HAS_MTU
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_ERRATA_754322
select ARM_ERRATA_764369
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 9b3d0fbaee72..88c3ba151e87 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -7,7 +7,7 @@ config ARCH_VEXPRESS_CA9X4
select ARM_GIC
select ARM_ERRATA_720789
select ARM_ERRATA_751472
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0404ccbb8aa3..f1c8486f7501 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -230,9 +230,7 @@ __v7_setup:
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
- teq r6, #0x20 @ present in r2p0
- teqne r6, #0x21 @ present in r2p1
- teqne r6, #0x22 @ present in r2p2
+ teq r5, #0x00200000 @ only present in r2p*
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
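The old code enumerated r2p0..r2p2 by testing the combined variant/revision byte; the new test checks only the variant field, so any r2 part (r2p*) gets the workaround. In Main ID Register terms the variant sits in bits [23:20], so 0x00200000 selects variant 2. The same check in C, assuming midr holds the raw ID register value:

    #include <assert.h>
    #include <stdint.h>

    #define MIDR_VARIANT_MASK 0x00f00000u   /* bits [23:20] */

    /* True for any Cortex-A9 r2p* part, whatever the minor
     * revision in bits [3:0]. */
    static int is_r2px(uint32_t midr)
    {
            return (midr & MIDR_VARIANT_MASK) == 0x00200000u;
    }

    int main(void)
    {
            assert(is_r2px(0x412fc092));    /* r2p2 */
            assert(is_r2px(0x412fc095));    /* r2p5: now also covered */
            assert(!is_r2px(0x413fc090));   /* r3p0: not affected */
            return 0;
    }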
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 2efd6454bce0..37bbbbb981b2 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -428,8 +428,16 @@
#define OMAP_GPMC_NR_IRQS 8
#define OMAP_GPMC_IRQ_END (OMAP_GPMC_IRQ_BASE + OMAP_GPMC_NR_IRQS)
+/* PRCM IRQ handler */
+#ifdef CONFIG_ARCH_OMAP2PLUS
+#define OMAP_PRCM_IRQ_BASE (OMAP_GPMC_IRQ_END)
+#define OMAP_PRCM_NR_IRQS 64
+#define OMAP_PRCM_IRQ_END (OMAP_PRCM_IRQ_BASE + OMAP_PRCM_NR_IRQS)
+#else
+#define OMAP_PRCM_IRQ_END OMAP_GPMC_IRQ_END
+#endif
-#define NR_IRQS OMAP_GPMC_IRQ_END
+#define NR_IRQS OMAP_PRCM_IRQ_END
#define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32))
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
index 0c77e4298675..abb5bdecd509 100644
--- a/arch/arm/plat-spear/time.c
+++ b/arch/arm/plat-spear/time.c
@@ -145,11 +145,13 @@ static void clockevent_set_mode(enum clock_event_mode mode,
static int clockevent_next_event(unsigned long cycles,
struct clock_event_device *clk_event_dev)
{
- u16 val;
+ u16 val = readw(gpt_base + CR(CLKEVT));
+
+ if (val & CTRL_ENABLE)
+ writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT));
writew(cycles, gpt_base + LOAD(CLKEVT));
- val = readw(gpt_base + CR(CLKEVT));
val |= CTRL_ENABLE | CTRL_INT_ENABLE;
writew(val, gpt_base + CR(CLKEVT));
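The reworked one-shot path now reads the control register first and stops the timer before writing the new LOAD value; reprogramming while the count is still running risks the new period not latching cleanly. A toy model of the fixed sequence (register layout and bit positions are illustrative, not the SPEAr hardware's):

    #include <assert.h>
    #include <stdint.h>

    #define CTRL_ENABLE     (1u << 0)   /* illustrative bits */
    #define CTRL_INT_ENABLE (1u << 1)

    static uint16_t ctrl, load;         /* stand-ins for CR/LOAD */

    static void next_event(uint16_t cycles)
    {
            uint16_t val = ctrl;

            /* Stop a still-running timer before reloading it. */
            if (val & CTRL_ENABLE)
                    ctrl = val & ~CTRL_ENABLE;

            load = cycles;
            ctrl = val | CTRL_ENABLE | CTRL_INT_ENABLE;
    }

    int main(void)
    {
            ctrl = CTRL_ENABLE;         /* previous event still armed */
            next_event(1234);
            assert(load == 1234);
            assert(ctrl & CTRL_ENABLE);
            return 0;
    }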
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index 8154c4ee8c9c..77ecbded1f37 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -122,8 +122,8 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) (task_pt_regs(task)->pc)
-#define KSTK_ESP(tsk) (task_pt_regs(task)->sp)
+#define KSTK_EIP(task) (task_pt_regs(task)->pc)
+#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#define cpu_relax() do { } while (0)
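The c6x hunk renames the macro parameter from tsk to task so it matches the body: as written before, KSTK_EIP(tsk) expanded to task_pt_regs(task), which either fails to compile or, worse, silently binds to whatever variable named task happens to be in scope at the call site. A standalone illustration of that trap:

    #include <assert.h>

    struct regs { unsigned long pc; };

    /* Buggy: parameter 'tsk' is never used; the body captures 'task'
     * from the caller's scope instead. */
    #define BROKEN_EIP(tsk) ((task)->pc)

    /* Fixed: parameter and body agree. */
    #define KSTK_EIP(task)  ((task)->pc)

    int main(void)
    {
            struct regs a = { .pc = 1 }, b = { .pc = 2 };
            struct regs *task = &a;     /* unlucky name in scope */

            assert(BROKEN_EIP(&b) == 1);   /* argument ignored! */
            assert(KSTK_EIP(&b) == 2);
            return 0;
    }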
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index fc45ba887d05..e395693abdb1 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops)
}
/* TSC based delay: */
-static void delay_tsc(unsigned long loops)
+static void delay_tsc(unsigned long __loops)
{
- unsigned long bclock, now;
+ u32 bclock, now, loops = __loops;
int cpu;
preempt_disable();
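Switching bclock/now (and the loop bound) to u32 makes the elapsed-cycles comparison wraparound-safe regardless of word size: u32 subtraction is modulo 2^32, so the difference stays correct even if the low TSC word wraps mid-delay. It is the same modular-delta idea as in the ARM PMU hunk above; a minimal sketch:

    #include <assert.h>
    #include <stdint.h>

    /* Elapsed cycles from the low 32 TSC bits; uint32_t subtraction
     * is modulo 2^32, so one wrap during the delay is harmless. */
    static uint32_t tsc_elapsed(uint32_t bclock, uint32_t now)
    {
            return now - bclock;
    }

    int main(void)
    {
            assert(tsc_elapsed(100, 400) == 300);
            /* Low word wrapped: 0xfffffff0 -> 0x10 is 0x20 cycles. */
            assert(tsc_elapsed(0xfffffff0, 0x10) == 0x20);
            return 0;
    }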