Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/entry-common.S     |  4
-rw-r--r--  arch/arm/kernel/irq.c              |  2
-rw-r--r--  arch/arm/kernel/kprobes-test-arm.c |  4
-rw-r--r--  arch/arm/kernel/machine_kexec.c    |  7
-rw-r--r--  arch/arm/kernel/perf_event.c       |  4
-rw-r--r--  arch/arm/kernel/sched_clock.c      | 18
-rw-r--r--  arch/arm/kernel/smp.c              | 14
-rw-r--r--  arch/arm/kernel/smp_twd.c          |  4
8 files changed, 31 insertions(+), 26 deletions(-)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 417bac1846bd..34711757ba59 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -88,9 +88,9 @@ ENTRY(ret_from_fork)
bl schedule_tail
cmp r5, #0
movne r0, r4
- movne lr, pc
+ adrne lr, BSYM(1f)
movne pc, r5
- get_thread_info tsk
+1: get_thread_info tsk
b ret_slow_syscall
ENDPROC(ret_from_fork)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 16cedb42c0c3..896165096d6a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -34,6 +34,7 @@
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
@@ -109,6 +110,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
/* Order is clear bits in "clr" then set bits in "set" */
irq_modify_status(irq, clr, set & ~clr);
}
+EXPORT_SYMBOL_GPL(set_irq_flags);
void __init init_IRQ(void)
{
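The new EXPORT_SYMBOL_GPL() lets loadable modules call set_irq_flags(), which is also why <linux/export.h> is now included. A minimal, illustrative module sketch (the interrupt number 42 is a placeholder; on kernels of this vintage the declaration and the IRQF_VALID/IRQF_PROBE flags live in ARM's <asm/hw_irq.h>):

	#include <linux/module.h>
	#include <linux/irq.h>
	#include <asm/hw_irq.h>		/* set_irq_flags(), IRQF_VALID, IRQF_PROBE */

	static int __init demo_irq_init(void)
	{
		/* Placeholder irq number, for illustration only. */
		set_irq_flags(42, IRQF_VALID | IRQF_PROBE);
		return 0;
	}
	module_init(demo_irq_init);

	MODULE_LICENSE("GPL");		/* required: the symbol is exported GPL-only */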
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index 38c1a3b103a0..839312905067 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -366,7 +366,9 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
+#endif
+#if __LINUX_ARM_ARCH__ >= 7
TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13")
@@ -456,6 +458,8 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */
#if __LINUX_ARM_ARCH__ >= 6
TEST_UNSUPPORTED("ldrex r2, [sp]")
+#endif
+#if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K)
TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]")
TEST_UNSUPPORTED("ldrexd r2, r3, [sp]")
TEST_UNSUPPORTED("strexb r0, r2, [sp]")
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index e29c3337ca81..8ef8c9337809 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -45,10 +45,9 @@ int machine_kexec_prepare(struct kimage *image)
for (i = 0; i < image->nr_segments; i++) {
current_segment = &image->segment[i];
- err = memblock_is_region_memory(current_segment->mem,
- current_segment->memsz);
- if (err)
- return - EINVAL;
+ if (!memblock_is_region_memory(current_segment->mem,
+ current_segment->memsz))
+ return -EINVAL;
err = get_user(header, (__be32*)current_segment->buf);
if (err)
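memblock_is_region_memory() returns non-zero when the whole region lies inside known memory, so the old code rejected valid segments and let bad ones through; the rewrite inverts the test and drops the misleading use of err. A condensed kernel-style sketch of the corrected check (segments_in_memory() is a made-up helper name):

	#include <linux/kexec.h>
	#include <linux/memblock.h>
	#include <linux/errno.h>

	/* Return 0 only if every segment is backed by real memory. */
	static int segments_in_memory(struct kimage *image)
	{
		unsigned long i;

		for (i = 0; i < image->nr_segments; i++) {
			struct kexec_segment *seg = &image->segment[i];

			/* non-zero means "is memory", so only the false case fails */
			if (!memblock_is_region_memory(seg->mem, seg->memsz))
				return -EINVAL;
		}
		return 0;
	}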
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 93971b1a4f0b..53c0304b734a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -96,6 +96,10 @@ armpmu_event_set_period(struct perf_event *event,
s64 period = hwc->sample_period;
int ret = 0;
+ /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
+ if (unlikely(period != hwc->last_period))
+ left = period - (hwc->last_period - left);
+
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
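The added lines handle a sample period changed at run time through the PERF_EVENT_IOC_PERIOD ioctl: events already consumed under the old period (hwc->last_period - left) are charged against the new one before the counter is reprogrammed. A standalone illustration of the arithmetic, with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		long long last_period = 1000;	/* period the counter was programmed with */
		long long left        = 400;	/* events still outstanding */
		long long period      = 250;	/* new period requested via the ioctl */

		/*
		 * 600 events have already elapsed, so under the new period the
		 * counter is 350 past due; the negative result is then fixed up
		 * by the overflow handling that follows in armpmu_event_set_period().
		 */
		if (period != last_period)
			left = period - (last_period - left);

		printf("left = %lld\n", left);	/* prints -350 */
		return 0;
	}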
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index e21bac20d90d..fc6692e2b603 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks)
update_sched_clock();
}
-void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
- unsigned long rate)
-{
- setup_sched_clock(read, bits, rate);
- cd.needs_suspend = true;
-}
-
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
@@ -189,18 +182,15 @@ void __init sched_clock_postinit(void)
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
- if (cd.needs_suspend)
- cd.suspended = true;
+ cd.suspended = true;
return 0;
}
static void sched_clock_resume(void)
{
- if (cd.needs_suspend) {
- cd.epoch_cyc = read_sched_clock();
- cd.epoch_cyc_copy = cd.epoch_cyc;
- cd.suspended = false;
- }
+ cd.epoch_cyc = read_sched_clock();
+ cd.epoch_cyc_copy = cd.epoch_cyc;
+ cd.suspended = false;
}
static struct syscore_ops sched_clock_ops = {
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8e20754dd31d..fbc8b2623d82 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -294,18 +294,24 @@ static void percpu_timer_setup(void);
asmlinkage void __cpuinit secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ /*
+ * The identity mapping is uncached (strongly ordered), so
+ * switch away from it before attempting any exclusive accesses.
+ */
+ cpu_switch_mm(mm->pgd, mm);
+ enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
+ cpu = smp_processor_id();
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
- cpu_switch_mm(mm->pgd, mm);
- enter_lazy_tlb(mm, current);
- local_flush_tlb_all();
printk("CPU%u: Booted secondary processor\n", cpu);
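The reordering matters because atomic_inc() and cpumask_set_cpu() are built on ldrex/strex, which are only architecturally guaranteed to work on Normal memory, while the temporary identity mapping the secondary CPU boots on is Strongly Ordered. A sketch of the required ordering (illustrative only; secondary_mm_bringup() is not a real kernel function):

	#include <linux/atomic.h>
	#include <linux/mm_types.h>
	#include <asm/mmu_context.h>
	#include <asm/tlbflush.h>

	/* Switch off the Strongly Ordered identity map before the first exclusive. */
	static void secondary_mm_bringup(struct mm_struct *mm)
	{
		cpu_switch_mm(mm->pgd, mm);	/* 1. move onto the kernel page tables   */
		local_flush_tlb_all();		/* 2. drop translations of the old map   */
		atomic_inc(&mm->mm_count);	/* 3. ldrex/strex-based atomics now safe */
	}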
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index e1f906989bb8..b22d700fea27 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -42,10 +42,10 @@ static void twd_set_mode(enum clock_event_mode mode,
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- /* timer load already set up */
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
- __raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
+ __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+ twd_base + TWD_TIMER_LOAD);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* period set, and timer enabled in 'next_event' hook */
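DIV_ROUND_CLOSEST() picks the nearest reload value instead of always rounding down, which reduces the periodic tick error when twd_timer_rate is not an exact multiple of HZ. A quick userspace illustration with made-up numbers (the macro below matches the kernel's behaviour for unsigned operands):

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, divisor)	(((x) + ((divisor) / 2)) / (divisor))

	int main(void)
	{
		unsigned long twd_timer_rate = 399999999UL;	/* a nominal 400 MHz clock that trimmed low */
		unsigned long hz = 100;

		printf("truncated: %lu\n", twd_timer_rate / hz);			/* 3999999 */
		printf("rounded:   %lu\n", DIV_ROUND_CLOSEST(twd_timer_rate, hz));	/* 4000000 */
		return 0;
	}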