author		Will Deacon <will.deacon@arm.com>	2010-12-02 18:01:49 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-12-04 11:18:08 +0000
commit		961ec6daa7b14f376c30d447a830fa4783a2112c
tree		fd4952bd65fa9e991de7687eaef4b2e5af7a0e70 /arch/arm/kernel/perf_event_v7.c
parent		4d6b7a779be34e1df296abc1dc555134a8cf34af
ARM: 6521/1: perf: use raw_spinlock_t for pmu_lock
For kernels built with PREEMPT_RT, critical sections protected
by standard spinlocks are preemptible. This is not acceptable
for perf as (a) we may be scheduled onto a different CPU whilst
reading/writing banked PMU registers and (b) the latency when
reading the PMU registers becomes unpredictable.

This patch upgrades pmu_lock to a raw_spinlock, which remains a
true spinning lock even under PREEMPT_RT.
Reported-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
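
For readers unfamiliar with the distinction: under PREEMPT_RT, spinlock_t is
converted into a sleeping lock, whereas raw_spinlock_t always spins with local
interrupts disabled. The pattern the patch converts to looks roughly like the
sketch below; pmu_write_banked() is a hypothetical helper standing in for the
real armv7_pmnc_* accessors, not code from this patch.

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(pmu_lock);

/* Hypothetical illustration of the locking pattern adopted by the patch. */
static void pmu_write_banked(int idx, u32 val)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts and keeps
	 * spinning even on PREEMPT_RT, so the task can neither be
	 * preempted nor migrated to another CPU while it accesses the
	 * current CPU's banked PMU registers.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* ... read/modify/write the PMU register for counter idx ... */
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

Because the critical section stays non-preemptible, both problems from the
commit message are addressed: the CPU cannot change under us, and the lock
hold time stays bounded.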
Diffstat (limited to 'arch/arm/kernel/perf_event_v7.c')
-rw-r--r--	arch/arm/kernel/perf_event_v7.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index a68ff1c10dec..2e1402556fa0 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -689,7 +689,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);

 	/*
 	 * Disable counter
@@ -713,7 +713,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);

-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -723,7 +723,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	/*
 	 * Disable counter and interrupt
 	 */
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);

 	/*
 	 * Disable counter
@@ -735,7 +735,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);

-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -805,20 +805,20 @@ static void armv7pmu_start(void)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static void armv7pmu_stop(void)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }

 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,