author     Michael Cree <mcree@orcon.net.nz>  2010-09-12 17:37:24 +1200
committer  Ingo Molnar <mingo@elte.hu>        2010-09-15 10:43:59 +0200
commit     65175c07653534294257f75baa03a36edad86870 (patch)
tree       afcf8d92b202673b83413349469f42c4a2976363
parent     d9ca07a05ce1c42ac9717e54eaea4546a3a80978 (diff)
alpha: Fix HW performance counters to be stopped properly
Also fix a few compile errors due to undefined and duplicated variables.

Signed-off-by: Michael Cree <mcree@orcon.net.nz>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1284269844-23251-1-git-send-email-mcree@orcon.net.nz>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/alpha/kernel/perf_event.c  19
1 file changed, 10 insertions(+), 9 deletions(-)
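
The central bug behind counters not being stopped is visible in the alpha_pmu_stop() hunks below: the mask update used `!` (logical NOT) where `~` (bitwise NOT) was intended, so idx_mask was zeroed instead of having one bit cleared, and the matching wrperfmon() call used PERFMON_CMD_ENABLE instead of PERFMON_CMD_DISABLE. A minimal, self-contained sketch of the operator difference (hypothetical mask values, not kernel code):

/* Minimal illustration of the '!' vs '~' bug fixed in alpha_pmu_stop().
 * Made-up values; compiles with any C compiler. */
#include <stdio.h>

int main(void)
{
	unsigned long idx_mask = 0x7UL;  /* counters 0..2 active (made-up mask) */
	int idx = 1;                     /* counter being stopped */

	/* Buggy: !(1UL << idx) is logical NOT, i.e. 0, so the whole mask is wiped. */
	unsigned long wrong = idx_mask & !(1UL << idx);

	/* Fixed: ~(1UL << idx) is bitwise NOT, clearing only bit 'idx'. */
	unsigned long right = idx_mask & ~(1UL << idx);

	printf("wrong = %lu, right = %#lx\n", wrong, right);  /* wrong = 0, right = 0x5 */
	return 0;
}
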
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index a25fe9eb4739..1cc49683fb69 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -422,9 +422,10 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
static int alpha_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
- unsigned long flags;
+ unsigned long irq_flags;
/*
* The Sparc code has the IRQ disable first followed by the perf
@@ -435,7 +436,7 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
* final PMI to occur before we disable interrupts.
*/
perf_pmu_disable(event->pmu);
- local_irq_save(flags);
+ local_irq_save(irq_flags);
/* Default to error to be returned */
ret = -EAGAIN;
@@ -458,7 +459,7 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
if (!(flags & PERF_EF_START))
hwc->state |= PERF_HES_STOPPED;
- local_irq_restore(flags);
+ local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
return ret;
@@ -474,11 +475,11 @@ static void alpha_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- unsigned long flags;
+ unsigned long irq_flags;
int j;
perf_pmu_disable(event->pmu);
- local_irq_save(flags);
+ local_irq_save(irq_flags);
for (j = 0; j < cpuc->n_events; j++) {
if (event == cpuc->event[j]) {
@@ -504,7 +505,7 @@ static void alpha_pmu_del(struct perf_event *event, int flags)
}
}
- local_irq_restore(flags);
+ local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
}
@@ -523,7 +524,7 @@ static void alpha_pmu_stop(struct perf_event *event, int flags)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!(hwc->state & PERF_HES_STOPPED)) {
- cpuc->idx_mask &= !(1UL<<hwc->idx);
+ cpuc->idx_mask &= ~(1UL<<hwc->idx);
hwc->state |= PERF_HES_STOPPED;
}
@@ -533,7 +534,7 @@ static void alpha_pmu_stop(struct perf_event *event, int flags)
}
if (cpuc->enabled)
- wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+ wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}
@@ -849,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
- cpuc->idx_mask &= ~(1UL<<idx);
+ alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
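
The irq_flags rename in alpha_pmu_add()/alpha_pmu_del() above is the "duplicated variables" part of the changelog: both functions already take an int flags parameter carrying the PERF_EF_* flags, so a local unsigned long flags for local_irq_save() collided with it and left the later flags & PERF_EF_START test reading the wrong variable. A reduced sketch of the pattern (PERF_EF_START and the irq macros below are stand-ins so this compiles in userspace, not the kernel definitions):

/* Reduced illustration of the 'flags' clash fixed by the irq_flags rename. */
#include <stdio.h>

#define PERF_EF_START 0x01
#define local_irq_save(f)	((f) = 0UL)	/* placeholder, not the real macro */
#define local_irq_restore(f)	((void)(f))

static int pmu_add(int flags)			/* PERF_EF_* flags from the core */
{
	unsigned long irq_flags;		/* renamed: no clash with 'flags' */

	local_irq_save(irq_flags);
	/* ... claim a hardware counter here ... */
	if (!(flags & PERF_EF_START))		/* still sees the int parameter */
		printf("event added in stopped state\n");
	local_irq_restore(irq_flags);
	return 0;
}

int main(void)
{
	return pmu_add(0);
}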