author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-05-25 17:39:04 +0200
committer Ingo Molnar <mingo@elte.hu>                2009-05-25 21:41:12 +0200
commit    48e22d56ecdeddd1ffb42a02fccba5c6ef42b133 (patch)
tree      43f69f34e888053a9a8b6405995a2ae9f5b173e2 /arch
parent    ff99be573e02e9f7edc23b472c7f9a5ddba12795 (diff)
perf_counter: x86: Remove interrupt throttle
remove the x86 specific interrupt throttle

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525153931.616671838@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
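For context, the code deleted below implemented a simple per-CPU rate limit: each PMU interrupt incremented cpuc->interrupts, and once PERFMON_MAX_INTERRUPTS (100000/HZ, i.e. roughly 100 interrupts per tick at HZ=1000, which works out to 100 KHz per CPU) was reached the PMU was masked until the local APIC timer tick called perf_counter_unthrottle() to clear the count and re-enable it. What follows is a minimal user-space sketch of that throttle, reconstructed from the removed hunks; the HZ value, the simplified struct and the sketch's function names are illustrative assumptions, not the kernel's actual definitions.

    /* Sketch of the removed per-CPU interrupt throttle (assumptions noted above). */
    #include <stdio.h>

    #define HZ                     1000            /* assumed timer tick rate */
    #define PERFMON_MAX_INTERRUPTS (100000 / HZ)   /* 100 KHz per CPU, as in the removed code */

    struct cpu_hw_counters_sketch {
            int interrupts;  /* PMU interrupts seen since the last timer tick */
            int enabled;     /* is the PMU currently enabled? */
    };

    /* Models the check the x86 PMU interrupt handlers used to do. */
    static void pmu_interrupt(struct cpu_hw_counters_sketch *cpuc)
    {
            if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS)
                    cpuc->enabled = 0;      /* too many interrupts this tick: mask the PMU */
    }

    /* Models perf_counter_unthrottle(), called from the local APIC timer interrupt. */
    static void timer_tick(struct cpu_hw_counters_sketch *cpuc)
    {
            if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
                    cpuc->enabled = 1;      /* re-enable the PMU once per tick */
            cpuc->interrupts = 0;           /* reset the per-tick budget */
    }

    int main(void)
    {
            struct cpu_hw_counters_sketch cpuc = { 0, 1 };
            int i;

            for (i = 0; i < 150; i++)       /* simulate an interrupt storm within one tick */
                    pmu_interrupt(&cpuc);
            printf("after storm: enabled=%d interrupts=%d\n", cpuc.enabled, cpuc.interrupts);

            timer_tick(&cpuc);
            printf("after tick:  enabled=%d interrupts=%d\n", cpuc.enabled, cpuc.interrupts);
            return 0;
    }

After this change the handlers simply re-enable the PMU unconditionally (the bare perf_enable() left in the Intel path), and a counter is only disabled when perf_counter_overflow() asks for it in the AMD path.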
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/apic/apic.c          2
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  47
2 files changed, 5 insertions, 44 deletions
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b4f64402a82a..89b63b5fad33 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -763,8 +763,6 @@ static void local_apic_timer_interrupt(void)
inc_irq_stat(apic_timer_irqs);
evt->event_handler(evt);
-
- perf_counter_unthrottle();
}
/*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c14437faf5d2..8c8177f859fe 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -719,11 +719,6 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
}
/*
- * Maximum interrupt frequency of 100KHz per CPU
- */
-#define PERFMON_MAX_INTERRUPTS (100000/HZ)
-
-/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
*/
@@ -775,15 +770,14 @@ again:
if (status)
goto again;
- if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS)
- perf_enable();
+ perf_enable();
return 1;
}
static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
- int cpu, idx, throttle = 0, handled = 0;
+ int cpu, idx, handled = 0;
struct cpu_hw_counters *cpuc;
struct perf_counter *counter;
struct hw_perf_counter *hwc;
@@ -792,16 +786,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_counters, cpu);
- if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
- throttle = 1;
- __perf_disable();
- cpuc->enabled = 0;
- barrier();
- }
-
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- int disable = 0;
-
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -809,45 +794,23 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
hwc = &counter->hw;
if (counter->hw_event.nmi != nmi)
- goto next;
+ continue;
val = x86_perf_counter_update(counter, hwc, idx);
if (val & (1ULL << (x86_pmu.counter_bits - 1)))
- goto next;
+ continue;
/* counter overflow */
x86_perf_counter_set_period(counter, hwc, idx);
handled = 1;
inc_irq_stat(apic_perf_irqs);
- disable = perf_counter_overflow(counter, nmi, regs, 0);
-
-next:
- if (disable || throttle)
+ if (perf_counter_overflow(counter, nmi, regs, 0))
amd_pmu_disable_counter(hwc, idx);
}
return handled;
}
-void perf_counter_unthrottle(void)
-{
- struct cpu_hw_counters *cpuc;
-
- if (!x86_pmu_initialized())
- return;
-
- cpuc = &__get_cpu_var(cpu_hw_counters);
- if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
- /*
- * Clear them before re-enabling irqs/NMIs again:
- */
- cpuc->interrupts = 0;
- perf_enable();
- } else {
- cpuc->interrupts = 0;
- }
-}
-
void smp_perf_counter_interrupt(struct pt_regs *regs)
{
irq_enter();