author     Joerg Roedel <joerg.roedel@amd.com>    2007-05-02 19:27:06 +0200
committer  Andi Kleen <andi@basil.nowhere.org>    2007-05-02 19:27:06 +0200
commit     6b37f5a20c0e5c334c010a587058354215433e92
tree       de875cded9229b46f94a10aa2c1379060247280e
parent     fbc16f2c2a0e16dbd75ac85d3b6db97f92b642ba
[PATCH] x86-64: fix cpu MHz reporting on constant_tsc cpus
This patch fixes the "cpu MHz" value reported in /proc/cpuinfo on CPUs with
a constant TSC rate and a kernel with cpufreq disabled.
Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Andi Kleen <ak@suse.de>
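Background on the mechanism: on constant-TSC CPUs the TSC ticks at a fixed rate (typically the highest P-state frequency) even while the core runs at a lower clock, so the patch keeps the TSC calibration result in a new tsc_khz variable and, on the AMD CPUs it checks for (family 16), derives cpu_khz by counting unhalted core cycles with a performance counter over a fixed TSC interval. Below is a minimal user-space sketch of that arithmetic; the numbers are invented for illustration and this is not code from the patch:

    /* Sketch of the calibration arithmetic; invented example values,
     * ordinary user-space C, not kernel code. */
    #include <stdio.h>

    int main(void)
    {
    	/* assumed: the TSC runs at a fixed rate of 2200 MHz */
    	unsigned long long tsc_khz = 2200000;
    	/* assumed sample: the TSC advanced 100,000,000 ticks while the
    	 * "CPU clocks not halted" performance counter saw 45,454,545
    	 * core cycles, i.e. the core was running at a lower P-state */
    	unsigned long long tsc_delta = 100000000;
    	unsigned long long pmc_delta = 45454545;

    	/* same formula the patch uses:
    	 * pmc_now * tsc_khz / (tsc_now - tsc_start) */
    	unsigned long long cpu_khz = pmc_delta * tsc_khz / tsc_delta;

    	printf("cpu MHz : %llu.%03llu\n", cpu_khz / 1000, cpu_khz % 1000);
    	return 0;
    }

With these example numbers the TSC calibrates to 2200 MHz while the reported "cpu MHz" comes out near 1000 MHz, which is the discrepancy the patch makes visible correctly.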
arch/x86_64/kernel/apic.c | 2 -
arch/x86_64/kernel/time.c | 58 +++++++++++++++++++++++++++++++++++++++---
arch/x86_64/kernel/tsc.c | 12 +++++---
arch/x86_64/kernel/tsc_sync.c | 2 -
include/asm-x86_64/proto.h | 1 +
5 files changed, 65 insertions(+), 10 deletions(-)
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index bd3e45d47c37..3421f21b6c70 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -843,7 +843,7 @@ static int __init calibrate_APIC_clock(void)
 	} while ((tsc - tsc_start) < TICK_COUNT &&
 		(apic - apic_start) < TICK_COUNT);
 
-	result = (apic_start - apic) * 1000L * cpu_khz /
+	result = (apic_start - apic) * 1000L * tsc_khz /
 				(tsc - tsc_start);
 	}
 	printk("result %d\n", result);
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 811b8f987b50..5f862e216a42 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -43,6 +43,7 @@
 #include <asm/apic.h>
 #include <asm/hpet.h>
 #include <asm/mpspec.h>
+#include <asm/nmi.h>
 
 static char *timename = NULL;
 
@@ -249,6 +250,51 @@ static unsigned long get_cmos_time(void)
 	return mktime(year, mon, day, hour, min, sec);
 }
 
+/* calibrate_cpu is used on systems with fixed rate TSCs to determine
+ * processor frequency */
+#define TICK_COUNT 100000000
+static unsigned int __init tsc_calibrate_cpu_khz(void)
+{
+	int tsc_start, tsc_now;
+	int i, no_ctr_free;
+	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
+	unsigned long flags;
+
+	for (i = 0; i < 4; i++)
+		if (avail_to_resrv_perfctr_nmi_bit(i))
+			break;
+	no_ctr_free = (i == 4);
+	if (no_ctr_free) {
+		i = 3;
+		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		rdmsrl(MSR_K7_PERFCTR3, pmc3);
+	} else {
+		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+	local_irq_save(flags);
+	/* start meauring cycles, incrementing from 0 */
+	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
+	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
+	rdtscl(tsc_start);
+	do {
+		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+		tsc_now = get_cycles_sync();
+	} while ((tsc_now - tsc_start) < TICK_COUNT);
+
+	local_irq_restore(flags);
+	if (no_ctr_free) {
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		wrmsrl(MSR_K7_PERFCTR3, pmc3);
+		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
+	} else {
+		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+
+	return pmc_now * tsc_khz / (tsc_now - tsc_start);
+}
 /*
  * pit_calibrate_tsc() uses the speaker output (channel 2) of
@@ -336,14 +382,20 @@ void __init time_init(void)
 	if (hpet_use_timer) {
 		/* set tick_nsec to use the proper rate for HPET */
 		tick_nsec = TICK_NSEC_HPET;
-		cpu_khz = hpet_calibrate_tsc();
+		tsc_khz = hpet_calibrate_tsc();
 		timename = "HPET";
 	} else {
 		pit_init();
-		cpu_khz = pit_calibrate_tsc();
+		tsc_khz = pit_calibrate_tsc();
 		timename = "PIT";
 	}
 
+	cpu_khz = tsc_khz;
+	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
+		boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+		boot_cpu_data.x86 == 16)
+		cpu_khz = tsc_calibrate_cpu_khz();
+
 	if (unsynchronized_tsc())
 		mark_tsc_unstable();
 
@@ -352,7 +404,7 @@
 	else
 		vgetcpu_mode = VGETCPU_LSL;
 
-	set_cyc2ns_scale(cpu_khz);
+	set_cyc2ns_scale(tsc_khz);
 	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
 		cpu_khz / 1000, cpu_khz % 1000);
 	init_tsc_clocksource();
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index 1a0edbbffaa0..5c84992c676d 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -13,6 +13,8 @@
 static int notsc __initdata = 0;
 
 unsigned int cpu_khz;		/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
+unsigned int tsc_khz;
+EXPORT_SYMBOL(tsc_khz);
 
 static unsigned int cyc2ns_scale __read_mostly;
@@ -77,7 +79,7 @@ static void handle_cpufreq_delayed_get(struct work_struct *v)
 
 static unsigned int ref_freq = 0;
 static unsigned long loops_per_jiffy_ref = 0;
-static unsigned long cpu_khz_ref = 0;
+static unsigned long tsc_khz_ref = 0;
 
 static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 				 void *data)
@@ -99,7 +101,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	if (!ref_freq) {
 		ref_freq = freq->old;
 		loops_per_jiffy_ref = *lpj;
-		cpu_khz_ref = cpu_khz;
+		tsc_khz_ref = tsc_khz;
 	}
 	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
 	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
@@ -107,12 +109,12 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		*lpj =
 		cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
 
-		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
+		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 			mark_tsc_unstable();
 	}
 
-	set_cyc2ns_scale(cpu_khz_ref);
+	set_cyc2ns_scale(tsc_khz_ref);
 
 	return 0;
 }
@@ -213,7 +215,7 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 void __init init_tsc_clocksource(void)
 {
 	if (!notsc) {
-		clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
+		clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
 					clocksource_tsc.shift);
 		if (check_tsc_unstable())
 			clocksource_tsc.rating = 0;
diff --git a/arch/x86_64/kernel/tsc_sync.c b/arch/x86_64/kernel/tsc_sync.c
index 014f0db45dfa..72d444dede9b 100644
--- a/arch/x86_64/kernel/tsc_sync.c
+++ b/arch/x86_64/kernel/tsc_sync.c
@@ -50,7 +50,7 @@ static __cpuinit void check_tsc_warp(void)
 	/*
 	 * The measurement runs for 20 msecs:
 	 */
-	end = start + cpu_khz * 20ULL;
+	end = start + tsc_khz * 20ULL;
 	now = start;
 
 	for (i = 0; ; i++) {
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index f64949fae61a..78427021d94a 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -92,6 +92,7 @@
 extern unsigned long table_start, table_end;
 
 extern int exception_trace;
 extern unsigned cpu_khz;
+extern unsigned tsc_khz;
 extern void no_iommu_init(void);
 extern int force_iommu, no_iommu;
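For reference, the event-select value 1 << 22 | 3 << 16 | 0x76 written to MSR_K7_EVNTSEL0 + i in the new tsc_calibrate_cpu_khz() enables the counter (bit 22), counts in both user and kernel mode (bits 16-17), and selects AMD event 0x76, "CPU clocks not halted". A small stand-alone sketch of how that value decomposes; the macro names are illustrative, not kernel defines:

    /* Sketch of the EVNTSEL encoding used by the calibration routine;
     * macro names are made up for readability. */
    #include <stdio.h>

    #define EVNTSEL_USR_OS          (3ULL << 16)  /* bits 16-17: count in user and kernel mode */
    #define EVNTSEL_ENABLE          (1ULL << 22)  /* bit 22: enable the counter */
    #define EVENT_CPU_CLK_UNHALTED  0x76ULL       /* AMD event 0x76: CPU clocks not halted */

    int main(void)
    {
    	unsigned long long evntsel = EVNTSEL_ENABLE | EVNTSEL_USR_OS |
    				     EVENT_CPU_CLK_UNHALTED;

    	/* equals the literal 1 << 22 | 3 << 16 | 0x76 written in the patch */
    	printf("EVNTSEL = %#llx\n", evntsel);
    	return 0;
    }

Counting this event against a known number of TSC ticks is what lets the patch compute the real core frequency even when the core is clocked below the fixed TSC rate.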