Diffstat (limited to 'arch/x86/kernel/cpu')

-rw-r--r--  arch/x86/kernel/cpu/amd.c           |  4 +
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  | 83 +++++++++-
 2 files changed, 85 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 25423a5b80ed..edcde52bd170 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -368,6 +368,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	if (c->x86 >= 6)
 		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
+	/* Enable Performance counter for K7 and later */
+	if (c->x86 > 6 && c->x86 <= 0x11)
+		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
+
 	if (!c->x86_model_id[0]) {
 		switch (c->x86) {
 		case 0xf:
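
Not part of the patch itself, but as a quick reference for the hunk above: a stand-alone sketch of the family-range test that decides which AMD parts get the synthetic X86_FEATURE_ARCH_PERFMON bit. The helper name and the sample family values are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the range test added to init_amd():
 * only reported families in (6, 0x11] get X86_FEATURE_ARCH_PERFMON. */
static bool amd_gets_arch_perfmon(unsigned int family)
{
	return family > 6 && family <= 0x11;
}

int main(void)
{
	static const unsigned int families[] = { 0x6, 0xf, 0x10, 0x11, 0x12 };

	for (size_t i = 0; i < sizeof(families) / sizeof(families[0]); i++)
		printf("family 0x%02x: %s\n", families[i],
		       amd_gets_arch_perfmon(families[i]) ? "yes" : "no");
	return 0;
}
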
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index a3c88529bb72..266618aa1a03 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -74,6 +74,24 @@ static int pmc_intel_event_map(int event)
 }
 
 /*
+ * AMD Performance Monitor K7 and later.
+ */
+static const int amd_perfmon_event_map[] =
+{
+	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
+	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
+	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
+	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
+	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
+	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
+};
+
+static int pmc_amd_event_map(int event)
+{
+	return amd_perfmon_event_map[event];
+}
+
+/*
  * Propagate counter elapsed time into the generic counter.
  * Can only be executed on the CPU where the counter is active.
  * Returns the delta events processed.
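
A note on how the table above is consumed (sketch, not part of the patch): the generic layer is expected to validate the event index against pmc_ops->max_events before pmc_amd_event_map() indexes the array, so the map itself stays a plain lookup. The stand-alone enum below only assumes the same ordering as the kernel's generic event indices.

#include <stdio.h>

/* Generic event index -> K7 event-select code, mirroring the patch.
 * The enum ordering is assumed to match the kernel's PERF_COUNT_* values. */
enum {
	PERF_COUNT_CPU_CYCLES,
	PERF_COUNT_INSTRUCTIONS,
	PERF_COUNT_CACHE_REFERENCES,
	PERF_COUNT_CACHE_MISSES,
	PERF_COUNT_BRANCH_INSTRUCTIONS,
	PERF_COUNT_BRANCH_MISSES,
	PERF_HW_EVENTS_MAX,
};

static const int amd_perfmon_event_map[PERF_HW_EVENTS_MAX] = {
	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static int amd_event_map(int event)
{
	/* Bounds check added here only because this sketch has no caller
	 * doing the max_events check for it. */
	return event < PERF_HW_EVENTS_MAX ? amd_perfmon_event_map[event] : -1;
}

int main(void)
{
	printf("cycles event-select code: 0x%04x\n",
	       amd_event_map(PERF_COUNT_CPU_CYCLES));
	return 0;
}
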
@@ -151,8 +169,9 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	 * so we install an artificial 1<<31 period regardless of
 	 * the generic counter period:
 	 */
-	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
-		hwc->irq_period = 0x7FFFFFFF;
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
+			hwc->irq_period = 0x7FFFFFFF;
 
 	atomic64_set(&hwc->period_left, hwc->irq_period);
 
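
The clamp in the hunk above now runs only on Intel; a minimal stand-alone version of it is below (not part of the patch). The AMD path skips the cap, presumably because the K7-style counters are wide enough to take the requested period as-is.

#include <stdint.h>
#include <stdio.h>

/* Stand-alone version of the Intel-only period clamp: anything outside
 * (0, 2^31 - 1] is forced to 2^31 - 1. */
static uint64_t clamp_irq_period(uint64_t period)
{
	if ((int64_t)period <= 0 || period > 0x7FFFFFFF)
		return 0x7FFFFFFF;
	return period;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)clamp_irq_period(0));          /* 2147483647 */
	printf("%llu\n", (unsigned long long)clamp_irq_period(100000));     /* 100000 */
	printf("%llu\n", (unsigned long long)clamp_irq_period(1ULL << 40)); /* 2147483647 */
	return 0;
}
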
@@ -184,6 +203,22 @@ static u64 pmc_intel_save_disable_all(void)
 	return ctrl;
 }
 
+static u64 pmc_amd_save_disable_all(void)
+{
+	int idx;
+	u64 val, ctrl = 0;
+
+	for (idx = 0; idx < nr_counters_generic; idx++) {
+		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			ctrl |= (1 << idx);
+		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+	}
+
+	return ctrl;
+}
+
 u64 hw_perf_save_disable(void)
 {
 	if (unlikely(!perf_counters_initialized))
@@ -198,6 +233,20 @@ static void pmc_intel_restore_all(u64 ctrl)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 }
 
+static void pmc_amd_restore_all(u64 ctrl)
+{
+	u64 val;
+	int idx;
+
+	for (idx = 0; idx < nr_counters_generic; idx++) {
+		if (ctrl & (1 << idx)) {
+			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		}
+	}
+}
+
 void hw_perf_restore(u64 ctrl)
 {
 	if (unlikely(!perf_counters_initialized))
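
Not part of the patch: a user-space sketch of the enable-bit bookkeeping that pmc_amd_save_disable_all() and pmc_amd_restore_all() implement. Unlike the Intel path, which can write MSR_CORE_PERF_GLOBAL_CTRL once, the K7-style PMU has no global control MSR, so the returned ctrl value is a hand-built bitmask of which per-counter event-select registers had their enable bit set. The array below is a mock register file standing in for MSR_K7_EVNTSEL0..3, and the bit-22 define stands in for ARCH_PERFMON_EVENTSEL0_ENABLE.

#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS	4
#define EVNTSEL_ENABLE	(1ULL << 22)	/* enable bit of the event-select register */

/* Mock register file; rdmsrl()/wrmsrl() become plain array accesses here. */
static uint64_t evntsel[NR_COUNTERS];

static uint64_t save_disable_all(void)
{
	uint64_t ctrl = 0;
	int idx;

	for (idx = 0; idx < NR_COUNTERS; idx++) {
		if (evntsel[idx] & EVNTSEL_ENABLE)
			ctrl |= 1ULL << idx;		/* remember it was running */
		evntsel[idx] &= ~EVNTSEL_ENABLE;	/* ...then stop it */
	}
	return ctrl;
}

static void restore_all(uint64_t ctrl)
{
	int idx;

	for (idx = 0; idx < NR_COUNTERS; idx++)
		if (ctrl & (1ULL << idx))
			evntsel[idx] |= EVNTSEL_ENABLE;
}

int main(void)
{
	uint64_t ctrl;

	evntsel[0] = EVNTSEL_ENABLE | 0x76;	/* cycles, enabled */
	evntsel[2] = 0xc0;			/* instructions, not enabled */

	ctrl = save_disable_all();
	printf("ctrl mask: 0x%llx\n", (unsigned long long)ctrl);	/* 0x1 */
	restore_all(ctrl);
	printf("counter 0 re-enabled: %d\n", !!(evntsel[0] & EVNTSEL_ENABLE));
	return 0;
}
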
@@ -314,6 +363,9 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 {
 	unsigned int event;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return -1;
+
 	if (unlikely(hwc->nmi))
 		return -1;
 
@@ -401,6 +453,7 @@ void perf_counter_print_debug(void)
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
 	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 	rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
@@ -411,6 +464,7 @@ void perf_counter_print_debug(void)
 	printk(KERN_INFO "CPU#%d: status: %016llx\n", cpu, status);
 	printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);
 	printk(KERN_INFO "CPU#%d: fixed: %016llx\n", cpu, fixed);
+	}
 	printk(KERN_INFO "CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
 
 	for (idx = 0; idx < nr_counters_generic; idx++) {
@@ -588,6 +642,9 @@ void perf_counter_unthrottle(void)
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
 		return;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return;
+
 	if (unlikely(!perf_counters_initialized))
 		return;
 
@@ -692,6 +749,15 @@ static struct pmc_x86_ops pmc_intel_ops = {
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 };
 
+static struct pmc_x86_ops pmc_amd_ops = {
+	.save_disable_all	= pmc_amd_save_disable_all,
+	.restore_all		= pmc_amd_restore_all,
+	.eventsel		= MSR_K7_EVNTSEL0,
+	.perfctr		= MSR_K7_PERFCTR0,
+	.event_map		= pmc_amd_event_map,
+	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
+};
+
 static struct pmc_x86_ops *pmc_intel_init(void)
 {
 	union cpuid10_eax eax;
@@ -719,6 +785,16 @@ static struct pmc_x86_ops *pmc_intel_init(void)
 	return &pmc_intel_ops;
 }
 
+static struct pmc_x86_ops *pmc_amd_init(void)
+{
+	nr_counters_generic = 4;
+	nr_counters_fixed = 0;
+
+	printk(KERN_INFO "AMD Performance Monitoring support detected.\n");
+
+	return &pmc_amd_ops;
+}
+
 void __init init_hw_perf_counters(void)
 {
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
@@ -728,6 +804,9 @@ void __init init_hw_perf_counters(void)
 	case X86_VENDOR_INTEL:
 		pmc_ops = pmc_intel_init();
 		break;
+	case X86_VENDOR_AMD:
+		pmc_ops = pmc_amd_init();
+		break;
 	}
 	if (!pmc_ops)
 		return;
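
Finally, a stand-alone sketch (not part of the patch) of the dispatch shape the last three hunks set up: a per-vendor ops table chosen once in init_hw_perf_counters(), after which the generic code only ever calls through pmc_ops. Field types, the stub bodies, and the vendor enum here are assumptions made to keep the example self-contained; the field names follow the diff, and the numeric MSR bases are the usual MSR_K7_EVNTSEL0/MSR_K7_PERFCTR0 values.

#include <stdint.h>
#include <stdio.h>

/* Field names follow struct pmc_x86_ops as used in the diff; exact kernel
 * types are assumed. */
struct pmc_x86_ops {
	uint64_t	(*save_disable_all)(void);
	void		(*restore_all)(uint64_t ctrl);
	unsigned int	eventsel;	/* first event-select MSR of the bank */
	unsigned int	perfctr;	/* first counter MSR of the bank */
	int		(*event_map)(int event);
	int		max_events;
};

/* Stub bodies; the real ones are in the hunks above. */
static uint64_t	amd_save_disable_all(void)	{ return 0; }
static void	amd_restore_all(uint64_t ctrl)	{ (void)ctrl; }
static int	amd_event_map(int event)	{ return event; }

static const struct pmc_x86_ops pmc_amd_ops = {
	.save_disable_all	= amd_save_disable_all,
	.restore_all		= amd_restore_all,
	.eventsel		= 0xc0010000,	/* MSR_K7_EVNTSEL0 */
	.perfctr		= 0xc0010004,	/* MSR_K7_PERFCTR0 */
	.event_map		= amd_event_map,
	.max_events		= 6,
};

static const struct pmc_x86_ops *pmc_ops;

enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

/* Mirrors the vendor switch in init_hw_perf_counters(): pick an ops table,
 * bail out if the vendor is not handled. */
static int init_hw_perf(enum vendor v)
{
	switch (v) {
	case VENDOR_AMD:
		pmc_ops = &pmc_amd_ops;
		break;
	default:
		break;
	}
	return pmc_ops ? 0 : -1;
}

int main(void)
{
	if (init_hw_perf(VENDOR_AMD))
		return 1;

	/* The generic code addresses counter idx as pmc_ops->eventsel + idx. */
	printf("EVNTSEL MSR for counter 2: 0x%x\n", pmc_ops->eventsel + 2);
	printf("event code for index 0: 0x%x\n", pmc_ops->event_map(0));
	return 0;
}
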