Diffstat (limited to 'arch/x86/events/intel/ds.c')
-rw-r--r--	arch/x86/events/intel/ds.c	54
1 file changed, 53 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index f1269e804e9b..dc43cc124e09 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -7,6 +7,7 @@
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
+#include <asm/io.h>
#include "../perf_event.h"
@@ -902,6 +903,9 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
*/
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
+ if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
+ return false;
+
return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}
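
Annotation: the sched callback exists to flush the DS buffer on context
switch for large-PEBS sessions; when every active PEBS event routes its
records into the PT buffer instead (n_pebs == n_pebs_via_pt), the DS
buffer holds nothing and the callback can be skipped. This hunk leans on
declarations from companion changes in the same series; a sketch of what
they look like (names match the diff, the flag's bit value is
illustrative):

/* arch/x86/events/perf_event.h (companion change, sketch) */
#define PERF_X86_EVENT_PEBS_VIA_PT	0x0800 /* bit value illustrative */

struct cpu_hw_events {
	/* ... */
	int	n_pebs;		/* active PEBS events */
	int	n_large_pebs;	/* ... using the large DS buffer */
	int	n_pebs_via_pt;	/* ... outputting into the PT buffer */
	/* ... */
};

static inline bool is_pebs_pt(struct perf_event *event)
{
	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
}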
@@ -919,6 +923,9 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
u64 threshold;
int reserved;
+ if (cpuc->n_pebs_via_pt)
+ return;
+
if (x86_pmu.flags & PMU_FL_PEBS_ALL)
reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
else
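
Annotation: the early return fits the rest of this function, which
programs the DS interrupt threshold; that threshold is meaningless when
records bypass DS and go to PT. For orientation, the remainder of
pebs_update_threshold() in ds.c computes roughly (a sketch from the
surrounding code, field names approximate):

	if (cpuc->n_pebs == cpuc->n_large_pebs)
		/* all large-PEBS: interrupt near the end of the buffer */
		threshold = ds->pebs_absolute_maximum -
			    reserved * cpuc->pebs_record_size;
	else
		/* otherwise: interrupt after every single record */
		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;

	ds->pebs_interrupt_threshold = threshold;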
@@ -1059,10 +1066,40 @@ void intel_pmu_pebs_add(struct perf_event *event)
cpuc->n_pebs++;
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs++;
+ if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
+ cpuc->n_pebs_via_pt++;
pebs_update_state(needed_cb, cpuc, event, true);
}
+static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ if (!is_pebs_pt(event))
+ return;
+
+ if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
+ cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
+}
+
+static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ struct debug_store *ds = cpuc->ds;
+
+ if (!is_pebs_pt(event))
+ return;
+
+ if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
+ cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;
+
+ cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
+
+ wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
+}
+
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
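
Annotation: intel_pmu_pebs_via_pt_enable()/_disable() above manipulate
new control bits in MSR_IA32_PEBS_ENABLE and program a per-counter
reload MSR. The constants come from companion changes to
asm/perf_event.h and asm/msr-index.h; roughly (a sketch, the MSR address
in particular should be checked against the SDM):

/* asm/perf_event.h (companion change, sketch) */
#define PEBS_PMI_AFTER_EACH_RECORD	BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET		61
#define PEBS_OUTPUT_MASK		(3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT			(1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK		(PEBS_OUTPUT_PT | \
					 PEBS_PMI_AFTER_EACH_RECORD)

/* asm/msr-index.h (companion change, sketch) */
#define MSR_RELOAD_PMC0			0x000014c1

Note the asymmetry in the disable path: the via-PT bits are shared by
all via-PT events on this CPU, so they are only cleared once no bits
outside PEBS_VIA_PT_MASK remain set in the pebs_enabled shadow.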
@@ -1100,6 +1137,8 @@ void intel_pmu_pebs_enable(struct perf_event *event)
} else {
ds->pebs_event_reset[hwc->idx] = 0;
}
+
+ intel_pmu_pebs_via_pt_enable(event);
}
void intel_pmu_pebs_del(struct perf_event *event)
@@ -1111,6 +1150,8 @@ void intel_pmu_pebs_del(struct perf_event *event)
cpuc->n_pebs--;
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs--;
+ if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
+ cpuc->n_pebs_via_pt--;
pebs_update_state(needed_cb, cpuc, event, false);
}
@@ -1120,7 +1161,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- if (cpuc->n_pebs == cpuc->n_large_pebs)
+ if (cpuc->n_pebs == cpuc->n_large_pebs &&
+ cpuc->n_pebs != cpuc->n_pebs_via_pt)
intel_pmu_drain_pebs_buffer();
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -1131,6 +1173,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
cpuc->pebs_enabled &= ~(1ULL << 63);
+ intel_pmu_pebs_via_pt_disable(event);
+
if (cpuc->enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
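
Annotation: two things change here. The drain condition is narrowed
because a session where all large-PEBS events also output to PT has an
empty DS buffer, so there is nothing to drain. For orientation, the
pebs_enabled shadow mirrors MSR_IA32_PEBS_ENABLE, whose fields as used
in this file are roughly (illustrative summary; the SDM is
authoritative):

/*
 * MSR_IA32_PEBS_ENABLE, as manipulated in ds.c (sketch):
 *   bit  idx       PEBS enable for general-purpose counter idx
 *   bit  idx + 32  load-latency qualification for counter idx
 *   bit  63        precise-store enable (older parts)
 *   bits 60..62    PEBS-via-PT control added by this patch
 *                  (PEBS_PMI_AFTER_EACH_RECORD, PEBS_OUTPUT_*)
 */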
@@ -1670,6 +1714,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
old = ((s64)(prev_raw_count << shift) >> shift);
local64_add(new - old + count * period, &event->count);
+ local64_set(&hwc->period_left, -new);
+
perf_event_update_userpage(event);
return 0;
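
Annotation: "new" is the sign-extended current counter value; with
auto-reload the counter is programmed to a negative value and counts up
toward overflow, so -new is the number of events still outstanding,
which is exactly what hwc->period_left must hold. A standalone
illustration of the same sign-extension arithmetic (the 48-bit counter
width is hypothetical; the real width comes from CPUID):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Sign-extend a raw PMC value the same way
	 * intel_pmu_save_and_restart_reload() does. */
	int shift = 64 - 48;			/* hypothetical 48-bit PMC */
	uint64_t raw = 0x0000ffffffffff00ULL;	/* 256 events to overflow */
	int64_t new_count = (int64_t)(raw << shift) >> shift;

	/* new_count is negative; -new_count is what period_left gets */
	printf("new = %lld, period_left = %lld\n",
	       (long long)new_count, (long long)-new_count);
	return 0;
}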
@@ -2031,6 +2077,12 @@ void __init intel_ds_init(void)
PERF_SAMPLE_REGS_INTR);
}
pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+
+ if (x86_pmu.intel_cap.pebs_output_pt_available) {
+ pr_cont("PEBS-via-PT, ");
+ x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+ }
+
break;
default:
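
Annotation: PERF_PMU_CAP_AUX_OUTPUT is what lets perf core accept
perf_event_attr.aux_output on an event grouped under an intel_pt leader
(the generic aux_output attribute bit lands elsewhere in this series).
A minimal user-space sketch of how the capability is consumed; the
intel_pt PMU type number is a placeholder that real code would read
from sysfs, and error handling is omitted:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd,
		       flags);
}

int main(void)
{
	struct perf_event_attr pt = { 0 }, pebs = { 0 };
	int pt_fd, pebs_fd;

	/* Group leader: an intel_pt event. Read the real type number
	 * from /sys/bus/event_source/devices/intel_pt/type. */
	pt.size = sizeof(pt);
	pt.type = 8;			/* placeholder */
	pt_fd = perf_event_open(&pt, 0, -1, -1, 0);

	/* A precise (PEBS) event whose records should be written into
	 * the leader's AUX (PT) buffer instead of the DS area. */
	pebs.size = sizeof(pebs);
	pebs.type = PERF_TYPE_HARDWARE;
	pebs.config = PERF_COUNT_HW_INSTRUCTIONS;
	pebs.sample_period = 100000;
	pebs.precise_ip = 1;
	pebs.aux_output = 1;	/* rejected without PERF_PMU_CAP_AUX_OUTPUT */
	pebs_fd = perf_event_open(&pebs, 0, -1, pt_fd, 0);

	return (pt_fd < 0 || pebs_fd < 0);
}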