 arch/powerpc/oprofile/op_model_power4.c | 24 +++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 8ee51a252cf1..e6bec74be131 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
 	return is_kernel;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
+		return true;
+
+	/*
+	 * Events on POWER7 can roll back if a speculative event doesn't
+	 * eventually complete. Unfortunately in some rare cases they will
+	 * raise a performance monitor exception. We need to catch this to
+	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
+	 * cycles from overflow.
+	 *
+	 * We only do this if the first pass fails to find any overflowing
+	 * PMCs because a user might set a period of less than 256 and we
+	 * don't want to mistakenly reset them.
+	 */
+	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+		return true;
+
+	return false;
+}
+
 static void power4_handle_interrupt(struct pt_regs *regs,
 				    struct op_counter_config *ctr)
 {
@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		val = classic_ctr_read(i);
-		if (val < 0) {
+		if (pmc_overflow(val)) {
 			if (oprofile_running && ctr[i].enabled) {
 				oprofile_add_ext_sample(pc, regs, i, is_kernel);
 				classic_ctr_write(i, reset_value[i]);
 			} else {
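
For reference, the overflow test above can be exercised outside the kernel. The
sketch below is a minimal user-space reconstruction, not kernel code: the
kernel's __is_processor(PV_POWER7) check is replaced by a plain is_power7 flag,
an assumption made purely for illustration. Everything else mirrors the logic
in the patch: the PMCs are 32-bit counters primed near 2^31 so that bit 31 sets
(the value goes negative as an int) after the desired number of events, and on
POWER7 a counter that rolled back to within 256 counts of 0x80000000 is treated
as overflowed too.

#include <stdbool.h>
#include <stdio.h>

static bool is_power7 = true;	/* stand-in for __is_processor(PV_POWER7) */

static bool pmc_overflow(unsigned long val)
{
	/* Normal case: bit 31 set, i.e. the counter has wrapped past 2^31. */
	if ((int)val < 0)
		return true;

	/* POWER7 rollback case: within 256 counts of 0x80000000. */
	if (is_power7 && ((0x80000000 - val) <= 256))
		return true;

	return false;
}

int main(void)
{
	unsigned long samples[] = {
		0x80000000,	/* just overflowed: (int)val < 0      -> overflow    */
		0x7fffff00,	/* exactly 256 from overflow (POWER7) -> overflow    */
		0x7ffffeff,	/* 257 from overflow                  -> no overflow */
		0x00000100,	/* freshly reset counter              -> no overflow */
	};

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08lx -> %s\n", samples[i],
		       pmc_overflow(samples[i]) ? "overflow" : "no overflow");
	return 0;
}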