Diffstat (limited to 'arch/x86/events/core.c')
 arch/x86/events/core.c | 157
 1 file changed, 130 insertions(+), 27 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 81b005e4c7d9..3bb738f5a472 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -49,6 +49,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
+DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
u64 __read_mostly hw_cache_event_ids
@@ -375,7 +376,7 @@ int x86_add_exclusive(unsigned int what)
* LBR and BTS are still mutually exclusive.
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
- return 0;
+ goto out;
if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
mutex_lock(&pmc_reserve_mutex);
@@ -387,6 +388,7 @@ int x86_add_exclusive(unsigned int what)
mutex_unlock(&pmc_reserve_mutex);
}
+out:
atomic_inc(&active_events);
return 0;
@@ -397,11 +399,15 @@ fail_unlock:
void x86_del_exclusive(unsigned int what)
{
+ atomic_dec(&active_events);
+
+ /*
+ * See the comment in x86_add_exclusive().
+ */
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return;
atomic_dec(&x86_pmu.lbr_exclusive[what]);
- atomic_dec(&active_events);
}
int x86_setup_perfctr(struct perf_event *event)
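The x86_add_exclusive()/x86_del_exclusive() hunks above make the active_events accounting symmetric: the add path now reaches the increment even when lbr_pt_coexist short-circuits the exclusivity check, and the del path decrements before taking the same early return. A minimal user-space model of that pattern (not the kernel code; names and the simple ints standing in for atomics are illustrative):

#include <assert.h>
#include <stdbool.h>

static int active_events;      /* models atomic_t active_events */
static int lbr_exclusive_pt;   /* models x86_pmu.lbr_exclusive[what] */
static bool lbr_pt_coexist;    /* models x86_pmu.lbr_pt_coexist */

static int add_exclusive(void)
{
        if (lbr_pt_coexist)
                goto out;              /* skip only the exclusivity count */
        lbr_exclusive_pt++;
out:
        active_events++;               /* always accounted, as in the patch */
        return 0;
}

static void del_exclusive(void)
{
        active_events--;               /* mirrors the unconditional increment */
        if (lbr_pt_coexist)
                return;
        lbr_exclusive_pt--;
}

int main(void)
{
        lbr_pt_coexist = true;
        add_exclusive();
        del_exclusive();
        assert(active_events == 0 && lbr_exclusive_pt == 0);
        return 0;
}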
@@ -612,6 +618,7 @@ void x86_pmu_disable_all(void)
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
u64 val;
if (!test_bit(idx, cpuc->active_mask))
@@ -621,6 +628,8 @@ void x86_pmu_disable_all(void)
continue;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(x86_pmu_config_addr(idx), val);
+ if (is_counter_pair(hwc))
+ wrmsrl(x86_pmu_config_addr(idx + 1), 0);
}
}
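The is_counter_pair() test used above is, as far as I can tell, a small flag check in the x86 perf headers; roughly the following sketch (the flag value and the stand-in struct name here are assumptions). The point of the hunk is that a paired event owns counter idx plus the adjacent Merge slot at idx + 1, so disabling it must also clear the config register of idx + 1.

#include <stdbool.h>
#include <stdint.h>

#define PERF_X86_EVENT_PAIR 0x1000               /* illustrative flag value */

struct hw_perf_event_model { uint64_t flags; };  /* stand-in for hw_perf_event */

static bool is_counter_pair(const struct hw_perf_event_model *hwc)
{
        return hwc->flags & PERF_X86_EVENT_PAIR;
}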
@@ -693,7 +702,7 @@ struct sched_state {
int counter; /* counter index */
int unassigned; /* number of events to be assigned left */
int nr_gp; /* number of GP counters used */
- unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ u64 used;
};
/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
@@ -750,8 +759,12 @@ static bool perf_sched_restore_state(struct perf_sched *sched)
sched->saved_states--;
sched->state = sched->saved[sched->saved_states];
- /* continue with next counter: */
- clear_bit(sched->state.counter++, sched->state.used);
+ /* this assignment didn't work out */
+ /* XXX broken vs EVENT_PAIR */
+ sched->state.used &= ~BIT_ULL(sched->state.counter);
+
+ /* try the next one */
+ sched->state.counter++;
return true;
}
@@ -776,20 +789,32 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
idx = INTEL_PMC_IDX_FIXED;
for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
- if (!__test_and_set_bit(idx, sched->state.used))
- goto done;
+ u64 mask = BIT_ULL(idx);
+
+ if (sched->state.used & mask)
+ continue;
+
+ sched->state.used |= mask;
+ goto done;
}
}
/* Grab the first unused counter starting with idx */
idx = sched->state.counter;
for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
- if (!__test_and_set_bit(idx, sched->state.used)) {
- if (sched->state.nr_gp++ >= sched->max_gp)
- return false;
+ u64 mask = BIT_ULL(idx);
- goto done;
- }
+ if (c->flags & PERF_X86_EVENT_PAIR)
+ mask |= mask << 1;
+
+ if (sched->state.used & mask)
+ continue;
+
+ if (sched->state.nr_gp++ >= sched->max_gp)
+ return false;
+
+ sched->state.used |= mask;
+ goto done;
}
return false;
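The scheduler hunks above replace the bitmap with a plain u64, so a paired event can reserve counters idx and idx + 1 in a single mask test. A stand-alone model of that bookkeeping (names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

static bool claim_counter(uint64_t *used, int idx, bool pair)
{
        uint64_t mask = BIT_ULL(idx);

        if (pair)
                mask |= mask << 1;      /* also reserve idx + 1 */

        if (*used & mask)               /* either half already taken? */
                return false;

        *used |= mask;
        return true;
}

int main(void)
{
        uint64_t used = 0;

        claim_counter(&used, 0, true);                  /* pair takes 0 and 1 */
        printf("%d\n", claim_counter(&used, 1, false)); /* 0: blocked by pair */
        printf("%d\n", claim_counter(&used, 2, false)); /* 1: counter 2 free  */
        return 0;
}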
@@ -866,12 +891,10 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
struct event_constraint *c;
- unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
struct perf_event *e;
int n0, i, wmin, wmax, unsched = 0;
struct hw_perf_event *hwc;
-
- bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+ u64 used_mask = 0;
/*
* Compute the number of events already present; see x86_pmu_add(),
@@ -914,6 +937,8 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
* fastpath, try to reuse previous register
*/
for (i = 0; i < n; i++) {
+ u64 mask;
+
hwc = &cpuc->event_list[i]->hw;
c = cpuc->event_constraint[i];
@@ -925,11 +950,16 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (!test_bit(hwc->idx, c->idxmsk))
break;
+ mask = BIT_ULL(hwc->idx);
+ if (is_counter_pair(hwc))
+ mask |= mask << 1;
+
/* not already used */
- if (test_bit(hwc->idx, used_mask))
+ if (used_mask & mask)
break;
- __set_bit(hwc->idx, used_mask);
+ used_mask |= mask;
+
if (assign)
assign[i] = hwc->idx;
}
@@ -952,6 +982,15 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
READ_ONCE(cpuc->excl_cntrs->exclusive_present))
gpmax /= 2;
+ /*
+ * Reduce the amount of available counters to allow fitting
+ * the extra Merge events needed by large increment events.
+ */
+ if (x86_pmu.flags & PMU_FL_PAIR) {
+ gpmax = x86_pmu.num_counters - cpuc->n_pair;
+ WARN_ON(gpmax <= 0);
+ }
+
unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
wmax, gpmax, assign);
}
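As a concrete (purely illustrative) example of the gpmax adjustment: with num_counters = 6 and two paired events collected (cpuc->n_pair = 2), gpmax becomes 6 - 2 = 4, so the scheduler hands out at most four primary counters and leaves two slots free for the Merge halves those pairs will occupy.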
@@ -1005,12 +1044,35 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
/* current number of events already accepted */
n = cpuc->n_events;
+ if (!cpuc->n_events)
+ cpuc->pebs_output = 0;
+
+ if (!cpuc->is_fake && leader->attr.precise_ip) {
+ /*
+ * For PEBS->PT, if !aux_event, the group leader (PT) went
+ * away, the group was broken down and this singleton event
+ * can't schedule any more.
+ */
+ if (is_pebs_pt(leader) && !leader->aux_event)
+ return -EINVAL;
+
+ /*
+ * pebs_output: 0: no PEBS so far, 1: PT, 2: DS
+ */
+ if (cpuc->pebs_output &&
+ cpuc->pebs_output != is_pebs_pt(leader) + 1)
+ return -EINVAL;
+
+ cpuc->pebs_output = is_pebs_pt(leader) + 1;
+ }
if (is_x86_event(leader)) {
if (n >= max_count)
return -EINVAL;
cpuc->event_list[n] = leader;
n++;
+ if (is_counter_pair(&leader->hw))
+ cpuc->n_pair++;
}
if (!dogrp)
return n;
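cpuc->pebs_output in the hunk above acts as a three-value state: 0 means no PEBS user yet, 1 means PEBS is routed to PT, 2 means PEBS is routed to the DS area, and mixing the two destinations in one context is rejected. A minimal user-space model of that check (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum pebs_dest { PEBS_NONE = 0, PEBS_PT = 1, PEBS_DS = 2 };

static int pebs_output;        /* models cpuc->pebs_output */

static bool collect_pebs_event(bool pebs_via_pt)
{
        int want = pebs_via_pt ? PEBS_PT : PEBS_DS;   /* is_pebs_pt() + 1 */

        if (pebs_output && pebs_output != want)
                return false;                          /* -EINVAL in the kernel */

        pebs_output = want;
        return true;
}

int main(void)
{
        printf("%d\n", collect_pebs_event(true));   /* 1: first user picks PT  */
        printf("%d\n", collect_pebs_event(false));  /* 0: DS now conflicts     */
        return 0;
}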
@@ -1025,6 +1087,8 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
cpuc->event_list[n] = event;
n++;
+ if (is_counter_pair(&event->hw))
+ cpuc->n_pair++;
}
return n;
}
@@ -1211,6 +1275,13 @@ int x86_perf_event_set_period(struct perf_event *event)
wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
/*
+ * Clear the Merge event counter's upper 16 bits since
+ * we currently declare a 48-bit counter width
+ */
+ if (is_counter_pair(hwc))
+ wrmsrl(x86_pmu_event_addr(idx + 1), 0);
+
+ /*
* Due to erratum on certan cpu we need
* a second write to be sure the register
* is updated properly
@@ -1236,7 +1307,7 @@ void x86_pmu_enable_event(struct perf_event *event)
* Add a single event to the PMU.
*
* The event is added to the group of enabled events
- * but only if it can be scehduled with existing events.
+ * but only if it can be scheduled with existing events.
*/
static int x86_pmu_add(struct perf_event *event, int flags)
{
@@ -1620,9 +1691,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{
- struct perf_pmu_events_attr *pmu_attr = \
+ struct perf_pmu_events_attr *pmu_attr =
container_of(attr, struct perf_pmu_events_attr, attr);
- u64 config = x86_pmu.event_map(pmu_attr->id);
+ u64 config = 0;
+
+ if (pmu_attr->id < x86_pmu.max_events)
+ config = x86_pmu.event_map(pmu_attr->id);
/* string trumps id */
if (pmu_attr->event_str)
@@ -1691,6 +1765,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct perf_pmu_events_attr *pmu_attr;
+ if (idx >= x86_pmu.max_events)
+ return 0;
+
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
/* str trumps id */
return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
@@ -2087,7 +2164,7 @@ static int x86_pmu_event_init(struct perf_event *event)
static void refresh_pce(void *ignored)
{
- load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
+ load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
}
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
@@ -2160,21 +2237,26 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
if (x86_pmu.attr_rdpmc_broken)
return -ENOTSUPP;
- if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
+ if (val != x86_pmu.attr_rdpmc) {
/*
- * Changing into or out of always available, aka
- * perf-event-bypassing mode. This path is extremely slow,
+ * Changing into or out of never available or always available,
+ * aka perf-event-bypassing mode. This path is extremely slow,
* but only root can trigger it, so it's okay.
*/
+ if (val == 0)
+ static_branch_inc(&rdpmc_never_available_key);
+ else if (x86_pmu.attr_rdpmc == 0)
+ static_branch_dec(&rdpmc_never_available_key);
+
if (val == 2)
static_branch_inc(&rdpmc_always_available_key);
- else
+ else if (x86_pmu.attr_rdpmc == 2)
static_branch_dec(&rdpmc_always_available_key);
+
on_each_cpu(refresh_pce, NULL, 1);
+ x86_pmu.attr_rdpmc = val;
}
- x86_pmu.attr_rdpmc = val;
-
return count;
}
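The rewritten set_attr_rdpmc() tracks three modes (0 = never available, 1 = available to tasks that mmap a counter, 2 = always available) with two static keys, touching each key only when crossing into or out of its mode, and updates x86_pmu.attr_rdpmc inside the guarded block so writing the current value is a no-op. A user-space model of just that bookkeeping (the plain ints stand in for the static branches):

#include <assert.h>

static int never_key, always_key;   /* model the two static keys' counts */
static int attr_rdpmc = 1;          /* current mode */

static void set_rdpmc(int val)
{
        if (val == attr_rdpmc)
                return;                     /* nothing to switch */

        if (val == 0)
                never_key++;                /* entering "never"  */
        else if (attr_rdpmc == 0)
                never_key--;                /* leaving "never"   */

        if (val == 2)
                always_key++;               /* entering "always" */
        else if (attr_rdpmc == 2)
                always_key--;               /* leaving "always"  */

        attr_rdpmc = val;                   /* updated inside the guard */
}

int main(void)
{
        set_rdpmc(0);
        set_rdpmc(2);
        set_rdpmc(1);
        assert(never_key == 0 && always_key == 0);  /* transitions balance out */
        return 0;
}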
@@ -2222,6 +2304,13 @@ static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
x86_pmu.sched_task(ctx, sched_in);
}
+static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
+ struct perf_event_context *next)
+{
+ if (x86_pmu.swap_task_ctx)
+ x86_pmu.swap_task_ctx(prev, next);
+}
+
void perf_check_microcode(void)
{
if (x86_pmu.check_microcode)
@@ -2241,6 +2330,17 @@ static int x86_pmu_check_period(struct perf_event *event, u64 value)
return 0;
}
+static int x86_pmu_aux_output_match(struct perf_event *event)
+{
+ if (!(pmu.capabilities & PERF_PMU_CAP_AUX_OUTPUT))
+ return 0;
+
+ if (x86_pmu.aux_output_match)
+ return x86_pmu.aux_output_match(event);
+
+ return 0;
+}
+
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
@@ -2265,7 +2365,10 @@ static struct pmu pmu = {
.event_idx = x86_pmu_event_idx,
.sched_task = x86_pmu_sched_task,
.task_ctx_size = sizeof(struct x86_perf_task_context),
+ .swap_task_ctx = x86_pmu_swap_task_ctx,
.check_period = x86_pmu_check_period,
+
+ .aux_output_match = x86_pmu_aux_output_match,
};
void arch_perf_update_userpage(struct perf_event *event,
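Both new pmu callbacks, x86_pmu_swap_task_ctx() and x86_pmu_aux_output_match(), follow the same shape: forward to the vendor driver's hook only if it is implemented, otherwise report "no opinion". A stand-alone sketch of that optional-hook pattern (struct and names are made up for illustration):

#include <stddef.h>
#include <stdio.h>

struct vendor_pmu {
        int (*aux_output_match)(int event);   /* optional vendor hook */
};

static int aux_output_match(const struct vendor_pmu *pmu, int event)
{
        if (pmu->aux_output_match)
                return pmu->aux_output_match(event);
        return 0;                              /* hook absent: not a match */
}

int main(void)
{
        struct vendor_pmu pmu = { .aux_output_match = NULL };

        printf("%d\n", aux_output_match(&pmu, 42));   /* 0: no hook installed */
        return 0;
}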