Diffstat (limited to 'arch/x86/events/core.c')
 arch/x86/events/core.c | 45 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 35 insertions(+), 10 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7b21455d7504..f118af9f0718 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -49,6 +49,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
 };
 
+DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 
 u64 __read_mostly hw_cache_event_ids
@@ -375,7 +376,7 @@ int x86_add_exclusive(unsigned int what)
 	 * LBR and BTS are still mutually exclusive.
 	 */
 	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
-		return 0;
+		goto out;
 
 	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
 		mutex_lock(&pmc_reserve_mutex);
@@ -387,6 +388,7 @@ int x86_add_exclusive(unsigned int what)
 		mutex_unlock(&pmc_reserve_mutex);
 	}
 
+out:
 	atomic_inc(&active_events);
 	return 0;
 
@@ -397,11 +399,15 @@ fail_unlock:
 
 void x86_del_exclusive(unsigned int what)
 {
+	atomic_dec(&active_events);
+
+	/*
+	 * See the comment in x86_add_exclusive().
+	 */
 	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
 		return;
 
 	atomic_dec(&x86_pmu.lbr_exclusive[what]);
-	atomic_dec(&active_events);
 }
 
 int x86_setup_perfctr(struct perf_event *event)
@@ -1641,9 +1647,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
 
 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
 {
-	struct perf_pmu_events_attr *pmu_attr = \
+	struct perf_pmu_events_attr *pmu_attr =
 		container_of(attr, struct perf_pmu_events_attr, attr);
-	u64 config = x86_pmu.event_map(pmu_attr->id);
+	u64 config = 0;
+
+	if (pmu_attr->id < x86_pmu.max_events)
+		config = x86_pmu.event_map(pmu_attr->id);
 
 	/* string trumps id */
 	if (pmu_attr->event_str)
@@ -1712,6 +1721,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx)
 {
 	struct perf_pmu_events_attr *pmu_attr;
 
+	if (idx >= x86_pmu.max_events)
+		return 0;
+
 	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
 	/* str trumps id */
 	return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
@@ -2181,21 +2193,26 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 	if (x86_pmu.attr_rdpmc_broken)
 		return -ENOTSUPP;
 
-	if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
+	if (val != x86_pmu.attr_rdpmc) {
 		/*
-		 * Changing into or out of always available, aka
-		 * perf-event-bypassing mode. This path is extremely slow,
+		 * Changing into or out of never available or always available,
+		 * aka perf-event-bypassing mode. This path is extremely slow,
 		 * but only root can trigger it, so it's okay.
 		 */
+		if (val == 0)
+			static_branch_inc(&rdpmc_never_available_key);
+		else if (x86_pmu.attr_rdpmc == 0)
+			static_branch_dec(&rdpmc_never_available_key);
+
 		if (val == 2)
 			static_branch_inc(&rdpmc_always_available_key);
-		else
+		else if (x86_pmu.attr_rdpmc == 2)
 			static_branch_dec(&rdpmc_always_available_key);
+
 		on_each_cpu(refresh_pce, NULL, 1);
+		x86_pmu.attr_rdpmc = val;
 	}
 
-	x86_pmu.attr_rdpmc = val;
-
 	return count;
 }
 
@@ -2243,6 +2260,13 @@ static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
 		x86_pmu.sched_task(ctx, sched_in);
 }
 
+static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
+				  struct perf_event_context *next)
+{
+	if (x86_pmu.swap_task_ctx)
+		x86_pmu.swap_task_ctx(prev, next);
+}
+
 void perf_check_microcode(void)
 {
 	if (x86_pmu.check_microcode)
@@ -2297,6 +2321,7 @@ static struct pmu pmu = {
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
 	.task_ctx_size		= sizeof(struct x86_perf_task_context),
+	.swap_task_ctx		= x86_pmu_swap_task_ctx,
 	.check_period		= x86_pmu_check_period,
 	.aux_output_match	= x86_pmu_aux_output_match,
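
For context on what the rdpmc attribute gates: when /sys/devices/cpu/rdpmc is written to 0, the new rdpmc_never_available_key keeps CR4.PCE clear on every CPU (via on_each_cpu(refresh_pce, NULL, 1)), so the RDPMC instruction faults in user space and the mmap'ed control page advertises cap_user_rdpmc == 0. Below is a minimal user-space sketch of the self-monitoring fast path this affects; it is an illustration, not part of the patch. It assumes Linux's perf_event_open(2) interface on x86, and for brevity it elides the seqlock retry loop around pc->lock and the pmc_width sign-extension that a production reader needs.

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Read hardware counter 'counter' directly, bypassing the kernel. */
static inline uint64_t rdpmc(uint32_t counter)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct perf_event_attr attr;
	struct perf_event_mmap_page *pc;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;	/* user-only, works unprivileged */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* The first mmap'ed page is the read-only control page. */
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * cap_user_rdpmc mirrors the state this patch toggles: with
	 * rdpmc set to 0, CR4.PCE is cleared and this reads as 0, so
	 * a correct program must fall back to read(2) on the event fd.
	 */
	if (!pc->cap_user_rdpmc) {
		uint64_t count;

		fprintf(stderr, "rdpmc unavailable; falling back to read()\n");
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("instructions: %llu\n", (unsigned long long)count);
		return 0;
	}

	/* pc->index is the counter number plus one; 0 means not scheduled. */
	if (pc->index)
		printf("instructions: %llu\n",
		       (unsigned long long)(pc->offset + rdpmc(pc->index - 1)));

	return 0;
}

With this change, any root-triggered transition between 0, 1 and 2 adjusts the two static keys and refreshes CR4.PCE on all CPUs immediately, rather than leaving stale PCE state until the next context switch.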