Diffstat (limited to 'arch/x86/events/core.c')
-rw-r--r--   arch/x86/events/core.c   80
1 file changed, 55 insertions, 25 deletions
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 88797c80b3e0..106911b603bd 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/device.h>
+#include <linux/nospec.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -48,7 +49,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
 };
 
-struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;
+DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 
 u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 
 	config = attr->config;
 
-	cache_type = (config >>  0) & 0xff;
+	cache_type = (config >> 0) & 0xff;
 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
 		return -EINVAL;
+	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
 
 	cache_op = (config >> 8) & 0xff;
 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
 		return -EINVAL;
+	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
 
 	cache_result = (config >> 16) & 0xff;
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
+	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
 
 	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
@@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event)
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
 
+	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
+
 	/*
 	 * The generic map:
 	 */
@@ -990,7 +996,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 	if (!dogrp)
 		return n;
 
-	list_for_each_entry(event, &leader->sibling_list, group_entry) {
+	for_each_sibling_event(event, leader) {
 		if (!is_x86_event(event) ||
 		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;
@@ -1027,6 +1033,27 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 	}
 }
 
+/**
+ * x86_perf_rdpmc_index - Return PMC counter used for event
+ * @event: the perf_event to which the PMC counter was assigned
+ *
+ * The counter assigned to this performance event may change if interrupts
+ * are enabled. This counter should thus never be used while interrupts are
+ * enabled. Before this function is used to obtain the assigned counter the
+ * event should be checked for validity using, for example,
+ * perf_event_read_local(), within the same interrupt disabled section in
+ * which this counter is planned to be used.
+ *
+ * Return: The index of the performance monitoring counter assigned to
+ * @perf_event.
+ */
+int x86_perf_rdpmc_index(struct perf_event *event)
+{
+	lockdep_assert_irqs_disabled();
+
+	return event->hw.event_base_rdpmc;
+}
+
 static inline int match_prev_assignment(struct hw_perf_event *hwc,
 					struct cpu_hw_events *cpuc,
 					int i)
@@ -1156,16 +1183,13 @@ int x86_perf_event_set_period(struct perf_event *event)
 
 	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
-	if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
-	    local64_read(&hwc->prev_count) != (u64)-left) {
-		/*
-		 * The hw event starts counting from this event offset,
-		 * mark it to be able to extra future deltas:
-		 */
-		local64_set(&hwc->prev_count, (u64)-left);
+	/*
+	 * The hw event starts counting from this event offset,
+	 * mark it to be able to extra future deltas:
+	 */
+	local64_set(&hwc->prev_count, (u64)-left);
 
-		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
-	}
+	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 
 	/*
 	 * Due to erratum on certan cpu we need
@@ -1581,7 +1605,7 @@ static void __init pmu_check_apic(void)
 
 }
 
-static struct attribute_group x86_pmu_format_group = {
+static struct attribute_group x86_pmu_format_group __ro_after_init = {
 	.name = "format",
 	.attrs = NULL,
 };
@@ -1628,20 +1652,20 @@ __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
 	struct attribute **new;
 	int j, i;
 
-	for (j = 0; a[j]; j++)
+	for (j = 0; a && a[j]; j++)
 		;
-	for (i = 0; b[i]; i++)
+	for (i = 0; b && b[i]; i++)
 		j++;
 	j++;
 
-	new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
+	new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
 	if (!new)
 		return NULL;
 
 	j = 0;
-	for (i = 0; a[i]; i++)
+	for (i = 0; a && a[i]; i++)
 		new[j++] = a[i];
-	for (i = 0; b[i]; i++)
+	for (i = 0; b && b[i]; i++)
 		new[j++] = b[i];
 	new[j] = NULL;
@@ -1712,7 +1736,7 @@ static struct attribute *events_attr[] = {
 	NULL,
 };
 
-static struct attribute_group x86_pmu_events_group = {
+static struct attribute_group x86_pmu_events_group __ro_after_init = {
 	.name = "events",
 	.attrs = events_attr,
 };
@@ -1773,6 +1797,10 @@ static int __init init_hw_perf_events(void)
 	case X86_VENDOR_AMD:
 		err = amd_pmu_init();
 		break;
+	case X86_VENDOR_HYGON:
+		err = amd_pmu_init();
+		x86_pmu.name = "HYGON";
+		break;
 	default:
 		err = -ENOTSUPP;
 	}
@@ -1884,6 +1912,8 @@ early_initcall(init_hw_perf_events);
 
 static inline void x86_pmu_read(struct perf_event *event)
 {
+	if (x86_pmu.read)
+		return x86_pmu.read(event);
 	x86_perf_event_update(event);
 }
 
@@ -2207,9 +2237,9 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 		 * but only root can trigger it, so it's okay.
 		 */
 		if (val == 2)
-			static_key_slow_inc(&rdpmc_always_available);
+			static_branch_inc(&rdpmc_always_available_key);
 		else
-			static_key_slow_dec(&rdpmc_always_available);
+			static_branch_dec(&rdpmc_always_available_key);
 		on_each_cpu(refresh_pce, NULL, 1);
 	}
@@ -2225,7 +2255,7 @@ static struct attribute *x86_pmu_attrs[] = {
 	NULL,
 };
 
-static struct attribute_group x86_pmu_attr_group = {
+static struct attribute_group x86_pmu_attr_group __ro_after_init = {
 	.attrs = x86_pmu_attrs,
 };
@@ -2243,7 +2273,7 @@ static struct attribute *x86_pmu_caps_attrs[] = {
 	NULL
 };
 
-static struct attribute_group x86_pmu_caps_group = {
+static struct attribute_group x86_pmu_caps_group __ro_after_init = {
 	.name = "caps",
 	.attrs = x86_pmu_caps_attrs,
 };
@@ -2392,7 +2422,7 @@ static unsigned long get_segment_base(unsigned int segment)
 
 #ifdef CONFIG_IA32_EMULATION
 
-#include <asm/compat.h>
+#include <linux/compat.h>
 
 static inline int
 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
@@ -2460,7 +2490,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
 	perf_callchain_store(entry, regs->ip);
 
-	if (!current->mm)
+	if (!nmi_uaccess_okay())
 		return;
 
 	if (perf_callchain_user32(regs, entry))
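
Note on the array_index_nospec() hunks in set_ext_hw_attr() and x86_setup_perfctr(): the pattern is to bounds-check the user-controlled index first, then clamp it so a mispredicted branch cannot dereference an out-of-range value speculatively. A minimal sketch of the same pattern with a hypothetical table (my_table and MY_TABLE_SIZE are illustrative, not from this diff):

#include <linux/nospec.h>

static int my_lookup(unsigned int idx)
{
	/* Architectural bounds check: reject bad indices outright. */
	if (idx >= MY_TABLE_SIZE)
		return -EINVAL;

	/*
	 * Speculative clamp: array_index_nospec() evaluates to idx when
	 * idx < MY_TABLE_SIZE and to 0 otherwise, without a branch the
	 * CPU could mispredict, so the load below cannot read beyond
	 * the table even under speculation.
	 */
	idx = array_index_nospec(idx, MY_TABLE_SIZE);

	return my_table[idx];
}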

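The kerneldoc for the new x86_perf_rdpmc_index() spells out its contract: the counter index is only stable while interrupts are off, and the event should be validated in the same IRQ-disabled section in which the index is used. A hedged sketch of a conforming caller (read_local_pmc() and its use of the raw counter value are illustrative, not part of this diff):

static u64 read_local_pmc(struct perf_event *event)
{
	unsigned long flags;
	u64 value = 0;

	local_irq_save(flags);

	/*
	 * perf_event_read_local() fails unless the event is valid and
	 * active on this CPU, so a successful call confirms the RDPMC
	 * index can be trusted until interrupts are re-enabled.
	 */
	if (!perf_event_read_local(event, &value, NULL, NULL))
		value = native_read_pmc(x86_perf_rdpmc_index(event));

	local_irq_restore(flags);

	return value;
}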

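The rdpmc_always_available conversion moves from the old struct static_key API to static branches: DEFINE_STATIC_KEY_FALSE() pairs with static_branch_unlikely() at the test site, while static_branch_inc()/static_branch_dec() keep a reference count, which is what set_attr_rdpmc() needs since the sysfs knob can be toggled repeatedly. A sketch with a hypothetical key (example_key and example_path() are illustrative):

#include <linux/jump_label.h>
#include <linux/printk.h>

DEFINE_STATIC_KEY_FALSE(example_key);

static void example_path(void)
{
	/*
	 * Compiled as a NOP while the key's count is zero and patched
	 * into a jump once static_branch_inc() raises it above zero:
	 * the fast path pays no load, compare, or conditional branch.
	 */
	if (static_branch_unlikely(&example_key))
		pr_info("slow path enabled\n");
}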