author    | Linus Torvalds <torvalds@linux-foundation.org> | 2019-02-17 08:38:13 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-02-17 08:38:13 -0800
commit    | dd6f29da695dbfe5a8ff84ebfdd2110d68e8f511 (patch)
tree      | 015ae77d5d6abcb1ec56d79c51541ddf5f07ff46 /kernel
parent    | c5f1ac5e9afb199638414be77cbc22eb68e14d97 (diff)
parent    | 528871b456026e6127d95b1b2bd8e3a003dc1614 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"Two fixes on the kernel side: fix an over-eager condition that failed
larger perf ring-buffer sizes, plus fix crashes in the Intel BTS code
for a corner case, found by fuzzing"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/core: Fix impossible ring-buffer sizes warning
perf/x86: Add check_period PMU callback
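
The ring-buffer fix is easier to see with the units spelled out: in rb_alloc(), order_base_2(size) is the log2 of a byte count, while MAX_ORDER is an allocation limit expressed in pages, so the old comparison started rejecting metadata blocks of just over 1 KiB even though kzalloc() can serve much larger requests; the fix compares against PAGE_SHIFT + MAX_ORDER, the same limit expressed in bytes. The following userspace sketch only replays that arithmetic with assumed example values (PAGE_SHIFT 12, MAX_ORDER 11, a 256-byte struct overhead, 512 data pages); it is an illustration, not kernel code.

/*
 * Minimal userspace sketch (not kernel code) of the size check fixed in
 * kernel/events/ring_buffer.c.  The old test compared the log2 of a byte
 * count against MAX_ORDER, a limit counted in pages; the new test compares
 * it against PAGE_SHIFT + MAX_ORDER, the same limit counted in bytes.
 * PAGE_SHIFT, MAX_ORDER, the 256-byte overhead and the 512 data pages are
 * assumed example values.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define MAX_ORDER  11

/* round-up log2, mimicking the kernel's order_base_2() (simplified) */
static unsigned int order_base_2(unsigned long n)
{
        unsigned int order = 0;

        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned long nr_pages = 512;   /* 2 MiB of data pages */
        unsigned long size = 256 + nr_pages * sizeof(void *);

        printf("metadata size: %lu bytes (order %u)\n",
               size, order_base_2(size));
        printf("old check rejects it: %s\n",
               order_base_2(size) >= MAX_ORDER ? "yes" : "no");
        printf("new check rejects it: %s\n",
               order_base_2(size) >= PAGE_SHIFT + MAX_ORDER ? "yes" : "no");
        return 0;
}

With these numbers the metadata block is only 4352 bytes, yet the old check already rejects it, which is why larger ring buffers failed to allocate.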
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/events/core.c        | 16
-rw-r--r-- | kernel/events/ring_buffer.c |  2
2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e5ede6918050..26d6edab051a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
 	}
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 309ef5a64af5..5ab4fe3b1dcc 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 	size = sizeof(struct ring_buffer);
 	size += nr_pages * sizeof(void *);
 
-	if (order_base_2(size) >= MAX_ORDER)
+	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
 		goto fail;
 
 	rb = kzalloc(size, GFP_KERNEL);
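
On the check_period side, the core.c hunks above add a default no-op in perf_pmu_register() and call the hook from perf_event_period() before the new value is committed, so only PMUs that provide the callback are affected. The fragment below is a hypothetical driver, not the actual perf/x86 change: the mydrv_* names and the minimum-period constraint are made up purely to show the shape of the hook.

/*
 * Hypothetical PMU driver fragment (illustration only, not the perf/x86
 * patch): reject sampling periods the (imaginary) hardware cannot be
 * programmed with.  A non-zero return propagates through
 * perf_event_check_period() and makes the period-update ioctl fail with
 * -EINVAL before the counter would have been reprogrammed.
 */
#include <linux/perf_event.h>

#define MYDRV_MIN_PERIOD 128	/* assumed hardware limit */

static int mydrv_check_period(struct perf_event *event, u64 value)
{
	if (value && value < MYDRV_MIN_PERIOD)
		return -EINVAL;

	return 0;
}

static struct pmu mydrv_pmu = {
	/* ... add/del/start/stop/read callbacks omitted ... */
	.check_period	= mydrv_check_period,
};

PMUs that leave .check_period unset get perf_event_nop_int() assigned at registration time, as the core.c hunk shows, so existing drivers keep their behaviour unchanged.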