Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r-- | tools/perf/util/evlist.c | 332
1 file changed, 56 insertions(+), 276 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6a0d7ffbeba0..ac35cd214feb 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,6 +25,7 @@
 #include "parse-events.h"
 #include <subcmd/parse-options.h>
 
+#include <fcntl.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 
@@ -33,9 +34,6 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 
-static void perf_mmap__munmap(struct perf_mmap *map);
-static void perf_mmap__put(struct perf_mmap *map);
-
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
 
@@ -128,7 +126,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
         zfree(&evlist->mmap);
-        zfree(&evlist->backward_mmap);
+        zfree(&evlist->overwrite_mmap);
         fdarray__exit(&evlist->pollfd);
 }
 
@@ -260,7 +258,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
                 .config = PERF_COUNT_SW_DUMMY,
                 .size   = sizeof(attr), /* to capture ABI version */
         };
-        struct perf_evsel *evsel = perf_evsel__new(&attr);
+        struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
 
         if (evsel == NULL)
                 return -ENOMEM;
@@ -678,11 +676,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
 {
         int i;
 
-        if (!evlist->backward_mmap)
+        if (!evlist->overwrite_mmap)
                 return 0;
 
         for (i = 0; i < evlist->nr_mmaps; i++) {
-                int fd = evlist->backward_mmap[i].fd;
+                int fd = evlist->overwrite_mmap[i].fd;
                 int err;
 
                 if (fd < 0)
@@ -704,129 +702,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
         return perf_evlist__set_paused(evlist, false);
 }
 
-/* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *
-perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
-                u64 end, u64 *prev)
-{
-        unsigned char *data = md->base + page_size;
-        union perf_event *event = NULL;
-        int diff = end - start;
-
-        if (check_messup) {
-                /*
-                 * If we're further behind than half the buffer, there's a chance
-                 * the writer will bite our tail and mess up the samples under us.
-                 *
-                 * If we somehow ended up ahead of the 'end', we got messed up.
-                 *
-                 * In either case, truncate and restart at 'end'.
-                 */
-                if (diff > md->mask / 2 || diff < 0) {
-                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
-                        /*
-                         * 'end' points to a known good entry, start there.
-                         */
-                        start = end;
-                        diff = 0;
-                }
-        }
-
-        if (diff >= (int)sizeof(event->header)) {
-                size_t size;
-
-                event = (union perf_event *)&data[start & md->mask];
-                size = event->header.size;
-
-                if (size < sizeof(event->header) || diff < (int)size) {
-                        event = NULL;
-                        goto broken_event;
-                }
-
-                /*
-                 * Event straddles the mmap boundary -- header should always
-                 * be inside due to u64 alignment of output.
-                 */
-                if ((start & md->mask) + size != ((start + size) & md->mask)) {
-                        unsigned int offset = start;
-                        unsigned int len = min(sizeof(*event), size), cpy;
-                        void *dst = md->event_copy;
-
-                        do {
-                                cpy = min(md->mask + 1 - (offset & md->mask), len);
-                                memcpy(dst, &data[offset & md->mask], cpy);
-                                offset += cpy;
-                                dst += cpy;
-                                len -= cpy;
-                        } while (len);
-
-                        event = (union perf_event *) md->event_copy;
-                }
-
-                start += size;
-        }
-
-broken_event:
-        if (prev)
-                *prev = start;
-
-        return event;
-}
-
-union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
-{
-        u64 head;
-        u64 old = md->prev;
-
-        /*
-         * Check if event was unmapped due to a POLLHUP/POLLERR.
-         */
-        if (!refcount_read(&md->refcnt))
-                return NULL;
-
-        head = perf_mmap__read_head(md);
-
-        return perf_mmap__read(md, check_messup, old, head, &md->prev);
-}
-
-union perf_event *
-perf_mmap__read_backward(struct perf_mmap *md)
-{
-        u64 head, end;
-        u64 start = md->prev;
-
-        /*
-         * Check if event was unmapped due to a POLLHUP/POLLERR.
-         */
-        if (!refcount_read(&md->refcnt))
-                return NULL;
-
-        head = perf_mmap__read_head(md);
-        if (!head)
-                return NULL;
-
-        /*
-         * 'head' pointer starts from 0. Kernel minus sizeof(record) form
-         * it each time when kernel writes to it, so in fact 'head' is
-         * negative. 'end' pointer is made manually by adding the size of
-         * the ring buffer to 'head' pointer, means the validate data can
-         * read is the whole ring buffer. If 'end' is positive, the ring
-         * buffer has not fully filled, so we must adjust 'end' to 0.
-         *
-         * However, since both 'head' and 'end' is unsigned, we can't
-         * simply compare 'end' against 0. Here we compare '-head' and
-         * the size of the ring buffer, where -head is the number of bytes
-         * kernel write to the ring buffer.
-         */
-        if (-head < (u64)(md->mask + 1))
-                end = 0;
-        else
-                end = head + md->mask + 1;
-
-        return perf_mmap__read(md, false, start, end, &md->prev);
-}
-
 union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
 {
         struct perf_mmap *md = &evlist->mmap[idx];
@@ -837,7 +712,7 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int
          * No need for read-write ring buffer: kernel stop outputting when
          * it hit md->prev (perf_mmap__consume()).
          */
-        return perf_mmap__read_forward(md, evlist->overwrite);
+        return perf_mmap__read_forward(md);
 }
 
 union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
@@ -857,94 +732,14 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
         return perf_evlist__mmap_read_forward(evlist, idx);
 }
 
-void perf_mmap__read_catchup(struct perf_mmap *md)
-{
-        u64 head;
-
-        if (!refcount_read(&md->refcnt))
-                return;
-
-        head = perf_mmap__read_head(md);
-        md->prev = head;
-}
-
 void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
 {
         perf_mmap__read_catchup(&evlist->mmap[idx]);
 }
 
-static bool perf_mmap__empty(struct perf_mmap *md)
-{
-        return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
-}
-
-static void perf_mmap__get(struct perf_mmap *map)
-{
-        refcount_inc(&map->refcnt);
-}
-
-static void perf_mmap__put(struct perf_mmap *md)
-{
-        BUG_ON(md->base && refcount_read(&md->refcnt) == 0);
-
-        if (refcount_dec_and_test(&md->refcnt))
-                perf_mmap__munmap(md);
-}
-
-void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
-{
-        if (!overwrite) {
-                u64 old = md->prev;
-
-                perf_mmap__write_tail(md, old);
-        }
-
-        if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
-                perf_mmap__put(md);
-}
-
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 {
-        perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
-}
-
-int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
-                               struct auxtrace_mmap_params *mp __maybe_unused,
-                               void *userpg __maybe_unused,
-                               int fd __maybe_unused)
-{
-        return 0;
-}
-
-void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
-{
-}
-
-void __weak auxtrace_mmap_params__init(
-                        struct auxtrace_mmap_params *mp __maybe_unused,
-                        off_t auxtrace_offset __maybe_unused,
-                        unsigned int auxtrace_pages __maybe_unused,
-                        bool auxtrace_overwrite __maybe_unused)
-{
-}
-
-void __weak auxtrace_mmap_params__set_idx(
-                        struct auxtrace_mmap_params *mp __maybe_unused,
-                        struct perf_evlist *evlist __maybe_unused,
-                        int idx __maybe_unused,
-                        bool per_cpu __maybe_unused)
-{
-}
-
-static void perf_mmap__munmap(struct perf_mmap *map)
-{
-        if (map->base != NULL) {
-                munmap(map->base, perf_mmap__mmap_len(map));
-                map->base = NULL;
-                map->fd = -1;
-                refcount_set(&map->refcnt, 0);
-        }
-        auxtrace_mmap__munmap(&map->auxtrace_mmap);
+        perf_mmap__consume(&evlist->mmap[idx], false);
 }
 
 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
@@ -955,16 +750,16 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
         for (i = 0; i < evlist->nr_mmaps; i++)
                 perf_mmap__munmap(&evlist->mmap[i]);
 
-        if (evlist->backward_mmap)
+        if (evlist->overwrite_mmap)
                 for (i = 0; i < evlist->nr_mmaps; i++)
-                        perf_mmap__munmap(&evlist->backward_mmap[i]);
+                        perf_mmap__munmap(&evlist->overwrite_mmap[i]);
 }
 
 void perf_evlist__munmap(struct perf_evlist *evlist)
 {
         perf_evlist__munmap_nofree(evlist);
         zfree(&evlist->mmap);
-        zfree(&evlist->backward_mmap);
+        zfree(&evlist->overwrite_mmap);
 }
 
 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
@@ -995,48 +790,6 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
         return map;
 }
 
-struct mmap_params {
-        int prot;
-        int mask;
-        struct auxtrace_mmap_params auxtrace_mp;
-};
-
-static int perf_mmap__mmap(struct perf_mmap *map,
-                           struct mmap_params *mp, int fd)
-{
-        /*
-         * The last one will be done at perf_evlist__mmap_consume(), so that we
-         * make sure we don't prevent tools from consuming every last event in
-         * the ring buffer.
-         *
-         * I.e. we can get the POLLHUP meaning that the fd doesn't exist
-         * anymore, but the last events for it are still in the ring buffer,
-         * waiting to be consumed.
-         *
-         * Tools can chose to ignore this at their own discretion, but the
-         * evlist layer can't just drop it when filtering events in
-         * perf_evlist__filter_pollfd().
-         */
-        refcount_set(&map->refcnt, 2);
-        map->prev = 0;
-        map->mask = mp->mask;
-        map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
-                         MAP_SHARED, fd, 0);
-        if (map->base == MAP_FAILED) {
-                pr_debug2("failed to mmap perf event ring buffer, error %d\n",
-                          errno);
-                map->base = NULL;
-                return -1;
-        }
-        map->fd = fd;
-
-        if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
-                                &mp->auxtrace_mp, map->base, fd))
-                return -1;
-
-        return 0;
-}
-
 static bool
 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
                          struct perf_evsel *evsel)
@@ -1048,7 +801,7 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
 
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                                        struct mmap_params *mp, int cpu_idx,
-                                       int thread, int *_output, int *_output_backward)
+                                       int thread, int *_output, int *_output_overwrite)
 {
         struct perf_evsel *evsel;
         int revent;
@@ -1060,18 +813,20 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                 int fd;
                 int cpu;
 
+                mp->prot = PROT_READ | PROT_WRITE;
                 if (evsel->attr.write_backward) {
-                        output = _output_backward;
-                        maps = evlist->backward_mmap;
+                        output = _output_overwrite;
+                        maps = evlist->overwrite_mmap;
 
                         if (!maps) {
                                 maps = perf_evlist__alloc_mmap(evlist);
                                 if (!maps)
                                         return -1;
-                                evlist->backward_mmap = maps;
+                                evlist->overwrite_mmap = maps;
                                 if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
                                         perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
                         }
+                        mp->prot &= ~PROT_WRITE;
                 }
 
                 if (evsel->system_wide && thread)
@@ -1132,14 +887,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
         pr_debug2("perf event ring buffer mmapped per cpu\n");
         for (cpu = 0; cpu < nr_cpus; cpu++) {
                 int output = -1;
-                int output_backward = -1;
+                int output_overwrite = -1;
 
                 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
                                               true);
 
                 for (thread = 0; thread < nr_threads; thread++) {
                         if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-                                                        thread, &output, &output_backward))
+                                                        thread, &output, &output_overwrite))
                                 goto out_unmap;
                 }
         }
@@ -1160,13 +915,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
         pr_debug2("perf event ring buffer mmapped per thread\n");
         for (thread = 0; thread < nr_threads; thread++) {
                 int output = -1;
-                int output_backward = -1;
+                int output_overwrite = -1;
 
                 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
                                               false);
 
                 if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-                                                &output, &output_backward))
+                                                &output, &output_overwrite))
                         goto out_unmap;
         }
 
@@ -1300,15 +1055,18 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
  * Return: %0 on success, negative error code otherwise.
  */
 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
-                         bool overwrite, unsigned int auxtrace_pages,
+                         unsigned int auxtrace_pages,
                          bool auxtrace_overwrite)
 {
         struct perf_evsel *evsel;
         const struct cpu_map *cpus = evlist->cpus;
         const struct thread_map *threads = evlist->threads;
-        struct mmap_params mp = {
-                .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
-        };
+        /*
+         * Delay setting mp.prot: set it before calling perf_mmap__mmap.
+         * Its value is decided by evsel's write_backward.
+         * So &mp should not be passed through const pointer.
+         */
+        struct mmap_params mp;
 
         if (!evlist->mmap)
                 evlist->mmap = perf_evlist__alloc_mmap(evlist);
@@ -1318,7 +1076,6 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
         if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                 return -ENOMEM;
 
-        evlist->overwrite = overwrite;
         evlist->mmap_len = perf_evlist__mmap_size(pages);
         pr_debug("mmap size %zuB\n", evlist->mmap_len);
         mp.mask = evlist->mmap_len - page_size - 1;
@@ -1339,10 +1096,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
         return perf_evlist__mmap_per_cpu(evlist, &mp);
 }
 
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
-                      bool overwrite)
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
 {
-        return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
+        return perf_evlist__mmap_ex(evlist, pages, 0, false);
 }
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1350,7 +1106,8 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
         struct cpu_map *cpus;
         struct thread_map *threads;
 
-        threads = thread_map__new_str(target->pid, target->tid, target->uid);
+        threads = thread_map__new_str(target->pid, target->tid, target->uid,
+                                      target->per_thread);
 
         if (!threads)
                 return -1;
@@ -1830,6 +1587,17 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even
         return perf_evsel__parse_sample(evsel, event, sample);
 }
 
+int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
+                                        union perf_event *event,
+                                        u64 *timestamp)
+{
+        struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+        if (!evsel)
+                return -EFAULT;
+        return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
+}
+
 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
 {
         struct perf_evsel *evsel;
@@ -1987,13 +1755,13 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
                 RESUME,
         } action = NONE;
 
-        if (!evlist->backward_mmap)
+        if (!evlist->overwrite_mmap)
                 return;
 
         switch (old_state) {
         case BKW_MMAP_NOTREADY: {
                 if (state != BKW_MMAP_RUNNING)
-                        goto state_err;;
+                        goto state_err;
                 break;
         }
         case BKW_MMAP_RUNNING: {
@@ -2034,3 +1802,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
 state_err:
         return;
 }
+
+bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
+{
+        struct perf_evsel *evsel;
+
+        evlist__for_each_entry(evlist, evsel) {
+                if (!evsel->attr.exclude_kernel)
+                        return false;
+        }
+
+        return true;
+}
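The API change above removes the per-evlist overwrite flag: perf_evlist__mmap() and perf_evlist__mmap_ex() no longer take it, and overwrite behaviour now follows each evsel's attr.write_backward, with such events landing in evlist->overwrite_mmap and being mapped without PROT_WRITE. A minimal caller-side sketch of that change, assuming the perf tool's internal headers are on the include path; the record__mmap_evlist() helper and its arguments are hypothetical and not part of this diff:

#include "util/evlist.h"        /* perf-internal header, assumed available */
#include "util/debug.h"         /* for pr_err(), assumed available */

/* Hypothetical helper, for illustration only. */
static int record__mmap_evlist(struct perf_evlist *evlist, unsigned int mmap_pages)
{
        /*
         * Before this change: perf_evlist__mmap(evlist, mmap_pages, false);
         * the third argument selected overwrite mode for the whole evlist.
         * After this change the flag is gone; overwrite mode is chosen per
         * event via evsel->attr.write_backward.
         */
        if (perf_evlist__mmap(evlist, mmap_pages) < 0) {
                pr_err("failed to mmap perf event ring buffers\n");
                return -1;
        }
        return 0;
}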
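The comment deleted from perf_mmap__read_backward() above describes how the read window of a backward (overwrite) ring buffer is derived from 'head'. A standalone sketch of just that arithmetic; backward_end() and the buffer sizes below are illustrative and not taken from the perf sources:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* 'end' computation as described in the removed perf_mmap__read_backward() comment. */
static u64 backward_end(u64 head, u64 mask)
{
        u64 size = mask + 1;            /* ring buffer size in bytes */

        /*
         * For overwrite buffers the kernel decrements 'head' from 0, so
         * -head is the number of bytes written.  If less than one full
         * buffer was written, everything up to offset 0 is readable;
         * otherwise only the last 'size' bytes, ending at head + size.
         */
        if (-head < size)
                return 0;
        return head + size;
}

int main(void)
{
        u64 mask = (64 * 1024) - 1;     /* 64 KiB buffer */
        u64 head;

        head = (u64)-512;               /* 512 bytes written, no wrap: 512 readable */
        printf("readable: %llu bytes\n",
               (unsigned long long)(backward_end(head, mask) - head));

        head = (u64)-(256 * 1024);      /* 256 KiB written, wrapped: 64 KiB readable */
        printf("readable: %llu bytes\n",
               (unsigned long long)(backward_end(head, mask) - head));
        return 0;
}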