Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--	tools/perf/util/evlist.c	552
1 file changed, 404 insertions, 148 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 86a03836a83f..097b3ed77fdd 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -15,6 +15,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "debug.h"
+#include "asm/bug.h"
 #include <unistd.h>
 
 #include "parse-events.h"
@@ -27,8 +28,8 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+static void perf_mmap__munmap(struct perf_mmap *map);
+static void perf_mmap__put(struct perf_mmap *map);
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
@@ -44,6 +45,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
 	perf_evlist__set_maps(evlist, cpus, threads);
 	fdarray__init(&evlist->pollfd, 64);
 	evlist->workload.pid = -1;
+	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
 }
 
 struct perf_evlist *perf_evlist__new(void)
@@ -99,7 +101,7 @@ static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
 {
 	struct perf_evsel *evsel;
 
-	evlist__for_each(evlist, evsel)
+	evlist__for_each_entry(evlist, evsel)
 		perf_evsel__calc_id_pos(evsel);
 
 	perf_evlist__set_id_pos(evlist);
@@ -109,7 +111,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 {
 	struct perf_evsel *pos, *n;
 
-	evlist__for_each_safe(evlist, n, pos) {
+	evlist__for_each_entry_safe(evlist, n, pos) {
 		list_del_init(&pos->node);
 		pos->evlist = NULL;
 		perf_evsel__delete(pos);
@@ -121,11 +123,15 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
 	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
 	fdarray__exit(&evlist->pollfd);
 }
 
 void perf_evlist__delete(struct perf_evlist *evlist)
 {
+	if (evlist == NULL)
+		return;
+
 	perf_evlist__munmap(evlist);
 	perf_evlist__close(evlist);
 	cpu_map__put(evlist->cpus);
@@ -160,7 +166,7 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
 {
 	struct perf_evsel *evsel;
 
-	evlist__for_each(evlist, evsel)
+	evlist__for_each_entry(evlist, evsel)
 		__perf_evlist__propagate_maps(evlist, evsel);
 }
 
@@ -189,7 +195,7 @@ void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 {
 	struct perf_evsel *evsel, *temp;
 
-	__evlist__for_each_safe(list, temp, evsel) {
+	__evlist__for_each_entry_safe(list, temp, evsel) {
 		list_del_init(&evsel->node);
 		perf_evlist__add(evlist, evsel);
 	}
@@ -204,7 +210,7 @@ void __perf_evlist__set_leader(struct list_head *list)
 
 	leader->nr_members = evsel->idx - leader->idx + 1;
 
-	__evlist__for_each(list, evsel) {
+	__evlist__for_each_entry(list, evsel) {
 		evsel->leader = leader;
 	}
 }
@@ -233,31 +239,13 @@ void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
 
 int perf_evlist__add_default(struct perf_evlist *evlist)
 {
-	struct perf_event_attr attr = {
-		.type = PERF_TYPE_HARDWARE,
-		.config = PERF_COUNT_HW_CPU_CYCLES,
-	};
-	struct perf_evsel *evsel;
-
-	event_attr_init(&attr);
+	struct perf_evsel *evsel = perf_evsel__new_cycles();
 
-	perf_event_attr__set_max_precise_ip(&attr);
-
-	evsel = perf_evsel__new(&attr);
 	if (evsel == NULL)
-		goto error;
-
-	/* use asprintf() because free(evsel) assumes name is allocated */
-	if (asprintf(&evsel->name, "cycles%.*s",
-		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
-		goto error_free;
+		return -ENOMEM;
 
 	perf_evlist__add(evlist, evsel);
 	return 0;
-error_free:
-	perf_evsel__delete(evsel);
-error:
-	return -ENOMEM;
 }
 
 int perf_evlist__add_dummy(struct perf_evlist *evlist)
@@ -295,7 +283,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
 	return 0;
 
 out_delete_partial_list:
-	__evlist__for_each_safe(&head, n, evsel)
+	__evlist__for_each_entry_safe(&head, n, evsel)
 		perf_evsel__delete(evsel);
 	return -1;
 }
@@ -316,7 +304,7 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
 {
 	struct perf_evsel *evsel;
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
 		    (int)evsel->attr.config == id)
 			return evsel;
@@ -331,7 +319,7 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
 {
 	struct perf_evsel *evsel;
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
 		    (strcmp(evsel->name, name) == 0))
 			return evsel;
@@ -366,7 +354,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
 {
 	struct perf_evsel *pos;
 
-	evlist__for_each(evlist, pos) {
+	evlist__for_each_entry(evlist, pos) {
 		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 			continue;
 		perf_evsel__disable(pos);
@@ -379,7 +367,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 {
 	struct perf_evsel *pos;
 
-	evlist__for_each(evlist, pos) {
+	evlist__for_each_entry(evlist, pos) {
 		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 			continue;
 		perf_evsel__enable(pos);
@@ -447,7 +435,7 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 	int nfds = 0;
 	struct perf_evsel *evsel;
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		if (evsel->system_wide)
 			nfds += nr_cpus;
 		else
@@ -461,15 +449,16 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 	return 0;
 }
 
-static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
+static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
+				     struct perf_mmap *map, short revent)
 {
-	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
+	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
 	/*
 	 * Save the idx so that when we filter out fds POLLHUP'ed we can
 	 * close the associated evlist->mmap[] entry.
 	 */
 	if (pos >= 0) {
-		evlist->pollfd.priv[pos].idx = idx;
+		evlist->pollfd.priv[pos].ptr = map;
 		fcntl(fd, F_SETFL, O_NONBLOCK);
 	}
 
@@ -479,20 +468,22 @@ static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx
 
 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 {
-	return __perf_evlist__add_pollfd(evlist, fd, -1);
+	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
 }
 
-static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
+					 void *arg __maybe_unused)
 {
-	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
+	struct perf_mmap *map = fda->priv[fd].ptr;
 
-	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
+	if (map)
+		perf_mmap__put(map);
 }
 
 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
 {
 	return fdarray__filter(&evlist->pollfd, revents_and_mask,
-			       perf_evlist__munmap_filtered);
+			       perf_evlist__munmap_filtered, NULL);
 }
 
 int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
@@ -646,8 +637,8 @@ static int perf_evlist__event2id(struct perf_evlist *evlist,
 	return 0;
 }
 
-static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
-						   union perf_event *event)
+struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+					    union perf_event *event)
 {
 	struct perf_evsel *first = perf_evlist__first(evlist);
 	struct hlist_head *head;
@@ -679,53 +670,82 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 	return NULL;
 }
 
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
+{
+	int i;
+
+	if (!evlist->backward_mmap)
+		return 0;
+
+	for (i = 0; i < evlist->nr_mmaps; i++) {
+		int fd = evlist->backward_mmap[i].fd;
+		int err;
+
+		if (fd < 0)
+			continue;
+		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int perf_evlist__pause(struct perf_evlist *evlist)
+{
+	return perf_evlist__set_paused(evlist, true);
+}
+
+static int perf_evlist__resume(struct perf_evlist *evlist)
+{
+	return perf_evlist__set_paused(evlist, false);
+}
+
+/* When check_messup is true, 'end' must point to a good entry */
+static union perf_event *
+perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
+		u64 end, u64 *prev)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
-	u64 head;
-	u64 old = md->prev;
 	unsigned char *data = md->base + page_size;
 	union perf_event *event = NULL;
+	int diff = end - start;
 
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!atomic_read(&md->refcnt))
-		return NULL;
-
-	head = perf_mmap__read_head(md);
-	if (evlist->overwrite) {
+	if (check_messup) {
 		/*
 		 * If we're further behind than half the buffer, there's a chance
 		 * the writer will bite our tail and mess up the samples under us.
 		 *
-		 * If we somehow ended up ahead of the head, we got messed up.
+		 * If we somehow ended up ahead of the 'end', we got messed up.
 		 *
-		 * In either case, truncate and restart at head.
+		 * In either case, truncate and restart at 'end'.
 		 */
-		int diff = head - old;
 		if (diff > md->mask / 2 || diff < 0) {
 			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
 
 			/*
-			 * head points to a known good entry, start there.
+			 * 'end' points to a known good entry, start there.
 			 */
-			old = head;
+			start = end;
+			diff = 0;
 		}
 	}
 
-	if (old != head) {
+	if (diff >= (int)sizeof(event->header)) {
 		size_t size;
 
-		event = (union perf_event *)&data[old & md->mask];
+		event = (union perf_event *)&data[start & md->mask];
 		size = event->header.size;
 
+		if (size < sizeof(event->header) || diff < (int)size) {
+			event = NULL;
+			goto broken_event;
+		}
+
 		/*
 		 * Event straddles the mmap boundary -- header should always
 		 * be inside due to u64 alignment of output.
 		 */
-		if ((old & md->mask) + size != ((old + size) & md->mask)) {
-			unsigned int offset = old;
+		if ((start & md->mask) + size != ((start + size) & md->mask)) {
+			unsigned int offset = start;
 			unsigned int len = min(sizeof(*event), size), cpy;
 			void *dst = md->event_copy;
 
@@ -740,44 +760,148 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 			event = (union perf_event *) md->event_copy;
 		}
 
-		old += size;
+		start += size;
 	}
 
-	md->prev = old;
+broken_event:
+	if (prev)
+		*prev = start;
 
 	return event;
 }
 
-static bool perf_mmap__empty(struct perf_mmap *md)
+union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
 {
-	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
+	u64 head;
+	u64 old = md->prev;
+
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!atomic_read(&md->refcnt))
+		return NULL;
+
+	head = perf_mmap__read_head(md);
+
+	return perf_mmap__read(md, check_messup, old, head, &md->prev);
 }
 
-static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+union perf_event *
+perf_mmap__read_backward(struct perf_mmap *md)
 {
-	atomic_inc(&evlist->mmap[idx].refcnt);
+	u64 head, end;
+	u64 start = md->prev;
+
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!atomic_read(&md->refcnt))
+		return NULL;
+
+	head = perf_mmap__read_head(md);
+	if (!head)
+		return NULL;
+
+	/*
+	 * 'head' starts from 0. The kernel subtracts sizeof(record) from
+	 * it each time it writes a record, so in fact 'head' is negative.
+	 * 'end' is built manually by adding the size of the ring buffer
+	 * to 'head', meaning the valid data we can read spans the whole
+	 * ring buffer. If 'end' is positive, the ring buffer has not been
+	 * fully filled yet, so we must adjust 'end' to 0.
+	 *
+	 * However, since both 'head' and 'end' are unsigned, we can't
+	 * simply compare 'end' against 0. Here we compare '-head' with
+	 * the size of the ring buffer, where -head is the number of bytes
+	 * the kernel has written to the ring buffer.
+	 */
+	if (-head < (u64)(md->mask + 1))
+		end = 0;
+	else
+		end = head + md->mask + 1;
+
+	return perf_mmap__read(md, false, start, end, &md->prev);
 }
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
 {
-	BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);
+	struct perf_mmap *md = &evlist->mmap[idx];
 
-	if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
-		__perf_evlist__munmap(evlist, idx);
+	/*
+	 * Checking messup is required for a forward overwritable ring
+	 * buffer: the memory pointed to by md->prev can be overwritten in
+	 * that case. There is no need for a read-write ring buffer: the
+	 * kernel stops outputting when it hits md->prev (perf_mmap__consume()).
+	 */
+	return perf_mmap__read_forward(md, evlist->overwrite);
 }
 
-void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
 {
 	struct perf_mmap *md = &evlist->mmap[idx];
 
-	if (!evlist->overwrite) {
+	/*
+	 * There is no need to check messup for a backward ring buffer:
+	 * we can always read arbitrarily long data from a backward
+	 * ring buffer unless we forget to pause it before reading.
+	 */
+	return perf_mmap__read_backward(md);
+}
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
+	return perf_evlist__mmap_read_forward(evlist, idx);
+}
+
+void perf_mmap__read_catchup(struct perf_mmap *md)
+{
+	u64 head;
+
+	if (!atomic_read(&md->refcnt))
+		return;
+
+	head = perf_mmap__read_head(md);
+	md->prev = head;
+}
+
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+{
+	perf_mmap__read_catchup(&evlist->mmap[idx]);
+}
+
+static bool perf_mmap__empty(struct perf_mmap *md)
+{
+	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
+}
+
+static void perf_mmap__get(struct perf_mmap *map)
+{
+	atomic_inc(&map->refcnt);
+}
+
+static void perf_mmap__put(struct perf_mmap *md)
+{
+	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
+
+	if (atomic_dec_and_test(&md->refcnt))
+		perf_mmap__munmap(md);
+}
+
+void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
+{
+	if (!overwrite) {
 		u64 old = md->prev;
 
 		perf_mmap__write_tail(md, old);
 	}
 
 	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
-		perf_evlist__mmap_put(evlist, idx);
+		perf_mmap__put(md);
+}
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
 }
 
 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -808,36 +932,52 @@ void __weak auxtrace_mmap_params__set_idx(
 {
 }
 
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+static void perf_mmap__munmap(struct perf_mmap *map)
 {
-	if (evlist->mmap[idx].base != NULL) {
-		munmap(evlist->mmap[idx].base, evlist->mmap_len);
-		evlist->mmap[idx].base = NULL;
-		atomic_set(&evlist->mmap[idx].refcnt, 0);
+	if (map->base != NULL) {
+		munmap(map->base, perf_mmap__mmap_len(map));
+		map->base = NULL;
+		map->fd = -1;
+		atomic_set(&map->refcnt, 0);
 	}
-	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
+	auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
 
-void perf_evlist__munmap(struct perf_evlist *evlist)
+static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
 	int i;
 
-	if (evlist->mmap == NULL)
-		return;
+	if (evlist->mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap[i]);
 
-	for (i = 0; i < evlist->nr_mmaps; i++)
-		__perf_evlist__munmap(evlist, i);
+	if (evlist->backward_mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->backward_mmap[i]);
+}
 
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+	perf_evlist__munmap_nofree(evlist);
 	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
 }
 
-static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
+	int i;
+	struct perf_mmap *map;
+
 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 	if (cpu_map__empty(evlist->cpus))
 		evlist->nr_mmaps = thread_map__nr(evlist->threads);
-	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
-	return evlist->mmap != NULL ? 0 : -ENOMEM;
+	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	if (!map)
+		return NULL;
+
+	for (i = 0; i < evlist->nr_mmaps; i++)
+		map[i].fd = -1;
+	return map;
 }
 
 struct mmap_params {
@@ -846,8 +986,8 @@ struct mmap_params {
 	struct auxtrace_mmap_params auxtrace_mp;
 };
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
-			       struct mmap_params *mp, int fd)
+static int perf_mmap__mmap(struct perf_mmap *map,
+			   struct mmap_params *mp, int fd)
 {
 	/*
 	 * The last one will be done at perf_evlist__mmap_consume(), so that we
@@ -862,34 +1002,61 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 	 * evlist layer can't just drop it when filtering events in
 	 * perf_evlist__filter_pollfd().
 	 */
-	atomic_set(&evlist->mmap[idx].refcnt, 2);
-	evlist->mmap[idx].prev = 0;
-	evlist->mmap[idx].mask = mp->mask;
-	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
-				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[idx].base == MAP_FAILED) {
+	atomic_set(&map->refcnt, 2);
+	map->prev = 0;
+	map->mask = mp->mask;
+	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+			 MAP_SHARED, fd, 0);
+	if (map->base == MAP_FAILED) {
 		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
 			  errno);
-		evlist->mmap[idx].base = NULL;
+		map->base = NULL;
 		return -1;
 	}
+	map->fd = fd;
 
-	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
-				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
+	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
+				&mp->auxtrace_mp, map->base, fd))
 		return -1;
 
 	return 0;
 }
 
+static bool
+perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
+			 struct perf_evsel *evsel)
+{
+	if (evsel->attr.write_backward)
+		return false;
+	return true;
+}
+
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 				       struct mmap_params *mp, int cpu,
-				       int thread, int *output)
+				       int thread, int *_output, int *_output_backward)
 {
 	struct perf_evsel *evsel;
+	int revent;
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
+		struct perf_mmap *maps = evlist->mmap;
+		int *output = _output;
 		int fd;
 
+		if (evsel->attr.write_backward) {
+			output = _output_backward;
+			maps = evlist->backward_mmap;
+
+			if (!maps) {
+				maps = perf_evlist__alloc_mmap(evlist);
+				if (!maps)
+					return -1;
+				evlist->backward_mmap = maps;
+				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
+					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+			}
+		}
+
 		if (evsel->system_wide && thread)
 			continue;
 
@@ -897,15 +1064,18 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 
 		if (*output == -1) {
 			*output = fd;
-			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
+
+			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
 				return -1;
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 				return -1;
 
-			perf_evlist__mmap_get(evlist, idx);
+			perf_mmap__get(&maps[idx]);
 		}
 
+		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
+
 		/*
 		 * The system_wide flag causes a selected event to be opened
 		 * always without a pid.  Consequently it will never get a
 		 * POLLHUP, but it is used for tracking in combination with
 		 * other events, so it should not need to be polled anyway.
 		 * Therefore don't add it for polling.
 		 */
 		if (!evsel->system_wide &&
-		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
-			perf_evlist__mmap_put(evlist, idx);
+		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
+			perf_mmap__put(&maps[idx]);
 			return -1;
 		}
 
@@ -941,13 +1111,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per cpu\n");
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		int output = -1;
+		int output_backward = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
 					      true);
 
 		for (thread = 0; thread < nr_threads; thread++) {
 			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-							thread, &output))
+							thread, &output, &output_backward))
 				goto out_unmap;
 		}
 	}
@@ -955,8 +1126,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 	return 0;
 
 out_unmap:
-	for (cpu = 0; cpu < nr_cpus; cpu++)
-		__perf_evlist__munmap(evlist, cpu);
+	perf_evlist__munmap_nofree(evlist);
 	return -1;
 }
 
@@ -969,43 +1139,51 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per thread\n");
 	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
+		int output_backward = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
 					      false);
 
 		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-						&output))
+						&output, &output_backward))
 			goto out_unmap;
 	}
 
 	return 0;
 
 out_unmap:
-	for (thread = 0; thread < nr_threads; thread++)
-		__perf_evlist__munmap(evlist, thread);
+	perf_evlist__munmap_nofree(evlist);
 	return -1;
 }
 
-static size_t perf_evlist__mmap_size(unsigned long pages)
+unsigned long perf_event_mlock_kb_in_pages(void)
 {
-	if (pages == UINT_MAX) {
-		int max;
+	unsigned long pages;
+	int max;
 
-		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
-			/*
-			 * Pick a once upon a time good value, i.e. things look
-			 * strange since we can't read a sysctl value, but lets not
-			 * die yet...
-			 */
-			max = 512;
-		} else {
-			max -= (page_size / 1024);
-		}
+	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
+		/*
+		 * Pick a once upon a time good value, i.e. things look
+		 * strange since we can't read a sysctl value, but lets not
+		 * die yet...
+		 */
+		max = 512;
+	} else {
+		max -= (page_size / 1024);
+	}
+
+	pages = (max * 1024) / page_size;
+	if (!is_power_of_2(pages))
+		pages = rounddown_pow_of_two(pages);
 
-		pages = (max * 1024) / page_size;
-		if (!is_power_of_2(pages))
-			pages = rounddown_pow_of_two(pages);
-	} else if (!is_power_of_2(pages))
+	return pages;
+}
+
+static size_t perf_evlist__mmap_size(unsigned long pages)
+{
+	if (pages == UINT_MAX)
+		pages = perf_event_mlock_kb_in_pages();
+	else if (!is_power_of_2(pages))
 		return 0;
 
 	return (pages + 1) * page_size;
 }
@@ -1107,7 +1285,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
 		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
 	};
 
-	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+	if (!evlist->mmap)
+		evlist->mmap = perf_evlist__alloc_mmap(evlist);
+	if (!evlist->mmap)
 		return -ENOMEM;
 
 	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
@@ -1121,7 +1301,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
 	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
 				   auxtrace_pages, auxtrace_overwrite);
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
 		    evsel->sample_id == NULL &&
 		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
@@ -1192,6 +1372,24 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
 	perf_evlist__propagate_maps(evlist);
 }
 
+void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
+				   enum perf_event_sample_format bit)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel)
+		__perf_evsel__set_sample_bit(evsel, bit);
+}
+
+void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
+				     enum perf_event_sample_format bit)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel)
+		__perf_evsel__reset_sample_bit(evsel, bit);
+}
+
 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
 {
 	struct perf_evsel *evsel;
@@ -1199,7 +1397,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **e
 	const int ncpus = cpu_map__nr(evlist->cpus),
 		  nthreads = thread_map__nr(evlist->threads);
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		if (evsel->filter == NULL)
 			continue;
 
@@ -1222,7 +1420,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
 	struct perf_evsel *evsel;
 	int err = 0;
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
 			continue;
 
@@ -1276,7 +1474,7 @@ bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
 	if (evlist->id_pos < 0 || evlist->is_pos < 0)
 		return false;
 
-	evlist__for_each(evlist, pos) {
+	evlist__for_each_entry(evlist, pos) {
 		if (pos->id_pos != evlist->id_pos ||
 		    pos->is_pos != evlist->is_pos)
 			return false;
@@ -1292,7 +1490,7 @@ u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
 	if (evlist->combined_sample_type)
 		return evlist->combined_sample_type;
 
-	evlist__for_each(evlist, evsel)
+	evlist__for_each_entry(evlist, evsel)
 		evlist->combined_sample_type |= evsel->attr.sample_type;
 
 	return evlist->combined_sample_type;
@@ -1309,7 +1507,7 @@ u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
 	struct perf_evsel *evsel;
 	u64 branch_type = 0;
 
-	evlist__for_each(evlist, evsel)
+	evlist__for_each_entry(evlist, evsel)
 		branch_type |= evsel->attr.branch_sample_type;
 	return branch_type;
 }
@@ -1320,7 +1518,7 @@ bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
 	u64 read_format = first->attr.read_format;
 	u64 sample_type = first->attr.sample_type;
 
-	evlist__for_each(evlist, pos) {
+	evlist__for_each_entry(evlist, pos) {
 		if (read_format != pos->attr.read_format)
 			return false;
 	}
@@ -1377,7 +1575,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
 {
 	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
 
-	evlist__for_each_continue(evlist, pos) {
+	evlist__for_each_entry_continue(evlist, pos) {
 		if (first->attr.sample_id_all != pos->attr.sample_id_all)
 			return false;
 	}
@@ -1404,7 +1602,7 @@ void perf_evlist__close(struct perf_evlist *evlist)
 	int nthreads = thread_map__nr(evlist->threads);
 	int n;
 
-	evlist__for_each_reverse(evlist, evsel) {
+	evlist__for_each_entry_reverse(evlist, evsel) {
 		n = evsel->cpus ? evsel->cpus->nr : ncpus;
 		perf_evsel__close(evsel, n, nthreads);
 	}
@@ -1458,7 +1656,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
 
 	perf_evlist__update_id_pos(evlist);
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
 		if (err < 0)
 			goto out_err;
@@ -1619,7 +1817,7 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
 	struct perf_evsel *evsel;
 	size_t printed = 0;
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
 				   perf_evsel__name(evsel));
 	}
@@ -1631,7 +1829,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist,
 			       int err, char *buf, size_t size)
 {
 	int printed, value;
-	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
 
 	switch (err) {
 	case EACCES:
@@ -1683,7 +1881,7 @@ out_default:
 
 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
 {
-	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
 	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
 
 	switch (err) {
@@ -1721,7 +1919,7 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 	if (move_evsel == perf_evlist__first(evlist))
 		return;
 
-	evlist__for_each_safe(evlist, n, evsel) {
+	evlist__for_each_entry_safe(evlist, n, evsel) {
 		if (evsel->leader == move_evsel->leader)
 			list_move_tail(&evsel->node, &move);
 	}
@@ -1737,7 +1935,7 @@ void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
 	if (tracking_evsel->tracking)
 		return;
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		if (evsel != tracking_evsel)
 			evsel->tracking = false;
 	}
@@ -1751,7 +1949,7 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
 {
 	struct perf_evsel *evsel;
 
-	evlist__for_each(evlist, evsel) {
+	evlist__for_each_entry(evlist, evsel) {
 		if (!evsel->name)
 			continue;
 		if (strcmp(str, evsel->name) == 0)
@@ -1760,3 +1958,61 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
 
 	return NULL;
 }
+
+void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
+				  enum bkw_mmap_state state)
+{
+	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
+	enum action {
+		NONE,
+		PAUSE,
+		RESUME,
+	} action = NONE;
+
+	if (!evlist->backward_mmap)
+		return;
+
+	switch (old_state) {
+	case BKW_MMAP_NOTREADY: {
+		if (state != BKW_MMAP_RUNNING)
+			goto state_err;
+		break;
+	}
+	case BKW_MMAP_RUNNING: {
+		if (state != BKW_MMAP_DATA_PENDING)
+			goto state_err;
+		action = PAUSE;
+		break;
+	}
+	case BKW_MMAP_DATA_PENDING: {
+		if (state != BKW_MMAP_EMPTY)
+			goto state_err;
+		break;
+	}
+	case BKW_MMAP_EMPTY: {
+		if (state != BKW_MMAP_RUNNING)
+			goto state_err;
+		action = RESUME;
+		break;
+	}
+	default:
+		WARN_ONCE(1, "Shouldn't get there\n");
+	}
+
+	evlist->bkw_mmap_state = state;
+
+	switch (action) {
+	case PAUSE:
+		perf_evlist__pause(evlist);
+		break;
+	case RESUME:
+		perf_evlist__resume(evlist);
+		break;
+	case NONE:
+	default:
+		break;
+	}
+
+state_err:
+	return;
+}
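
The diff ends with the bkw_mmap_state machine, so it is worth sketching how a consumer would drive it together with the backward read helpers the patch adds. The sketch below is modeled on the flow the patch enables (pause, catch up, drain, resume), not code taken from the patch; "process_event" is a hypothetical callback and error handling is elided.

	#include "evlist.h"	/* perf-internal headers assumed available */

	/*
	 * Hedged sketch: drain every backward ring buffer once. Only
	 * perf_evlist__toggle_bkw_mmap(), perf_mmap__read_catchup() and
	 * perf_mmap__read_backward() come from the patch above.
	 */
	static int drain_backward_mmaps(struct perf_evlist *evlist,
					int (*process_event)(union perf_event *ev))
	{
		int i;

		/* RUNNING -> DATA_PENDING pauses kernel output on each backward fd. */
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

		for (i = 0; i < evlist->nr_mmaps; i++) {
			struct perf_mmap *map = &evlist->backward_mmap[i];
			union perf_event *event;

			/* Snap md->prev to the current head before reading. */
			perf_mmap__read_catchup(map);
			while ((event = perf_mmap__read_backward(map)) != NULL) {
				if (process_event(event))
					return -1;
			}
		}

		/* DATA_PENDING -> EMPTY, then EMPTY -> RUNNING resumes output. */
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		return 0;
	}

Note that the toggles must follow the legal transitions enforced by perf_evlist__toggle_bkw_mmap(): NOTREADY -> RUNNING, RUNNING -> DATA_PENDING (pause), DATA_PENDING -> EMPTY, EMPTY -> RUNNING (resume); anything else falls through to state_err.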
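The 'end' computation in perf_mmap__read_backward() is the subtlest part of the patch: 'head' counts down from 0 as the kernel writes, so -head is the number of bytes written. The self-contained program below mirrors just that computation with a hypothetical 64-byte buffer (mask == 63) to make the two cases concrete; it is an illustration, not code from the patch.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of the 'end' selection in perf_mmap__read_backward(). */
	static uint64_t backward_end(uint64_t head, uint64_t mask)
	{
		if (-head < mask + 1)		/* fewer bytes written than one lap */
			return 0;		/* valid data is [head, 0) */
		return head + mask + 1;		/* wrapped: valid data is one full lap */
	}

	int main(void)
	{
		uint64_t mask = 63;	/* hypothetical 64-byte ring buffer */

		/* 40 bytes written, head == -40: not wrapped, end == 0 */
		printf("end = %lld\n", (long long)backward_end((uint64_t)-40, mask));
		/* 100 bytes written, head == -100: wrapped, end == head + 64 == -36 */
		printf("end = %lld\n", (long long)backward_end((uint64_t)-100, mask));
		return 0;
	}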
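Finally, the newly exported perf_event_mlock_kb_in_pages() feeds perf_evlist__mmap_size(). A worked example of the arithmetic, assuming 4 KiB pages and the kernel's usual perf_event_mlock_kb default of 516 (512 KiB of data plus one page); the numbers are illustrative:

	/*
	 *   max   = 516 - (4096 / 1024) = 512   KiB left for the data pages
	 *   pages = (512 * 1024) / 4096 = 128   already a power of two
	 *   size  = (128 + 1) * 4096    = 516 KiB total mmap length
	 *
	 * The "+ 1" page is the perf_event_mmap_page control page that
	 * precedes the data area, which is why perf_evlist__mmap_size()
	 * returns (pages + 1) * page_size.
	 */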