Diffstat (limited to 'tools/perf/util/mmap.c')
-rw-r--r--   tools/perf/util/mmap.c | 244
1 file changed, 115 insertions, 129 deletions
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 9fe5f9c7d577..fc832676a798 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -21,50 +21,28 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
 }
 
 /* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
-					 u64 start, u64 end, u64 *prev)
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
+					 u64 *startp, u64 end)
 {
 	unsigned char *data = map->base + page_size;
 	union perf_event *event = NULL;
-	int diff = end - start;
-
-	if (check_messup) {
-		/*
-		 * If we're further behind than half the buffer, there's a chance
-		 * the writer will bite our tail and mess up the samples under us.
-		 *
-		 * If we somehow ended up ahead of the 'end', we got messed up.
-		 *
-		 * In either case, truncate and restart at 'end'.
-		 */
-		if (diff > map->mask / 2 || diff < 0) {
-			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
-			/*
-			 * 'end' points to a known good entry, start there.
-			 */
-			start = end;
-			diff = 0;
-		}
-	}
+	int diff = end - *startp;
 
 	if (diff >= (int)sizeof(event->header)) {
 		size_t size;
 
-		event = (union perf_event *)&data[start & map->mask];
+		event = (union perf_event *)&data[*startp & map->mask];
 		size = event->header.size;
 
-		if (size < sizeof(event->header) || diff < (int)size) {
-			event = NULL;
-			goto broken_event;
-		}
+		if (size < sizeof(event->header) || diff < (int)size)
+			return NULL;
 
 		/*
 		 * Event straddles the mmap boundary -- header should always
 		 * be inside due to u64 alignment of output.
 		 */
-		if ((start & map->mask) + size != ((start + size) & map->mask)) {
-			unsigned int offset = start;
+		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+			unsigned int offset = *startp;
 			unsigned int len = min(sizeof(*event), size), cpy;
 			void *dst = map->event_copy;
 
@@ -79,36 +57,27 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messu
 			event = (union perf_event *)map->event_copy;
 		}
 
-		start += size;
+		*startp += size;
 	}
 
-broken_event:
-	if (prev)
-		*prev = start;
-
 	return event;
 }
 
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
-{
-	u64 head;
-	u64 old = map->prev;
-
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->refcnt))
-		return NULL;
-
-	head = perf_mmap__read_head(map);
-
-	return perf_mmap__read(map, check_messup, old, head, &map->prev);
-}
-
-union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
+/*
+ * Read event from ring buffer one by one.
+ * Return one event for each call.
+ *
+ * Usage:
+ * perf_mmap__read_init()
+ * while(event = perf_mmap__read_event()) {
+ *	//process the event
+ *	perf_mmap__consume()
+ * }
+ * perf_mmap__read_done()
+ */
+union perf_event *perf_mmap__read_event(struct perf_mmap *map)
 {
-	u64 head, end;
-	u64 start = map->prev;
+	union perf_event *event;
 
 	/*
 	 * Check if event was unmapped due to a POLLHUP/POLLERR.
@@ -116,40 +85,16 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
 	if (!refcount_read(&map->refcnt))
 		return NULL;
 
-	head = perf_mmap__read_head(map);
-	if (!head)
-		return NULL;
-
-	/*
-	 * 'head' pointer starts from 0. Kernel minus sizeof(record) form
-	 * it each time when kernel writes to it, so in fact 'head' is
-	 * negative. 'end' pointer is made manually by adding the size of
-	 * the ring buffer to 'head' pointer, means the validate data can
-	 * read is the whole ring buffer. If 'end' is positive, the ring
-	 * buffer has not fully filled, so we must adjust 'end' to 0.
-	 *
-	 * However, since both 'head' and 'end' is unsigned, we can't
-	 * simply compare 'end' against 0. Here we compare '-head' and
-	 * the size of the ring buffer, where -head is the number of bytes
-	 * kernel write to the ring buffer.
-	 */
-	if (-head < (u64)(map->mask + 1))
-		end = 0;
-	else
-		end = head + map->mask + 1;
-
-	return perf_mmap__read(map, false, start, end, &map->prev);
-}
+	/* non-overwirte doesn't pause the ringbuffer */
+	if (!map->overwrite)
+		map->end = perf_mmap__read_head(map);
 
-void perf_mmap__read_catchup(struct perf_mmap *map)
-{
-	u64 head;
+	event = perf_mmap__read(map, &map->start, map->end);
 
-	if (!refcount_read(&map->refcnt))
-		return;
+	if (!map->overwrite)
+		map->prev = map->start;
 
-	head = perf_mmap__read_head(map);
-	map->prev = head;
+	return event;
 }
 
 static bool perf_mmap__empty(struct perf_mmap *map)
@@ -170,9 +115,9 @@ void perf_mmap__put(struct perf_mmap *map)
 		perf_mmap__munmap(map);
 }
 
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
+void perf_mmap__consume(struct perf_mmap *map)
 {
-	if (!overwrite) {
+	if (!map->overwrite) {
 		u64 old = map->prev;
 
 		perf_mmap__write_tail(map, old);
@@ -222,7 +167,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
 {
 	/*
-	 * The last one will be done at perf_evlist__mmap_consume(), so that we
+	 * The last one will be done at perf_mmap__consume(), so that we
 	 * make sure we don't prevent tools from consuming every last event in
 	 * the ring buffer.
 	 *
@@ -254,19 +199,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
 	return 0;
 }
 
-static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
 {
 	struct perf_event_header *pheader;
-	u64 evt_head = head;
+	u64 evt_head = *start;
 	int size = mask + 1;
 
-	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
-	pheader = (struct perf_event_header *)(buf + (head & mask));
-	*start = head;
+	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
+	pheader = (struct perf_event_header *)(buf + (*start & mask));
 
 	while (true) {
-		if (evt_head - head >= (unsigned int)size) {
-			pr_debug("Finished reading backward ring buffer: rewind\n");
-			if (evt_head - head > (unsigned int)size)
+		if (evt_head - *start >= (unsigned int)size) {
+			pr_debug("Finished reading overwrite ring buffer: rewind\n");
+			if (evt_head - *start > (unsigned int)size)
 				evt_head -= pheader->size;
 			*end = evt_head;
 			return 0;
@@ -275,7 +219,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
 		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
 
 		if (pheader->size == 0) {
-			pr_debug("Finished reading backward ring buffer: get start\n");
+			pr_debug("Finished reading overwrite ring buffer: get start\n");
 			*end = evt_head;
 			return 0;
 		}
@@ -287,48 +231,73 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
 	return -1;
 }
 
-static int rb_find_range(void *data, int mask, u64 head, u64 old,
-			 u64 *start, u64 *end, bool backward)
+/*
+ * Report the start and end of the available data in ringbuffer
+ */
+static int __perf_mmap__read_init(struct perf_mmap *md)
 {
-	if (!backward) {
-		*start = old;
-		*end = head;
-		return 0;
+	u64 head = perf_mmap__read_head(md);
+	u64 old = md->prev;
+	unsigned char *data = md->base + page_size;
+	unsigned long size;
+
+	md->start = md->overwrite ? head : old;
+	md->end = md->overwrite ? old : head;
+
+	if (md->start == md->end)
+		return -EAGAIN;
+
+	size = md->end - md->start;
+	if (size > (unsigned long)(md->mask) + 1) {
+		if (!md->overwrite) {
+			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+
+			md->prev = head;
+			perf_mmap__consume(md);
+			return -EAGAIN;
+		}
+
+		/*
+		 * Backward ring buffer is full. We still have a chance to read
+		 * most of data from it.
		 */
+		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
+			return -EINVAL;
 	}
 
-	return backward_rb_find_range(data, mask, head, start, end);
+	return 0;
+}
+
+int perf_mmap__read_init(struct perf_mmap *map)
+{
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!refcount_read(&map->refcnt))
+		return -ENOENT;
+
+	return __perf_mmap__read_init(map);
 }
 
-int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
-		    void *to, int push(void *to, void *buf, size_t size))
+int perf_mmap__push(struct perf_mmap *md, void *to,
+		    int push(void *to, void *buf, size_t size))
 {
 	u64 head = perf_mmap__read_head(md);
-	u64 old = md->prev;
-	u64 end = head, start = old;
 	unsigned char *data = md->base + page_size;
 	unsigned long size;
 	void *buf;
 	int rc = 0;
 
-	if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
-		return -1;
+	rc = perf_mmap__read_init(md);
+	if (rc < 0)
+		return (rc == -EAGAIN) ? 0 : -1;
 
-	if (start == end)
-		return 0;
+	size = md->end - md->start;
 
-	size = end - start;
-	if (size > (unsigned long)(md->mask) + 1) {
-		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
-		md->prev = head;
-		perf_mmap__consume(md, overwrite || backward);
-		return 0;
-	}
-
-	if ((start & md->mask) + size != (end & md->mask)) {
-		buf = &data[start & md->mask];
-		size = md->mask + 1 - (start & md->mask);
-		start += size;
+	if ((md->start & md->mask) + size != (md->end & md->mask)) {
+		buf = &data[md->start & md->mask];
+		size = md->mask + 1 - (md->start & md->mask);
+		md->start += size;
 
 		if (push(to, buf, size) < 0) {
 			rc = -1;
@@ -336,9 +305,9 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
 		}
 	}
 
-	buf = &data[start & md->mask];
-	size = end - start;
-	start += size;
+	buf = &data[md->start & md->mask];
+	size = md->end - md->start;
+	md->start += size;
 
 	if (push(to, buf, size) < 0) {
 		rc = -1;
@@ -346,7 +315,24 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
 	}
 
 	md->prev = head;
-	perf_mmap__consume(md, overwrite || backward);
+	perf_mmap__consume(md);
 out:
 	return rc;
 }
+
+/*
+ * Mandatory for overwrite mode
+ * The direction of overwrite mode is backward.
+ * The last perf_mmap__read() will set tail to map->prev.
+ * Need to correct the map->prev to head which is the end of next read.
+ */
+void perf_mmap__read_done(struct perf_mmap *map)
+{
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!refcount_read(&map->refcnt))
+		return;
+
+	map->prev = perf_mmap__read_head(map);
+}
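
Usage note: the comment block this patch adds above perf_mmap__read_event() prescribes the read_init / read_event / consume / read_done sequence. Below is a minimal consumer sketch assembled only from the signatures visible in this diff; drain_one_mmap() and process_event() are hypothetical names, and the include is assumed to resolve inside the perf source tree.

#include "util/mmap.h"	/* struct perf_mmap, union perf_event, perf_mmap__* (assumed path) */

/* Hypothetical helper: drain every event currently readable from one
 * mmap'ed ring buffer; works for both regular and overwrite mode. */
static int drain_one_mmap(struct perf_mmap *map,
			  int (*process_event)(union perf_event *event))
{
	union perf_event *event;
	int err;

	/* Snapshot the readable [start, end) range. -EAGAIN means no data
	 * (or, in non-overwrite mode, an overrun that read_init already
	 * consumed), which is not an error for the caller. */
	err = perf_mmap__read_init(map);
	if (err)
		return (err == -EAGAIN) ? 0 : err;

	/* One event per call; consume each event before the next read so
	 * the kernel can reuse the space in non-overwrite mode. */
	while ((event = perf_mmap__read_event(map)) != NULL) {
		process_event(event);
		perf_mmap__consume(map);
	}

	/* Mandatory in overwrite mode: re-sync map->prev with the current
	 * head so the next batch starts at the right place. */
	perf_mmap__read_done(map);
	return 0;
}

perf_mmap__push() in the diff follows the same pattern internally: it calls perf_mmap__read_init(), treats -EAGAIN as "nothing to do", and finishes with perf_mmap__consume().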