Diffstat (limited to 'tools/perf/builtin-top.c')
-rw-r--r--  tools/perf/builtin-top.c | 187
1 file changed, 110 insertions, 77 deletions
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b46b3c9f57a0..1f60124eb19b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -24,27 +24,31 @@
 #include "util/bpf-event.h"
 #include "util/config.h"
 #include "util/color.h"
+#include "util/dso.h"
 #include "util/evlist.h"
 #include "util/evsel.h"
+#include "util/evsel_config.h"
 #include "util/event.h"
 #include "util/machine.h"
 #include "util/map.h"
+#include "util/mmap.h"
 #include "util/session.h"
 #include "util/symbol.h"
-#include "util/thread.h"
-#include "util/thread_map.h"
+#include "util/synthetic-events.h"
 #include "util/top.h"
+#include "util/util.h"
 #include <linux/rbtree.h>
 #include <subcmd/parse-options.h>
 #include "util/parse-events.h"
+#include "util/callchain.h"
 #include "util/cpumap.h"
-#include "util/xyarray.h"
 #include "util/sort.h"
 #include "util/string2.h"
 #include "util/term.h"
 #include "util/intlist.h"
 #include "util/parse-branch-options.h"
 #include "arch/common.h"
+#include "ui/ui.h"
 
 #include "util/debug.h"
 #include "util/ordered-events.h"
@@ -75,6 +79,7 @@
 #include <linux/stringify.h>
 #include <linux/time64.h>
 #include <linux/types.h>
+#include <linux/err.h>
 
 #include <linux/ctype.h>
 
@@ -101,7 +106,7 @@ static void perf_top__resize(struct perf_top *top)
 
 static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
 {
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	struct symbol *sym;
 	struct annotation *notes;
 	struct map *map;
@@ -129,7 +134,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
 	notes = symbol__annotation(sym);
 	pthread_mutex_lock(&notes->lock);
 
-	if (!symbol__hists(sym, top->evlist->nr_entries)) {
+	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
 		pthread_mutex_unlock(&notes->lock);
 		pr_err("Not enough memory for annotating '%s' symbol!\n",
 		       sym->name);
@@ -186,7 +191,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
 static void perf_top__record_precise_ip(struct perf_top *top,
 					struct hist_entry *he,
 					struct perf_sample *sample,
-					struct perf_evsel *evsel, u64 ip)
+					struct evsel *evsel, u64 ip)
 {
 	struct annotation *notes;
 	struct symbol *sym = he->ms.sym;
@@ -228,7 +233,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
 static void perf_top__show_details(struct perf_top *top)
 {
 	struct hist_entry *he = top->sym_filter_entry;
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	struct annotation *notes;
 	struct symbol *symbol;
 	int more;
@@ -265,12 +270,52 @@ out_unlock:
 	pthread_mutex_unlock(&notes->lock);
 }
 
+static void perf_top__resort_hists(struct perf_top *t)
+{
+	struct evlist *evlist = t->evlist;
+	struct evsel *pos;
+
+	evlist__for_each_entry(evlist, pos) {
+		struct hists *hists = evsel__hists(pos);
+
+		/*
+		 * unlink existing entries so that they can be linked
+		 * in a correct order in hists__match() below.
+		 */
+		hists__unlink(hists);
+
+		if (evlist->enabled) {
+			if (t->zero) {
+				hists__delete_entries(hists);
+			} else {
+				hists__decay_entries(hists, t->hide_user_symbols,
+						     t->hide_kernel_symbols);
+			}
+		}
+
+		hists__collapse_resort(hists, NULL);
+
+		/* Non-group events are considered as leader */
+		if (symbol_conf.event_group &&
+		    !perf_evsel__is_group_leader(pos)) {
+			struct hists *leader_hists = evsel__hists(pos->leader);
+
+			hists__match(leader_hists, hists);
+			hists__link(leader_hists, hists);
+		}
+	}
+
+	evlist__for_each_entry(evlist, pos) {
+		perf_evsel__output_resort(pos, NULL);
+	}
+}
+
 static void perf_top__print_sym_table(struct perf_top *top)
 {
 	char bf[160];
 	int printed = 0;
 	const int win_width = top->winsize.ws_col - 1;
-	struct perf_evsel *evsel = top->sym_evsel;
+	struct evsel *evsel = top->sym_evsel;
 	struct hists *hists = evsel__hists(evsel);
 
 	puts(CONSOLE_CLEAR);
@@ -296,17 +341,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
 		return;
 	}
 
-	if (top->evlist->enabled) {
-		if (top->zero) {
-			hists__delete_entries(hists);
-		} else {
-			hists__decay_entries(hists, top->hide_user_symbols,
-					     top->hide_kernel_symbols);
-		}
-	}
-
-	hists__collapse_resort(hists, NULL);
-	perf_evsel__output_resort(evsel, NULL);
+	perf_top__resort_hists(top);
 
 	hists__output_recalc_col_len(hists, top->print_entries - printed);
 	putchar('\n');
@@ -404,7 +439,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
 	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
 	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
 
-	if (top->evlist->nr_entries > 1)
+	if (top->evlist->core.nr_entries > 1)
 		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));
 
 	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
@@ -439,7 +474,7 @@ static int perf_top__key_mapped(struct perf_top *top, int c)
 		case 'S':
 			return 1;
 		case 'E':
-			return top->evlist->nr_entries > 1 ? 1 : 0;
+			return top->evlist->core.nr_entries > 1 ? 1 : 0;
 		default:
 			break;
 	}
@@ -485,7 +520,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
 			}
 			break;
 		case 'E':
-			if (top->evlist->nr_entries > 1) {
+			if (top->evlist->core.nr_entries > 1) {
 				/* Select 0 as the default event: */
 				int counter = 0;
 
@@ -496,8 +531,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
 
 				prompt_integer(&counter, "Enter details event counter");
 
-				if (counter >= top->evlist->nr_entries) {
-					top->sym_evsel = perf_evlist__first(top->evlist);
+				if (counter >= top->evlist->core.nr_entries) {
+					top->sym_evsel = evlist__first(top->evlist);
 					fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
 					sleep(1);
 					break;
@@ -506,7 +541,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
 					if (top->sym_evsel->idx == counter)
 						break;
 			} else
-				top->sym_evsel = perf_evlist__first(top->evlist);
+				top->sym_evsel = evlist__first(top->evlist);
 			break;
 		case 'f':
 			prompt_integer(&top->count_filter, "Enter display event count filter");
@@ -554,25 +589,11 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
 static void perf_top__sort_new_samples(void *arg)
 {
 	struct perf_top *t = arg;
-	struct perf_evsel *evsel = t->sym_evsel;
-	struct hists *hists;
 
 	if (t->evlist->selected != NULL)
 		t->sym_evsel = t->evlist->selected;
 
-	hists = evsel__hists(evsel);
-
-	if (t->evlist->enabled) {
-		if (t->zero) {
-			hists__delete_entries(hists);
-		} else {
-			hists__decay_entries(hists, t->hide_user_symbols,
-					     t->hide_kernel_symbols);
-		}
-	}
-
-	hists__collapse_resort(hists, NULL);
-	perf_evsel__output_resort(evsel, NULL);
+	perf_top__resort_hists(t);
 
 	if (t->lost || t->drop)
 		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C)\n");
@@ -586,7 +607,7 @@ static void stop_top(void)
 
 static void *display_thread_tui(void *arg)
 {
-	struct perf_evsel *pos;
+	struct evsel *pos;
 	struct perf_top *top = arg;
 	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
 	struct hist_browser_timer hbt = {
@@ -602,6 +623,8 @@ static void *display_thread_tui(void *arg)
 	 */
 	unshare(CLONE_FS);
 
+	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
+
 	perf_top__sort_new_samples(top);
 
 	/*
@@ -652,6 +675,8 @@ static void *display_thread(void *arg)
 	 */
 	unshare(CLONE_FS);
 
+	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
+
 	display_setup_sig();
 	pthread__unblock_sigwinch();
 repeat:
@@ -693,7 +718,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
 {
 	struct perf_top *top = arg;
 	struct hist_entry *he = iter->he;
-	struct perf_evsel *evsel = iter->evsel;
+	struct evsel *evsel = iter->evsel;
 
 	if (perf_hpp_list.sym && single)
 		perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
@@ -705,7 +730,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
 
 static void perf_event__process_sample(struct perf_tool *tool,
 				       const union perf_event *event,
-				       struct perf_evsel *evsel,
+				       struct evsel *evsel,
 				       struct perf_sample *sample,
 				       struct machine *machine)
 {
@@ -745,7 +770,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
 		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
 			ui__warning(
 "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
-"Check /proc/sys/kernel/kptr_restrict.\n\n"
+"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
 "Kernel%s samples will not be resolved.\n",
 			  al.map && map__has_symbols(al.map) ?
 			  " modules" : "");
@@ -813,7 +838,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
 
 static void
 perf_top__process_lost(struct perf_top *top, union perf_event *event,
-		       struct perf_evsel *evsel)
+		       struct evsel *evsel)
 {
 	struct hists *hists = evsel__hists(evsel);
 
@@ -825,7 +850,7 @@ perf_top__process_lost(struct perf_top *top, union perf_event *event,
 
 static void
 perf_top__process_lost_samples(struct perf_top *top, union perf_event *event,
-			       struct perf_evsel *evsel)
+			       struct evsel *evsel)
 {
 	struct hists *hists = evsel__hists(evsel);
 
@@ -839,8 +864,8 @@ static u64 last_timestamp;
 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 {
 	struct record_opts *opts = &top->record_opts;
-	struct perf_evlist *evlist = top->evlist;
-	struct perf_mmap *md;
+	struct evlist *evlist = top->evlist;
+	struct mmap *md;
 	union perf_event *event;
 
 	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
@@ -874,13 +899,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 static void perf_top__mmap_read(struct perf_top *top)
 {
 	bool overwrite = top->record_opts.overwrite;
-	struct perf_evlist *evlist = top->evlist;
+	struct evlist *evlist = top->evlist;
 	int i;
 
 	if (overwrite)
 		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
 
-	for (i = 0; i < top->evlist->nr_mmaps; i++)
+	for (i = 0; i < top->evlist->core.nr_mmaps; i++)
 		perf_top__mmap_read_idx(top, i);
 
 	if (overwrite) {
@@ -909,10 +934,10 @@ static void perf_top__mmap_read(struct perf_top *top)
 static int perf_top__overwrite_check(struct perf_top *top)
 {
 	struct record_opts *opts = &top->record_opts;
-	struct perf_evlist *evlist = top->evlist;
+	struct evlist *evlist = top->evlist;
 	struct perf_evsel_config_term *term;
 	struct list_head *config_terms;
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	int set, overwrite = -1;
 
 	evlist__for_each_entry(evlist, evsel) {
@@ -938,7 +963,7 @@ static int perf_top__overwrite_check(struct perf_top *top)
 		/* has term for current event */
 		if ((overwrite < 0) && (set >= 0)) {
 			/* if it's first event, set overwrite */
-			if (evsel == perf_evlist__first(evlist))
+			if (evsel == evlist__first(evlist))
 				overwrite = set;
 			else
 				return -1;
@@ -952,21 +977,21 @@ static int perf_top__overwrite_check(struct perf_top *top)
 }
 
 static int perf_top_overwrite_fallback(struct perf_top *top,
-				       struct perf_evsel *evsel)
+				       struct evsel *evsel)
 {
 	struct record_opts *opts = &top->record_opts;
-	struct perf_evlist *evlist = top->evlist;
-	struct perf_evsel *counter;
+	struct evlist *evlist = top->evlist;
+	struct evsel *counter;
 
 	if (!opts->overwrite)
 		return 0;
 
 	/* only fall back when first event fails */
-	if (evsel != perf_evlist__first(evlist))
+	if (evsel != evlist__first(evlist))
 		return 0;
 
 	evlist__for_each_entry(evlist, counter)
-		counter->attr.write_backward = false;
+		counter->core.attr.write_backward = false;
 	opts->overwrite = false;
 	pr_debug2("fall back to non-overwrite mode\n");
 	return 1;
@@ -975,8 +1000,8 @@ static int perf_top_overwrite_fallback(struct perf_top *top,
 static int perf_top__start_counters(struct perf_top *top)
 {
 	char msg[BUFSIZ];
-	struct perf_evsel *counter;
-	struct perf_evlist *evlist = top->evlist;
+	struct evsel *counter;
+	struct evlist *evlist = top->evlist;
 	struct record_opts *opts = &top->record_opts;
 
 	if (perf_top__overwrite_check(top)) {
@@ -989,8 +1014,8 @@ static int perf_top__start_counters(struct perf_top *top)
 
 	evlist__for_each_entry(evlist, counter) {
 try_again:
-		if (perf_evsel__open(counter, top->evlist->cpus,
-				     top->evlist->threads) < 0) {
+		if (evsel__open(counter, top->evlist->core.cpus,
+				top->evlist->core.threads) < 0) {
 
 			/*
 			 * Specially handle overwrite fall back.
@@ -1019,7 +1044,7 @@ try_again:
 		}
 	}
 
-	if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
+	if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
 		ui__error("Failed to mmap with %d (%s)\n",
 			    errno, str_error_r(errno, msg, sizeof(msg)));
 		goto out_err;
@@ -1100,11 +1125,11 @@ static int deliver_event(struct ordered_events *qe,
 			 struct ordered_event *qevent)
 {
 	struct perf_top *top = qe->data;
-	struct perf_evlist *evlist = top->evlist;
+	struct evlist *evlist = top->evlist;
 	struct perf_session *session = top->session;
 	union perf_event *event = qevent->event;
 	struct perf_sample sample;
-	struct perf_evsel *evsel;
+	struct evsel *evsel;
 	struct machine *machine;
 	int ret = -1;
 
@@ -1123,8 +1148,11 @@ static int deliver_event(struct ordered_events *qe,
 	evsel = perf_evlist__id2evsel(session->evlist, sample.id);
 	assert(evsel != NULL);
 
-	if (event->header.type == PERF_RECORD_SAMPLE)
+	if (event->header.type == PERF_RECORD_SAMPLE) {
+		if (evswitch__discard(&top->evswitch, evsel))
+			return 0;
 		++top->samples;
+	}
 
 	switch (sample.cpumode) {
 	case PERF_RECORD_MISC_USER:
@@ -1222,7 +1250,7 @@ static int __cmd_top(struct perf_top *top)
 		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
 
 	machine__synthesize_threads(&top->session->machines.host, &opts->target,
-				    top->evlist->threads, false,
+				    top->evlist->core.threads, false,
 				    top->nr_threads_synthesize);
 
 	if (top->nr_threads_synthesize > 1)
@@ -1255,7 +1283,7 @@ static int __cmd_top(struct perf_top *top)
 	 * so leave the check here.
 	 */
 	if (!target__none(&opts->target))
-		perf_evlist__enable(top->evlist);
+		evlist__enable(top->evlist);
 
 	ret = -1;
 	if (pthread_create(&thread_process, NULL, process_thread, top)) {
@@ -1280,7 +1308,7 @@ static int __cmd_top(struct perf_top *top)
 	}
 
 	/* Wait for a minimal set of events before starting the snapshot */
-	perf_evlist__poll(top->evlist, 100);
+	evlist__poll(top->evlist, 100);
 
 	perf_top__mmap_read(top);
 
@@ -1290,7 +1318,7 @@ static int __cmd_top(struct perf_top *top)
 		perf_top__mmap_read(top);
 
 		if (opts->overwrite || (hits == top->samples))
-			ret = perf_evlist__poll(top->evlist, 100);
+			ret = evlist__poll(top->evlist, 100);
 
 		if (resize) {
 			perf_top__resize(top);
@@ -1509,9 +1537,10 @@ int cmd_top(int argc, const char **argv)
 		    "number of thread to run event synthesize"),
 	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
 		    "Record namespaces events"),
+	OPTS_EVSWITCH(&top.evswitch),
 	OPT_END()
 	};
-	struct perf_evlist *sb_evlist = NULL;
+	struct evlist *sb_evlist = NULL;
 	const char * const top_usage[] = {
 		"perf top [<options>]",
 		NULL
@@ -1524,7 +1553,7 @@ int cmd_top(int argc, const char **argv)
 	top.annotation_opts.min_pcnt = 5;
 	top.annotation_opts.context = 4;
 
-	top.evlist = perf_evlist__new();
+	top.evlist = evlist__new();
 	if (top.evlist == NULL)
 		return -ENOMEM;
 
@@ -1536,12 +1565,16 @@ int cmd_top(int argc, const char **argv)
 	if (argc)
 		usage_with_options(top_usage, options);
 
-	if (!top.evlist->nr_entries &&
+	if (!top.evlist->core.nr_entries &&
 	    perf_evlist__add_default(top.evlist) < 0) {
 		pr_err("Not enough memory for event selector list\n");
 		goto out_delete_evlist;
 	}
 
+	status = evswitch__init(&top.evswitch, top.evlist, stderr);
+	if (status)
+		goto out_delete_evlist;
+
 	if (symbol_conf.report_hierarchy) {
 		/* disable incompatible options */
 		symbol_conf.event_group = false;
@@ -1612,7 +1645,7 @@ int cmd_top(int argc, const char **argv)
 		goto out_delete_evlist;
 	}
 
-	top.sym_evsel = perf_evlist__first(top.evlist);
+	top.sym_evsel = evlist__first(top.evlist);
 
 	if (!callchain_param.enabled) {
 		symbol_conf.cumulate_callchain = false;
@@ -1642,8 +1675,8 @@ int cmd_top(int argc, const char **argv)
 	}
 
 	top.session = perf_session__new(NULL, false, NULL);
-	if (top.session == NULL) {
-		status = -1;
+	if (IS_ERR(top.session)) {
+		status = PTR_ERR(top.session);
 		goto out_delete_evlist;
 	}
 
@@ -1661,7 +1694,7 @@ int cmd_top(int argc, const char **argv)
 		perf_evlist__stop_sb_thread(sb_evlist);
 
 out_delete_evlist:
-	perf_evlist__delete(top.evlist);
+	evlist__delete(top.evlist);
 	perf_session__delete(top.session);
 
 	return status;