Diffstat (limited to 'tools/perf/builtin-top.c')
-rw-r--r--	tools/perf/builtin-top.c	102
1 file changed, 57 insertions(+), 45 deletions(-)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 35ac016fcb98..aa0c73e57924 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -123,14 +123,9 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
}
notes = symbol__annotation(sym);
- if (notes->src != NULL) {
- pthread_mutex_lock(&notes->lock);
- goto out_assign;
- }
-
pthread_mutex_lock(&notes->lock);
- if (symbol__alloc_hist(sym) < 0) {
+ if (!symbol__hists(sym, top->evlist->nr_entries)) {
pthread_mutex_unlock(&notes->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
@@ -138,9 +133,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__annotate(sym, map, evsel, 0, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL);
if (err == 0) {
-out_assign:
top->sym_filter_entry = he;
} else {
char msg[BUFSIZ];
@@ -188,7 +182,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
static void perf_top__record_precise_ip(struct perf_top *top,
struct hist_entry *he,
struct perf_sample *sample,
- int counter, u64 ip)
+ struct perf_evsel *evsel, u64 ip)
{
struct annotation *notes;
struct symbol *sym = he->ms.sym;
@@ -204,7 +198,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
if (pthread_mutex_trylock(&notes->lock))
return;
- err = hist_entry__inc_addr_samples(he, sample, counter, ip);
+ err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
pthread_mutex_unlock(&notes->lock);
@@ -249,10 +243,9 @@ static void perf_top__show_details(struct perf_top *top)
goto out_unlock;
printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
- printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
+ printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
- more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
- 0, top->sym_pcnt_filter, top->print_entries, 4);
+ more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts);
if (top->evlist->enabled) {
if (top->zero)
@@ -314,7 +307,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
hists__output_recalc_col_len(hists, top->print_entries - printed);
putchar('\n');
hists__fprintf(hists, false, top->print_entries - printed, win_width,
- top->min_percent, stdout, symbol_conf.use_callchain);
+ top->min_percent, stdout, !symbol_conf.use_callchain);
}
static void prompt_integer(int *target, const char *msg)
@@ -412,7 +405,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
- fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
+ fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
@@ -515,7 +508,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
prompt_integer(&top->count_filter, "Enter display event count filter");
break;
case 'F':
- prompt_percent(&top->sym_pcnt_filter,
+ prompt_percent(&top->annotation_opts.min_pcnt,
"Enter details display event filter (percent)");
break;
case 'K':
@@ -613,7 +606,8 @@ static void *display_thread_tui(void *arg)
perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
top->min_percent,
&top->session->header.env,
- !top->record_opts.overwrite);
+ !top->record_opts.overwrite,
+ &top->annotation_opts);
done = 1;
return NULL;
@@ -691,7 +685,7 @@ static int hist_iter__top_callback(struct hist_entry_iter *iter,
struct perf_evsel *evsel = iter->evsel;
if (perf_hpp_list.sym && single)
- perf_top__record_precise_ip(top, he, iter->sample, evsel->idx, al->addr);
+ perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
!(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
@@ -742,7 +736,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
- al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
+ al.map && map__has_symbols(al.map) ?
" modules" : "");
if (use_browser <= 0)
sleep(5);
@@ -750,7 +744,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
machine->kptr_restrict_warned = true;
}
- if (al.sym == NULL) {
+ if (al.sym == NULL && al.map != NULL) {
const char *msg = "Kernel samples will not be resolved.\n";
/*
* As we do lazy loading of symtabs we only will know if the
@@ -764,8 +758,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
* invalid --vmlinux ;-)
*/
if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
- al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
- RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
+ __map__is_kernel(al.map) && map__has_symbols(al.map)) {
if (symbol_conf.vmlinux_name) {
char serr[256];
dso__strerror_load(al.map->dso, serr, sizeof(serr));
@@ -817,14 +810,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
struct perf_session *session = top->session;
union perf_event *event;
struct machine *machine;
- u64 end, start;
int ret;
md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
- if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0)
+ if (perf_mmap__read_init(md) < 0)
return;
- while ((event = perf_mmap__read_event(md, opts->overwrite, &start, end)) != NULL) {
+ while ((event = perf_mmap__read_event(md)) != NULL) {
ret = perf_evlist__parse_sample(evlist, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
@@ -879,7 +871,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
} else
++session->evlist->stats.nr_unknown_events;
next_event:
- perf_mmap__consume(md, opts->overwrite);
+ perf_mmap__consume(md);
}
perf_mmap__read_done(md);
@@ -1085,8 +1077,9 @@ static int __cmd_top(struct perf_top *top)
if (top->session == NULL)
return -1;
- if (!objdump_path) {
- ret = perf_env__lookup_objdump(&top->session->header.env);
+ if (!top->annotation_opts.objdump_path) {
+ ret = perf_env__lookup_objdump(&top->session->header.env,
+ &top->annotation_opts.objdump_path);
if (ret)
goto out_delete;
}
@@ -1141,11 +1134,6 @@ static int __cmd_top(struct perf_top *top)
if (!target__none(&opts->target))
perf_evlist__enable(top->evlist);
- /* Wait for a minimal set of events before starting the snapshot */
- perf_evlist__poll(top->evlist, 100);
-
- perf_top__mmap_read(top);
-
ret = -1;
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
display_thread), top)) {
@@ -1163,6 +1151,11 @@ static int __cmd_top(struct perf_top *top)
}
}
+ /* Wait for a minimal set of events before starting the snapshot */
+ perf_evlist__poll(top->evlist, 100);
+
+ perf_top__mmap_read(top);
+
while (!done) {
u64 hits = top->samples;
@@ -1224,8 +1217,10 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
- if (!strcmp(var, "top.call-graph"))
- var = "call-graph.record-mode"; /* fall-through */
+ if (!strcmp(var, "top.call-graph")) {
+ var = "call-graph.record-mode";
+ return perf_default_config(var, value, cb);
+ }
if (!strcmp(var, "top.children")) {
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
return 0;
@@ -1262,10 +1257,17 @@ int cmd_top(int argc, const char **argv)
.uses_mmap = true,
},
.proc_map_timeout = 500,
- .overwrite = 1,
+ /*
+ * FIXME: This will lose PERF_RECORD_MMAP and other metadata
+ * when we pause, fix that and reenable. Probably using a
+ * separate evlist with a dummy event, i.e. a non-overwrite
+ * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
+ * stays in overwrite mode. -acme
+ * */
+ .overwrite = 0,
},
- .max_stack = sysctl_perf_event_max_stack,
- .sym_pcnt_filter = 5,
+ .max_stack = sysctl__max_stack(),
+ .annotation_opts = annotation__default_options,
.nr_threads_synthesize = UINT_MAX,
};
struct record_opts *opts = &top.record_opts;
@@ -1307,7 +1309,9 @@ int cmd_top(int argc, const char **argv)
OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
"symbol to annotate"),
OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
- OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
+ OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
+ "profile at this frequency",
+ record__parse_freq),
OPT_INTEGER('E', "entries", &top.print_entries,
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
@@ -1345,15 +1349,15 @@ int cmd_top(int argc, const char **argv)
"only consider symbols in these comms"),
OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
- OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
+ OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
"Interleave source code with assembly code (default)"),
- OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
+ OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
- OPT_STRING(0, "objdump", &objdump_path, "path",
+ OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
- OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
+ OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
OPT_CALLBACK(0, "percent-limit", &top, "percent",
@@ -1375,6 +1379,8 @@ int cmd_top(int argc, const char **argv)
"Show raw trace event output (do not use print fmt or plugins)"),
OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
"Show entries in a hierarchy"),
+ OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
+ "Use a backward ring buffer, default: no"),
OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
"number of thread to run event synthesize"),
@@ -1389,6 +1395,9 @@ int cmd_top(int argc, const char **argv)
if (status < 0)
return status;
+ top.annotation_opts.min_pcnt = 5;
+ top.annotation_opts.context = 4;
+
top.evlist = perf_evlist__new();
if (top.evlist == NULL)
return -ENOMEM;
@@ -1420,6 +1429,9 @@ int cmd_top(int argc, const char **argv)
}
}
+ if (opts->branch_stack && callchain_param.enabled)
+ symbol_conf.show_branchflag_count = true;
+
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
perf_hpp_list.need_collapse = 1;
@@ -1466,8 +1478,6 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
- symbol_conf.nr_events = top.evlist->nr_entries;
-
if (top.delay_secs < 1)
top.delay_secs = 1;
@@ -1490,6 +1500,8 @@ int cmd_top(int argc, const char **argv)
if (status < 0)
goto out_delete_evlist;
+ annotation_config__init();
+
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
if (symbol__init(NULL) < 0)
return -1;
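
For reference, a minimal sketch (not part of the patch) of the simplified ring-buffer read loop that the perf_mmap__read_init()/read_event()/consume() signature change above enables: the caller no longer tracks start/end positions itself. The helper name drain_mmap() is hypothetical, and the includes assume the perf-internal headers from this tree.

/*
 * Minimal sketch, not part of the patch: the read loop shape used by
 * perf_top__mmap_read_idx() after the start/end parameters were dropped.
 * drain_mmap() is a hypothetical helper name; headers are assumed to be
 * the perf-internal util/mmap.h and util/event.h from this tree.
 */
#include "util/mmap.h"
#include "util/event.h"

static void drain_mmap(struct perf_mmap *md)
{
	union perf_event *event;

	if (perf_mmap__read_init(md) < 0)	/* nothing to read or ring not mapped */
		return;

	while ((event = perf_mmap__read_event(md)) != NULL) {
		/* ... parse and deliver the event here ... */
		perf_mmap__consume(md);		/* mark the event as consumed */
	}

	perf_mmap__read_done(md);		/* publish the updated read position */
}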