Diffstat (limited to 'tools/perf/builtin-top.c')
-rw-r--r--  tools/perf/builtin-top.c  | 150
1 file changed, 143 insertions, 7 deletions
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index c6ccda52117d..35ac016fcb98 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -283,8 +283,9 @@ static void perf_top__print_sym_table(struct perf_top *top)
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
 
-	if (hists->stats.nr_lost_warned !=
-	    hists->stats.nr_events[PERF_RECORD_LOST]) {
+	if (!top->record_opts.overwrite &&
+	    (hists->stats.nr_lost_warned !=
+	    hists->stats.nr_events[PERF_RECORD_LOST])) {
 		hists->stats.nr_lost_warned =
 			      hists->stats.nr_events[PERF_RECORD_LOST];
 		color_fprintf(stdout, PERF_COLOR_RED,
@@ -611,7 +612,8 @@ static void *display_thread_tui(void *arg)
 
 	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
 				      top->min_percent,
-				      &top->session->header.env);
+				      &top->session->header.env,
+				      !top->record_opts.overwrite);
 
 	done = 1;
 	return NULL;
@@ -807,15 +809,23 @@ static void perf_event__process_sample(struct perf_tool *tool,
 
 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 {
+	struct record_opts *opts = &top->record_opts;
+	struct perf_evlist *evlist = top->evlist;
 	struct perf_sample sample;
 	struct perf_evsel *evsel;
+	struct perf_mmap *md;
 	struct perf_session *session = top->session;
 	union perf_event *event;
 	struct machine *machine;
+	u64 end, start;
 	int ret;
 
-	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
-		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
+	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
+	if (perf_mmap__read_init(md, opts->overwrite, &start, &end) < 0)
+		return;
+
+	while ((event = perf_mmap__read_event(md, opts->overwrite, &start, end)) != NULL) {
+		ret = perf_evlist__parse_sample(evlist, event, &sample);
 		if (ret) {
 			pr_err("Can't parse sample, err = %d\n", ret);
 			goto next_event;
@@ -869,16 +879,120 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 		} else
 			++session->evlist->stats.nr_unknown_events;
 next_event:
-		perf_evlist__mmap_consume(top->evlist, idx);
+		perf_mmap__consume(md, opts->overwrite);
 	}
+
+	perf_mmap__read_done(md);
 }
 
 static void perf_top__mmap_read(struct perf_top *top)
 {
+	bool overwrite = top->record_opts.overwrite;
+	struct perf_evlist *evlist = top->evlist;
+	unsigned long long start, end;
 	int i;
 
+	start = rdclock();
+	if (overwrite)
+		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
+
 	for (i = 0; i < top->evlist->nr_mmaps; i++)
 		perf_top__mmap_read_idx(top, i);
+
+	if (overwrite) {
+		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
+		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+	}
+	end = rdclock();
+
+	if ((end - start) > (unsigned long long)top->delay_secs * NSEC_PER_SEC)
+		ui__warning("Too slow to read ring buffer.\n"
+			    "Please try increasing the period (-c) or\n"
+			    "decreasing the freq (-F) or\n"
+			    "limiting the number of CPUs (-C)\n");
+}
+
+/*
+ * Check per-event overwrite term.
+ * perf top should support consistent term for all events.
+ * - All events don't have per-event term
+ *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
+ *   Nothing change, return 0.
+ * - All events have same per-event term
+ *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/
+ *   Using the per-event setting to replace the opts->overwrite if
+ *   they are different, then return 0.
+ * - Events have different per-event term
+ *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
+ *   Return -1
+ * - Some of the event set per-event term, but some not.
+ *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
+ *   Return -1
+ */
+static int perf_top__overwrite_check(struct perf_top *top)
+{
+	struct record_opts *opts = &top->record_opts;
+	struct perf_evlist *evlist = top->evlist;
+	struct perf_evsel_config_term *term;
+	struct list_head *config_terms;
+	struct perf_evsel *evsel;
+	int set, overwrite = -1;
+
+	evlist__for_each_entry(evlist, evsel) {
+		set = -1;
+		config_terms = &evsel->config_terms;
+		list_for_each_entry(term, config_terms, list) {
+			if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
+				set = term->val.overwrite ? 1 : 0;
+		}
+
+		/* no term for current and previous event (likely) */
+		if ((overwrite < 0) && (set < 0))
+			continue;
+
+		/* has term for both current and previous event, compare */
+		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
+			return -1;
+
+		/* no term for current event but has term for previous one */
+		if ((overwrite >= 0) && (set < 0))
+			return -1;
+
+		/* has term for current event */
+		if ((overwrite < 0) && (set >= 0)) {
+			/* if it's first event, set overwrite */
+			if (evsel == perf_evlist__first(evlist))
+				overwrite = set;
+			else
+				return -1;
+		}
+	}
+
+	if ((overwrite >= 0) && (opts->overwrite != overwrite))
+		opts->overwrite = overwrite;
+
+	return 0;
+}
+
+static int perf_top_overwrite_fallback(struct perf_top *top,
+				       struct perf_evsel *evsel)
+{
+	struct record_opts *opts = &top->record_opts;
+	struct perf_evlist *evlist = top->evlist;
+	struct perf_evsel *counter;
+
+	if (!opts->overwrite)
+		return 0;
+
+	/* only fall back when first event fails */
+	if (evsel != perf_evlist__first(evlist))
+		return 0;
+
+	evlist__for_each_entry(evlist, counter)
+		counter->attr.write_backward = false;
+	opts->overwrite = false;
+	pr_debug2("fall back to non-overwrite mode\n");
+	return 1;
 }
 
 static int perf_top__start_counters(struct perf_top *top)
@@ -888,12 +1002,33 @@ static int perf_top__start_counters(struct perf_top *top)
 	struct perf_evlist *evlist = top->evlist;
 	struct record_opts *opts = &top->record_opts;
 
+	if (perf_top__overwrite_check(top)) {
+		ui__error("perf top only support consistent per-event "
+			  "overwrite setting for all events\n");
+		goto out_err;
+	}
+
 	perf_evlist__config(evlist, opts, &callchain_param);
 
 	evlist__for_each_entry(evlist, counter) {
 try_again:
 		if (perf_evsel__open(counter, top->evlist->cpus,
 				     top->evlist->threads) < 0) {
+
+			/*
+			 * Specially handle overwrite fall back.
+			 * Because perf top is the only tool which has
+			 * overwrite mode by default, support
+			 * both overwrite and non-overwrite mode, and
+			 * require consistent mode for all events.
+			 *
+			 * May move it to generic code with more tools
+			 * have similar attribute.
+			 */
+			if (perf_missing_features.write_backward &&
+			    perf_top_overwrite_fallback(top, counter))
+				goto try_again;
+
 			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
 				if (verbose > 0)
 					ui__warning("%s\n", msg);
@@ -1033,7 +1168,7 @@ static int __cmd_top(struct perf_top *top)
 
 		perf_top__mmap_read(top);
 
-		if (hits == top->samples)
+		if (opts->overwrite || (hits == top->samples))
 			ret = perf_evlist__poll(top->evlist, 100);
 
 		if (resize) {
@@ -1127,6 +1262,7 @@ int cmd_top(int argc, const char **argv)
 				.uses_mmap   = true,
 			},
 			.proc_map_timeout    = 500,
+			.overwrite	= 1,
 		},
 		.max_stack	     = sysctl_perf_event_max_stack,
 		.sym_pcnt_filter     = 5,
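The core of this change is the per-event overwrite consistency rule documented above perf_top__overwrite_check(): either no event carries an overwrite/no-overwrite term, or the first event's term defines the mode and every other event must agree. The standalone sketch below is not part of the patch; resolve_overwrite() and its integer encoding of "unset / no-overwrite / overwrite" are invented here purely to illustrate the same scan without the perf-internal evlist and config-term types.

/*
 * Illustrative sketch only: per-event settings are modelled as plain ints,
 * -1 = no per-event term, 0 = no-overwrite, 1 = overwrite.
 */
#include <stdio.h>

/* Returns 0 and writes the resolved mode, or -1 on an inconsistent mix. */
static int resolve_overwrite(const int *per_event, int nr_events,
			     int *global_overwrite)
{
	int overwrite = -1;	/* -1: no per-event term seen yet */
	int i;

	for (i = 0; i < nr_events; i++) {
		int set = per_event[i];

		if (overwrite < 0 && set < 0)
			continue;		/* neither has a term */
		if (overwrite >= 0 && set >= 0 && overwrite != set)
			return -1;		/* conflicting terms */
		if (overwrite >= 0 && set < 0)
			return -1;		/* earlier event has a term, this one doesn't */
		if (overwrite < 0 && set >= 0) {
			if (i == 0)
				overwrite = set; /* first event defines the mode */
			else
				return -1;	/* only a later event has a term */
		}
	}

	if (overwrite >= 0)
		*global_overwrite = overwrite;
	return 0;
}

int main(void)
{
	int global = 1;				/* perf top defaults to overwrite */
	int all_no_overwrite[] = { 0, 0 };	/* ",no-overwrite/" on every event */
	int mixed[] = { 1, 0 };			/* overwrite vs. no-overwrite */

	if (!resolve_overwrite(all_no_overwrite, 2, &global))
		printf("consistent, overwrite = %d\n", global);	/* prints 0 */
	if (resolve_overwrite(mixed, 2, &global) < 0)
		printf("inconsistent per-event terms, error\n");
	return 0;
}

As in the patch, a mix of modes is rejected outright rather than silently resolved, since perf top reads all events through a single overwrite or non-overwrite mmap path chosen by opts->overwrite.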

