author     Adrian Hunter <adrian.hunter@intel.com>      2015-08-13 12:40:57 +0300
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2015-09-04 12:01:05 -0300
commit     86c2786994bd7c0d4b525bbfbe42ac540d0b8166
tree       d536022cb55f7d0d3d2d98e7b70ad39f84919dee /tools/perf/arch/x86
parent     1b29ac59b1d692c06ec543a5f35e0d9ebb98e003
perf intel-pt: Add support for PERF_RECORD_SWITCH
Add support for selecting and processing PERF_RECORD_SWITCH events for
use by Intel PT. If they are available, they will be used in preference
to sched_switch events.
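
For reference, the two record types differ only in how much they identify the other task. The following is a sketch paraphrasing the perf_event_open record documentation, not a definition taken from this patch; the struct names are illustrative:

  #include <linux/perf_event.h>
  #include <linux/types.h>

  /* PERF_RECORD_SWITCH carries only the header; identity comes from sample_id */
  struct switch_record {                       /* name is illustrative */
          struct perf_event_header header;     /* misc has PERF_RECORD_MISC_SWITCH_OUT on switch-out */
          /* followed by sample_id: tid, time, cpu, ... per the event's sample_type */
  };

  /* PERF_RECORD_SWITCH_CPU_WIDE additionally names the other task */
  struct switch_cpu_wide_record {              /* name is illustrative */
          struct perf_event_header header;
          __u32 next_prev_pid;                 /* next (switch out) or previous (switch in) pid */
          __u32 next_prev_tid;                 /* next (switch out) or previous (switch in) tid */
          /* followed by sample_id */
  };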
This enables an unprivileged user to trace multi-threaded or
multi-process workloads with any level of perf_event_paranoid. However,
it depends on kernel support for PERF_RECORD_SWITCH.
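
A rough way to picture that dependency (only a sketch; the tool itself relies on perf_can_record_switch_events(), visible in the diff below): a kernel without PERF_RECORD_SWITCH support rejects the context_switch attribute bit at perf_event_open time, so support can be probed by trying to open a dummy event with the bit set. The helper name below is hypothetical, not a perf function:

  #include <linux/perf_event.h>
  #include <stdbool.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Illustrative probe: can this kernel emit PERF_RECORD_SWITCH? */
  static bool kernel_has_record_switch(void)
  {
          struct perf_event_attr attr;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_SOFTWARE;
          attr.config = PERF_COUNT_SW_DUMMY;
          attr.exclude_kernel = 1;
          attr.context_switch = 1;        /* the bit being probed */

          /* Monitor the current task on any CPU; conservatively treat any failure as "no". */
          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0)
                  return false;
          close(fd);
          return true;
  }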
Without this patch, tracing a multi-threaded workload will decode
without error but all the data will be attributed to the main thread.
Without this patch, tracing a multi-process workload will result in
decoder errors because the decoder will not know which executable is
executing.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/1439458857-30636-3-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/arch/x86')
-rw-r--r--  tools/perf/arch/x86/util/intel-pt.c  |  55
1 file changed, 47 insertions(+), 8 deletions(-)
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 2ca10d796c0b..b02af064f0f9 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -624,13 +624,49 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * threads.
 	 */
 	if (have_timing_info && !cpu_map__empty(cpus)) {
-		err = intel_pt_track_switches(evlist);
-		if (err == -EPERM)
-			pr_debug2("Unable to select sched:sched_switch\n");
-		else if (err)
-			return err;
-		else
-			ptr->have_sched_switch = 1;
+		if (perf_can_record_switch_events()) {
+			bool cpu_wide = !target__none(&opts->target) &&
+					!target__has_task(&opts->target);
+
+			if (!cpu_wide && perf_can_record_cpu_wide()) {
+				struct perf_evsel *switch_evsel;
+
+				err = parse_events(evlist, "dummy:u", NULL);
+				if (err)
+					return err;
+
+				switch_evsel = perf_evlist__last(evlist);
+
+				switch_evsel->attr.freq = 0;
+				switch_evsel->attr.sample_period = 1;
+				switch_evsel->attr.context_switch = 1;
+
+				switch_evsel->system_wide = true;
+				switch_evsel->no_aux_samples = true;
+				switch_evsel->immediate = true;
+
+				perf_evsel__set_sample_bit(switch_evsel, TID);
+				perf_evsel__set_sample_bit(switch_evsel, TIME);
+				perf_evsel__set_sample_bit(switch_evsel, CPU);
+
+				opts->record_switch_events = false;
+				ptr->have_sched_switch = 3;
+			} else {
+				opts->record_switch_events = true;
+				if (cpu_wide)
+					ptr->have_sched_switch = 3;
+				else
+					ptr->have_sched_switch = 2;
+			}
+		} else {
+			err = intel_pt_track_switches(evlist);
+			if (err == -EPERM)
+				pr_debug2("Unable to select sched:sched_switch\n");
+			else if (err)
+				return err;
+			else
+				ptr->have_sched_switch = 1;
+		}
 	}
 
 	if (intel_pt_evsel) {
@@ -663,8 +699,11 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		tracking_evsel->attr.sample_period = 1;
 
 		/* In per-cpu case, always need the time of mmap events etc */
-		if (!cpu_map__empty(cpus))
+		if (!cpu_map__empty(cpus)) {
 			perf_evsel__set_sample_bit(tracking_evsel, TIME);
+			/* And the CPU for switch events */
+			perf_evsel__set_sample_bit(tracking_evsel, CPU);
+		}
 	}
 
 	/*
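
For readers following the control flow above: ptr->have_sched_switch ends up as 1 when falling back to the sched:sched_switch tracepoint, 2 when per-task PERF_RECORD_SWITCH events are recorded via opts->record_switch_events, and 3 when CPU-wide switch information is available, either through the system-wide "dummy:u" event added here or through record_switch_events on a CPU-wide target. On the consumer side, a switch record can be told apart from its CPU-wide variant, and a switch-in from a switch-out, roughly like this (handle_switch is a hypothetical helper, not perf code):

  #include <linux/perf_event.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical consumer of a raw record pulled off the perf mmap ring buffer. */
  static void handle_switch(const struct perf_event_header *hdr)
  {
          bool out = hdr->misc & PERF_RECORD_MISC_SWITCH_OUT;

          if (hdr->type == PERF_RECORD_SWITCH)
                  printf("context switch %s\n", out ? "out" : "in");
          else if (hdr->type == PERF_RECORD_SWITCH_CPU_WIDE)
                  printf("cpu-wide context switch %s\n", out ? "out" : "in");
  }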