Diffstat (limited to 'tools/perf')
-rw-r--r-- tools/perf/Build | 10
-rw-r--r-- tools/perf/Documentation/Build.txt | 24
-rw-r--r-- tools/perf/Documentation/perf-c2c.txt | 16
-rw-r--r-- tools/perf/Documentation/perf-config.txt | 47
-rw-r--r-- tools/perf/Documentation/perf-diff.txt | 56
-rw-r--r-- tools/perf/Documentation/perf-mem.txt | 2
-rw-r--r-- tools/perf/Documentation/perf-record.txt | 23
-rw-r--r-- tools/perf/Documentation/perf-report.txt | 13
-rw-r--r-- tools/perf/Documentation/perf-script.txt | 9
-rw-r--r-- tools/perf/Documentation/perf-stat.txt | 5
-rw-r--r-- tools/perf/Documentation/perf-trace.txt | 8
-rw-r--r-- tools/perf/Documentation/perf.data-file-format.txt | 11
-rw-r--r-- tools/perf/Documentation/tips.txt | 7
-rw-r--r-- tools/perf/Makefile.config | 29
-rw-r--r-- tools/perf/Makefile.perf | 28
-rw-r--r-- tools/perf/arch/Build | 4
-rw-r--r-- tools/perf/arch/arm/Build | 4
-rw-r--r-- tools/perf/arch/arm/tests/Build | 8
-rw-r--r-- tools/perf/arch/arm/tests/dwarf-unwind.c | 1
-rw-r--r-- tools/perf/arch/arm/util/Build | 8
-rw-r--r-- tools/perf/arch/arm/util/cs-etm.c | 98
-rw-r--r-- tools/perf/arch/arm/util/cs-etm.h | 3
-rw-r--r-- tools/perf/arch/arm/util/pmu.c | 3
-rw-r--r-- tools/perf/arch/arm64/Build | 4
-rw-r--r-- tools/perf/arch/arm64/annotate/instructions.c | 2
-rw-r--r-- tools/perf/arch/arm64/tests/Build | 6
-rw-r--r-- tools/perf/arch/arm64/tests/dwarf-unwind.c | 1
-rw-r--r-- tools/perf/arch/arm64/util/Build | 12
-rw-r--r-- tools/perf/arch/nds32/Build | 2
-rw-r--r-- tools/perf/arch/nds32/util/Build | 2
-rw-r--r-- tools/perf/arch/powerpc/Build | 4
-rw-r--r-- tools/perf/arch/powerpc/tests/Build | 6
-rw-r--r-- tools/perf/arch/powerpc/tests/dwarf-unwind.c | 1
-rw-r--r-- tools/perf/arch/powerpc/util/Build | 17
-rw-r--r-- tools/perf/arch/powerpc/util/kvm-stat.c | 2
-rw-r--r-- tools/perf/arch/powerpc/util/mem-events.c | 11
-rw-r--r-- tools/perf/arch/powerpc/util/skip-callchain-idx.c | 3
-rw-r--r-- tools/perf/arch/s390/Build | 2
-rw-r--r-- tools/perf/arch/s390/annotate/instructions.c | 2
-rw-r--r-- tools/perf/arch/s390/util/Build | 12
-rw-r--r-- tools/perf/arch/s390/util/kvm-stat.c | 1
-rw-r--r-- tools/perf/arch/sh/Build | 2
-rw-r--r-- tools/perf/arch/sh/util/Build | 2
-rw-r--r-- tools/perf/arch/sparc/Build | 2
-rw-r--r-- tools/perf/arch/sparc/util/Build | 2
-rw-r--r-- tools/perf/arch/x86/Build | 4
-rw-r--r-- tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 6
-rw-r--r-- tools/perf/arch/x86/tests/Build | 14
-rw-r--r-- tools/perf/arch/x86/tests/dwarf-unwind.c | 1
-rw-r--r-- tools/perf/arch/x86/util/Build | 31
-rw-r--r-- tools/perf/arch/x86/util/archinsn.c | 26
-rw-r--r-- tools/perf/arch/x86/util/kvm-stat.c | 1
-rw-r--r-- tools/perf/arch/xtensa/Build | 2
-rw-r--r-- tools/perf/arch/xtensa/util/Build | 2
-rw-r--r-- tools/perf/bench/epoll-ctl.c | 2
-rw-r--r-- tools/perf/bench/epoll-wait.c | 2
-rw-r--r-- tools/perf/bench/numa.c | 7
-rw-r--r-- tools/perf/builtin-annotate.c | 9
-rw-r--r-- tools/perf/builtin-buildid-cache.c | 4
-rw-r--r-- tools/perf/builtin-buildid-list.c | 8
-rw-r--r-- tools/perf/builtin-c2c.c | 31
-rw-r--r-- tools/perf/builtin-diff.c | 190
-rw-r--r-- tools/perf/builtin-evlist.c | 4
-rw-r--r-- tools/perf/builtin-inject.c | 12
-rw-r--r-- tools/perf/builtin-kallsyms.c | 1
-rw-r--r-- tools/perf/builtin-kmem.c | 7
-rw-r--r-- tools/perf/builtin-kvm.c | 8
-rw-r--r-- tools/perf/builtin-list.c | 8
-rw-r--r-- tools/perf/builtin-lock.c | 8
-rw-r--r-- tools/perf/builtin-mem.c | 9
-rw-r--r-- tools/perf/builtin-probe.c | 1
-rw-r--r-- tools/perf/builtin-record.c | 124
-rw-r--r-- tools/perf/builtin-report.c | 125
-rw-r--r-- tools/perf/builtin-sched.c | 63
-rw-r--r-- tools/perf/builtin-script.c | 208
-rw-r--r-- tools/perf/builtin-stat.c | 19
-rw-r--r-- tools/perf/builtin-timechart.c | 8
-rw-r--r-- tools/perf/builtin-top.c | 83
-rw-r--r-- tools/perf/builtin-trace.c | 63
-rw-r--r-- tools/perf/builtin.h | 3
-rw-r--r-- tools/perf/design.txt | 4
-rw-r--r-- tools/perf/examples/bpf/augmented_raw_syscalls.c | 18
-rw-r--r-- tools/perf/examples/bpf/augmented_syscalls.c | 22
-rw-r--r-- tools/perf/examples/bpf/etcsnoop.c | 18
-rw-r--r-- tools/perf/include/bpf/bpf.h | 22
-rw-r--r-- tools/perf/perf.c | 1
-rw-r--r-- tools/perf/perf.h | 9
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power8/metrics.json | 2245
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power8/other.json | 594
-rw-r--r-- tools/perf/pmu-events/arch/powerpc/power9/metrics.json | 1982
-rw-r--r-- tools/perf/pmu-events/arch/x86/amdfam17h/branch.json | 12
-rw-r--r-- tools/perf/pmu-events/arch/x86/amdfam17h/cache.json | 287
-rw-r--r-- tools/perf/pmu-events/arch/x86/amdfam17h/core.json | 134
-rw-r--r-- tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json | 168
-rw-r--r-- tools/perf/pmu-events/arch/x86/amdfam17h/memory.json | 162
-rw-r--r-- tools/perf/pmu-events/arch/x86/amdfam17h/other.json | 65
-rw-r--r-- tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json | 2
-rw-r--r-- tools/perf/pmu-events/arch/x86/mapfile.csv | 1
-rw-r--r-- tools/perf/scripts/Build | 4
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/Build | 2
-rw-r--r-- tools/perf/scripts/python/Perf-Trace-Util/Build | 2
-rw-r--r-- tools/perf/scripts/python/check-perf-trace.py | 76
-rw-r--r-- tools/perf/scripts/python/compaction-times.py | 8
-rw-r--r-- tools/perf/scripts/python/event_analyzing_sample.py | 48
-rw-r--r-- tools/perf/scripts/python/export-to-postgresql.py | 77
-rw-r--r-- tools/perf/scripts/python/export-to-sqlite.py | 40
-rwxr-xr-x tools/perf/scripts/python/exported-sql-viewer.py | 899
-rw-r--r-- tools/perf/scripts/python/failed-syscalls-by-pid.py | 43
-rw-r--r-- tools/perf/scripts/python/futex-contention.py | 10
-rw-r--r-- tools/perf/scripts/python/intel-pt-events.py | 60
-rw-r--r-- tools/perf/scripts/python/mem-phys-addr.py | 25
-rwxr-xr-x tools/perf/scripts/python/net_dropmonitor.py | 12
-rw-r--r-- tools/perf/scripts/python/netdev-times.py | 92
-rw-r--r-- tools/perf/scripts/python/powerpc-hcalls.py | 18
-rw-r--r-- tools/perf/scripts/python/sched-migration.py | 8
-rw-r--r-- tools/perf/scripts/python/sctop.py | 27
-rwxr-xr-x tools/perf/scripts/python/stackcollapse.py | 9
-rw-r--r-- tools/perf/scripts/python/stat-cpi.py | 11
-rw-r--r-- tools/perf/scripts/python/syscall-counts-by-pid.py | 51
-rw-r--r-- tools/perf/scripts/python/syscall-counts.py | 35
-rw-r--r-- tools/perf/tests/attr.py | 33
-rw-r--r-- tools/perf/tests/attr/test-record-C0 | 2
-rw-r--r-- tools/perf/tests/attr/test-record-basic | 2
-rw-r--r-- tools/perf/tests/attr/test-record-branch-any | 2
-rw-r--r-- tools/perf/tests/attr/test-record-branch-filter-any | 2
-rw-r--r-- tools/perf/tests/attr/test-record-branch-filter-any_call | 2
-rw-r--r-- tools/perf/tests/attr/test-record-branch-filter-any_ret | 2
-rw-r--r-- tools/perf/tests/attr/test-record-branch-filter-hv | 2
-rw-r--r-- tools/perf/tests/attr/test-record-branch-filter-ind_call | 2
-rw-r--r-- tools/perf/tests/attr/test-record-branch-filter-k | 2
-rw-r--r-- tools/perf/tests/attr/test-record-branch-filter-u | 2
-rw-r--r-- tools/perf/tests/attr/test-record-count | 2
-rw-r--r-- tools/perf/tests/attr/test-record-data | 2
-rw-r--r-- tools/perf/tests/attr/test-record-freq | 2
-rw-r--r-- tools/perf/tests/attr/test-record-graph-default | 2
-rw-r--r-- tools/perf/tests/attr/test-record-graph-dwarf | 2
-rw-r--r-- tools/perf/tests/attr/test-record-graph-fp | 2
-rw-r--r-- tools/perf/tests/attr/test-record-group | 2
-rw-r--r-- tools/perf/tests/attr/test-record-group-sampling | 2
-rw-r--r-- tools/perf/tests/attr/test-record-group1 | 2
-rw-r--r-- tools/perf/tests/attr/test-record-no-buffering | 2
-rw-r--r-- tools/perf/tests/attr/test-record-no-inherit | 2
-rw-r--r-- tools/perf/tests/attr/test-record-no-samples | 2
-rw-r--r-- tools/perf/tests/attr/test-record-period | 2
-rw-r--r-- tools/perf/tests/attr/test-record-raw | 2
-rw-r--r-- tools/perf/tests/backward-ring-buffer.c | 2
-rw-r--r-- tools/perf/tests/bp_account.c | 1
-rw-r--r-- tools/perf/tests/code-reading.c | 2
-rw-r--r-- tools/perf/tests/dwarf-unwind.c | 1
-rw-r--r-- tools/perf/tests/evsel-tp-sched.c | 9
-rw-r--r-- tools/perf/tests/expr.c | 5
-rw-r--r-- tools/perf/tests/hists_common.c | 9
-rw-r--r-- tools/perf/tests/hists_cumulate.c | 15
-rw-r--r-- tools/perf/tests/hists_filter.c | 1
-rw-r--r-- tools/perf/tests/hists_link.c | 8
-rw-r--r-- tools/perf/tests/hists_output.c | 33
-rw-r--r-- tools/perf/tests/mmap-thread-lookup.c | 1
-rw-r--r-- tools/perf/tests/openat-syscall-all-cpus.c | 4
-rw-r--r-- tools/perf/tests/parse-events.c | 30
-rw-r--r-- tools/perf/tests/pmu.c | 2
-rw-r--r-- tools/perf/tests/sample-parsing.c | 2
-rw-r--r-- tools/perf/tests/sdt.c | 1
-rw-r--r-- tools/perf/tests/shell/lib/probe.sh | 5
-rwxr-xr-x tools/perf/tests/shell/trace+probe_vfs_getname.sh | 1
-rw-r--r-- tools/perf/trace/beauty/Build | 26
-rw-r--r-- tools/perf/trace/beauty/ioctl.c | 2
-rw-r--r-- tools/perf/trace/beauty/msg_flags.c | 2
-rw-r--r-- tools/perf/trace/beauty/waitid_options.c | 2
-rw-r--r-- tools/perf/ui/Build | 18
-rw-r--r-- tools/perf/ui/browser.c | 10
-rw-r--r-- tools/perf/ui/browsers/Build | 11
-rw-r--r-- tools/perf/ui/browsers/annotate.c | 19
-rw-r--r-- tools/perf/ui/browsers/header.c | 2
-rw-r--r-- tools/perf/ui/browsers/hists.c | 164
-rw-r--r-- tools/perf/ui/browsers/map.c | 1
-rw-r--r-- tools/perf/ui/browsers/res_sample.c | 91
-rw-r--r-- tools/perf/ui/browsers/scripts.c | 274
-rw-r--r-- tools/perf/ui/gtk/annotate.c | 3
-rw-r--r-- tools/perf/ui/gtk/hists.c | 7
-rw-r--r-- tools/perf/ui/hist.c | 1
-rw-r--r-- tools/perf/ui/stdio/hist.c | 7
-rw-r--r-- tools/perf/ui/tui/Build | 8
-rw-r--r-- tools/perf/util/Build | 276
-rw-r--r-- tools/perf/util/annotate.c | 243
-rw-r--r-- tools/perf/util/annotate.h | 22
-rw-r--r-- tools/perf/util/archinsn.h | 12
-rw-r--r-- tools/perf/util/auxtrace.c | 30
-rw-r--r-- tools/perf/util/auxtrace.h | 5
-rw-r--r-- tools/perf/util/block-range.c | 2
-rw-r--r-- tools/perf/util/block-range.h | 6
-rw-r--r-- tools/perf/util/bpf-event.c | 480
-rw-r--r-- tools/perf/util/bpf-event.h | 74
-rw-r--r-- tools/perf/util/bpf-loader.c | 31
-rw-r--r-- tools/perf/util/bpf-loader.h | 7
-rw-r--r-- tools/perf/util/bpf_map.c | 72
-rw-r--r-- tools/perf/util/bpf_map.h | 22
-rw-r--r-- tools/perf/util/branch.h | 27
-rw-r--r-- tools/perf/util/build-id.c | 15
-rw-r--r-- tools/perf/util/build-id.h | 3
-rw-r--r-- tools/perf/util/c++/Build | 4
-rw-r--r-- tools/perf/util/c++/clang.cpp | 4
-rw-r--r-- tools/perf/util/callchain.c | 17
-rw-r--r-- tools/perf/util/callchain.h | 21
-rw-r--r-- tools/perf/util/color.c | 39
-rw-r--r-- tools/perf/util/color.h | 1
-rw-r--r-- tools/perf/util/color_config.c | 47
-rw-r--r-- tools/perf/util/comm.c | 1
-rw-r--r-- tools/perf/util/comm.h | 4
-rw-r--r-- tools/perf/util/config.c | 4
-rw-r--r-- tools/perf/util/cpu-set-sched.h | 50
-rw-r--r-- tools/perf/util/cpumap.c | 23
-rw-r--r-- tools/perf/util/cpumap.h | 1
-rw-r--r-- tools/perf/util/cputopo.c | 277
-rw-r--r-- tools/perf/util/cputopo.h | 33
-rw-r--r-- tools/perf/util/cs-etm-decoder/Build | 2
-rw-r--r-- tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 41
-rw-r--r-- tools/perf/util/cs-etm-decoder/cs-etm-decoder.h | 16
-rw-r--r-- tools/perf/util/cs-etm.c | 832
-rw-r--r-- tools/perf/util/cs-etm.h | 57
-rw-r--r-- tools/perf/util/data-convert-bt.c | 4
-rw-r--r-- tools/perf/util/data.c | 270
-rw-r--r-- tools/perf/util/data.h | 28
-rw-r--r-- tools/perf/util/db-export.c | 16
-rw-r--r-- tools/perf/util/db-export.h | 3
-rw-r--r-- tools/perf/util/drv_configs.c | 78
-rw-r--r-- tools/perf/util/drv_configs.h | 26
-rw-r--r-- tools/perf/util/dso.c | 54
-rw-r--r-- tools/perf/util/dso.h | 25
-rw-r--r-- tools/perf/util/env.c | 155
-rw-r--r-- tools/perf/util/env.h | 24
-rw-r--r-- tools/perf/util/event.c | 43
-rw-r--r-- tools/perf/util/event.h | 60
-rw-r--r-- tools/perf/util/evlist.c | 150
-rw-r--r-- tools/perf/util/evlist.h | 18
-rw-r--r-- tools/perf/util/evsel.c | 41
-rw-r--r-- tools/perf/util/evsel.h | 10
-rw-r--r-- tools/perf/util/header.c | 584
-rw-r--r-- tools/perf/util/header.h | 7
-rw-r--r-- tools/perf/util/hist.c | 325
-rw-r--r-- tools/perf/util/hist.h | 50
-rw-r--r-- tools/perf/util/intel-bts.c | 26
-rw-r--r-- tools/perf/util/intel-pt-decoder/Build | 2
-rw-r--r-- tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 39
-rw-r--r-- tools/perf/util/intel-pt.c | 25
-rw-r--r-- tools/perf/util/intlist.h | 2
-rw-r--r-- tools/perf/util/jitdump.c | 1
-rw-r--r-- tools/perf/util/kvm-stat.h | 7
-rw-r--r-- tools/perf/util/machine.c | 112
-rw-r--r-- tools/perf/util/machine.h | 17
-rw-r--r-- tools/perf/util/map.c | 32
-rw-r--r-- tools/perf/util/map.h | 100
-rw-r--r-- tools/perf/util/map_groups.h | 91
-rw-r--r-- tools/perf/util/map_symbol.h | 22
-rw-r--r-- tools/perf/util/mem-events.c | 2
-rw-r--r-- tools/perf/util/metricgroup.c | 10
-rw-r--r-- tools/perf/util/metricgroup.h | 3
-rw-r--r-- tools/perf/util/mmap.c | 105
-rw-r--r-- tools/perf/util/mmap.h | 3
-rw-r--r-- tools/perf/util/ordered-events.c | 8
-rw-r--r-- tools/perf/util/parse-events.c | 4
-rw-r--r-- tools/perf/util/parse-events.y | 4
-rw-r--r-- tools/perf/util/pmu.c | 16
-rw-r--r-- tools/perf/util/pmu.h | 6
-rw-r--r-- tools/perf/util/probe-event.c | 21
-rw-r--r-- tools/perf/util/probe-event.h | 5
-rw-r--r-- tools/perf/util/probe-file.c | 1
-rw-r--r-- tools/perf/util/rb_resort.h | 8
-rw-r--r-- tools/perf/util/rblist.c | 28
-rw-r--r-- tools/perf/util/rblist.h | 2
-rw-r--r-- tools/perf/util/s390-cpumcf-kernel.h | 62
-rw-r--r-- tools/perf/util/s390-cpumsf.c | 89
-rw-r--r-- tools/perf/util/s390-sample-raw.c | 222
-rw-r--r-- tools/perf/util/sample-raw.c | 18
-rw-r--r-- tools/perf/util/sample-raw.h | 14
-rw-r--r-- tools/perf/util/scripting-engines/Build | 4
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-perl.c | 2
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-python.c | 25
-rw-r--r-- tools/perf/util/session.c | 136
-rw-r--r-- tools/perf/util/setup.py | 9
-rw-r--r-- tools/perf/util/sort.c | 109
-rw-r--r-- tools/perf/util/sort.h | 19
-rw-r--r-- tools/perf/util/srccode.h | 13
-rw-r--r-- tools/perf/util/srcline.c | 45
-rw-r--r-- tools/perf/util/srcline.h | 13
-rw-r--r-- tools/perf/util/stat-display.c | 1
-rw-r--r-- tools/perf/util/stat-shadow.c | 2
-rw-r--r-- tools/perf/util/stat.c | 12
-rw-r--r-- tools/perf/util/strlist.h | 2
-rw-r--r-- tools/perf/util/symbol-elf.c | 25
-rw-r--r-- tools/perf/util/symbol-minimal.c | 1
-rw-r--r-- tools/perf/util/symbol.c | 95
-rw-r--r-- tools/perf/util/symbol.h | 102
-rw-r--r-- tools/perf/util/symbol_conf.h | 76
-rw-r--r-- tools/perf/util/symbol_fprintf.c | 3
-rw-r--r-- tools/perf/util/thread-stack.c | 251
-rw-r--r-- tools/perf/util/thread-stack.h | 9
-rw-r--r-- tools/perf/util/thread.c | 24
-rw-r--r-- tools/perf/util/thread.h | 11
-rw-r--r-- tools/perf/util/time-utils.c | 59
-rw-r--r-- tools/perf/util/time-utils.h | 7
-rw-r--r-- tools/perf/util/tool.h | 5
-rw-r--r-- tools/perf/util/unwind-libdw.c | 2
-rw-r--r-- tools/perf/util/unwind-libunwind-local.c | 1
-rw-r--r-- tools/perf/util/unwind-libunwind.c | 1
-rw-r--r-- tools/perf/util/util.c | 82
-rw-r--r-- tools/perf/util/util.h | 3
-rw-r--r-- tools/perf/util/vdso.c | 1
-rw-r--r-- tools/perf/util/zlib.c | 1
308 files changed, 13248 insertions, 3555 deletions
diff --git a/tools/perf/Build b/tools/perf/Build
index e5232d567611..5f392dbb88fc 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -46,10 +46,10 @@ CFLAGS_builtin-trace.o += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_SQ))"
CFLAGS_builtin-report.o += -DTIPDIR="BUILD_STR($(tipdir_SQ))"
CFLAGS_builtin-report.o += -DDOCDIR="BUILD_STR($(srcdir_SQ)/Documentation)"
-libperf-y += util/
-libperf-y += arch/
-libperf-y += ui/
-libperf-y += scripts/
-libperf-$(CONFIG_TRACE) += trace/beauty/
+perf-y += util/
+perf-y += arch/
+perf-y += ui/
+perf-y += scripts/
+perf-$(CONFIG_TRACE) += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/Build.txt b/tools/perf/Documentation/Build.txt
index f6fc6507ba55..3766886c4bca 100644
--- a/tools/perf/Documentation/Build.txt
+++ b/tools/perf/Documentation/Build.txt
@@ -47,3 +47,27 @@ Those objects are then used in final linking:
NOTE this description is omitting other libraries involved, only
focusing on build framework outcomes
+
+3) Build with ASan or UBSan
+==========================
+ $ cd tools/perf
+ $ make DESTDIR=/usr
+ $ make DESTDIR=/usr install
+
+AddressSanitizer (or ASan) is a GCC feature that detects memory corruption bugs
+such as buffer overflows and memory leaks.
+
+ $ cd tools/perf
+ $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=address'
+ $ ASAN_OPTIONS=log_path=asan.log ./perf record -a
+
+ASan outputs all detected issues into a log file named 'asan.log.<pid>'.
+
+UndefinedBehaviorSanitizer (or UBSan) is a fast undefined behavior detector
+supported by GCC. UBSan detects undefined behaviors of programs at runtime.
+
+ $ cd tools/perf
+ $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=undefined'
+ $ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a
+
+If UBSan detects any problem at runtime, it outputs a “runtime error:” message.
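A convenient way to exercise many code paths under either sanitizer is the
built-in test suite. A minimal sketch, reusing the ASan build from above:

  $ cd tools/perf
  $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=address'
  $ ASAN_OPTIONS=log_path=asan.log ./perf test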
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 095aebdc5bb7..e6150f21267d 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -19,8 +19,11 @@ C2C stands for Cache To Cache.
The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows
you to track down the cacheline contentions.
-The tool is based on x86's load latency and precise store facility events
-provided by Intel CPUs. These events provide:
+On x86, the tool is based on load latency and precise store facility events
+provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling
+with the thresholding feature.
+
+These events provide:
- memory address of the access
- type of the access (load and store details)
- latency (in cycles) of the load access
@@ -46,7 +49,7 @@ RECORD OPTIONS
-l::
--ldlat::
- Configure mem-loads latency.
+ Configure mem-loads latency. (x86 only)
-k::
--all-kernel::
@@ -119,11 +122,16 @@ Following perf record options are configured by default:
-W,-d,--phys-data,--sample-cpu
Unless specified otherwise with '-e' option, following events are monitored by
-default:
+default on x86:
cpu/mem-loads,ldlat=30/P
cpu/mem-stores/P
+and the following on PowerPC:
+
+ cpu/mem-loads/
+ cpu/mem-stores/
+
User can pass any 'perf record' option behind '--' mark, like (to enable
callchains and system wide monitoring):
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 4ac7775fbc11..462b3cde0675 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -114,12 +114,16 @@ Given a $HOME/.perfconfig like this:
[report]
# Defaults
- sort-order = comm,dso,symbol
+ sort_order = comm,dso,symbol
percent-limit = 0
queue-size = 0
children = true
group = true
+ [llvm]
+ dump-obj = true
+ clang-opt = -g
+
You can hide source code of annotate feature setting the config to false with
% perf config annotate.hide_src_code=true
@@ -553,6 +557,47 @@ trace.*::
trace.show_zeros::
Do not suppress syscall arguments that are equal to zero.
+llvm.*::
+ llvm.clang-path::
+ Path to clang. If omitted, search for it in $PATH.
+
+ llvm.clang-bpf-cmd-template::
+ Cmdline template. The lines below show its default value. Environment
+ variables are used to pass options.
+ "$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS $KERNEL_INC_OPTIONS \
+ -Wno-unused-value -Wno-pointer-sign -working-directory \
+ $WORKING_DIR -c $CLANG_SOURCE -target bpf -O2 -o -"
+
+ llvm.clang-opt::
+ Options passed to clang.
+
+ llvm.kbuild-dir::
+ kbuild directory. If not set, use /lib/modules/`uname -r`/build.
+ If set to "" deliberately, skip kernel header auto-detector.
+
+ llvm.kbuild-opts::
+ Options passed to 'make' when detecting kernel header options.
+
+ llvm.dump-obj::
+ Enable dumping of BPF object files compiled by LLVM.
+
+ llvm.opts::
+ Options passed to llc.
+
+samples.*::
+
+ samples.context::
+ Define how many ns worth of time to show
+ around samples in perf report sample context browser.
+
+scripts.*::
+
+ Any option defines a script that is added to the scripts menu
+ in the interactive perf browser and whose output is displayed.
+ The name of the option is the script name; the value is the script command line.
+ The script gets the same options passed as a full perf script,
+ in particular the -i perf.data file, --cpu and --tid options.
+
SEE ALSO
--------
linkperf:perf[1]
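As an aside, the new llvm.* keys can be set like any other config item. A
hypothetical sketch (the values are examples only, mirroring the
$HOME/.perfconfig snippet above):

  % perf config llvm.dump-obj=true
  % perf config llvm.clang-opt=-g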
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index a79c84ae61aa..da7809b15cc9 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -118,6 +118,62 @@ OPTIONS
sum of shown entries will be always 100%. "absolute" means it retains
the original value before and after the filter is applied.
+--time::
+ Analyze samples within given time window. It supports time
+ percent with multiple time ranges. Time string is 'a%/n,b%/m,...'
+ or 'a%-b%,c%-%d,...'.
+
+ For example:
+
+ Select the second 10% time slice to diff:
+
+ perf diff --time 10%/2
+
+ Select from 0% to 10% time slice to diff:
+
+ perf diff --time 0%-10%
+
+ Select the first and the second 10% time slices to diff:
+
+ perf diff --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices to diff:
+
+ perf diff --time 0%-10%,30%-40%
+
+ It also supports analyzing samples within a given time window
+ <start>,<stop>. Times have the format seconds.microseconds. If 'start'
+ is not given (i.e., time string is ',x.y') then analysis starts at
+ the beginning of the file. If stop time is not given (i.e, time
+ string is 'x.y,') then analysis goes to the end of the file. Time string is
+ 'a1.b1,c1.d1:a2.b2,c2.d2'. Use ':' to separate timestamps for different
+ perf.data files.
+
+ For example, we get the timestamp information from 'perf script'.
+
+ perf script -i perf.data.old
+ mgen 13940 [000] 3946.361400: ...
+
+ perf script -i perf.data
+ mgen 13940 [000] 3971.150589 ...
+
+ perf diff --time 3946.361400,:3971.150589,
+
+ It analyzes the perf.data.old from the timestamp 3946.361400 to
+ the end of perf.data.old and analyzes the perf.data from the
+ timestamp 3971.150589 to the end of perf.data.
+
+--cpu:: Only diff samples for the list of CPUs provided. Multiple CPUs can
+ be provided as a comma-separated list with no space: 0,1. Ranges of
+ CPUs are specified with -: 0-2. Default is to report samples on all
+ CPUs.
+
+--pid=::
+ Only diff samples for given process ID (comma separated list).
+
+--tid=::
+ Only diff samples for given thread ID (comma separated list).
+
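The new filters compose with each other and with the positional data files. A
hypothetical invocation combining time, CPU and task filtering (all values
illustrative):

  perf diff --time 0%-10%,30%-40% --cpu 0-2 --pid 13940 perf.data.old perf.data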
COMPARISON
----------
The comparison is governed by the baseline file. The baseline perf.data
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index f8d2167cf3e7..199ea0f0a6c0 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -82,7 +82,7 @@ RECORD OPTIONS
Be more verbose (show counter open errors, etc)
--ldlat <n>::
- Specify desired latency for loads event.
+ Specify desired latency for loads event. (x86 only)
In addition, for report all perf report options are valid, and for record
all perf record options.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index d232b13ea713..8fe4dffcadd0 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -88,6 +88,20 @@ OPTIONS
If you want to profile write accesses in [0x1000~1008), just set
'mem:0x1000/8:w'.
+ - a BPF source file (ending in .c) or a precompiled object file (ending
+ in .o) selects one or more BPF events.
+ The BPF program can attach to various perf events based on the ELF section
+ names.
+
+ When processing a '.c' file, perf searches for an installed LLVM to compile it
+ into an object file first. Optional clang options can be passed via the
+ '--clang-opt' command line option, e.g.:
+
+ perf record --clang-opt "-DLINUX_VERSION_CODE=0x50000" \
+ -e tests/bpf-script-example.c
+
+ Note: '--clang-opt' must be placed before '--event/-e'.
+
- a group of events surrounded by a pair of brace ("{event1,event2,...}").
Each event is separated by commas and the group should be quoted to
prevent the shell interpretation. You also need to use --group on
@@ -440,6 +454,11 @@ Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default:
Asynchronous mode is supported only when linking Perf tool with libc library
providing implementation for Posix AIO API.
+--affinity=mode::
+Set affinity mask of trace reading thread according to the policy defined by 'mode' value:
+ node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
+ cpu - thread affinity mask is set to cpu of the processed mmap buffer
+
--all-kernel::
Configure all used events to run in kernel space.
@@ -476,6 +495,10 @@ overhead. You can still switch them on with:
--switch-output --no-no-buildid --no-no-buildid-cache
+--switch-max-files=N::
+
+When rotating perf.data with --switch-output, only keep N files.
+
--dry-run::
Parse options then exit. --dry-run can be used to detect errors in cmdline
options.
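A sketch combining the record options added above, assuming a long
system-wide session where output rotation and reader affinity are wanted (the
file count and affinity mode are illustrative):

  perf record -a --switch-output --switch-max-files=5 --affinity=node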
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 1a27bfe05039..f441baa794ce 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -105,6 +105,8 @@ OPTIONS
guest machine
- sample: Number of sample
- period: Raw number of event count of sample
+ - time: Separate the samples by time stamp with the resolution specified by
+ --time-quantum (default 100ms). Specify it together with overhead and before it.
By default, comm, dso and symbol keys are used.
(i.e. --sort comm,dso,symbol)
@@ -459,6 +461,10 @@ include::itrace.txt[]
--socket-filter::
Only report the samples on the processor socket that match with this filter
+--samples=N::
+ Save N individual samples for each histogram entry to show context in perf
+ report tui browser.
+
--raw-trace::
When displaying traceevent output, do not use print fmt or plugins.
@@ -477,6 +483,9 @@ include::itrace.txt[]
Please note that not all mmaps are stored, options affecting which ones
are include 'perf record --data', for instance.
+--ns::
+ Show time stamps in nanoseconds.
+
--stats::
Display overall events statistics without any further processing.
(like the one at the end of the perf report -D command)
@@ -494,6 +503,10 @@ include::itrace.txt[]
The period/hits keywords set the base the percentage is computed
on - the samples period or the number of samples (hits).
+--time-quantum::
+ Configure time quantum for time sort key. Default 100ms.
+ Accepts s, us, ms, ns units.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
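Putting the new report options together, a hypothetical session (the quantum
and sample count are arbitrary choices):

  perf report --sort time,overhead,symbol --time-quantum 500ms --samples 8 --ns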
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 9e4def08d569..9b0d04dd2a61 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -159,6 +159,12 @@ OPTIONS
the override, and the result of the above is that only S/W and H/W
events are displayed with the given fields.
+ It's possible to add/remove fields only for a specific event type:
+
+ -Fsw:-cpu,-period
+
+ removes cpu and period from software events.
+
For the 'wildcard' option if a user selected field is invalid for an
event type, a message is displayed to the user that the option is
ignored for that type. For example:
@@ -374,6 +380,9 @@ include::itrace.txt[]
Set the maximum number of program blocks to print with brstackasm for
each sample.
+--reltime::
+ Print time stamps relative to trace start.
+
--per-event-dump::
Create per event files with a "perf.data.EVENT.dump" name instead of
printing to stdout, useful, for instance, for generating flamegraphs.
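For example, a hypothetical run combining the per-type field spec with the new
relative timestamps:

  perf script --reltime -Fsw:-cpu,-period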
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4bc2085e5197..39c05f89104e 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -72,9 +72,8 @@ report::
--all-cpus::
system-wide collection from all CPUs (default if no target is specified)
--c::
---scale::
- scale/normalize counter values
+--no-scale::
+ Don't scale/normalize counter values
-d::
--detailed::
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 631e687be4eb..fc6e43262c41 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -210,6 +210,14 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
may happen, for instance, when a thread gets migrated to a different CPU
while processing a syscall.
+--map-dump::
+ Dump BPF maps set up by events passed via -e, for instance the augmented_raw_syscalls
+ living in tools/perf/examples/bpf/augmented_raw_syscalls.c. For now this
+ dumps just boolean map values and integer keys, in time this will print in hex
+ by default and use BTF when available, as well as use functions to do pretty
+ printing using the existing 'perf trace' syscall arg beautifiers to map integer
+ arguments to strings (pid to comm, syscall id to syscall name, etc).
+
PAGEFAULTS
----------
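Returning to the --map-dump option added above, a minimal sketch of its
intended use with the example program named in its description:

  perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c --map-dump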
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index dfb218feaad9..593ef49b273c 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -43,11 +43,10 @@ struct perf_file_section {
Flags section:
-The header is followed by different optional headers, described by the bits set
-in flags. Only headers for which the bit is set are included. Each header
-consists of a perf_file_section located after the initial header.
-The respective perf_file_section points to the data of the additional
-header and defines its size.
+For each of the optional features a perf_file_section is placed after the data
+section if the feature bit is set in the perf_header flags bitset. The
+respective perf_file_section points to the data of the additional header and
+defines its size.
Some headers consist of strings, which are defined like this:
@@ -131,7 +130,7 @@ An uint64_t with the total memory in bytes.
HEADER_CMDLINE = 11,
-A perf_header_string with the perf command line used to collect the data.
+A perf_header_string_list with the perf arg-vector used to collect the data.
HEADER_EVENT_DESC = 12,
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index 849599f39c5e..869965d629ce 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -15,6 +15,7 @@ To see callchains in a more compact form: perf report -g folded
Show individual samples with: perf script
Limit to show entries above 5% only: perf report --percent-limit 5
Profiling branch (mis)predictions with: perf record -b / perf report
+To show assembler sample contexts use perf record -b / perf script -F +brstackinsn --xed
Treat branches as callchains: perf report --branch-history
To count events in every 1000 msec: perf stat -I 1000
Print event counts in CSV format with: perf stat -x,
@@ -34,3 +35,9 @@ Show current config key-value pairs: perf config --list
Show user configuration overrides: perf config --user --list
To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
To report cacheline events from previous recording: perf c2c report
+To browse sample contexts use perf report --samples 10 and select in context menu
+To separate samples by time use perf report --sort time,overhead,sym
+To set sample time separation other than 100ms with --sort time use --time-quantum
+Add -I to perf record to sample register values visible in perf report sample context.
+To show IPC for sampling periods use perf record -e '{cycles,instructions}:S' and then browse context
+To show context switches in perf report sample context add --switch-events to perf record.
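The IPC tip above implies a two-step flow; a minimal sketch (the workload is
illustrative):

  perf record -e '{cycles,instructions}:S' -a sleep 1
  perf report    # browse an entry's sample context to see IPC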
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b441c88cafa1..fe3f97e342fa 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -109,6 +109,13 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
+FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
+FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
+FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
+
+FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
+
ifdef CSINCLUDES
LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
endif
@@ -218,6 +225,10 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
+FEATURE_CHECK_LDFLAGS-libaio = -lrt
+
+FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes
+
CFLAGS += -fno-omit-frame-pointer
CFLAGS += -ggdb3
CFLAGS += -funwind-tables
@@ -386,7 +397,8 @@ ifeq ($(feature-setns), 1)
$(call detected,CONFIG_SETNS)
endif
-ifndef NO_CORESIGHT
+ifdef CORESIGHT
+ $(call feature_check,libopencsd)
ifeq ($(feature-libopencsd), 1)
CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
LDFLAGS += $(LIBOPENCSD_LDFLAGS)
@@ -482,6 +494,7 @@ endif
ifndef NO_LIBUNWIND
have_libunwind :=
+ $(call feature_check,libunwind-x86)
ifeq ($(feature-libunwind-x86), 1)
$(call detected,CONFIG_LIBUNWIND_X86)
CFLAGS += -DHAVE_LIBUNWIND_X86_SUPPORT
@@ -490,6 +503,7 @@ ifndef NO_LIBUNWIND
have_libunwind = 1
endif
+ $(call feature_check,libunwind-aarch64)
ifeq ($(feature-libunwind-aarch64), 1)
$(call detected,CONFIG_LIBUNWIND_AARCH64)
CFLAGS += -DHAVE_LIBUNWIND_AARCH64_SUPPORT
@@ -701,7 +715,7 @@ else
endif
ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd
+ EXTLIBS += -lbfd -lopcodes
else
# we are on a system that requires -liberty and (maybe) -lz
# to link against -lbfd; test each case individually here
@@ -712,12 +726,15 @@ else
$(call feature_check,libbfd-liberty-z)
ifeq ($(feature-libbfd-liberty), 1)
- EXTLIBS += -lbfd -liberty
+ EXTLIBS += -lbfd -lopcodes -liberty
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
else
ifeq ($(feature-libbfd-liberty-z), 1)
- EXTLIBS += -lbfd -liberty -lz
+ EXTLIBS += -lbfd -lopcodes -liberty -lz
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
endif
endif
+ $(call feature_check,disassembler-four-args)
endif
ifdef NO_DEMANGLE
@@ -796,6 +813,10 @@ ifdef HAVE_KVM_STAT_SUPPORT
CFLAGS += -DHAVE_KVM_STAT_SUPPORT
endif
+ifeq ($(feature-disassembler-four-args), 1)
+ CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 0ee6795d82cc..01f7555fd933 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -102,7 +102,7 @@ include ../scripts/utilities.mak
# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
# llvm-config is not in $PATH.
#
-# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
+# Define CORESIGHT if you DO WANT support for CoreSight trace decoding.
#
# Define NO_AIO if you do not want support of Posix AIO based trace
# streaming for record mode. Currently Posix AIO trace streaming is
@@ -344,9 +344,9 @@ endif
export PERL_PATH
-LIB_FILE=$(OUTPUT)libperf.a
+LIBPERF_A=$(OUTPUT)libperf.a
-PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
+PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
ifndef NO_LIBBPF
PERFLIBS += $(LIBBPF)
endif
@@ -549,6 +549,8 @@ JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o
PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
+LIBPERF_IN := $(OUTPUT)libperf-in.o
+
export JEVENTS
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
@@ -565,9 +567,12 @@ $(JEVENTS): $(JEVENTS_IN)
$(PMU_EVENTS_IN): $(JEVENTS) FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
-$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
+$(LIBPERF_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=libperf
+
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
- $(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
+ $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBS) -o $@
$(GTK_IN): FORCE
$(Q)$(MAKE) $(build)=gtk
@@ -683,12 +688,7 @@ endif
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
-LIBPERF_IN := $(OUTPUT)libperf-in.o
-
-$(LIBPERF_IN): prepare FORCE
- $(Q)$(MAKE) $(build)=libperf
-
-$(LIB_FILE): $(LIBPERF_IN)
+$(LIBPERF_A): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
@@ -863,8 +863,8 @@ ifndef NO_LIBPYTHON
$(call QUIET_INSTALL, python-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'; \
- $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
- $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
+ $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/python/*.py -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
endif
$(call QUIET_INSTALL, perf_completion-script) \
@@ -910,7 +910,7 @@ python-clean:
$(python-clean)
clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean python-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index d9b6af837c7d..688818844c11 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
-libperf-y += common.o
-libperf-y += $(SRCARCH)/
+perf-y += common.o
+perf-y += $(SRCARCH)/
diff --git a/tools/perf/arch/arm/Build b/tools/perf/arch/arm/Build
index 41bf61da476a..36222e64bbf7 100644
--- a/tools/perf/arch/arm/Build
+++ b/tools/perf/arch/arm/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
index d9ae2733f9cc..bc8e97380c82 100644
--- a/tools/perf/arch/arm/tests/Build
+++ b/tools/perf/arch/arm/tests/Build
@@ -1,5 +1,5 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
-libperf-y += vectors-page.o
+perf-y += regs_load.o
+perf-y += dwarf-unwind.o
+perf-y += vectors-page.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c
index 9a0242e74cfc..2c35e532bc9a 100644
--- a/tools/perf/arch/arm/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build
index e64c5f216448..296f0eac5e18 100644
--- a/tools/perf/arch/arm/util/Build
+++ b/tools/perf/arch/arm/util/Build
@@ -1,6 +1,6 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
+perf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 2f595cd73da6..911426721170 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -5,6 +5,7 @@
*/
#include <api/fs/fs.h>
+#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
@@ -22,12 +23,10 @@
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"
+#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>
-#define ENABLE_SINK_MAX 128
-#define CS_BUS_DEVICE_PATH "/bus/coresight/devices/"
-
struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
@@ -60,10 +59,48 @@ static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
return 0;
}
+static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
+ struct perf_evsel *evsel)
+{
+ char msg[BUFSIZ], path[PATH_MAX], *sink;
+ struct perf_evsel_config_term *term;
+ int ret = -EINVAL;
+ u32 hash;
+
+ if (evsel->attr.config2 & GENMASK(31, 0))
+ return 0;
+
+ list_for_each_entry(term, &evsel->config_terms, list) {
+ if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
+ continue;
+
+ sink = term->val.drv_cfg;
+ snprintf(path, PATH_MAX, "sinks/%s", sink);
+
+ ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
+ if (ret != 1) {
+ pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
+ sink, perf_evsel__name(evsel), errno,
+ str_error_r(errno, msg, sizeof(msg)));
+ return ret;
+ }
+
+ evsel->attr.config2 |= hash;
+ return 0;
+ }
+
+ /*
+ * No sink was provided on the command line - for _now_ treat
+ * this as an error.
+ */
+ return ret;
+}
+
static int cs_etm_recording_options(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts)
{
+ int ret;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -92,6 +129,10 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
if (!cs_etm_evsel)
return 0;
+ ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
+ if (ret)
+ return ret;
+
if (opts->use_clockid) {
pr_err("Cannot use clockid (-k option) with %s\n",
CORESIGHT_ETM_PMU_NAME);
@@ -598,54 +639,3 @@ struct auxtrace_record *cs_etm_record_init(int *err)
out:
return NULL;
}
-
-static FILE *cs_device__open_file(const char *name)
-{
- struct stat st;
- char path[PATH_MAX];
- const char *sysfs;
-
- sysfs = sysfs__mountpoint();
- if (!sysfs)
- return NULL;
-
- snprintf(path, PATH_MAX,
- "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);
-
- if (stat(path, &st) < 0)
- return NULL;
-
- return fopen(path, "w");
-
-}
-
-static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
-{
- va_list args;
- FILE *file;
- int ret = -EINVAL;
-
- va_start(args, fmt);
- file = cs_device__open_file(name);
- if (file) {
- ret = vfprintf(file, fmt, args);
- fclose(file);
- }
- va_end(args);
- return ret;
-}
-
-int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
-{
- int ret;
- char enable_sink[ENABLE_SINK_MAX];
-
- snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
- term->val.drv_cfg, "enable_sink");
-
- ret = cs_device__print_file(enable_sink, "%d", 1);
- if (ret < 0)
- return ret;
-
- return 0;
-}
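With this change the sink is programmed through the event's config2 term
instead of a sysfs 'enable_sink' write; on the command line that corresponds
to the drv_cfg ('@') event syntax. A hypothetical sketch (the sink name
tmc_etf0 is platform specific):

  perf record -e cs_etm/@tmc_etf0/u --per-thread -- ls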
diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h
index 1a12e64f5127..a3354bda4fe8 100644
--- a/tools/perf/arch/arm/util/cs-etm.h
+++ b/tools/perf/arch/arm/util/cs-etm.h
@@ -7,9 +7,6 @@
#ifndef INCLUDE__PERF_CS_ETM_H__
#define INCLUDE__PERF_CS_ETM_H__
-#include "../../util/evsel.h"
-
struct auxtrace_record *cs_etm_record_init(int *err);
-int cs_etm_set_drv_config(struct perf_evsel_config_term *term);
#endif
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index e047571e6080..bbc297a7e2e3 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -7,8 +7,8 @@
#include <string.h>
#include <linux/coresight-pmu.h>
#include <linux/perf_event.h>
+#include <linux/string.h>
-#include "cs-etm.h"
#include "arm-spe.h"
#include "../../util/pmu.h"
@@ -19,7 +19,6 @@ struct perf_event_attr
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
pmu->selectable = true;
- pmu->set_drv_config = cs_etm_set_drv_config;
#if defined(__aarch64__)
} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
return arm_spe_pmu_default_config(pmu);
diff --git a/tools/perf/arch/arm64/Build b/tools/perf/arch/arm64/Build
index 41bf61da476a..36222e64bbf7 100644
--- a/tools/perf/arch/arm64/Build
+++ b/tools/perf/arch/arm64/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 76c6345a57d5..8f70a1b282df 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -58,7 +58,7 @@ out_free_source:
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
static struct ins_ops arm64_mov_ops = {
.parse = arm64_mov__parse,
diff --git a/tools/perf/arch/arm64/tests/Build b/tools/perf/arch/arm64/tests/Build
index 883c57ff0c08..41707fea74b3 100644
--- a/tools/perf/arch/arm64/tests/Build
+++ b/tools/perf/arch/arm64/tests/Build
@@ -1,4 +1,4 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
+perf-y += regs_load.o
+perf-y += dwarf-unwind.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c
index 5522ce384723..a6a407fa1b8b 100644
--- a/tools/perf/arch/arm64/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm64/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 68f8a8eb3ad0..3cde540d2fcf 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,10 +1,10 @@
-libperf-y += header.o
-libperf-y += sym-handling.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-y += header.o
+perf-y += sym-handling.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
+perf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
../../arm/util/cs-etm.o \
arm-spe.o
diff --git a/tools/perf/arch/nds32/Build b/tools/perf/arch/nds32/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/nds32/Build
+++ b/tools/perf/arch/nds32/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/nds32/util/Build b/tools/perf/arch/nds32/util/Build
index ca623bbf993c..d0bc205fe49a 100644
--- a/tools/perf/arch/nds32/util/Build
+++ b/tools/perf/arch/nds32/util/Build
@@ -1 +1 @@
-libperf-y += header.o
+perf-y += header.o
diff --git a/tools/perf/arch/powerpc/Build b/tools/perf/arch/powerpc/Build
index db52fa22d3a1..a7dd46a5b678 100644
--- a/tools/perf/arch/powerpc/Build
+++ b/tools/perf/arch/powerpc/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-y += tests/
+perf-y += util/
+perf-y += tests/
diff --git a/tools/perf/arch/powerpc/tests/Build b/tools/perf/arch/powerpc/tests/Build
index d827ef384b33..3526ab0af9f9 100644
--- a/tools/perf/arch/powerpc/tests/Build
+++ b/tools/perf/arch/powerpc/tests/Build
@@ -1,4 +1,4 @@
-libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 5f39efef0856..5c178e4a1995 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 2e6595310420..7cf0b8803097 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,10 +1,11 @@
-libperf-y += header.o
-libperf-y += sym-handling.o
-libperf-y += kvm-stat.o
-libperf-y += perf_regs.o
+perf-y += header.o
+perf-y += sym-handling.o
+perf-y += kvm-stat.o
+perf-y += perf_regs.o
+perf-y += mem-events.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += skip-callchain-idx.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
index 596ad6aedaac..f9db341c47b6 100644
--- a/tools/perf/arch/powerpc/util/kvm-stat.c
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -3,6 +3,8 @@
#include "util/kvm-stat.h"
#include "util/parse-events.h"
#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
#include "book3s_hv_exits.h"
#include "book3s_hcalls.h"
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
new file mode 100644
index 000000000000..d08311f04e95
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/mem-events.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "mem-events.h"
+
+/* PowerPC does not support 'ldlat' parameter. */
+char *perf_mem_events__name(int i)
+{
+ if (i == PERF_MEM_EVENTS__LOAD)
+ return (char *) "cpu/mem-loads/";
+
+ return (char *) "cpu/mem-stores/";
+}
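Given this mapping, a plain 'perf mem' session on PowerPC records the
load/store events without a latency threshold. A minimal sketch:

  perf mem record -- sleep 1
  perf mem report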
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 7c6eeb4633fe..2918bb16c892 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -16,6 +16,9 @@
#include "util/thread.h"
#include "util/callchain.h"
#include "util/debug.h"
+#include "util/dso.h"
+#include "util/map.h"
+#include "util/symbol.h"
/*
* When saving the callchain on Power, the kernel conservatively saves
diff --git a/tools/perf/arch/s390/Build b/tools/perf/arch/s390/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/s390/Build
+++ b/tools/perf/arch/s390/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index de0dd66dbb48..89bb8f2c54ce 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -46,7 +46,7 @@ static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
static struct ins_ops s390_call_ops = {
.parse = s390_call__parse,
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index 4a233683c684..22797f043b84 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -1,9 +1,9 @@
-libperf-y += header.o
-libperf-y += kvm-stat.o
+perf-y += header.o
+perf-y += kvm-stat.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-y += machine.o
+perf-y += machine.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
index aaabab5e2830..7e3961a4b292 100644
--- a/tools/perf/arch/s390/util/kvm-stat.c
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -11,6 +11,7 @@
#include <errno.h>
#include "../../util/kvm-stat.h"
+#include "../../util/evsel.h"
#include <asm/sie.h>
define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
diff --git a/tools/perf/arch/sh/Build b/tools/perf/arch/sh/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/sh/Build
+++ b/tools/perf/arch/sh/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/sh/util/Build b/tools/perf/arch/sh/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/sh/util/Build
+++ b/tools/perf/arch/sh/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/sparc/Build b/tools/perf/arch/sparc/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/sparc/Build
+++ b/tools/perf/arch/sparc/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/sparc/util/Build b/tools/perf/arch/sparc/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/sparc/util/Build
+++ b/tools/perf/arch/sparc/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index db52fa22d3a1..a7dd46a5b678 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-y += tests/
+perf-y += util/
+perf-y += tests/
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index f0b1709a5ffb..2ae92fddb6d5 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -343,6 +343,8 @@
332 common statx __x64_sys_statx
333 common io_pgetevents __x64_sys_io_pgetevents
334 common rseq __x64_sys_rseq
+# don't use numbers 387 through 423, add new calls after the last
+# 'common' entry
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -361,7 +363,7 @@
520 x32 execve __x32_compat_sys_execve/ptregs
521 x32 ptrace __x32_compat_sys_ptrace
522 x32 rt_sigpending __x32_compat_sys_rt_sigpending
-523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait
+523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait_time64
524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo
525 x32 sigaltstack __x32_compat_sys_sigaltstack
526 x32 timer_create __x32_compat_sys_timer_create
@@ -375,7 +377,7 @@
534 x32 preadv __x32_compat_sys_preadv64
535 x32 pwritev __x32_compat_sys_pwritev64
536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo
-537 x32 recvmmsg __x32_compat_sys_recvmmsg
+537 x32 recvmmsg __x32_compat_sys_recvmmsg_time64
538 x32 sendmmsg __x32_compat_sys_sendmmsg
539 x32 process_vm_readv __x32_compat_sys_process_vm_readv
540 x32 process_vm_writev __x32_compat_sys_process_vm_writev
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 586849ff83a0..3d83d0c6982d 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -1,8 +1,8 @@
-libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-libperf-y += arch-tests.o
-libperf-y += rdpmc.o
-libperf-y += perf-time-to-tsc.o
-libperf-$(CONFIG_AUXTRACE) += insn-x86.o
-libperf-$(CONFIG_X86_64) += bp-modify.o
+perf-y += arch-tests.o
+perf-y += rdpmc.o
+perf-y += perf-time-to-tsc.o
+perf-$(CONFIG_AUXTRACE) += insn-x86.o
+perf-$(CONFIG_X86_64) += bp-modify.o
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 7879df34569a..6ad0a1cedb13 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 844b8f335532..47f9c56e744f 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -1,18 +1,19 @@
-libperf-y += header.o
-libperf-y += tsc.o
-libperf-y += pmu.o
-libperf-y += kvm-stat.o
-libperf-y += perf_regs.o
-libperf-y += group.o
-libperf-y += machine.o
-libperf-y += event.o
+perf-y += header.o
+perf-y += tsc.o
+perf-y += pmu.o
+perf-y += kvm-stat.o
+perf-y += perf_regs.o
+perf-y += group.o
+perf-y += machine.o
+perf-y += event.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
-libperf-$(CONFIG_AUXTRACE) += intel-pt.o
-libperf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += archinsn.o
+perf-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
new file mode 100644
index 000000000000..4237bb2e7fa2
--- /dev/null
+++ b/tools/perf/arch/x86/util/archinsn.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "perf.h"
+#include "archinsn.h"
+#include "util/intel-pt-decoder/insn.h"
+#include "machine.h"
+#include "thread.h"
+#include "symbol.h"
+
+void arch_fetch_insn(struct perf_sample *sample,
+ struct thread *thread,
+ struct machine *machine)
+{
+ struct insn insn;
+ int len;
+ bool is64bit = false;
+
+ if (!sample->ip)
+ return;
+ len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
+ if (len <= 0)
+ return;
+ insn_init(&insn, sample->insn, len, is64bit);
+ insn_get_length(&insn);
+ if (insn_complete(&insn) && insn.length <= len)
+ sample->insn_len = insn.length;
+}
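Note: the new arch_fetch_insn() hook copies up to sizeof(sample->insn) bytes at sample->ip out of the traced thread's address space and runs them through the Intel PT instruction decoder purely to recover the instruction length. A minimal usage sketch, assuming sample/thread/machine were already resolved; this mirrors how the perf script change later in this patch calls it (there additionally gated on running on the native architecture):

	/* back-fill the instruction length when the kernel left ilen empty */
	if (sample->insn_len == 0)
		arch_fetch_insn(sample, thread, machine);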
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 081353d7b095..865a9762f22e 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include "../../util/kvm-stat.h"
+#include "../../util/evsel.h"
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>
diff --git a/tools/perf/arch/xtensa/Build b/tools/perf/arch/xtensa/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/xtensa/Build
+++ b/tools/perf/arch/xtensa/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/xtensa/util/Build b/tools/perf/arch/xtensa/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/xtensa/util/Build
+++ b/tools/perf/arch/xtensa/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
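Note: as with the other arch Build files in this series, the object lists above are renamed from libperf-y/libperf-$(CONFIG_*) to perf-y/perf-$(CONFIG_*). The kbuild-style semantics are unchanged; the rename only frees up the libperf- prefix, presumably for the planned standalone libperf library.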
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index 0c0a6e824934..2af067859966 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
unsigned int i, j;
- int ret;
+ int ret = 0;
if (!noaffinity)
pthread_attr_init(&thread_attr);
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index 5a11534e96a0..fe85448abd45 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
unsigned int i, j;
- int ret, events = EPOLLIN;
+ int ret = 0, events = EPOLLIN;
if (oneshot)
events |= EPOLLONESHOT;
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 44195514b19e..98ad783efc69 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -34,6 +34,7 @@
#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/time64.h>
+#include <linux/numa.h>
#include <numa.h>
#include <numaif.h>
@@ -298,7 +299,7 @@ static cpu_set_t bind_to_node(int target_node)
CPU_ZERO(&mask);
- if (target_node == -1) {
+ if (target_node == NUMA_NO_NODE) {
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
CPU_SET(cpu, &mask);
} else {
@@ -339,7 +340,7 @@ static void bind_to_memnode(int node)
unsigned long nodemask;
int ret;
- if (node == -1)
+ if (node == NUMA_NO_NODE)
return;
BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
@@ -1363,7 +1364,7 @@ static void init_thread_data(void)
int cpu;
/* Allow all nodes by default: */
- td->bind_node = -1;
+ td->bind_node = NUMA_NO_NODE;
/* Allow all CPUs by default: */
CPU_ZERO(&td->bind_cpumask);
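Note: bench/numa now spells its "no binding" sentinel with the named constant instead of a bare -1, courtesy of the newly included header. For reference:

	/* <linux/numa.h>, simplified */
	#define NUMA_NO_NODE	(-1)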
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 93d679eaf1f4..67f9d9ffacfb 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -27,6 +27,7 @@
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
@@ -227,7 +228,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
* the DSO?
*/
if (al->sym != NULL) {
- rb_erase(&al->sym->rb_node,
+ rb_erase_cached(&al->sym->rb_node,
&al->map->dso->symbols);
symbol__delete(al->sym);
dso__reset_find_symbol_cache(al->map->dso);
@@ -305,7 +306,7 @@ static void hists__find_annotations(struct hists *hists,
struct perf_evsel *evsel,
struct perf_annotate *ann)
{
- struct rb_node *nd = rb_first(&hists->entries), *next;
+ struct rb_node *nd = rb_first_cached(&hists->entries), *next;
int key = K_RIGHT;
while (nd) {
@@ -440,7 +441,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (total_nr_samples == 0) {
- ui__error("The %s file has no samples!\n", session->data->file.path);
+ ui__error("The %s data has no samples!\n", session->data->path);
goto out;
}
@@ -577,7 +578,7 @@ int cmd_annotate(int argc, const char **argv)
if (quiet)
perf_quiet_option();
- data.file.path = input_name;
+ data.path = input_name;
annotate.session = perf_session__new(&data, false, &annotate.tool);
if (annotate.session == NULL)
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 115110a4796a..10457b10e568 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -416,8 +416,8 @@ int cmd_buildid_cache(int argc, const char **argv)
nsi = nsinfo__new(ns_id);
if (missing_filename) {
- data.file.path = missing_filename;
- data.force = force;
+ data.path = missing_filename;
+ data.force = force;
session = perf_session__new(&data, false, NULL);
if (session == NULL)
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 78abbe8d9d5f..f403e19488b5 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -52,11 +52,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
symbol__elf_init();
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index d340d2e42776..9e6cc868bdb4 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -33,6 +33,7 @@
#include "ui/browsers/hists.h"
#include "thread.h"
#include "mem2node.h"
+#include "symbol.h"
struct c2c_hists {
struct hists hists;
@@ -1969,7 +1970,7 @@ static void calc_width(struct c2c_hist_entry *c2c_he)
set_nodestr(c2c_he);
}
-static int filter_cb(struct hist_entry *he)
+static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
@@ -1986,7 +1987,7 @@ static int filter_cb(struct hist_entry *he)
return 0;
}
-static int resort_cl_cb(struct hist_entry *he)
+static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *c2c_hists;
@@ -2055,6 +2056,12 @@ static int setup_nodes(struct perf_session *session)
if (!set)
return -ENOMEM;
+ nodes[node] = set;
+
+ /* empty node, skip */
+ if (cpu_map__empty(map))
+ continue;
+
for (cpu = 0; cpu < map->nr; cpu++) {
set_bit(map->map[cpu], set);
@@ -2063,8 +2070,6 @@ static int setup_nodes(struct perf_session *session)
cpu2node[map->map[cpu]] = node;
}
-
- nodes[node] = set;
}
setup_nodes_header();
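Note: storing nodes[node] as soon as the bitmap is allocated keeps it reachable, and bailing out early on cpu_map__empty() stops the loop from feeding the -1 placeholder CPU of an empty node's map to set_bit(), which would write outside the bitmap.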
@@ -2073,7 +2078,7 @@ static int setup_nodes(struct perf_session *session)
#define HAS_HITMS(__h) ((__h)->stats.lcl_hitm || (__h)->stats.rmt_hitm)
-static int resort_hitm_cb(struct hist_entry *he)
+static int resort_hitm_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
@@ -2088,14 +2093,14 @@ static int resort_hitm_cb(struct hist_entry *he)
static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
int ret = 0;
while (next) {
struct hist_entry *he;
he = rb_entry(next, struct hist_entry, rb_node);
- ret = cb(he);
+ ret = cb(he, NULL);
if (ret)
break;
next = rb_next(&he->rb_node);
@@ -2215,7 +2220,7 @@ static void print_pareto(FILE *out)
if (WARN_ONCE(ret, "failed to setup sort entries\n"))
return;
- nd = rb_first(&c2c.hists.hists.entries);
+ nd = rb_first_cached(&c2c.hists.hists.entries);
for (; nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -2283,7 +2288,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
static void c2c_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -2343,7 +2348,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
struct c2c_cacheline_browser *cl_browser;
struct hist_browser *browser;
int key = -1;
- const char help[] =
+ static const char help[] =
" ENTER Toggle callchains (if present) \n"
" n Toggle Node details info \n"
" s Toggle full length of symbol and source line columns \n"
@@ -2424,7 +2429,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
{
struct hist_browser *browser;
int key = -1;
- const char help[] =
+ static const char help[] =
" d Display cacheline details \n"
" ENTER Toggle callchains (if present) \n"
" q Quit \n";
@@ -2749,8 +2754,8 @@ static int perf_c2c__report(int argc, const char **argv)
if (!input_name || !strlen(input_name))
input_name = "perf.data";
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
err = setup_display(display);
if (err)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 39db2ee32d48..6e7920793729 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -19,12 +19,21 @@
#include "util/util.h"
#include "util/data.h"
#include "util/config.h"
+#include "util/time-utils.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <math.h>
+struct perf_diff {
+ struct perf_tool tool;
+ const char *time_str;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
+};
+
/* Diff command specific HPP columns. */
enum {
PERF_HPP_DIFF__BASELINE,
@@ -74,6 +83,9 @@ static unsigned int sort_compute = 1;
static s64 compute_wdiff_w1;
static s64 compute_wdiff_w2;
+static const char *cpu_list;
+static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+
enum {
COMPUTE_DELTA,
COMPUTE_RATIO,
@@ -323,22 +335,33 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
return -1;
}
-static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
+static int diff__process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
+ struct perf_diff *pdiff = container_of(tool, struct perf_diff, tool);
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
int ret = -1;
+ if (perf_time__ranges_skip_sample(pdiff->ptime_range, pdiff->range_num,
+ sample->time)) {
+ return 0;
+ }
+
if (machine__resolve(machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
+ if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) {
+ ret = 0;
+ goto out_put;
+ }
+
if (!hists__add_entry(hists, &al, NULL, NULL, NULL, sample, true)) {
pr_warning("problem incrementing symbol period, skipping event\n");
goto out_put;
@@ -359,17 +382,19 @@ out_put:
return ret;
}
-static struct perf_tool tool = {
- .sample = diff__process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
- .namespaces = perf_event__process_namespaces,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
+static struct perf_diff pdiff = {
+ .tool = {
+ .sample = diff__process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .lost = perf_event__process_lost,
+ .namespaces = perf_event__process_namespaces,
+ .ordered_events = true,
+ .ordering_requires_timestamps = true,
+ },
};
static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
@@ -429,7 +454,7 @@ get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
static void hists__baseline_only(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@@ -437,13 +462,13 @@ static void hists__baseline_only(struct hists *hists)
else
root = hists->entries_in;
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
if (!hist_entry__next_pair(he)) {
- rb_erase(&he->rb_node_in, root);
+ rb_erase_cached(&he->rb_node_in, root);
hist_entry__delete(he);
}
}
@@ -451,7 +476,7 @@ static void hists__baseline_only(struct hists *hists)
static void hists__precompute(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@@ -459,7 +484,7 @@ static void hists__precompute(struct hists *hists)
else
root = hists->entries_in;
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he, *pair;
struct data__file *d;
@@ -708,7 +733,7 @@ static void data__fprintf(void)
data__for_each_file(i, d)
fprintf(stdout, "# [%d] %s %s\n",
- d->idx, d->data.file.path,
+ d->idx, d->data.path,
!d->idx ? "(Baseline)" : "");
fprintf(stdout, "#\n");
@@ -771,26 +796,127 @@ static void data__free(struct data__file *d)
}
}
+static int abstime_str_dup(char **pstr)
+{
+ char *str = NULL;
+
+ if (pdiff.time_str && strchr(pdiff.time_str, ':')) {
+ str = strdup(pdiff.time_str);
+ if (!str)
+ return -ENOMEM;
+ }
+
+ *pstr = str;
+ return 0;
+}
+
+static int parse_absolute_time(struct data__file *d, char **pstr)
+{
+ char *p = *pstr;
+ int ret;
+
+ /*
+ * Absolute timestamp for one file has the format: a.b,c.d
+ * For multiple files, the format is: a.b,c.d:a.b,c.d
+ */
+ p = strchr(*pstr, ':');
+ if (p) {
+ if (p == *pstr) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+
+ *p = 0;
+ p++;
+ if (*p == 0) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = perf_time__parse_for_ranges(*pstr, d->session,
+ &pdiff.ptime_range,
+ &pdiff.range_size,
+ &pdiff.range_num);
+ if (ret < 0)
+ return ret;
+
+ if (!p || *p == 0)
+ *pstr = NULL;
+ else
+ *pstr = p;
+
+ return ret;
+}
+
+static int parse_percent_time(struct data__file *d)
+{
+ int ret;
+
+ ret = perf_time__parse_for_ranges(pdiff.time_str, d->session,
+ &pdiff.ptime_range,
+ &pdiff.range_size,
+ &pdiff.range_num);
+ return ret;
+}
+
+static int parse_time_str(struct data__file *d, char *abstime_ostr,
+ char **pabstime_tmp)
+{
+ int ret = 0;
+
+ if (abstime_ostr)
+ ret = parse_absolute_time(d, pabstime_tmp);
+ else if (pdiff.time_str)
+ ret = parse_percent_time(d);
+
+ return ret;
+}
+
static int __cmd_diff(void)
{
struct data__file *d;
- int ret = -EINVAL, i;
+ int ret, i;
+ char *abstime_ostr, *abstime_tmp;
+
+ ret = abstime_str_dup(&abstime_ostr);
+ if (ret)
+ return ret;
+
+ abstime_tmp = abstime_ostr;
+ ret = -EINVAL;
data__for_each_file(i, d) {
- d->session = perf_session__new(&d->data, false, &tool);
+ d->session = perf_session__new(&d->data, false, &pdiff.tool);
if (!d->session) {
- pr_err("Failed to open %s\n", d->data.file.path);
+ pr_err("Failed to open %s\n", d->data.path);
ret = -1;
goto out_delete;
}
+ if (pdiff.time_str) {
+ ret = parse_time_str(d, abstime_ostr, &abstime_tmp);
+ if (ret < 0)
+ goto out_delete;
+ }
+
+ if (cpu_list) {
+ ret = perf_session__cpu_bitmap(d->session, cpu_list,
+ cpu_bitmap);
+ if (ret < 0)
+ goto out_delete;
+ }
+
ret = perf_session__process_events(d->session);
if (ret) {
- pr_err("Failed to process %s\n", d->data.file.path);
+ pr_err("Failed to process %s\n", d->data.path);
goto out_delete;
}
perf_evlist__collapse_resort(d->session->evlist);
+
+ if (pdiff.ptime_range)
+ zfree(&pdiff.ptime_range);
}
data_process();
@@ -802,6 +928,13 @@ static int __cmd_diff(void)
}
free(data__files);
+
+ if (pdiff.ptime_range)
+ zfree(&pdiff.ptime_range);
+
+ if (abstime_ostr)
+ free(abstime_ostr);
+
return ret;
}
@@ -849,6 +982,13 @@ static const struct option options[] = {
OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"How to display percentage of filtered entries", parse_filter_percentage),
+ OPT_STRING(0, "time", &pdiff.time_str, "str",
+ "Time span (time percent or absolute timestamp)"),
+ OPT_STRING(0, "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
+ "only consider symbols in these pids"),
+ OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
+ "only consider symbols in these tids"),
OPT_END()
};
@@ -1289,9 +1429,9 @@ static int data_init(int argc, const char **argv)
data__for_each_file(i, d) {
struct perf_data *data = &d->data;
- data->file.path = use_default ? defaults[i] : argv[i];
- data->mode = PERF_DATA_MODE_READ,
- data->force = force,
+ data->path = use_default ? defaults[i] : argv[i];
+ data->mode = PERF_DATA_MODE_READ,
+ data->force = force,
d->idx = i;
}
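Note: taken together these hunks teach perf diff the same sample filters perf report already has: --time accepts either a percentage of each file's time window or absolute timestamps (one range per file, ':'-separated, as the parse_absolute_time() comment spells out), --cpu takes a CPU list, and --pid/--tid restrict by task. Hypothetical invocations:

	# compare only the first 10% of each file's time range
	perf diff --time 0%-10% perf.data.old perf.data

	# absolute ranges, one per file, separated by ':'
	perf diff --time 1234.567,1234.789:5678.123,5678.456 perf.data.old perf.data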
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index e06e822ce634..6e4f63b0da4a 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -23,9 +23,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
struct perf_session *session;
struct perf_evsel *pos;
struct perf_data data = {
- .file = {
- .path = file_name,
- },
+ .path = file_name,
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index eda41673c4f3..24086b7f1b14 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -12,6 +12,7 @@
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
@@ -19,6 +20,7 @@
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
+#include "util/symbol.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
@@ -768,10 +770,8 @@ int cmd_inject(int argc, const char **argv)
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
.output = {
- .file = {
- .path = "-",
- },
- .mode = PERF_DATA_MODE_WRITE,
+ .path = "-",
+ .mode = PERF_DATA_MODE_WRITE,
},
};
struct perf_data data = {
@@ -784,7 +784,7 @@ int cmd_inject(int argc, const char **argv)
"Inject build-ids into the output stream"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
- OPT_STRING('o', "output", &inject.output.file.path, "file",
+ OPT_STRING('o', "output", &inject.output.path, "file",
"output file name"),
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
@@ -832,7 +832,7 @@ int cmd_inject(int argc, const char **argv)
inject.tool.ordered_events = inject.sched_stat;
- data.file.path = inject.input_name;
+ data.path = inject.input_name;
inject.session = perf_session__new(&data, true, &inject.tool);
if (inject.session == NULL)
return -1;
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index 90d1a2305b72..bc7a2bc7aed7 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -13,6 +13,7 @@
#include <subcmd/parse-options.h>
#include "debug.h"
#include "machine.h"
+#include "map.h"
#include "symbol.h"
static int __cmd_kallsyms(int argc, const char **argv)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index b63bca4b0c2a..fa520f4b8095 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -6,6 +6,7 @@
#include "util/evsel.h"
#include "util/util.h"
#include "util/config.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
@@ -334,7 +335,7 @@ static int build_alloc_func_list(void)
struct alloc_func *func;
struct machine *machine = &kmem_session->machines.host;
regex_t alloc_func_regex;
- const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
+ static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
if (ret) {
@@ -1924,7 +1925,7 @@ int cmd_kmem(int argc, const char **argv)
NULL
};
struct perf_session *session;
- const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
+ static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
int ret = perf_config(kmem_config, NULL);
if (ret)
@@ -1948,7 +1949,7 @@ int cmd_kmem(int argc, const char **argv)
return __cmd_record(argc, argv);
}
- data.file.path = input_name;
+ data.path = input_name;
kmem_session = session = perf_session__new(&data, false, &perf_kmem);
if (session == NULL)
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 3d4cbc4e87c7..dbb6f737a3e2 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1080,11 +1080,9 @@ static int read_events(struct perf_kvm_stat *kvm)
.ordered_events = true,
};
struct perf_data file = {
- .file = {
- .path = kvm->file_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = kvm->force,
+ .path = kvm->file_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = kvm->force,
};
kvm->tool = eops;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index ead221e49f00..a8394b4f1167 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -82,9 +82,9 @@ int cmd_list(int argc, const char **argv)
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(NULL, NULL, raw_dump);
else if (strcmp(argv[i], "metric") == 0)
- metricgroup__print(true, false, NULL, raw_dump);
+ metricgroup__print(true, false, NULL, raw_dump, details_flag);
else if (strcmp(argv[i], "metricgroup") == 0)
- metricgroup__print(false, true, NULL, raw_dump);
+ metricgroup__print(false, true, NULL, raw_dump, details_flag);
else if ((sep = strchr(argv[i], ':')) != NULL) {
int sep_idx;
@@ -102,7 +102,7 @@ int cmd_list(int argc, const char **argv)
s[sep_idx] = '\0';
print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
print_sdt_events(s, s + sep_idx + 1, raw_dump);
- metricgroup__print(true, true, s, raw_dump);
+ metricgroup__print(true, true, s, raw_dump, details_flag);
free(s);
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
@@ -119,7 +119,7 @@ int cmd_list(int argc, const char **argv)
details_flag);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
- metricgroup__print(true, true, NULL, raw_dump);
+ metricgroup__print(true, true, s, raw_dump, details_flag);
free(s);
}
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 6e0189df2b3b..b9810a8d350a 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -866,11 +866,9 @@ static int __cmd_report(bool display_info)
.ordered_events = true,
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
session = perf_session__new(&data, false, &eops);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 57393e94d156..f45c8b502f63 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -13,6 +13,7 @@
#include "util/data.h"
#include "util/mem-events.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#define MEM_OPERATION_LOAD 0x1
@@ -238,11 +239,9 @@ static int process_sample_event(struct perf_tool *tool,
static int report_raw_events(struct perf_mem *mem)
{
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = mem->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = mem->force,
};
int ret;
struct perf_session *session = perf_session__new(&data, false,
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 99de91698de1..46d3c2deeb40 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -32,6 +32,7 @@
#include "perf.h"
#include "builtin.h"
+#include "namespaces.h"
#include "util/util.h"
#include "util/strlist.h"
#include "util/strfilter.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 882285fb9f64..4e2d953d4bc5 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -23,7 +23,6 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
@@ -39,8 +38,10 @@
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
+#include "util/cpu-set-sched.h"
#include "util/time-utils.h"
#include "util/units.h"
+#include "util/bpf-event.h"
#include "asm/bug.h"
#include <errno.h>
@@ -61,6 +62,9 @@ struct switch_output {
unsigned long time;
const char *str;
bool set;
+ char **filenames;
+ int num_files;
+ int cur_file;
};
struct record {
@@ -81,12 +85,17 @@ struct record {
bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
+ cpu_set_t affinity_mask;
};
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
+static const char *affinity_tags[PERF_AFFINITY_MAX] = {
+ "SYS", "NODE", "CPU"
+};
+
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
@@ -386,7 +395,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
size_t padding;
u8 pad[8] = {0};
- if (!perf_data__is_pipe(data)) {
+ if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
off_t file_offset;
int fd = perf_data__fd(data);
int err;
@@ -531,9 +540,13 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
+ if (opts->affinity != PERF_AFFINITY_SYS)
+ cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
+ opts->auxtrace_snapshot_mode,
+ opts->nr_cblocks, opts->affinity) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -566,7 +579,6 @@ static int record__open(struct record *rec)
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
- struct perf_evsel_config_term *err_term;
int rc = 0;
/*
@@ -619,14 +631,6 @@ try_again:
goto out;
}
- if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- rc = -1;
- goto out;
- }
-
rc = record__mmap(rec);
if (rc)
goto out;
@@ -659,10 +663,9 @@ static int process_sample_event(struct perf_tool *tool,
static int process_buildids(struct record *rec)
{
- struct perf_data *data = &rec->data;
struct perf_session *session = rec->session;
- if (data->size == 0)
+ if (perf_data__size(&rec->data) == 0)
return 0;
/*
@@ -722,6 +725,16 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
+static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
+{
+ if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+ !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
+ CPU_ZERO(&rec->affinity_mask);
+ CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
+ sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+ }
+}
+
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
bool overwrite)
{
@@ -749,6 +762,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
struct perf_mmap *map = &maps[i];
if (map->base) {
+ record__adjust_affinity(rec, map);
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) != 0) {
rc = -1;
@@ -826,6 +840,8 @@ static void record__init_features(struct record *rec)
if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
perf_header__clear_feat(&session->header, HEADER_CLOCKID);
+ perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+
perf_header__clear_feat(&session->header, HEADER_STAT);
}
@@ -839,7 +855,7 @@ record__finish_output(struct record *rec)
return;
rec->session->header.data_size += rec->bytes_written;
- data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
+ data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
if (!rec->no_buildid) {
process_buildids(rec);
@@ -879,6 +895,7 @@ record__switch_output(struct record *rec, bool at_exit)
{
struct perf_data *data = &rec->data;
int fd, err;
+ char *new_filename;
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
@@ -899,7 +916,7 @@ record__switch_output(struct record *rec, bool at_exit)
fd = perf_data__switch(data, timestamp,
rec->session->header.data_offset,
- at_exit);
+ at_exit, &new_filename);
if (fd >= 0 && !at_exit) {
rec->bytes_written = 0;
rec->session->header.data_size = 0;
@@ -907,7 +924,22 @@ record__switch_output(struct record *rec, bool at_exit)
if (!quiet)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
- data->file.path, timestamp);
+ data->path, timestamp);
+
+ if (rec->switch_output.num_files) {
+ int n = rec->switch_output.cur_file + 1;
+
+ if (n >= rec->switch_output.num_files)
+ n = 0;
+ rec->switch_output.cur_file = n;
+ if (rec->switch_output.filenames[n]) {
+ remove(rec->switch_output.filenames[n]);
+ free(rec->switch_output.filenames[n]);
+ }
+ rec->switch_output.filenames[n] = new_filename;
+ } else {
+ free(new_filename);
+ }
/* Output tracking events */
if (!at_exit) {
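Note: with --switch-max-files the rotated outputs form a ring: cur_file advances modulo num_files, and the slot about to be reused is unlinked and freed first, so disk usage stays bounded. A hypothetical invocation:

	# rotate every second, keep only the three newest files
	perf record --switch-output=1s --switch-max-files=3 -a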
@@ -1082,6 +1114,11 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
+ err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
+ machine, opts);
+ if (err < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
1);
@@ -1100,6 +1137,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
+ struct perf_evlist *sb_evlist = NULL;
int fd;
atexit(record__sig_exit);
@@ -1200,6 +1238,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
+ if (!opts->no_bpf_event)
+ bpf_event__add_sb_event(&sb_evlist, &session->header.env);
+
+ if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+
err = record__synthesize(rec, false);
if (err < 0)
goto out_child;
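Note: unless --no-bpf-event is given, record now attaches a side-band event list and starts a thread to consume PERF_RECORD_BPF_EVENT records while the workload runs, so BPF programs loaded during the session remain resolvable; the matching perf_evlist__stop_sb_thread() call appears below, after the session is deleted.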
@@ -1445,11 +1491,14 @@ out_child:
fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
perf_data__size(data) / 1024.0 / 1024.0,
- data->file.path, postfix, samples);
+ data->path, postfix, samples);
}
out_delete_session:
perf_session__delete(session);
+
+ if (!opts->no_bpf_event)
+ perf_evlist__stop_sb_thread(sb_evlist);
return status;
}
@@ -1639,6 +1688,21 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
return -1;
}
+static int record__parse_affinity(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (unset || !str)
+ return 0;
+
+ if (!strcasecmp(str, "node"))
+ opts->affinity = PERF_AFFINITY_NODE;
+ else if (!strcasecmp(str, "cpu"))
+ opts->affinity = PERF_AFFINITY_CPU;
+
+ return 0;
+}
+
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
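Note: the --affinity parser above feeds record__adjust_affinity(): "node" migrates the trace-reading thread to the NUMA node of the mmap buffer it is draining, "cpu" to that buffer's CPU, and the default ("SYS" in affinity_tags) leaves the scheduler mask untouched. A hypothetical invocation:

	# follow the NUMA node of whichever buffer is being drained
	perf record --affinity=node -a -- sleep 1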
@@ -1831,7 +1895,7 @@ static struct option __record_options[] = {
OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
- OPT_STRING('o', "output", &record.data.file.path, "file",
+ OPT_STRING('o', "output", &record.data.path, "file",
"output file name"),
OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
&record.opts.no_inherit_set,
@@ -1839,6 +1903,7 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
+ OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1936,9 +2001,11 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
"Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
- &record.switch_output.set, "signal,size,time",
- "Switch output when receive SIGUSR2 or cross size,time threshold",
+ &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
+ "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
+ OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
+ "Limit number of switch output generated files"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
@@ -1946,6 +2013,9 @@ static struct option __record_options[] = {
&nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
record__aio_parse),
#endif
+ OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
+ "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
+ record__parse_affinity),
OPT_END()
};
@@ -1980,6 +2050,9 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+ CPU_ZERO(&rec->affinity_mask);
+ rec->opts.affinity = PERF_AFFINITY_SYS;
+
rec->evlist = perf_evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
@@ -2019,6 +2092,13 @@ int cmd_record(int argc, const char **argv)
alarm(rec->switch_output.time);
}
+ if (rec->switch_output.num_files) {
+ rec->switch_output.filenames = calloc(sizeof(char *),
+ rec->switch_output.num_files);
+ if (!rec->switch_output.filenames)
+ return -EINVAL;
+ }
+
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().
@@ -2143,6 +2223,8 @@ int cmd_record(int argc, const char **argv)
if (verbose > 0)
pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+ pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+
err = __cmd_record(&record, argc, argv);
out:
perf_evlist__delete(rec->evlist);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4958095be4fc..4054eb1f98ac 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
+#include "util/map.h"
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/values.h"
@@ -46,9 +47,11 @@
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
+#include "sane_ctype.h"
#include <signal.h>
#include <linux/bitmap.h>
#include <linux/stringify.h>
+#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -615,6 +618,21 @@ static int report__collapse_hists(struct report *rep)
return ret;
}
+static int hists__resort_cb(struct hist_entry *he, void *arg)
+{
+ struct report *rep = arg;
+ struct symbol *sym = he->ms.sym;
+
+ if (rep->symbol_ipc && sym && !sym->annotate2) {
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
+
+ symbol__annotate2(sym, he->ms.map, evsel,
+ &annotation__default_options, NULL);
+ }
+
+ return 0;
+}
+
static void report__output_resort(struct report *rep)
{
struct ui_progress prog;
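Note: hists__resort_cb() runs once per entry during the output resort; when IPC output was requested (rep->symbol_ipc) it lazily annotates any symbol that has not been annotated yet, so the IPC columns have cycle data to draw on without a separate pass.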
@@ -622,8 +640,10 @@ static void report__output_resort(struct report *rep)
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
- evlist__for_each_entry(rep->session->evlist, pos)
- perf_evsel__output_resort(pos, &prog);
+ evlist__for_each_entry(rep->session->evlist, pos) {
+ perf_evsel__output_resort_cb(pos, &prog,
+ hists__resort_cb, rep);
+ }
ui_progress__finish();
}
@@ -753,7 +773,8 @@ static int tasks_print(struct report *rep, FILE *fp)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
task = tasks + itask++;
task->thread = rb_entry(nd, struct thread, rb_node);
@@ -880,7 +901,7 @@ static int __cmd_report(struct report *rep)
rep->nr_entries += evsel__hists(pos)->nr_entries;
if (rep->nr_entries == 0) {
- ui__error("The %s file has no samples!\n", data->file.path);
+ ui__error("The %s data has no samples!\n", data->path);
return 0;
}
@@ -907,6 +928,43 @@ report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
return parse_callchain_report_opt(arg);
}
+static int
+parse_time_quantum(const struct option *opt, const char *arg,
+ int unset __maybe_unused)
+{
+ unsigned long *time_q = opt->value;
+ char *end;
+
+ *time_q = strtoul(arg, &end, 0);
+ if (end == arg)
+ goto parse_err;
+ if (*time_q == 0) {
+ pr_err("time quantum cannot be 0");
+ return -1;
+ }
+ while (isspace(*end))
+ end++;
+ if (*end == 0)
+ return 0;
+ if (!strcmp(end, "s")) {
+ *time_q *= NSEC_PER_SEC;
+ return 0;
+ }
+ if (!strcmp(end, "ms")) {
+ *time_q *= NSEC_PER_MSEC;
+ return 0;
+ }
+ if (!strcmp(end, "us")) {
+ *time_q *= NSEC_PER_USEC;
+ return 0;
+ }
+ if (!strcmp(end, "ns"))
+ return 0;
+parse_err:
+ pr_err("Cannot parse time quantum `%s'\n", arg);
+ return -1;
+}
+
int
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
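Note: parse_time_quantum() accepts a plain nanosecond count or a number suffixed with s/ms/us/ns, rejecting zero; "500us", for instance, becomes 500 * NSEC_PER_USEC = 500,000 ns. A hypothetical use of the option registered below:

	# bucket the time sort key into 500us quanta instead of the 100ms default
	perf report --sort time,comm --time-quantum 500us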
@@ -956,9 +1014,9 @@ int cmd_report(int argc, const char **argv)
int branch_mode = -1;
bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
- const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
- CALLCHAIN_REPORT_HELP
- "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
+ static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
+ CALLCHAIN_REPORT_HELP
+ "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
const char * const report_usage[] = {
"perf report [<options>]",
@@ -1025,10 +1083,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "header-only", &report.header_only,
"Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
- " Please refer the man page for the complete list."),
+ sort_help("sort by key(s):")),
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
- "output field(s): overhead, period, sample plus all of sort keys"),
+ sort_help("output field(s): overhead period sample ")),
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1101,6 +1158,8 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
+ OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
+ "Number of samples to save per histogram entry for individual browsing"),
OPT_CALLBACK(0, "percent-limit", &report, "percent",
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -1128,6 +1187,10 @@ int cmd_report(int argc, const char **argv)
OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
+ OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
+ OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
+ "Set time quantum for time sort key (default 100ms)",
+ parse_time_quantum),
OPT_END()
};
struct perf_data data = {
@@ -1188,8 +1251,8 @@ int cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
repeat:
session = perf_session__new(&data, false, &report.tool);
@@ -1356,36 +1419,13 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
- report.ptime_range = perf_time__range_alloc(report.time_str,
- &report.range_size);
- if (!report.ptime_range) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (perf_time__parse_str(report.ptime_range, report.time_str) != 0) {
- if (session->evlist->first_sample_time == 0 &&
- session->evlist->last_sample_time == 0) {
- pr_err("HINT: no first/last sample time found in perf data.\n"
- "Please use latest perf binary to execute 'perf record'\n"
- "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
- ret = -EINVAL;
- goto error;
- }
-
- report.range_num = perf_time__percent_parse_str(
- report.ptime_range, report.range_size,
- report.time_str,
- session->evlist->first_sample_time,
- session->evlist->last_sample_time);
-
- if (report.range_num < 0) {
- pr_err("Invalid time string\n");
- ret = -EINVAL;
+ if (report.time_str) {
+ ret = perf_time__parse_for_ranges(report.time_str, session,
+ &report.ptime_range,
+ &report.range_size,
+ &report.range_num);
+ if (ret < 0)
goto error;
- }
- } else {
- report.range_num = 1;
}
if (session->tevent.pevent &&
@@ -1407,7 +1447,8 @@ repeat:
ret = 0;
error:
- zfree(&report.ptime_range);
+ if (report.ptime_range)
+ zfree(&report.ptime_range);
perf_session__delete(session);
return ret;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index cbf39dab19c1..275f2d92a7bf 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -213,7 +213,7 @@ struct perf_sched {
u64 all_runtime;
u64 all_count;
u64 cpu_last_switched[MAX_CPUS];
- struct rb_root atom_root, sorted_atom_root, merged_atom_root;
+ struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
struct list_head sort_list, cmp_pid;
bool force;
bool skip_merge;
@@ -271,7 +271,7 @@ struct evsel_runtime {
struct idle_thread_runtime {
struct thread_runtime tr;
struct thread *last_thread;
- struct rb_root sorted_root;
+ struct rb_root_cached sorted_root;
struct callchain_root callchain;
struct callchain_cursor cursor;
};
@@ -950,10 +950,10 @@ thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *
}
static struct work_atoms *
-thread_atoms_search(struct rb_root *root, struct thread *thread,
+thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
struct list_head *sort_list)
{
- struct rb_node *node = root->rb_node;
+ struct rb_node *node = root->rb_root.rb_node;
struct work_atoms key = { .thread = thread };
while (node) {
@@ -976,10 +976,11 @@ thread_atoms_search(struct rb_root *root, struct thread *thread,
}
static void
-__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
+__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
struct list_head *sort_list)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+ bool leftmost = true;
while (*new) {
struct work_atoms *this;
@@ -992,12 +993,14 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
if (cmp > 0)
new = &((*new)->rb_left);
- else
+ else {
new = &((*new)->rb_right);
+ leftmost = false;
+ }
}
rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
+ rb_insert_color_cached(&data->node, root, leftmost);
}
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
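Note: the latency trees now use rb_root_cached, which keeps a pointer to the leftmost node so rb_first_cached() is O(1); the cost is that every insertion must report whether the new node became the leftmost one. The idiom, sketched with a hypothetical struct item keyed by a u64 (only <linux/rbtree.h> is assumed):

	struct item {
		struct rb_node node;
		u64 key;
	};

	static void item_insert(struct rb_root_cached *root, struct item *it)
	{
		struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
		bool leftmost = true;

		while (*new) {
			struct item *this = rb_entry(*new, struct item, node);

			parent = *new;
			if (it->key < this->key)
				new = &parent->rb_left;
			else {
				new = &parent->rb_right;
				leftmost = false;	/* went right at least once */
			}
		}
		rb_link_node(&it->node, parent, new);
		rb_insert_color_cached(&it->node, root, leftmost);
	}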
@@ -1447,15 +1450,15 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
- struct rb_root *root = &sched->atom_root;
+ struct rb_root_cached *root = &sched->atom_root;
again:
for (;;) {
struct work_atoms *data;
- node = rb_first(root);
+ node = rb_first_cached(root);
if (!node)
break;
- rb_erase(node, root);
+ rb_erase_cached(node, root);
data = rb_entry(node, struct work_atoms, node);
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
@@ -1782,11 +1785,9 @@ static int perf_sched__read_events(struct perf_sched *sched)
};
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
int rc = -1;
@@ -2762,12 +2763,12 @@ static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
return ret;
}
-static size_t timehist_print_idlehist_callchain(struct rb_root *root)
+static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
size_t ret = 0;
FILE *fp = stdout;
struct callchain_node *chain;
- struct rb_node *rb_node = rb_first(root);
+ struct rb_node *rb_node = rb_first_cached(root);
printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
@@ -2868,7 +2869,7 @@ static void timehist_print_summary(struct perf_sched *sched,
if (itr == NULL)
continue;
- callchain_param.sort(&itr->sorted_root, &itr->callchain,
+ callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
0, &callchain_param);
printf(" CPU %2d:", i);
@@ -2955,11 +2956,9 @@ static int perf_sched__timehist(struct perf_sched *sched)
{ "sched:sched_migrate_task", timehist_migrate_task_event, },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
struct perf_session *session;
@@ -3074,11 +3073,12 @@ static void print_bad_events(struct perf_sched *sched)
}
}
-static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
+static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
struct work_atoms *this;
const char *comm = thread__comm_str(data->thread), *this_comm;
+ bool leftmost = true;
while (*new) {
int cmp;
@@ -3092,6 +3092,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
new = &((*new)->rb_left);
} else if (cmp < 0) {
new = &((*new)->rb_right);
+ leftmost = false;
} else {
this->num_merged++;
this->total_runtime += data->total_runtime;
@@ -3109,7 +3110,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
data->num_merged++;
rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
+ rb_insert_color_cached(&data->node, root, leftmost);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
@@ -3120,8 +3121,8 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
if (sched->skip_merge)
return;
- while ((node = rb_first(&sched->atom_root))) {
- rb_erase(node, &sched->atom_root);
+ while ((node = rb_first_cached(&sched->atom_root))) {
+ rb_erase_cached(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
__merge_work_atoms(&sched->merged_atom_root, data);
}
@@ -3143,7 +3144,7 @@ static int perf_sched__lat(struct perf_sched *sched)
printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
printf(" -----------------------------------------------------------------------------------------------------------------\n");
- next = rb_first(&sched->sorted_atom_root);
+ next = rb_first_cached(&sched->sorted_atom_root);
while (next) {
struct work_atoms *work_list;
@@ -3336,7 +3337,7 @@ static int __cmd_record(int argc, const char **argv)
int cmd_sched(int argc, const char **argv)
{
- const char default_sort_order[] = "avg, max, switch, runtime";
+ static const char default_sort_order[] = "avg, max, switch, runtime";
struct perf_sched sched = {
.tool = {
.sample = perf_sched__process_tracepoint_sample,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d079f36d342d..61cfd8f70989 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -10,6 +10,7 @@
#include "util/perf_regs.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
@@ -28,10 +29,12 @@
#include "util/time-utils.h"
#include "util/path.h"
#include "print_binary.h"
+#include "archinsn.h"
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/time64.h>
+#include <sys/utsname.h>
#include "asm/bug.h"
#include "util/mem-events.h"
#include "util/dump-insn.h"
@@ -50,6 +53,8 @@
static char const *script_name;
static char const *generate_script_lang;
+static bool reltime;
+static u64 initial_time;
static bool debug_mode;
static u64 last_timestamp;
static u64 nr_unordered;
@@ -57,11 +62,11 @@ static bool no_callchain;
static bool latency_format;
static bool system_wide;
static bool print_flags;
-static bool nanosecs;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
static int max_blocks;
+static bool native_arch;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
@@ -148,6 +153,7 @@ static struct {
unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
+ u64 user_set_fields;
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
@@ -344,7 +350,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
if (attr->sample_type & sample_type)
return 0;
- if (output[type].user_set) {
+ if (output[type].user_set_fields & field) {
if (allow_user_set)
return 0;
evname = perf_evsel__name(evsel);
@@ -682,15 +688,21 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
}
if (PRINT_FIELD(TIME)) {
- nsecs = sample->time;
+ u64 t = sample->time;
+ if (reltime) {
+ if (!initial_time)
+ initial_time = sample->time;
+ t = sample->time - initial_time;
+ }
+ nsecs = t;
secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
- if (nanosecs)
+ if (symbol_conf.nanosecs)
printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
else {
char sample_time[32];
- timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
+ timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
printed += fprintf(fp, "%12s: ", sample_time);
}
}
@@ -1225,6 +1237,12 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
return len + dlen;
}
+__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+}
+
static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct perf_event_attr *attr,
struct thread *thread,
@@ -1232,9 +1250,12 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
{
int printed = 0;
+ if (sample->insn_len == 0 && native_arch)
+ arch_fetch_insn(sample, thread, machine);
+
if (PRINT_FIELD(INSNLEN))
printed += fprintf(fp, " ilen: %d", sample->insn_len);
- if (PRINT_FIELD(INSN)) {
+ if (PRINT_FIELD(INSN) && sample->insn_len) {
int i;
printed += fprintf(fp, " insn:");
@@ -1681,13 +1702,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
.force_header = false,
};
struct perf_evsel *ev2;
- static bool init;
u64 val;
- if (!init) {
- perf_stat__init_shadow_stats();
- init = true;
- }
if (!evsel->stats)
perf_evlist__alloc_stats(script->session->evlist, false);
if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1794,7 +1810,7 @@ static void process_event(struct perf_script *script,
return;
}
- if (PRINT_FIELD(TRACE)) {
+ if (PRINT_FIELD(TRACE) && sample->raw_data) {
event_format__fprintf(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size, fp);
}
@@ -1925,6 +1941,13 @@ static int cleanup_scripting(void)
return scripting_ops ? scripting_ops->stop_script() : 0;
}
+static bool filter_cpu(struct perf_sample *sample)
+{
+ if (cpu_list)
+ return !test_bit(sample->cpu, cpu_bitmap);
+ return false;
+}
+
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
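Note: filter_cpu() centralizes the --cpu test: it returns true (drop) only when a CPU list was given and the sample's CPU is outside the bitmap. The hunks below wrap each side-band printer (COMM, NAMESPACES, FORK, EXIT, MMAP, MMAP2, SWITCH, LOST) in !filter_cpu(), so those records honor --cpu the way samples already did.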
@@ -1959,7 +1982,7 @@ static int process_sample_event(struct perf_tool *tool,
if (al.filtered)
goto out_put;
- if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+ if (filter_cpu(sample))
goto out_put;
if (scripting_ops)
@@ -2044,9 +2067,11 @@ static int process_comm_event(struct perf_tool *tool,
sample->tid = event->comm.tid;
sample->pid = event->comm.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_COMM, stdout);
- perf_event__fprintf(event, stdout);
+ perf_event__fprintf(event, stdout);
+ }
ret = 0;
out:
thread__put(thread);
@@ -2080,9 +2105,11 @@ static int process_namespaces_event(struct perf_tool *tool,
sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_NAMESPACES, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_NAMESPACES, stdout);
+ perf_event__fprintf(event, stdout);
+ }
ret = 0;
out:
thread__put(thread);
@@ -2114,9 +2141,11 @@ static int process_fork_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_FORK, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_FORK, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
@@ -2144,9 +2173,11 @@ static int process_exit_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_EXIT, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_EXIT, stdout);
+ perf_event__fprintf(event, stdout);
+ }
if (perf_event__process_exit(tool, event, sample, machine) < 0)
err = -1;
@@ -2180,9 +2211,11 @@ static int process_mmap_event(struct perf_tool *tool,
sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_MMAP, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2212,9 +2245,11 @@ static int process_mmap2_event(struct perf_tool *tool,
sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_MMAP2, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP2, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2239,9 +2274,11 @@ static int process_switch_event(struct perf_tool *tool,
return -1;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_SWITCH, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SWITCH, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2262,9 +2299,11 @@ process_lost_event(struct perf_tool *tool,
if (thread == NULL)
return -1;
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_LOST, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_LOST, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2359,6 +2398,8 @@ static int __cmd_script(struct perf_script *script)
signal(SIGINT, sig_handler);
+ perf_stat__init_shadow_stats();
+
/* override event processing functions */
if (script->show_task_events) {
script->tool.comm = process_comm_event;
@@ -2562,6 +2603,10 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
pr_warning("Overriding previous field request for %s events.\n",
event_type(type));
+ /* Don't override defaults for +- */
+ if (strchr(tok, '+') || strchr(tok, '-'))
+ goto parse;
+
output[type].fields = 0;
output[type].user_set = true;
output[type].wildcard_set = false;
@@ -2630,10 +2675,13 @@ parse:
pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
all_output_options[i].str, event_type(j));
} else {
- if (change == REMOVE)
+ if (change == REMOVE) {
output[j].fields &= ~all_output_options[i].field;
- else
+ output[j].user_set_fields &= ~all_output_options[i].field;
+ } else {
output[j].fields |= all_output_options[i].field;
+ output[j].user_set_fields |= all_output_options[i].field;
+ }
output[j].user_set = true;
output[j].wildcard_set = true;
}
@@ -2646,6 +2694,10 @@ parse:
rc = -EINVAL;
goto out;
}
+ if (change == REMOVE)
+ output[type].fields &= ~all_output_options[i].field;
+ else
+ output[type].fields |= all_output_options[i].field;
output[type].user_set = true;
output[type].wildcard_set = true;
}
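
(In effect, a leading '+' or '-' now modifies the default field set instead of
replacing it, and user_set_fields records exactly which bits the user touched.
A sketch of the intended mask bookkeeping, using the PERF_OUTPUT_* bits from
builtin-script.c:

	/* "-F +insn,-event": keep the defaults, then toggle two bits */
	output[type].fields          |=  PERF_OUTPUT_INSN;	/* +insn  */
	output[type].user_set_fields |=  PERF_OUTPUT_INSN;
	output[type].fields          &= ~PERF_OUTPUT_EVNAME;	/* -event */
	output[type].user_set_fields &= ~PERF_OUTPUT_EVNAME;

so e.g. 'perf script -F +insn' adds the instruction bytes without losing the
default columns.)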
@@ -2938,17 +2990,16 @@ static int check_ev_match(char *dir_name, char *scriptname,
* will list all statically runnable scripts, select one, execute it and
* show the output in a perf browser.
*/
-int find_scripts(char **scripts_array, char **scripts_path_array)
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+ int pathlen)
{
struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
};
char *temp;
int i = 0;
@@ -2985,7 +3036,10 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
/* Skip those real time scripts: xxxtop.p[yl] */
if (strstr(script_dirent->d_name, "top."))
continue;
- sprintf(scripts_path_array[i], "%s/%s", lang_path,
+ if (i >= num)
+ break;
+ snprintf(scripts_path_array[i], pathlen, "%s/%s",
+ lang_path,
script_dirent->d_name);
temp = strchr(script_dirent->d_name, '.');
snprintf(scripts_array[i],
@@ -3224,7 +3278,7 @@ static int parse_insn_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "+insn,-event,-period", 0);
itrace_parse_synth_opts(opt, "i0ns", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3242,7 +3296,7 @@ static int parse_call_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
itrace_parse_synth_opts(opt, "cewp", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3252,7 +3306,7 @@ static int parse_callret_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
itrace_parse_synth_opts(opt, "crewp", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3269,6 +3323,7 @@ int cmd_script(int argc, const char **argv)
.set = false,
.default_no_sample = true,
};
+ struct utsname uts;
char *script_path = NULL;
const char **__argv;
int i, j, err = 0;
@@ -3366,6 +3421,7 @@ int cmd_script(int argc, const char **argv)
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+ OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -3387,7 +3443,7 @@ int cmd_script(int argc, const char **argv)
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
OPT_INTEGER(0, "max-blocks", &max_blocks,
"Maximum number of code blocks to dump with brstackinsn"),
- OPT_BOOLEAN(0, "ns", &nanosecs,
+ OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
"Use 9 decimal places when displaying time"),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options\n" ITRACE_HELP,
@@ -3421,8 +3477,8 @@ int cmd_script(int argc, const char **argv)
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
@@ -3440,6 +3496,11 @@ int cmd_script(int argc, const char **argv)
}
}
+ if (script.time_str && reltime) {
+ fprintf(stderr, "Don't combine --reltime with --time\n");
+ return -1;
+ }
+
if (itrace_synth_opts.callchain &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -3607,6 +3668,12 @@ int cmd_script(int argc, const char **argv)
if (symbol__init(&session->header.env) < 0)
goto out_delete;
+ uname(&uts);
+ if (!strcmp(uts.machine, session->header.env.arch) ||
+ (!strcmp(uts.machine, "x86_64") &&
+ !strcmp(session->header.env.arch, "i386")))
+ native_arch = true;
+
script.session = session;
script__setup_sample_type(&script);
@@ -3648,7 +3715,7 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
}
- input = open(data.file.path, O_RDONLY); /* input_name */
+ input = open(data.path, O_RDONLY); /* input_name */
if (input < 0) {
err = -errno;
perror("failed to open file");
@@ -3691,37 +3758,13 @@ int cmd_script(int argc, const char **argv)
if (err < 0)
goto out_delete;
- script.ptime_range = perf_time__range_alloc(script.time_str,
- &script.range_size);
- if (!script.ptime_range) {
- err = -ENOMEM;
- goto out_delete;
- }
-
- /* needs to be parsed after looking up reference time */
- if (perf_time__parse_str(script.ptime_range, script.time_str) != 0) {
- if (session->evlist->first_sample_time == 0 &&
- session->evlist->last_sample_time == 0) {
- pr_err("HINT: no first/last sample time found in perf data.\n"
- "Please use latest perf binary to execute 'perf record'\n"
- "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
- err = -EINVAL;
- goto out_delete;
- }
-
- script.range_num = perf_time__percent_parse_str(
- script.ptime_range, script.range_size,
- script.time_str,
- session->evlist->first_sample_time,
- session->evlist->last_sample_time);
-
- if (script.range_num < 0) {
- pr_err("Invalid time string\n");
- err = -EINVAL;
+ if (script.time_str) {
+ err = perf_time__parse_for_ranges(script.time_str, session,
+ &script.ptime_range,
+ &script.range_size,
+ &script.range_num);
+ if (err < 0)
goto out_delete;
- }
- } else {
- script.range_num = 1;
}
err = __cmd_script(&script);
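
(The removed block is subsumed by a single consolidated helper; based on the
code it replaces, its shape is:

	int perf_time__parse_for_ranges(const char *str,
					struct perf_session *session,
					struct perf_time_interval **ranges,
					int *range_size, int *range_num);

i.e. it tries the plain time-string parser first and falls back to the
percent-range parser using the session's first/last sample times, reporting
the number of ranges via *range_num.)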
@@ -3729,7 +3772,8 @@ int cmd_script(int argc, const char **argv)
flush_scripting();
out_delete:
- zfree(&script.ptime_range);
+ if (script.ptime_range)
+ zfree(&script.ptime_range);
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 63a3afc7f32b..49ee3c2033ec 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -52,7 +52,6 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
@@ -83,7 +82,6 @@
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
-#include <sys/wait.h>
#include "sane_ctype.h"
@@ -418,7 +416,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
int status = 0;
const bool forks = (argc > 0);
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
- struct perf_evsel_config_term *err_term;
if (interval) {
ts.tv_sec = interval / USEC_PER_MSEC;
@@ -515,13 +512,6 @@ try_again:
return -1;
}
- if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(counter), errno,
- str_error_r(errno, msg, sizeof(msg)));
- return -1;
- }
-
if (STAT_RECORD) {
int err, fd = perf_data__fd(&perf_stat.data);
@@ -728,7 +718,8 @@ static struct option stat_options[] = {
"system-wide collection from all CPUs"),
OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"),
- OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
+ OPT_BOOLEAN(0, "scale", &stat_config.scale,
+ "Use --no-scale to disable counter scaling for multiplexing"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &stat_config.run_count,
@@ -1332,7 +1323,7 @@ static int __cmd_record(int argc, const char **argv)
PARSE_OPT_STOP_AT_NON_OPTION);
if (output_name)
- data->file.path = output_name;
+ data->path = output_name;
if (stat_config.run_count != 1 || forever) {
pr_err("Cannot use -r option with perf stat record.\n");
@@ -1533,8 +1524,8 @@ static int __cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- perf_stat.data.file.path = input_name;
- perf_stat.data.mode = PERF_DATA_MODE_READ;
+ perf_stat.data.path = input_name;
+ perf_stat.data.mode = PERF_DATA_MODE_READ;
session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
if (session == NULL)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 775b99833e51..9b98687a27b9 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1602,11 +1602,9 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{ "syscalls:sys_exit_select", process_exit_poll },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = tchart->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = tchart->force,
};
struct perf_session *session = perf_session__new(&data, false,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index f64e312db787..1999d6533d12 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -22,13 +22,14 @@
#include "perf.h"
#include "util/annotate.h"
+#include "util/bpf-event.h"
#include "util/config.h"
#include "util/color.h"
-#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
@@ -366,7 +367,7 @@ static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
if (p)
*p = 0;
- next = rb_first(&hists->entries);
+ next = rb_first_cached(&hists->entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
@@ -1184,37 +1185,35 @@ static void init_process_thread(struct perf_top *top)
static int __cmd_top(struct perf_top *top)
{
- char msg[512];
- struct perf_evsel *pos;
- struct perf_evsel_config_term *err_term;
- struct perf_evlist *evlist = top->evlist;
struct record_opts *opts = &top->record_opts;
pthread_t thread, thread_process;
int ret;
- top->session = perf_session__new(NULL, false, NULL);
- if (top->session == NULL)
- return -1;
-
if (!top->annotation_opts.objdump_path) {
ret = perf_env__lookup_objdump(&top->session->header.env,
&top->annotation_opts.objdump_path);
if (ret)
- goto out_delete;
+ return ret;
}
ret = callchain_param__setup_sample_type(&callchain_param);
if (ret)
- goto out_delete;
+ return ret;
if (perf_session__register_idle_thread(top->session) < 0)
- goto out_delete;
+ return ret;
if (top->nr_threads_synthesize > 1)
perf_set_multithreaded();
init_process_thread(top);
+ ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
+ &top->session->machines.host,
+ &top->record_opts);
+ if (ret < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
machine__synthesize_threads(&top->session->machines.host, &opts->target,
top->evlist->threads, false,
top->nr_threads_synthesize);
@@ -1224,21 +1223,18 @@ static int __cmd_top(struct perf_top *top)
if (perf_hpp_list.socket) {
ret = perf_env__read_cpu_topology_map(&perf_env);
- if (ret < 0)
- goto out_err_cpu_topo;
+ if (ret < 0) {
+ char errbuf[BUFSIZ];
+ const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
+
+ ui__error("Could not read the CPU topology map: %s\n", err);
+ return ret;
+ }
}
ret = perf_top__start_counters(top);
if (ret)
- goto out_delete;
-
- ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
- if (ret) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- goto out_delete;
- }
+ return ret;
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
@@ -1257,7 +1253,7 @@ static int __cmd_top(struct perf_top *top)
ret = -1;
if (pthread_create(&thread_process, NULL, process_thread, top)) {
ui__error("Could not create process thread.\n");
- goto out_delete;
+ return ret;
}
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
@@ -1301,19 +1297,7 @@ out_join:
out_join_thread:
pthread_cond_signal(&top->qe.cond);
pthread_join(thread_process, NULL);
-out_delete:
- perf_session__delete(top->session);
- top->session = NULL;
-
return ret;
-
-out_err_cpu_topo: {
- char errbuf[BUFSIZ];
- const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
-
- ui__error("Could not read the CPU topology map: %s\n", err);
- goto out_delete;
-}
}
static int
@@ -1485,6 +1469,7 @@ int cmd_top(int argc, const char **argv)
"Display raw encoding of assembly instructions (default)"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
+ OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
@@ -1516,6 +1501,7 @@ int cmd_top(int argc, const char **argv)
"number of thread to run event synthesize"),
OPT_END()
};
+ struct perf_evlist *sb_evlist = NULL;
const char * const top_usage[] = {
"perf top [<options>]",
NULL
@@ -1633,8 +1619,9 @@ int cmd_top(int argc, const char **argv)
annotation_config__init();
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
- if (symbol__init(NULL) < 0)
- return -1;
+ status = symbol__init(NULL);
+ if (status < 0)
+ goto out_delete_evlist;
sort__setup_elide(stdout);
@@ -1644,10 +1631,28 @@ int cmd_top(int argc, const char **argv)
signal(SIGWINCH, winch_sig);
}
+ top.session = perf_session__new(NULL, false, NULL);
+ if (top.session == NULL) {
+ status = -1;
+ goto out_delete_evlist;
+ }
+
+ if (!top.record_opts.no_bpf_event)
+ bpf_event__add_sb_event(&sb_evlist, &perf_env);
+
+ if (perf_evlist__start_sb_thread(sb_evlist, target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+
status = __cmd_top(&top);
+ if (!opts->no_bpf_event)
+ perf_evlist__stop_sb_thread(sb_evlist);
+
out_delete_evlist:
perf_evlist__delete(top.evlist);
+ perf_session__delete(top.session);
return status;
}
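
(The side-band pattern used above, with the signatures as they appear in this
diff:

	struct perf_evlist *sb_evlist = NULL;

	bpf_event__add_sb_event(&sb_evlist, &perf_env);	/* queue dummy event */
	if (perf_evlist__start_sb_thread(sb_evlist, target))
		opts->no_bpf_event = true;	/* degrade: no BPF annotation */
	/* ... run the session ... */
	if (!opts->no_bpf_event)
		perf_evlist__stop_sb_thread(sb_evlist);

The thread tracks BPF program load/unload while the tool runs, so those
programs remain annotatable.)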
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index ed4583128b9c..f5b3a1e9c1dd 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -19,6 +19,7 @@
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
+#include "util/bpf_map.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
@@ -29,6 +30,8 @@
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
+#include "util/map.h"
+#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
@@ -85,6 +88,9 @@ struct trace {
*augmented;
} events;
} syscalls;
+ struct {
+ struct bpf_map *map;
+ } dump;
struct record_opts opts;
struct perf_evlist *evlist;
struct machine *host;
@@ -1039,6 +1045,9 @@ static const size_t trace__entry_str_size = 2048;
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
+ if (fd < 0)
+ return NULL;
+
if (fd > ttrace->files.max) {
struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
@@ -2514,19 +2523,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
+ bool found = false;
+ struct perf_evsel *evsel, *tmp;
+ struct parse_events_error err = { .idx = 0, };
+ int ret = parse_events(evlist, "probe:vfs_getname*", &err);
- if (IS_ERR(evsel))
+ if (ret)
return false;
- if (perf_evsel__field(evsel, "pathname") == NULL) {
+ evlist__for_each_entry_safe(evlist, evsel, tmp) {
+ if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
+ continue;
+
+ if (perf_evsel__field(evsel, "pathname")) {
+ evsel->handler = trace__vfs_getname;
+ found = true;
+ continue;
+ }
+
+ list_del_init(&evsel->node);
+ evsel->evlist = NULL;
perf_evsel__delete(evsel);
- return false;
}
- evsel->handler = trace__vfs_getname;
- perf_evlist__add(evlist, evsel);
- return true;
+ return found;
}
static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
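
(Because evsels lacking a "pathname" field are unlinked and freed during the
walk, the _safe iterator variant is required; it caches the next node before
the loop body runs:

	struct perf_evsel *evsel, *tmp;

	evlist__for_each_entry_safe(evlist, evsel, tmp) {
		if (!wanted(evsel)) {		/* wanted() is hypothetical */
			list_del_init(&evsel->node); /* safe: next already cached */
			perf_evsel__delete(evsel);
		}
	}
)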
@@ -2755,7 +2775,8 @@ static int trace__set_filter_loop_pids(struct trace *trace)
if (parent == NULL)
break;
- if (!strcmp(thread__comm_str(parent), "sshd")) {
+ if (!strcmp(thread__comm_str(parent), "sshd") ||
+ strstarts(thread__comm_str(parent), "gnome-terminal")) {
pids[nr++] = parent->tid;
break;
}
@@ -2980,6 +3001,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
+ if (trace->dump.map)
+ bpf_map__fprintf(trace->dump.map, trace->output);
+
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -3130,11 +3154,9 @@ static int trace__replay(struct trace *trace)
{ "probe:vfs_getname", trace__vfs_getname, },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = trace->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = trace->force,
};
struct perf_session *session;
struct perf_evsel *evsel;
@@ -3669,6 +3691,7 @@ int cmd_trace(int argc, const char **argv)
.max_stack = UINT_MAX,
.max_events = ULONG_MAX,
};
+ const char *map_dump_str = NULL;
const char *output_name = NULL;
const struct option trace_options[] = {
OPT_CALLBACK('e', "event", &trace, "event",
@@ -3701,6 +3724,9 @@ int cmd_trace(int argc, const char **argv)
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
+#ifdef HAVE_LIBBPF_SUPPORT
+ OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
+#endif
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_BOOLEAN('T', "time", &trace.full_time,
@@ -3795,6 +3821,14 @@ int cmd_trace(int argc, const char **argv)
err = -1;
+ if (map_dump_str) {
+ trace.dump.map = bpf__find_map_by_name(map_dump_str);
+ if (trace.dump.map == NULL) {
+ pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
+ goto out;
+ }
+ }
+
if (trace.trace_pgfaults) {
trace.opts.sample_address = true;
trace.opts.sample_time = true;
@@ -3854,7 +3888,8 @@ int cmd_trace(int argc, const char **argv)
goto init_augmented_syscall_tp;
}
- if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
+ if (trace.syscalls.events.augmented->priv == NULL &&
+ strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
struct perf_evsel *augmented = trace.syscalls.events.augmented;
if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
perf_evsel__init_augmented_syscall_tp_args(augmented))
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 05745f3ce912..999fe9170122 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -40,5 +40,6 @@ int cmd_mem(int argc, const char **argv);
int cmd_data(int argc, const char **argv);
int cmd_ftrace(int argc, const char **argv);
-int find_scripts(char **scripts_array, char **scripts_path_array);
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+ int pathlen);
#endif
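
(The extra num/pathlen parameters let find_scripts() bound both the array and
each path buffer instead of trusting sprintf()-sized storage. A hypothetical
caller, with NUM_SCRIPTS/PATHLEN as stand-in sizes and assuming the function
still returns the number of entries filled:

	char *names[NUM_SCRIPTS], *paths[NUM_SCRIPTS]; /* paths[i]: PATHLEN bytes */

	int n = find_scripts(names, paths, NUM_SCRIPTS, PATHLEN);
	for (int i = 0; i < n; i++)
		printf("%s -> %s\n", names[i], paths[i]);
)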
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index a28dca2582aa..0453ba26cdbd 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -222,6 +222,10 @@ The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a
way to request that counting of events be restricted to times when the
CPU is in user, kernel and/or hypervisor mode.
+Furthermore, the 'exclude_host' and 'exclude_guest' bits provide a way
+to request that counting of events be restricted to guest or host
+context when using Linux as the hypervisor.
+
The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap
operations, these can be used to relate userspace IP addresses to actual
code, even after the mapping (or even the whole process) is gone,
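
(For reference, these are bitfields in struct perf_event_attr; a minimal
host-only counter setup would look like:

	#include <linux/perf_event.h>
	#include <string.h>

	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_guest = 1;	/* don't count while a KVM guest is executing */

and, symmetrically, attr.exclude_host = 1 restricts counting to guest
context.)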
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
index 53c233370fae..f9b2161e1ca4 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -18,23 +18,13 @@
#include <pid_filter.h>
/* bpf-output associated map */
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall {
bool enabled;
};
-struct bpf_map SEC("maps") syscalls = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct syscall),
- .max_entries = 512,
-};
+bpf_map(syscalls, ARRAY, int, struct syscall, 512);
struct syscall_enter_args {
unsigned long long common_tp_fields;
@@ -141,8 +131,8 @@ int sys_enter(struct syscall_enter_args *args)
len = sizeof(augmented_args.args);
}
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
- return 0;
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
}
SEC("raw_syscalls:sys_exit")
diff --git a/tools/perf/examples/bpf/augmented_syscalls.c b/tools/perf/examples/bpf/augmented_syscalls.c
index 2ae44813ef2d..524fdb8534b3 100644
--- a/tools/perf/examples/bpf/augmented_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_syscalls.c
@@ -19,12 +19,8 @@
#include <stdio.h>
#include <linux/socket.h>
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+/* bpf-output associated map */
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall_exit_args {
unsigned long long common_tp_fields;
@@ -55,9 +51,9 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size; \
len &= sizeof(augmented_args.filename.value) - 1; \
} \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, len); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, len); \
} \
int syscall_exit(syscall)(struct syscall_exit_args *args) \
{ \
@@ -125,10 +121,10 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
/* addrlen = augmented_args.args.addrlen; */ \
/* */ \
probe_read(&augmented_args.addr, addrlen, args->addr_ptr); \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen);\
} \
int syscall_exit(syscall)(struct syscall_exit_args *args) \
{ \
diff --git a/tools/perf/examples/bpf/etcsnoop.c b/tools/perf/examples/bpf/etcsnoop.c
index b59e8812ee8c..e81b535346c0 100644
--- a/tools/perf/examples/bpf/etcsnoop.c
+++ b/tools/perf/examples/bpf/etcsnoop.c
@@ -21,12 +21,8 @@
#include <stdio.h>
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+/* bpf-output associated map */
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct augmented_filename {
int size;
@@ -49,11 +45,11 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
args->filename_ptr); \
if (__builtin_memcmp(augmented_args.filename.value, etc, 4) != 0) \
return 0; \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
- augmented_args.filename.size)); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
+ augmented_args.filename.size)); \
}
struct syscall_enter_openat_args {
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index e667577207dc..2eac6d804b2d 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -18,6 +18,20 @@ struct bpf_map {
unsigned int numa_node;
};
+#define bpf_map(name, _type, type_key, type_val, _max_entries) \
+struct bpf_map SEC("maps") name = { \
+ .type = BPF_MAP_TYPE_##_type, \
+ .key_size = sizeof(type_key), \
+ .value_size = sizeof(type_val), \
+ .max_entries = _max_entries, \
+}; \
+struct ____btf_map_##name { \
+ type_key key; \
+ type_val value; \
+}; \
+struct ____btf_map_##name __attribute__((section(".maps." #name), used)) \
+ ____btf_map_##name = { }
+
/*
* FIXME: this should receive .max_entries as a parameter, as careful
* tuning of these limits is needed to avoid hitting limits that
@@ -26,13 +40,7 @@ struct bpf_map {
* For the current need, 'perf trace --filter-pids', 64 should
* be good enough, but this surely needs to be revisited.
*/
-#define pid_map(name, value_type) \
-struct bpf_map SEC("maps") name = { \
- .type = BPF_MAP_TYPE_HASH, \
- .key_size = sizeof(pid_t), \
- .value_size = sizeof(value_type), \
- .max_entries = 64, \
-}
+#define pid_map(name, value_type) bpf_map(name, HASH, pid_t, value_type, 64)
static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
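
(Usage of the new macro, as seen in the converted examples above:

	/* declares the map plus a ____btf_map_syscalls key/value struct */
	bpf_map(syscalls, ARRAY, int, struct syscall, 512);

The companion struct placed in the ".maps.<name>" section is what lets tools
recover the key/value type information via BTF.)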
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index a11cb006f968..72df4b6fa36f 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -298,6 +298,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
use_pager = 1;
commit_pager_choice();
+ perf_env__init(&perf_env);
perf_env__set_cmdline(&perf_env, argc, argv);
status = p->fn(argc, argv);
perf_config__exit();
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 388c6dd128b8..c59743def8d3 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -66,6 +66,7 @@ struct record_opts {
bool ignore_missing_thread;
bool strict_freq;
bool sample_id;
+ bool no_bpf_event;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
@@ -83,6 +84,14 @@ struct record_opts {
clockid_t clockid;
u64 clockid_res_ns;
int nr_cblocks;
+ int affinity;
+};
+
+enum perf_affinity {
+ PERF_AFFINITY_SYS = 0,
+ PERF_AFFINITY_NODE,
+ PERF_AFFINITY_CPU,
+ PERF_AFFINITY_MAX
};
struct option;
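
(A hypothetical sketch of how a --affinity option string could map onto the
new enum; parse_affinity() is illustrative, not part of this diff:

	static int parse_affinity(const char *str)
	{
		if (!strcmp(str, "node"))
			return PERF_AFFINITY_NODE;	/* pin per NUMA node   */
		if (!strcmp(str, "cpu"))
			return PERF_AFFINITY_CPU;	/* pin per mmaped CPU  */
		return PERF_AFFINITY_SYS;		/* default: leave unpinned */
	}
)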
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
new file mode 100644
index 000000000000..bffb2d4a6420
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
@@ -0,0 +1,2245 @@
+[
+ {
+ "BriefDescription": "% of finished branches that were treated as BC+8",
+ "MetricExpr": "PM_BR_BC_8_CONV / PM_BRU_FIN * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "bc_8_branch_ratio_percent"
+ },
+ {
+ "BriefDescription": "% of finished branches that were pairable but not treated as BC+8",
+ "MetricExpr": "PM_BR_BC_8 / PM_BRU_FIN * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "bc_8_not_converted_branch_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percent of mispredicted branches out of all predicted (correctly and incorrectly) branches that completed",
+ "MetricExpr": "PM_BR_MPRED_CMPL / (PM_BR_PRED_BR0 + PM_BR_PRED_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "br_misprediction_percent"
+ },
+ {
+ "BriefDescription": "% of Branch miss predictions per instruction",
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "branch_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction per instruction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of count catch mispredictions out of all completed branches that required count cache predictionn",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_misprediction_percent"
+ },
+ {
+ "BriefDescription": "CR MisPredictions per Instruction",
+ "MetricExpr": "PM_BR_MPRED_CR / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "cr_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of link stack mispredictions out of all completed branches that required link stack prediction",
+ "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / (PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_misprediction_percent"
+ },
+ {
+ "BriefDescription": "TA MisPredictions per Instruction",
+ "MetricExpr": "PM_BR_MPRED_TA / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ta_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of target address mispredictions out of all completed branches that required address prediction",
+ "MetricExpr": "PM_BR_MPRED_TA / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1 + PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ta_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Percent of branches completed that were taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BR_CMPL",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "taken_branches_percent"
+ },
+ {
+ "BriefDescription": "Percent of chip+group+sys pumps that were incorrectly predicted",
+ "MetricExpr": "PM_PUMP_MPRED * 100 / (PM_PUMP_CPRED + PM_PUMP_MPRED)",
+ "MetricGroup": "bus_stats",
+ "MetricName": "any_pump_mpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of chip pumps that were correctly predicted as chip pumps the first time",
+ "MetricExpr": "PM_CHIP_PUMP_CPRED * 100 / PM_L2_CHIP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "chip_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of group pumps that were correctly predicted as group pumps the first time",
+ "MetricExpr": "PM_GRP_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "group_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of system pumps that were correctly predicted as group pumps the first time",
+ "MetricExpr": "PM_SYS_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "sys_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to CRU or BRU operations",
+ "MetricExpr": "PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_cru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to ISU Branch Operations",
+ "MetricExpr": "PM_CMPLU_STALL_BRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which a Group Completed",
+ "MetricExpr": "PM_GRP_CMPL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "completion_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by CO queue full",
+ "MetricExpr": "PM_CMPLU_STALL_COQ_FULL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "coq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to CRU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_BRU_CRU - PM_CMPLU_STALL_BRU) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "cru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by flushes",
+ "MetricExpr": "PM_CMPLU_STALL_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "flush_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by FXU Multi-Cycle Instructions",
+ "MetricExpr": "PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_multi_cyc_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by FXU",
+ "MetricExpr": "PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Other cycles stalled by FXU",
+ "MetricExpr": "(PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty due to Branch Mispredicts",
+ "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty due to Branch Mispredicts and Icache Misses",
+ "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_br_mpred_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held",
+ "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to issue queue",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to maps",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_map_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to syncs and other effects",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to SRQ",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_srq_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses",
+ "MetricExpr": "PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve in the local L2 or L3",
+ "MetricExpr": "(PM_GCT_NOSLOT_IC_MISS - PM_GCT_NOSLOT_IC_L3MISS) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_l2l3_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve off-chip",
+ "MetricExpr": "PM_GCT_NOSLOT_IC_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Other GCT empty cycles",
+ "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL) - ((PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL))",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by heavyweight syncs",
+ "MetricExpr": "PM_CMPLU_STALL_HWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "hwsync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU",
+ "MetricExpr": "PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in distant interventions and memory",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_REMOTE) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_distant_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote or distant caches",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31 / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l21l31_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was a conflict",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_conflict_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was no conflict",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_noconflict_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in other core's caches or memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in local memory or local L4",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_lmem_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote interventions and memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_remote_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by ERAT Translation rejects",
+ "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_erat_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU load finishes",
+ "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_ld_fin_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LHS rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_lhs_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LMQ Full rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_lmq_full_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU Rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_reject_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Rejects",
+ "MetricExpr": "(PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_reject_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU store forwarding",
+ "MetricExpr": "PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_st_fwd_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU Stores",
+ "MetricExpr": "PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_store_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by lightweight syncs",
+ "MetricExpr": "PM_CMPLU_STALL_LWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lwsync_stall_cpi"
+ },
+ {
+ "MetricExpr": "PM_CMPLU_STALL_MEM_ECC_DELAY / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "mem_ecc_delay_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by nops (nothing next to finish)",
+ "MetricExpr": "PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "no_ntf_stall_cpi"
+ },
+ {
+ "MetricExpr": "PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntcg_all_fin_cpi"
+ },
+ {
+ "MetricExpr": "PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntcg_flush_cpi"
+ },
+ {
+ "BriefDescription": "Other thread block stall cycles",
+ "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_LWSYNC - PM_CMPLU_STALL_HWSYNC - PM_CMPLU_STALL_MEM_ECC_DELAY - PM_CMPLU_STALL_FLUSH - PM_CMPLU_STALL_COQ_FULL) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles unaccounted for",
+ "MetricExpr": "(PM_RUN_CYC / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL) - (PM_GRP_CMPL / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_cpi"
+ },
+ {
+ "BriefDescription": "Stall cycles unaccounted for",
+ "MetricExpr": "(PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Run cycles per run instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cpi"
+ },
+ {
+ "BriefDescription": "Completion Stall Cycles",
+ "MetricExpr": "PM_CMPLU_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles a thread was blocked",
+ "MetricExpr": "PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "thread_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU",
+ "MetricExpr": "PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by other VSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_VSU - PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_SCALAR) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Scalar Operations",
+ "MetricExpr": "PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Scalar Long Operations",
+ "MetricExpr": "PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_long_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other VSU Scalar Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Vector Operations",
+ "MetricExpr": "PM_CMPLU_STALL_VECTOR / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Vector Long Operations",
+ "MetricExpr": "PM_CMPLU_STALL_VECTOR_LONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_long_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by other VSU Vector Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_VECTOR_LONG) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_other_cpi"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_lhs_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_no_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_other_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 S tate, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the L3 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_no_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 demand load misses per run instruction",
+ "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "l1_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 misses that result in a cache reload",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_miss_reloads_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L4",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant Memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_lhs_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with no conflicts",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_no_conflict_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_other_conflict_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 where the load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_conflict_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the line was brought into the L3 by a prefetch operation",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_mepf_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 without conflicts",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_no_conflict_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local Memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L4",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote Memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "dL1 miss portion of CPI",
+ "MetricExpr": "( (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)/ (PM_RUN_CYC / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dcache_miss_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant L4 miss rates with measured DL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant memory miss rates with measured DMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 MOD miss rates with measured L21 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L21_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 SHR miss rates with measured L21 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L21_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2 miss rates with measured L2 latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L2 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) ) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l2_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 MOD miss rates with measured L31 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L31_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 SHR miss rates with measured L31 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L31_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L3 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l3_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of Local L4 miss rates with measured LL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_LL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "ll4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of Local memory miss rates with measured LMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_LMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "lmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote L4 miss rates with measured RL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote memory miss rates with measured RMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "Branch Mispredict flushes per instruction",
+ "MetricExpr": "PM_FLUSH_BR_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "general",
+ "MetricName": "br_mpred_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cpi"
+ },
+ {
+ "BriefDescription": "Percentage Cycles a group completed",
+ "MetricExpr": "PM_GRP_CMPL / PM_CYC * 100",
+ "MetricGroup": "general",
+ "MetricName": "cyc_grp_completed_percent"
+ },
+ {
+ "BriefDescription": "Percentage Cycles a group dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
+ "MetricGroup": "general",
+ "MetricName": "cyc_grp_dispatched_percent"
+ },
+ {
+ "BriefDescription": "Cycles per group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cyc_per_group"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100",
+ "MetricGroup": "general",
+ "MetricName": "disp_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "% DTLB miss rate per inst",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "dtlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Flush rate (%)",
+ "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "flush_rate_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_11_14_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_11to14_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_15_17_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_15to17_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_18_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_18plus_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_1_2_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_1to2_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_3_6_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_3to6_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_7_10_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_7to10_slots_percent"
+ },
+ {
+ "BriefDescription": "Avg. group size",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "group_size"
+ },
+ {
+ "BriefDescription": "Instructions per group",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "inst_per_group"
+ },
+ {
+ "BriefDescription": "Instructions per cycles",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "general",
+ "MetricName": "ipc"
+ },
+ {
+ "BriefDescription": "% ITLB miss rate per inst",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "itlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per L1 store ref",
+ "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 dmand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Run cycles per cycle",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC*100",
+ "MetricGroup": "general",
+ "MetricName": "run_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT2 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT2_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt2_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT4 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT4_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt4_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT8 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT8_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt8_cycles_percent"
+ },
+ {
+ "BriefDescription": "IPC of all instructions completed by the core while this thread was stalled",
+ "MetricExpr": "PM_CMPLU_STALL_OTHER_CMPL/PM_RUN_CYC",
+ "MetricGroup": "general",
+ "MetricName": "smt_benefit"
+ },
+ {
+ "BriefDescription": "Instruction dispatch-to-completion ratio",
+ "MetricExpr": "PM_INST_DISP / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "speculation"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in Single Thread Mode",
+ "MetricExpr": "(PM_RUN_CYC_ST_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "st_cycles_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2 per Inst",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3 other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3 per Inst",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)",
+ "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "l1_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% Branches per instruction",
+ "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "branches_per_inst"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations",
+ "MetricExpr": "(PM_FXU0_FIN + PM_FXU1_FIN)/PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fixed_per_inst"
+ },
+ {
+ "BriefDescription": "FXU0 balance",
+ "MetricExpr": "PM_FXU0_FIN / (PM_FXU0_FIN + PM_FXU1_FIN)",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_balance"
+ },
+ {
+ "BriefDescription": "Fraction of cycles that FXU0 is in use",
+ "MetricExpr": "PM_FXU0_FIN / PM_RUN_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_fin"
+ },
+ {
+ "BriefDescription": "FXU0 only Busy",
+ "MetricExpr": "PM_FXU0_BUSY_FXU1_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_only_busy"
+ },
+ {
+ "BriefDescription": "Fraction of cycles that FXU1 is in use",
+ "MetricExpr": "PM_FXU1_FIN / PM_RUN_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu1_fin"
+ },
+ {
+ "BriefDescription": "FXU1 only Busy",
+ "MetricExpr": "PM_FXU1_BUSY_FXU0_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu1_only_busy"
+ },
+ {
+ "BriefDescription": "Both FXU Busy",
+ "MetricExpr": "PM_FXU_BUSY / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu_both_busy"
+ },
+ {
+ "BriefDescription": "Both FXU Idle",
+ "MetricExpr": "PM_FXU_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu_both_idle"
+ },
+ {
+ "BriefDescription": "PCT instruction loads",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "loads_per_inst"
+ },
+ {
+ "BriefDescription": "PCT instruction stores",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "stores_per_inst"
+ },
+ {
+ "BriefDescription": "Icache Fetchs per Icache Miss",
+ "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_miss_reload"
+ },
+ {
+ "BriefDescription": "% of ICache reloads due to prefetch",
+ "MetricExpr": "PM_IC_PREF_WRITE * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_pref_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "Average number of stores that gather in the store buffer before being sent to an L2 RC machine",
+ "MetricExpr": "PM_ST_CMPL / (PM_L2_ST / 2)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "avg_stores_gathered"
+ },
+ {
+ "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
+ "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 store misses per drained store. A drained store may contain multiple individual stores if they target the same line",
+ "MetricExpr": "PM_L2_ST_MISS / (PM_L2_ST / 2)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_store_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "average L1 miss latency using marked events",
+ "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1",
+ "MetricGroup": "latency",
+ "MetricName": "average_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Average icache miss latency",
+ "MetricExpr": "(PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ)",
+ "MetricGroup": "latency",
+ "MetricName": "average_il1_miss_latency"
+ },
+ {
+ "BriefDescription": "average service time for SYNC",
+ "MetricExpr": "PM_LSU_SRQ_SYNC_CYC / PM_LSU_SRQ_SYNC",
+ "MetricGroup": "latency",
+ "MetricName": "average_sync_cyc"
+ },
+ {
+ "BriefDescription": "Cycles LMQ slot0 was active on an average",
+ "MetricExpr": "PM_LSU_LMQ_S0_VALID / PM_LSU_LMQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lmq_life_time"
+ },
+ {
+ "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 0 is VALID ONLY FOR EVEN THREADS",
+ "MetricExpr": "PM_LSU_LRQ_S0_VALID / PM_LSU_LRQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lrq_life_time_even"
+ },
+ {
+ "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 43 is valid ONLY FOR ODD THREADS",
+ "MetricExpr": "PM_LSU_LRQ_S43_VALID / PM_LSU_LRQ_S43_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lrq_life_time_odd"
+ },
+ {
+ "BriefDescription": "Average number of cycles SRQ stays active for one load. Slot 0 is VALID ONLY FOR EVEN THREADS",
+ "MetricExpr": "PM_LSU_SRQ_S0_VALID / PM_LSU_SRQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_srq_life_time_even"
+ },
+ {
+ "BriefDescription": "Average number of cycles SRQ stays active for one load. Slot 39 is valid ONLY FOR ODD THREADS",
+ "MetricExpr": "PM_LSU_SRQ_S39_VALID / PM_LSU_SRQ_S39_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_srq_life_time_odd"
+ },
+ {
+ "BriefDescription": "Marked background kill latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL",
+ "MetricGroup": "latency",
+ "MetricName": "bkill_latency"
+ },
+ {
+ "BriefDescription": "Marked dclaim latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_DCLAIM_CYC / PM_MRK_FAB_RSP_DCLAIM",
+ "MetricGroup": "latency",
+ "MetricName": "dclaim_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Distant L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4",
+ "MetricGroup": "latency",
+ "MetricName": "dl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Dmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM",
+ "MetricGroup": "latency",
+ "MetricName": "dmem_latency"
+ },
+ {
+ "BriefDescription": "estimated exposed miss latency for dL1 misses, ie load miss when we were NTC",
+ "MetricExpr": "PM_MRK_LD_MISS_EXPOSED_CYC / PM_MRK_LD_MISS_EXPOSED",
+ "MetricGroup": "latency",
+ "MetricName": "exposed_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the M state",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l21_mod_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the S state",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l21_shr_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time due to load-hit-store",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
+ "MetricGroup": "latency",
+ "MetricName": "l2_disp_conflict_ldhitst_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time NOT due load-hit-store",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER",
+ "MetricGroup": "latency",
+ "MetricName": "l2_disp_conflict_other_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that were satisfied by lines prefetched into the L3. This information is forwarded from the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_MEPF_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_mepf_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered no conflicts",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_no_conflict_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and beyond",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2MISS_CYC/ PM_MRK_DATA_FROM_L2MISS",
+ "MetricGroup": "latency",
+ "MetricName": "l2miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l31_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l31_shr_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
+ "MetricGroup": "latency",
+ "MetricName": "l3_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and suffered no conflicts",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l3_no_conflict_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that come from beyond the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3MISS_CYC/ PM_MRK_DATA_FROM_L3MISS",
+ "MetricGroup": "latency",
+ "MetricName": "l3miss_latency"
+ },
+ {
+ "BriefDescription": "Average latency for marked reloads that hit in the L3 on the MEPF state. i.e. lines that were prefetched into the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_MEPF_CYC/ PM_MRK_DATA_FROM_L3_MEPF",
+ "MetricGroup": "latency",
+ "MetricName": "l3pref_latency"
+ },
+ {
+ "BriefDescription": "Local L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4",
+ "MetricGroup": "latency",
+ "MetricName": "ll4_latency"
+ },
+ {
+ "BriefDescription": "Marked Lmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM",
+ "MetricGroup": "latency",
+ "MetricName": "lmem_latency"
+ },
+ {
+ "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on a different chip",
+ "MetricExpr": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_OFF_CHIP_CACHE",
+ "MetricGroup": "latency",
+ "MetricName": "off_chip_cache_latency"
+ },
+ {
+ "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on the same chip",
+ "MetricExpr": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_ON_CHIP_CACHE",
+ "MetricGroup": "latency",
+ "MetricName": "on_chip_cache_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Remote L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4",
+ "MetricGroup": "latency",
+ "MetricName": "rl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Rmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM",
+ "MetricGroup": "latency",
+ "MetricName": "rmem_latency"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LMQ full reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LSU reject ratio",
+ "MetricExpr": "PM_LSU_REJECT *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lsu_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LSU reject ratio",
+ "MetricExpr": "PM_LSU_REJECT *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lsu_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_rl4"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote and distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_rmem"
+ },
+ {
+ "BriefDescription": "Number of loads from remote memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_rmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Memory locality",
+ "MetricExpr": "(PM_DATA_FROM_LL4 + PM_DATA_FROM_LMEM) * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4 + PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4 + PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4)",
+ "MetricGroup": "memory",
+ "MetricName": "mem_locality_percent"
+ },
+ {
+ "BriefDescription": "DERAT Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "derat_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT misses that result in an ERAT reload",
+ "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "derat_miss_reload_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 16G page per inst",
+ "MetricExpr": "100 * PM_DERAT_MISS_16G / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16g_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16G page",
+ "MetricExpr": "PM_DERAT_MISS_16G / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16g_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 16M page per inst",
+ "MetricExpr": "PM_DERAT_MISS_16M * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16m_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16M page",
+ "MetricExpr": "PM_DERAT_MISS_16M / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16m_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 4K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 64K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DSLB_Miss_Rate per inst",
+ "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "dslb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% ISLB miss rate per inst",
+ "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "islb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits on any Centaur (local, remote, or distant) on either L4 or DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_MEMORY / PM_LD_REF_L1",
+ "MetricName": "any_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Base Completion Cycles",
+ "MetricExpr": "PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL",
+ "MetricName": "base_completion_cpi"
+ },
+ {
+ "BriefDescription": "Marked background kill latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL",
+ "MetricName": "bkill_ratio_percent"
+ },
+ {
+ "BriefDescription": "cycles",
+ "MetricExpr": "PM_RUN_CYC",
+ "MetricName": "custom_secs"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant chip's Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4) / PM_LD_REF_L1",
+ "MetricName": "distant_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads that came from the L3 and beyond",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l2_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_l31_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 where the lines were brought into the L3 by a prefetch operation",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l3_mepf_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l3_miss_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a distant chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD / PM_LD_REF_L1",
+ "MetricName": "dl2l3_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a distant chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR / PM_LD_REF_L1",
+ "MetricName": "dl2l3_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL4 / PM_LD_REF_L1",
+ "MetricName": "dl4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DMEM / PM_LD_REF_L1",
+ "MetricName": "dmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Overhead of expansion cycles",
+ "MetricExpr": "(PM_GRP_CMPL / PM_RUN_INST_CMPL) - (PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL)",
+ "MetricName": "expansion_overhead_cpi"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations executded in the Load/Store Unit following a load/store operation",
+ "MetricExpr": "PM_LSU_FX_FIN/PM_RUN_INST_CMPL",
+ "MetricName": "fixed_in_lsu_per_inst"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_CYC) * 100",
+ "MetricName": "gct_empty_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L2",
+ "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L3",
+ "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of L1 hits per load ref",
+ "MetricExpr": "(PM_LD_REF_L1 - PM_LD_MISS_L1) / PM_LD_REF_L1",
+ "MetricName": "l1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1",
+ "MetricName": "l1_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L21_MOD + PM_DATA_FROM_L21_SHR) / PM_LD_REF_L1",
+ "MetricName": "l2_1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD / PM_LD_REF_L1",
+ "MetricName": "l2_1_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR / PM_LD_REF_L1",
+ "MetricName": "l2_1_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Average number of Castout machines used. 1 of 16 CO machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_CO_USAGE / PM_RUN_CYC) * 16",
+ "MetricName": "l2_co_usage"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L2 / PM_LD_REF_L1",
+ "MetricName": "l2_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load misses per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_LD_REF_L1",
+ "MetricName": "l2_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST / PM_LD_REF_L1",
+ "MetricName": "l2_lhs_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l2_no_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER / PM_LD_REF_L1",
+ "MetricName": "l2_other_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_RC_USAGE / PM_RUN_CYC) * 16",
+ "MetricName": "l2_rc_usage"
+ },
+ {
+ "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_SN_USAGE / PM_RUN_CYC) * 8",
+ "MetricName": "l2_sn_usage"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)",
+ "MetricName": "l31_latency"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) / PM_LD_REF_L1",
+ "MetricName": "l3_1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD / PM_LD_REF_L1",
+ "MetricName": "l3_1_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR / PM_LD_REF_L1",
+ "MetricName": "l3_1_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per load ref where the demand load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l3_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3 / PM_LD_REF_L1",
+ "MetricName": "l3_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load misses per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_LD_REF_L1",
+ "MetricName": "l3_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per load ref where the L3 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l3_no_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 hits on lines that were not in the MEPF state per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L3 - PM_DATA_FROM_L3_MEPF) / PM_LD_REF_L1",
+ "MetricName": "l3other_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 hits on lines that were recently prefetched into the L3 (MEPF state) per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF / PM_LD_REF_L1",
+ "MetricName": "l3pref_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_LD_REF_L1",
+ "MetricName": "ll4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_LD_REF_L1",
+ "MetricName": "lmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4) / PM_LD_REF_L1",
+ "MetricName": "local_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_REJECT - PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_STORE) / (PM_LD_REF_L1 - PM_LD_MISS_L1)",
+ "MetricName": "lsu_stall_avg_cyc_per_l1hit_stfw"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 or L3 on a different chip (remote or distant) per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_OFF_CHIP_CACHE / PM_LD_REF_L1",
+ "MetricName": "off_chip_cache_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 or L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_ON_CHIP_CACHE / PM_LD_REF_L1",
+ "MetricName": "on_chip_cache_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote chip's Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4) / PM_LD_REF_L1",
+ "MetricName": "remote_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable resources or facilities",
+ "MetricExpr": "PM_ISU_REJECT_RES_NA *100/ PM_RUN_INST_CMPL",
+ "MetricName": "resource_na_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a remote chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD / PM_LD_REF_L1",
+ "MetricName": "rl2l3_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a remote chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR / PM_LD_REF_L1",
+ "MetricName": "rl2l3_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL4 / PM_LD_REF_L1",
+ "MetricName": "rl4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_LD_REF_L1",
+ "MetricName": "rmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected due to SAR Bypass",
+ "MetricExpr": "PM_ISU_REJECT_SAR_BYPASS *100/ PM_RUN_INST_CMPL",
+ "MetricName": "sar_bypass_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable sources",
+ "MetricExpr": "PM_ISU_REJECT_SRC_NA *100/ PM_RUN_INST_CMPL",
+ "MetricName": "source_na_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Store forward rate",
+ "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / PM_RUN_INST_CMPL",
+ "MetricName": "store_forward_rate_percent"
+ },
+ {
+ "BriefDescription": "Store forward rate",
+ "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / (PM_LD_REF_L1 - PM_LD_MISS_L1)",
+ "MetricName": "store_forward_ratio_percent"
+ },
+ {
+ "BriefDescription": "Marked store latency, from core completion to L2 RC machine completion",
+ "MetricExpr": "(PM_MRK_ST_L2DISP_TO_CMPL_CYC + PM_MRK_ST_DRAIN_TO_L2DISP_CYC) / PM_MRK_ST_NEST",
+ "MetricName": "store_latency"
+ },
+ {
+ "BriefDescription": "Cycles stalled by any sync",
+ "MetricExpr": "(PM_CMPLU_STALL_LWSYNC + PM_CMPLU_STALL_HWSYNC) / PM_RUN_INST_CMPL",
+ "MetricName": "sync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Percentage of lines that were prefetched into the L3 and evicted before they were consumed",
+ "MetricExpr": "(PM_L3_CO_MEPF / 2) / PM_L3_PREF_ALL * 100",
+ "MetricName": "wasted_l3_prefetch_percent"
+ }
+]
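The metric expressions above are plain arithmetic over PMU event counts; perf evaluates them with its own expression parser after a counting run. As a rough illustration only (not perf's evaluator, and with invented counter values), the Python sketch below computes two of the power8 metrics defined above:

    # Minimal sketch: evaluate two of the power8 MetricExpr strings over
    # sample event counts. All counter values here are invented.
    counts = {
        "PM_RC_USAGE": 1_200_000,
        "PM_RUN_CYC": 10_000_000,
        "PM_LD_REF_L1": 5_000_000,
        "PM_LD_MISS_L1": 250_000,
    }

    metrics = {
        # 1 of 16 RC machines is sampled each L2 cycle, so the sampled busy
        # fraction is scaled by 16 to estimate machines in use.
        "l2_rc_usage": "(PM_RC_USAGE / PM_RUN_CYC) * 16",
        # Fraction of L1 load references that hit.
        "l1_ld_hit_ratio": "(PM_LD_REF_L1 - PM_LD_MISS_L1) / PM_LD_REF_L1",
    }

    for name, expr in metrics.items():
        # The expressions are arithmetic-only, so a restricted eval() is
        # enough for illustration; perf uses a real expression parser.
        value = eval(expr, {"__builtins__": {}}, counts)
        print(f"{name} = {value:.4f}")

With this file installed under pmu-events, the same values should be obtainable directly from a counting run, e.g. with something like `perf stat -M l2_rc_usage` on a POWER8 system.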
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index 704302c3e67d..9dc2f6b70354 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -348,18 +348,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x517082",
- "EventName": "PM_CO_DISP_FAIL",
- "BriefDescription": "CO dispatch failed due to all CO machines being busy",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x527084",
- "EventName": "PM_CO_TM_SC_FOOTPRINT",
- "BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3608a",
"EventName": "PM_CO_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
@@ -1578,36 +1566,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x617082",
- "EventName": "PM_ISIDE_DISP",
- "BriefDescription": "All i-side dispatch attempts",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x627084",
- "EventName": "PM_ISIDE_DISP_FAIL",
- "BriefDescription": "All i-side dispatch attempts that failed due to a addr collision with another machine",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x627086",
- "EventName": "PM_ISIDE_DISP_FAIL_OTHER",
- "BriefDescription": "All i-side dispatch attempts that failed due to a reason other than addrs collision",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x4608e",
"EventName": "PM_ISIDE_L2MEMACC",
"BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
"PublicDescription": ""
},
{,
- "EventCode": "0x44608e",
- "EventName": "PM_ISIDE_MRU_TOUCH",
- "BriefDescription": "Iside L2 MRU touch",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x30ac",
"EventName": "PM_ISU_REF_FX0",
"BriefDescription": "FX0 ISU reject",
@@ -1734,222 +1698,36 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x417080",
- "EventName": "PM_L2_CASTOUT_MOD",
- "BriefDescription": "L2 Castouts - Modified (M, Mu, Me)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x417082",
- "EventName": "PM_L2_CASTOUT_SHR",
- "BriefDescription": "L2 Castouts - Shared (T, Te, Si, S)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x27084",
"EventName": "PM_L2_CHIP_PUMP",
"BriefDescription": "RC requests that were local on chip pump attempts",
"PublicDescription": ""
},
{,
- "EventCode": "0x427086",
- "EventName": "PM_L2_DC_INV",
- "BriefDescription": "Dcache invalidates from L2",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x44608c",
- "EventName": "PM_L2_DISP_ALL_L2MISS",
- "BriefDescription": "All successful Ld/St dispatches for this thread that were an L2miss",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x27086",
"EventName": "PM_L2_GROUP_PUMP",
"BriefDescription": "RC requests that were on Node Pump attempts",
"PublicDescription": ""
},
{,
- "EventCode": "0x626084",
- "EventName": "PM_L2_GRP_GUESS_CORRECT",
- "BriefDescription": "L2 guess grp and guess was correct (data intra-6chip AND ^on-chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x626086",
- "EventName": "PM_L2_GRP_GUESS_WRONG",
- "BriefDescription": "L2 guess grp and guess was not correct (ie data on-chip OR beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x427084",
- "EventName": "PM_L2_IC_INV",
- "BriefDescription": "Icache Invalidates from L2",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x436088",
- "EventName": "PM_L2_INST",
- "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x43608a",
- "EventName": "PM_L2_INST_MISS",
- "BriefDescription": "All successful i-side dispatches that were an L2miss for this thread (excludes i_l2mru_tch reqs)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x416080",
- "EventName": "PM_L2_LD",
- "BriefDescription": "All successful D-side Load dispatches for this thread",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x437088",
- "EventName": "PM_L2_LD_DISP",
- "BriefDescription": "All successful load dispatches",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x43708a",
- "EventName": "PM_L2_LD_HIT",
- "BriefDescription": "All successful load dispatches that were L2 hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x426084",
- "EventName": "PM_L2_LD_MISS",
- "BriefDescription": "All successful D-Side Load dispatches that were an L2miss for this thread",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x616080",
- "EventName": "PM_L2_LOC_GUESS_CORRECT",
- "BriefDescription": "L2 guess loc and guess was correct (ie data local)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x616082",
- "EventName": "PM_L2_LOC_GUESS_WRONG",
- "BriefDescription": "L2 guess loc and guess was not correct (ie data not on chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x516080",
- "EventName": "PM_L2_RCLD_DISP",
- "BriefDescription": "L2 RC load dispatch attempt",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x516082",
- "EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
- "BriefDescription": "L2 RC load dispatch attempt failed due to address collision with RC/CO/SN/SQ",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x526084",
- "EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
- "BriefDescription": "L2 RC load dispatch attempt failed due to other reasons",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x536088",
- "EventName": "PM_L2_RCST_DISP",
- "BriefDescription": "L2 RC store dispatch attempt",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53608a",
- "EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
- "BriefDescription": "L2 RC store dispatch attempt failed due to address collision with RC/CO/SN/SQ",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54608c",
- "EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
- "BriefDescription": "L2 RC store dispatch attempt failed due to other reasons",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x537088",
- "EventName": "PM_L2_RC_ST_DONE",
- "BriefDescription": "RC did st to line that was Tx or Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x63708a",
- "EventName": "PM_L2_RTY_LD",
- "BriefDescription": "RC retries on PB for any load from core",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3708a",
"EventName": "PM_L2_RTY_ST",
"BriefDescription": "RC retries on PB for any store from core",
"PublicDescription": ""
},
{,
- "EventCode": "0x54708c",
- "EventName": "PM_L2_SN_M_RD_DONE",
- "BriefDescription": "SNP dispatched for a read and was M",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54708e",
- "EventName": "PM_L2_SN_M_WR_DONE",
- "BriefDescription": "SNP dispatched for a write and was M",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53708a",
- "EventName": "PM_L2_SN_SX_I_DONE",
- "BriefDescription": "SNP dispatched and went from Sx or Tx to Ix",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x17080",
"EventName": "PM_L2_ST",
"BriefDescription": "All successful D-side store dispatches for this thread",
"PublicDescription": ""
},
{,
- "EventCode": "0x44708c",
- "EventName": "PM_L2_ST_DISP",
- "BriefDescription": "All successful store dispatches",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x44708e",
- "EventName": "PM_L2_ST_HIT",
- "BriefDescription": "All successful store dispatches that were L2Hits",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x17082",
"EventName": "PM_L2_ST_MISS",
"BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
"PublicDescription": ""
},
{,
- "EventCode": "0x636088",
- "EventName": "PM_L2_SYS_GUESS_CORRECT",
- "BriefDescription": "L2 guess sys and guess was correct (ie data beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x63608a",
- "EventName": "PM_L2_SYS_GUESS_WRONG",
- "BriefDescription": "L2 guess sys and guess was not correct (ie data ^beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x617080",
- "EventName": "PM_L2_SYS_PUMP",
- "BriefDescription": "RC requests that were system pump attempts",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x1e05e",
"EventName": "PM_L2_TM_REQ_ABORT",
"BriefDescription": "TM abort",
@@ -1962,36 +1740,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x23808a",
- "EventName": "PM_L3_CINJ",
- "BriefDescription": "l3 ci of cache inject",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x128084",
- "EventName": "PM_L3_CI_HIT",
- "BriefDescription": "L3 Castins Hit (total count",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x128086",
- "EventName": "PM_L3_CI_MISS",
- "BriefDescription": "L3 castins miss (total count",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x819082",
"EventName": "PM_L3_CI_USAGE",
"BriefDescription": "rotating sample of 16 CI or CO actives",
"PublicDescription": ""
},
{,
- "EventCode": "0x438088",
- "EventName": "PM_L3_CO",
- "BriefDescription": "l3 castout occurring ( does not include casthrough or log writes (cinj/dmaw)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x83908b",
"EventName": "PM_L3_CO0_ALLOC",
"BriefDescription": "lifetime, sample of CO machine 0 valid",
@@ -2010,120 +1764,18 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x238088",
- "EventName": "PM_L3_CO_LCO",
- "BriefDescription": "Total L3 castouts occurred on LCO",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x28084",
"EventName": "PM_L3_CO_MEM",
"BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
"PublicDescription": ""
},
{,
- "EventCode": "0xb19082",
- "EventName": "PM_L3_GRP_GUESS_CORRECT",
- "BriefDescription": "Initial scope=group and data from same group (near) (pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb3908a",
- "EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
- "BriefDescription": "Initial scope=group but data from local node. Predition too high",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb39088",
- "EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
- "BriefDescription": "Initial scope=group but data from outside group (far or rem). Prediction too Low",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x218080",
- "EventName": "PM_L3_HIT",
- "BriefDescription": "L3 Hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x138088",
- "EventName": "PM_L3_L2_CO_HIT",
- "BriefDescription": "L2 castout hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x13808a",
- "EventName": "PM_L3_L2_CO_MISS",
- "BriefDescription": "L2 castout miss",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x14808c",
- "EventName": "PM_L3_LAT_CI_HIT",
- "BriefDescription": "L3 Lateral Castins Hit",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x14808e",
- "EventName": "PM_L3_LAT_CI_MISS",
- "BriefDescription": "L3 Lateral Castins Miss",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x228084",
- "EventName": "PM_L3_LD_HIT",
- "BriefDescription": "L3 demand LD Hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x228086",
- "EventName": "PM_L3_LD_MISS",
- "BriefDescription": "L3 demand LD Miss",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x1e052",
"EventName": "PM_L3_LD_PREF",
"BriefDescription": "L3 Load Prefetches",
"PublicDescription": ""
},
{,
- "EventCode": "0xb19080",
- "EventName": "PM_L3_LOC_GUESS_CORRECT",
- "BriefDescription": "initial scope=node/chip and data from local node (local) (pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb29086",
- "EventName": "PM_L3_LOC_GUESS_WRONG",
- "BriefDescription": "Initial scope=node but data from out side local node (near or far or rem). Prediction too Low",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x218082",
- "EventName": "PM_L3_MISS",
- "BriefDescription": "L3 Misses",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54808c",
- "EventName": "PM_L3_P0_CO_L31",
- "BriefDescription": "l3 CO to L3.1 (lco) port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x538088",
- "EventName": "PM_L3_P0_CO_MEM",
- "BriefDescription": "l3 CO to memory port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x929084",
- "EventName": "PM_L3_P0_CO_RTY",
- "BriefDescription": "L3 CO received retry port 0",
- "PublicDescription": ""
- },
- {,
"EventCode": "0xa29084",
"EventName": "PM_L3_P0_GRP_PUMP",
"BriefDescription": "L3 pf sent with grp scope port 0",
@@ -2148,120 +1800,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0xa19080",
- "EventName": "PM_L3_P0_NODE_PUMP",
- "BriefDescription": "L3 pf sent with nodal scope port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x919080",
- "EventName": "PM_L3_P0_PF_RTY",
- "BriefDescription": "L3 PF received retry port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x939088",
- "EventName": "PM_L3_P0_SN_HIT",
- "BriefDescription": "L3 snoop hit port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x118080",
- "EventName": "PM_L3_P0_SN_INV",
- "BriefDescription": "Port0 snooper detects someone doing a store to a line thats Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x94908c",
- "EventName": "PM_L3_P0_SN_MISS",
- "BriefDescription": "L3 snoop miss port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa39088",
- "EventName": "PM_L3_P0_SYS_PUMP",
- "BriefDescription": "L3 pf sent with sys scope port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54808e",
- "EventName": "PM_L3_P1_CO_L31",
- "BriefDescription": "l3 CO to L3.1 (lco) port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53808a",
- "EventName": "PM_L3_P1_CO_MEM",
- "BriefDescription": "l3 CO to memory port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x929086",
- "EventName": "PM_L3_P1_CO_RTY",
- "BriefDescription": "L3 CO received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa29086",
- "EventName": "PM_L3_P1_GRP_PUMP",
- "BriefDescription": "L3 pf sent with grp scope port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x528086",
- "EventName": "PM_L3_P1_LCO_DATA",
- "BriefDescription": "lco sent with data port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x518082",
- "EventName": "PM_L3_P1_LCO_NO_DATA",
- "BriefDescription": "dataless l3 lco sent port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa4908e",
- "EventName": "PM_L3_P1_LCO_RTY",
- "BriefDescription": "L3 LCO received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa19082",
- "EventName": "PM_L3_P1_NODE_PUMP",
- "BriefDescription": "L3 pf sent with nodal scope port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x919082",
- "EventName": "PM_L3_P1_PF_RTY",
- "BriefDescription": "L3 PF received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x93908a",
- "EventName": "PM_L3_P1_SN_HIT",
- "BriefDescription": "L3 snoop hit port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x118082",
- "EventName": "PM_L3_P1_SN_INV",
- "BriefDescription": "Port1 snooper detects someone doing a store to a line thats Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x94908e",
- "EventName": "PM_L3_P1_SN_MISS",
- "BriefDescription": "L3 snoop miss port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa3908a",
- "EventName": "PM_L3_P1_SYS_PUMP",
- "BriefDescription": "L3 pf sent with sys scope port 1",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x84908d",
"EventName": "PM_L3_PF0_ALLOC",
"BriefDescription": "lifetime, sample of PF machine 0 valid",
@@ -2274,12 +1812,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x428084",
- "EventName": "PM_L3_PF_HIT_L3",
- "BriefDescription": "l3 pf hit in l3",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x18080",
"EventName": "PM_L3_PF_MISS_L3",
"BriefDescription": "L3 Prefetch missed in L3",
@@ -2370,42 +1902,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0xb29084",
- "EventName": "PM_L3_SYS_GUESS_CORRECT",
- "BriefDescription": "Initial scope=system and data from outside group (far or rem)(pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb4908c",
- "EventName": "PM_L3_SYS_GUESS_WRONG",
- "BriefDescription": "Initial scope=system but data from local or near. Predction too high",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x24808e",
- "EventName": "PM_L3_TRANS_PF",
- "BriefDescription": "L3 Transient prefetch",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x18081",
"EventName": "PM_L3_WI0_ALLOC",
"BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
"PublicDescription": "0.0"
},
{,
- "EventCode": "0x418080",
- "EventName": "PM_L3_WI0_BUSY",
- "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x418082",
- "EventName": "PM_L3_WI_USAGE",
- "BriefDescription": "rotating sample of 8 WI actives",
- "PublicDescription": ""
- },
- {,
"EventCode": "0xc080",
"EventName": "PM_LD_REF_L1_LSU0",
"BriefDescription": "LS0 L1 D cache load references counted at finish, gated by reject",
@@ -3312,12 +2814,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x328084",
- "EventName": "PM_NON_TM_RST_SC",
- "BriefDescription": "non tm snp rst tm sc",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x2001a",
"EventName": "PM_NTCG_ALL_FIN",
"BriefDescription": "Cycles after all instructions have finished to group completed",
@@ -3420,24 +2916,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x34808e",
- "EventName": "PM_RD_CLEARING_SC",
- "BriefDescription": "rd clearing sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x34808c",
- "EventName": "PM_RD_FORMING_SC",
- "BriefDescription": "rd forming sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x428086",
- "EventName": "PM_RD_HIT_PF",
- "BriefDescription": "rd machine hit l3 pf machine",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20004",
"EventName": "PM_REAL_SRQ_FULL",
"BriefDescription": "Out of real srq entries",
@@ -3504,18 +2982,6 @@
"PublicDescription": "TLBIE snoopSnoop TLBIE"
},
{,
- "EventCode": "0x338088",
- "EventName": "PM_SNP_TM_HIT_M",
- "BriefDescription": "snp tm st hit m mu",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x33808a",
- "EventName": "PM_SNP_TM_HIT_T",
- "BriefDescription": "snp tm_st_hit t tn te",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x4608c",
"EventName": "PM_SN_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
@@ -3534,12 +3000,6 @@
"PublicDescription": "STCX executed reported at sent to nest42"
},
{,
- "EventCode": "0x717080",
- "EventName": "PM_ST_CAUSED_FAIL",
- "BriefDescription": "Non TM St caused any thread to fail",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3090",
"EventName": "PM_SWAP_CANCEL",
"BriefDescription": "SWAP cancel , rtag not available",
@@ -3624,18 +3084,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x318082",
- "EventName": "PM_TM_CAM_OVERFLOW",
- "BriefDescription": "l3 tm cam overflow during L2 co of SC",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x74708c",
- "EventName": "PM_TM_CAP_OVERFLOW",
- "BriefDescription": "TM Footprint Capactiy Overflow",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20ba",
"EventName": "PM_TM_END_ALL",
"BriefDescription": "Tm any tend",
@@ -3690,48 +3138,6 @@
"PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
},
{,
- "EventCode": "0x727086",
- "EventName": "PM_TM_FAV_CAUSED_FAIL",
- "BriefDescription": "TM Load (fav) caused another thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x717082",
- "EventName": "PM_TM_LD_CAUSED_FAIL",
- "BriefDescription": "Non TM Ld caused any thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x727084",
- "EventName": "PM_TM_LD_CONF",
- "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x328086",
- "EventName": "PM_TM_RST_SC",
- "BriefDescription": "tm snp rst tm sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x318080",
- "EventName": "PM_TM_SC_CO",
- "BriefDescription": "l3 castout tm Sc line",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x73708a",
- "EventName": "PM_TM_ST_CAUSED_FAIL",
- "BriefDescription": "TM Store (fav or non-fav) caused another thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x737088",
- "EventName": "PM_TM_ST_CONF",
- "BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20bc",
"EventName": "PM_TM_TBEGIN",
"BriefDescription": "Tm nested tbegin",
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
new file mode 100644
index 000000000000..811c2a8c1c9e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
@@ -0,0 +1,1982 @@
+[
+ {
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_BR_PRED * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "br_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction per instruction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_BR_PRED_CCACHE * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_LSTACK / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_LSTACK/ PM_BR_PRED_LSTACK * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_misprediction_percent"
+ },
+ {
+ "BriefDescription": "% Branches Taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BRU_FIN",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "taken_branches_percent"
+ },
+ {
+ "BriefDescription": "Completion stall due to a Branch Unit",
+ "MetricExpr": "PM_CMPLU_STALL_BRU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was routed to the crypto execution pipe and was waiting to finish",
+ "MetricExpr": "PM_CMPLU_STALL_CRYPTO/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "crypto_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that missed the L1 and was waiting for the data to return from the nest",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dcache_miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a multi-cycle instruction issued to the Decimal Floating Point execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_DFLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dflong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency decimal floating ops.",
+ "MetricExpr": "(PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_DFLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dfu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was issued to the Decimal Floating Point execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_DFU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dfu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved off node memory/cache",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_REMOTE)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_distant_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l21_l31_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 with a conflict",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_conflict_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 without conflict",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_noconflict_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved in L2/L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss resolving missed the L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l3miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in local memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_lmem_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved outside of local memory",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_non_local_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved from remote chip (cache or memory)",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_remote_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency double precision ops.",
+ "MetricExpr": "(PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DPLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dp_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_DP/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dp_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_DPLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dplong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction is an EIEIO waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_EIEIO/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "eieio_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the next to finish instruction suffered an ERAT miss and the EMQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_EMQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "emq_full_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "emq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load or store that suffered a translation miss",
+ "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "erat_miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete",
+ "MetricExpr": "PM_CMPLU_STALL_EXCEPTION/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exception_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to execution units for other reasons.",
+ "MetricExpr": "(PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_PM - PM_CMPLU_STALL_CRYPTO - PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exec_unit_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to execution units (FXU/VSU/CRU)",
+ "MetricExpr": "PM_CMPLU_STALL_EXEC_UNIT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exec_unit_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because any of the 4 threads in the same core suffered a flush, which blocks completion",
+ "MetricExpr": "PM_CMPLU_STALL_FLUSH_ANY_THREAD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "flush_any_thread_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to a long latency scalar fixed point instruction (division, square root)",
+ "MetricExpr": "PM_CMPLU_STALL_FXLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxlong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency integer ops",
+ "MetricExpr": "(PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_FXLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to a scalar fixed point or CR instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes",
+ "MetricExpr": "PM_CMPLU_STALL_FXU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_NTC_ISSUE_HELD_DARQ_FULL + PM_NTC_ISSUE_HELD_ARB + PM_NTC_ISSUE_HELD_OTHER)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "issue_hold_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied",
+ "MetricExpr": "PM_CMPLU_STALL_LARX/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "larx_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that hit on an older store and it was waiting for store data",
+ "MetricExpr": "PM_CMPLU_STALL_LHS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lhs_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that missed in the L1 and the LMQ was unable to accept this load miss request because it was full",
+ "MetricExpr": "PM_CMPLU_STALL_LMQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lmq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load instruction with all its dependencies satisfied just going through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "load_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that was held in LSAQ because the LRQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_LRQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to LRQ miscellaneous reasons, lost arbitration to LMQ slot, bank collisions, set prediction cleanup, set prediction multihit and others",
+ "MetricExpr": "PM_CMPLU_STALL_LRQ_OTHER/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_other_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_LMQ_FULL + PM_CMPLU_STALL_ST_FWD + PM_CMPLU_STALL_LHS + PM_CMPLU_STALL_LSU_MFSPR + PM_CMPLU_STALL_LARX + PM_CMPLU_STALL_LRQ_OTHER)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load or store that was held in LSAQ because an older instruction from SRQ or LRQ won arbitration to the LSU pipe when this instruction tried to launch",
+ "MetricExpr": "PM_CMPLU_STALL_LSAQ_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsaq_arb_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_LRQ_FULL + PM_CMPLU_STALL_SRQ_FULL + PM_CMPLU_STALL_LSAQ_ARB)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsaq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was an LSU op (other than a load or a store) with all its dependencies met and just going through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_fin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall of one cycle because the LSU requested to flush the next iop in the sequence. It takes 1 cycle for the ISU to process this request before the LSU instruction is allowed to complete",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_FLUSH_NEXT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_flush_next_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a mfspr instruction targeting an LSU SPR and it was waiting for the register data to be returned",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_MFSPR/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_mfspr_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion LSU stall for other reasons",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_LSU_FIN - PM_CMPLU_STALL_STORE_FINISH - PM_CMPLU_STALL_STORE_DATA - PM_CMPLU_STALL_EIEIO - PM_CMPLU_STALL_STCX - PM_CMPLU_STALL_SLB - PM_CMPLU_STALL_TEND - PM_CMPLU_STALL_PASTE - PM_CMPLU_STALL_TLBIE - PM_CMPLU_STALL_STORE_PIPE_ARB - PM_CMPLU_STALL_STORE_FIN_ARB - PM_CMPLU_STALL_LOAD_FINISH + PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_LMQ_FULL - PM_CMPLU_STALL_ST_FWD - PM_CMPLU_STALL_LHS - PM_CMPLU_STALL_LSU_MFSPR - PM_CMPLU_STALL_LARX - PM_CMPLU_STALL_LRQ_OTHER + PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL - PM_CMPLU_STALL_LRQ_FULL - PM_CMPLU_STALL_SRQ_FULL - PM_CMPLU_STALL_LSAQ_ARB) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by LSU instruction",
+ "MetricExpr": "PM_CMPLU_STALL_LSU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the register and notifying the Effective Address Table (EAT)",
+ "MetricExpr": "PM_CMPLU_STALL_MTFPSCR/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "mtfpscr_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tbegin. This is a short delay, and it includes ROT",
+ "MetricExpr": "PM_CMPLU_STALL_NESTED_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nested_tbegin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tend and decrement the TEXASR nested level. This is a short delay",
+ "MetricExpr": "PM_CMPLU_STALL_NESTED_TEND/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nested_tend_stall_cpi"
+ },
+ {
+ "BriefDescription": "Number of cycles the ICT has no itags assigned to this thread",
+ "MetricExpr": "PM_ICT_NOSLOT_CYC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nothing_dispatched_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was one that must finish at dispatch.",
+ "MetricExpr": "PM_CMPLU_STALL_NTC_DISP_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_disp_fin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. This event is used to account for cycles in which work is being completed in the CPI stack",
+ "MetricExpr": "PM_NTC_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_fin_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to ntc flush",
+ "MetricExpr": "PM_CMPLU_STALL_NTC_FLUSH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_flush_stall_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch because it lost arbitration onto the issue pipe to another instruction (from the same thread or a different thread)",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_arb_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch because there are no slots in the DARQ for it",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_DARQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_darq_full_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch during regular pipeline cycles, or because the VSU is busy with multi-cycle instructions, or because of a write-back collision with VSU",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_OTHER/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles unaccounted for.",
+ "MetricExpr": "(PM_RUN_CYC - PM_1PLUS_PPC_CMPL - PM_CMPLU_STALL_THRD - PM_CMPLU_STALL - PM_ICT_NOSLOT_CYC)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall for other reasons",
+ "MetricExpr": "PM_CMPLU_STALL - PM_CMPLU_STALL_NTC_DISP_FIN - PM_CMPLU_STALL_NTC_FLUSH - PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_BRU)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a paste waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_PASTE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "paste_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was issued to the Permute execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_PM/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "pm_stall_cpi"
+ },
+ {
+ "BriefDescription": "Run cycles per run instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cpi"
+ },
+ {
+ "BriefDescription": "Run_cycles",
+ "MetricExpr": "PM_RUN_CYC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cyc_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_FXU + PM_CMPLU_STALL_DP + PM_CMPLU_STALL_DFU + PM_CMPLU_STALL_PM + PM_CMPLU_STALL_CRYPTO)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "scalar_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was awaiting L2 response for an SLB",
+ "MetricExpr": "PM_CMPLU_STALL_SLB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "slb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall while waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC",
+ "MetricExpr": "PM_CMPLU_STALL_SPEC_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "spec_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store that was held in LSAQ because the SRQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_SRQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "srq_full_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_STORE_DATA + PM_CMPLU_STALL_EIEIO + PM_CMPLU_STALL_STCX + PM_CMPLU_STALL_SLB + PM_CMPLU_STALL_TEND + PM_CMPLU_STALL_PASTE + PM_CMPLU_STALL_TLBIE + PM_CMPLU_STALL_STORE_PIPE_ARB + PM_CMPLU_STALL_STORE_FIN_ARB)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "srq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to store forward",
+ "MetricExpr": "PM_CMPLU_STALL_ST_FWD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "st_fwd_stall_cpi"
+ },
+ {
+ "BriefDescription": "Nothing completed and ICT not empty",
+ "MetricExpr": "PM_CMPLU_STALL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a stcx waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_STCX/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stcx_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the next to finish instruction was a store waiting on data",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_DATA/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_data_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store waiting for a slot in the store finish pipe. This means the instruction is ready to finish but there are instructions ahead of it, using the finish pipe",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_FIN_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_fin_arb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store with all its dependencies met, just waiting to go through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store waiting for the next relaunch opportunity after an internal reject. This means the instruction is ready to relaunch and tried once but lost arbitration",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_PIPE_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_pipe_arb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a tend instruction awaiting response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_TEND/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "tend_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion Stalled because the thread was blocked",
+ "MetricExpr": "PM_CMPLU_STALL_THRD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "thread_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a tlbie waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_TLBIE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "tlbie_stall_cpi"
+ },
+ {
+ "BriefDescription": "Vector stalls due to small latency double precision ops",
+ "MetricExpr": "(PM_CMPLU_STALL_VDP - PM_CMPLU_STALL_VDPLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdp_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a vector instruction issued to the Double Precision execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_VDP/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdp_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_VDPLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdplong_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_VFXU + PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vector_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to a long latency vector fixed point instruction (division, square root)",
+ "MetricExpr": "PM_CMPLU_STALL_VFXLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxlong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Vector stalls due to small latency integer ops",
+ "MetricExpr": "(PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VFXLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to a vector fixed point instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes",
+ "MetricExpr": "PM_CMPLU_STALL_VFXU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxu_stall_cpi"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 S tate, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads that came from the L3 and were brought into the L3 by a prefetch, per instruction completed",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_mepf_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 demand load misses per run instruction",
+ "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "l1_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 misses that result in a cache reload",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_miss_reloads_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant Memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from sources beyond the local L2",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads that came from L3 and were brought into the L3 by a prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_mepf_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from sources beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local Memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote Memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * PM_MRK_DATA_FROM_DL2L3_MOD_CYC / PM_MRK_DATA_FROM_DL2L3_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * PM_MRK_DATA_FROM_DL2L3_SHR_CYC / PM_MRK_DATA_FROM_DL2L3_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant L4 miss rates with measured DL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL4 * PM_MRK_DATA_FROM_DL4_CYC / PM_MRK_DATA_FROM_DL4 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant memory miss rates with measured DMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DMEM * PM_MRK_DATA_FROM_DMEM_CYC / PM_MRK_DATA_FROM_DMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 MOD miss rates with measured L21 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * PM_MRK_DATA_FROM_L21_MOD_CYC / PM_MRK_DATA_FROM_L21_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 SHR miss rates with measured L21 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * PM_MRK_DATA_FROM_L21_SHR_CYC / PM_MRK_DATA_FROM_L21_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2 miss rates with measured L2 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L2 * PM_MRK_DATA_FROM_L2_CYC / PM_MRK_DATA_FROM_L2 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l2_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 MOD miss rates with measured L31 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * PM_MRK_DATA_FROM_L31_MOD_CYC / PM_MRK_DATA_FROM_L31_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 SHR miss rates with measured L31 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * PM_MRK_DATA_FROM_L31_SHR_CYC / PM_MRK_DATA_FROM_L31_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L3 * PM_MRK_DATA_FROM_L3_CYC / PM_MRK_DATA_FROM_L3 / PM_CMPLU_STALL_DCACHE_MISS * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l3_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of Local memory miss rates with measured LMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_LMEM * PM_MRK_DATA_FROM_LMEM_CYC / PM_MRK_DATA_FROM_LMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "lmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * PM_MRK_DATA_FROM_RL2L3_MOD_CYC / PM_MRK_DATA_FROM_RL2L3_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * PM_MRK_DATA_FROM_RL2L3_SHR_CYC / PM_MRK_DATA_FROM_RL2L3_SHR / PM_CMPLU_STALL_DCACHE_MISS * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote L4 miss rates with measured RL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL4 * PM_MRK_DATA_FROM_RL4_CYC / PM_MRK_DATA_FROM_RL4 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote memory miss rates with measured RMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RMEM * PM_MRK_DATA_FROM_RMEM_CYC / PM_MRK_DATA_FROM_RMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "Branch Mispredict flushes per instruction",
+ "MetricExpr": "PM_FLUSH_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "general",
+ "MetricName": "br_mpred_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cpi"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100",
+ "MetricGroup": "general",
+ "MetricName": "disp_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "% DTLB miss rate per inst",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "dtlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Flush rate (%)",
+ "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Instructions per cycles",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "general",
+ "MetricName": "ipc"
+ },
+ {
+ "BriefDescription": "% ITLB miss rate per inst",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "itlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per L1 store ref",
+ "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 dmand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Run cycles per cycle",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC*100",
+ "MetricGroup": "general",
+ "MetricName": "run_cycles_percent"
+ },
+ {
+ "BriefDescription": "Instruction dispatch-to-completion ratio",
+ "MetricExpr": "PM_INST_DISP / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "speculation"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2 per Inst",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3 other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3 per Inst",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)",
+ "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "l1_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Icache Fetchs per Icache Miss",
+ "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_miss_reload"
+ },
+ {
+ "BriefDescription": "% of ICache reloads due to prefetch",
+ "MetricExpr": "PM_IC_PREF_WRITE * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_pref_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "%L2 Modified CO Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_CASTOUT_MOD/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_co_m_rd_util"
+ },
+ {
+ "BriefDescription": "L2 dcache invalidates per run inst (per core)",
+ "MetricExpr": "(PM_L2_DC_INV / 2) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_dc_inv_rate_percent"
+ },
+ {
+ "BriefDescription": "Demand load misses as a % of L2 LD dispatches (per thread)",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID / (PM_L2_LD / 2) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_dem_ld_disp_percent"
+ },
+ {
+ "BriefDescription": "L2 Icache invalidates per run inst (per core)",
+ "MetricExpr": "(PM_L2_IC_INV / 2) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ic_inv_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 Inst misses as a % of total L2 Inst dispatches (per thread)",
+ "MetricExpr": "PM_L2_INST_MISS / PM_L2_INST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_inst_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L2 Load hits",
+ "MetricExpr": "(PM_L2_LD_HIT / PM_RUN_CYC) / 2",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_hit_frequency"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L2 Load misses",
+ "MetricExpr": "(PM_L2_LD_MISS / PM_RUN_CYC) / 2",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_miss_frequency"
+ },
+ {
+ "BriefDescription": "L2 Load misses as a % of total L2 Load dispatches (per thread)",
+ "MetricExpr": "PM_L2_LD_MISS / PM_L2_LD * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "% L2 load disp attempts Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_rd_util"
+ },
+ {
+ "BriefDescription": "L2 load misses that require a cache write (4 pclks per disp attempt) % of pclks",
+ "MetricExpr": "((( PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ldmiss_wr_util"
+ },
+ {
+ "BriefDescription": "L2 local pump prediction success",
+ "MetricExpr": "PM_L2_LOC_GUESS_CORRECT / (PM_L2_LOC_GUESS_CORRECT + PM_L2_LOC_GUESS_WRONG) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_local_pred_correct_percent"
+ },
+ {
+ "BriefDescription": "L2 COs that were in M,Me,Mu state as a % of all L2 COs",
+ "MetricExpr": "PM_L2_CASTOUT_MOD / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_mod_co_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Load RC dispatch atampts that failed because of address collisions and cclass conflicts",
+ "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR )/ PM_L2_RCLD_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_ld_disp_addr_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Load RC dispatch attempts that failed",
+ "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR + PM_L2_RCLD_DISP_FAIL_OTHER)/ PM_L2_RCLD_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_ld_disp_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Store RC dispatch atampts that failed because of address collisions and cclass conflicts",
+ "MetricExpr": "PM_L2_RCST_DISP_FAIL_ADDR / PM_L2_RCST_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_st_disp_addr_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Store RC dispatch attempts that failed",
+ "MetricExpr": "(PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/ PM_L2_RCST_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_st_disp_fail_percent"
+ },
+ {
+ "BriefDescription": "L2 Cache Read Utilization (per core)",
+ "MetricExpr": "(((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100) + (((PM_L2_RCST_DISP/2)*4)/PM_RUN_CYC * 100) + (((PM_L2_CASTOUT_MOD/2)*4)/PM_RUN_CYC * 100)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rd_util_percent"
+ },
+ {
+ "BriefDescription": "L2 COs that were in T,Te,Si,S state as a % of all L2 COs",
+ "MetricExpr": "PM_L2_CASTOUT_SHR / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_shr_co_percent"
+ },
+ {
+ "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
+ "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "% L2 store disp attempts Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_RCST_DISP/2)*4) / PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_rd_util"
+ },
+ {
+ "BriefDescription": "L2 stores that require a cache write (4 pclks per disp attempt) % of pclks",
+ "MetricExpr": "((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_wr_util"
+ },
+ {
+ "BriefDescription": "L2 Cache Write Utilization (per core)",
+ "MetricExpr": "((((PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4) / PM_RUN_CYC * 100) + (((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_wr_util_percent"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L3 Load hits",
+ "MetricExpr": "(PM_L3_LD_HIT / PM_RUN_CYC) / 2",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_ld_hit_frequency"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L3 Load misses",
+ "MetricExpr": "(PM_L3_LD_MISS / PM_RUN_CYC) / 2",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_ld_miss_frequency"
+ },
+ {
+ "BriefDescription": "Average number of Write-in machines used. 1 of 8 WI machines is sampled every L3 cycle",
+ "MetricExpr": "(PM_L3_WI_USAGE / PM_RUN_CYC) * 8",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_wi_usage"
+ },
+ {
+ "BriefDescription": "Average icache miss latency",
+ "MetricExpr": "PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ",
+ "MetricGroup": "latency",
+ "MetricName": "average_il1_miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Distant L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4",
+ "MetricGroup": "latency",
+ "MetricName": "dl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Dmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM",
+ "MetricGroup": "latency",
+ "MetricName": "dmem_latency"
+ },
+ {
+ "BriefDescription": "average L1 miss latency using marked events",
+ "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1",
+ "MetricGroup": "latency",
+ "MetricName": "estimated_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L21 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l21_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L21 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l21_shr_latency"
+ },
+ {
+ "BriefDescription": "Marked L2 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l31_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l31_shr_latency"
+ },
+ {
+ "BriefDescription": "Marked L3 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
+ "MetricGroup": "latency",
+ "MetricName": "l3_latency"
+ },
+ {
+ "BriefDescription": "Local L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4",
+ "MetricGroup": "latency",
+ "MetricName": "ll4_latency"
+ },
+ {
+ "BriefDescription": "Marked Lmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM",
+ "MetricGroup": "latency",
+ "MetricName": "lmem_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Remote L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4",
+ "MetricGroup": "latency",
+ "MetricName": "rl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Rmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM",
+ "MetricGroup": "latency",
+ "MetricName": "rmem_latency"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "L4 locality(%)",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / (PM_DATA_FROM_LL4 + PM_DATA_FROM_RL4 + PM_DATA_FROM_DL4)",
+ "MetricGroup": "memory",
+ "MetricName": "l4_locality"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_rl4"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote and distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_rmem"
+ },
+ {
+ "BriefDescription": "Number of loads from remote memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_rmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Memory locality",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_RMEM + PM_DATA_FROM_DMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "mem_locality_percent"
+ },
+ {
+ "BriefDescription": "L1 Prefetches issued by the prefetch machine per instruction (per thread)",
+ "MetricExpr": "PM_L1_PREF / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "prefetch",
+ "MetricName": "l1_prefetch_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "derat_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT misses that result in an ERAT reload",
+ "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "derat_miss_reload_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 4K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 64K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_ratio"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio",
+ "MetricExpr": "PM_LSU_DERAT_MISS / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DSLB_Miss_Rate per inst",
+ "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "dslb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% ISLB miss rate per inst",
+ "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "islb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "ANY_SYNC_STALL_CPI",
+ "MetricExpr": "PM_CMPLU_STALL_ANY_SYNC / PM_RUN_INST_CMPL",
+ "MetricName": "any_sync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Avg. more than 1 instructions completed",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricName": "average_completed_instruction_set_size"
+ },
+ {
+ "BriefDescription": "% Branches per instruction",
+ "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "branches_per_inst"
+ },
+ {
+ "BriefDescription": "Cycles in which at least one instruction completes in this thread",
+ "MetricExpr": "PM_1PLUS_PPC_CMPL/PM_RUN_INST_CMPL",
+ "MetricName": "completion_cpi"
+ },
+ {
+ "BriefDescription": "cycles",
+ "MetricExpr": "PM_RUN_CYC",
+ "MetricName": "custom_secs"
+ },
+ {
+ "BriefDescription": "Percentage Cycles atleast one instruction dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
+ "MetricName": "cycles_atleast_one_inst_dispatched_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricName": "cycles_per_completed_instructions_set"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L4",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_l31_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L4",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles in which the oldest instruction is finished and ready to complete for waiting to get through the completion pipe",
+ "MetricExpr": "PM_NTC_ALL_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "finish_to_cmpl_cpi"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations",
+ "MetricExpr": "PM_FXU_FIN/PM_RUN_INST_CMPL",
+ "MetricName": "fixed_per_inst"
+ },
+ {
+ "BriefDescription": "All FXU Busy",
+ "MetricExpr": "PM_FXU_BUSY / PM_CYC",
+ "MetricName": "fxu_all_busy"
+ },
+ {
+ "BriefDescription": "All FXU Idle",
+ "MetricExpr": "PM_FXU_IDLE / PM_CYC",
+ "MetricName": "fxu_all_idle"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to Icache Miss and branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED_ICMISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_br_mpred_icmiss_cpi"
+ },
+ {
+ "BriefDescription": "ICT other stalls",
+ "MetricExpr": "(PM_ICT_NOSLOT_CYC - PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_BR_MPRED_ICMISS - PM_ICT_NOSLOT_BR_MPRED - PM_ICT_NOSLOT_DISP_HELD)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_cyc_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruciton is held at dispatch for any reason",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_hb_full_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_ISSQ/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_DISP_HELD - PM_ICT_NOSLOT_DISP_HELD_HB_FULL - PM_ICT_NOSLOT_DISP_HELD_SYNC - PM_ICT_NOSLOT_DISP_HELD_TBEGIN - PM_ICT_NOSLOT_DISP_HELD_ISSQ)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_SYNC/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_sync_cpi"
+ },
+ {
+ "BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_tbegin_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_IC_L3 - PM_ICT_NOSLOT_IC_L3MISS)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l2_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from the local L3",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l3_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3MISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to Icache Miss",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_MISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L2",
+ "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L3",
+ "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Average number of Castout machines used. 1 of 16 CO machines is sampled every L2 cycle",
+ "MetricExpr": "PM_CO_USAGE / PM_RUN_CYC * 16",
+ "MetricName": "l2_co_usage"
+ },
+ {
+ "BriefDescription": "Percent of instruction reads out of all L2 commands",
+ "MetricExpr": "PM_ISIDE_DISP * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_instr_commands_percent"
+ },
+ {
+ "BriefDescription": "Percent of loads out of all L2 commands",
+ "MetricExpr": "PM_L2_LD * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_ld_commands_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 store dispatches that failed per core",
+ "MetricExpr": "100 * (PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_rc_st_disp_fail_rate_percent"
+ },
+ {
+ "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle",
+ "MetricExpr": "PM_RC_USAGE / PM_RUN_CYC * 16",
+ "MetricName": "l2_rc_usage"
+ },
+ {
+ "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle",
+ "MetricExpr": "PM_SN_USAGE / PM_RUN_CYC * 8",
+ "MetricName": "l2_sn_usage"
+ },
+ {
+ "BriefDescription": "Percent of stores out of all L2 commands",
+ "MetricExpr": "PM_L2_ST * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_st_commands_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 store dispatches that failed per core",
+ "MetricExpr": "100 * (PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_st_disp_fail_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 dispatches per core",
+ "MetricExpr": "100 * PM_L2_RCST_DISP/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_st_disp_rate_percent"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)",
+ "MetricName": "l31_latency"
+ },
+ {
+ "BriefDescription": "PCT instruction loads",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricName": "loads_per_inst"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL",
+ "MetricName": "lsu_stall_dcache_miss_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because a different thread was using the completion pipe",
+ "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_EXCEPTION - PM_CMPLU_STALL_ANY_SYNC - PM_CMPLU_STALL_SYNC_PMU_INT - PM_CMPLU_STALL_SPEC_FINISH - PM_CMPLU_STALL_FLUSH_ANY_THREAD - PM_CMPLU_STALL_LSU_FLUSH_NEXT - PM_CMPLU_STALL_NESTED_TBEGIN - PM_CMPLU_STALL_NESTED_TEND - PM_CMPLU_STALL_MTFPSCR)/PM_RUN_INST_CMPL",
+ "MetricName": "other_thread_cmpl_stall"
+ },
+ {
+ "BriefDescription": "PCT instruction stores",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "stores_per_inst"
+ },
+ {
+ "BriefDescription": "ANY_SYNC_STALL_CPI",
+ "MetricExpr": "PM_CMPLU_STALL_SYNC_PMU_INT / PM_RUN_INST_CMPL",
+ "MetricName": "sync_pmu_int_stall_cpi"
+ }
+]
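Once these JSON files are compiled into perf by the jevents build step, each MetricExpr above is evaluated from the raw PMU event counts it references and reported under its MetricName. A minimal usage sketch on a POWER9 host, where "./workload" is a placeholder for any command:

    # Cycles per instruction, computed from PM_CYC / PM_INST_CMPL
    perf stat -M cpi ./workload

    # Report every metric in the dl1_reloads_percent_per_inst group at once
    perf stat -M dl1_reloads_percent_per_inst ./workload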
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
new file mode 100644
index 000000000000..93ddfd8053ca
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
@@ -0,0 +1,12 @@
+[
+ {
+ "EventName": "bp_l1_btb_correct",
+ "EventCode": "0x8a",
+ "BriefDescription": "L1 BTB Correction."
+ },
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 BTB Correction."
+ }
+]
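These entries give the raw Family 17h event codes symbolic names, so the events can be requested by name rather than by numeric code. A minimal sketch, again with "./workload" as a placeholder:

    # Count L1 and L2 BTB corrections (event codes 0x8a and 0x8b above)
    perf stat -e bp_l1_btb_correct,bp_l2_btb_correct ./workload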
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
new file mode 100644
index 000000000000..fad4af9142cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
@@ -0,0 +1,287 @@
+[
+ {
+ "EventName": "ic_fw32",
+ "EventCode": "0x80",
+ "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+ },
+ {
+ "EventName": "ic_fw32_miss",
+ "EventCode": "0x81",
+ "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_miss",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs."
+ },
+ {
+ "EventName": "bp_snp_re_sync",
+ "EventCode": "0x86",
+ "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_any",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ic_cache_inval.l2_invalidating_probe",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS).",
+ "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to L2 invalidating probe (external or LS).",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_cache_inval.fill_invalidated",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to overwriting fill response.",
+ "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to overwriting fill response.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "bp_tlb_rel",
+ "EventCode": "0x99",
+ "BriefDescription": "The number of ITLB reload requests."
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g1.other_requests",
+ "EventCode": "0x60",
+ "BriefDescription": "Events covered by l2_request_g2.",
+ "PublicDescription": "Requests to L2 Group1. Events covered by l2_request_g2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_request_g2.group1",
+ "EventCode": "0x61",
+ "BriefDescription": "All Group 1 commands not in unit0.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. All Group 1 commands not in unit0.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "RdSized, RdSized32, RdSized64.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSized, RdSized32, RdSized64.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "RdSizedNC, RdSized32NC, RdSized64NC.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSizedNC, RdSized32NC, RdSized64NC.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g2.smc_inval",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_originator",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_responses",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+ "EventCode": "0x62",
+ "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "PublicDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_write",
+ "EventCode": "0x63",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+ "BriefDescription": "LS to L2 WCB write requests.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_close",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB close requests.",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_wcb_req.zero_byte_store",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB zero byte store requests.",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_wcb_req.cl_zero",
+ "EventCode": "0x63",
+ "PublicDescription": "LS to L2 WCB cache line zeroing requests.",
+ "BriefDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "LS ReadBlock C/S Hit.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS ReadBlock C/S Hit.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "LS Read Block L Hit X.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block L Hit X.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "LsRdBlkL Hit Shared.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkL Hit Shared.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "LS Read Block C S L X Change to X Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block C S L X Change to X Miss.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Hit Exclusive Stale.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Exclusive Stale.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Hit Shared.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Shared.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Miss.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_fill_pending.l2_fill_busy",
+ "EventCode": "0x6d",
+ "BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+ "PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+ "UMask": "0x1"
+ }
+]
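
Note: the l2_latency.l2_cycles_waiting_on_fills description above defines a
derived metric rather than a raw count. A minimal sketch of the arithmetic,
assuming the counts were collected separately (the fill-count input is a
placeholder, not an event name from this file):

    # Average L2 fill latency in core cycles, per the event description:
    # the counter is pre-divided by four and aggregates both SMT threads,
    # so scale by 4 and divide by the fills observed on both threads.
    def avg_l2_fill_latency(cycles_waiting_on_fills, fills_both_threads):
        return (cycles_waiting_on_fills * 4.0) / fills_both_threads

    print(avg_l2_fill_latency(2500000, 50000))  # -> 200.0 cycles per fill
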
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
new file mode 100644
index 000000000000..7b285b0a7f35
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
@@ -0,0 +1,134 @@
+[
+ {
+ "EventName": "ex_ret_instr",
+ "EventCode": "0xc0",
+ "BriefDescription": "Retired Instructions."
+ },
+ {
+ "EventName": "ex_ret_cops",
+ "EventCode": "0xc1",
+ "BriefDescription": "Retired Uops.",
+ "PublicDescription": "The number of uOps retired. This includes all processor activity (instructions, exceptions, interrupts, microcode assists, etc.). The number of events logged per cycle can vary from 0 to 4."
+ },
+ {
+ "EventName": "ex_ret_brn",
+ "EventCode": "0xc2",
+ "BriefDescription": "[Retired Branch Instructions.",
+ "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_misp",
+ "EventCode": "0xc3",
+ "BriefDescription": "Retired Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of branch instructions retired, of any type, that were not correctly predicted. This includes those for which prediction is not attempted (far control transfers, exceptions and interrupts)."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn",
+ "EventCode": "0xc4",
+ "BriefDescription": "Retired Taken Branch Instructions.",
+ "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn_misp",
+ "EventCode": "0xc5",
+ "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_far",
+ "EventCode": "0xc6",
+ "BriefDescription": "Retired Far Control Transfers.",
+ "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+ },
+ {
+ "EventName": "ex_ret_brn_resync",
+ "EventCode": "0xc7",
+ "BriefDescription": "Retired Branch Resyncs.",
+ "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+ },
+ {
+ "EventName": "ex_ret_near_ret",
+ "EventCode": "0xc8",
+ "BriefDescription": "Retired Near Returns.",
+ "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+ },
+ {
+ "EventName": "ex_ret_near_ret_mispred",
+ "EventCode": "0xc9",
+ "BriefDescription": "Retired Near Returns Mispredicted.",
+ "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_brn_ind_misp",
+ "EventCode": "0xca",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+ "PublicDescription": "Retired Indirect Branch Instructions Mispredicted."
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "MMX instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "x87 instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_cond",
+ "EventCode": "0xd1",
+ "BriefDescription": "Retired Conditional Branch Instructions."
+ },
+ {
+ "EventName": "ex_ret_cond_misp",
+ "EventCode": "0xd2",
+ "BriefDescription": "Retired Conditional Branch Instructions Mispredicted."
+ },
+ {
+ "EventName": "ex_div_busy",
+ "EventCode": "0xd3",
+ "BriefDescription": "Div Cycles Busy count."
+ },
+ {
+ "EventName": "ex_div_count",
+ "EventCode": "0xd4",
+ "BriefDescription": "Div Op Count."
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "PublicDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of Ops tagged by IBS that retired.",
+ "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of Ops tagged by IBS.",
+ "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_fus_brnch_inst",
+ "EventCode": "0x1d0",
+ "BriefDescription": "The number of fused retired branch instructions retired per cycle. The number of events logged per cycle can vary from 0 to 3."
+ }
+]
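
Note: the retired-branch events above compose into simple ratios. A sketch,
assuming counts gathered with something like
'perf stat -e ex_ret_brn,ex_ret_brn_misp' (the invocation is illustrative):

    # Both events count every retired control-flow change, including far
    # transfers and interrupts, so the ratio covers all branch kinds.
    def branch_mispredict_rate(ex_ret_brn, ex_ret_brn_misp):
        return float(ex_ret_brn_misp) / ex_ret_brn if ex_ret_brn else 0.0

    print(branch_mispredict_rate(1000000, 23000))  # -> 0.023
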
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
new file mode 100644
index 000000000000..ea4711983d1d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
@@ -0,0 +1,168 @@
+[
+ {
+ "EventName": "fpu_pipe_assignment.dual",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number multi-pipe uOps.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to Pipe 3.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to Pipe 3.",
+ "UMask": "0xf"
+ },
+ {
+ "EventName": "fp_sched_empty",
+ "EventCode": "0x01",
+ "BriefDescription": "This is a speculative event. The number of cycles in which the FPU scheduler is empty. Note that some Ops like FP loads bypass the scheduler."
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.all",
+ "EventCode": "0x02",
+ "BriefDescription": "All Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8.",
+ "UMask": "0x7"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.div_sqr_r_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Divide and square root Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Divide and square root Ops.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.mul_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Multiply Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.add_sub_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Add/subtract Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Add/subtract Ops.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.all",
+ "EventCode": "0x03",
+ "BriefDescription": "All FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_mult_add_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision divide/square root FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision divide/square root FLOPS.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision multiply FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply FLOPS.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision add/subtract FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision add/subtract FLOPS.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_mult_add_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision divide/square root FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision divide/square root FLOPS.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision multiply FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision add/subtract FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision add/subtract FLOPS.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.optimized",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Scalar Ops optimized.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Scalar Ops optimized.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops eliminated.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops eliminated.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+ "PublicDescription": "The number of serializing Ops retired. x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 bottom-executing uOps retired.",
+ "PublicDescription": "The number of serializing Ops retired. x87 bottom-executing uOps retired.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+ "PublicDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE bottom-executing uOps retired.",
+ "PublicDescription": "The number of serializing Ops retired. SSE bottom-executing uOps retired.",
+ "UMask": "0x1"
+ }
+]
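
Note: unlike the uOp-based events above, fp_ret_sse_avx_ops.all counts
individual retired FLOPs (a multiply-add contributes two), so a FLOP rate
needs no per-instruction weighting. A sketch under that reading:

    # MFLOPS from fp_ret_sse_avx_ops.all over a measured interval.
    def mflops(fp_ret_sse_avx_ops_all, elapsed_seconds):
        return fp_ret_sse_avx_ops_all / elapsed_seconds / 1e6

    print(mflops(4.0e9, 2.0))  # -> 2000.0 MFLOPS
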
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
new file mode 100644
index 000000000000..fa2d60d4def0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
@@ -0,0 +1,162 @@
+[
+ {
+ "EventName": "ls_locks.bus_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+ "PublicDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_dispatch.ld_st_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Load-op-Stores.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_dispatch.store_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_dispatch.ld_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_stlf",
+ "EventCode": "0x35",
+ "BriefDescription": "Number of STLF hits."
+ },
+ {
+ "EventName": "ls_dc_accesses",
+ "EventCode": "0x40",
+ "BriefDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss or Reload off all sizes.",
+ "PublicDescription": "L1 DTLB Miss or Reload off all sizes.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 1G size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 1G size.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 2M size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 2M size.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 32K size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 32K size.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 4K size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 4K size.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 1G size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 1G size.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 2M size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 2M size.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 32K size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 32K size.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 4K size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 4K size.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_iside",
+ "EventCode": "0x46",
+ "BriefDescription": "Tablewalker allocation.",
+ "PublicDescription": "Tablewalker allocation.",
+ "UMask": "0xc"
+ },
+ {
+ "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_dside",
+ "EventCode": "0x46",
+ "BriefDescription": "Tablewalker allocation.",
+ "PublicDescription": "Tablewalker allocation.",
+ "UMask": "0x3"
+ },
+ {
+ "EventName": "ls_misal_accesses",
+ "EventCode": "0x47",
+ "BriefDescription": "Misaligned loads."
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_nta",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+ "PublicDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.store_prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+ "PublicDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.load_prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Prefetch, Prefetch_T0_T1_T2.",
+ "PublicDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_not_halted_cyc",
+ "EventCode": "0x76",
+ "BriefDescription": "Cycles not in Halt."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/other.json b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
new file mode 100644
index 000000000000..b26a00d05a2e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
@@ -0,0 +1,65 @@
+[
+ {
+ "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC to IC mode switch.",
+ "PublicDescription": "OC Mode Switch. OC to IC mode switch.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "IC to OC mode switch.",
+ "PublicDescription": "OC Mode Switch. IC to OC mode switch.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "RETIRE Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "AGSQ Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALU tokens total unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 3 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 2 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 1 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 36c903faed0b..71e9737f4614 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -73,7 +73,7 @@
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index e05c2c8458fc..d6984a3017e0 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -33,3 +33,4 @@ GenuineIntel-6-25,v2,westmereep-sp,core
GenuineIntel-6-2F,v2,westmereex,core
GenuineIntel-6-55-[01234],v1,skylakex,core
GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
+AuthenticAMD-23-[[:xdigit:]]+,v1,amdfam17h,core
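
Note: the new row keys the amdfam17h directory off the CPUID string perf
builds as vendor-family-model. A rough illustration of the match, with the
POSIX [[:xdigit:]] class rewritten for Python's re module (the sample CPUID
strings are hypothetical):

    import re

    # mapfile.csv pattern for family 17h, translated from POSIX syntax.
    pattern = re.compile(r"AuthenticAMD-23-[0-9a-fA-F]+")

    for cpuid in ("AuthenticAMD-23-1", "AuthenticAMD-23-71", "GenuineIntel-6-55-5"):
        # Any family-23 (0x17) model selects the amdfam17h event tables.
        print(cpuid, bool(pattern.match(cpuid)))
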
diff --git a/tools/perf/scripts/Build b/tools/perf/scripts/Build
index 41efd7e368b3..68d4b54574ad 100644
--- a/tools/perf/scripts/Build
+++ b/tools/perf/scripts/Build
@@ -1,2 +1,2 @@
-libperf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
-libperf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
+perf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
+perf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index 34faecf774ae..db0036129307 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -1,4 +1,4 @@
-libperf-y += Context.o
+perf-y += Context.o
CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
index aefc15c9444a..7d0e33ce6aba 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -1,3 +1,3 @@
-libperf-y += Context.o
+perf-y += Context.o
CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 334599c6032c..d2c22954800d 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -7,6 +7,8 @@
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
+from __future__ import print_function
+
import os
import sys
@@ -19,64 +21,64 @@ from perf_trace_context import *
unhandled = autodict()
def trace_begin():
- print "trace_begin"
+ print("trace_begin")
pass
def trace_end():
- print_unhandled()
+ print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, vec):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "vec=%s\n" % \
- (symbol_str("irq__softirq_entry", "vec", vec)),
+ print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))
def kmem__kmalloc(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, call_site, ptr, bytes_req, bytes_alloc,
- gfp_flags):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "call_site=%u, ptr=%u, bytes_req=%u, " \
- "bytes_alloc=%u, gfp_flags=%s\n" % \
+ print("call_site=%u, ptr=%u, bytes_req=%u, "
+ "bytes_alloc=%u, gfp_flags=%s" %
(call_site, ptr, bytes_req, bytes_alloc,
-
- flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))
def trace_unhandled(event_name, context, event_fields_dict):
- try:
- unhandled[event_name] += 1
- except TypeError:
- unhandled[event_name] = 1
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
- print "%-20s %5u %05u.%09u %8u %-20s " % \
- (event_name, cpu, secs, nsecs, pid, comm),
+ print("%-20s %5u %05u.%09u %8u %-20s " %
+ (event_name, cpu, secs, nsecs, pid, comm),
+ end=' ')
# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
- % (common_pc(context), trace_flag_str(common_flags(context)), \
- common_lock_depth(context))
+ print("common_preempt_count=%d, common_flags=%s, "
+ "common_lock_depth=%d, " %
+ (common_pc(context), trace_flag_str(common_flags(context)),
+ common_lock_depth(context)))
def print_unhandled():
- keys = unhandled.keys()
- if not keys:
- return
+ keys = unhandled.keys()
+ if not keys:
+ return
- print "\nunhandled events:\n\n",
+ print("\nunhandled events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
- for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ for event_name in keys:
+ print("%-40s %10d\n" % (event_name, unhandled[event_name]))
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 239cb0568ec3..2560a042dc6f 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -216,15 +216,15 @@ def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
pair(nr_migrated, nr_failed), None, None)
def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)
def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 4e843b9864ec..aa1e2cfa26a6 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -15,6 +15,8 @@
# for a x86 HW PMU event: PEBS with load latency data.
#
+from __future__ import print_function
+
import os
import sys
import math
@@ -37,7 +39,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
- print "In trace_begin:\n"
+ print("In trace_begin:\n")
#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -76,12 +78,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if ("dso" in param_dict):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
- if (param_dict.has_key("symbol")):
+ if ("symbol" in param_dict):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
@@ -102,7 +104,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
- print "In trace_end:\n"
+ print("In trace_end:\n")
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -123,29 +125,29 @@ def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
- print "There is %d records in gen_events table" % t[0]
+ print("There is %d records in gen_events table" % t[0])
if t[0] == 0:
return
- print "Statistics about the general events grouped by thread/symbol/dso: \n"
+ print("Statistics about the general events grouped by thread/symbol/dso: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dso
- print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
- print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))
#
# This function just shows the basic info, and we could do more with the
@@ -156,35 +158,35 @@ def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
- print "There is %d records in pebs_ll table" % t[0]
+ print("There is %d records in pebs_ll table" % t[0])
if t[0] == 0:
return
- print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+ print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
- print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
for row in dseq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
- print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
for row in latq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print(' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())]))
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 0564dd7377f2..c3eae1d77d36 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -199,6 +201,18 @@ import datetime
from PySide.QtSql import *
+if sys.version_info < (3, 0):
+ def toserverstr(str):
+ return str
+ def toclientstr(str):
+ return str
+else:
+ # Assume UTF-8 server_encoding and client_encoding
+ def toserverstr(str):
+ return bytes(str, "UTF_8")
+ def toclientstr(str):
+ return bytes(str, "UTF_8")
+
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
@@ -234,12 +248,17 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
+def printerr(*args, **kw_args):
+ print(*args, file=sys.stderr, **kw_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
def usage():
- print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+ printerr("where: columns 'all' or 'branches'")
+ printerr(" calls 'calls' => create calls and call_paths table")
+ printerr(" callchains 'callchains' => create call_paths table")
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -273,7 +292,7 @@ def do_query(q, s):
return
raise Exception("Query failed: " + q.lastError().text())
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database...")
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
@@ -394,7 +413,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
@@ -478,8 +498,9 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
- 'parent_call_path_id'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -504,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
' FROM samples')
-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
- file = open(path_name, "w+")
+ file = open(path_name, "wb+")
file.write(file_header)
return file
@@ -524,13 +545,13 @@ def copy_output_file_direct(file, table_name):
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
- conn = PQconnectdb("dbname = " + dbname)
+ conn = PQconnectdb(toclientstr("dbname = " + dbname))
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
- res = PQexec(conn, sql)
+ res = PQexec(conn, toclientstr(sql))
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
@@ -564,7 +585,7 @@ if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
def trace_begin():
- print datetime.datetime.today(), "Writing to intermediate files..."
+ printdate("Writing to intermediate files...")
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
@@ -575,11 +596,12 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def trace_end():
- print datetime.datetime.today(), "Copying to database..."
+ printdate("Copying to database...")
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
@@ -594,7 +616,7 @@ def trace_end():
if perf_db_export_calls:
copy_output_file(call_file, "calls")
- print datetime.datetime.today(), "Removing intermediate files..."
+ printdate("Removing intermediate files...")
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
@@ -609,7 +631,7 @@ def trace_end():
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
- print datetime.datetime.today(), "Adding primary keys"
+ printdate("Adding primary keys")
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -624,7 +646,7 @@ def trace_end():
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
- print datetime.datetime.today(), "Adding foreign keys"
+ printdate("Adding foreign keys")
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -657,10 +679,11 @@ def trace_end():
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -670,12 +693,14 @@ def sched__sched_switch(*x):
pass
def evsel_table(evsel_id, evsel_name, *x):
+ evsel_name = toserverstr(evsel_name)
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
+ root_dir = toserverstr(root_dir)
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -686,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
+ comm_str = toserverstr(comm_str)
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -697,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+ short_name = toserverstr(short_name)
+ long_name = toserverstr(long_name)
+ build_id = toserverstr(build_id)
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
@@ -705,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+ symbol_name = toserverstr(symbol_name)
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
+ name = toserverstr(name)
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
@@ -728,7 +759,7 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
-def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
- fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
- value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
+def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x):
+ fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq"
+ value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id)
call_file.write(value)
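
Note: the widened call_return_table record can be sanity-checked by
round-tripping the same format string; a small sketch with placeholder
values, field order as in the function above:

    import struct

    # A PGCOPY tuple is a 2-byte field count followed by
    # (4-byte length, value) pairs; the count is now 12 with parent_id.
    fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq"
    row = struct.pack(fmt, 12, 8, 1, 8, 2, 8, 3, 8, 4, 8, 5, 8, 6,
                      8, 7, 8, 8, 8, 9, 8, 10, 4, 0, 8, 99)
    print(struct.unpack_from("!h", row)[0], struct.calcsize(fmt))  # 12 142
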
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index 245caf2643ed..3b71902a5a21 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -60,11 +62,17 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
def usage():
- print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
+ printerr("where: columns 'all' or 'branches'");
+ printerr(" calls 'calls' => create calls and call_paths table");
+ printerr(" callchains 'callchains' => create call_paths table");
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -100,7 +108,7 @@ def do_query_(q):
return
raise Exception("Query failed: " + q.lastError().text())
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database ...")
db_exists = False
try:
@@ -222,7 +230,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
@@ -320,8 +329,9 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
- 'parent_call_path_id'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
+ 'parent_call_path_id,'
+ 'parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
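The revised CASE expression makes calls_view decode the flags column itself; the mapping it implements is small enough to restate as a sketch (unlisted values fall through the ELSE branch and show up numerically):

```python
# Pure-Python mirror of the calls_view flags decoding above.
FLAG_NAMES = {0: "", 1: "no call", 2: "no return",
              3: "no call/return", 6: "jump"}

def decode_flags(flags):
    return FLAG_NAMES.get(flags, str(flags))

assert decode_flags(3) == "no call/return"
assert decode_flags(6) == "jump"
assert decode_flags(5) == "5"   # ELSE branch: raw value
```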
@@ -373,10 +383,10 @@ if perf_db_export_calls or perf_db_export_callchains:
call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
call_query = QSqlQuery(db)
- call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+ call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
def trace_begin():
- print datetime.datetime.today(), "Writing records..."
+ printdate("Writing records...")
do_query(query, 'BEGIN TRANSACTION')
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
@@ -388,19 +398,21 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def trace_end():
do_query(query, 'END TRANSACTION')
- print datetime.datetime.today(), "Adding indexes"
+ printdate("Adding indexes")
if perf_db_export_calls:
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -452,4 +464,4 @@ def call_path_table(*x):
bind_exec(call_path_query, 4, x)
def call_return_table(*x):
- bind_exec(call_query, 11, x)
+ bind_exec(call_query, 12, x)
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index f278ce5ebab7..e38518cdcbc3 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
@@ -88,11 +88,20 @@
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+from __future__ import print_function
+
import sys
import weakref
import threading
import string
-import cPickle
+try:
+ # Python2
+ import cPickle as pickle
+ # size of pickled integer big enough for record size
+ glb_nsz = 8
+except ImportError:
+ import pickle
+ glb_nsz = 16
import re
import os
from PySide.QtCore import *
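glb_nsz moves into this try/except because it depends on which pickle module was imported: every record in the SQL fetcher's shared buffer is preceded by its own pickled length, so glb_nsz must bound the pickle of any plausible record size. A quick self-check of that assumption (the sizes are upper bounds, not exact):

```python
import pickle

glb_nsz = 16   # the Python 3 bound; Python 2 cPickle protocol 2 fits in 8
for n in (0, 1, 10000, 2 ** 31, 2 ** 62):
    assert len(pickle.dumps(n, pickle.HIGHEST_PROTOCOL)) <= glb_nsz
```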
@@ -102,6 +111,15 @@ from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
+# xrange is range in Python3
+try:
+ xrange
+except NameError:
+ xrange = range
+
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
# Data formatting helpers
def tohex(ip):
@@ -167,9 +185,10 @@ class Thread(QThread):
class TreeModel(QAbstractItemModel):
- def __init__(self, root, parent=None):
+ def __init__(self, glb, parent=None):
super(TreeModel, self).__init__(parent)
- self.root = root
+ self.glb = glb
+ self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
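The TreeModel change is a template-method refactor: the base class now owns glb and asks the subclass for the root via GetRoot(), rather than having every subclass construct a root and pass it up. A stripped-down sketch of the pattern, with plain classes standing in for the Qt ones:

```python
class Model(object):
    def __init__(self, glb):
        self.glb = glb
        self.root = self.GetRoot()   # hook supplied by the subclass

class CallGraph(Model):
    def GetRoot(self):
        return "root-for-" + self.glb

assert CallGraph("db").root == "root-for-db"
```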
@@ -557,24 +576,12 @@ class CallGraphRootItem(CallGraphLevelItemBase):
self.child_items.append(child_item)
self.child_count += 1
-# Context-sensitive call graph data model
+# Context-sensitive call graph data model base
-class CallGraphModel(TreeModel):
+class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
- super(CallGraphModel, self).__init__(CallGraphRootItem(glb), parent)
- self.glb = glb
-
- def columnCount(self, parent=None):
- return 7
-
- def columnHeader(self, column):
- headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
- return headers[column]
-
- def columnAlignment(self, column):
- alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
- return alignment[column]
+ super(CallGraphModelBase, self).__init__(glb, parent)
def FindSelect(self, value, pattern, query):
if pattern:
@@ -594,34 +601,7 @@ class CallGraphModel(TreeModel):
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
- QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
- " FROM calls"
- " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
- " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
- " WHERE symbols.name" + match +
- " GROUP BY comm_id, thread_id, call_path_id"
- " ORDER BY comm_id, thread_id, call_path_id")
-
- def FindPath(self, query):
- # Turn the query result into a list of ids that the tree view can walk
- # to open the tree at the right place.
- ids = []
- parent_id = query.value(0)
- while parent_id:
- ids.insert(0, parent_id)
- q2 = QSqlQuery(self.glb.db)
- QueryExec(q2, "SELECT parent_id"
- " FROM call_paths"
- " WHERE id = " + str(parent_id))
- if not q2.next():
- break
- parent_id = q2.value(0)
- # The call path root is not used
- if ids[0] == 1:
- del ids[0]
- ids.insert(0, query.value(2))
- ids.insert(0, query.value(1))
- return ids
+ self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
@@ -675,6 +655,201 @@ class CallGraphModel(TreeModel):
def FindDone(self, thread, callback, ids):
callback(ids)
+# Context-sensitive call graph data model
+
+class CallGraphModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallGraphRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " GROUP BY comm_id, thread_id, call_path_id"
+ " ORDER BY comm_id, thread_id, call_path_id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM call_paths"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ # The call path root is not used
+ if ids[0] == 1:
+ del ids[0]
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
+# Call tree data model level 2+ item base
+
+class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
+ super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
+ self.comm_id = comm_id
+ self.thread_id = thread_id
+ self.calls_id = calls_id
+ self.branch_count = branch_count
+ self.time = time
+
+ def Select(self):
+		self.query_done = True
+ if self.calls_id == 0:
+ comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
+ else:
+ comm_thread = ""
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
+ " ORDER BY call_time, calls.id")
+ while query.next():
+ child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model level three item
+
+class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
+ super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
+ dso = dsoname(dso)
+ self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+ self.dbid = calls_id
+
+# Call tree data model level two item
+
+class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
+ super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
+ self.dbid = thread_id
+
+ def Select(self):
+ super(CallTreeLevelTwoItem, self).Select()
+ for child_item in self.child_items:
+ self.time += child_item.time
+ self.branch_count += child_item.branch_count
+ for child_item in self.child_items:
+ child_item.data[4] = PercentToOneDP(child_item.time, self.time)
+ child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
+
+# Call tree data model level one item
+
+class CallTreeLevelOneItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, comm, parent_item):
+ super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
+ self.data = [comm, "", "", "", "", "", ""]
+ self.dbid = comm_id
+
+ def Select(self):
+		self.query_done = True
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT thread_id, pid, tid"
+ " FROM comm_threads"
+ " INNER JOIN threads ON thread_id = threads.id"
+ " WHERE comm_id = " + str(self.dbid))
+ while query.next():
+ child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model root item
+
+class CallTreeRootItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb):
+ super(CallTreeRootItem, self).__init__(glb, 0, None)
+ self.dbid = 0
+		self.query_done = True
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms")
+ while query.next():
+ if not query.value(0):
+ continue
+ child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call Tree data model
+
+class CallTreeModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallTreeRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT calls.id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " ORDER BY comm_id, thread_id, call_time, calls.id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM calls"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
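The two FindPath() variants differ only in which table carries the parent link: call_paths.parent_id for the call graph, calls.parent_id for the new call tree. The walk itself is identical; a minimal sketch with a hypothetical in-memory parent map:

```python
parents = {5: 3, 3: 1, 1: 0}   # id -> parent_id; 0 terminates the walk

def find_path(node_id):
    ids = []
    while node_id:
        ids.insert(0, node_id)
        node_id = parents.get(node_id, 0)
    return ids

# The tree view expands top-down, so ids come back root-first.
assert find_path(5) == [1, 3, 5]
```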
# Vertical widget layout
class VBox():
@@ -693,28 +868,16 @@ class VBox():
def Widget(self):
return self.vbox
-# Context-sensitive call graph window
-
-class CallGraphWindow(QMdiSubWindow):
+# Tree window base
- def __init__(self, glb, parent=None):
- super(CallGraphWindow, self).__init__(parent)
-
- self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
-
- self.view = QTreeView()
- self.view.setModel(self.model)
+class TreeWindowBase(QMdiSubWindow):
- for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
- self.view.setColumnWidth(c, w)
-
- self.find_bar = FindBar(self, self)
-
- self.vbox = VBox(self.view, self.find_bar.Widget())
-
- self.setWidget(self.vbox.Widget())
+ def __init__(self, parent=None):
+ super(TreeWindowBase, self).__init__(parent)
- AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+ self.model = None
+ self.view = None
+ self.find_bar = None
def DisplayFound(self, ids):
if not len(ids):
@@ -747,6 +910,53 @@ class CallGraphWindow(QMdiSubWindow):
if not found:
self.find_bar.NotFound()
+
+# Context-sensitive call graph window
+
+class CallGraphWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
+
+ self.view = QTreeView()
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+
+# Call tree window
+
+class CallTreeWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
+
+ self.view = QTreeView()
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
+
# Child data item finder
class ChildDataItemFinder():
@@ -812,10 +1022,6 @@ class ChildDataItemFinder():
glb_chunk_sz = 10000
-# size of pickled integer big enough for record size
-
-glb_nsz = 8
-
# Background process for SQL data fetcher
class SQLFetcherProcess():
@@ -874,7 +1080,7 @@ class SQLFetcherProcess():
return True
if space >= glb_nsz:
# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
- nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
@@ -892,9 +1098,9 @@ class SQLFetcherProcess():
self.wait_event.wait()
def AddToBuffer(self, obj):
- d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+ d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
- nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
@@ -1006,12 +1212,12 @@ class SQLFetcher(QObject):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
- n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+ n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
- n = cPickle.loads(self.buffer[0 : glb_nsz])
+ n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
- obj = cPickle.loads(self.buffer[pos : pos + n])
+ obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
@@ -1327,8 +1533,7 @@ class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
- super(BranchModel, self).__init__(BranchRootItem(), parent)
- self.glb = glb
+ super(BranchModel, self).__init__(glb, parent)
self.event_id = event_id
self.more = True
self.populated = 0
@@ -1352,6 +1557,9 @@ class BranchModel(TreeModel):
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
+ def GetRoot(self):
+ return BranchRootItem()
+
def columnCount(self, parent=None):
return 8
@@ -1398,18 +1606,28 @@ class BranchModel(TreeModel):
def HasMoreRecords(self):
return self.more
+# Report Variables
+
+class ReportVars():
+
+ def __init__(self, name = "", where_clause = "", limit = ""):
+ self.name = name
+ self.where_clause = where_clause
+ self.limit = limit
+
+ def UniqueId(self):
+ return str(self.where_clause + ";" + self.limit)
+
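ReportVars exists so that a report's identity is its selection, not just its title: LookupCreateModel() caches models by name, and two reports that differ only in WHERE clause or LIMIT must not share a cached model. A short illustration of the UniqueId() contract:

```python
class ReportVars(object):
    def __init__(self, name="", where_clause="", limit=""):
        self.name, self.where_clause, self.limit = name, where_clause, limit

    def UniqueId(self):
        return str(self.where_clause + ";" + self.limit)

a = ReportVars("branches", " AND cpu = 0 ", "")
b = ReportVars("branches", " AND cpu = 1 ", "")
assert a.UniqueId() != b.UniqueId()   # same name, different cache keys
```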
# Branch window
class BranchWindow(QMdiSubWindow):
- def __init__(self, glb, event_id, name, where_clause, parent=None):
+ def __init__(self, glb, event_id, report_vars, parent=None):
super(BranchWindow, self).__init__(parent)
- model_name = "Branch Events " + str(event_id)
- if len(where_clause):
- model_name = where_clause + " " + model_name
+ model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
- self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, where_clause))
+ self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
self.view = QTreeView()
self.view.setUniformRowHeights(True)
@@ -1427,7 +1645,7 @@ class BranchWindow(QMdiSubWindow):
self.setWidget(self.vbox.Widget())
- AddSubWindow(glb.mainwindow.mdi_area, self, name + " Branch Events")
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
def ResizeColumnToContents(self, column, n):
	# Using the view's resizeColumnToContents() here is extremely slow
@@ -1472,47 +1690,134 @@ class BranchWindow(QMdiSubWindow):
else:
self.find_bar.NotFound()
-# Dialog data item converted and validated using a SQL table
+# Line edit data item
-class SQLTableDialogDataItem():
+class LineEditDataItem(object):
- def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
self.glb = glb
self.label = label
self.placeholder_text = placeholder_text
- self.table_name = table_name
- self.match_column = match_column
- self.column_name1 = column_name1
- self.column_name2 = column_name2
self.parent = parent
+ self.id = id
- self.value = ""
+ self.value = default
- self.widget = QLineEdit()
+ self.widget = QLineEdit(default)
self.widget.editingFinished.connect(self.Validate)
self.widget.textChanged.connect(self.Invalidate)
self.red = False
self.error = ""
self.validated = True
- self.last_id = 0
- self.first_time = 0
- self.last_time = 2 ** 64
- if self.table_name == "<timeranges>":
- query = QSqlQuery(self.glb.db)
- QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
- if query.next():
- self.last_id = int(query.value(0))
- self.last_time = int(query.value(1))
- QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
- if query.next():
- self.first_time = int(query.value(0))
- if placeholder_text:
- placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
-
if placeholder_text:
self.widget.setPlaceholderText(placeholder_text)
+ def TurnTextRed(self):
+ if not self.red:
+ palette = QPalette()
+ palette.setColor(QPalette.Text,Qt.red)
+ self.widget.setPalette(palette)
+ self.red = True
+
+ def TurnTextNormal(self):
+ if self.red:
+ palette = QPalette()
+ self.widget.setPalette(palette)
+ self.red = False
+
+ def InvalidValue(self, value):
+ self.value = ""
+ self.TurnTextRed()
+ self.error = self.label + " invalid value '" + value + "'"
+ self.parent.ShowMessage(self.error)
+
+ def Invalidate(self):
+ self.validated = False
+
+ def DoValidate(self, input_string):
+ self.value = input_string.strip()
+
+ def Validate(self):
+ self.validated = True
+ self.error = ""
+ self.TurnTextNormal()
+ self.parent.ClearMessage()
+ input_string = self.widget.text()
+ if not len(input_string.strip()):
+ self.value = ""
+ return
+ self.DoValidate(input_string)
+
+ def IsValid(self):
+ if not self.validated:
+ self.Validate()
+ if len(self.error):
+ self.parent.ShowMessage(self.error)
+ return False
+ return True
+
+ def IsNumber(self, value):
+ try:
+ x = int(value)
+		except ValueError:
+ x = 0
+ return str(x) == value
+
+# Non-negative integer ranges dialog data item
+
+class NonNegativeIntegerRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.column_name = column_name
+
+ def DoValidate(self, input_string):
+ singles = []
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if "-" in value:
+ vrange = value.split("-")
+ if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return self.InvalidValue(value)
+ ranges.append(vrange)
+ else:
+ if not self.IsNumber(value):
+ return self.InvalidValue(value)
+ singles.append(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ if len(singles):
+ ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
+ self.value = " OR ".join(ranges)
+
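The split of SQLTableDialogDataItem into LineEditDataItem subclasses replaces the old table_name-based dispatch with one DoValidate() per kind of input. For the CPU-ranges item above, the SQL fragment it produces can be sketched without any Qt (validation omitted for brevity):

```python
def ranges_to_sql(column, text):
    singles, ranges = [], []
    for value in [x.strip() for x in text.split(",")]:
        if "-" in value:
            lo, hi = value.split("-")
            ranges.append("(" + column + " >= " + lo +
                          " AND " + column + " <= " + hi + ")")
        else:
            singles.append(value)
    if singles:
        ranges.append(column + " IN (" + ",".join(singles) + ")")
    return " OR ".join(ranges)

assert ranges_to_sql("cpu", "0,5-6") == "(cpu >= 5 AND cpu <= 6) OR cpu IN (0)"
```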
+# Positive integer dialog data item
+
+class PositiveIntegerDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
+ super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
+
+ def DoValidate(self, input_string):
+ if not self.IsNumber(input_string.strip()):
+ return self.InvalidValue(input_string)
+ value = int(input_string.strip())
+ if value <= 0:
+ return self.InvalidValue(input_string)
+ self.value = str(value)
+
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.table_name = table_name
+ self.match_column = match_column
+ self.column_name1 = column_name1
+ self.column_name2 = column_name2
+
def ValueToIds(self, value):
ids = []
query = QSqlQuery(self.glb.db)
@@ -1523,6 +1828,42 @@ class SQLTableDialogDataItem():
ids.append(str(query.value(0)))
return ids
+ def DoValidate(self, input_string):
+ all_ids = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ ids = self.ValueToIds(value)
+ if len(ids):
+ all_ids.extend(ids)
+ else:
+ return self.InvalidValue(value)
+ self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+ if self.column_name2:
+ self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+
+# Sample time ranges dialog data item converted and validated using 'samples' SQL table
+
+class SampleTimeRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ self.column_name = column_name
+
+ self.last_id = 0
+ self.first_time = 0
+ self.last_time = 2 ** 64
+
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+ if query.next():
+ self.last_id = int(query.value(0))
+ self.last_time = int(query.value(1))
+ QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+ if query.next():
+ self.first_time = int(query.value(0))
+ if placeholder_text:
+ placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+ super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
def IdBetween(self, query, lower_id, higher_id, order):
QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
if query.next():
@@ -1560,7 +1901,6 @@ class SQLTableDialogDataItem():
return str(lower_id)
def ConvertRelativeTime(self, val):
- print "val ", val
mult = 1
suffix = val[-2:]
if suffix == "ms":
@@ -1582,29 +1922,23 @@ class SQLTableDialogDataItem():
return str(val)
def ConvertTimeRange(self, vrange):
- print "vrange ", vrange
if vrange[0] == "":
vrange[0] = str(self.first_time)
if vrange[1] == "":
vrange[1] = str(self.last_time)
vrange[0] = self.ConvertRelativeTime(vrange[0])
vrange[1] = self.ConvertRelativeTime(vrange[1])
- print "vrange2 ", vrange
if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return False
- print "ok1"
beg_range = max(int(vrange[0]), self.first_time)
end_range = min(int(vrange[1]), self.last_time)
if beg_range > self.last_time or end_range < self.first_time:
return False
- print "ok2"
vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
- print "vrange3 ", vrange
return True
def AddTimeRange(self, value, ranges):
- print "value ", value
n = value.count("-")
if n == 1:
pass
@@ -1622,111 +1956,31 @@ class SQLTableDialogDataItem():
return True
return False
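ConvertRelativeTime(), only partially visible in this hunk, is what makes entries like '-10ms' in the help text work: a ms/us/ns suffix scales the number to nanoseconds, and negative values are taken relative to the end of the trace. A standalone sketch of that behaviour, with made-up trace bounds:

```python
first_time, last_time = 1000000000, 9000000000   # hypothetical trace bounds

def convert_relative_time(val):
    mult = {"ms": 1000000, "us": 1000, "ns": 1}.get(val[-2:])
    if mult is None:
        return val                     # treated as an absolute timestamp
    ns = int(val[:-2].strip()) * mult
    return str(ns + (last_time if ns < 0 else first_time))

assert convert_relative_time("-10ms") == str(last_time - 10000000)
assert convert_relative_time("10ms") == str(first_time + 10000000)
```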
- def InvalidValue(self, value):
- self.value = ""
- palette = QPalette()
- palette.setColor(QPalette.Text,Qt.red)
- self.widget.setPalette(palette)
- self.red = True
- self.error = self.label + " invalid value '" + value + "'"
- self.parent.ShowMessage(self.error)
+ def DoValidate(self, input_string):
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if not self.AddTimeRange(value, ranges):
+ return self.InvalidValue(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ self.value = " OR ".join(ranges)
- def IsNumber(self, value):
- try:
- x = int(value)
- except:
- x = 0
- return str(x) == value
+# Report Dialog Base
- def Invalidate(self):
- self.validated = False
+class ReportDialogBase(QDialog):
- def Validate(self):
- input_string = self.widget.text()
- self.validated = True
- if self.red:
- palette = QPalette()
- self.widget.setPalette(palette)
- self.red = False
- if not len(input_string.strip()):
- self.error = ""
- self.value = ""
- return
- if self.table_name == "<timeranges>":
- ranges = []
- for value in [x.strip() for x in input_string.split(",")]:
- if not self.AddTimeRange(value, ranges):
- return self.InvalidValue(value)
- ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
- self.value = " OR ".join(ranges)
- elif self.table_name == "<ranges>":
- singles = []
- ranges = []
- for value in [x.strip() for x in input_string.split(",")]:
- if "-" in value:
- vrange = value.split("-")
- if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
- return self.InvalidValue(value)
- ranges.append(vrange)
- else:
- if not self.IsNumber(value):
- return self.InvalidValue(value)
- singles.append(value)
- ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
- if len(singles):
- ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
- self.value = " OR ".join(ranges)
- elif self.table_name:
- all_ids = []
- for value in [x.strip() for x in input_string.split(",")]:
- ids = self.ValueToIds(value)
- if len(ids):
- all_ids.extend(ids)
- else:
- return self.InvalidValue(value)
- self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
- if self.column_name2:
- self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
- else:
- self.value = input_string.strip()
- self.error = ""
- self.parent.ClearMessage()
-
- def IsValid(self):
- if not self.validated:
- self.Validate()
- if len(self.error):
- self.parent.ShowMessage(self.error)
- return False
- return True
-
-# Selected branch report creation dialog
-
-class SelectedBranchDialog(QDialog):
-
- def __init__(self, glb, parent=None):
- super(SelectedBranchDialog, self).__init__(parent)
+ def __init__(self, glb, title, items, partial, parent=None):
+ super(ReportDialogBase, self).__init__(parent)
self.glb = glb
- self.name = ""
- self.where_clause = ""
+ self.report_vars = ReportVars()
- self.setWindowTitle("Selected Branches")
+ self.setWindowTitle(title)
self.setMinimumWidth(600)
- items = (
- ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
- ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
- ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
- ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
- ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
- ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
- ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
- ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
- ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
- )
- self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+ self.data_items = [x(glb, self) for x in items]
+
+ self.partial = partial
self.grid = QGridLayout()
@@ -1758,23 +2012,28 @@ class SelectedBranchDialog(QDialog):
self.setLayout(self.vbox);
def Ok(self):
- self.name = self.data_items[0].value
- if not self.name:
+ vars = self.report_vars
+ for d in self.data_items:
+ if d.id == "REPORTNAME":
+ vars.name = d.value
+ if not vars.name:
self.ShowMessage("Report name is required")
return
for d in self.data_items:
if not d.IsValid():
return
for d in self.data_items[1:]:
- if len(d.value):
- if len(self.where_clause):
- self.where_clause += " AND "
- self.where_clause += d.value
- if len(self.where_clause):
- self.where_clause = " AND ( " + self.where_clause + " ) "
- else:
- self.ShowMessage("No selection")
- return
+ if d.id == "LIMIT":
+ vars.limit = d.value
+ elif len(d.value):
+ if len(vars.where_clause):
+ vars.where_clause += " AND "
+ vars.where_clause += d.value
+ if len(vars.where_clause):
+ if self.partial:
+ vars.where_clause = " AND ( " + vars.where_clause + " ) "
+ else:
+ vars.where_clause = " WHERE " + vars.where_clause + " "
self.accept()
def ShowMessage(self, msg):
@@ -1783,6 +2042,23 @@ class SelectedBranchDialog(QDialog):
def ClearMessage(self):
self.status.setText("")
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Selected Branches"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
+ lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
+ super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
+
# Event list
def GetEventList(db):
@@ -1793,6 +2069,16 @@ def GetEventList(db):
events.append(query.value(0))
return events
+# Is a table selectable
+
+def IsSelectable(db, table, sql = ""):
+ query = QSqlQuery(db)
+ try:
+ QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
+	except Exception:
+ return False
+ return True
+
# SQL data preparation
def SQLTableDataPrep(query, count):
@@ -1818,12 +2104,13 @@ class SQLTableModel(TableModel):
progress = Signal(object)
- def __init__(self, glb, sql, column_count, parent=None):
+ def __init__(self, glb, sql, column_headers, parent=None):
super(SQLTableModel, self).__init__(parent)
self.glb = glb
self.more = True
self.populated = 0
- self.fetcher = SQLFetcher(glb, sql, lambda x, y=column_count: SQLTableDataPrep(x, y), self.AddSample)
+ self.column_headers = column_headers
+ self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
@@ -1861,6 +2148,12 @@ class SQLTableModel(TableModel):
def HasMoreRecords(self):
return self.more
+ def columnCount(self, parent=None):
+ return len(self.column_headers)
+
+ def columnHeader(self, column):
+ return self.column_headers[column]
+
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
@@ -1870,12 +2163,12 @@ class SQLAutoTableModel(SQLTableModel):
if table_name == "comm_threads_view":
# For now, comm_threads_view has no id column
sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
- self.column_headers = []
+ column_headers = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "PRAGMA table_info(" + table_name + ")")
while query.next():
- self.column_headers.append(query.value(1))
+ column_headers.append(query.value(1))
if table_name == "sqlite_master":
sql = "SELECT * FROM " + table_name
else:
@@ -1888,14 +2181,8 @@ class SQLAutoTableModel(SQLTableModel):
schema = "public"
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
- self.column_headers.append(query.value(0))
- super(SQLAutoTableModel, self).__init__(glb, sql, len(self.column_headers), parent)
-
- def columnCount(self, parent=None):
- return len(self.column_headers)
-
- def columnHeader(self, column):
- return self.column_headers[column]
+ column_headers.append(query.value(0))
+ super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
# Base class for custom ResizeColumnsToContents
@@ -1998,6 +2285,103 @@ def GetTableList(glb):
tables.append("information_schema.columns")
return tables
+# Top Calls data model
+
+class TopCallsModel(SQLTableModel):
+
+ def __init__(self, glb, report_vars, parent=None):
+ text = ""
+ if not glb.dbref.is_sqlite3:
+ text = "::text"
+ limit = ""
+ if len(report_vars.limit):
+ limit = " LIMIT " + report_vars.limit
+ sql = ("SELECT comm, pid, tid, name,"
+ " CASE"
+ " WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
+ " ELSE short_name"
+ " END AS dso,"
+ " call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
+ " CASE"
+ " WHEN (calls.flags = 1) THEN 'no call'" + text +
+ " WHEN (calls.flags = 2) THEN 'no return'" + text +
+ " WHEN (calls.flags = 3) THEN 'no call/return'" + text +
+ " ELSE ''" + text +
+ " END AS flags"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " INNER JOIN comms ON calls.comm_id = comms.id"
+ " INNER JOIN threads ON calls.thread_id = threads.id" +
+ report_vars.where_clause +
+ " ORDER BY elapsed_time DESC" +
+ limit
+ )
+ column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
+ self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
+ super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
+
+ def columnAlignment(self, column):
+ return self.alignment[column]
+
+# Top Calls report creation dialog
+
+class TopCallsDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Top Calls by Elapsed Time"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
+ lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
+ super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
+
+# Top Calls window
+
+class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
+
+ def __init__(self, glb, report_vars, parent=None):
+ super(TopCallsWindow, self).__init__(parent)
+
+ self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
+ self.model = self.data_model
+
+ self.view = QTableView()
+ self.view.setModel(self.model)
+ self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+ self.view.verticalHeader().setVisible(False)
+
+ self.ResizeColumnsToContents()
+
+ self.find_bar = FindBar(self, self, True)
+
+ self.finder = ChildDataItemFinder(self.model)
+
+ self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
+
+ def Find(self, value, direction, pattern, context):
+ self.view.setFocus()
+ self.find_bar.Busy()
+ self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+ def FindDone(self, row):
+ self.find_bar.Idle()
+ if row >= 0:
+ self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ else:
+ self.find_bar.NotFound()
+
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
@@ -2099,8 +2483,10 @@ p.c2 {
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
-<p class=c2><a href=#allbranches>1.2 All branches</a></p>
-<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
+<p class=c2><a href=#allbranches>1.3 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
+<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
@@ -2136,7 +2522,10 @@ v- ls
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
-<h2 id=allbranches>1.2 All branches</h2>
+<h2 id=calltree>1.2 Call Tree</h2>
+The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
+Also the 'Count' column, which would always be 1, is replaced by 'Call Time'.
+<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
@@ -2162,10 +2551,10 @@ sudo ldconfig
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
-<h2 id=selectedbranches>1.3 Selected branches</h2>
+<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
-<h3>1.3.1 Time ranges</h3>
+<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of the trace. Examples:
<pre>
@@ -2176,6 +2565,10 @@ ms, us or ns. Also, negative values are relative to the end of trace. Examples:
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
+The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
+The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=tables>2. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
@@ -2305,10 +2698,17 @@ class MainWindow(QMainWindow):
edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
reports_menu = menu.addMenu("&Reports")
- reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+
+ if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
+ reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
self.EventMenu(GetEventList(glb.db), reports_menu)
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
+
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
@@ -2364,14 +2764,23 @@ class MainWindow(QMainWindow):
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
+ def NewCallTree(self):
+ CallTreeWindow(self.glb, self)
+
+ def NewTopCalls(self):
+ dialog = TopCallsDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ TopCallsWindow(self.glb, dialog.report_vars, self)
+
def NewBranchView(self, event_id):
- BranchWindow(self.glb, event_id, "", "", self)
+ BranchWindow(self.glb, event_id, ReportVars(), self)
def NewSelectedBranchView(self, event_id):
dialog = SelectedBranchDialog(self.glb, self)
ret = dialog.exec_()
if ret:
- BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+ BranchWindow(self.glb, event_id, dialog.report_vars, self)
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
@@ -2578,7 +2987,7 @@ class DBRef():
def Main():
if (len(sys.argv) < 2):
- print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+		printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}")
raise Exception("Too few arguments")
dbname = sys.argv[1]
@@ -2591,8 +3000,8 @@ def Main():
is_sqlite3 = False
try:
- f = open(dbname)
- if f.read(15) == "SQLite format 3":
+ f = open(dbname, "rb")
+ if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index cafeff3d74db..310efe5e7e23 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -5,6 +5,8 @@
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os
import sys
@@ -32,7 +34,7 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_error_totals()
@@ -56,23 +58,22 @@ def syscalls__sys_exit(event_name, context, common_cpu,
raw_syscalls__sys_exit(**locals())
def print_error_totals():
- if for_comm is not None:
- print "\nsyscall errors for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall errors:\n\n",
-
- print "%-30s %10s\n" % ("comm [pid]", "count"),
- print "%-30s %10s\n" % ("------------------------------", \
- "----------"),
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print "\n%s [%d]\n" % (comm, pid),
- id_keys = syscalls[comm][pid].keys()
- for id in id_keys:
- print " syscall: %-16s\n" % syscall_name(id),
- ret_keys = syscalls[comm][pid][id].keys()
- for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
- print " err = %-20s %10d\n" % (strerror(ret), val),
+ if for_comm is not None:
+ print("\nsyscall errors for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall errors:\n")
+
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print(" syscall: %-16s" % syscall_name(id))
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
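The sort-key rewrite in this hunk deals with a construct that is a hard error under Python 3: tuple parameters in lambdas were removed (PEP 3113), and dict.iteritems() is gone, so `lambda (k, v): (v, k)` cannot even be parsed. The replacement indexes the pair explicitly:

```python
errors = {"EPERM": 3, "ENOENT": 7}
# Python 2 only: sorted(errors.iteritems(), key=lambda (k, v): (v, k), ...)
by_count = sorted(errors.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
assert by_count == [("ENOENT", 7), ("EPERM", 3)]
```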
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 0f5cf437b602..0c4841acf75d 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -10,6 +10,8 @@
#
# Measures futex contention
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
@@ -33,18 +35,18 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
- if thread_blocktime.has_key(tid):
+ if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
- print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ print("%s[%d] lock %x contended %d times, %d avg ns" %
+ (process_names[tid], tid, lock, count, avg))
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index b19172d673af..a73847c8f548 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
#from Core import *
def trace_begin():
- print "Intel PT Power Events and PTWRITE"
+ print("Intel PT Power Events and PTWRITE")
def trace_end():
- print "End"
+ print("End")
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print "IP: %u payload: %#x" % (exact_ip, payload),
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
- print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
- print "hints: %#x extensions: %#x" % (hints, extensions),
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -57,13 +59,14 @@ def print_pwre(raw_buf):
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
- print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')
def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
- print "IP: %u" % (exact_ip),
+ print("IP: %u" % (exact_ip), end=' ')
def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -71,36 +74,39 @@ def print_pwrx(raw_buf):
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
- print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')
def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ end=' ')
def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
- print "%16x %s (%s)" % (ip, symbol, dso)
+ print("%16x %s (%s)" % (ip, symbol, dso))
def process_event(param_dict):
- event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
- comm = param_dict["comm"]
- name = param_dict["ev_name"]
-
- # Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if (param_dict.has_key("symbol")):
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if "dso" in param_dict:
+ dso = param_dict["dso"]
+ else:
+ dso = "[unknown]"
+
+ if "symbol" in param_dict:
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "[unknown]"
if name == "ptwrite":
print_common_start(comm, sample, name)
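All of the print_*() helpers in this script build one output line cooperatively, which is why the conversion uses end=' ' rather than a bare print(): it reproduces Python 2's trailing-comma behaviour. A two-line illustration:

```python
from __future__ import print_function

print("IP: 1", end=' ')   # Python 2 wrote this as: print "IP: 1",
print("_dl_start (ld-2.19.so)")
# Output is a single line: "IP: 1 _dl_start (ld-2.19.so)"
```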
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index ebee2c5ae496..1f332e72b9b0 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -4,6 +4,8 @@
# Copyright (c) 2018, Intel Corporation.
from __future__ import division
+from __future__ import print_function
+
import os
import sys
import struct
@@ -31,21 +33,24 @@ def parse_iomem():
for i, j in enumerate(f):
m = re.split('-|:',j,2)
if m[2].strip() == 'System RAM':
- system_ram.append(long(m[0], 16))
- system_ram.append(long(m[1], 16))
+ system_ram.append(int(m[0], 16))
+ system_ram.append(int(m[1], 16))
if m[2].strip() == 'Persistent Memory':
- pmem.append(long(m[0], 16))
- pmem.append(long(m[1], 16))
+ pmem.append(int(m[0], 16))
+ pmem.append(int(m[1], 16))
def print_memory_type():
- print "Event: %s" % (event_name)
- print "%-40s %10s %10s\n" % ("Memory type", "count", "percentage"),
- print "%-40s %10s %10s\n" % ("----------------------------------------", \
+ print("Event: %s" % (event_name))
+ print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
+ print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
+		  end='')
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
- key = lambda(k, v): (v, k), reverse = True):
- print "%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d %10.1f%%\n" %
+ (mem_type, count, 100 * count / total),
+ end='')
def trace_begin():
parse_iomem()
@@ -80,7 +85,7 @@ def find_memory_type(phys_addr):
f.seek(0, 0)
for j in f:
m = re.split('-|:',j,2)
- if long(m[0], 16) <= phys_addr <= long(m[1], 16):
+ if int(m[0], 16) <= phys_addr <= int(m[1], 16):
return m[2]
return "N/A"
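The long() to int() substitutions are safe in both directions: Python 3 has a single integer type, and Python 2's int() already promotes to long on overflow. Parsing a (made-up) /proc/iomem line the way this script does works unchanged under either interpreter:

```python
import re

line = "0000000100000000-000000087fffffff : System RAM"
m = re.split('-|:', line, 2)
start, end = int(m[0], 16), int(m[1], 16)
assert m[2].strip() == "System RAM"
assert end - start == 0x77fffffff
```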
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index a150164b44a3..101059971738 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -1,11 +1,13 @@
# Monitor the system for dropped packets and produce a report of drop locations and counts
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -50,19 +52,19 @@ def get_sym(sloc):
return (None, 0)
def print_drop_table():
- print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
+ print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
- print "%25s %25s %25s" % (sym, off, drop_log[i])
+ print("%25s %25s %25s" % (sym, off, drop_log[i]))
def trace_begin():
- print "Starting trace (Ctrl-C to dump results)"
+ print("Starting trace (Ctrl-C to dump results)")
def trace_end():
- print "Gathering kallsyms data"
+ print("Gathering kallsyms data")
get_kallsyms_table()
print_drop_table()
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 9b2050f778f1..ea0c8b90a783 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -8,6 +8,8 @@
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
+from __future__ import print_function
+
import os
import sys
@@ -17,6 +19,7 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
from perf_trace_context import *
from Core import *
from Util import *
+from functools import cmp_to_key
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
@@ -61,12 +64,12 @@ def diff_msec(src, dst):
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
- print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
+ print("%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" %
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
- diff_msec(hunk['xmit_t'], hunk['free_t']))
+ diff_msec(hunk['xmit_t'], hunk['free_t'])))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
@@ -98,55 +101,57 @@ def print_receive(hunk):
if show_hunk == 0:
return
- print "%d.%06dsec cpu=%d" % \
- (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
+ print("%d.%06dsec cpu=%d" %
+ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu))
for i in range(len(irq_list)):
- print PF_IRQ_ENTRY % \
+ print(PF_IRQ_ENTRY %
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
- irq_list[i]['irq'], irq_list[i]['name'])
- print PF_JOINT
+ irq_list[i]['irq'], irq_list[i]['name']))
+ print(PF_JOINT)
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
- print PF_NET_RX % \
+ print(PF_NET_RX %
(diff_msec(base_t, irq_event['time']),
- irq_event['skbaddr'])
- print PF_JOINT
- print PF_SOFT_ENTRY % \
- diff_msec(base_t, hunk['sirq_ent_t'])
- print PF_JOINT
+ irq_event['skbaddr']))
+ print(PF_JOINT)
+ print(PF_SOFT_ENTRY %
+ diff_msec(base_t, hunk['sirq_ent_t']))
+ print(PF_JOINT)
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
- print PF_NAPI_POLL % \
- (diff_msec(base_t, event['event_t']), event['dev'])
+ print(PF_NAPI_POLL %
+ (diff_msec(base_t, event['event_t']),
+ event['dev']))
if i == len(event_list) - 1:
- print ""
+ print("")
else:
- print PF_JOINT
+ print(PF_JOINT)
else:
- print PF_NET_RECV % \
- (diff_msec(base_t, event['event_t']), event['skbaddr'],
- event['len'])
+ print(PF_NET_RECV %
+ (diff_msec(base_t, event['event_t']),
+ event['skbaddr'],
+ event['len']))
if 'comm' in event.keys():
- print PF_WJOINT
- print PF_CPY_DGRAM % \
+ print(PF_WJOINT)
+ print(PF_CPY_DGRAM %
(diff_msec(base_t, event['comm_t']),
- event['pid'], event['comm'])
+ event['pid'], event['comm']))
elif 'handle' in event.keys():
- print PF_WJOINT
+ print(PF_WJOINT)
if event['handle'] == "kfree_skb":
- print PF_KFREE_SKB % \
+ print(PF_KFREE_SKB %
(diff_msec(base_t,
event['comm_t']),
- event['location'])
+ event['location']))
elif event['handle'] == "consume_skb":
- print PF_CONS_SKB % \
+ print(PF_CONS_SKB %
diff_msec(base_t,
- event['comm_t'])
- print PF_JOINT
+ event['comm_t']))
+ print(PF_JOINT)
def trace_begin():
global show_tx
@@ -172,8 +177,7 @@ def trace_begin():
def trace_end():
# order all events in time
- all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
- b[EINFO_IDX_TIME]))
+ all_event_list.sort(key=cmp_to_key(lambda a,b :a[EINFO_IDX_TIME] < b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
@@ -210,19 +214,19 @@ def trace_end():
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
- print " dev len Qdisc " \
- " netdevice free"
+ print(" dev len Qdisc "
+ " netdevice free")
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
- print "debug buffer status"
- print "----------------------------"
- print "xmit Qdisc:remain:%d overflow:%d" % \
- (len(tx_queue_list), of_count_tx_queue_list)
- print "xmit netdevice:remain:%d overflow:%d" % \
- (len(tx_xmit_list), of_count_tx_xmit_list)
- print "receive:remain:%d overflow:%d" % \
- (len(rx_skb_list), of_count_rx_skb_list)
+ print("debug buffer status")
+ print("----------------------------")
+ print("xmit Qdisc:remain:%d overflow:%d" %
+ (len(tx_queue_list), of_count_tx_queue_list))
+ print("xmit netdevice:remain:%d overflow:%d" %
+ (len(tx_xmit_list), of_count_tx_xmit_list))
+ print("receive:remain:%d overflow:%d" %
+ (len(rx_skb_list), of_count_rx_skb_list))
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
@@ -254,7 +258,7 @@ def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, i
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
- dev_name, work=None, budget=None):
+ dev_name, work=None, budget=None):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name, work, budget)
all_event_list.append(event_info)
@@ -351,7 +355,7 @@ def handle_irq_softirq_exit(event_info):
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
- 'irq_list':irq_list, 'event_list':event_list}
+ 'irq_list':irq_list, 'event_list':event_list}
# merge information realted to a NET_RX softirq
receive_hunk_list.append(rec_data)
@@ -388,7 +392,7 @@ def handle_netif_receive_skb(event_info):
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
- 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
diff --git a/tools/perf/scripts/python/powerpc-hcalls.py b/tools/perf/scripts/python/powerpc-hcalls.py
index 00e0e7476e55..8b78dc790adb 100644
--- a/tools/perf/scripts/python/powerpc-hcalls.py
+++ b/tools/perf/scripts/python/powerpc-hcalls.py
@@ -4,6 +4,8 @@
#
# Hypervisor call statisics
+from __future__ import print_function
+
import os
import sys
@@ -149,7 +151,7 @@ hcall_table = {
}
def hcall_table_lookup(opcode):
- if (hcall_table.has_key(opcode)):
+ if (opcode in hcall_table):
return hcall_table[opcode]
else:
return opcode
@@ -157,8 +159,8 @@ def hcall_table_lookup(opcode):
print_ptrn = '%-28s%10s%10s%10s%10s'
def trace_end():
- print print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)')
- print '-' * 68
+ print(print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)'))
+ print('-' * 68)
for opcode in output:
h_name = hcall_table_lookup(opcode)
time = output[opcode]['time']
@@ -166,14 +168,14 @@ def trace_end():
min_t = output[opcode]['min']
max_t = output[opcode]['max']
- print print_ptrn % (h_name, cnt, min_t, max_t, time/cnt)
+ print(print_ptrn % (h_name, cnt, min_t, max_t, time//cnt))
def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
opcode, retval):
- if (d_enter.has_key(cpu) and d_enter[cpu].has_key(opcode)):
+ if (cpu in d_enter and opcode in d_enter[cpu]):
diff = nsecs(sec, nsec) - d_enter[cpu][opcode]
- if (output.has_key(opcode)):
+ if (opcode in output):
output[opcode]['time'] += diff
output[opcode]['cnt'] += 1
if (output[opcode]['min'] > diff):
@@ -190,11 +192,11 @@ def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
del d_enter[cpu][opcode]
# else:
-# print "Can't find matching hcall_enter event. Ignoring sample"
+# print("Can't find matching hcall_enter event. Ignoring sample")
def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
callchain, opcode):
- if (d_enter.has_key(cpu)):
+ if (cpu in d_enter):
d_enter[cpu][opcode] = nsecs(sec, nsec)
else:
d_enter[cpu] = {opcode: nsecs(sec, nsec)}
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 3473e7f66081..8196e3087c9e 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
@@ -16,10 +14,10 @@ import sys
from collections import defaultdict
try:
- from UserList import UserList
+ from UserList import UserList
except ImportError:
- # Python 3: UserList moved to the collections package
- from collections import UserList
+ # Python 3: UserList moved to the collections package
+ from collections import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 61621b93affb..6e0278dcb092 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -8,7 +8,14 @@
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
-import os, sys, thread, time
+from __future__ import print_function
+
+import os, sys, time
+
+try:
+ import thread
+except ImportError:
+ import _thread as thread
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -62,18 +69,20 @@ def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
+ print("\nsyscall events for %s:\n" % (for_comm))
else:
- print "\nsyscall events:\n\n",
+ print("\nsyscall events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" %
+ ("----------------------------------------",
+ "----------"))
- for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
- reverse = True):
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]),
+ reverse = True):
try:
- print "%-40s %10d\n" % (syscall_name(id), val),
+ print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
pass
syscalls.clear()
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 1697b5e18c96..b1c4def1410a 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -19,13 +19,15 @@
# Written by Paolo Bonzini <pbonzini@redhat.com>
# Based on Brendan Gregg's stackcollapse-perf.pl script.
+from __future__ import print_function
+
import os
import sys
from collections import defaultdict
from optparse import OptionParser, make_option
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -120,7 +122,6 @@ def process_event(param_dict):
lines[stack_string] = lines[stack_string] + 1
def trace_end():
- list = lines.keys()
- list.sort()
+ list = sorted(lines)
for stack in list:
- print "%s %d" % (stack, lines[stack])
+ print("%s %d" % (stack, lines[stack]))
diff --git a/tools/perf/scripts/python/stat-cpi.py b/tools/perf/scripts/python/stat-cpi.py
index 8410672efb8b..01fa933ff3cf 100644
--- a/tools/perf/scripts/python/stat-cpi.py
+++ b/tools/perf/scripts/python/stat-cpi.py
@@ -1,6 +1,7 @@
-#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
data = {}
times = []
threads = []
@@ -20,8 +21,8 @@ def store_key(time, cpu, thread):
threads.append(thread)
def store(time, event, cpu, thread, val, ena, run):
- #print "event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % \
- # (event, cpu, thread, time, val, ena, run)
+ #print("event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" %
+ # (event, cpu, thread, time, val, ena, run))
store_key(time, cpu, thread)
key = get_key(time, event, cpu, thread)
@@ -59,7 +60,7 @@ def stat__interval(time):
if ins != 0:
cpi = cyc/float(ins)
- print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
+ print("%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins))
def trace_end():
pass
@@ -75,4 +76,4 @@ def trace_end():
# if ins != 0:
# cpi = cyc/float(ins)
#
-# print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)
+# print("time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi))
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index daf314cc5dd3..f254e40c6f0f 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -5,6 +5,8 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
@@ -31,17 +33,16 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
-
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
- (for_pid and common_pid != for_pid ):
+ (for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
@@ -49,26 +50,26 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall events by comm/pid:\n\n",
-
- print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "----------"),
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print "\n%s [%d]\n" % (comm, pid),
- id_keys = syscalls[comm][pid].keys()
- for id, val in sorted(syscalls[comm][pid].iteritems(), \
- key = lambda(k, v): (v, k), reverse = True):
- print " %-38s %10d\n" % (syscall_name(id), val),
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events by comm/pid:\n")
+
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index e66a7730aeb5..8adb95ff1664 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -5,6 +5,8 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os
import sys
@@ -28,14 +30,14 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
@@ -45,20 +47,19 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm, id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall events:\n\n",
-
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
-
- for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
- reverse = True):
- print "%-40s %10d\n" % (syscall_name(id), val),
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index 44090a9a19f3..cb39ac46bc73 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -1,6 +1,7 @@
-#! /usr/bin/python
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
import os
import sys
import glob
@@ -8,7 +9,11 @@ import optparse
import tempfile
import logging
import shutil
-import ConfigParser
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
def data_equal(a, b):
# Allow multiple values in assignment separated by '|'
@@ -100,20 +105,20 @@ class Event(dict):
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
- if not self.has_key(t) or not other.has_key(t):
+ if t not in self or t not in other:
return False
if not data_equal(self[t], other[t]):
return False
return True
def optional(self):
- if self.has_key('optional') and self['optional'] == '1':
+ if 'optional' in self and self['optional'] == '1':
return True
return False
def diff(self, other):
for t in Event.terms:
- if not self.has_key(t) or not other.has_key(t):
+ if t not in self or t not in other:
continue
if not data_equal(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
@@ -134,7 +139,7 @@ class Event(dict):
# - expected values assignments
class Test(object):
def __init__(self, path, options):
- parser = ConfigParser.SafeConfigParser()
+ parser = configparser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
@@ -193,7 +198,7 @@ class Test(object):
return True
def load_events(self, path, events):
- parser_event = ConfigParser.SafeConfigParser()
+ parser_event = configparser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains 'event' word,
@@ -207,7 +212,7 @@ class Test(object):
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
- parser_base = ConfigParser.SafeConfigParser()
+ parser_base = configparser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
@@ -322,9 +327,9 @@ def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
- except Unsup, obj:
+ except Unsup as obj:
log.warning("unsupp %s" % obj.getMsg())
- except Notest, obj:
+ except Notest as obj:
log.warning("skipped %s" % obj.getMsg())
def setup_log(verbose):
@@ -363,7 +368,7 @@ def main():
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
- action="count", dest="verbose")
+ default=0, action="count", dest="verbose")
options, args = parser.parse_args()
if args:
@@ -373,7 +378,7 @@ def main():
setup_log(options.verbose)
if not options.test_dir:
- print 'FAILED no -d option specified'
+ print('FAILED no -d option specified')
sys.exit(-1)
if not options.test:
@@ -382,8 +387,8 @@ def main():
try:
run_tests(options)
- except Fail, obj:
- print "FAILED %s" % obj.getMsg();
+ except Fail as obj:
+ print("FAILED %s" % obj.getMsg())
sys.exit(-1)
sys.exit(0)
diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0
index cb0a3138fa54..93818054ae20 100644
--- a/tools/perf/tests/attr/test-record-C0
+++ b/tools/perf/tests/attr/test-record-C0
@@ -1,6 +1,6 @@
[config]
command = record
-args = -C 0 kill >/dev/null 2>&1
+args = --no-bpf-event -C 0 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-basic b/tools/perf/tests/attr/test-record-basic
index 85a23cf35ba1..b0ca42a5ecc9 100644
--- a/tools/perf/tests/attr/test-record-basic
+++ b/tools/perf/tests/attr/test-record-basic
@@ -1,6 +1,6 @@
[config]
command = record
-args = kill >/dev/null 2>&1
+args = --no-bpf-event kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-any b/tools/perf/tests/attr/test-record-branch-any
index 81f839e2fad0..1a99b3ce6b89 100644
--- a/tools/perf/tests/attr/test-record-branch-any
+++ b/tools/perf/tests/attr/test-record-branch-any
@@ -1,6 +1,6 @@
[config]
command = record
-args = -b kill >/dev/null 2>&1
+args = --no-bpf-event -b kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any b/tools/perf/tests/attr/test-record-branch-filter-any
index 357421f4dfce..709768b508c6 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any
+++ b/tools/perf/tests/attr/test-record-branch-filter-any
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any kill >/dev/null 2>&1
+args = --no-bpf-event -j any kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_call b/tools/perf/tests/attr/test-record-branch-filter-any_call
index dbc55f2ab845..f943221f7825 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any_call
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_call
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any_call kill >/dev/null 2>&1
+args = --no-bpf-event -j any_call kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_ret b/tools/perf/tests/attr/test-record-branch-filter-any_ret
index a0824ff8e131..fd4f5b4154a9 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any_ret
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_ret
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any_ret kill >/dev/null 2>&1
+args = --no-bpf-event -j any_ret kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-hv b/tools/perf/tests/attr/test-record-branch-filter-hv
index f34d6f120181..4e52d685ebe1 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-hv
+++ b/tools/perf/tests/attr/test-record-branch-filter-hv
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j hv kill >/dev/null 2>&1
+args = --no-bpf-event -j hv kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-ind_call b/tools/perf/tests/attr/test-record-branch-filter-ind_call
index b86a35232248..e08c6ab3796e 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-ind_call
+++ b/tools/perf/tests/attr/test-record-branch-filter-ind_call
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j ind_call kill >/dev/null 2>&1
+args = --no-bpf-event -j ind_call kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-k b/tools/perf/tests/attr/test-record-branch-filter-k
index d3fbc5e1858a..b4b98f84fc2f 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-k
+++ b/tools/perf/tests/attr/test-record-branch-filter-k
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j k kill >/dev/null 2>&1
+args = --no-bpf-event -j k kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-u b/tools/perf/tests/attr/test-record-branch-filter-u
index a318f0dda173..fb9610edbb0d 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-u
+++ b/tools/perf/tests/attr/test-record-branch-filter-u
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j u kill >/dev/null 2>&1
+args = --no-bpf-event -j u kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-count b/tools/perf/tests/attr/test-record-count
index 34f6cc577263..5e9b9019d786 100644
--- a/tools/perf/tests/attr/test-record-count
+++ b/tools/perf/tests/attr/test-record-count
@@ -1,6 +1,6 @@
[config]
command = record
-args = -c 123 kill >/dev/null 2>&1
+args = --no-bpf-event -c 123 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-data b/tools/perf/tests/attr/test-record-data
index a9cf2233b0ce..a99bb13149c2 100644
--- a/tools/perf/tests/attr/test-record-data
+++ b/tools/perf/tests/attr/test-record-data
@@ -1,6 +1,6 @@
[config]
command = record
-args = -d kill >/dev/null 2>&1
+args = --no-bpf-event -d kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-freq b/tools/perf/tests/attr/test-record-freq
index bf4cb459f0d5..89e29f6b2ae0 100644
--- a/tools/perf/tests/attr/test-record-freq
+++ b/tools/perf/tests/attr/test-record-freq
@@ -1,6 +1,6 @@
[config]
command = record
-args = -F 100 kill >/dev/null 2>&1
+args = --no-bpf-event -F 100 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-default b/tools/perf/tests/attr/test-record-graph-default
index 0b216e69760c..5d8234d50845 100644
--- a/tools/perf/tests/attr/test-record-graph-default
+++ b/tools/perf/tests/attr/test-record-graph-default
@@ -1,6 +1,6 @@
[config]
command = record
-args = -g kill >/dev/null 2>&1
+args = --no-bpf-event -g kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-dwarf b/tools/perf/tests/attr/test-record-graph-dwarf
index da2fa73bd0a2..ae92061d611d 100644
--- a/tools/perf/tests/attr/test-record-graph-dwarf
+++ b/tools/perf/tests/attr/test-record-graph-dwarf
@@ -1,6 +1,6 @@
[config]
command = record
-args = --call-graph dwarf -- kill >/dev/null 2>&1
+args = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-fp b/tools/perf/tests/attr/test-record-graph-fp
index 625d190bb798..5630521c0b0f 100644
--- a/tools/perf/tests/attr/test-record-graph-fp
+++ b/tools/perf/tests/attr/test-record-graph-fp
@@ -1,6 +1,6 @@
[config]
command = record
-args = --call-graph fp kill >/dev/null 2>&1
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index 618ba1c17474..14ee60fd3f41 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -1,6 +1,6 @@
[config]
command = record
-args = --group -e cycles,instructions kill >/dev/null 2>&1
+args = --no-bpf-event --group -e cycles,instructions kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index f0729c454f16..300b9f7e6d69 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -1,6 +1,6 @@
[config]
command = record
-args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+args = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 48e8bd12fe46..3ffe246e0228 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -1,6 +1,6 @@
[config]
command = record
-args = -e '{cycles,instructions}' kill >/dev/null 2>&1
+args = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-buffering b/tools/perf/tests/attr/test-record-no-buffering
index aa3956d8fe20..583dcbb078ba 100644
--- a/tools/perf/tests/attr/test-record-no-buffering
+++ b/tools/perf/tests/attr/test-record-no-buffering
@@ -1,6 +1,6 @@
[config]
command = record
-args = --no-buffering kill >/dev/null 2>&1
+args = --no-bpf-event --no-buffering kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-inherit b/tools/perf/tests/attr/test-record-no-inherit
index 560943decb87..15d1dc162e1c 100644
--- a/tools/perf/tests/attr/test-record-no-inherit
+++ b/tools/perf/tests/attr/test-record-no-inherit
@@ -1,6 +1,6 @@
[config]
command = record
-args = -i kill >/dev/null 2>&1
+args = --no-bpf-event -i kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-samples b/tools/perf/tests/attr/test-record-no-samples
index 8eb73ab639e0..596fbd6d5a2c 100644
--- a/tools/perf/tests/attr/test-record-no-samples
+++ b/tools/perf/tests/attr/test-record-no-samples
@@ -1,6 +1,6 @@
[config]
command = record
-args = -n kill >/dev/null 2>&1
+args = --no-bpf-event -n kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-period b/tools/perf/tests/attr/test-record-period
index 69bc748f0f27..119101154c5e 100644
--- a/tools/perf/tests/attr/test-record-period
+++ b/tools/perf/tests/attr/test-record-period
@@ -1,6 +1,6 @@
[config]
command = record
-args = -c 100 -P kill >/dev/null 2>&1
+args = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-raw b/tools/perf/tests/attr/test-record-raw
index a188a614a44c..13a5f7860c78 100644
--- a/tools/perf/tests/attr/test-record-raw
+++ b/tools/perf/tests/attr/test-record-raw
@@ -1,6 +1,6 @@
[config]
command = record
-args = -R kill >/dev/null 2>&1
+args = --no-bpf-event -R kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 6d598cc071ae..1a9c3becf5ff 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -18,7 +18,7 @@ static void testcase(void)
int i;
for (i = 0; i < NR_ITERS; i++) {
- char proc_name[10];
+ char proc_name[15];
snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
prctl(PR_SET_NAME, proc_name);
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index a20cbc445426..57fc544aedb0 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -15,7 +15,6 @@
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
-#include <sys/ioctl.h>
#include "tests.h"
#include "debug.h"
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index dbf2c69944d2..4ebd2681e760 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -15,6 +15,8 @@
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "event.h"
#include "thread.h"
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 7c8d2e422401..077c306c1cae 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -10,6 +10,7 @@
#include "../util/unwind.h"
#include "perf_regs.h"
#include "map.h"
+#include "symbol.h"
#include "thread.h"
#include "callchain.h"
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 5f8501c68da4..71f60c0f9faa 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
return -1;
}
- is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED);
+ is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED);
if (should_be_signed && !is_signed) {
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
evsel->name, name, is_signed, should_be_signed);
@@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1;
}
- if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
@@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
ret = -1;
- if (perf_evsel__test_field(evsel, "next_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "next_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "next_pid", 4, true))
@@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1;
}
- if (perf_evsel__test_field(evsel, "comm", 16, true))
+ if (perf_evsel__test_field(evsel, "comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "pid", 4, true))
@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
ret = -1;
+ perf_evsel__delete(evsel);
return ret;
}
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 01f0706995a9..9acc1e80b936 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
const char *p;
const char **other;
double val;
- int ret;
+ int i, ret;
struct parse_ctx ctx;
int num_other;
@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
TEST_ASSERT_VAL("find other", other[3] == NULL);
+
+ for (i = 0; i < num_other; i++)
+ free((void *)other[i]);
free((void *)other);
return 0;
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index b889a28fd80b..469958cd7fe0 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include "perf.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -161,7 +162,7 @@ out:
void print_hists_in(struct hists *hists)
{
int i = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -170,7 +171,7 @@ void print_hists_in(struct hists *hists)
root = hists->entries_in;
pr_info("----- %s --------\n", __func__);
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@@ -191,13 +192,13 @@ void print_hists_in(struct hists *hists)
void print_hists_out(struct hists *hists)
{
int i = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
root = &hists->entries;
pr_info("----- %s --------\n", __func__);
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 65fe02bebbee..7a2eed6c783e 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -2,6 +2,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -125,8 +126,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -136,12 +137,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
- while (!RB_EMPTY_ROOT(root_out)) {
- node = rb_first(root_out);
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
- rb_erase(node, root_out);
- rb_erase(&he->rb_node_in, root_in);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@@ -198,7 +199,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
print_hists_out(hists);
}
- root = &hists->entries;
+ root = &hists->entries.rb_root;
for (node = rb_first(root), i = 0;
node && (he = rb_entry(node, struct hist_entry, rb_node));
node = rb_next(node), i++) {
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 1c5bedab3c2c..975844807fe2 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 9a9d06cb0222..af633db63f4d 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -142,7 +142,7 @@ static int find_sample(struct sample *samples, size_t nr_samples,
static int __validate_match(struct hists *hists)
{
size_t count = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
/*
@@ -153,7 +153,7 @@ static int __validate_match(struct hists *hists)
else
root = hists->entries_in;
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@@ -192,7 +192,7 @@ static int __validate_link(struct hists *hists, int idx)
size_t count = 0;
size_t count_pair = 0;
size_t count_dummy = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
/*
@@ -205,7 +205,7 @@ static int __validate_link(struct hists *hists, int idx)
else
root = hists->entries_in;
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index faacb4f41460..0a510c524a5d 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -2,6 +2,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -91,8 +92,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -102,12 +103,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
- while (!RB_EMPTY_ROOT(root_out)) {
- node = rb_first(root_out);
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
- rb_erase(node, root_out);
- rb_erase(&he->rb_node_in, root_in);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@@ -126,7 +127,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = NULL;
@@ -162,7 +163,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
@@ -228,7 +229,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "overhead,cpu";
@@ -262,7 +263,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);
@@ -284,7 +285,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "comm,overhead,dso";
@@ -316,7 +317,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
@@ -358,7 +359,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "dso,sym,comm,overhead,dso";
@@ -394,7 +395,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
@@ -460,7 +461,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "cpu,pid,comm,dso,sym";
@@ -497,7 +498,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 5ede9b561d32..ba87e6e8d18c 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -11,6 +11,7 @@
#include "tests.h"
#include "machine.h"
#include "thread_map.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "util.h"
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index c531e6deb104..493ecb611540 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
- goto out_thread_map_delete;
+ goto out_cpu_map_delete;
}
if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -119,6 +119,8 @@ out_close_fd:
perf_evsel__close_fd(evsel);
out_evsel_delete:
perf_evsel__delete(evsel);
+out_cpu_map_delete:
+ cpu_map__put(cpus);
out_thread_map_delete:
thread_map__put(threads);
return err;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 3b97ac018d5a..4a69c07f4101 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1330,6 +1330,26 @@ static int test__checkevent_complex_name(struct perf_evlist *evlist)
return 0;
}
+static int test__sym_event_slash(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ return 0;
+}
+
+static int test__sym_event_dc(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ return 0;
+}
+
static int count_tracepoints(void)
{
struct dirent *events_ent;
@@ -1670,6 +1690,16 @@ static struct evlist_test test__events[] = {
.name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
.check = test__checkevent_complex_name,
.id = 53
+ },
+ {
+ .name = "cycles//u",
+ .check = test__sym_event_slash,
+ .id = 54,
+ },
+ {
+ .name = "cycles:k",
+ .check = test__sym_event_dc,
+ .id = 55,
}
};
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 7bedf8608fdd..14a78898d79e 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -4,7 +4,9 @@
#include "util.h"
#include "tests.h"
#include <errno.h>
+#include <stdio.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
/* Simulated format definitions. */
static struct test_format {
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 0e2d00d69e6e..236ce0d6c826 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "branch.h"
#include "util.h"
#include "event.h"
#include "evsel.h"
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
index 5059452d27dd..8bfaa630389c 100644
--- a/tools/perf/tests/sdt.c
+++ b/tools/perf/tests/sdt.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <sys/epoll.h>
#include <util/evlist.h>
+#include <util/symbol.h>
#include <linux/filter.h>
#include "tests.h"
#include "debug.h"
diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh
index 6293cc660947..e37787be672b 100644
--- a/tools/perf/tests/shell/lib/probe.sh
+++ b/tools/perf/tests/shell/lib/probe.sh
@@ -4,3 +4,8 @@ skip_if_no_perf_probe() {
perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
return 0
}
+
+skip_if_no_perf_trace() {
+ perf trace -h 2>&1 | grep -q -e 'is not a perf-command' -e 'trace command not available' && return 2
+ return 0
+}
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 50109f27ca07..147efeb6b195 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -12,6 +12,7 @@
. $(dirname $0)/lib/probe.sh
skip_if_no_perf_probe || exit 2
+skip_if_no_perf_trace || exit 2
. $(dirname $0)/lib/probe_vfs_getname.sh
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 637365099b7d..85f328ddf897 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -1,15 +1,15 @@
-libperf-y += clone.o
-libperf-y += fcntl.o
-libperf-y += flock.o
+perf-y += clone.o
+perf-y += fcntl.o
+perf-y += flock.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
-libperf-y += ioctl.o
+perf-y += ioctl.o
endif
-libperf-y += kcmp.o
-libperf-y += mount_flags.o
-libperf-y += pkey_alloc.o
-libperf-y += arch_prctl.o
-libperf-y += prctl.o
-libperf-y += renameat.o
-libperf-y += sockaddr.o
-libperf-y += socket.o
-libperf-y += statx.o
+perf-y += kcmp.o
+perf-y += mount_flags.o
+perf-y += pkey_alloc.o
+perf-y += arch_prctl.o
+perf-y += prctl.o
+perf-y += renameat.o
+perf-y += sockaddr.o
+perf-y += socket.o
+perf-y += statx.o
diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
index 620350d41209..52242fa4072b 100644
--- a/tools/perf/trace/beauty/ioctl.c
+++ b/tools/perf/trace/beauty/ioctl.c
@@ -175,7 +175,7 @@ static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, boo
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long cmd = arg->val;
- unsigned int fd = syscall_arg__val(arg, 0);
+ int fd = syscall_arg__val(arg, 0);
struct file *file = thread__files_entry(arg->thread, fd);
if (file != NULL) {
diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
index d66c66315987..ea68db08b8e7 100644
--- a/tools/perf/trace/beauty/msg_flags.c
+++ b/tools/perf/trace/beauty/msg_flags.c
@@ -29,7 +29,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
return scnprintf(bf, size, "NONE");
#define P_MSG_FLAG(n) \
if (flags & MSG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~MSG_##n; \
}
diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
index 6897fab40dcc..d4d10b33ba0e 100644
--- a/tools/perf/trace/beauty/waitid_options.c
+++ b/tools/perf/trace/beauty/waitid_options.c
@@ -11,7 +11,7 @@ static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
#define P_OPTION(n) \
if (options & W##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
options &= ~W##n; \
}
diff --git a/tools/perf/ui/Build b/tools/perf/ui/Build
index 0a73538c0441..3aff83c3275f 100644
--- a/tools/perf/ui/Build
+++ b/tools/perf/ui/Build
@@ -1,14 +1,14 @@
-libperf-y += setup.o
-libperf-y += helpline.o
-libperf-y += progress.o
-libperf-y += util.o
-libperf-y += hist.o
-libperf-y += stdio/hist.o
+perf-y += setup.o
+perf-y += helpline.o
+perf-y += progress.o
+perf-y += util.o
+perf-y += hist.o
+perf-y += stdio/hist.o
CFLAGS_setup.o += -DLIBDIR="BUILD_STR($(LIBDIR))"
-libperf-$(CONFIG_SLANG) += browser.o
-libperf-$(CONFIG_SLANG) += browsers/
-libperf-$(CONFIG_SLANG) += tui/
+perf-$(CONFIG_SLANG) += browser.o
+perf-$(CONFIG_SLANG) += browsers/
+perf-$(CONFIG_SLANG) += tui/
CFLAGS_browser.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 4f75561424ed..4ad37d8c7d6a 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -611,14 +611,16 @@ void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
browser->top = browser->entries;
break;
case SEEK_CUR:
- browser->top = browser->top + browser->top_idx + offset;
+ browser->top = (char **)browser->top + offset;
break;
case SEEK_END:
- browser->top = browser->top + browser->nr_entries - 1 + offset;
+ browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset;
break;
default:
return;
}
+ assert((char **)browser->top < (char **)browser->entries + browser->nr_entries);
+ assert((char **)browser->top >= (char **)browser->entries);
}
unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
@@ -630,7 +632,9 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
browser->top = browser->entries;
pos = (char **)browser->top;
- while (idx < browser->nr_entries) {
+ while (idx < browser->nr_entries &&
+ row < (unsigned)SLtt_Screen_Rows - 1) {
+ assert(pos < (char **)browser->entries + browser->nr_entries);
if (!browser->filter || !browser->filter(browser, *pos)) {
ui_browser__gotorc(browser, row, 0);
browser->write(browser, pos, row);
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
index de223f5bed58..fdf86f7981ca 100644
--- a/tools/perf/ui/browsers/Build
+++ b/tools/perf/ui/browsers/Build
@@ -1,8 +1,9 @@
-libperf-y += annotate.o
-libperf-y += hists.o
-libperf-y += map.o
-libperf-y += scripts.o
-libperf-y += header.o
+perf-y += annotate.o
+perf-y += hists.o
+perf-y += map.o
+perf-y += scripts.o
+perf-y += header.o
+perf-y += res_sample.o
CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 1d00e5ec7906..98d934a36d86 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -7,6 +7,7 @@
#include "../../util/annotate.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
+#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
@@ -224,20 +225,24 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
return ret;
}
-static int disasm__cmp(struct annotation_line *a, struct annotation_line *b)
+static double disasm__cmp(struct annotation_line *a, struct annotation_line *b,
+ int percent_type)
{
int i;
for (i = 0; i < a->data_nr; i++) {
- if (a->data[i].percent == b->data[i].percent)
+ if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type])
continue;
- return a->data[i].percent < b->data[i].percent;
+ return a->data[i].percent[percent_type] -
+ b->data[i].percent[percent_type];
}
return 0;
}
-static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al)
+static void disasm_rb_tree__insert(struct annotate_browser *browser,
+ struct annotation_line *al)
{
+ struct rb_root *root = &browser->entries;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct annotation_line *l;
@@ -246,7 +251,7 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line
parent = *p;
l = rb_entry(parent, struct annotation_line, rb_node);
- if (disasm__cmp(al, l))
+ if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -329,7 +334,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
- disasm_rb_tree__insert(&browser->entries, &pos->al);
+ disasm_rb_tree__insert(browser, &pos->al);
}
pthread_mutex_unlock(&notes->lock);
@@ -745,7 +750,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
continue;
case 'r':
{
- script_browse(NULL);
+ script_browse(NULL, NULL);
continue;
}
case 'k':
diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c
index d75492189acb..5aeb663dd184 100644
--- a/tools/perf/ui/browsers/header.c
+++ b/tools/perf/ui/browsers/header.c
@@ -35,7 +35,7 @@ static int list_menu__run(struct ui_browser *menu)
{
int key;
unsigned long offset;
- const char help[] =
+ static const char help[] =
"h/?/F1 Show this window\n"
"UP/DOWN/PGUP\n"
"PGDN/SPACE\n"
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index ffac1d54a3d4..3421ecbdd3f0 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -7,10 +7,14 @@
#include <string.h>
#include <linux/rbtree.h>
#include <sys/ttydefaults.h>
+#include <linux/time64.h>
+#include "../../util/callchain.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include "../../util/hist.h"
+#include "../../util/map.h"
+#include "../../util/symbol.h"
#include "../../util/pstack.h"
#include "../../util/sort.h"
#include "../../util/util.h"
@@ -27,6 +31,7 @@
#include "srcline.h"
#include "string2.h"
#include "units.h"
+#include "time-utils.h"
#include "sane_ctype.h"
@@ -49,7 +54,7 @@ static int hist_browser__get_folding(struct hist_browser *browser)
struct hists *hists = browser->hists;
int unfolded_rows = 0;
- for (nd = rb_first(&hists->entries);
+ for (nd = rb_first_cached(&hists->entries);
(nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
nd = rb_hierarchy_next(nd)) {
struct hist_entry *he =
@@ -267,7 +272,7 @@ static int hierarchy_count_rows(struct hist_browser *hb, struct hist_entry *he,
if (he->has_no_entry)
return 1;
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
while (node) {
float percent;
@@ -372,7 +377,7 @@ static void hist_entry__init_have_children(struct hist_entry *he)
he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
callchain__init_have_children(&he->sorted_chain);
} else {
- he->has_children = !RB_EMPTY_ROOT(&he->hroot_out);
+ he->has_children = !RB_EMPTY_ROOT(&he->hroot_out.rb_root);
}
he->init_have_children = true;
@@ -508,7 +513,7 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
struct hist_entry *child;
int n = 0;
- for (nd = rb_first(&he->hroot_out); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) {
child = rb_entry(nd, struct hist_entry, rb_node);
percent = hist_entry__get_percent_limit(child);
if (!child->filtered && percent >= hb->min_pcnt)
@@ -566,7 +571,7 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
struct rb_node *nd;
struct hist_entry *he;
- nd = rb_first(&browser->hists->entries);
+ nd = rb_first_cached(&browser->hists->entries);
while (nd) {
he = rb_entry(nd, struct hist_entry, rb_node);
@@ -1221,6 +1226,8 @@ void hist_browser__init_hpp(void)
hist_browser__hpp_color_overhead_guest_us;
perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color =
hist_browser__hpp_color_overhead_acc;
+
+ res_sample_init();
}
static int hist_browser__show_entry(struct hist_browser *browser,
@@ -1738,7 +1745,7 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
struct hist_browser *hb;
hb = container_of(browser, struct hist_browser, b);
- browser->top = rb_first(&hb->hists->entries);
+ browser->top = rb_first_cached(&hb->hists->entries);
}
}
@@ -2335,9 +2342,12 @@ close_file_and_continue:
}
struct popup_action {
+ unsigned long time;
struct thread *thread;
struct map_symbol ms;
int socket;
+ struct perf_evsel *evsel;
+ enum rstype rstype;
int (*fn)(struct hist_browser *browser, struct popup_action *act);
};
@@ -2524,46 +2534,137 @@ static int
do_run_script(struct hist_browser *browser __maybe_unused,
struct popup_action *act)
{
- char script_opt[64];
- memset(script_opt, 0, sizeof(script_opt));
+ char *script_opt;
+ int len;
+ int n = 0;
+ len = 100;
+ if (act->thread)
+ len += strlen(thread__comm_str(act->thread));
+ else if (act->ms.sym)
+ len += strlen(act->ms.sym->name);
+ script_opt = malloc(len);
+ if (!script_opt)
+ return -1;
+
+ script_opt[0] = 0;
if (act->thread) {
- scnprintf(script_opt, sizeof(script_opt), " -c %s ",
+ n = scnprintf(script_opt, len, " -c %s ",
thread__comm_str(act->thread));
} else if (act->ms.sym) {
- scnprintf(script_opt, sizeof(script_opt), " -S %s ",
+ n = scnprintf(script_opt, len, " -S %s ",
act->ms.sym->name);
}
- script_browse(script_opt);
+ if (act->time) {
+ char start[32], end[32];
+ unsigned long starttime = act->time;
+ unsigned long endtime = act->time + symbol_conf.time_quantum;
+
+ if (starttime == endtime) { /* Fall back to a +/-1ms window */
+ starttime -= 1*NSEC_PER_MSEC;
+ endtime += 1*NSEC_PER_MSEC;
+ }
+ timestamp__scnprintf_usec(starttime, start, sizeof start);
+ timestamp__scnprintf_usec(endtime, end, sizeof end);
+ n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end);
+ }
+
+ script_browse(script_opt, act->evsel);
+ free(script_opt);
return 0;
}
static int
-add_script_opt(struct hist_browser *browser __maybe_unused,
+do_res_sample_script(struct hist_browser *browser,
+ struct popup_action *act)
+{
+ struct hist_entry *he;
+
+ he = hist_browser__selected_entry(browser);
+ res_sample_browse(he->res_samples, he->num_res, act->evsel, act->rstype);
+ return 0;
+}
+
+static int
+add_script_opt_2(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
- struct thread *thread, struct symbol *sym)
+ struct thread *thread, struct symbol *sym,
+ struct perf_evsel *evsel, const char *tstr)
{
+
if (thread) {
- if (asprintf(optstr, "Run scripts for samples of thread [%s]",
- thread__comm_str(thread)) < 0)
+ if (asprintf(optstr, "Run scripts for samples of thread [%s]%s",
+ thread__comm_str(thread), tstr) < 0)
return 0;
} else if (sym) {
- if (asprintf(optstr, "Run scripts for samples of symbol [%s]",
- sym->name) < 0)
+ if (asprintf(optstr, "Run scripts for samples of symbol [%s]%s",
+ sym->name, tstr) < 0)
return 0;
} else {
- if (asprintf(optstr, "Run scripts for all samples") < 0)
+ if (asprintf(optstr, "Run scripts for all samples%s", tstr) < 0)
return 0;
}
act->thread = thread;
act->ms.sym = sym;
+ act->evsel = evsel;
act->fn = do_run_script;
return 1;
}
static int
+add_script_opt(struct hist_browser *browser,
+ struct popup_action *act, char **optstr,
+ struct thread *thread, struct symbol *sym,
+ struct perf_evsel *evsel)
+{
+ int n, j;
+ struct hist_entry *he;
+
+ n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, "");
+
+ he = hist_browser__selected_entry(browser);
+ if (sort_order && strstr(sort_order, "time")) {
+ char tstr[128];
+
+ optstr++;
+ act++;
+ j = sprintf(tstr, " in ");
+ j += timestamp__scnprintf_usec(he->time, tstr + j,
+ sizeof tstr - j);
+ j += sprintf(tstr + j, "-");
+ timestamp__scnprintf_usec(he->time + symbol_conf.time_quantum,
+ tstr + j, sizeof tstr - j);
+ n += add_script_opt_2(browser, act, optstr, thread, sym,
+ evsel, tstr);
+ act->time = he->time;
+ }
+ return n;
+}
+
+static int
+add_res_sample_opt(struct hist_browser *browser __maybe_unused,
+ struct popup_action *act, char **optstr,
+ struct res_sample *res_sample,
+ struct perf_evsel *evsel,
+ enum rstype type)
+{
+ if (!res_sample)
+ return 0;
+
+ if (asprintf(optstr, "Show context for individual samples %s",
+ type == A_ASM ? "with assembler" :
+ type == A_SOURCE ? "with source" : "") < 0)
+ return 0;
+
+ act->fn = do_res_sample_script;
+ act->evsel = evsel;
+ act->rstype = type;
+ return 1;
+}
+
+static int
do_switch_data(struct hist_browser *browser __maybe_unused,
struct popup_action *act __maybe_unused)
{
@@ -2649,7 +2750,7 @@ add_socket_opt(struct hist_browser *browser, struct popup_action *act,
static void hist_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
if (hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) {
hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
@@ -2669,7 +2770,7 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
double percent)
{
struct hist_entry *he;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
u64 total = hists__total_period(hb->hists);
u64 min_callchain_hits = total * (percent / 100);
@@ -2748,7 +2849,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"S Zoom into current Processor Socket\n" \
/* help messages are sorted by lexical order of the hotkey */
- const char report_help[] = HIST_BROWSER_HELP_COMMON
+ static const char report_help[] = HIST_BROWSER_HELP_COMMON
"i Show header information\n"
"P Print histograms to perf.hist.N\n"
"r Run available scripts\n"
@@ -2756,7 +2857,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
"/ Filter symbol by name";
- const char top_help[] = HIST_BROWSER_HELP_COMMON
+ static const char top_help[] = HIST_BROWSER_HELP_COMMON
"P Print histograms to perf.hist.N\n"
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
@@ -3028,7 +3129,7 @@ skip_annotation:
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
- thread, NULL);
+ thread, NULL, evsel);
}
/*
* Note that browser->selection != NULL
@@ -3043,11 +3144,24 @@ skip_annotation:
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
- NULL, browser->selection->sym);
+ NULL, browser->selection->sym,
+ evsel);
}
}
nr_options += add_script_opt(browser, &actions[nr_options],
- &options[nr_options], NULL, NULL);
+ &options[nr_options], NULL, NULL, evsel);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_NORMAL);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_ASM);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_SOURCE);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
skip_scripting:
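
Many hunks in this file (and in the gtk/stdio viewers below) replace
rb_first() with rb_first_cached(): struct rb_root_cached keeps a pointer to
the leftmost node next to the root, so fetching the first entry is O(1)
instead of a walk down the left spine. A self-contained sketch of the idea,
using an unbalanced tree and illustrative names rather than the kernel
rbtree API:

#include <stdio.h>

struct node {
	int key;
	struct node *left, *right;
};

struct cached_root {
	struct node *root;
	struct node *leftmost;	/* what rb_first_cached() would return */
};

static void insert_cached(struct cached_root *r, struct node *n)
{
	struct node **p = &r->root;
	int leftmost = 1;

	while (*p) {
		if (n->key < (*p)->key) {
			p = &(*p)->left;
		} else {
			p = &(*p)->right;
			leftmost = 0;	/* went right at least once */
		}
	}
	*p = n;
	if (leftmost)
		r->leftmost = n;	/* new minimum: update the cache */
}

int main(void)
{
	struct cached_root r = { NULL, NULL };
	struct node a = { .key = 5 }, b = { .key = 2 };

	insert_cached(&r, &a);
	insert_cached(&r, &b);
	printf("first key: %d\n", r.leftmost->key);	/* O(1) */
	return 0;
}
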
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index 5b8b8c637686..c70d9337405b 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include "../../util/util.h"
#include "../../util/debug.h"
+#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../browser.h"
#include "../helpline.h"
diff --git a/tools/perf/ui/browsers/res_sample.c b/tools/perf/ui/browsers/res_sample.c
new file mode 100644
index 000000000000..c0dd73176d42
--- /dev/null
+++ b/tools/perf/ui/browsers/res_sample.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Display a menu with individual samples to browse with perf script */
+#include "util.h"
+#include "hist.h"
+#include "evsel.h"
+#include "hists.h"
+#include "sort.h"
+#include "config.h"
+#include "time-utils.h"
+#include <linux/time64.h>
+
+static u64 context_len = 10 * NSEC_PER_MSEC;
+
+static int res_sample_config(const char *var, const char *value, void *data __maybe_unused)
+{
+ if (!strcmp(var, "samples.context"))
+ return perf_config_u64(&context_len, var, value);
+ return 0;
+}
+
+void res_sample_init(void)
+{
+ perf_config(res_sample_config, NULL);
+}
+
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+ struct perf_evsel *evsel, enum rstype rstype)
+{
+ char **names;
+ int i, n;
+ int choice;
+ char *cmd;
+ char pbuf[256], tidbuf[32], cpubuf[32];
+ const char *perf = perf_exe(pbuf, sizeof pbuf);
+ char trange[128], tsample[64];
+ struct res_sample *r;
+ char extra_format[256];
+
+ names = calloc(num_res, sizeof(char *));
+ if (!names)
+ return -1;
+ for (i = 0; i < num_res; i++) {
+ char tbuf[64];
+
+ timestamp__scnprintf_nsec(res_samples[i].time, tbuf, sizeof tbuf);
+ if (asprintf(&names[i], "%s: CPU %d tid %d", tbuf,
+ res_samples[i].cpu, res_samples[i].tid) < 0) {
+ while (--i >= 0)
+ free(names[i]);
+ free(names);
+ return -1;
+ }
+ }
+ choice = ui__popup_menu(num_res, names);
+ for (i = 0; i < num_res; i++)
+ free(names[i]);
+ free(names);
+
+ if (choice < 0 || choice >= num_res)
+ return -1;
+ r = &res_samples[choice];
+
+ n = timestamp__scnprintf_nsec(r->time - context_len, trange, sizeof trange);
+ trange[n++] = ',';
+ timestamp__scnprintf_nsec(r->time + context_len, trange + n, sizeof trange - n);
+
+ timestamp__scnprintf_nsec(r->time, tsample, sizeof tsample);
+
+ attr_to_script(extra_format, &evsel->attr);
+
+ if (asprintf(&cmd, "%s script %s%s --time %s %s%s %s%s --ns %s %s %s %s %s | less +/%s",
+ perf,
+ input_name ? "-i " : "",
+ input_name ? input_name : "",
+ trange,
+ r->cpu >= 0 ? "--cpu " : "",
+ r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
+ r->tid ? "--tid " : "",
+ r->tid ? (sprintf(tidbuf, "%d", r->tid), tidbuf) : "",
+ extra_format,
+ rstype == A_ASM ? "-F +insn --xed" :
+ rstype == A_SOURCE ? "-F +srcline,+srccode" : "",
+ symbol_conf.inline_name ? "--inline" : "",
+ "--show-lost-events ",
+ r->tid ? "--show-switch-events --show-task-events " : "",
+ tsample) < 0)
+ return -1;
+ run_script(cmd);
+ free(cmd);
+ return 0;
+}
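
For a concrete feel of what res_sample_browse() ends up running, here is a
standalone sketch that composes a command of the same shape; the perf.data
path, timestamps, CPU and tid are made up for illustration:

#define _GNU_SOURCE		/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *cmd;
	int cpu = 3, tid = 4242;	/* hypothetical sample context */

	if (asprintf(&cmd,
		     "perf script -i perf.data --time %s,%s --cpu %d --tid %d "
		     "--ns -F +insn --xed | less +/%s",
		     "1234.566577", "1234.586577", cpu, tid,
		     "1234.576577") < 0)
		return 1;
	puts(cmd);	/* the browser hands such a command to run_script() */
	free(cmd);
	return 0;
}

The real command additionally appends the attr-derived -F flags from
attr_to_script() and the --show-*-events options before piping to less.
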
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 90a32ac69e76..27cf3ab88d13 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -1,34 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-#include <elf.h>
-#include <inttypes.h>
-#include <sys/ttydefaults.h>
-#include <string.h>
#include "../../util/sort.h"
#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/debug.h"
#include "../../util/symbol.h"
#include "../browser.h"
-#include "../helpline.h"
#include "../libslang.h"
-
-/* 2048 lines should be enough for a script output */
-#define MAX_LINES 2048
-
-/* 160 bytes for one output line */
-#define AVERAGE_LINE_LEN 160
-
-struct script_line {
- struct list_head node;
- char line[AVERAGE_LINE_LEN];
-};
-
-struct perf_script_browser {
- struct ui_browser b;
- struct list_head entries;
- const char *script_name;
- int nr_lines;
-};
+#include "config.h"
#define SCRIPT_NAMELEN 128
#define SCRIPT_MAX_NO 64
@@ -40,149 +18,169 @@ struct perf_script_browser {
*/
#define SCRIPT_FULLPATH_LEN 256
+struct script_config {
+ const char **names;
+ char **paths;
+ int index;
+ const char *perf;
+ char extra_format[256];
+};
+
+void attr_to_script(char *extra_format, struct perf_event_attr *attr)
+{
+ extra_format[0] = 0;
+ if (attr->read_format & PERF_FORMAT_GROUP)
+ strcat(extra_format, " -F +metric");
+ if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK)
+ strcat(extra_format, " -F +brstackinsn --xed");
+ if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+ strcat(extra_format, " -F +iregs");
+ if (attr->sample_type & PERF_SAMPLE_REGS_USER)
+ strcat(extra_format, " -F +uregs");
+ if (attr->sample_type & PERF_SAMPLE_PHYS_ADDR)
+ strcat(extra_format, " -F +phys_addr");
+}
+
+static int add_script_option(const char *name, const char *opt,
+ struct script_config *c)
+{
+ c->names[c->index] = name;
+ if (asprintf(&c->paths[c->index],
+ "%s script %s -F +metric %s %s",
+ c->perf, opt, symbol_conf.inline_name ? " --inline" : "",
+ c->extra_format) < 0)
+ return -1;
+ c->index++;
+ return 0;
+}
+
+static int scripts_config(const char *var, const char *value, void *data)
+{
+ struct script_config *c = data;
+
+ if (!strstarts(var, "scripts."))
+ return -1;
+ if (c->index >= SCRIPT_MAX_NO)
+ return -1;
+ c->names[c->index] = strdup(var + 8); /* skip the "scripts." prefix */
+ if (!c->names[c->index])
+ return -1;
+ if (asprintf(&c->paths[c->index], "%s %s", value,
+ c->extra_format) < 0)
+ return -1;
+ c->index++;
+ return 0;
+}
+
/*
 * On success, copy the full path of the selected script into the
 * buffer pointed to by script_name and return 0.
* Return -1 on failure.
*/
-static int list_scripts(char *script_name)
+static int list_scripts(char *script_name, bool *custom,
+ struct perf_evsel *evsel)
{
- char *buf, *names[SCRIPT_MAX_NO], *paths[SCRIPT_MAX_NO];
- int i, num, choice, ret = -1;
+ char *buf, *paths[SCRIPT_MAX_NO], *names[SCRIPT_MAX_NO];
+ int i, num, choice;
+ int ret = 0;
+ int max_std, custom_perf;
+ char pbuf[256];
+ const char *perf = perf_exe(pbuf, sizeof pbuf);
+ struct script_config scriptc = {
+ .names = (const char **)names,
+ .paths = paths,
+ .perf = perf
+ };
+
+ script_name[0] = 0;
/* Preset the script name to SCRIPT_NAMELEN */
buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN));
if (!buf)
- return ret;
+ return -1;
- for (i = 0; i < SCRIPT_MAX_NO; i++) {
- names[i] = buf + i * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
+ if (evsel)
+ attr_to_script(scriptc.extra_format, &evsel->attr);
+ add_script_option("Show individual samples", "", &scriptc);
+ add_script_option("Show individual samples with assembler", "-F +insn --xed",
+ &scriptc);
+ add_script_option("Show individual samples with source", "-F +srcline,+srccode",
+ &scriptc);
+ perf_config(scripts_config, &scriptc);
+ custom_perf = scriptc.index;
+ add_script_option("Show samples with custom perf script arguments", "", &scriptc);
+ i = scriptc.index;
+ max_std = i;
+
+ for (; i < SCRIPT_MAX_NO; i++) {
+ names[i] = buf + (i - max_std) * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
paths[i] = names[i] + SCRIPT_NAMELEN;
}
- num = find_scripts(names, paths);
- if (num > 0) {
- choice = ui__popup_menu(num, names);
- if (choice < num && choice >= 0) {
- strcpy(script_name, paths[choice]);
- ret = 0;
- }
+ num = find_scripts(names + max_std, paths + max_std, SCRIPT_MAX_NO - max_std,
+ SCRIPT_FULLPATH_LEN);
+ if (num < 0)
+ num = 0;
+ choice = ui__popup_menu(num + max_std, (char * const *)names);
+ if (choice < 0) {
+ ret = -1;
+ goto out;
}
+ if (choice == custom_perf) {
+ char script_args[50];
+ int key = ui_browser__input_window("perf script command",
+ "Enter perf script command line (without perf script prefix)",
+ script_args, "", 0);
+ if (key != K_ENTER) {
+ ret = -1;
+ goto out;
+ }
+ sprintf(script_name, "%s script %s", perf, script_args);
+ } else if (choice < num + max_std) {
+ strcpy(script_name, paths[choice]);
+ }
+ *custom = choice >= max_std;
+out:
free(buf);
+ for (i = 0; i < max_std; i++)
+ free(paths[i]);
return ret;
}
-static void script_browser__write(struct ui_browser *browser,
- void *entry, int row)
+void run_script(char *cmd)
{
- struct script_line *sline = list_entry(entry, struct script_line, node);
- bool current_entry = ui_browser__is_current_entry(browser, row);
-
- ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
- HE_COLORSET_NORMAL);
-
- ui_browser__write_nstring(browser, sline->line, browser->width);
+ pr_debug("Running %s\n", cmd);
+ SLang_reset_tty();
+ if (system(cmd) < 0)
+ pr_warning("Cannot run %s\n", cmd);
+ /*
+ * SLang doesn't seem to reset the whole terminal, so be more
+ * forceful to get back to the original state.
+ */
+ printf("\033[c\033[H\033[J");
+ fflush(stdout);
+ SLang_init_tty(0, 0, 0);
+ SLsmg_refresh();
}
-static int script_browser__run(struct perf_script_browser *browser)
+int script_browse(const char *script_opt, struct perf_evsel *evsel)
{
- int key;
+ char *cmd, script_name[SCRIPT_FULLPATH_LEN];
+ bool custom = false;
- if (ui_browser__show(&browser->b, browser->script_name,
- "Press ESC to exit") < 0)
+ memset(script_name, 0, SCRIPT_FULLPATH_LEN);
+ if (list_scripts(script_name, &custom, evsel))
return -1;
- while (1) {
- key = ui_browser__run(&browser->b, 0);
-
- /* We can add some special key handling here if needed */
- break;
- }
-
- ui_browser__hide(&browser->b);
- return key;
-}
-
-
-int script_browse(const char *script_opt)
-{
- char cmd[SCRIPT_FULLPATH_LEN*2], script_name[SCRIPT_FULLPATH_LEN];
- char *line = NULL;
- size_t len = 0;
- ssize_t retlen;
- int ret = -1, nr_entries = 0;
- FILE *fp;
- void *buf;
- struct script_line *sline;
-
- struct perf_script_browser script = {
- .b = {
- .refresh = ui_browser__list_head_refresh,
- .seek = ui_browser__list_head_seek,
- .write = script_browser__write,
- },
- .script_name = script_name,
- };
-
- INIT_LIST_HEAD(&script.entries);
-
- /* Save each line of the output in one struct script_line object. */
- buf = zalloc((sizeof(*sline)) * MAX_LINES);
- if (!buf)
+ if (asprintf(&cmd, "%s%s %s %s%s 2>&1 | less",
+ custom ? "perf script -s " : "",
+ script_name,
+ script_opt ? script_opt : "",
+ input_name ? "-i " : "",
+ input_name ? input_name : "") < 0)
return -1;
- sline = buf;
-
- memset(script_name, 0, SCRIPT_FULLPATH_LEN);
- if (list_scripts(script_name))
- goto exit;
-
- sprintf(cmd, "perf script -s %s ", script_name);
- if (script_opt)
- strcat(cmd, script_opt);
+ run_script(cmd);
+ free(cmd);
- if (input_name) {
- strcat(cmd, " -i ");
- strcat(cmd, input_name);
- }
-
- strcat(cmd, " 2>&1");
-
- fp = popen(cmd, "r");
- if (!fp)
- goto exit;
-
- while ((retlen = getline(&line, &len, fp)) != -1) {
- strncpy(sline->line, line, AVERAGE_LINE_LEN);
-
- /* If one output line is very large, just cut it short */
- if (retlen >= AVERAGE_LINE_LEN) {
- sline->line[AVERAGE_LINE_LEN - 1] = '\0';
- sline->line[AVERAGE_LINE_LEN - 2] = '\n';
- }
- list_add_tail(&sline->node, &script.entries);
-
- if (script.b.width < retlen)
- script.b.width = retlen;
-
- if (nr_entries++ >= MAX_LINES - 1)
- break;
- sline++;
- }
-
- if (script.b.width > AVERAGE_LINE_LEN)
- script.b.width = AVERAGE_LINE_LEN;
-
- free(line);
- pclose(fp);
-
- script.nr_lines = nr_entries;
- script.b.nr_entries = nr_entries;
- script.b.entries = &script.entries;
-
- ret = script_browser__run(&script);
-exit:
- free(buf);
- return ret;
+ return 0;
}
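
scripts_config() above follows the usual perf_config() visitor pattern: the
callback is invoked once per (var, value) pair from the config files and
accumulates results in caller-owned state passed through the data pointer. A
minimal standalone sketch of that shape (the parse loop is replaced by direct
calls, and all names are illustrative):

#include <stdio.h>
#include <string.h>

struct menu {
	const char *names[8];
	int index;
};

static int on_option(const char *var, const char *value, void *data)
{
	struct menu *m = data;

	if (strncmp(var, "scripts.", 8) != 0)
		return 0;	/* not our section, ignore */
	if (m->index >= 8)
		return -1;
	m->names[m->index++] = value;	/* the real code strdup()s these */
	return 0;
}

int main(void)
{
	struct menu m = { .index = 0 };

	/* stand-ins for (var, value) pairs parsed out of ~/.perfconfig */
	on_option("scripts.wakeup-latency", "perf script -s wakeup.py", &m);
	on_option("core.proc-map-timeout", "500", &m);

	printf("%d menu entr%s: %s\n", m.index,
	       m.index == 1 ? "y" : "ies", m.names[0]);
	return 0;
}
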
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 48428c9acd89..df49c9ba1785 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
+#include "util/sort.h"
#include "util/debug.h"
#include "util/annotate.h"
#include "util/evsel.h"
+#include "util/map.h"
+#include "util/symbol.h"
#include "ui/helpline.h"
#include <inttypes.h>
#include <signal.h>
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 4ab663ec3e5e..0c08890f006a 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "../evlist.h"
#include "../cache.h"
+#include "../callchain.h"
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
@@ -353,7 +354,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
g_object_unref(GTK_TREE_MODEL(store));
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
GtkTreeIter iter;
u64 total = hists__total_period(h->hists);
@@ -401,7 +402,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
}
static void perf_gtk__add_hierarchy_entries(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
GtkTreeStore *store,
GtkTreeIter *parent,
struct perf_hpp *hpp,
@@ -415,7 +416,7 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
u64 total = hists__total_period(hists);
int size;
- for (node = rb_first(root); node; node = rb_next(node)) {
+ for (node = rb_first_cached(root); node; node = rb_next(node)) {
GtkTreeIter iter;
float percent;
char *bf;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index fe3dfaa64a91..412d6f1626e3 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -3,6 +3,7 @@
#include <math.h>
#include <linux/compiler.h>
+#include "../util/callchain.h"
#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 74c4ae1f0a05..a60f2993d390 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -2,8 +2,12 @@
#include <stdio.h>
#include <linux/string.h>
+#include "../../util/callchain.h"
#include "../../util/util.h"
#include "../../util/hist.h"
+#include "../../util/map.h"
+#include "../../util/map_groups.h"
+#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
@@ -788,7 +792,8 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
indent = hists__overhead_width(hists) + 4;
- for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
+ for (nd = rb_first_cached(&hists->entries); nd;
+ nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;
diff --git a/tools/perf/ui/tui/Build b/tools/perf/ui/tui/Build
index 9e4c6ca41a9f..f916df33a1a7 100644
--- a/tools/perf/ui/tui/Build
+++ b/tools/perf/ui/tui/Build
@@ -1,4 +1,4 @@
-libperf-y += setup.o
-libperf-y += util.o
-libperf-y += helpline.o
-libperf-y += progress.o
+perf-y += setup.o
+perf-y += util.o
+perf-y += helpline.o
+perf-y += progress.o
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index af72be7f5b3b..8dd3102301ea 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -1,158 +1,164 @@
-libperf-y += annotate.o
-libperf-y += block-range.o
-libperf-y += build-id.o
-libperf-y += config.o
-libperf-y += ctype.o
-libperf-y += db-export.o
-libperf-y += env.o
-libperf-y += event.o
-libperf-y += evlist.o
-libperf-y += evsel.o
-libperf-y += evsel_fprintf.o
-libperf-y += find_bit.o
-libperf-y += get_current_dir_name.o
-libperf-y += kallsyms.o
-libperf-y += levenshtein.o
-libperf-y += llvm-utils.o
-libperf-y += mmap.o
-libperf-y += memswap.o
-libperf-y += parse-events.o
-libperf-y += perf_regs.o
-libperf-y += path.o
-libperf-y += print_binary.o
-libperf-y += rbtree.o
-libperf-y += libstring.o
-libperf-y += bitmap.o
-libperf-y += hweight.o
-libperf-y += smt.o
-libperf-y += strbuf.o
-libperf-y += string.o
-libperf-y += strlist.o
-libperf-y += strfilter.o
-libperf-y += top.o
-libperf-y += usage.o
-libperf-y += dso.o
-libperf-y += symbol.o
-libperf-y += symbol_fprintf.o
-libperf-y += color.o
-libperf-y += metricgroup.o
-libperf-y += header.o
-libperf-y += callchain.o
-libperf-y += values.o
-libperf-y += debug.o
-libperf-y += machine.o
-libperf-y += map.o
-libperf-y += pstack.o
-libperf-y += session.o
-libperf-$(CONFIG_TRACE) += syscalltbl.o
-libperf-y += ordered-events.o
-libperf-y += namespaces.o
-libperf-y += comm.o
-libperf-y += thread.o
-libperf-y += thread_map.o
-libperf-y += trace-event-parse.o
-libperf-y += parse-events-flex.o
-libperf-y += parse-events-bison.o
-libperf-y += pmu.o
-libperf-y += pmu-flex.o
-libperf-y += pmu-bison.o
-libperf-y += trace-event-read.o
-libperf-y += trace-event-info.o
-libperf-y += trace-event-scripting.o
-libperf-y += trace-event.o
-libperf-y += svghelper.o
-libperf-y += sort.o
-libperf-y += hist.o
-libperf-y += util.o
-libperf-y += xyarray.o
-libperf-y += cpumap.o
-libperf-y += cgroup.o
-libperf-y += target.o
-libperf-y += rblist.o
-libperf-y += intlist.o
-libperf-y += vdso.o
-libperf-y += counts.o
-libperf-y += stat.o
-libperf-y += stat-shadow.o
-libperf-y += stat-display.o
-libperf-y += record.o
-libperf-y += srcline.o
-libperf-y += srccode.o
-libperf-y += data.o
-libperf-y += tsc.o
-libperf-y += cloexec.o
-libperf-y += call-path.o
-libperf-y += rwsem.o
-libperf-y += thread-stack.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
-libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
-libperf-$(CONFIG_AUXTRACE) += intel-pt.o
-libperf-$(CONFIG_AUXTRACE) += intel-bts.o
-libperf-$(CONFIG_AUXTRACE) += arm-spe.o
-libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
-libperf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
+perf-y += annotate.o
+perf-y += block-range.o
+perf-y += build-id.o
+perf-y += config.o
+perf-y += ctype.o
+perf-y += db-export.o
+perf-y += env.o
+perf-y += event.o
+perf-y += evlist.o
+perf-y += evsel.o
+perf-y += evsel_fprintf.o
+perf-y += find_bit.o
+perf-y += get_current_dir_name.o
+perf-y += kallsyms.o
+perf-y += levenshtein.o
+perf-y += llvm-utils.o
+perf-y += mmap.o
+perf-y += memswap.o
+perf-y += parse-events.o
+perf-y += perf_regs.o
+perf-y += path.o
+perf-y += print_binary.o
+perf-y += rbtree.o
+perf-y += libstring.o
+perf-y += bitmap.o
+perf-y += hweight.o
+perf-y += smt.o
+perf-y += strbuf.o
+perf-y += string.o
+perf-y += strlist.o
+perf-y += strfilter.o
+perf-y += top.o
+perf-y += usage.o
+perf-y += dso.o
+perf-y += symbol.o
+perf-y += symbol_fprintf.o
+perf-y += color.o
+perf-y += color_config.o
+perf-y += metricgroup.o
+perf-y += header.o
+perf-y += callchain.o
+perf-y += values.o
+perf-y += debug.o
+perf-y += machine.o
+perf-y += map.o
+perf-y += pstack.o
+perf-y += session.o
+perf-y += sample-raw.o
+perf-y += s390-sample-raw.o
+perf-$(CONFIG_TRACE) += syscalltbl.o
+perf-y += ordered-events.o
+perf-y += namespaces.o
+perf-y += comm.o
+perf-y += thread.o
+perf-y += thread_map.o
+perf-y += trace-event-parse.o
+perf-y += parse-events-flex.o
+perf-y += parse-events-bison.o
+perf-y += pmu.o
+perf-y += pmu-flex.o
+perf-y += pmu-bison.o
+perf-y += trace-event-read.o
+perf-y += trace-event-info.o
+perf-y += trace-event-scripting.o
+perf-y += trace-event.o
+perf-y += svghelper.o
+perf-y += sort.o
+perf-y += hist.o
+perf-y += util.o
+perf-y += xyarray.o
+perf-y += cpumap.o
+perf-y += cputopo.o
+perf-y += cgroup.o
+perf-y += target.o
+perf-y += rblist.o
+perf-y += intlist.o
+perf-y += vdso.o
+perf-y += counts.o
+perf-y += stat.o
+perf-y += stat-shadow.o
+perf-y += stat-display.o
+perf-y += record.o
+perf-y += srcline.o
+perf-y += srccode.o
+perf-y += data.o
+perf-y += tsc.o
+perf-y += cloexec.o
+perf-y += call-path.o
+perf-y += rwsem.o
+perf-y += thread-stack.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
+perf-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-$(CONFIG_AUXTRACE) += arm-spe.o
+perf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+perf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
ifdef CONFIG_LIBOPENCSD
-libperf-$(CONFIG_AUXTRACE) += cs-etm.o
-libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+perf-$(CONFIG_AUXTRACE) += cs-etm.o
+perf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
endif
-libperf-y += parse-branch-options.o
-libperf-y += dump-insn.o
-libperf-y += parse-regs-options.o
-libperf-y += term.o
-libperf-y += help-unknown-cmd.o
-libperf-y += mem-events.o
-libperf-y += vsprintf.o
-libperf-y += drv_configs.o
-libperf-y += units.o
-libperf-y += time-utils.o
-libperf-y += expr-bison.o
-libperf-y += branch.o
-libperf-y += mem2node.o
-
-libperf-$(CONFIG_LIBBPF) += bpf-loader.o
-libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
-libperf-$(CONFIG_LIBELF) += symbol-elf.o
-libperf-$(CONFIG_LIBELF) += probe-file.o
-libperf-$(CONFIG_LIBELF) += probe-event.o
+perf-y += parse-branch-options.o
+perf-y += dump-insn.o
+perf-y += parse-regs-options.o
+perf-y += term.o
+perf-y += help-unknown-cmd.o
+perf-y += mem-events.o
+perf-y += vsprintf.o
+perf-y += units.o
+perf-y += time-utils.o
+perf-y += expr-bison.o
+perf-y += branch.o
+perf-y += mem2node.o
+
+perf-$(CONFIG_LIBBPF) += bpf-loader.o
+perf-$(CONFIG_LIBBPF) += bpf_map.o
+perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
+perf-$(CONFIG_LIBELF) += symbol-elf.o
+perf-$(CONFIG_LIBELF) += probe-file.o
+perf-$(CONFIG_LIBELF) += probe-event.o
ifndef CONFIG_LIBELF
-libperf-y += symbol-minimal.o
+perf-y += symbol-minimal.o
endif
ifndef CONFIG_SETNS
-libperf-y += setns.o
+perf-y += setns.o
endif
-libperf-$(CONFIG_DWARF) += probe-finder.o
-libperf-$(CONFIG_DWARF) += dwarf-aux.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += probe-finder.o
+perf-$(CONFIG_DWARF) += dwarf-aux.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
-libperf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
+perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
+perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
-libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
-libperf-y += scripting-engines/
+perf-y += scripting-engines/
-libperf-$(CONFIG_ZLIB) += zlib.o
-libperf-$(CONFIG_LZMA) += lzma.o
-libperf-y += demangle-java.o
-libperf-y += demangle-rust.o
+perf-$(CONFIG_ZLIB) += zlib.o
+perf-$(CONFIG_LZMA) += lzma.o
+perf-y += demangle-java.o
+perf-y += demangle-rust.o
ifdef CONFIG_JITDUMP
-libperf-$(CONFIG_LIBELF) += jitdump.o
-libperf-$(CONFIG_LIBELF) += genelf.o
-libperf-$(CONFIG_DWARF) += genelf_debug.o
+perf-$(CONFIG_LIBELF) += jitdump.o
+perf-$(CONFIG_LIBELF) += genelf.o
+perf-$(CONFIG_DWARF) += genelf_debug.o
endif
-libperf-y += perf-hooks.o
+perf-y += perf-hooks.o
-libperf-$(CONFIG_CXX) += c++/
+perf-$(CONFIG_LIBBPF) += bpf-event.o
+
+perf-$(CONFIG_CXX) += c++/
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))"
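
The libperf-y -> perf-y rename above (and in ui/tui/Build) is mechanical:
these objects are linked into the perf binary itself rather than a libperf
library, so the prefix now says what it means. The kbuild-style semantics are
unchanged; roughly, with hypothetical objects:

perf-y                 += always.o     # always built and linked into perf
perf-$(CONFIG_FEATURE) += optional.o   # only when CONFIG_FEATURE=y
perf-y                 += subdir/      # descend into subdir/Build
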
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 70de8f6b3aee..c8b01176c9e1 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -9,6 +9,11 @@
#include <errno.h>
#include <inttypes.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
@@ -16,12 +21,14 @@
#include "color.h"
#include "config.h"
#include "cache.h"
+#include "map.h"
#include "symbol.h"
#include "units.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include "evlist.h"
+#include "bpf-event.h"
#include "block-range.h"
#include "string2.h"
#include "arch/common.h"
@@ -29,6 +36,7 @@
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <bpf/libbpf.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
@@ -196,18 +204,18 @@ static void ins__delete(struct ins_operands *ops)
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
if (ins->ops->scnprintf)
- return ins->ops->scnprintf(ins, bf, size, ops);
+ return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
}
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
@@ -271,18 +279,18 @@ indirect_call:
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
if (ops->target.sym)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
if (ops->target.addr == 0)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.name)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
- return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
+ return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
@@ -386,15 +394,15 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
}
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
const char *c;
if (!ops->target.addr || ops->target.offset < 0)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.outside && ops->target.sym != NULL)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
c = strchr(ops->raw, ',');
c = validate_comma(c, ops);
@@ -413,7 +421,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
c++;
}
- return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
+ return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
ins->name, c ? c - ops->raw : 0, ops->raw,
ops->target.offset);
}
@@ -481,16 +489,16 @@ out_free_ops:
}
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
int printed;
if (ops->locked.ins.ops == NULL)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
- printed = scnprintf(bf, size, "%-6s ", ins->name);
+ printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
- size - printed, ops->locked.ops);
+ size - printed, ops->locked.ops, max_ins_name);
}
static void lock__delete(struct ins_operands *ops)
@@ -562,9 +570,9 @@ out_free_source:
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s,%s", ins->name,
+ return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
@@ -602,9 +610,9 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
}
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s", ins->name,
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
ops->target.name ?: ops->target.raw);
}
@@ -614,9 +622,9 @@ static struct ins_ops dec_ops = {
};
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
- struct ins_operands *ops __maybe_unused)
+ struct ins_operands *ops __maybe_unused, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s", "nop");
+ return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
}
static struct ins_ops nop_ops = {
@@ -1230,12 +1238,12 @@ void disasm_line__free(struct disasm_line *dl)
annotation_line__delete(&dl->al);
}
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
{
if (raw || !dl->ins.ops)
- return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
- return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
+ return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
}
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
@@ -1613,6 +1621,9 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
" --vmlinux vmlinux\n", build_id_msg ?: "");
}
break;
+ case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
+ scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
+ break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
@@ -1672,6 +1683,156 @@ fallback:
return 0;
}
+#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+#define PACKAGE "perf"
+#include <bfd.h>
+#include <dis-asm.h>
+
+static int symbol__disassemble_bpf(struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct annotation_options *opts = args->options;
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ struct bpf_prog_info_node *info_node;
+ int len = sym->end - sym->start;
+ disassembler_ftype disassemble;
+ struct map *map = args->ms.map;
+ struct disassemble_info info;
+ struct dso *dso = map->dso;
+ int pc = 0, count, sub_id;
+ struct btf *btf = NULL;
+ char tpath[PATH_MAX];
+ size_t buf_size;
+ int nr_skip = 0;
+ int ret = -1;
+ char *buf;
+ bfd *bfdf;
+ FILE *s;
+
+ if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return -1;
+
+ pr_debug("%s: handling sym %s addr %lx len %lx\n", __func__,
+ sym->name, sym->start, sym->end - sym->start);
+
+ memset(tpath, 0, sizeof(tpath));
+ perf_exe(tpath, sizeof(tpath));
+
+ bfdf = bfd_openr(tpath, NULL);
+ assert(bfdf);
+ assert(bfd_check_format(bfdf, bfd_object));
+
+ s = open_memstream(&buf, &buf_size);
+ if (!s)
+ goto out;
+ init_disassemble_info(&info, s,
+ (fprintf_ftype) fprintf);
+
+ info.arch = bfd_get_arch(bfdf);
+ info.mach = bfd_get_mach(bfdf);
+
+ info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
+ dso->bpf_prog.id);
+ if (!info_node)
+ goto out;
+ info_linear = info_node->info_linear;
+ sub_id = dso->bpf_prog.sub_id;
+
+ info.buffer = (void *)(info_linear->info.jited_prog_insns);
+ info.buffer_length = info_linear->info.jited_prog_len;
+
+ if (info_linear->info.nr_line_info)
+ prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+ if (info_linear->info.btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(dso->bpf_prog.env,
+ info_linear->info.btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
+ disassemble = disassembler(bfdf);
+#endif
+ assert(disassemble);
+
+ fflush(s);
+ do {
+ const struct bpf_line_info *linfo = NULL;
+ struct disasm_line *dl;
+ size_t prev_buf_size;
+ const char *srcline;
+ u64 addr;
+
+ addr = pc + ((u64 *)(info_linear->info.jited_ksyms))[sub_id];
+ count = disassemble(pc, &info);
+
+ if (prog_linfo)
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ addr, sub_id,
+ nr_skip);
+
+ if (linfo && btf) {
+ srcline = btf__name_by_offset(btf, linfo->line_off);
+ nr_skip++;
+ } else
+ srcline = NULL;
+
+ fprintf(s, "\n");
+ prev_buf_size = buf_size;
+ fflush(s);
+
+ if (!opts->hide_src_code && srcline) {
+ args->offset = -1;
+ args->line = strdup(srcline);
+ args->line_nr = 0;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl) {
+ annotation_line__add(&dl->al,
+ &notes->src->source);
+ }
+ }
+
+ args->offset = pc;
+ args->line = buf + prev_buf_size;
+ args->line_nr = 0;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc += count;
+ } while (count > 0 && pc < len);
+
+ ret = 0;
+out:
+ free(prog_linfo);
+ free(btf);
+ fclose(s);
+ bfd_close(bfdf);
+ return ret;
+}
+#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
+ struct annotate_args *args __maybe_unused)
+{
+ return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+
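
symbol__disassemble_bpf() above captures libopcodes output by pointing the
disassembler's fprintf at an open_memstream() FILE, then slicing one line per
instruction out of the buffer using the size recorded before each call. The
capture pattern in isolation, as a runnable sketch:

#define _GNU_SOURCE		/* open_memstream() is POSIX.1-2008 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = NULL;
	size_t buf_size = 0, prev_buf_size;
	FILE *s = open_memstream(&buf, &buf_size);

	if (!s)
		return 1;

	prev_buf_size = buf_size;
	fprintf(s, "r1 = *(u64 *)(r10 - 8)");	/* stand-in for one insn */
	fflush(s);				/* makes buf/buf_size current */
	printf("line: %.*s\n", (int)(buf_size - prev_buf_size),
	       buf + prev_buf_size);

	fclose(s);
	free(buf);
	return 0;
}

Slicing at prev_buf_size is what lets the function turn a stream-oriented
printer into per-instruction disasm_line entries.
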
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *opts = args->options;
@@ -1699,7 +1860,9 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
- if (dso__is_kcore(dso)) {
+ if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
+ return symbol__disassemble_bpf(sym, args);
+ } else if (dso__is_kcore(dso)) {
kce.kcore_filename = symfs_filename;
kce.addr = map__rip_2objdump(map, sym->start);
kce.offs = sym->start;
@@ -1889,6 +2052,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
struct annotation_options *options,
struct arch **parch)
{
+ struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
.privsize = privsize,
.evsel = evsel,
@@ -1919,6 +2083,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
args.ms.map = map;
args.ms.sym = sym;
+ notes->start = map__rip_2objdump(map, sym->start);
return symbol__disassemble(sym, &args);
}
@@ -2410,12 +2575,30 @@ static inline int width_jumps(int n)
return 1;
}
+static int annotation__max_ins_name(struct annotation *notes)
+{
+ int max_name = 0, len;
+ struct annotation_line *al;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ if (al->offset == -1)
+ continue;
+
+ len = strlen(disasm_line(al)->ins.name);
+ if (max_name < len)
+ max_name = len;
+ }
+
+ return max_name;
+}
+
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
notes->widths.addr = notes->widths.target =
notes->widths.min_addr = hex_width(symbol__size(sym));
notes->widths.max_addr = hex_width(sym->end);
notes->widths.jumps = width_jumps(notes->max_jump_sources);
+ notes->widths.max_ins_name = annotation__max_ins_name(notes);
}
void annotation__update_column_widths(struct annotation *notes)
@@ -2579,7 +2762,7 @@ call_like:
obj__printf(obj, " ");
}
- disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset);
+ disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
}
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
@@ -2794,8 +2977,6 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
symbol__calc_percent(sym, evsel);
- notes->start = map__rip_2objdump(map, sym->start);
-
annotation__set_offsets(notes, size);
annotation__mark_jump_targets(notes, sym);
annotation__compute_ipc(notes, size);
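
The recurring %-6s -> %-*s change in this file passes the mnemonic column
width as an argument, computed once per symbol by annotation__max_ins_name()
above. A compilable illustration of the formatting idea, with made-up
mnemonics and operands:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *ins[] = { "mov", "vpmovzxbd" };	/* made-up listing */
	int i, len, max_ins_name = 0;

	for (i = 0; i < 2; i++) {	/* cf. annotation__max_ins_name() */
		len = strlen(ins[i]);
		if (max_ins_name < len)
			max_ins_name = len;
	}
	for (i = 0; i < 2; i++)
		printf("%-*s %s\n", max_ins_name, ins[i], "%xmm0,%ymm1");
	return 0;
}
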
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index fb6463730ba4..5bc0cf655d37 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -4,16 +4,24 @@
#include <stdbool.h>
#include <stdint.h>
+#include <stdio.h>
#include <linux/types.h>
-#include "symbol.h"
-#include "hist.h"
-#include "sort.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <pthread.h>
#include <asm/bug.h>
+#include "symbol_conf.h"
+struct hist_browser_timer;
+struct hist_entry;
struct ins_ops;
+struct map;
+struct map_symbol;
+struct addr_map_symbol;
+struct option;
+struct perf_sample;
+struct perf_evsel;
+struct symbol;
struct ins {
const char *name;
@@ -51,14 +59,14 @@ struct ins_ops {
void (*free)(struct ins_operands *ops);
int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
int (*scnprintf)(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
};
bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
bool ins__is_ret(const struct ins *ins);
bool ins__is_lock(const struct ins *ins);
-int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
+int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops, int max_ins_name);
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
#define ANNOTATION__IPC_WIDTH 6
@@ -211,7 +219,7 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
struct perf_evsel *evsel,
bool show_freq);
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
@@ -281,6 +289,7 @@ struct annotation {
u8 target;
u8 min_addr;
u8 max_addr;
+ u8 max_ins_name;
} widths;
bool have_cycles;
struct annotated_source *src;
@@ -360,6 +369,7 @@ enum symbol_disassemble_errno {
__SYMBOL_ANNOTATE_ERRNO__START = -10000,
SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
+ SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
__SYMBOL_ANNOTATE_ERRNO__END,
};
diff --git a/tools/perf/util/archinsn.h b/tools/perf/util/archinsn.h
new file mode 100644
index 000000000000..448cbb6b8d7e
--- /dev/null
+++ b/tools/perf/util/archinsn.h
@@ -0,0 +1,12 @@
+#ifndef INSN_H
+#define INSN_H 1
+
+struct perf_sample;
+struct machine;
+struct thread;
+
+void arch_fetch_insn(struct perf_sample *sample,
+ struct thread *thread,
+ struct machine *machine);
+
+#endif
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index f69961c4a4f3..fb76b6b232d4 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -27,6 +27,7 @@
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
+#include <linux/time64.h>
#include <sys/param.h>
#include <stdlib.h>
@@ -41,6 +42,7 @@
#include "pmu.h"
#include "evsel.h"
#include "cpumap.h"
+#include "symbol.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"
@@ -857,7 +859,7 @@ void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg)
+ const char *msg, u64 timestamp)
{
size_t size;
@@ -869,7 +871,9 @@ void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
auxtrace_error->cpu = cpu;
auxtrace_error->pid = pid;
auxtrace_error->tid = tid;
+ auxtrace_error->fmt = 1;
auxtrace_error->ip = ip;
+ auxtrace_error->time = timestamp;
strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
@@ -1159,12 +1163,27 @@ static const char *auxtrace_error_name(int type)
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
struct auxtrace_error_event *e = &event->auxtrace_error;
+ unsigned long long nsecs = e->time;
+ const char *msg = e->msg;
int ret;
ret = fprintf(fp, " %s error type %u",
auxtrace_error_name(e->type), e->type);
+
+ if (e->fmt && nsecs) {
+ unsigned long secs = nsecs / NSEC_PER_SEC;
+
+ nsecs -= secs * NSEC_PER_SEC;
+ ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
+ } else {
+ ret += fprintf(fp, " time 0");
+ }
+
+ if (!e->fmt)
+ msg = (const char *)&e->time;
+
ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
- e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
+ e->cpu, e->pid, e->tid, e->ip, e->code, msg);
return ret;
}
@@ -1278,9 +1297,9 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
}
/* padding must be written by fn() e.g. record__process_auxtrace() */
- padding = size & 7;
+ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
if (padding)
- padding = 8 - padding;
+ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
memset(&ev, 0, sizeof(ev));
ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
@@ -1899,7 +1918,8 @@ static struct dso *load_dso(const char *name)
if (!map)
return NULL;
- map__load(map);
+ if (map__load(map) < 0)
+ pr_err("File '%s' not found or has no symbols.\n", name);
dso = dso__get(map->dso);
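
The error-event reader above is backward compatible: old producers wrote the
message where the new "time" field now lives, so when fmt == 0 the message
pointer is rewound to &e->time. A standalone sketch of that fmt-gated layout
(a simplified stand-in struct, not the real perf event layout):

#include <stdio.h>
#include <string.h>

struct err_rec {			/* simplified stand-in layout */
	unsigned int fmt;		/* 0: legacy record, 1: has time */
	unsigned long long time;
	char msg[64];
};

static const char *rec_msg(const struct err_rec *e)
{
	/* legacy producers wrote the message where "time" now lives */
	return e->fmt ? e->msg : (const char *)&e->time;
}

int main(void)
{
	struct err_rec legacy = { .fmt = 0 };
	struct err_rec modern = { .fmt = 1, .time = 123456789ULL };

	memcpy(&legacy.time, "no data", 8);	/* 8 bytes incl. NUL */
	strcpy(modern.msg, "trace gap");

	printf("%s / %s\n", rec_msg(&legacy), rec_msg(&modern));
	return 0;
}
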
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 8e50f96d4b23..c69bcd9a3091 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -40,6 +40,9 @@ struct record_opts;
struct auxtrace_info_event;
struct events_stats;
+/* Auxtrace records must have the same alignment as perf event records */
+#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
+
enum auxtrace_type {
PERF_AUXTRACE_UNKNOWN,
PERF_AUXTRACE_INTEL_PT,
@@ -516,7 +519,7 @@ void auxtrace_index__free(struct list_head *head);
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg);
+ const char *msg, u64 timestamp);
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
diff --git a/tools/perf/util/block-range.c b/tools/perf/util/block-range.c
index f1451c987eec..1be432657501 100644
--- a/tools/perf/util/block-range.c
+++ b/tools/perf/util/block-range.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "block-range.h"
#include "annotate.h"
+#include <assert.h>
+#include <stdlib.h>
struct {
struct rb_root root;
diff --git a/tools/perf/util/block-range.h b/tools/perf/util/block-range.h
index a5ba719d69fb..ec0fb534bf56 100644
--- a/tools/perf/util/block-range.h
+++ b/tools/perf/util/block-range.h
@@ -2,7 +2,11 @@
#ifndef __PERF_BLOCK_RANGE_H
#define __PERF_BLOCK_RANGE_H
-#include "symbol.h"
+#include <stdbool.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+struct symbol;
/*
* struct block_range - non-overlapping parts of basic blocks
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
new file mode 100644
index 000000000000..2a4a0da35632
--- /dev/null
+++ b/tools/perf/util/bpf-event.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <stdlib.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
+#include <linux/err.h>
+#include "bpf-event.h"
+#include "debug.h"
+#include "symbol.h"
+#include "machine.h"
+#include "env.h"
+#include "session.h"
+#include "map.h"
+#include "evlist.h"
+
+#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
+
+static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
+{
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
+ return ret;
+}
+
+static int machine__process_bpf_event_load(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct perf_env *env = machine->env;
+ int id = event->bpf_event.id;
+ unsigned int i;
+
+ /* perf-record, no need to handle bpf-event */
+ if (env == NULL)
+ return 0;
+
+ info_node = perf_env__find_bpf_prog_info(env, id);
+ if (!info_node)
+ return 0;
+ info_linear = info_node->info_linear;
+
+ for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
+ u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
+ u64 addr = addrs[i];
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, addr);
+
+ if (map) {
+ map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
+ map->dso->bpf_prog.id = id;
+ map->dso->bpf_prog.sub_id = i;
+ map->dso->bpf_prog.env = env;
+ }
+ }
+ return 0;
+}
+
+int machine__process_bpf_event(struct machine *machine __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_bpf_event(event, stdout);
+
+ switch (event->bpf_event.type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+ return machine__process_bpf_event_load(machine, event, sample);
+
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ /*
+ * Do not free bpf_prog_info and btf of the program here,
+ * as annotation still needs them. They will be freed at
+ * the end of the session.
+ */
+ break;
+ default:
+ pr_debug("unexpected bpf_event type of %d\n",
+ event->bpf_event.type);
+ break;
+ }
+ return 0;
+}
+
+static int perf_env__fetch_btf(struct perf_env *env,
+ u32 btf_id,
+ struct btf *btf)
+{
+ struct btf_node *node;
+ u32 data_size;
+ const void *data;
+
+ data = btf__get_raw_data(btf, &data_size);
+
+ node = malloc(data_size + sizeof(struct btf_node));
+ if (!node)
+ return -1;
+
+ node->id = btf_id;
+ node->data_size = data_size;
+ memcpy(node->data, data, data_size);
+
+ perf_env__insert_btf(env, node);
+ return 0;
+}
+
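
perf_env__fetch_btf() above stores the BTF blob and its header in a single
allocation; assuming struct btf_node ends in a flexible array member (as the
malloc-size arithmetic suggests), the pattern looks like this standalone
sketch with an illustrative "blob" type:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct blob {
	unsigned int id;
	unsigned int data_size;
	char data[];	/* flexible array member, contributes 0 to sizeof */
};

static struct blob *blob_new(unsigned int id, const void *src, unsigned int n)
{
	struct blob *b = malloc(sizeof(*b) + n);	/* header + payload */

	if (!b)
		return NULL;
	b->id = id;
	b->data_size = n;
	memcpy(b->data, src, n);
	return b;
}

int main(void)
{
	const char raw[] = "btf-bytes";
	struct blob *b = blob_new(7, raw, sizeof(raw));

	if (!b)
		return 1;
	printf("id %u, %u bytes: %s\n", b->id, b->data_size, b->data);
	free(b);
	return 0;
}
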
+static int synthesize_bpf_prog_name(char *buf, int size,
+ struct bpf_prog_info *info,
+ struct btf *btf,
+ u32 sub_id)
+{
+ u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
+ void *func_infos = (void *)(uintptr_t)(info->func_info);
+ u32 sub_prog_cnt = info->nr_jited_ksyms;
+ const struct bpf_func_info *finfo;
+ const char *short_name = NULL;
+ const struct btf_type *t;
+ int name_len;
+
+ name_len = snprintf(buf, size, "bpf_prog_");
+ name_len += snprintf_hex(buf + name_len, size - name_len,
+ prog_tags[sub_id], BPF_TAG_SIZE);
+ if (btf) {
+ finfo = func_infos + sub_id * info->func_info_rec_size;
+ t = btf__type_by_id(btf, finfo->type_id);
+ short_name = btf__name_by_offset(btf, t->name_off);
+ } else if (sub_id == 0 && sub_prog_cnt == 1) {
+ /* no subprog */
+ if (info->name[0])
+ short_name = info->name;
+ } else
+ short_name = "F";
+ if (short_name)
+ name_len += snprintf(buf + name_len, size - name_len,
+ "_%s", short_name);
+ return name_len;
+}
+
+/*
+ * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
+ * program: one PERF_RECORD_BPF_EVENT for the program itself, and one
+ * PERF_RECORD_KSYMBOL for each sub-program.
+ *
+ * Returns:
+ * 0 for success;
+ * -1 for failures;
+ * -2 for lack of kernel support.
+ */
+static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
+ perf_event__handler_t process,
+ struct machine *machine,
+ int fd,
+ union perf_event *event,
+ struct record_opts *opts)
+{
+ struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
+ struct bpf_event *bpf_event = &event->bpf_event;
+ struct bpf_prog_info_linear *info_linear;
+ struct perf_tool *tool = session->tool;
+ struct bpf_prog_info_node *info_node;
+ struct bpf_prog_info *info;
+ struct btf *btf = NULL;
+ struct perf_env *env;
+ u32 sub_prog_cnt, i;
+ int err = 0;
+ u64 arrays;
+
+ /*
+ * For perf-record and perf-report, use header.env;
+ * otherwise, use the global perf_env.
+ */
+ env = session->data ? &session->header.env : &perf_env;
+
+ arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
+
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ info_linear = NULL;
+ pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+ return -1;
+ }
+
+ if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+ pr_debug("%s: the kernel is too old, aborting\n", __func__);
+ free(info_linear);
+ return -2;
+ }
+
+ info = &info_linear->info;
+
+ /* number of ksyms, func_lengths, and tags should match */
+ sub_prog_cnt = info->nr_jited_ksyms;
+ if (sub_prog_cnt != info->nr_prog_tags ||
+ sub_prog_cnt != info->nr_jited_func_lens) {
+ err = -1;
+ goto out;
+ }
+
+ /* check BTF func info support */
+ if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
+ /* btf func info count should be the same as sub_prog_cnt */
+ if (sub_prog_cnt != info->nr_func_info) {
+ pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
+ err = -1;
+ goto out;
+ }
+ if (btf__get_from_id(info->btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
+ err = -1;
+ btf = NULL;
+ goto out;
+ }
+ perf_env__fetch_btf(env, info->btf_id, btf);
+ }
+
+ /* Synthesize PERF_RECORD_KSYMBOL */
+ for (i = 0; i < sub_prog_cnt; i++) {
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+ int name_len;
+
+ *ksymbol_event = (struct ksymbol_event){
+ .header = {
+ .type = PERF_RECORD_KSYMBOL,
+ .size = offsetof(struct ksymbol_event, name),
+ },
+ .addr = prog_addrs[i],
+ .len = prog_lens[i],
+ .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
+ .flags = 0,
+ };
+
+ name_len = synthesize_bpf_prog_name(ksymbol_event->name,
+ KSYM_NAME_LEN, info, btf, i);
+ ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
+ sizeof(u64));
+
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+ err = perf_tool__process_synth_event(tool, event,
+ machine, process);
+ }
+
+ if (!opts->no_bpf_event) {
+ /* Synthesize PERF_RECORD_BPF_EVENT */
+ *bpf_event = (struct bpf_event){
+ .header = {
+ .type = PERF_RECORD_BPF_EVENT,
+ .size = sizeof(struct bpf_event),
+ },
+ .type = PERF_BPF_EVENT_PROG_LOAD,
+ .flags = 0,
+ .id = info->id,
+ };
+ memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+
+ /* save bpf_prog_info to env */
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (!info_node) {
+ err = -1;
+ goto out;
+ }
+
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ info_linear = NULL;
+
+ /*
+		 * process after saving bpf_prog_info to env, so that
+		 * the required information is ready for lookup
+ */
+ err = perf_tool__process_synth_event(tool, event,
+ machine, process);
+ }
+
+out:
+ free(info_linear);
+	btf__free(btf);	/* a struct btf must be freed with btf__free() */
+ return err ? -1 : 0;
+}
+
+int perf_event__synthesize_bpf_events(struct perf_session *session,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct record_opts *opts)
+{
+ union perf_event *event;
+ __u32 id = 0;
+ int err;
+ int fd;
+
+ event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
+ if (!event)
+ return -1;
+ while (true) {
+ err = bpf_prog_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT) {
+ err = 0;
+ break;
+ }
+ pr_debug("%s: can't get next program: %s%s\n",
+ __func__, strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ /* don't report error on old kernel or EPERM */
+ err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
+ break;
+ }
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0) {
+ pr_debug("%s: failed to get fd for prog_id %u\n",
+ __func__, id);
+ continue;
+ }
+
+ err = perf_event__synthesize_one_bpf_prog(session, process,
+ machine, fd,
+ event, opts);
+ close(fd);
+ if (err) {
+ /* do not return error for old kernel */
+ if (err == -2)
+ err = 0;
+ break;
+ }
+ }
+ free(event);
+ return err;
+}
+
+static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct btf *btf = NULL;
+ u64 arrays;
+ u32 btf_id;
+ int fd;
+
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0)
+ return;
+
+ arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
+
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+ goto out;
+ }
+
+ btf_id = info_linear->info.btf_id;
+
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (info_node) {
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+	} else {
+		free(info_linear);
+	}
+
+ if (btf_id == 0)
+ goto out;
+
+ if (btf__get_from_id(btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n",
+ __func__, btf_id);
+ goto out;
+ }
+ perf_env__fetch_btf(env, btf_id, btf);
+
+out:
+	btf__free(btf);
+ close(fd);
+}
+
+static int bpf_event__sb_cb(union perf_event *event, void *data)
+{
+ struct perf_env *env = data;
+
+ if (event->header.type != PERF_RECORD_BPF_EVENT)
+ return -1;
+
+ switch (event->bpf_event.type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+		perf_env__add_bpf_info(env, event->bpf_event.id);
+		/* fall through */
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ /*
+ * Do not free bpf_prog_info and btf of the program here,
+		 * as annotation still needs them. They will be freed at
+ * the end of the session.
+ */
+ break;
+ default:
+ pr_debug("unexpected bpf_event type of %d\n",
+ event->bpf_event.type);
+ break;
+ }
+
+ return 0;
+}
+
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+ struct perf_env *env)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .sample_id_all = 1,
+ .watermark = 1,
+ .bpf_event = 1,
+ .size = sizeof(attr), /* to capture ABI version */
+ };
+
+ /*
+	 * Older gcc versions don't support designated initializers for
+	 * unnamed union members, such as wakeup_watermark, so set it
+	 * separately here:
+ */
+ attr.wakeup_watermark = 1;
+
+ return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+}
+
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp)
+{
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+ char name[KSYM_NAME_LEN];
+ struct btf *btf = NULL;
+ u32 sub_prog_cnt, i;
+
+ sub_prog_cnt = info->nr_jited_ksyms;
+ if (sub_prog_cnt != info->nr_prog_tags ||
+ sub_prog_cnt != info->nr_jited_func_lens)
+ return;
+
+ if (info->btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(env, info->btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+	if (sub_prog_cnt == 1) {
+		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
+		fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
+			info->id, name, prog_addrs[0], prog_lens[0]);
+		goto out;
+	}
+
+	fprintf(fp, "# bpf_prog_info %u:\n", info->id);
+	for (i = 0; i < sub_prog_cnt; i++) {
+		synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
+
+		fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
+			i, name, prog_addrs[i], prog_lens[i]);
+	}
+out:
+	btf__free(btf);	/* btf__new() result must go through btf__free() */
+}
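The loop in perf_event__synthesize_bpf_events() above is the standard libbpf idiom for walking all loaded BPF programs by id. A minimal standalone sketch of the same idiom (the helper name list_bpf_prog_ids is hypothetical; the libbpf calls are the ones used above):

#include <bpf/bpf.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper: print the id of every loaded BPF program. */
int list_bpf_prog_ids(void)
{
	__u32 id = 0;

	while (bpf_prog_get_next_id(id, &id) == 0) {
		int fd = bpf_prog_get_fd_by_id(id);

		if (fd < 0)	/* prog may have been unloaded meanwhile */
			continue;
		printf("prog id %u\n", id);
		close(fd);
	}
	/* bpf_prog_get_next_id() fails with errno == ENOENT at the end */
	return errno == ENOENT ? 0 : -1;
}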
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
new file mode 100644
index 000000000000..04c33b3bfe28
--- /dev/null
+++ b/tools/perf/util/bpf-event.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_BPF_EVENT_H
+#define __PERF_BPF_EVENT_H
+
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <pthread.h>
+#include <api/fd/array.h>
+#include "event.h"
+#include <stdio.h>
+
+struct machine;
+union perf_event;
+struct perf_env;
+struct perf_sample;
+struct record_opts;
+struct evlist;
+struct target;
+
+struct bpf_prog_info_node {
+ struct bpf_prog_info_linear *info_linear;
+ struct rb_node rb_node;
+};
+
+struct btf_node {
+ struct rb_node rb_node;
+ u32 id;
+ u32 data_size;
+ char data[];
+};
+
+#ifdef HAVE_LIBBPF_SUPPORT
+int machine__process_bpf_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+
+int perf_event__synthesize_bpf_events(struct perf_session *session,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct record_opts *opts);
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+ struct perf_env *env);
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp);
+#else
+static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused)
+{
+ return 0;
+}
+
+static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct record_opts *opts __maybe_unused)
+{
+ return 0;
+}
+
+static inline int bpf_event__add_sb_event(struct perf_evlist **evlist __maybe_unused,
+ struct perf_env *env __maybe_unused)
+{
+ return 0;
+}
+
+static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+ struct perf_env *env __maybe_unused,
+ FILE *fp __maybe_unused)
+{
+
+}
+#endif // HAVE_LIBBPF_SUPPORT
+#endif
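The #ifdef HAVE_LIBBPF_SUPPORT / static inline pairing above is perf's usual pattern for optional features: callers invoke the functions unconditionally and the stubs compile away when libbpf is absent. A minimal sketch of the same pattern for a hypothetical feature FOO:

struct perf_session;

#ifdef HAVE_FOO_SUPPORT
int foo_synthesize(struct perf_session *session);
#else
static inline int foo_synthesize(struct perf_session *session __maybe_unused)
{
	return 0;	/* quietly succeed when the feature is compiled out */
}
#endif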
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 2f3eb6d293ee..251d9ea6252f 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -15,6 +15,7 @@
#include <errno.h>
#include "perf.h"
#include "debug.h"
+#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
@@ -24,22 +25,12 @@
#include "llvm-utils.h"
#include "c++/clang-c.h"
-#define DEFINE_PRINT_FN(name, level) \
-static int libbpf_##name(const char *fmt, ...) \
-{ \
- va_list args; \
- int ret; \
- \
- va_start(args, fmt); \
- ret = veprintf(level, verbose, pr_fmt(fmt), args);\
- va_end(args); \
- return ret; \
+static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
+ const char *fmt, va_list args)
+{
+ return veprintf(1, verbose, pr_fmt(fmt), args);
}
-DEFINE_PRINT_FN(warning, 1)
-DEFINE_PRINT_FN(info, 1)
-DEFINE_PRINT_FN(debug, 1)
-
struct bpf_prog_priv {
bool is_tp;
char *sys_name;
@@ -59,9 +50,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
struct bpf_object *obj;
if (!libbpf_initialized) {
- libbpf_set_print(libbpf_warning,
- libbpf_info,
- libbpf_debug);
+ libbpf_set_print(libbpf_perf_print);
libbpf_initialized = true;
}
@@ -79,9 +68,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
struct bpf_object *obj;
if (!libbpf_initialized) {
- libbpf_set_print(libbpf_warning,
- libbpf_info,
- libbpf_debug);
+ libbpf_set_print(libbpf_perf_print);
libbpf_initialized = true;
}
@@ -1503,7 +1490,7 @@ apply_obj_config_object(struct bpf_object *obj)
struct bpf_map *map;
int err;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
err = apply_obj_config_map(map);
if (err)
return err;
@@ -1527,7 +1514,7 @@ int bpf__apply_obj_config(void)
#define bpf__for_each_map(pos, obj, objtmp) \
bpf_object__for_each_safe(obj, objtmp) \
- bpf_map__for_each(pos, obj)
+ bpf_object__for_each_map(pos, obj)
#define bpf__for_each_map_named(pos, obj, objtmp, name) \
bpf__for_each_map(pos, obj, objtmp) \
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index 62d245a90e1d..3f46856e3330 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -8,11 +8,7 @@
#include <linux/compiler.h>
#include <linux/err.h>
-#include <string.h>
#include <bpf/libbpf.h>
-#include "probe-event.h"
-#include "evlist.h"
-#include "debug.h"
enum bpf_loader_errno {
__BPF_LOADER_ERRNO__START = __LIBBPF_ERRNO__START - 100,
@@ -44,6 +40,7 @@ enum bpf_loader_errno {
};
struct perf_evsel;
+struct perf_evlist;
struct bpf_object;
struct parse_events_term;
#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
@@ -87,6 +84,8 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha
int bpf__strerror_setup_output_event(struct perf_evlist *evlist, int err, char *buf, size_t size);
#else
#include <errno.h>
+#include <string.h>
+#include "debug.h"
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused,
diff --git a/tools/perf/util/bpf_map.c b/tools/perf/util/bpf_map.c
new file mode 100644
index 000000000000..eb853ca67cf4
--- /dev/null
+++ b/tools/perf/util/bpf_map.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "util/bpf_map.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <errno.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+static bool bpf_map_def__is_per_cpu(const struct bpf_map_def *def)
+{
+ return def->type == BPF_MAP_TYPE_PERCPU_HASH ||
+ def->type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+ def->type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+ def->type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
+}
+
+static void *bpf_map_def__alloc_value(const struct bpf_map_def *def)
+{
+ if (bpf_map_def__is_per_cpu(def))
+ return malloc(round_up(def->value_size, 8) * sysconf(_SC_NPROCESSORS_CONF));
+
+ return malloc(def->value_size);
+}
+
+int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
+{
+ const struct bpf_map_def *def = bpf_map__def(map);
+ void *prev_key = NULL, *key, *value;
+ int fd = bpf_map__fd(map), err;
+ int printed = 0;
+
+ if (fd < 0)
+ return fd;
+
+ if (IS_ERR(def))
+ return PTR_ERR(def);
+
+ err = -ENOMEM;
+ key = malloc(def->key_size);
+ if (key == NULL)
+ goto out;
+
+ value = bpf_map_def__alloc_value(def);
+ if (value == NULL)
+ goto out_free_key;
+
+	while ((err = bpf_map_get_next_key(fd, prev_key, key)) == 0) {
+ int intkey = *(int *)key;
+
+ if (!bpf_map_lookup_elem(fd, key, value)) {
+ bool boolval = *(bool *)value;
+ if (boolval)
+ printed += fprintf(fp, "[%d] = %d,\n", intkey, boolval);
+ } else {
+ printed += fprintf(fp, "[%d] = ERROR,\n", intkey);
+ }
+
+ prev_key = key;
+ }
+
+	/* bpf_map_get_next_key() fails with errno == ENOENT when done */
+	if (err == -1 && errno == ENOENT)
+		err = printed;
+
+ free(value);
+out_free_key:
+ free(key);
+out:
+ return err;
+}
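A hedged usage sketch for bpf_map__fprintf(): dump every map of an already-loaded bpf_object to stderr, using the bpf_object__for_each_map() iterator that this patch switches bpf-loader.c to. The helper name dump_all_maps is illustrative:

#include <stdio.h>
#include <bpf/libbpf.h>
#include "util/bpf_map.h"

static void dump_all_maps(struct bpf_object *obj)
{
	struct bpf_map *map;

	bpf_object__for_each_map(map, obj)
		bpf_map__fprintf(map, stderr);
}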
diff --git a/tools/perf/util/bpf_map.h b/tools/perf/util/bpf_map.h
new file mode 100644
index 000000000000..d6abd5e47af8
--- /dev/null
+++ b/tools/perf/util/bpf_map.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#ifndef __PERF_BPF_MAP_H
+#define __PERF_BPF_MAP_H 1
+
+#include <stdio.h>
+#include <linux/compiler.h>
+struct bpf_map;
+
+#ifdef HAVE_LIBBPF_SUPPORT
+
+int bpf_map__fprintf(struct bpf_map *map, FILE *fp);
+
+#else
+
+static inline int bpf_map__fprintf(struct bpf_map *map __maybe_unused, FILE *fp __maybe_unused)
+{
+ return 0;
+}
+
+#endif // HAVE_LIBBPF_SUPPORT
+
+#endif // __PERF_BPF_MAP_H
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 1e3c7c5cdc63..64f96b79f1d7 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -1,8 +1,31 @@
#ifndef _PERF_BRANCH_H
#define _PERF_BRANCH_H 1
+#include <stdio.h>
#include <stdint.h>
-#include "../perf.h"
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+struct branch_flags {
+ u64 mispred:1;
+ u64 predicted:1;
+ u64 in_tx:1;
+ u64 abort:1;
+ u64 cycles:16;
+ u64 type:4;
+ u64 reserved:40;
+};
+
+struct branch_entry {
+ u64 from;
+ u64 to;
+ struct branch_flags flags;
+};
+
+struct branch_stack {
+ u64 nr;
+ struct branch_entry entries[0];
+};
struct branch_type_stat {
bool branch_to;
@@ -13,8 +36,6 @@ struct branch_type_stat {
u64 cross_2m;
};
-struct branch_flags;
-
void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags,
u64 from, u64 to);
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 04b1d53e4bf9..0c5517a8d0b7 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -15,6 +15,8 @@
#include <sys/types.h>
#include "build-id.h"
#include "event.h"
+#include "namespaces.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include <linux/kernel.h>
@@ -183,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
return bf;
}
+/* The caller is responsible for freeing the returned buffer. */
char *build_id_cache__origname(const char *sbuild_id)
{
char *linkname;
@@ -363,7 +366,8 @@ int perf_session__write_buildid_table(struct perf_session *session,
if (err)
return err;
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
@@ -396,7 +400,8 @@ int dsos__hit_all(struct perf_session *session)
if (err)
return err;
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__hit_all_dsos(pos);
@@ -849,7 +854,8 @@ int perf_session__cache_build_ids(struct perf_session *session)
ret = machine__cache_build_ids(&session->machines.host);
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__cache_build_ids(pos);
}
@@ -866,7 +872,8 @@ bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
struct rb_node *nd;
bool ret = machine__read_build_ids(&session->machines.host, with_hits);
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}
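The rb_first() to rb_first_cached() conversions above track the guests tree's move to struct rb_root_cached, which caches the leftmost node so iteration starts in O(1) rather than descending the tree each time. The loop shape is otherwise unchanged; a sketch, assuming machines->guests is an rb_root_cached:

struct rb_node *nd;

for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
	struct machine *pos = rb_entry(nd, struct machine, rb_node);
	/* per-machine work here */
}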
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index f0c565164a97..93668f38f1ed 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -6,9 +6,10 @@
#define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1)
#include "tool.h"
-#include "namespaces.h"
#include <linux/types.h>
+struct nsinfo;
+
extern struct perf_tool build_id__mark_dso_hit_ops;
struct dso;
struct feat_fd;
diff --git a/tools/perf/util/c++/Build b/tools/perf/util/c++/Build
index 988fef1b11d7..613ecfd76527 100644
--- a/tools/perf/util/c++/Build
+++ b/tools/perf/util/c++/Build
@@ -1,2 +1,2 @@
-libperf-$(CONFIG_CLANGLLVM) += clang.o
-libperf-$(CONFIG_CLANGLLVM) += clang-test.o
+perf-$(CONFIG_CLANGLLVM) += clang.o
+perf-$(CONFIG_CLANGLLVM) += clang-test.o
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index 89512504551b..fc361c3f8570 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -156,11 +156,11 @@ getBPFObjectFromModule(llvm::Module *Module)
#endif
if (NotAdded) {
llvm::errs() << "TargetMachine can't emit a file of this type\n";
- return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);;
+ return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
}
PM.run(*Module);
- return std::move(Buffer);
+ return Buffer;
}
}
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index dc2212e12184..abb608b09269 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -23,8 +23,10 @@
#include "util.h"
#include "sort.h"
#include "machine.h"
+#include "map.h"
#include "callchain.h"
#include "branch.h"
+#include "symbol.h"
#define CALLCHAIN_PARAM_DEFAULT \
.mode = CHAIN_GRAPH_ABS, \
@@ -1577,3 +1579,18 @@ int callchain_cursor__copy(struct callchain_cursor *dst,
return rc;
}
+
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+ struct callchain_cursor_node *node;
+
+ cursor->nr = 0;
+ cursor->last = &cursor->first;
+
+ for (node = cursor->first; node != NULL; node = node->next)
+ map__zput(node->map);
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 99d38ac019b8..80e056a3d882 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -2,14 +2,14 @@
#ifndef __PERF_CALLCHAIN_H
#define __PERF_CALLCHAIN_H
-#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
-#include "map.h"
-#include "symbol.h"
+#include "map_symbol.h"
#include "branch.h"
+struct map;
+
#define HELP_PAD "\t\t\t\t"
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace):\n\n"
@@ -188,20 +188,7 @@ int callchain_append(struct callchain_root *root,
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
-/*
- * Initialize a cursor before adding entries inside, but keep
- * the previously allocated entries as a cache.
- */
-static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
-{
- struct callchain_cursor_node *node;
-
- cursor->nr = 0;
- cursor->last = &cursor->first;
-
- for (node = cursor->first; node != NULL; node = node->next)
- map__zput(node->map);
-}
+void callchain_cursor_reset(struct callchain_cursor *cursor);
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
struct map *map, struct symbol *sym,
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 39e628b8938e..39b8c4ec4e2e 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include "cache.h"
-#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include "color.h"
@@ -10,44 +9,6 @@
int perf_use_color_default = -1;
-int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
-{
- if (value) {
- if (!strcasecmp(value, "never"))
- return 0;
- if (!strcasecmp(value, "always"))
- return 1;
- if (!strcasecmp(value, "auto"))
- goto auto_color;
- }
-
- /* Missing or explicit false to turn off colorization */
- if (!perf_config_bool(var, value))
- return 0;
-
- /* any normal truth value defaults to 'auto' */
- auto_color:
- if (stdout_is_tty < 0)
- stdout_is_tty = isatty(1);
- if (stdout_is_tty || pager_in_use()) {
- char *term = getenv("TERM");
- if (term && strcmp(term, "dumb"))
- return 1;
- }
- return 0;
-}
-
-int perf_color_default_config(const char *var, const char *value,
- void *cb __maybe_unused)
-{
- if (!strcmp(var, "color.ui")) {
- perf_use_color_default = perf_config_colorbool(var, value, -1);
- return 0;
- }
-
- return 0;
-}
-
static int __color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args, const char *trail)
{
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 22777b1812ee..01f7bed21c9b 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -3,6 +3,7 @@
#define __PERF_COLOR_H
#include <stdio.h>
+#include <stdarg.h>
/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
#define COLOR_MAXLEN 24
diff --git a/tools/perf/util/color_config.c b/tools/perf/util/color_config.c
new file mode 100644
index 000000000000..817dc56e7e95
--- /dev/null
+++ b/tools/perf/util/color_config.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include "cache.h"
+#include "config.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include "color.h"
+#include <math.h>
+#include <unistd.h>
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
+{
+ if (value) {
+ if (!strcasecmp(value, "never"))
+ return 0;
+ if (!strcasecmp(value, "always"))
+ return 1;
+ if (!strcasecmp(value, "auto"))
+ goto auto_color;
+ }
+
+ /* Missing or explicit false to turn off colorization */
+ if (!perf_config_bool(var, value))
+ return 0;
+
+ /* any normal truth value defaults to 'auto' */
+ auto_color:
+ if (stdout_is_tty < 0)
+ stdout_is_tty = isatty(1);
+ if (stdout_is_tty || pager_in_use()) {
+ char *term = getenv("TERM");
+ if (term && strcmp(term, "dumb"))
+ return 1;
+ }
+ return 0;
+}
+
+int perf_color_default_config(const char *var, const char *value,
+ void *cb __maybe_unused)
+{
+ if (!strcmp(var, "color.ui")) {
+ perf_use_color_default = perf_config_colorbool(var, value, -1);
+ return 0;
+ }
+
+ return 0;
+}
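A behavior sketch for perf_config_colorbool(), assuming stdout is not a tty and no pager is in use; the asserts are illustrative, not a test in the tree:

assert(perf_config_colorbool("color.ui", "never", -1) == 0);
assert(perf_config_colorbool("color.ui", "always", -1) == 1);
/* "auto" returns 1 only when stdout is a tty (or a pager is in
 * use) and $TERM is not "dumb": */
assert(perf_config_colorbool("color.ui", "auto", 0) == 0);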
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 31279a7bd919..1066de92af12 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -6,6 +6,7 @@
#include <stdio.h>
#include <string.h>
#include <linux/refcount.h>
+#include <linux/rbtree.h>
#include "rwsem.h"
struct comm_str {
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
index 3e5c438fe85e..f35d8fbfa2dd 100644
--- a/tools/perf/util/comm.h
+++ b/tools/perf/util/comm.h
@@ -2,9 +2,9 @@
#ifndef __PERF_COMM_H
#define __PERF_COMM_H
-#include "../perf.h"
-#include <linux/rbtree.h>
#include <linux/list.h>
+#include <linux/types.h>
+#include <stdbool.h>
struct comm_str;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 1ea8f898f1a1..7e3c1b60120c 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -13,6 +13,7 @@
#include <sys/param.h>
#include "util.h"
#include "cache.h"
+#include "callchain.h"
#include <subcmd/exec-cmd.h>
#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
@@ -632,11 +633,10 @@ static int collect_config(const char *var, const char *value,
}
ret = set_value(item, value);
- return ret;
out_free:
free(key);
- return -1;
+ return ret;
}
int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
diff --git a/tools/perf/util/cpu-set-sched.h b/tools/perf/util/cpu-set-sched.h
new file mode 100644
index 000000000000..8cf4e40d322a
--- /dev/null
+++ b/tools/perf/util/cpu-set-sched.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: LGPL-2.1
+// Definitions taken from glibc for use with older systems, same licensing.
+#ifndef _CPU_SET_SCHED_PERF_H
+#define _CPU_SET_SCHED_PERF_H
+
+#include <features.h>
+#include <sched.h>
+
+#ifndef CPU_EQUAL
+#ifndef __CPU_EQUAL_S
+#if __GNUC_PREREQ (2, 91)
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__builtin_memcmp (cpusetp1, cpusetp2, setsize) == 0)
+#else
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__extension__ \
+ ({ const __cpu_mask *__arr1 = (cpusetp1)->__bits; \
+ const __cpu_mask *__arr2 = (cpusetp2)->__bits; \
+ size_t __imax = (setsize) / sizeof (__cpu_mask); \
+ size_t __i; \
+ for (__i = 0; __i < __imax; ++__i) \
+ if (__arr1[__i] != __arr2[__i]) \
+ break; \
+ __i == __imax; }))
+#endif
+#endif // __CPU_EQUAL_S
+
+#define CPU_EQUAL(cpusetp1, cpusetp2) \
+ __CPU_EQUAL_S (sizeof (cpu_set_t), cpusetp1, cpusetp2)
+#endif // CPU_EQUAL
+
+#ifndef CPU_OR
+#ifndef __CPU_OP_S
+#define __CPU_OP_S(setsize, destset, srcset1, srcset2, op) \
+ (__extension__ \
+ ({ cpu_set_t *__dest = (destset); \
+ const __cpu_mask *__arr1 = (srcset1)->__bits; \
+ const __cpu_mask *__arr2 = (srcset2)->__bits; \
+ size_t __imax = (setsize) / sizeof (__cpu_mask); \
+ size_t __i; \
+ for (__i = 0; __i < __imax; ++__i) \
+ ((__cpu_mask *) __dest->__bits)[__i] = __arr1[__i] op __arr2[__i]; \
+ __dest; }))
+#endif // __CPU_OP_S
+
+#define CPU_OR(destset, srcset1, srcset2) \
+ __CPU_OP_S (sizeof (cpu_set_t), destset, srcset1, srcset2, |)
+#endif // CPU_OR
+
+#endif // _CPU_SET_SCHED_PERF_H
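A minimal usage sketch for the glibc-compat macros above; CPU_OR here takes a destination plus two sources, matching the glibc definition. Building with _GNU_SOURCE is assumed for cpu_set_t and the standard CPU_* macros:

#define _GNU_SOURCE
#include <assert.h>
#include <sched.h>
#include "util/cpu-set-sched.h"

void cpu_set_demo(void)
{
	cpu_set_t a, b, both;

	CPU_ZERO(&a);
	CPU_SET(0, &a);
	CPU_ZERO(&b);
	CPU_SET(1, &b);

	CPU_OR(&both, &a, &b);	/* both = {0, 1} */
	assert(!CPU_EQUAL(&a, &b));
	assert(CPU_ISSET(0, &both) && CPU_ISSET(1, &both));
}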
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 1ccbd3342069..0b599229bc7e 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
if (!cpu_list)
return cpu_map__read_all_cpu_map();
- if (!isdigit(*cpu_list))
+ /*
+	 * Must handle the case of an empty cpumap to cover the
+	 * TOPOLOGY header for NUMA nodes with no CPUs
+	 * (e.g., because of CPU hotplug).
+ */
+ if (!isdigit(*cpu_list) && *cpu_list != '\0')
goto out;
while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else
+ else if (*cpu_list != '\0')
cpus = cpu_map__default_new();
+ else
+ cpus = cpu_map__dummy_new();
invalid:
free(tmp_cpus);
out:
@@ -674,7 +681,7 @@ size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
#undef COMMA
- pr_debug("cpumask list: %s\n", buf);
+ pr_debug2("cpumask list: %s\n", buf);
return ret;
}
@@ -723,3 +730,13 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
buf[size - 1] = '\0';
return ptr - buf;
}
+
+const struct cpu_map *cpu_map__online(void) /* thread unsafe */
+{
+ static const struct cpu_map *online = NULL;
+
+ if (!online)
+ online = cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
+
+ return online;
+}
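A short usage sketch for cpu_map__new(): it accepts cpulist syntax such as "0-2,7", NULL for the online map, and, with the change above, an empty string (as read from the cpulist of a CPU-less NUMA node) now yields a dummy map rather than the default all-CPU map:

struct cpu_map *map = cpu_map__new("0-2,7");	/* cpus 0, 1, 2 and 7 */

if (map) {
	int i;

	for (i = 0; i < map->nr; i++)
		printf("cpu %d\n", map->map[i]);
	cpu_map__put(map);	/* cpu maps are reference counted */
}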
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index ed8999d1a640..f00ce624b9f7 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -29,6 +29,7 @@ int cpu_map__get_core_id(int cpu);
int cpu_map__get_core(struct cpu_map *map, int idx, void *data);
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
+const struct cpu_map *cpu_map__online(void); /* thread unsafe */
struct cpu_map *cpu_map__get(struct cpu_map *map);
void cpu_map__put(struct cpu_map *map);
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
new file mode 100644
index 000000000000..ece0710249d4
--- /dev/null
+++ b/tools/perf/util/cputopo.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/param.h>
+#include <inttypes.h>
+#include <api/fs/fs.h>
+
+#include "cputopo.h"
+#include "cpumap.h"
+#include "util.h"
+#include "env.h"
+
+
+#define CORE_SIB_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
+#define THRD_SIB_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
+#define NODE_ONLINE_FMT \
+ "%s/devices/system/node/online"
+#define NODE_MEMINFO_FMT \
+ "%s/devices/system/node/node%d/meminfo"
+#define NODE_CPULIST_FMT \
+ "%s/devices/system/node/node%d/cpulist"
+
+static int build_cpu_topology(struct cpu_topology *tp, int cpu)
+{
+ FILE *fp;
+ char filename[MAXPATHLEN];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ ssize_t sret;
+ u32 i = 0;
+ int ret = -1;
+
+ scnprintf(filename, MAXPATHLEN, CORE_SIB_FMT,
+ sysfs__mountpoint(), cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto try_threads;
+
+ sret = getline(&buf, &len, fp);
+ fclose(fp);
+ if (sret <= 0)
+ goto try_threads;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->core_sib; i++) {
+ if (!strcmp(buf, tp->core_siblings[i]))
+ break;
+ }
+ if (i == tp->core_sib) {
+ tp->core_siblings[i] = buf;
+ tp->core_sib++;
+ buf = NULL;
+ len = 0;
+ }
+ ret = 0;
+
+try_threads:
+ scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
+ sysfs__mountpoint(), cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ if (!strcmp(buf, tp->thread_siblings[i]))
+ break;
+ }
+ if (i == tp->thread_sib) {
+ tp->thread_siblings[i] = buf;
+ tp->thread_sib++;
+ buf = NULL;
+ }
+ ret = 0;
+done:
+ if (fp)
+ fclose(fp);
+ free(buf);
+ return ret;
+}
+
+void cpu_topology__delete(struct cpu_topology *tp)
+{
+ u32 i;
+
+ if (!tp)
+ return;
+
+ for (i = 0 ; i < tp->core_sib; i++)
+ zfree(&tp->core_siblings[i]);
+
+ for (i = 0 ; i < tp->thread_sib; i++)
+ zfree(&tp->thread_siblings[i]);
+
+ free(tp);
+}
+
+struct cpu_topology *cpu_topology__new(void)
+{
+ struct cpu_topology *tp = NULL;
+ void *addr;
+ u32 nr, i;
+ size_t sz;
+ long ncpus;
+ int ret = -1;
+ struct cpu_map *map;
+
+ ncpus = cpu__max_present_cpu();
+
+ /* build online CPU map */
+ map = cpu_map__new(NULL);
+ if (map == NULL) {
+ pr_debug("failed to get system cpumap\n");
+ return NULL;
+ }
+
+ nr = (u32)(ncpus & UINT_MAX);
+
+ sz = nr * sizeof(char *);
+ addr = calloc(1, sizeof(*tp) + 2 * sz);
+ if (!addr)
+ goto out_free;
+
+ tp = addr;
+ addr += sizeof(*tp);
+ tp->core_siblings = addr;
+ addr += sz;
+ tp->thread_siblings = addr;
+
+ for (i = 0; i < nr; i++) {
+ if (!cpu_map__has(map, i))
+ continue;
+
+ ret = build_cpu_topology(tp, i);
+ if (ret < 0)
+ break;
+ }
+
+out_free:
+ cpu_map__put(map);
+ if (ret) {
+ cpu_topology__delete(tp);
+ tp = NULL;
+ }
+ return tp;
+}
+
+static int load_numa_node(struct numa_topology_node *node, int nr)
+{
+ char str[MAXPATHLEN];
+ char field[32];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ int ret = -1;
+ FILE *fp;
+ u64 mem;
+
+ node->node = (u32) nr;
+
+ scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
+ sysfs__mountpoint(), nr);
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ /* skip over invalid lines */
+ if (!strchr(buf, ':'))
+ continue;
+ if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
+ goto err;
+ if (!strcmp(field, "MemTotal:"))
+ node->mem_total = mem;
+ if (!strcmp(field, "MemFree:"))
+ node->mem_free = mem;
+ if (node->mem_total && node->mem_free)
+ break;
+ }
+
+ fclose(fp);
+ fp = NULL;
+
+ scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
+ sysfs__mountpoint(), nr);
+
+ fp = fopen(str, "r");
+	if (!fp)
+		goto err;	/* free 'buf' allocated by the first getline() */
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto err;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ node->cpus = buf;
+ fclose(fp);
+ return 0;
+
+err:
+ free(buf);
+ if (fp)
+ fclose(fp);
+ return ret;
+}
+
+struct numa_topology *numa_topology__new(void)
+{
+ struct cpu_map *node_map = NULL;
+ struct numa_topology *tp = NULL;
+ char path[MAXPATHLEN];
+ char *buf = NULL;
+ size_t len = 0;
+ u32 nr, i;
+ FILE *fp;
+ char *c;
+
+ scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
+ sysfs__mountpoint());
+
+ fp = fopen(path, "r");
+ if (!fp)
+ return NULL;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto out;
+
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ node_map = cpu_map__new(buf);
+ if (!node_map)
+ goto out;
+
+ nr = (u32) node_map->nr;
+
+ tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
+ if (!tp)
+ goto out;
+
+ tp->nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ if (load_numa_node(&tp->nodes[i], node_map->map[i])) {
+ numa_topology__delete(tp);
+ tp = NULL;
+ break;
+ }
+ }
+
+out:
+ free(buf);
+ fclose(fp);
+ cpu_map__put(node_map);
+ return tp;
+}
+
+void numa_topology__delete(struct numa_topology *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->nr; i++)
+ free(tp->nodes[i].cpus);
+
+ free(tp);
+}
diff --git a/tools/perf/util/cputopo.h b/tools/perf/util/cputopo.h
new file mode 100644
index 000000000000..47a97e71acdf
--- /dev/null
+++ b/tools/perf/util/cputopo.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_CPUTOPO_H
+#define __PERF_CPUTOPO_H
+
+#include <linux/types.h>
+#include "env.h"
+
+struct cpu_topology {
+ u32 core_sib;
+ u32 thread_sib;
+ char **core_siblings;
+ char **thread_siblings;
+};
+
+struct numa_topology_node {
+ char *cpus;
+ u32 node;
+ u64 mem_total;
+ u64 mem_free;
+};
+
+struct numa_topology {
+ u32 nr;
+ struct numa_topology_node nodes[0];
+};
+
+struct cpu_topology *cpu_topology__new(void);
+void cpu_topology__delete(struct cpu_topology *tp);
+
+struct numa_topology *numa_topology__new(void);
+void numa_topology__delete(struct numa_topology *tp);
+
+#endif /* __PERF_CPUTOPO_H */
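A hedged usage sketch for the new topology helpers; both constructors return NULL on failure and pair with their __delete() counterparts. The function name print_topology is illustrative:

#include <stdio.h>
#include "util/cputopo.h"

void print_topology(void)
{
	struct cpu_topology *ctp = cpu_topology__new();
	struct numa_topology *ntp = numa_topology__new();
	u32 i;

	if (ctp) {
		for (i = 0; i < ctp->core_sib; i++)
			printf("core siblings: %s\n", ctp->core_siblings[i]);
		cpu_topology__delete(ctp);
	}
	if (ntp) {
		for (i = 0; i < ntp->nr; i++)
			printf("node%u cpus: %s\n", ntp->nodes[i].node,
			       ntp->nodes[i].cpus);
		numa_topology__delete(ntp);
	}
}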
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
index bc22c39c727f..216cb17a3322 100644
--- a/tools/perf/util/cs-etm-decoder/Build
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -1 +1 @@
-libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
+perf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 8c155575c6c5..ba4c623cd8de 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -290,6 +290,12 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
decoder->packet_buffer[i].instr_count = 0;
decoder->packet_buffer[i].last_instr_taken_branch = false;
decoder->packet_buffer[i].last_instr_size = 0;
+ decoder->packet_buffer[i].last_instr_type = 0;
+ decoder->packet_buffer[i].last_instr_subtype = 0;
+ decoder->packet_buffer[i].last_instr_cond = 0;
+ decoder->packet_buffer[i].flags = 0;
+ decoder->packet_buffer[i].exception_number = UINT32_MAX;
+ decoder->packet_buffer[i].trace_chan_id = UINT8_MAX;
decoder->packet_buffer[i].cpu = INT_MIN;
}
}
@@ -300,14 +306,12 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
enum cs_etm_sample_type sample_type)
{
u32 et = 0;
- struct int_node *inode = NULL;
+ int cpu;
if (decoder->packet_count >= MAX_BUFFER - 1)
return OCSD_RESP_FATAL_SYS_ERR;
- /* Search the RB tree for the cpu associated with this traceID */
- inode = intlist__find(traceid_list, trace_chan_id);
- if (!inode)
+ if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
return OCSD_RESP_FATAL_SYS_ERR;
et = decoder->tail;
@@ -317,12 +321,18 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
decoder->packet_buffer[et].sample_type = sample_type;
decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
- decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+ decoder->packet_buffer[et].cpu = cpu;
decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].instr_count = 0;
decoder->packet_buffer[et].last_instr_taken_branch = false;
decoder->packet_buffer[et].last_instr_size = 0;
+ decoder->packet_buffer[et].last_instr_type = 0;
+ decoder->packet_buffer[et].last_instr_subtype = 0;
+ decoder->packet_buffer[et].last_instr_cond = 0;
+ decoder->packet_buffer[et].flags = 0;
+ decoder->packet_buffer[et].exception_number = UINT32_MAX;
+ decoder->packet_buffer[et].trace_chan_id = trace_chan_id;
if (decoder->packet_count == MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
@@ -366,6 +376,9 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
packet->start_addr = elem->st_addr;
packet->end_addr = elem->en_addr;
packet->instr_count = elem->num_instr_range;
+ packet->last_instr_type = elem->last_i_type;
+ packet->last_instr_subtype = elem->last_i_subtype;
+ packet->last_instr_cond = elem->last_instr_cond;
switch (elem->last_i_type) {
case OCSD_INSTR_BR:
@@ -395,10 +408,20 @@ cs_etm_decoder__buffer_discontinuity(struct cs_etm_decoder *decoder,
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_decoder *decoder,
+ const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
-{
- return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
- CS_ETM_EXCEPTION);
+{
+	int ret = 0;
+ struct cs_etm_packet *packet;
+
+ ret = cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+ CS_ETM_EXCEPTION);
+ if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
+ return ret;
+
+ packet = &decoder->packet_buffer[decoder->tail];
+ packet->exception_number = elem->exception_number;
+
+ return ret;
}
static ocsd_datapath_resp_t
@@ -432,7 +455,7 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
- resp = cs_etm_decoder__buffer_exception(decoder,
+ resp = cs_etm_decoder__buffer_exception(decoder, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index a6407d41598f..3ab11dfa92ae 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -15,13 +15,6 @@
struct cs_etm_decoder;
-struct cs_etm_buffer {
- const unsigned char *buf;
- size_t len;
- u64 offset;
- u64 ref_timestamp;
-};
-
enum cs_etm_sample_type {
CS_ETM_EMPTY,
CS_ETM_RANGE,
@@ -43,8 +36,14 @@ struct cs_etm_packet {
u64 start_addr;
u64 end_addr;
u32 instr_count;
+ u32 last_instr_type;
+ u32 last_instr_subtype;
+ u32 flags;
+ u32 exception_number;
+ u8 last_instr_cond;
u8 last_instr_taken_branch;
u8 last_instr_size;
+ u8 trace_chan_id;
int cpu;
};
@@ -99,9 +98,10 @@ enum {
CS_ETM_PROTO_PTM,
};
-enum {
+enum cs_etm_decoder_operation {
CS_ETM_OPERATION_PRINT = 1,
CS_ETM_OPERATION_DECODE,
+ CS_ETM_OPERATION_MAX,
};
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 27a374ddf661..110804936fc3 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -12,6 +12,7 @@
#include <linux/log2.h>
#include <linux/types.h>
+#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>
#include "auxtrace.h"
@@ -24,6 +25,7 @@
#include "machine.h"
#include "map.h"
#include "perf.h"
+#include "symbol.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
@@ -63,13 +65,10 @@ struct cs_etm_queue {
struct thread *thread;
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
- const struct cs_etm_state *state;
union perf_event *event_buf;
unsigned int queue_nr;
pid_t pid, tid;
int cpu;
- u64 time;
- u64 timestamp;
u64 offset;
u64 period_instructions;
struct branch_stack *last_branch;
@@ -77,11 +76,13 @@ struct cs_etm_queue {
size_t last_branch_pos;
struct cs_etm_packet *prev_packet;
struct cs_etm_packet *packet;
+ const unsigned char *buf;
+ size_t buf_len, buf_used;
};
static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
- pid_t tid, u64 time_);
+ pid_t tid);
/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300
@@ -96,6 +97,34 @@ static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
return CS_ETM_PROTO_ETMV3;
}
+static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *magic = metadata[CS_ETM_MAGIC];
+ return 0;
+}
+
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *cpu = (int)metadata[CS_ETM_CPU];
+ return 0;
+}
+
static void cs_etm__packet_dump(const char *pkt_string)
{
const char *color = PERF_COLOR_BLUE;
@@ -109,10 +138,83 @@ static void cs_etm__packet_dump(const char *pkt_string)
fflush(stdout);
}
+static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm, int idx,
+ u32 etmidr)
+{
+ u64 **metadata = etm->metadata;
+
+ t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
+ t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
+ t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
+}
+
+static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm, int idx)
+{
+ u64 **metadata = etm->metadata;
+
+ t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
+ t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
+ t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
+ t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
+ t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
+ t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
+}
+
+static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm)
+{
+ int i;
+ u32 etmidr;
+ u64 architecture;
+
+ for (i = 0; i < etm->num_cpu; i++) {
+ architecture = etm->metadata[i][CS_ETM_MAGIC];
+
+ switch (architecture) {
+ case __perf_cs_etmv3_magic:
+ etmidr = etm->metadata[i][CS_ETM_ETMIDR];
+ cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
+ break;
+ case __perf_cs_etmv4_magic:
+ cs_etm__set_trace_param_etmv4(t_params, etm, i);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_queue *etmq,
+ enum cs_etm_decoder_operation mode)
+{
+ int ret = -EINVAL;
+
+ if (!(mode < CS_ETM_OPERATION_MAX))
+ goto out;
+
+ d_params->packet_printer = cs_etm__packet_dump;
+ d_params->operation = mode;
+ d_params->data = etmq;
+ d_params->formatted = true;
+ d_params->fsyncs = false;
+ d_params->hsyncs = false;
+ d_params->frame_aligned = true;
+
+ ret = 0;
+out:
+ return ret;
+}
+
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
struct auxtrace_buffer *buffer)
{
- int i, ret;
+ int ret;
const char *color = PERF_COLOR_BLUE;
struct cs_etm_decoder_params d_params;
struct cs_etm_trace_params *t_params;
@@ -126,48 +228,22 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
- for (i = 0; i < etm->num_cpu; i++) {
- if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
- u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
-
- t_params[i].protocol =
- cs_etm__get_v7_protocol_version(etmidr);
- t_params[i].etmv3.reg_ctrl =
- etm->metadata[i][CS_ETM_ETMCR];
- t_params[i].etmv3.reg_trc_id =
- etm->metadata[i][CS_ETM_ETMTRACEIDR];
- } else if (etm->metadata[i][CS_ETM_MAGIC] ==
- __perf_cs_etmv4_magic) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 =
- etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 =
- etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 =
- etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 =
- etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
- etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
- etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
- }
- }
+
+ if (!t_params)
+ return;
+
+ if (cs_etm__init_trace_params(t_params, etm))
+ goto out_free;
/* Set decoder parameters to simply print the trace packets */
- d_params.packet_printer = cs_etm__packet_dump;
- d_params.operation = CS_ETM_OPERATION_PRINT;
- d_params.formatted = true;
- d_params.fsyncs = false;
- d_params.hsyncs = false;
- d_params.frame_aligned = true;
+ if (cs_etm__init_decoder_params(&d_params, NULL,
+ CS_ETM_OPERATION_PRINT))
+ goto out_free;
decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
- zfree(&t_params);
-
if (!decoder)
- return;
+ goto out_free;
do {
size_t consumed;
@@ -182,6 +258,9 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
} while (buffer_used < buffer->size);
cs_etm_decoder__free(decoder);
+
+out_free:
+ zfree(&t_params);
}
static int cs_etm__flush_events(struct perf_session *session,
@@ -205,7 +284,7 @@ static int cs_etm__flush_events(struct perf_session *session,
if (ret < 0)
return ret;
- return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
+ return cs_etm__process_timeless_queues(etm, -1);
}
static void cs_etm__free_queue(void *priv)
@@ -251,7 +330,7 @@ static void cs_etm__free(struct perf_session *session)
cs_etm__free_events(session);
session->auxtrace = NULL;
- /* First remove all traceID/CPU# nodes for the RB tree */
+ /* First remove all traceID/metadata nodes for the RB tree */
intlist__for_each_entry_safe(inode, tmp, traceid_list)
intlist__remove(traceid_list, inode);
/* Then the RB tree itself */
@@ -297,7 +376,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
struct addr_location al;
if (!etmq)
- return -1;
+ return 0;
machine = etmq->etm->machine;
cpumode = cs_etm__cpu_mode(etmq, address);
@@ -305,7 +384,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
thread = etmq->thread;
if (!thread) {
if (cpumode != PERF_RECORD_MISC_KERNEL)
- return -EINVAL;
+ return 0;
thread = etmq->etm->unknown_thread;
}
@@ -328,12 +407,10 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
return len;
}
-static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
- unsigned int queue_nr)
+static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
{
- int i;
struct cs_etm_decoder_params d_params;
- struct cs_etm_trace_params *t_params;
+ struct cs_etm_trace_params *t_params = NULL;
struct cs_etm_queue *etmq;
size_t szp = sizeof(struct cs_etm_packet);
@@ -368,59 +445,22 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
if (!etmq->event_buf)
goto out_free;
- etmq->etm = etm;
- etmq->queue_nr = queue_nr;
- etmq->pid = -1;
- etmq->tid = -1;
- etmq->cpu = -1;
-
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
if (!t_params)
goto out_free;
- for (i = 0; i < etm->num_cpu; i++) {
- if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
- u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
-
- t_params[i].protocol =
- cs_etm__get_v7_protocol_version(etmidr);
- t_params[i].etmv3.reg_ctrl =
- etm->metadata[i][CS_ETM_ETMCR];
- t_params[i].etmv3.reg_trc_id =
- etm->metadata[i][CS_ETM_ETMTRACEIDR];
- } else if (etm->metadata[i][CS_ETM_MAGIC] ==
- __perf_cs_etmv4_magic) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 =
- etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 =
- etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 =
- etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 =
- etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
- etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
- etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
- }
- }
+ if (cs_etm__init_trace_params(t_params, etm))
+ goto out_free;
- /* Set decoder parameters to simply print the trace packets */
- d_params.packet_printer = cs_etm__packet_dump;
- d_params.operation = CS_ETM_OPERATION_DECODE;
- d_params.formatted = true;
- d_params.fsyncs = false;
- d_params.hsyncs = false;
- d_params.frame_aligned = true;
- d_params.data = etmq;
+ /* Set decoder parameters to decode trace packets */
+ if (cs_etm__init_decoder_params(&d_params, etmq,
+ CS_ETM_OPERATION_DECODE))
+ goto out_free;
etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
- zfree(&t_params);
-
if (!etmq->decoder)
goto out_free;
@@ -433,14 +473,13 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
cs_etm__mem_access))
goto out_free_decoder;
- etmq->offset = 0;
- etmq->period_instructions = 0;
-
+ zfree(&t_params);
return etmq;
out_free_decoder:
cs_etm_decoder__free(etmq->decoder);
out_free:
+ zfree(&t_params);
zfree(&etmq->event_buf);
zfree(&etmq->last_branch);
zfree(&etmq->last_branch_rb);
@@ -455,24 +494,30 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
+ int ret = 0;
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
- return 0;
+ goto out;
- etmq = cs_etm__alloc_queue(etm, queue_nr);
+ etmq = cs_etm__alloc_queue(etm);
- if (!etmq)
- return -ENOMEM;
+ if (!etmq) {
+ ret = -ENOMEM;
+ goto out;
+ }
queue->priv = etmq;
-
- if (queue->cpu != -1)
- etmq->cpu = queue->cpu;
-
+ etmq->etm = etm;
+ etmq->queue_nr = queue_nr;
+ etmq->cpu = queue->cpu;
etmq->tid = queue->tid;
+ etmq->pid = -1;
+ etmq->offset = 0;
+ etmq->period_instructions = 0;
- return 0;
+out:
+ return ret;
}
static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
@@ -480,6 +525,9 @@ static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
unsigned int i;
int ret;
+ if (!etm->kernel_start)
+ etm->kernel_start = machine__kernel_start(etm->machine);
+
for (i = 0; i < etm->queues.nr_queues; i++) {
ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
if (ret)
@@ -637,7 +685,7 @@ static int cs_etm__inject_event(union perf_event *event,
static int
-cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
+cs_etm__get_trace(struct cs_etm_queue *etmq)
{
struct auxtrace_buffer *aux_buffer = etmq->buffer;
struct auxtrace_buffer *old_buffer = aux_buffer;
@@ -651,7 +699,7 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
if (!aux_buffer) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
- buff->len = 0;
+ etmq->buf_len = 0;
return 0;
}
@@ -671,13 +719,11 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
- buff->offset = aux_buffer->offset;
- buff->len = aux_buffer->size;
- buff->buf = aux_buffer->data;
-
- buff->ref_timestamp = aux_buffer->reference;
+ etmq->buf_used = 0;
+ etmq->buf_len = aux_buffer->size;
+ etmq->buf = aux_buffer->data;
- return buff->len;
+ return etmq->buf_len;
}
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
@@ -719,7 +765,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
sample.stream_id = etmq->etm->instructions_id;
sample.period = period;
sample.cpu = etmq->packet->cpu;
- sample.flags = 0;
+ sample.flags = etmq->prev_packet->flags;
sample.insn_len = 1;
sample.cpumode = event->sample.header.misc;
@@ -778,7 +824,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
sample.stream_id = etmq->etm->branches_id;
sample.period = 1;
sample.cpu = etmq->packet->cpu;
- sample.flags = 0;
+ sample.flags = etmq->prev_packet->flags;
sample.cpumode = event->sample.header.misc;
/*
@@ -1106,95 +1152,489 @@ static int cs_etm__end_block(struct cs_etm_queue *etmq)
return 0;
}
+/*
+ * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
+ * if need be.
+ * Returns: < 0 if error
+ * = 0 if no more auxtrace_buffer to read
+ * > 0 if the current buffer isn't empty yet
+ */
+static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
+{
+ int ret;
+
+ if (!etmq->buf_len) {
+ ret = cs_etm__get_trace(etmq);
+ if (ret <= 0)
+ return ret;
+ /*
+ * We cannot assume consecutive blocks in the data file
+ * are contiguous, reset the decoder to force re-sync.
+ */
+ ret = cs_etm_decoder__reset(etmq->decoder);
+ if (ret)
+ return ret;
+ }
+
+ return etmq->buf_len;
+}
+
+static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq,
+ struct cs_etm_packet *packet,
+ u64 end_addr)
+{
+ u16 instr16;
+ u32 instr32;
+ u64 addr;
+
+ switch (packet->isa) {
+ case CS_ETM_ISA_T32:
+ /*
+ * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
+ *
+ * b'15 b'8
+ * +-----------------+--------+
+ * | 1 1 0 1 1 1 1 1 | imm8 |
+ * +-----------------+--------+
+ *
+	 * According to the specification, SVC is only defined as
+	 * a 16-bit instruction for T32, with no 32-bit encoding;
+	 * so read only 2 bytes below as the T32 instruction size.
+ */
+ addr = end_addr - 2;
+ cs_etm__mem_access(etmq, addr, sizeof(instr16), (u8 *)&instr16);
+ if ((instr16 & 0xFF00) == 0xDF00)
+ return true;
+
+ break;
+ case CS_ETM_ISA_A32:
+ /*
+ * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
+ *
+ * b'31 b'28 b'27 b'24
+ * +---------+---------+-------------------------+
+ * | !1111 | 1 1 1 1 | imm24 |
+ * +---------+---------+-------------------------+
+ */
+ addr = end_addr - 4;
+ cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
+ if ((instr32 & 0x0F000000) == 0x0F000000 &&
+ (instr32 & 0xF0000000) != 0xF0000000)
+ return true;
+
+ break;
+ case CS_ETM_ISA_A64:
+ /*
+ * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
+ *
+ * b'31 b'21 b'4 b'0
+ * +-----------------------+---------+-----------+
+ * | 1 1 0 1 0 1 0 0 0 0 0 | imm16 | 0 0 0 0 1 |
+ * +-----------------------+---------+-----------+
+ */
+ addr = end_addr - 4;
+ cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
+ if ((instr32 & 0xFFE0001F) == 0xd4000001)
+ return true;
+
+ break;
+ case CS_ETM_ISA_UNKNOWN:
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool cs_etm__is_syscall(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_SVC)
+ return true;
+
+ /*
+ * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
+	 * HVC cases; need to check whether it's an SVC instruction
+	 * based on the packet address.
+ */
+ if (magic == __perf_cs_etmv4_magic) {
+ if (packet->exception_number == CS_ETMV4_EXC_CALL &&
+ cs_etm__is_svc_instr(etmq, prev_packet,
+ prev_packet->end_addr))
+ return true;
+ }
+
+ return false;
+}
+
+static bool cs_etm__is_async_exception(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
+ packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
+ packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
+ packet->exception_number == CS_ETMV3_EXC_IRQ ||
+ packet->exception_number == CS_ETMV3_EXC_FIQ)
+ return true;
+
+ if (magic == __perf_cs_etmv4_magic)
+ if (packet->exception_number == CS_ETMV4_EXC_RESET ||
+ packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
+ packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
+ packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
+ packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
+ packet->exception_number == CS_ETMV4_EXC_IRQ ||
+ packet->exception_number == CS_ETMV4_EXC_FIQ)
+ return true;
+
+ return false;
+}
+
+static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_SMC ||
+ packet->exception_number == CS_ETMV3_EXC_HYP ||
+ packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
+ packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
+ packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
+ packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
+ packet->exception_number == CS_ETMV3_EXC_GENERIC)
+ return true;
+
+ if (magic == __perf_cs_etmv4_magic) {
+ if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
+ packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
+ packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
+ packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
+ return true;
+
+ /*
+ * For CS_ETMV4_EXC_CALL, except SVC other instructions
+ * (SMC, HVC) are taken as sync exceptions.
+ */
+ if (packet->exception_number == CS_ETMV4_EXC_CALL &&
+ !cs_etm__is_svc_instr(etmq, prev_packet,
+ prev_packet->end_addr))
+ return true;
+
+		/*
+		 * ETMv4 has a 5-bit exception number; numbers in the
+		 * range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END] are
+		 * implementation defined exceptions.
+		 *
+		 * Simply treat these as sync exceptions.
+		 */
+ if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
+ packet->exception_number <= CS_ETMV4_EXC_END)
+ return true;
+ }
+
+ return false;
+}
+
+static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+ u64 magic;
+ int ret;
+
+ switch (packet->sample_type) {
+ case CS_ETM_RANGE:
+		/*
+		 * An immediate branch instruction with neither link nor
+		 * return flag is a normal branch within the function.
+		 */
+ if (packet->last_instr_type == OCSD_INSTR_BR &&
+ packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
+ packet->flags = PERF_IP_FLAG_BRANCH;
+
+ if (packet->last_instr_cond)
+ packet->flags |= PERF_IP_FLAG_CONDITIONAL;
+ }
+
+		/*
+		 * An immediate branch instruction with link (e.g. BL) is a
+		 * branch for a function call.
+		 */
+ if (packet->last_instr_type == OCSD_INSTR_BR &&
+ packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL;
+
+		/*
+		 * An indirect branch instruction with link (e.g. BLR) is a
+		 * branch for a function call.
+		 */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL;
+
+		/*
+		 * An indirect branch instruction with subtype
+		 * OCSD_S_INSTR_V7_IMPLIED_RET is an explicit hint of a
+		 * function return on A32/T32.
+		 */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+		/*
+		 * An indirect branch instruction without link (e.g. BR) is
+		 * usually a function return, especially for functions in a
+		 * dynamically linked library.
+		 */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_NONE)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /* Return instruction for function return. */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+		/*
+		 * The decoder might insert a discontinuity in the middle of
+		 * instruction packets; fix up prev_packet with the flag
+		 * PERF_IP_FLAG_TRACE_BEGIN to indicate the trace restarted.
+		 */
+ if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
+ prev_packet->flags |= PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_BEGIN;
+
+		/*
+		 * If the previous packet is an exception return packet
+		 * and the return address immediately follows an SVC
+		 * instruction, calibrate the previous packet's sample
+		 * flags to PERF_IP_FLAG_SYSCALLRET.
+		 */
+ if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_INTERRUPT) &&
+ cs_etm__is_svc_instr(etmq, packet, packet->start_addr))
+ prev_packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_SYSCALLRET;
+ break;
+ case CS_ETM_DISCONTINUITY:
+		/*
+		 * The trace is discontinuous; if the previous packet is
+		 * an instruction packet, set PERF_IP_FLAG_TRACE_END on
+		 * the previous packet.
+		 */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags |= PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_END;
+ break;
+ case CS_ETM_EXCEPTION:
+ ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
+ if (ret)
+ return ret;
+
+		/* The exception is for a system call. */
+ if (cs_etm__is_syscall(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_SYSCALLRET;
+		/*
+		 * These exceptions are triggered by external signals from
+		 * the bus, the interrupt controller, the debug module, or
+		 * a PE reset or halt.
+		 */
+ else if (cs_etm__is_async_exception(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_INTERRUPT;
+		/*
+		 * Otherwise, the exception is caused by a trap, an
+		 * instruction or data fault, or an alignment error.
+		 */
+ else if (cs_etm__is_sync_exception(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_INTERRUPT;
+
+		/*
+		 * An exception packet is never used standalone for
+		 * generating samples; it is affiliated with the previous
+		 * instruction range packet, so set the previous range
+		 * packet's flags to tell perf this branch is an exception
+		 * taken.
+		 */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags = packet->flags;
+ break;
+ case CS_ETM_EXCEPTION_RET:
+		/*
+		 * Like the exception packet, an exception return packet
+		 * is never used standalone for generating samples; it is
+		 * affiliated with the previous instruction range packet,
+		 * so set the previous range packet's flags to tell perf
+		 * it is an exception return branch.
+		 *
+		 * The exception return can be for either a system call
+		 * or another exception type; unfortunately the packet
+		 * carries no exception type info, so we cannot decide the
+		 * exception type purely from the exception return packet.
+		 * Recording the exception number from the exception
+		 * packet and reusing it for the exception return packet
+		 * is not reliable either: the trace can be discontinuous
+		 * or the interrupt can be nested, and in those two cases
+		 * the recorded exception number cannot be trusted.
+		 *
+		 * For the exception return packet we only need to know
+		 * whether it is for a system call or for another type,
+		 * so the decision can be deferred until we receive the
+		 * next packet, which contains the return address. Based
+		 * on the return address we can read out the previous
+		 * instruction, check whether it is a system call
+		 * instruction, and calibrate the sample flags as needed.
+		 */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_INTERRUPT;
+ break;
+ case CS_ETM_EMPTY:
+ default:
+ break;
+ }
+
+ return 0;
+}
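
The CS_ETM_EXCEPTION_RET comment above describes a two-step calibration that is spread across the switch cases. Condensed into one hypothetical helper (a sketch only, not additional patch content; calibrate_exception_return() does not exist in the tree), the logic reads:

	/*
	 * Step 1: on CS_ETM_EXCEPTION_RET, provisionally mark the previous
	 * range packet as a plain interrupt return.
	 * Step 2: on the following CS_ETM_RANGE, upgrade it to a syscall
	 * return if the new range starts right after an SVC instruction.
	 */
	static void calibrate_exception_return(struct cs_etm_queue *etmq,
					       struct cs_etm_packet *prev,
					       struct cs_etm_packet *cur)
	{
		const u64 exc_ret = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN |
				    PERF_IP_FLAG_INTERRUPT;

		if (cur->sample_type == CS_ETM_EXCEPTION_RET &&
		    prev->sample_type == CS_ETM_RANGE)
			prev->flags = exc_ret;				/* step 1 */
		else if (cur->sample_type == CS_ETM_RANGE &&
			 prev->flags == exc_ret &&
			 cs_etm__is_svc_instr(etmq, cur, cur->start_addr))
			prev->flags = PERF_IP_FLAG_BRANCH |		/* step 2 */
				      PERF_IP_FLAG_RETURN |
				      PERF_IP_FLAG_SYSCALLRET;
	}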
+
+static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
+{
+ int ret = 0;
+ size_t processed = 0;
+
+ /*
+ * Packets are decoded and added to the decoder's packet queue
+ * until the decoder packet processing callback has requested that
+ * processing stops or there is nothing left in the buffer. Normal
+ * operations that stop processing are a timestamp packet or a full
+ * decoder buffer queue.
+ */
+ ret = cs_etm_decoder__process_data_block(etmq->decoder,
+ etmq->offset,
+ &etmq->buf[etmq->buf_used],
+ etmq->buf_len,
+ &processed);
+ if (ret)
+ goto out;
+
+ etmq->offset += processed;
+ etmq->buf_used += processed;
+ etmq->buf_len -= processed;
+
+out:
+ return ret;
+}
+
+static int cs_etm__process_decoder_queue(struct cs_etm_queue *etmq)
+{
+ int ret;
+
+ /* Process each packet in this chunk */
+ while (1) {
+ ret = cs_etm_decoder__get_packet(etmq->decoder,
+ etmq->packet);
+ if (ret <= 0)
+ /*
+ * Stop processing this chunk on
+ * end of data or error
+ */
+ break;
+
+		/*
+		 * Packet addresses are swapped during packet handling
+		 * in the switch() statement below, so the sample flags
+		 * must be set beforehand, while the address information
+		 * is still valid.
+		 */
+ ret = cs_etm__set_sample_flags(etmq);
+ if (ret < 0)
+ break;
+
+ switch (etmq->packet->sample_type) {
+ case CS_ETM_RANGE:
+ /*
+ * If the packet contains an instruction
+ * range, generate instruction sequence
+ * events.
+ */
+ cs_etm__sample(etmq);
+ break;
+ case CS_ETM_EXCEPTION:
+ case CS_ETM_EXCEPTION_RET:
+			/*
+			 * On an exception packet, make sure
+			 * the previous instruction range
+			 * packet is handled properly.
+			 */
+ cs_etm__exception(etmq);
+ break;
+ case CS_ETM_DISCONTINUITY:
+			/*
+			 * Discontinuity in trace; flush the
+			 * previous branch stack.
+			 */
+ cs_etm__flush(etmq);
+ break;
+ case CS_ETM_EMPTY:
+			/*
+			 * We should never receive an empty
+			 * packet; report an error.
+			 */
+ pr_err("CS ETM Trace: empty packet\n");
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
- struct cs_etm_auxtrace *etm = etmq->etm;
- struct cs_etm_buffer buffer;
- size_t buffer_used, processed;
int err = 0;
- if (!etm->kernel_start)
- etm->kernel_start = machine__kernel_start(etm->machine);
-
/* Go through each buffer in the queue and decode them one by one */
while (1) {
- buffer_used = 0;
- memset(&buffer, 0, sizeof(buffer));
- err = cs_etm__get_trace(&buffer, etmq);
+ err = cs_etm__get_data_block(etmq);
if (err <= 0)
return err;
- /*
- * We cannot assume consecutive blocks in the data file are
- * contiguous, reset the decoder to force re-sync.
- */
- err = cs_etm_decoder__reset(etmq->decoder);
- if (err != 0)
- return err;
/* Run trace decoder until buffer consumed or end of trace */
do {
- processed = 0;
- err = cs_etm_decoder__process_data_block(
- etmq->decoder,
- etmq->offset,
- &buffer.buf[buffer_used],
- buffer.len - buffer_used,
- &processed);
+ err = cs_etm__decode_data_block(etmq);
if (err)
return err;
- etmq->offset += processed;
- buffer_used += processed;
-
- /* Process each packet in this chunk */
- while (1) {
- err = cs_etm_decoder__get_packet(etmq->decoder,
- etmq->packet);
- if (err <= 0)
- /*
- * Stop processing this chunk on
- * end of data or error
- */
- break;
-
- switch (etmq->packet->sample_type) {
- case CS_ETM_RANGE:
- /*
- * If the packet contains an instruction
- * range, generate instruction sequence
- * events.
- */
- cs_etm__sample(etmq);
- break;
- case CS_ETM_EXCEPTION:
- case CS_ETM_EXCEPTION_RET:
- /*
- * If the exception packet is coming,
- * make sure the previous instruction
- * range packet to be handled properly.
- */
- cs_etm__exception(etmq);
- break;
- case CS_ETM_DISCONTINUITY:
- /*
- * Discontinuity in trace, flush
- * previous branch stack
- */
- cs_etm__flush(etmq);
- break;
- case CS_ETM_EMPTY:
- /*
- * Should not receive empty packet,
- * report error.
- */
- pr_err("CS ETM Trace: empty packet\n");
- return -EINVAL;
- default:
- break;
- }
- }
- } while (buffer.len > buffer_used);
+		/*
+		 * Process each packet in this chunk; if an error occurs
+		 * there is nothing to do other than hope the next block
+		 * will be better.
+		 */
+ err = cs_etm__process_decoder_queue(etmq);
+
+ } while (etmq->buf_len);
if (err == 0)
/* Flush any remaining branch stack entries */
@@ -1205,7 +1645,7 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
}
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
- pid_t tid, u64 time_)
+ pid_t tid)
{
unsigned int i;
struct auxtrace_queues *queues = &etm->queues;
@@ -1215,7 +1655,6 @@ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
struct cs_etm_queue *etmq = queue->priv;
if (etmq && ((tid == -1) || (etmq->tid == tid))) {
- etmq->time = time_;
cs_etm__set_pid_tid_cpu(etm, queue);
cs_etm__run_decoder(etmq);
}
@@ -1259,8 +1698,7 @@ static int cs_etm__process_event(struct perf_session *session,
if (event->header.type == PERF_RECORD_EXIT)
return cs_etm__process_timeless_queues(etm,
- event->fork.tid,
- sample->time);
+ event->fork.tid);
return 0;
}
@@ -1414,9 +1852,9 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
0xffffffff);
/*
- * Create an RB tree for traceID-CPU# tuple. Since the conversion has
- * to be made for each packet that gets decoded, optimizing access in
- * anything other than a sequential array is worth doing.
+ * Create an RB tree for traceID-metadata tuple. Since the conversion
+ * has to be made for each packet that gets decoded, optimizing access
+ * in anything other than a sequential array is worth doing.
*/
traceid_list = intlist__new(NULL);
if (!traceid_list) {
@@ -1482,8 +1920,8 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
err = -EINVAL;
goto err_free_metadata;
}
- /* All good, associate the traceID with the CPU# */
- inode->priv = &metadata[j][CS_ETM_CPU];
+ /* All good, associate the traceID with the metadata pointer */
+ inode->priv = metadata[j];
}
/*
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 37f8d48179ca..0e97c196147a 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -53,7 +53,51 @@ enum {
CS_ETMV4_PRIV_MAX,
};
-/* RB tree for quick conversion between traceID and CPUs */
+/*
+ * ETMv3 exception encoding number:
+ * See Embedded Trace Macrocell specification (ARM IHI 0014Q)
+ * table 7-12 Encoding of Exception[3:0] for non-ARMv7-M processors.
+ */
+enum {
+ CS_ETMV3_EXC_NONE = 0,
+ CS_ETMV3_EXC_DEBUG_HALT = 1,
+ CS_ETMV3_EXC_SMC = 2,
+ CS_ETMV3_EXC_HYP = 3,
+ CS_ETMV3_EXC_ASYNC_DATA_ABORT = 4,
+ CS_ETMV3_EXC_JAZELLE_THUMBEE = 5,
+ CS_ETMV3_EXC_PE_RESET = 8,
+ CS_ETMV3_EXC_UNDEFINED_INSTR = 9,
+ CS_ETMV3_EXC_SVC = 10,
+ CS_ETMV3_EXC_PREFETCH_ABORT = 11,
+ CS_ETMV3_EXC_DATA_FAULT = 12,
+ CS_ETMV3_EXC_GENERIC = 13,
+ CS_ETMV3_EXC_IRQ = 14,
+ CS_ETMV3_EXC_FIQ = 15,
+};
+
+/*
+ * ETMv4 exception encoding number:
+ * See ARM Embedded Trace Macrocell Architecture Specification (ARM IHI 0064D)
+ * table 6-12 Possible values for the TYPE field in an Exception instruction
+ * trace packet, for ARMv7-A/R and ARMv8-A/R PEs.
+ */
+enum {
+ CS_ETMV4_EXC_RESET = 0,
+ CS_ETMV4_EXC_DEBUG_HALT = 1,
+ CS_ETMV4_EXC_CALL = 2,
+ CS_ETMV4_EXC_TRAP = 3,
+ CS_ETMV4_EXC_SYSTEM_ERROR = 4,
+ CS_ETMV4_EXC_INST_DEBUG = 6,
+ CS_ETMV4_EXC_DATA_DEBUG = 7,
+ CS_ETMV4_EXC_ALIGNMENT = 10,
+ CS_ETMV4_EXC_INST_FAULT = 11,
+ CS_ETMV4_EXC_DATA_FAULT = 12,
+ CS_ETMV4_EXC_IRQ = 14,
+ CS_ETMV4_EXC_FIQ = 15,
+ CS_ETMV4_EXC_END = 31,
+};
+
+/* RB tree for quick conversion between traceID and metadata pointers */
struct intlist *traceid_list;
#define KiB(x) ((x) * 1024)
@@ -61,14 +105,15 @@ struct intlist *traceid_list;
#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
-static const u64 __perf_cs_etmv3_magic = 0x3030303030303030ULL;
-static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
+#define __perf_cs_etmv3_magic 0x3030303030303030ULL
+#define __perf_cs_etmv4_magic 0x4040404040404040ULL
#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64))
#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64))
#ifdef HAVE_CSTRACE_SUPPORT
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
#else
static inline int
cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
@@ -76,6 +121,12 @@ cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
{
return -1;
}
+
+static inline int cs_etm__get_cpu(u8 trace_chan_id __maybe_unused,
+ int *cpu __maybe_unused)
+{
+ return -1;
+}
#endif
#endif
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 2a36fab76994..26af43ad9ddd 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1578,7 +1578,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
{
struct perf_session *session;
struct perf_data data = {
- .file = { .path = input, .fd = -1 },
+ .path = input,
.mode = PERF_DATA_MODE_READ,
.force = opts->force,
};
@@ -1650,7 +1650,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
fprintf(stderr,
"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
- data.file.path, path);
+ data.path, path);
fprintf(stderr,
"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index d8cfc19ddb10..6a64f713710d 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -7,10 +7,148 @@
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
+#include <asm/bug.h>
+#include <sys/types.h>
+#include <dirent.h>
#include "data.h"
#include "util.h"
#include "debug.h"
+#include "header.h"
+
+static void close_dir(struct perf_data_file *files, int nr)
+{
+	while (--nr >= 0) {
+ close(files[nr].fd);
+ free(files[nr].path);
+ }
+ free(files);
+}
+
+void perf_data__close_dir(struct perf_data *data)
+{
+ close_dir(data->dir.files, data->dir.nr);
+}
+
+int perf_data__create_dir(struct perf_data *data, int nr)
+{
+ struct perf_data_file *files = NULL;
+ int i, ret = -1;
+
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
+ files = zalloc(nr * sizeof(*files));
+ if (!files)
+ return -ENOMEM;
+
+ data->dir.version = PERF_DIR_VERSION;
+ data->dir.files = files;
+ data->dir.nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ struct perf_data_file *file = &files[i];
+
+ if (asprintf(&file->path, "%s/data.%d", data->path, i) < 0)
+ goto out_err;
+
+ ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
+ if (ret < 0)
+ goto out_err;
+
+ file->fd = ret;
+ }
+
+ return 0;
+
+out_err:
+ close_dir(files, i);
+ return ret;
+}
+
+int perf_data__open_dir(struct perf_data *data)
+{
+ struct perf_data_file *files = NULL;
+ struct dirent *dent;
+ int ret = -1;
+ DIR *dir;
+ int nr = 0;
+
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
+	/* The version is provided by the DIR_FORMAT feature. */
+ if (WARN_ON(data->dir.version != PERF_DIR_VERSION))
+ return -1;
+
+ dir = opendir(data->path);
+ if (!dir)
+ return -EINVAL;
+
+ while ((dent = readdir(dir)) != NULL) {
+ struct perf_data_file *file;
+ char path[PATH_MAX];
+ struct stat st;
+
+ snprintf(path, sizeof(path), "%s/%s", data->path, dent->d_name);
+ if (stat(path, &st))
+ continue;
+
+ if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data", 4))
+ continue;
+
+ ret = -ENOMEM;
+
+ file = realloc(files, (nr + 1) * sizeof(*files));
+ if (!file)
+ goto out_err;
+
+ files = file;
+ file = &files[nr++];
+
+ file->path = strdup(path);
+ if (!file->path)
+ goto out_err;
+
+ ret = open(file->path, O_RDONLY);
+ if (ret < 0)
+ goto out_err;
+
+ file->fd = ret;
+ file->size = st.st_size;
+ }
+
+ if (!files)
+ return -EINVAL;
+
+ data->dir.files = files;
+ data->dir.nr = nr;
+ return 0;
+
+out_err:
+ close_dir(files, nr);
+ return ret;
+}
+
+int perf_data__update_dir(struct perf_data *data)
+{
+ int i;
+
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
+ for (i = 0; i < data->dir.nr; i++) {
+ struct perf_data_file *file = &data->dir.files[i];
+ struct stat st;
+
+ if (fstat(file->fd, &st))
+ return -1;
+
+ file->size = st.st_size;
+ }
+
+ return 0;
+}
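
Taken together, these helpers define a simple on-disk layout: a directory containing a "header" file plus one data.<N> file per stream. A hypothetical write-side caller might look like the sketch below (write_dir_example() and nr_streams are illustrative, not tree code):

	static int write_dir_example(int nr_streams)
	{
		struct perf_data data = {
			.path	= "perf.data",
			.mode	= PERF_DATA_MODE_WRITE,
			.is_dir	= true,	/* writers opt in to the dir layout */
		};

		if (perf_data__open(&data))	/* mkdir + open "header" */
			return -1;

		/* One data.<N> file per stream (e.g. per CPU). */
		if (perf_data__create_dir(&data, nr_streams))
			return -1;

		/* ... write the streams ... */

		perf_data__update_dir(&data);	/* refresh per-file sizes */
		perf_data__close(&data);	/* data.<N> files + header */
		return 0;
	}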
static bool check_pipe(struct perf_data *data)
{
@@ -19,11 +157,11 @@ static bool check_pipe(struct perf_data *data)
int fd = perf_data__is_read(data) ?
STDIN_FILENO : STDOUT_FILENO;
- if (!data->file.path) {
+ if (!data->path) {
if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
is_pipe = true;
} else {
- if (!strcmp(data->file.path, "-"))
+ if (!strcmp(data->path, "-"))
is_pipe = true;
}
@@ -37,18 +175,46 @@ static int check_backup(struct perf_data *data)
{
struct stat st;
- if (!stat(data->file.path, &st) && st.st_size) {
- /* TODO check errors properly */
+ if (perf_data__is_read(data))
+ return 0;
+
+ if (!stat(data->path, &st) && st.st_size) {
char oldname[PATH_MAX];
+ int ret;
+
snprintf(oldname, sizeof(oldname), "%s.old",
- data->file.path);
- unlink(oldname);
- rename(data->file.path, oldname);
+ data->path);
+
+ ret = rm_rf_perf_data(oldname);
+ if (ret) {
+ pr_err("Can't remove old data: %s (%s)\n",
+ ret == -2 ?
+ "Unknown file found" : strerror(errno),
+ oldname);
+ return -1;
+ }
+
+ if (rename(data->path, oldname)) {
+ pr_err("Can't move data: %s (%s to %s)\n",
+ strerror(errno),
+ data->path, oldname);
+ return -1;
+ }
}
return 0;
}
+static bool is_dir(struct perf_data *data)
+{
+ struct stat st;
+
+ if (stat(data->path, &st))
+ return false;
+
+ return (st.st_mode & S_IFMT) == S_IFDIR;
+}
+
static int open_file_read(struct perf_data *data)
{
struct stat st;
@@ -82,7 +248,7 @@ static int open_file_read(struct perf_data *data)
goto out_close;
}
- data->size = st.st_size;
+ data->file.size = st.st_size;
return fd;
out_close:
@@ -95,9 +261,6 @@ static int open_file_write(struct perf_data *data)
int fd;
char sbuf[STRERR_BUFSIZE];
- if (check_backup(data))
- return -1;
-
fd = open(data->file.path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC,
S_IRUSR|S_IWUSR);
@@ -115,8 +278,46 @@ static int open_file(struct perf_data *data)
fd = perf_data__is_read(data) ?
open_file_read(data) : open_file_write(data);
+ if (fd < 0) {
+ zfree(&data->file.path);
+ return -1;
+ }
+
data->file.fd = fd;
- return fd < 0 ? -1 : 0;
+ return 0;
+}
+
+static int open_file_dup(struct perf_data *data)
+{
+ data->file.path = strdup(data->path);
+ if (!data->file.path)
+ return -ENOMEM;
+
+ return open_file(data);
+}
+
+static int open_dir(struct perf_data *data)
+{
+ int ret;
+
+ /*
+ * So far we open only the header, so we can read the data version and
+ * layout.
+ */
+ if (asprintf(&data->file.path, "%s/header", data->path) < 0)
+ return -1;
+
+ if (perf_data__is_write(data) &&
+ mkdir(data->path, S_IRWXU) < 0)
+ return -1;
+
+ ret = open_file(data);
+
+ /* Cleanup whatever we managed to create so far. */
+ if (ret && perf_data__is_write(data))
+ rm_rf_perf_data(data->path);
+
+ return ret;
}
int perf_data__open(struct perf_data *data)
@@ -124,14 +325,25 @@ int perf_data__open(struct perf_data *data)
if (check_pipe(data))
return 0;
- if (!data->file.path)
- data->file.path = "perf.data";
+ if (!data->path)
+ data->path = "perf.data";
- return open_file(data);
+ if (check_backup(data))
+ return -1;
+
+ if (perf_data__is_read(data))
+ data->is_dir = is_dir(data);
+
+ return perf_data__is_dir(data) ?
+ open_dir(data) : open_file_dup(data);
}
void perf_data__close(struct perf_data *data)
{
+ if (perf_data__is_dir(data))
+ perf_data__close_dir(data);
+
+ zfree(&data->file.path);
close(data->file.fd);
}
@@ -149,9 +361,9 @@ ssize_t perf_data__write(struct perf_data *data,
int perf_data__switch(struct perf_data *data,
const char *postfix,
- size_t pos, bool at_exit)
+ size_t pos, bool at_exit,
+ char **new_filepath)
{
- char *new_filepath;
int ret;
if (check_pipe(data))
@@ -159,15 +371,15 @@ int perf_data__switch(struct perf_data *data,
if (perf_data__is_read(data))
return -EINVAL;
- if (asprintf(&new_filepath, "%s.%s", data->file.path, postfix) < 0)
+ if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
return -ENOMEM;
/*
* Only fire a warning, don't return error, continue fill
* original file.
*/
- if (rename(data->file.path, new_filepath))
- pr_warning("Failed to rename %s to %s\n", data->file.path, new_filepath);
+ if (rename(data->path, *new_filepath))
+ pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
if (!at_exit) {
close(data->file.fd);
@@ -184,6 +396,22 @@ int perf_data__switch(struct perf_data *data,
}
ret = data->file.fd;
out:
- free(new_filepath);
return ret;
}
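
With the new out-parameter, ownership of the generated path moves to the caller, who must free it. A minimal caller-side sketch (the timestamp postfix is illustrative):

	char *new_filepath = NULL;
	int fd = perf_data__switch(data, "2019032112345678", 0, false,
				   &new_filepath);

	if (fd >= 0)
		pr_debug("previous contents moved to %s\n", new_filepath);
	/* Safe even on failure paths that never set the pointer. */
	free(new_filepath);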
+
+unsigned long perf_data__size(struct perf_data *data)
+{
+ u64 size = data->file.size;
+ int i;
+
+ if (!data->is_dir)
+ return size;
+
+ for (i = 0; i < data->dir.nr; i++) {
+ struct perf_data_file *file = &data->dir.files[i];
+
+ size += file->size;
+ }
+
+ return size;
+}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 4828f7feea89..259868a39019 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -10,16 +10,24 @@ enum perf_data_mode {
};
struct perf_data_file {
- const char *path;
+ char *path;
int fd;
+ unsigned long size;
};
struct perf_data {
+ const char *path;
struct perf_data_file file;
bool is_pipe;
+ bool is_dir;
bool force;
- unsigned long size;
enum perf_data_mode mode;
+
+ struct {
+ u64 version;
+ struct perf_data_file *files;
+ int nr;
+ } dir;
};
static inline bool perf_data__is_read(struct perf_data *data)
@@ -37,14 +45,14 @@ static inline int perf_data__is_pipe(struct perf_data *data)
return data->is_pipe;
}
-static inline int perf_data__fd(struct perf_data *data)
+static inline bool perf_data__is_dir(struct perf_data *data)
{
- return data->file.fd;
+ return data->is_dir;
}
-static inline unsigned long perf_data__size(struct perf_data *data)
+static inline int perf_data__fd(struct perf_data *data)
{
- return data->size;
+ return data->file.fd;
}
int perf_data__open(struct perf_data *data);
@@ -62,5 +70,11 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
*/
int perf_data__switch(struct perf_data *data,
const char *postfix,
- size_t pos, bool at_exit);
+ size_t pos, bool at_exit, char **new_filepath);
+
+int perf_data__create_dir(struct perf_data *data, int nr);
+int perf_data__open_dir(struct perf_data *data);
+void perf_data__close_dir(struct perf_data *data);
+int perf_data__update_dir(struct perf_data *data);
+unsigned long perf_data__size(struct perf_data *data);
#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 69fbb0a72d0c..d7315a00c731 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -20,6 +20,7 @@
#include "thread.h"
#include "comm.h"
#include "symbol.h"
+#include "map.h"
#include "event.h"
#include "util.h"
#include "thread-stack.h"
@@ -509,18 +510,23 @@ int db_export__call_path(struct db_export *dbe, struct call_path *cp)
return 0;
}
-int db_export__call_return(struct db_export *dbe, struct call_return *cr)
+int db_export__call_return(struct db_export *dbe, struct call_return *cr,
+ u64 *parent_db_id)
{
int err;
- if (cr->db_id)
- return 0;
-
err = db_export__call_path(dbe, cr->cp);
if (err)
return err;
- cr->db_id = ++dbe->call_return_last_db_id;
+ if (!cr->db_id)
+ cr->db_id = ++dbe->call_return_last_db_id;
+
+ if (parent_db_id) {
+ if (!*parent_db_id)
+ *parent_db_id = ++dbe->call_return_last_db_id;
+ cr->parent_db_id = *parent_db_id;
+ }
if (dbe->export_call_return)
return dbe->export_call_return(dbe, cr);
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index 67bc6b8ad2d6..4e2424c89df9 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -104,6 +104,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
int db_export__branch_types(struct db_export *dbe);
int db_export__call_path(struct db_export *dbe, struct call_path *cp);
-int db_export__call_return(struct db_export *dbe, struct call_return *cr);
+int db_export__call_return(struct db_export *dbe, struct call_return *cr,
+ u64 *parent_db_id);
#endif
diff --git a/tools/perf/util/drv_configs.c b/tools/perf/util/drv_configs.c
deleted file mode 100644
index eec754243f4d..000000000000
--- a/tools/perf/util/drv_configs.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * drv_configs.h: Interface to apply PMU specific configuration
- * Copyright (c) 2016-2018, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include "drv_configs.h"
-#include "evlist.h"
-#include "evsel.h"
-#include "pmu.h"
-#include <errno.h>
-
-static int
-perf_evsel__apply_drv_configs(struct perf_evsel *evsel,
- struct perf_evsel_config_term **err_term)
-{
- bool found = false;
- int err = 0;
- struct perf_evsel_config_term *term;
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmu__scan(pmu)) != NULL)
- if (pmu->type == evsel->attr.type) {
- found = true;
- break;
- }
-
- list_for_each_entry(term, &evsel->config_terms, list) {
- if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
- continue;
-
- /*
- * We have a configuration term, report an error if we
- * can't find the PMU or if the PMU driver doesn't support
- * cmd line driver configuration.
- */
- if (!found || !pmu->set_drv_config) {
- err = -EINVAL;
- *err_term = term;
- break;
- }
-
- err = pmu->set_drv_config(term);
- if (err) {
- *err_term = term;
- break;
- }
- }
-
- return err;
-}
-
-int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
- struct perf_evsel **err_evsel,
- struct perf_evsel_config_term **err_term)
-{
- struct perf_evsel *evsel;
- int err = 0;
-
- evlist__for_each_entry(evlist, evsel) {
- err = perf_evsel__apply_drv_configs(evsel, err_term);
- if (err) {
- *err_evsel = evsel;
- break;
- }
- }
-
- return err;
-}
diff --git a/tools/perf/util/drv_configs.h b/tools/perf/util/drv_configs.h
deleted file mode 100644
index 32bc9babc2e0..000000000000
--- a/tools/perf/util/drv_configs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * drv_configs.h: Interface to apply PMU specific configuration
- * Copyright (c) 2016-2018, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef __PERF_DRV_CONFIGS_H
-#define __PERF_DRV_CONFIGS_H
-
-#include "drv_configs.h"
-#include "evlist.h"
-#include "evsel.h"
-
-int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
- struct perf_evsel **err_evsel,
- struct perf_evsel_config_term **term);
-#endif
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 62c8cf622607..e059976d9d93 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -8,8 +8,11 @@
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include "compress.h"
+#include "namespaces.h"
#include "path.h"
+#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
@@ -181,6 +184,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
break;
@@ -1138,28 +1142,34 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
static void dso__set_basename(struct dso *dso)
{
- /*
- * basename() may modify path buffer, so we must pass
- * a copy.
- */
- char *base, *lname = strdup(dso->long_name);
+ char *base, *lname;
+ int tid;
- if (!lname)
- return;
-
- /*
- * basename() may return a pointer to internal
- * storage which is reused in subsequent calls
- * so copy the result.
- */
- base = strdup(basename(lname));
+ if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
+ if (asprintf(&base, "[JIT] tid %d", tid) < 0)
+ return;
+ } else {
+ /*
+ * basename() may modify path buffer, so we must pass
+ * a copy.
+ */
+ lname = strdup(dso->long_name);
+ if (!lname)
+ return;
- free(lname);
+ /*
+ * basename() may return a pointer to internal
+ * storage which is reused in subsequent calls
+ * so copy the result.
+ */
+ base = strdup(basename(lname));
- if (!base)
- return;
+ free(lname);
- dso__set_short_name(dso, base, true);
+ if (!base)
+ return;
+ }
+ dso__set_short_name(dso, base, true);
}
int dso__name_len(const struct dso *dso)
@@ -1195,10 +1205,10 @@ struct dso *dso__new(const char *name)
strcpy(dso->name, name);
dso__set_long_name(dso, dso->name, false);
dso__set_short_name(dso, dso->name, false);
- dso->symbols = dso->symbol_names = RB_ROOT;
+ dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
dso->data.cache = RB_ROOT;
- dso->inlined_nodes = RB_ROOT;
- dso->srclines = RB_ROOT;
+ dso->inlined_nodes = RB_ROOT_CACHED;
+ dso->srclines = RB_ROOT_CACHED;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1467,7 +1477,7 @@ size_t dso__fprintf(struct dso *dso, FILE *fp)
ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
ret += dso__fprintf_buildid(dso, fp);
ret += fprintf(fp, ")\n");
- for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
ret += symbol__fprintf(pos, fp);
}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 8c8a7abe809d..6e3f63781e51 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -7,13 +7,15 @@
#include <linux/rbtree.h>
#include <sys/types.h>
#include <stdbool.h>
+#include <stdio.h>
#include "rwsem.h"
-#include <linux/types.h>
#include <linux/bitops.h>
-#include "map.h"
-#include "namespaces.h"
#include "build-id.h"
+struct machine;
+struct map;
+struct perf_env;
+
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
@@ -34,6 +36,7 @@ enum dso_binary_type {
DSO_BINARY_TYPE__KCORE,
DSO_BINARY_TYPE__GUEST_KCORE,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__BPF_PROG_INFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -140,10 +143,10 @@ struct dso {
struct list_head node;
struct rb_node rb_node; /* rbtree node sorted by long name */
struct rb_root *root; /* root of rbtree that rb_node is in */
- struct rb_root symbols;
- struct rb_root symbol_names;
- struct rb_root inlined_nodes;
- struct rb_root srclines;
+ struct rb_root_cached symbols;
+ struct rb_root_cached symbol_names;
+ struct rb_root_cached inlined_nodes;
+ struct rb_root_cached srclines;
struct {
u64 addr;
struct symbol *symbol;
@@ -188,6 +191,12 @@ struct dso {
u64 debug_frame_offset;
u64 eh_frame_hdr_offset;
} data;
+ /* bpf prog information */
+ struct {
+ u32 id;
+ u32 sub_id;
+ struct perf_env *env;
+ } bpf_prog;
union { /* Tool specific area */
void *priv;
@@ -235,7 +244,7 @@ bool dso__loaded(const struct dso *dso);
static inline bool dso__has_symbols(const struct dso *dso)
{
- return !RB_EMPTY_ROOT(&dso->symbols);
+ return !RB_EMPTY_ROOT(&dso->symbols.rb_root);
}
bool dso__sorted_by_name(const struct dso *dso);
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 4c23779e271a..c6351b557bb0 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -3,15 +3,163 @@
#include "env.h"
#include "sane_ctype.h"
#include "util.h"
+#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
+#include <bpf/libbpf.h>
struct perf_env perf_env;
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node)
+{
+ __u32 prog_id = info_node->info_linear->info.id;
+ struct bpf_prog_info_node *node;
+ struct rb_node *parent = NULL;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.infos.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id) {
+ p = &(*p)->rb_left;
+ } else if (prog_id > node->info_linear->info.id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated bpf prog info %u\n", prog_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&info_node->rb_node, parent, p);
+ rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ env->bpf_progs.infos_cnt++;
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id)
+{
+ struct bpf_prog_info_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.infos.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id)
+ n = n->rb_left;
+ else if (prog_id > node->info_linear->info.id)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
+
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+ struct rb_node *parent = NULL;
+ __u32 btf_id = btf_node->id;
+ struct btf_node *node;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.btfs.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct btf_node, rb_node);
+ if (btf_id < node->id) {
+ p = &(*p)->rb_left;
+ } else if (btf_id > node->id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated btf %u\n", btf_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&btf_node->rb_node, parent, p);
+ rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+ env->bpf_progs.btfs_cnt++;
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+ struct btf_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.btfs.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct btf_node, rb_node);
+ if (btf_id < node->id)
+ n = n->rb_left;
+ else if (btf_id > node->id)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
+
+/* Purge data in the bpf_progs.infos and bpf_progs.btfs trees */
+static void perf_env__purge_bpf(struct perf_env *env)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_write(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
+ free(node);
+ }
+
+ env->bpf_progs.infos_cnt = 0;
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
+ free(node);
+ }
+
+ env->bpf_progs.btfs_cnt = 0;
+
+ up_write(&env->bpf_progs.lock);
+}
+
void perf_env__exit(struct perf_env *env)
{
int i;
+ perf_env__purge_bpf(env);
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
@@ -38,6 +186,13 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->memory_nodes);
}
+void perf_env__init(struct perf_env *env)
+{
+ env->bpf_progs.infos = RB_ROOT;
+ env->bpf_progs.btfs = RB_ROOT;
+ init_rwsem(&env->bpf_progs.lock);
+}
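
A perf_env has to pass through perf_env__init() before any of the insert/find helpers touch the trees, since that is where the rbtree roots and the rw_semaphore are set up. A sketch of the expected lifecycle (bpf_env_lifecycle() is hypothetical; the info_node is assumed to be built elsewhere by the caller):

	static void bpf_env_lifecycle(struct bpf_prog_info_node *info_node,
				      __u32 prog_id)
	{
		struct perf_env env = {};

		perf_env__init(&env);	/* rbtree roots + bpf_progs.lock */

		/* Ownership of info_node passes to the env. */
		perf_env__insert_bpf_prog_info(&env, info_node);

		if (perf_env__find_bpf_prog_info(&env, prog_id) != info_node)
			pr_debug("prog %u not found\n", prog_id);

		perf_env__exit(&env);	/* purge_bpf() frees both trees */
	}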
+
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
int i;
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d01b8355f4ca..4f8e2b485c01 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -3,7 +3,9 @@
#define __PERF_ENV_H
#include <linux/types.h>
+#include <linux/rbtree.h>
#include "cpumap.h"
+#include "rwsem.h"
struct cpu_topology_map {
int socket_id;
@@ -64,8 +66,23 @@ struct perf_env {
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
u64 clockid_res_ns;
+
+	/*
+	 * The lock protects the bpf_progs rbtrees. This is needed because
+	 * the trees are accessed by different threads in perf-top.
+	 */
+ struct {
+ struct rw_semaphore lock;
+ struct rb_root infos;
+ u32 infos_cnt;
+ struct rb_root btfs;
+ u32 btfs_cnt;
+ } bpf_progs;
};
+struct bpf_prog_info_node;
+struct btf_node;
+
extern struct perf_env perf_env;
void perf_env__exit(struct perf_env *env);
@@ -80,4 +97,11 @@ const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
+void perf_env__init(struct perf_env *env);
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 937a5a4f71cc..ba7be74fad6e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -21,9 +21,13 @@
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
+#include "map.h"
+#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
+#include "session.h"
+#include "bpf-event.h"
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
@@ -45,6 +49,8 @@ static const char *perf_event__names[] = {
[PERF_RECORD_SWITCH] = "SWITCH",
[PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
[PERF_RECORD_NAMESPACES] = "NAMESPACES",
+ [PERF_RECORD_KSYMBOL] = "KSYMBOL",
+ [PERF_RECORD_BPF_EVENT] = "BPF_EVENT",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
@@ -1329,6 +1335,22 @@ int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
return machine__process_switch_event(machine, event);
}
+int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_ksymbol(machine, event, sample);
+}
+
+int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_bpf_event(machine, event, sample);
+}
+
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
@@ -1461,6 +1483,21 @@ static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}
+size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " ksymbol event with addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
+ event->ksymbol_event.addr, event->ksymbol_event.len,
+ event->ksymbol_event.ksym_type,
+ event->ksymbol_event.flags, event->ksymbol_event.name);
+}
+
+size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " bpf event with type %u, flags %u, id %u\n",
+ event->bpf_event.type, event->bpf_event.flags,
+ event->bpf_event.id);
+}
+
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -1496,6 +1533,12 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_LOST:
ret += perf_event__fprintf_lost(event, fp);
break;
+ case PERF_RECORD_KSYMBOL:
+ ret += perf_event__fprintf_ksymbol(event, fp);
+ break;
+ case PERF_RECORD_BPF_EVENT:
+ ret += perf_event__fprintf_bpf_event(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index eb95f3384958..36ae7e92dab1 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -5,6 +5,7 @@
#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>
+#include <linux/bpf.h>
#include "../perf.h"
#include "build-id.h"
@@ -84,6 +85,29 @@ struct throttle_event {
u64 stream_id;
};
+#ifndef KSYM_NAME_LEN
+#define KSYM_NAME_LEN 256
+#endif
+
+struct ksymbol_event {
+ struct perf_event_header header;
+ u64 addr;
+ u32 len;
+ u16 ksym_type;
+ u16 flags;
+ char name[KSYM_NAME_LEN];
+};
+
+struct bpf_event {
+ struct perf_event_header header;
+ u16 type;
+ u16 flags;
+ u32 id;
+
+ /* for bpf_prog types */
+ u8 tag[BPF_TAG_SIZE]; // prog tag
+};
+
#define PERF_SAMPLE_MASK \
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
@@ -137,26 +161,7 @@ struct ip_callchain {
u64 ips[0];
};
-struct branch_flags {
- u64 mispred:1;
- u64 predicted:1;
- u64 in_tx:1;
- u64 abort:1;
- u64 cycles:16;
- u64 type:4;
- u64 reserved:40;
-};
-
-struct branch_entry {
- u64 from;
- u64 to;
- struct branch_flags flags;
-};
-
-struct branch_stack {
- u64 nr;
- struct branch_entry entries[0];
-};
+struct branch_stack;
enum {
PERF_IP_FLAG_BRANCH = 1ULL << 0,
@@ -527,8 +532,9 @@ struct auxtrace_error_event {
u32 cpu;
u32 pid;
u32 tid;
- u32 reserved__; /* For alignment */
+ u32 fmt;
u64 ip;
+ u64 time;
char msg[MAX_AUXTRACE_ERROR_MSG];
};
@@ -651,6 +657,8 @@ union perf_event {
struct stat_round_event stat_round;
struct time_conv_event time_conv;
struct feature_event feat;
+ struct ksymbol_event ksymbol_event;
+ struct bpf_event bpf_event;
};
void perf_event__print_totals(void);
@@ -748,6 +756,14 @@ int perf_event__process_exit(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
+int perf_event__process_ksymbol(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_bpf_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
@@ -811,6 +827,8 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
int kallsyms__get_function_start(const char *kallsyms_filename,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8c902276d4b4..ec78e93085de 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -19,6 +19,7 @@
#include "debug.h"
#include "units.h"
#include "asm/bug.h"
+#include "bpf-event.h"
#include <signal.h>
#include <unistd.h>
@@ -230,18 +231,33 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
}
}
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
+void perf_event_attr__set_max_precise_ip(struct perf_event_attr *pattr)
{
- attr->precise_ip = 3;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .exclude_kernel = 1,
+ .precise_ip = 3,
+ };
+
+ event_attr_init(&attr);
+
+	/*
+	 * An unnamed union member is not supported as a named struct
+	 * member initializer in older compilers such as gcc 4.4.7.
+	 */
+ attr.sample_period = 1;
- while (attr->precise_ip != 0) {
- int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
+ while (attr.precise_ip != 0) {
+ int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
if (fd != -1) {
close(fd);
break;
}
- --attr->precise_ip;
+ --attr.precise_ip;
}
+
+ pattr->precise_ip = attr.precise_ip;
}
int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
@@ -1022,7 +1038,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks)
+ bool auxtrace_overwrite, int nr_cblocks, int affinity)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1032,7 +1048,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks };
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1064,7 +1080,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1841,3 +1857,121 @@ struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
}
return leader;
}
+
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data)
+{
+ struct perf_evsel *evsel;
+ bool new_evlist = (*evlist) == NULL;
+
+ if (*evlist == NULL)
+ *evlist = perf_evlist__new();
+ if (*evlist == NULL)
+ return -1;
+
+ if (!attr->sample_id_all) {
+ pr_warning("enabling sample_id_all for all side band events\n");
+ attr->sample_id_all = 1;
+ }
+
+ evsel = perf_evsel__new_idx(attr, (*evlist)->nr_entries);
+ if (!evsel)
+ goto out_err;
+
+ evsel->side_band.cb = cb;
+ evsel->side_band.data = data;
+ perf_evlist__add(*evlist, evsel);
+ return 0;
+
+out_err:
+ if (new_evlist) {
+ perf_evlist__delete(*evlist);
+ *evlist = NULL;
+ }
+ return -1;
+}
+
+static void *perf_evlist__poll_thread(void *arg)
+{
+ struct perf_evlist *evlist = arg;
+ bool draining = false;
+ int i;
+
+ while (draining || !(evlist->thread.done)) {
+ if (draining)
+ draining = false;
+ else if (evlist->thread.done)
+ draining = true;
+
+ if (!draining)
+ perf_evlist__poll(evlist, 1000);
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *map = &evlist->mmap[i];
+ union perf_event *event;
+
+ if (perf_mmap__read_init(map))
+ continue;
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (evsel && evsel->side_band.cb)
+ evsel->side_band.cb(event, evsel->side_band.data);
+ else
+ pr_warning("cannot locate proper evsel for the side band event\n");
+
+ perf_mmap__consume(map);
+ }
+ perf_mmap__read_done(map);
+ }
+ }
+ return NULL;
+}
+
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+ struct target *target)
+{
+ struct perf_evsel *counter;
+
+ if (!evlist)
+ return 0;
+
+ if (perf_evlist__create_maps(evlist, target))
+ goto out_delete_evlist;
+
+ evlist__for_each_entry(evlist, counter) {
+ if (perf_evsel__open(counter, evlist->cpus,
+ evlist->threads) < 0)
+ goto out_delete_evlist;
+ }
+
+ if (perf_evlist__mmap(evlist, UINT_MAX))
+ goto out_delete_evlist;
+
+ evlist__for_each_entry(evlist, counter) {
+ if (perf_evsel__enable(counter))
+ goto out_delete_evlist;
+ }
+
+ evlist->thread.done = 0;
+ if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
+ goto out_delete_evlist;
+
+ return 0;
+
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ return -1;
+}
+
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist)
+{
+ if (!evlist)
+ return;
+ evlist->thread.done = 1;
+ pthread_join(evlist->thread.th, NULL);
+ perf_evlist__delete(evlist);
+}
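
Combining the three new entry points, a perf-record-style caller registers a side-band event, starts the poll thread, and stops it on exit. A hedged sketch (the attribute fields, sideband_cb() and start_sideband() are illustrative):

	static int sideband_cb(union perf_event *event, void *data __maybe_unused)
	{
		if (event->header.type == PERF_RECORD_BPF_EVENT)
			pr_debug("sideband: bpf event id %u\n",
				 event->bpf_event.id);
		return 0;
	}

	static int start_sideband(void)
	{
		struct perf_evlist *sb_evlist = NULL;
		struct target target = { .system_wide = true };
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_SOFTWARE,
			.config		= PERF_COUNT_SW_DUMMY,
			.bpf_event	= 1,
			.sample_id_all	= 1,	/* add_sb_event warns if unset */
		};

		if (perf_evlist__add_sb_event(&sb_evlist, &attr, sideband_cb, NULL))
			return -1;
		if (perf_evlist__start_sb_thread(sb_evlist, &target))
			return -1;

		/* ... workload runs; sideband_cb() fires from the poll thread ... */

		perf_evlist__stop_sb_thread(sb_evlist);
		return 0;
	}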
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 868294491194..dcb68f34d2cd 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -49,8 +49,15 @@ struct perf_evlist {
struct perf_evsel *selected;
struct events_stats stats;
struct perf_env *env;
+ void (*trace_event_sample_raw)(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample);
u64 first_sample_time;
u64 last_sample_time;
+ struct {
+ pthread_t th;
+ volatile int done;
+ } thread;
};
struct perf_evsel_str_handler {
@@ -84,6 +91,14 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
int perf_evlist__add_dummy(struct perf_evlist *evlist);
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data);
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+ struct target *target);
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist);
+
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);
@@ -162,7 +177,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks);
+ bool auxtrace_overwrite, int nr_cblocks, int affinity);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
@@ -314,5 +329,4 @@ void perf_evlist__force_leader(struct perf_evlist *evlist);
struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
struct perf_evsel *evsel);
-
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index dbc0466db368..7835e05f0c0a 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -294,20 +294,12 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
if (!precise)
goto new_event;
- /*
- * Unnamed union member, not supported as struct member named
- * initializer in older compilers such as gcc 4.4.7
- *
- * Just for probing the precise_ip:
- */
- attr.sample_period = 1;
perf_event_attr__set_max_precise_ip(&attr);
/*
* Now let the usual logic to set up the perf_event_attr defaults
* to kick in when we return and before perf_evsel__open() is called.
*/
- attr.sample_period = 0;
new_event:
evsel = perf_evsel__new(&attr);
if (evsel == NULL)
@@ -956,6 +948,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->sample_freq = 0;
attr->sample_period = 0;
attr->write_backward = 0;
+
+			/*
+			 * We don't get samples for slave events; we make
+			 * them when delivering the group leader sample.
+			 * Set the slave event to follow the master's
+			 * sample_type to ease up reporting.
+			 */
+ attr->sample_type = leader->attr.sample_type;
}
if (opts->no_samples)
@@ -1035,6 +1035,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
+ attr->ksymbol = track && !perf_missing_features.ksymbol;
+ attr->bpf_event = track && !opts->no_bpf_event &&
+ !perf_missing_features.bpf_event;
if (opts->record_namespaces)
attr->namespaces = track;
@@ -1289,6 +1292,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
assert(evsel->evlist == NULL);
+ perf_evsel__free_counts(evsel);
perf_evsel__free_fd(evsel);
perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel);
@@ -1339,10 +1343,9 @@ void perf_counts_values__scale(struct perf_counts_values *count,
count->val = 0;
} else if (count->run < count->ena) {
scaled = 1;
- count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
+ count->val = (u64)((double) count->val * count->ena / count->run);
}
- } else
- count->ena = count->run = 0;
+ }
if (pscaled)
*pscaled = scaled;
@@ -1652,6 +1655,8 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
PRINT_ATTRf(namespaces, p_unsigned);
+ PRINT_ATTRf(ksymbol, p_unsigned);
+ PRINT_ATTRf(bpf_event, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -1811,6 +1816,10 @@ fallback_missing_features:
PERF_SAMPLE_BRANCH_NO_CYCLES);
if (perf_missing_features.group_read && evsel->attr.inherit)
evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
+ if (perf_missing_features.ksymbol)
+ evsel->attr.ksymbol = 0;
+ if (perf_missing_features.bpf_event)
+ evsel->attr.bpf_event = 0;
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
@@ -1930,7 +1939,15 @@ try_fallback:
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
+ if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
+ perf_missing_features.bpf_event = true;
+ pr_debug2("switching off bpf_event\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
+ perf_missing_features.ksymbol = true;
+ pr_debug2("switching off ksymbol\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
perf_missing_features.write_backward = true;
pr_debug2("switching off write_backward\n");
goto out_close;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 82a289ce8b0c..0f2c6c93d721 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -8,7 +8,7 @@
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
-#include "symbol.h"
+#include "symbol_conf.h"
#include "cpumap.h"
#include "counts.h"
@@ -73,6 +73,8 @@ struct perf_evsel_config_term {
struct perf_stat_evsel;
+typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
+
/** struct perf_evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@@ -151,6 +153,10 @@ struct perf_evsel {
bool collect_stat;
bool weak_group;
const char *pmu_name;
+ struct {
+ perf_evsel__sb_cb_t *cb;
+ void *data;
+ } side_band;
};
union u64_swap {
@@ -168,6 +174,8 @@ struct perf_missing_features {
bool lbr_flags;
bool write_backward;
bool group_read;
+ bool ksymbol;
+ bool bpf_event;
};
extern struct perf_missing_features perf_missing_features;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index dec6d218c31c..b9e693825873 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -18,6 +18,7 @@
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
+#include <bpf/libbpf.h>
#include "evlist.h"
#include "evsel.h"
@@ -39,6 +40,8 @@
#include "tool.h"
#include "time-utils.h"
#include "units.h"
+#include "cputopo.h"
+#include "bpf-event.h"
#include "sane_ctype.h"
@@ -526,17 +529,11 @@ static int write_event_desc(struct feat_fd *ff,
static int write_cmdline(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- char buf[MAXPATHLEN];
- u32 n;
- int i, ret;
+ char pbuf[MAXPATHLEN], *buf;
+ int i, ret, n;
/* actual path to perf binary */
- ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
- if (ret <= 0)
- return -1;
-
- /* readlink() does not add null termination */
- buf[ret] = '\0';
+ buf = perf_exe(pbuf, MAXPATHLEN);
/* account for binary path */
n = perf_env.nr_cmdline + 1;
@@ -557,160 +554,15 @@ static int write_cmdline(struct feat_fd *ff,
return 0;
}
-#define CORE_SIB_FMT \
- "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
-#define THRD_SIB_FMT \
- "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
-
-struct cpu_topo {
- u32 cpu_nr;
- u32 core_sib;
- u32 thread_sib;
- char **core_siblings;
- char **thread_siblings;
-};
-
-static int build_cpu_topo(struct cpu_topo *tp, int cpu)
-{
- FILE *fp;
- char filename[MAXPATHLEN];
- char *buf = NULL, *p;
- size_t len = 0;
- ssize_t sret;
- u32 i = 0;
- int ret = -1;
-
- sprintf(filename, CORE_SIB_FMT, cpu);
- fp = fopen(filename, "r");
- if (!fp)
- goto try_threads;
-
- sret = getline(&buf, &len, fp);
- fclose(fp);
- if (sret <= 0)
- goto try_threads;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- for (i = 0; i < tp->core_sib; i++) {
- if (!strcmp(buf, tp->core_siblings[i]))
- break;
- }
- if (i == tp->core_sib) {
- tp->core_siblings[i] = buf;
- tp->core_sib++;
- buf = NULL;
- len = 0;
- }
- ret = 0;
-
-try_threads:
- sprintf(filename, THRD_SIB_FMT, cpu);
- fp = fopen(filename, "r");
- if (!fp)
- goto done;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- for (i = 0; i < tp->thread_sib; i++) {
- if (!strcmp(buf, tp->thread_siblings[i]))
- break;
- }
- if (i == tp->thread_sib) {
- tp->thread_siblings[i] = buf;
- tp->thread_sib++;
- buf = NULL;
- }
- ret = 0;
-done:
- if(fp)
- fclose(fp);
- free(buf);
- return ret;
-}
-
-static void free_cpu_topo(struct cpu_topo *tp)
-{
- u32 i;
-
- if (!tp)
- return;
-
- for (i = 0 ; i < tp->core_sib; i++)
- zfree(&tp->core_siblings[i]);
-
- for (i = 0 ; i < tp->thread_sib; i++)
- zfree(&tp->thread_siblings[i]);
-
- free(tp);
-}
-
-static struct cpu_topo *build_cpu_topology(void)
-{
- struct cpu_topo *tp = NULL;
- void *addr;
- u32 nr, i;
- size_t sz;
- long ncpus;
- int ret = -1;
- struct cpu_map *map;
-
- ncpus = cpu__max_present_cpu();
-
- /* build online CPU map */
- map = cpu_map__new(NULL);
- if (map == NULL) {
- pr_debug("failed to get system cpumap\n");
- return NULL;
- }
-
- nr = (u32)(ncpus & UINT_MAX);
-
- sz = nr * sizeof(char *);
- addr = calloc(1, sizeof(*tp) + 2 * sz);
- if (!addr)
- goto out_free;
-
- tp = addr;
- tp->cpu_nr = nr;
- addr += sizeof(*tp);
- tp->core_siblings = addr;
- addr += sz;
- tp->thread_siblings = addr;
-
- for (i = 0; i < nr; i++) {
- if (!cpu_map__has(map, i))
- continue;
-
- ret = build_cpu_topo(tp, i);
- if (ret < 0)
- break;
- }
-
-out_free:
- cpu_map__put(map);
- if (ret) {
- free_cpu_topo(tp);
- tp = NULL;
- }
- return tp;
-}
static int write_cpu_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- struct cpu_topo *tp;
+ struct cpu_topology *tp;
u32 i;
int ret, j;
- tp = build_cpu_topology();
+ tp = cpu_topology__new();
if (!tp)
return -1;
@@ -748,7 +600,7 @@ static int write_cpu_topology(struct feat_fd *ff,
return ret;
}
done:
- free_cpu_topo(tp);
+ cpu_topology__delete(tp);
return ret;
}
@@ -783,112 +635,45 @@ static int write_total_mem(struct feat_fd *ff,
return ret;
}
-static int write_topo_node(struct feat_fd *ff, int node)
-{
- char str[MAXPATHLEN];
- char field[32];
- char *buf = NULL, *p;
- size_t len = 0;
- FILE *fp;
- u64 mem_total, mem_free, mem;
- int ret = -1;
-
- sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
- fp = fopen(str, "r");
- if (!fp)
- return -1;
-
- while (getline(&buf, &len, fp) > 0) {
- /* skip over invalid lines */
- if (!strchr(buf, ':'))
- continue;
- if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
- goto done;
- if (!strcmp(field, "MemTotal:"))
- mem_total = mem;
- if (!strcmp(field, "MemFree:"))
- mem_free = mem;
- }
-
- fclose(fp);
- fp = NULL;
-
- ret = do_write(ff, &mem_total, sizeof(u64));
- if (ret)
- goto done;
-
- ret = do_write(ff, &mem_free, sizeof(u64));
- if (ret)
- goto done;
-
- ret = -1;
- sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
-
- fp = fopen(str, "r");
- if (!fp)
- goto done;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- ret = do_write_string(ff, buf);
-done:
- free(buf);
- if (fp)
- fclose(fp);
- return ret;
-}
-
static int write_numa_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- char *buf = NULL;
- size_t len = 0;
- FILE *fp;
- struct cpu_map *node_map = NULL;
- char *c;
- u32 nr, i, j;
+ struct numa_topology *tp;
int ret = -1;
+ u32 i;
- fp = fopen("/sys/devices/system/node/online", "r");
- if (!fp)
- return -1;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
+ tp = numa_topology__new();
+ if (!tp)
+ return -ENOMEM;
- c = strchr(buf, '\n');
- if (c)
- *c = '\0';
+ ret = do_write(ff, &tp->nr, sizeof(u32));
+ if (ret < 0)
+ goto err;
- node_map = cpu_map__new(buf);
- if (!node_map)
- goto done;
+ for (i = 0; i < tp->nr; i++) {
+ struct numa_topology_node *n = &tp->nodes[i];
- nr = (u32)node_map->nr;
+ ret = do_write(ff, &n->node, sizeof(u32));
+ if (ret < 0)
+ goto err;
- ret = do_write(ff, &nr, sizeof(nr));
- if (ret < 0)
- goto done;
+ ret = do_write(ff, &n->mem_total, sizeof(u64));
+ if (ret)
+ goto err;
- for (i = 0; i < nr; i++) {
- j = (u32)node_map->map[i];
- ret = do_write(ff, &j, sizeof(j));
- if (ret < 0)
- break;
+ ret = do_write(ff, &n->mem_free, sizeof(u64));
+ if (ret)
+ goto err;
- ret = write_topo_node(ff, i);
+ ret = do_write_string(ff, n->cpus);
if (ret < 0)
- break;
+ goto err;
}
-done:
- free(buf);
- fclose(fp);
- cpu_map__put(node_map);
+
+ ret = 0;
+
+err:
+ numa_topology__delete(tp);
return ret;
}
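
For orientation, the on-disk layout written above, as inferred from the do_write() calls (not an authoritative definition; strings use perf's length-prefixed do_write_string() encoding):

    /*
     *	u32 nr;			// number of NUMA nodes
     *	struct {
     *		u32 node;	// node id
     *		u64 mem_total;	// kB
     *		u64 mem_free;	// kB
     *		char cpus[];	// cpulist string, e.g. "0-3,8-11"
     *	} nodes[nr];
     */
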
@@ -1042,11 +827,9 @@ static int write_cpuid(struct feat_fd *ff,
int ret;
ret = get_cpuid(buffer, sizeof(buffer));
- if (!ret)
- goto write_it;
+ if (ret)
+ return -1;
- return -1;
-write_it:
return do_write_string(ff, buffer);
}
@@ -1080,6 +863,104 @@ static int write_clockid(struct feat_fd *ff,
sizeof(ff->ph->env.clockid_res_ns));
}
+static int write_dir_format(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ if (WARN_ON(!perf_data__is_dir(data)))
+ return -1;
+
+ return do_write(ff, &data->dir.version, sizeof(data->dir.version));
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+ int ret;
+
+ down_read(&env->bpf_progs.lock);
+
+ ret = do_write(ff, &env->bpf_progs.infos_cnt,
+ sizeof(env->bpf_progs.infos_cnt));
+ if (ret < 0)
+ goto out;
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+ while (next) {
+ struct bpf_prog_info_node *node;
+ size_t len;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ len = sizeof(struct bpf_prog_info_linear) +
+ node->info_linear->data_len;
+
+ /* before writing to file, translate address to offset */
+ bpf_program__bpil_addr_to_offs(node->info_linear);
+ ret = do_write(ff, node->info_linear, len);
+ /*
+ * translate back to address even when do_write() fails,
+ * so that this function never changes the data.
+ */
+ bpf_program__bpil_offs_to_addr(node->info_linear);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ up_read(&env->bpf_progs.lock);
+ return ret;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
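
The addr_to_offs/offs_to_addr dance above exists because bpf_prog_info_linear embeds pointers that are only meaningful in the writer's address space. A standalone illustration of the same trick on a toy struct (not the libbpf layout):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct blob {
    	uint64_t data_ptr;	/* pointer in memory, offset on disk */
    	char data[16];
    };

    static void ptr_to_off(struct blob *b)
    {
    	b->data_ptr -= (uintptr_t)b;	/* make it blob-relative */
    }

    static void off_to_ptr(struct blob *b)
    {
    	b->data_ptr += (uintptr_t)b;	/* back to an absolute pointer */
    }

    int main(void)
    {
    	struct blob b = { .data = "payload" };

    	b.data_ptr = (uintptr_t)b.data;
    	ptr_to_off(&b);			/* before writing the blob out */
    	printf("offset = %" PRIu64 "\n", b.data_ptr);
    	off_to_ptr(&b);			/* restore even if the write failed */
    	printf("%s\n", (char *)(uintptr_t)b.data_ptr);
    	return 0;
    }
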
+
+static int write_bpf_btf(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+ int ret;
+
+ down_read(&env->bpf_progs.lock);
+
+ ret = do_write(ff, &env->bpf_progs.btfs_cnt,
+ sizeof(env->bpf_progs.btfs_cnt));
+
+ if (ret < 0)
+ goto out;
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ ret = do_write(ff, &node->id,
+ sizeof(u32) * 2 + node->data_size);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ up_read(&env->bpf_progs.lock);
+ return ret;
+}
+
static int cpu_cache_level__sort(const void *a, const void *b)
{
struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1560,6 +1441,63 @@ static void print_clockid(struct feat_fd *ff, FILE *fp)
ff->ph->env.clockid_res_ns * 1000);
}
+static void print_dir_format(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
+}
+
+static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_read(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+
+ bpf_event__print_bpf_prog_info(&node->info_linear->info,
+ env, fp);
+ }
+
+ up_read(&env->bpf_progs.lock);
+}
+
+static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_read(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ fprintf(fp, "# btf info of id %u\n", node->id);
+ }
+
+ up_read(&env->bpf_progs.lock);
+}
+
static void free_event_desc(struct perf_evsel *events)
{
struct perf_evsel *evsel;
@@ -2592,6 +2530,139 @@ static int process_clockid(struct feat_fd *ff,
return 0;
}
+static int process_dir_format(struct feat_fd *ff,
+ void *_data __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ if (WARN_ON(!perf_data__is_dir(data)))
+ return -1;
+
+ return do_read_u64(ff, &data->dir.version);
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct perf_env *env = &ff->ph->env;
+ u32 count, i;
+ int err = -1;
+
+ if (ff->ph->needs_swap) {
+ pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
+ return 0;
+ }
+
+ if (do_read_u32(ff, &count))
+ return -1;
+
+ down_write(&env->bpf_progs.lock);
+
+ for (i = 0; i < count; ++i) {
+ u32 info_len, data_len;
+
+ info_linear = NULL;
+ info_node = NULL;
+ if (do_read_u32(ff, &info_len))
+ goto out;
+ if (do_read_u32(ff, &data_len))
+ goto out;
+
+ if (info_len > sizeof(struct bpf_prog_info)) {
+ pr_warning("detected invalid bpf_prog_info\n");
+ goto out;
+ }
+
+ info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
+ data_len);
+ if (!info_linear)
+ goto out;
+ info_linear->info_len = sizeof(struct bpf_prog_info);
+ info_linear->data_len = data_len;
+ if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
+ goto out;
+ if (__do_read(ff, &info_linear->info, info_len))
+ goto out;
+ if (info_len < sizeof(struct bpf_prog_info))
+ memset(((void *)(&info_linear->info)) + info_len, 0,
+ sizeof(struct bpf_prog_info) - info_len);
+
+ if (__do_read(ff, info_linear->data, data_len))
+ goto out;
+
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (!info_node)
+ goto out;
+
+ /* after reading from file, translate offset to address */
+ bpf_program__bpil_offs_to_addr(info_linear);
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ }
+
+ up_write(&env->bpf_progs.lock);
+ return 0;
+out:
+ free(info_linear);
+ free(info_node);
+ up_write(&env->bpf_progs.lock);
+ return err;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ u32 count, i;
+ int err = -1;
+
+ if (ff->ph->needs_swap) {
+ pr_warning("interpreting btf from systems with endianity is not yet supported\n");
+ return 0;
+ }
+
+ if (do_read_u32(ff, &count))
+ return -1;
+
+ down_write(&env->bpf_progs.lock);
+
+ for (i = 0; i < count; ++i) {
+ struct btf_node *node;
+ u32 id, data_size;
+
+ if (do_read_u32(ff, &id))
+ goto out;
+ if (do_read_u32(ff, &data_size))
+ goto out;
+
+ node = malloc(sizeof(struct btf_node) + data_size);
+ if (!node)
+ goto out;
+
+ node->id = id;
+ node->data_size = data_size;
+
+ if (__do_read(ff, node->data, data_size)) {
+ free(node);
+ goto out;
+ }
+
+ perf_env__insert_btf(env, node);
+ }
+
+ err = 0;
+out:
+ up_write(&env->bpf_progs.lock);
+ return err;
+}
+
struct feature_ops {
int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2651,7 +2722,10 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPN(CACHE, cache, true),
FEAT_OPR(SAMPLE_TIME, sample_time, false),
FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
- FEAT_OPR(CLOCKID, clockid, false)
+ FEAT_OPR(CLOCKID, clockid, false),
+ FEAT_OPN(DIR_FORMAT, dir_format, false),
+ FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
+ FEAT_OPR(BPF_BTF, bpf_btf, false),
};
struct header_print_data {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 0d553ddca0a3..386da49e1bfa 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -39,6 +39,9 @@ enum {
HEADER_SAMPLE_TIME,
HEADER_MEM_TOPOLOGY,
HEADER_CLOCKID,
+ HEADER_DIR_FORMAT,
+ HEADER_BPF_PROG_INFO,
+ HEADER_BPF_BTF,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -48,6 +51,10 @@ enum perf_header_version {
PERF_HEADER_VERSION_2,
};
+enum perf_dir_version {
+ PERF_DIR_VERSION = 1,
+};
+
struct perf_file_section {
u64 offset;
u64 size;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 8aad8330e392..7ace7a10054d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include "callchain.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
@@ -11,12 +12,14 @@
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
+#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
+#include <linux/time64.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he);
@@ -190,6 +193,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_TIME, 12);
if (h->srcline) {
len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
@@ -209,7 +213,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
int row = 0;
@@ -244,6 +248,14 @@ static void he_stat__add_cpumode_period(struct he_stat *he_stat,
}
}
+static long hist_time(unsigned long htime)
+{
+ unsigned long time_quantum = symbol_conf.time_quantum;
+
+ if (time_quantum)
+ return (htime / time_quantum) * time_quantum;
+ return htime;
+}
+
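
hist_time() above floors a timestamp to the configured quantum so samples fall into discrete time slices. A trivial standalone check of the rounding:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long quantum = 100000000UL;	/* 100 ms in ns */
    	unsigned long t = 1234567890UL;

    	/* same rounding as hist_time(): floor to the quantum boundary */
    	printf("%lu -> %lu\n", t, t / quantum * quantum);
    	return 0;
    }
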
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
u64 weight)
{
@@ -296,7 +308,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
if (!he->leaf) {
struct hist_entry *child;
- struct rb_node *node = rb_first(&he->hroot_out);
+ struct rb_node *node = rb_first_cached(&he->hroot_out);
while (node) {
child = rb_entry(node, struct hist_entry, rb_node);
node = rb_next(node);
@@ -311,8 +323,8 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
if (he->parent_he) {
root_in = &he->parent_he->hroot_in;
@@ -325,8 +337,8 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
root_out = &hists->entries;
}
- rb_erase(&he->rb_node_in, root_in);
- rb_erase(&he->rb_node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
+ rb_erase_cached(&he->rb_node, root_out);
--hists->nr_entries;
if (!he->filtered)
@@ -337,7 +349,7 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@@ -353,7 +365,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
void hists__delete_entries(struct hists *hists)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@@ -394,11 +406,8 @@ static int hist_entry__init(struct hist_entry *he,
* adding new entries. So we need to save a copy.
*/
he->branch_info = malloc(sizeof(*he->branch_info));
- if (he->branch_info == NULL) {
- map__zput(he->ms.map);
- free(he->stat_acc);
- return -ENOMEM;
- }
+ if (he->branch_info == NULL)
+ goto err;
memcpy(he->branch_info, template->branch_info,
sizeof(*he->branch_info));
@@ -417,31 +426,53 @@ static int hist_entry__init(struct hist_entry *he,
if (he->raw_data) {
he->raw_data = memdup(he->raw_data, he->raw_size);
+ if (he->raw_data == NULL)
+ goto err_infos;
+ }
- if (he->raw_data == NULL) {
- map__put(he->ms.map);
- if (he->branch_info) {
- map__put(he->branch_info->from.map);
- map__put(he->branch_info->to.map);
- free(he->branch_info);
- }
- if (he->mem_info) {
- map__put(he->mem_info->iaddr.map);
- map__put(he->mem_info->daddr.map);
- }
- free(he->stat_acc);
- return -ENOMEM;
- }
+ if (he->srcline) {
+ he->srcline = strdup(he->srcline);
+ if (he->srcline == NULL)
+ goto err_rawdata;
+ }
+
+ if (symbol_conf.res_sample) {
+ he->res_samples = calloc(symbol_conf.res_sample,
+ sizeof(struct res_sample));
+ if (!he->res_samples)
+ goto err_srcline;
}
+
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
- he->hroot_in = RB_ROOT;
- he->hroot_out = RB_ROOT;
+ he->hroot_in = RB_ROOT_CACHED;
+ he->hroot_out = RB_ROOT_CACHED;
if (!symbol_conf.report_hierarchy)
he->leaf = true;
return 0;
+
+err_srcline:
+ free(he->srcline);
+
+err_rawdata:
+ free(he->raw_data);
+
+err_infos:
+ if (he->branch_info) {
+ map__put(he->branch_info->from.map);
+ map__put(he->branch_info->to.map);
+ free(he->branch_info);
+ }
+ if (he->mem_info) {
+ map__put(he->mem_info->iaddr.map);
+ map__put(he->mem_info->daddr.map);
+ }
+err:
+ map__zput(he->ms.map);
+ free(he->stat_acc);
+ return -ENOMEM;
}
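
The restructured error handling above is the usual goto-unwind ladder: each later failure jumps to a label that releases everything allocated before it, in reverse order. A generic standalone sketch of the shape (the widget names are made up):

    #include <stdlib.h>

    struct widget { void *a, *b, *c; };

    static int widget_init(struct widget *w)
    {
    	w->a = malloc(16);
    	if (!w->a)
    		goto err;
    	w->b = malloc(16);
    	if (!w->b)
    		goto err_free_a;
    	w->c = malloc(16);
    	if (!w->c)
    		goto err_free_b;
    	return 0;

    err_free_b:
    	free(w->b);
    err_free_a:
    	free(w->a);
    err:
    	return -1;
    }

    int main(void)
    {
    	struct widget w;

    	if (widget_init(&w))
    		return 1;
    	free(w.c);
    	free(w.b);
    	free(w.a);
    	return 0;
    }
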
static void *hist_entry__zalloc(size_t size)
@@ -513,8 +544,9 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
int64_t cmp;
u64 period = entry->stat.period;
u64 weight = entry->stat.weight;
+ bool leftmost = true;
- p = &hists->entries_in->rb_node;
+ p = &hists->entries_in->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@@ -557,8 +589,10 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(entry, sample_self);
@@ -570,7 +604,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, hists->entries_in);
+ rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
if (sample_self)
he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
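
Most hunks in this file repeat one pattern for the rb_root_cached conversion: track whether the descent ever went right, and pass that to the cached insert so rb_first_cached() stays O(1). A condensed sketch of the pattern (builds only against the tools/include rbtree header; the helper and comparator names are illustrative):

    #include <stdbool.h>
    #include <linux/rbtree.h>	/* kernel/tools header, not libc */

    static void insert_cached(struct rb_root_cached *root, struct rb_node *new,
    			  bool (*less)(struct rb_node *, struct rb_node *))
    {
    	struct rb_node **p = &root->rb_root.rb_node;
    	struct rb_node *parent = NULL;
    	bool leftmost = true;

    	while (*p) {
    		parent = *p;
    		if (less(new, parent)) {
    			p = &(*p)->rb_left;
    		} else {
    			p = &(*p)->rb_right;
    			leftmost = false;	/* something is to our left */
    		}
    	}
    	rb_link_node(new, parent, p);
    	rb_insert_color_cached(new, root, leftmost);
    }
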
@@ -579,6 +613,32 @@ out:
return he;
}
+static unsigned random_max(unsigned high)
+{
+ unsigned thresh = -high % high;
+
+ for (;;) {
+ unsigned r = random();
+ if (r >= thresh)
+ return r % high;
+ }
+}
+
+static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
+{
+ struct res_sample *r;
+ int j;
+
+ if (he->num_res < symbol_conf.res_sample) {
+ j = he->num_res++;
+ } else {
+ j = random_max(symbol_conf.res_sample);
+ }
+ r = &he->res_samples[j];
+ r->time = sample->time;
+ r->cpu = sample->cpu;
+ r->tid = sample->tid;
+}
+
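
random_max() above avoids modulo bias by rejecting the low band of raw values before taking the remainder; once the array is full, hists__res_sample() evicts a uniformly random slot, so it keeps a bounded randomized subset (unlike classic reservoir sampling, retention is not equal-probability across all samples). A standalone check of the rejection trick:

    #include <stdio.h>
    #include <stdlib.h>

    /* values below thresh would over-represent the low residues */
    static unsigned random_max(unsigned high)
    {
    	unsigned thresh = -high % high;

    	for (;;) {
    		unsigned r = random();
    		if (r >= thresh)
    			return r % high;
    	}
    }

    int main(void)
    {
    	unsigned buckets[3] = { 0 };
    	int i;

    	srandom(1);
    	for (i = 0; i < 300000; i++)
    		buckets[random_max(3)]++;
    	printf("%u %u %u\n", buckets[0], buckets[1], buckets[2]);
    	return 0;
    }
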
static struct hist_entry*
__hists__add_entry(struct hists *hists,
struct addr_location *al,
@@ -601,7 +661,7 @@ __hists__add_entry(struct hists *hists,
.map = al->map,
.sym = al->sym,
},
- .srcline = al->srcline ? strdup(al->srcline) : NULL,
+ .srcline = (char *) al->srcline,
.socket = al->socket,
.cpu = al->cpu,
.cpumode = al->cpumode,
@@ -621,10 +681,13 @@ __hists__add_entry(struct hists *hists,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
.ops = ops,
+ .time = hist_time(sample->time),
}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
if (!hists->has_callchains && he && he->callchain_size != 0)
hists->has_callchains = true;
+ if (he && symbol_conf.res_sample)
+ hists__res_sample(he, sample);
return he;
}
@@ -958,7 +1021,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.map = al->map,
.sym = al->sym,
},
- .srcline = al->srcline ? strdup(al->srcline) : NULL,
+ .srcline = (char *) al->srcline,
.parent = iter->parent,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
@@ -1048,8 +1111,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth);
- if (err)
+ if (err) {
+ map__put(alm);
return err;
+ }
err = iter->ops->prepare_entry(iter, al);
if (err)
@@ -1148,6 +1213,7 @@ void hist_entry__delete(struct hist_entry *he)
mem_info__zput(he->mem_info);
}
+ zfree(&he->res_samples);
zfree(&he->stat_acc);
free_srcline(he->srcline);
if (he->srcfile && he->srcfile[0])
@@ -1279,16 +1345,17 @@ static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he,
struct hist_entry *parent_he,
struct perf_hpp_list *hpp_list)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter, *new;
struct perf_hpp_fmt *fmt;
int64_t cmp;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1308,8 +1375,10 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
new = hist_entry__new(he, true);
@@ -1343,12 +1412,12 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
}
rb_link_node(&new->rb_node_in, parent, p);
- rb_insert_color(&new->rb_node_in, root);
+ rb_insert_color_cached(&new->rb_node_in, root, leftmost);
return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he)
{
struct perf_hpp_list_node *node;
@@ -1395,13 +1464,14 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
}
static int hists__collapse_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
+ bool leftmost = true;
if (symbol_conf.report_hierarchy)
return hists__hierarchy_insert_entry(hists, root, he);
@@ -1432,19 +1502,21 @@ static int hists__collapse_insert_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
return 1;
}
-struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
+struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
pthread_mutex_lock(&hists->lock);
@@ -1467,7 +1539,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
int ret;
@@ -1479,7 +1551,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
root = hists__get_rotate_entries_in(hists);
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next) {
if (session_done())
@@ -1487,7 +1559,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
- rb_erase(&n->rb_node_in, root);
+ rb_erase_cached(&n->rb_node_in, root);
ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
if (ret < 0)
return -1;
@@ -1558,7 +1630,7 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
struct rb_node *node;
struct hist_entry *he;
- node = rb_first(&hists->entries);
+ node = rb_first_cached(&hists->entries);
hists->stats.total_period = 0;
hists->stats.total_non_filtered_period = 0;
@@ -1578,13 +1650,14 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
}
}
-static void hierarchy_insert_output_entry(struct rb_root *root,
+static void hierarchy_insert_output_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1592,12 +1665,14 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
if (hist_entry__sort(he, iter) > 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color_cached(&he->rb_node, root, leftmost);
/* update column width of dynamic entry */
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
@@ -1608,16 +1683,16 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
static void hists__hierarchy_output_resort(struct hists *hists,
struct ui_progress *prog,
- struct rb_root *root_in,
- struct rb_root *root_out,
+ struct rb_root_cached *root_in,
+ struct rb_root_cached *root_out,
u64 min_callchain_hits,
bool use_callchain)
{
struct rb_node *node;
struct hist_entry *he;
- *root_out = RB_ROOT;
- node = rb_first(root_in);
+ *root_out = RB_ROOT_CACHED;
+ node = rb_first_cached(root_in);
while (node) {
he = rb_entry(node, struct hist_entry, rb_node_in);
@@ -1660,15 +1735,16 @@ static void hists__hierarchy_output_resort(struct hists *hists,
}
}
-static void __hists__insert_output_entry(struct rb_root *entries,
+static void __hists__insert_output_entry(struct rb_root_cached *entries,
struct hist_entry *he,
u64 min_callchain_hits,
bool use_callchain)
{
- struct rb_node **p = &entries->rb_node;
+ struct rb_node **p = &entries->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
if (use_callchain) {
if (callchain_param.mode == CHAIN_GRAPH_REL) {
@@ -1689,12 +1765,14 @@ static void __hists__insert_output_entry(struct rb_root *entries,
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, entries);
+ rb_insert_color_cached(&he->rb_node, entries, leftmost);
perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
if (perf_hpp__is_dynamic_entry(fmt) &&
@@ -1704,9 +1782,10 @@ static void __hists__insert_output_entry(struct rb_root *entries,
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
- bool use_callchain, hists__resort_cb_t cb)
+ bool use_callchain, hists__resort_cb_t cb,
+ void *cb_arg)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
u64 callchain_total;
@@ -1736,14 +1815,14 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
else
root = hists->entries_in;
- next = rb_first(root);
- hists->entries = RB_ROOT;
+ next = rb_first_cached(root);
+ hists->entries = RB_ROOT_CACHED;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
- if (cb && cb(n))
+ if (cb && cb(n, cb_arg))
continue;
__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
@@ -1757,7 +1836,8 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
}
}
-void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg)
{
bool use_callchain;
@@ -1768,18 +1848,23 @@ void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *pro
use_callchain |= symbol_conf.show_branchflag_count;
- output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
+ output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
+}
+
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+{
+ return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
- output_resort(hists, prog, symbol_conf.use_callchain, NULL);
+ output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
hists__resort_cb_t cb)
{
- output_resort(hists, prog, symbol_conf.use_callchain, cb);
+ output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
@@ -1798,7 +1883,7 @@ struct rb_node *rb_hierarchy_last(struct rb_node *node)
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
while (can_goto_child(he, HMD_NORMAL)) {
- node = rb_last(&he->hroot_out);
+ node = rb_last(&he->hroot_out.rb_root);
he = rb_entry(node, struct hist_entry, rb_node);
}
return node;
@@ -1809,7 +1894,7 @@ struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_di
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
if (can_goto_child(he, hmd))
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
else
node = rb_next(node);
@@ -1847,7 +1932,7 @@ bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
if (he->leaf)
return false;
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
child = rb_entry(node, struct hist_entry, rb_node);
while (node && child->filtered) {
@@ -1965,7 +2050,7 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (filter(hists, h))
@@ -1975,13 +2060,15 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
}
}
-static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
+static void resort_filtered_entry(struct rb_root_cached *root,
+ struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
- struct rb_root new_root = RB_ROOT;
+ struct rb_root_cached new_root = RB_ROOT_CACHED;
struct rb_node *nd;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1989,22 +2076,24 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color_cached(&he->rb_node, root, leftmost);
if (he->leaf || he->filtered)
return;
- nd = rb_first(&he->hroot_out);
+ nd = rb_first_cached(&he->hroot_out);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
- rb_erase(&h->rb_node, &he->hroot_out);
+ rb_erase_cached(&h->rb_node, &he->hroot_out);
resort_filtered_entry(&new_root, h);
}
@@ -2015,14 +2104,14 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
struct rb_node *nd;
- struct rb_root new_root = RB_ROOT;
+ struct rb_root_cached new_root = RB_ROOT_CACHED;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- nd = rb_first(&hists->entries);
+ nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
int ret;
@@ -2066,12 +2155,12 @@ static void hists__filter_hierarchy(struct hists *hists, int type, const void *a
* resort output after applying a new filter since a filter in a lower
* hierarchy can change periods in an upper hierarchy.
*/
- nd = rb_first(&hists->entries);
+ nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
- rb_erase(&h->rb_node, &hists->entries);
+ rb_erase_cached(&h->rb_node, &hists->entries);
resort_filtered_entry(&new_root, h);
}
@@ -2140,18 +2229,19 @@ void hists__inc_nr_samples(struct hists *hists, bool filtered)
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *pair)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int64_t cmp;
+ bool leftmost = true;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@@ -2164,8 +2254,10 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(pair, true);
@@ -2175,7 +2267,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (symbol_conf.cumulate_callchain)
memset(he->stat_acc, 0, sizeof(he->stat));
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
hists__inc_stats(hists, he);
he->dummy = true;
}
@@ -2184,15 +2276,16 @@ out:
}
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *pair)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p != NULL) {
int64_t cmp = 0;
@@ -2209,14 +2302,16 @@ static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(pair, true);
if (he) {
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
he->dummy = true;
he->hists = hists;
@@ -2233,9 +2328,9 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
struct rb_node *n;
if (hists__has(hists, need_collapse))
- n = hists->entries_collapsed.rb_node;
+ n = hists->entries_collapsed.rb_root.rb_node;
else
- n = hists->entries_in->rb_node;
+ n = hists->entries_in->rb_root.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
@@ -2252,10 +2347,10 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
return NULL;
}
-static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
+static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node *n = root->rb_node;
+ struct rb_node *n = root->rb_root.rb_node;
while (n) {
struct hist_entry *iter;
@@ -2280,13 +2375,13 @@ static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
return NULL;
}
-static void hists__match_hierarchy(struct rb_root *leader_root,
- struct rb_root *other_root)
+static void hists__match_hierarchy(struct rb_root_cached *leader_root,
+ struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *pair;
- for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_hierarchy_entry(other_root, pos);
@@ -2302,7 +2397,7 @@ static void hists__match_hierarchy(struct rb_root *leader_root,
*/
void hists__match(struct hists *leader, struct hists *other)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@@ -2317,7 +2412,7 @@ void hists__match(struct hists *leader, struct hists *other)
else
root = leader->entries_in;
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_entry(other, pos);
@@ -2328,13 +2423,13 @@ void hists__match(struct hists *leader, struct hists *other)
static int hists__link_hierarchy(struct hists *leader_hists,
struct hist_entry *parent,
- struct rb_root *leader_root,
- struct rb_root *other_root)
+ struct rb_root_cached *leader_root,
+ struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *leader;
- for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (hist_entry__has_pairs(pos)) {
@@ -2377,7 +2472,7 @@ static int hists__link_hierarchy(struct hists *leader_hists,
*/
int hists__link(struct hists *leader, struct hists *other)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@@ -2393,7 +2488,7 @@ int hists__link(struct hists *leader, struct hists *other)
else
root = other->entries_in;
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (!hist_entry__has_pairs(pos)) {
@@ -2566,10 +2661,10 @@ int perf_hist_config(const char *var, const char *value)
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
memset(hists, 0, sizeof(*hists));
- hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
hists->entries_in = &hists->entries_in_array[0];
- hists->entries_collapsed = RB_ROOT;
- hists->entries = RB_ROOT;
+ hists->entries_collapsed = RB_ROOT_CACHED;
+ hists->entries = RB_ROOT_CACHED;
pthread_mutex_init(&hists->lock, NULL);
hists->socket_filter = -1;
hists->hpp_list = hpp_list;
@@ -2577,14 +2672,14 @@ int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
return 0;
}
-static void hists__delete_remaining_entries(struct rb_root *root)
+static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
struct rb_node *node;
struct hist_entry *he;
- while (!RB_EMPTY_ROOT(root)) {
- node = rb_first(root);
- rb_erase(node, root);
+ while (!RB_EMPTY_ROOT(&root->rb_root)) {
+ node = rb_first_cached(root);
+ rb_erase_cached(node, root);
he = rb_entry(node, struct hist_entry, rb_node_in);
hist_entry__delete(he);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 664b5eda8d51..76ff6c6d03b8 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -2,9 +2,9 @@
#ifndef __PERF_HIST_H
#define __PERF_HIST_H
+#include <linux/rbtree.h>
#include <linux/types.h>
#include <pthread.h>
-#include "callchain.h"
#include "evsel.h"
#include "header.h"
#include "color.h"
@@ -13,6 +13,9 @@
struct hist_entry;
struct hist_entry_ops;
struct addr_location;
+struct map_symbol;
+struct mem_info;
+struct branch_info;
struct symbol;
enum hist_filter {
@@ -28,6 +31,7 @@ enum hist_filter {
enum hist_column {
HISTC_SYMBOL,
+ HISTC_TIME,
HISTC_DSO,
HISTC_THREAD,
HISTC_COMM,
@@ -70,10 +74,10 @@ struct thread;
struct dso;
struct hists {
- struct rb_root entries_in_array[2];
- struct rb_root *entries_in;
- struct rb_root entries;
- struct rb_root entries_collapsed;
+ struct rb_root_cached entries_in_array[2];
+ struct rb_root_cached *entries_in;
+ struct rb_root_cached entries;
+ struct rb_root_cached entries_collapsed;
u64 nr_entries;
u64 nr_non_filtered_entries;
u64 callchain_period;
@@ -160,8 +164,10 @@ int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_fmt *fmt, int printed);
void hist_entry__delete(struct hist_entry *he);
-typedef int (*hists__resort_cb_t)(struct hist_entry *he);
+typedef int (*hists__resort_cb_t)(struct hist_entry *he, void *arg);
+void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg);
void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
@@ -230,7 +236,7 @@ static __pure inline bool hists__has_callchains(struct hists *hists)
int hists__init(void);
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
-struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
+struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists);
struct perf_hpp {
char *buf;
@@ -427,9 +433,18 @@ struct hist_browser_timer {
};
struct annotation_options;
+struct res_sample;
+
+enum rstype {
+ A_NORMAL,
+ A_ASM,
+ A_SOURCE
+};
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
+void attr_to_script(char *buf, struct perf_event_attr *attr);
+
int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *annotation_opts);
@@ -444,7 +459,13 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
struct perf_env *env,
bool warn_lost_event,
struct annotation_options *annotation_options);
-int script_browse(const char *script_opt);
+
+int script_browse(const char *script_opt, struct perf_evsel *evsel);
+
+void run_script(char *cmd);
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+ struct perf_evsel *evsel, enum rstype rstype);
+void res_sample_init(void);
#else
static inline
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
@@ -473,11 +494,22 @@ static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
return 0;
}
-static inline int script_browse(const char *script_opt __maybe_unused)
+static inline int script_browse(const char *script_opt __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused)
{
return 0;
}
+static inline int res_sample_browse(struct res_sample *res_samples __maybe_unused,
+ int num_res __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ enum rstype rstype __maybe_unused)
+{
+ return 0;
+}
+
+static inline void res_sample_init(void) {}
+
#define K_LEFT -1000
#define K_RIGHT -2000
#define K_SWITCH_INPUT_DATA -3000
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ee6ca65f81f4..47025bc727e1 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -27,6 +27,8 @@
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "session.h"
#include "util.h"
#include "thread.h"
@@ -142,7 +144,7 @@ static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
- sample->tid, 0, "Lost trace data");
+ sample->tid, 0, "Lost trace data", sample->time);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
@@ -326,35 +328,19 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
struct machine *machine = btsq->bts->machine;
struct thread *thread;
- struct addr_location al;
unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
- int x86_64;
- uint8_t cpumode;
+ bool x86_64;
int err = -1;
- if (machine__kernel_ip(machine, ip))
- cpumode = PERF_RECORD_MISC_KERNEL;
- else
- cpumode = PERF_RECORD_MISC_USER;
-
thread = machine__find_thread(machine, -1, btsq->tid);
if (!thread)
return -1;
- if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
- goto out_put;
-
- len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
- INTEL_PT_INSN_BUF_SZ);
+ len = thread__memcpy(thread, machine, buf, ip, INTEL_PT_INSN_BUF_SZ, &x86_64);
if (len <= 0)
goto out_put;
- /* Load maps to ensure dso->is_64_bit has been updated */
- map__load(al.map);
-
- x86_64 = al.map->dso->is_64_bit;
-
if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
goto out_put;
@@ -372,7 +358,7 @@ static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
- "Failed to get instruction");
+ "Failed to get instruction", 0);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 1b704fbea9de..23bf788f84b9 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -1,4 +1,4 @@
-libperf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
+perf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
inat_tables_script = util/intel-pt-decoder/gen-insn-attr-x86.awk
inat_tables_maps = util/intel-pt-decoder/x86-opcode-map.txt
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 4503f3ca45ab..6e03db142091 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -26,6 +26,7 @@
#include "../cache.h"
#include "../util.h"
+#include "../auxtrace.h"
#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
@@ -867,7 +868,7 @@ static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
ret = intel_pt_get_packet(decoder->buf, decoder->len,
&decoder->packet);
- if (ret == INTEL_PT_NEED_MORE_BYTES &&
+ if (ret == INTEL_PT_NEED_MORE_BYTES && BITS_PER_LONG == 32 &&
decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
ret = intel_pt_get_split_packet(decoder);
if (ret < 0)
@@ -1394,7 +1395,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
- decoder->cbr = 0;
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
@@ -2575,6 +2575,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
}
}
+#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
+
+/**
+ * adj_for_padding - adjust overlap to account for padding.
+ * @buf_b: second buffer
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ *
+ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
+ * accordingly.
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts
+ */
+static unsigned char *adj_for_padding(unsigned char *buf_b,
+ unsigned char *buf_a, size_t len_a)
+{
+ unsigned char *p = buf_b - MAX_PADDING;
+ unsigned char *q = buf_a + len_a - MAX_PADDING;
+ int i;
+
+ for (i = MAX_PADDING; i; i--, p++, q++) {
+ if (*p != *q)
+ break;
+ }
+
+ return p;
+}
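
A standalone illustration of the comparison above, with MAX_PADDING hard-coded to 7 as in the patch: buf_a ends in padding bytes, buf_b holds real data at the same positions, so the non-overlapped region must begin before the naive start:

    #include <stdio.h>

    #define MAX_PADDING 7	/* PERF_AUXTRACE_RECORD_ALIGNMENT - 1 */

    static unsigned char *adj_for_padding(unsigned char *buf_b,
    				      unsigned char *buf_a, size_t len_a)
    {
    	unsigned char *p = buf_b - MAX_PADDING;
    	unsigned char *q = buf_a + len_a - MAX_PADDING;
    	int i;

    	for (i = MAX_PADDING; i; i--, p++, q++) {
    		if (*p != *q)
    			break;
    	}
    	return p;
    }

    int main(void)
    {
    	/* a ends in two zero padding bytes; b has real data (9, 9)
    	 * there, so new data starts two bytes before the naive b+7 */
    	unsigned char a[] = { 1, 2, 3, 4, 5, 6, 7, 8, 0, 0 };
    	unsigned char b[] = { 4, 5, 6, 7, 8, 9, 9, 10, 11, 12 };
    	unsigned char *start = adj_for_padding(b + 7, a, sizeof(a));

    	printf("non-overlapped data starts at b[%td]\n", start - b);
    	return 0;
    }
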
+
/**
* intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
* using TSC.
@@ -2625,8 +2653,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
/* Same TSC, so buffers are consecutive */
if (!cmp && rem_b >= rem_a) {
+ unsigned char *start;
+
*consecutive = true;
- return buf_b + len_b - (rem_b - rem_a);
+ start = buf_b + len_b - (rem_b - rem_a);
+ return adj_for_padding(start, buf_a, len_a);
}
if (cmp < 0)
return buf_b; /* tsc_a < tsc_b => no overlap */
@@ -2689,7 +2720,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
found = memmem(buf_a, len_a, buf_b, len_a);
if (found) {
*consecutive = true;
- return buf_b + len_a;
+ return adj_for_padding(buf_b + len_a, buf_a, len_a);
}
/* Try again at next PSB in buffer 'a' */
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 2e72373ec6df..6d288237887b 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1411,7 +1411,7 @@ static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
union perf_event event;
char msg[MAX_AUXTRACE_ERROR_MSG];
@@ -1420,7 +1420,7 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg);
+ code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(pt->session, &event, NULL);
if (err)
@@ -1430,6 +1430,18 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
return err;
}
+static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
+ const struct intel_pt_state *state)
+{
+ struct intel_pt *pt = ptq->pt;
+ u64 tm = ptq->timestamp;
+
+ tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
+
+ return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
+ ptq->tid, state->from_ip, tm);
+}
+
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
struct auxtrace_queue *queue;
@@ -1676,10 +1688,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
intel_pt_next_tid(pt, ptq);
}
if (pt->synth_opts.errors) {
- err = intel_pt_synth_error(pt, state->err,
- ptq->cpu, ptq->pid,
- ptq->tid,
- state->from_ip);
+ err = intel_ptq_synth_error(ptq, state);
if (err)
return err;
}
@@ -1804,7 +1813,7 @@ static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
- sample->pid, sample->tid, 0);
+ sample->pid, sample->tid, 0, sample->time);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
@@ -2522,6 +2531,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
}
pt->timeless_decoding = intel_pt_timeless_decoding(pt);
+ if (pt->timeless_decoding && !pt->tc.time_mult)
+ pt->tc.time_mult = 1;
pt->have_tsc = intel_pt_have_tsc(pt);
pt->sampling_mode = false;
pt->est_tsc = !pt->timeless_decoding;
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
index 85bab8735fa9..5c19ee001299 100644
--- a/tools/perf/util/intlist.h
+++ b/tools/perf/util/intlist.h
@@ -45,7 +45,7 @@ static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
/* For intlist iteration */
static inline struct int_node *intlist__first(struct intlist *ilist)
{
- struct rb_node *rn = rb_first(&ilist->rblist.entries);
+ struct rb_node *rn = rb_first_cached(&ilist->rblist.entries);
return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
}
static inline struct int_node *intlist__next(struct int_node *in)
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index bf249552a9b0..eda28d3570bc 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -2,6 +2,7 @@
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <errno.h>
+#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 7b1f06567521..1403dec189b4 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -3,12 +3,13 @@
#define __PERF_KVM_STAT_H
#include "../perf.h"
-#include "evsel.h"
-#include "evlist.h"
-#include "session.h"
#include "tool.h"
#include "stat.h"
+struct perf_evsel;
+struct perf_evlist;
+struct perf_session;
+
struct event_key {
#define INVALID_KEY (~0ULL)
u64 key;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 143f7057d581..61959aba7e27 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -10,6 +10,7 @@
#include "hist.h"
#include "machine.h"
#include "map.h"
+#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
@@ -21,6 +22,7 @@
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
+#include "bpf-event.h"
#include "sane_ctype.h"
#include <symbol/kallsyms.h>
@@ -41,7 +43,7 @@ static void machine__threads_init(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- threads->entries = RB_ROOT;
+ threads->entries = RB_ROOT_CACHED;
init_rwsem(&threads->lock);
threads->nr = 0;
INIT_LIST_HEAD(&threads->dead);
@@ -179,7 +181,7 @@ void machine__delete_threads(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
down_write(&threads->lock);
- nd = rb_first(&threads->entries);
+ nd = rb_first_cached(&threads->entries);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
@@ -222,7 +224,7 @@ void machine__delete(struct machine *machine)
void machines__init(struct machines *machines)
{
machine__init(&machines->host, "", HOST_KERNEL_ID);
- machines->guests = RB_ROOT;
+ machines->guests = RB_ROOT_CACHED;
}
void machines__exit(struct machines *machines)
@@ -234,9 +236,10 @@ void machines__exit(struct machines *machines)
struct machine *machines__add(struct machines *machines, pid_t pid,
const char *root_dir)
{
- struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *pos, *machine = malloc(sizeof(*machine));
+ bool leftmost = true;
if (machine == NULL)
return NULL;
@@ -251,12 +254,14 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
pos = rb_entry(parent, struct machine, rb_node);
if (pid < pos->pid)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&machine->rb_node, parent, p);
- rb_insert_color(&machine->rb_node, &machines->guests);
+ rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
return machine;
}
@@ -267,7 +272,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
machines->host.comm_exec = comm_exec;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
machine->comm_exec = comm_exec;
@@ -276,7 +281,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
struct machine *machines__find(struct machines *machines, pid_t pid)
{
- struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *machine;
struct machine *default_machine = NULL;
@@ -339,7 +344,7 @@ void machines__process_guests(struct machines *machines,
{
struct rb_node *nd;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
process(pos, data);
}
@@ -352,7 +357,8 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
machines->host.id_hdr_size = id_hdr_size;
- for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&machines->guests); node;
+ node = rb_next(node)) {
machine = rb_entry(node, struct machine, rb_node);
machine->id_hdr_size = id_hdr_size;
}
@@ -465,9 +471,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
{
- struct rb_node **p = &threads->entries.rb_node;
+ struct rb_node **p = &threads->entries.rb_root.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
+ bool leftmost = true;
th = threads__get_last_match(threads, machine, pid, tid);
if (th)
@@ -485,8 +492,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
if (tid < th->tid)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
if (!create)
@@ -495,7 +504,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
th = thread__new(pid, tid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &threads->entries);
+ rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
/*
* We have to initialize map_groups separately
@@ -506,7 +515,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* leader and that would screw up the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
- rb_erase_init(&th->rb_node, &threads->entries);
+ rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
thread__put(th);
return NULL;
@@ -681,6 +690,59 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
return 0;
}
+static int machine__process_ksymbol_register(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct symbol *sym;
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
+ if (!map) {
+ map = dso__new_map(event->ksymbol_event.name);
+ if (!map)
+ return -ENOMEM;
+
+ map->start = event->ksymbol_event.addr;
+ map->pgoff = map->start;
+ map->end = map->start + event->ksymbol_event.len;
+ map_groups__insert(&machine->kmaps, map);
+ }
+
+ sym = symbol__new(event->ksymbol_event.addr, event->ksymbol_event.len,
+ 0, 0, event->ksymbol_event.name);
+ if (!sym)
+ return -ENOMEM;
+ dso__insert_symbol(map->dso, sym);
+ return 0;
+}
+
+static int machine__process_ksymbol_unregister(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
+ if (map)
+ map_groups__remove(&machine->kmaps, map);
+
+ return 0;
+}
+
+int machine__process_ksymbol(struct machine *machine __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ if (dump_trace)
+ perf_event__fprintf_ksymbol(event, stdout);
+
+ if (event->ksymbol_event.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
+ return machine__process_ksymbol_unregister(machine, event,
+ sample);
+ return machine__process_ksymbol_register(machine, event, sample);
+}
+
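machine__process_ksymbol_register() above gives a late-registered kernel symbol (a BPF program, for instance) a map spanning [addr, addr + len) plus a symbol in that map's DSO, so ordinary address resolution works afterwards. A hedged sketch of the consumer path; map__find_symbol() and the map_ip() translation are declared outside this excerpt, and error handling is elided:

static const char *ksym_name(struct machine *machine, u64 ip)
{
	struct map *map = map_groups__find(&machine->kmaps, ip);
	struct symbol *sym;

	if (!map)
		return "<unknown>";
	/* translate to a dso-relative address before the symbol lookup */
	sym = map__find_symbol(map, map->map_ip(map, ip));
	return sym ? sym->name : "<unknown>";
}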
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
const char *dup_filename;
@@ -744,7 +806,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
struct rb_node *nd;
size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += __dsos__fprintf(&pos->dsos.head, fp);
}
@@ -764,7 +826,7 @@ size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
struct rb_node *nd;
size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
}
@@ -804,7 +866,8 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
ret = fprintf(fp, "Threads: %u\n", threads->nr);
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
@@ -1107,7 +1170,7 @@ failure:
void machines__destroy_kernel_maps(struct machines *machines)
{
- struct rb_node *next = rb_first(&machines->guests);
+ struct rb_node *next = rb_first_cached(&machines->guests);
machine__destroy_kernel_maps(&machines->host);
@@ -1115,7 +1178,7 @@ void machines__destroy_kernel_maps(struct machines *machines)
struct machine *pos = rb_entry(next, struct machine, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, &machines->guests);
+ rb_erase_cached(&pos->rb_node, &machines->guests);
machine__delete(pos);
}
}
@@ -1680,7 +1743,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
BUG_ON(refcount_read(&th->refcnt) == 0);
if (lock)
down_write(&threads->lock);
- rb_erase_init(&th->rb_node, &threads->entries);
+ rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
--threads->nr;
/*
@@ -1812,6 +1875,10 @@ int machine__process_event(struct machine *machine, union perf_event *event,
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
ret = machine__process_switch_event(machine, event); break;
+ case PERF_RECORD_KSYMBOL:
+ ret = machine__process_ksymbol(machine, event, sample); break;
+ case PERF_RECORD_BPF_EVENT:
+ ret = machine__process_bpf_event(machine, event, sample); break;
default:
ret = -1;
break;
@@ -2453,7 +2520,8 @@ int machine__for_each_thread(struct machine *machine,
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
threads = &machine->threads[i];
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
thread = rb_entry(nd, struct thread, rb_node);
rc = fn(thread, priv);
if (rc != 0)
@@ -2480,7 +2548,7 @@ int machines__for_each_thread(struct machines *machines,
if (rc != 0)
return rc;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
rc = machine__for_each_thread(machine, fn, priv);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index a5d1da60f751..f70ab98a7bde 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -4,7 +4,7 @@
#include <sys/types.h>
#include <linux/rbtree.h>
-#include "map.h"
+#include "map_groups.h"
#include "dso.h"
#include "event.h"
#include "rwsem.h"
@@ -29,11 +29,11 @@ struct vdso_info;
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
struct threads {
- struct rb_root entries;
- struct rw_semaphore lock;
- unsigned int nr;
- struct list_head dead;
- struct thread *last_match;
+ struct rb_root_cached entries;
+ struct rw_semaphore lock;
+ unsigned int nr;
+ struct list_head dead;
+ struct thread *last_match;
};
struct machine {
@@ -130,6 +130,9 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
struct perf_sample *sample);
int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
+int machine__process_ksymbol(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample);
int machine__process_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
@@ -137,7 +140,7 @@ typedef void (*machine__process_t)(struct machine *machine, void *data);
struct machines {
struct machine host;
- struct rb_root guests;
+ struct rb_root_cached guests;
};
void machines__init(struct machines *machines);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6751301a755c..e32628cd20a7 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -286,8 +286,8 @@ void map__put(struct map *map)
void map__fixup_start(struct map *map)
{
- struct rb_root *symbols = &map->dso->symbols;
- struct rb_node *nd = rb_first(symbols);
+ struct rb_root_cached *symbols = &map->dso->symbols;
+ struct rb_node *nd = rb_first_cached(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->start = sym->start;
@@ -296,8 +296,8 @@ void map__fixup_start(struct map *map)
void map__fixup_end(struct map *map)
{
- struct rb_root *symbols = &map->dso->symbols;
- struct rb_node *nd = rb_last(symbols);
+ struct rb_root_cached *symbols = &map->dso->symbols;
+ struct rb_node *nd = rb_last(&symbols->rb_root);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->end = sym->end;
@@ -557,6 +557,12 @@ void map_groups__init(struct map_groups *mg, struct machine *machine)
refcount_set(&mg->refcnt, 1);
}
+void map_groups__insert(struct map_groups *mg, struct map *map)
+{
+ maps__insert(&mg->maps, map);
+ map->groups = mg;
+}
+
static void __maps__purge(struct maps *maps)
{
struct rb_root *root = &maps->entries;
@@ -571,10 +577,25 @@ static void __maps__purge(struct maps *maps)
}
}
+static void __maps__purge_names(struct maps *maps)
+{
+ struct rb_root *root = &maps->names;
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node_name);
+
+ next = rb_next(&pos->rb_node_name);
+ rb_erase_init(&pos->rb_node_name, root);
+ map__put(pos);
+ }
+}
+
static void maps__exit(struct maps *maps)
{
down_write(&maps->lock);
__maps__purge(maps);
+ __maps__purge_names(maps);
up_write(&maps->lock);
}
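maps__exit() now purges both rbtrees because every map is linked twice, once by address and once by name, and each tree holds its own reference. The insertion counterpart is not visible in this hunk; assuming helper names, it would look like:

/* Sketch only; __maps__insert_name() is an assumed helper name. */
static void maps__insert_sketch(struct maps *maps, struct map *map)
{
	__maps__insert(maps, map);	/* entries tree via rb_node, +1 ref */
	__maps__insert_name(maps, map);	/* names tree via rb_node_name, +1 ref */
}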
@@ -911,6 +932,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
{
rb_erase_init(&map->rb_node, &maps->entries);
map__put(map);
+
+ rb_erase_init(&map->rb_node_name, &maps->names);
+ map__put(map);
}
void maps__remove(struct maps *maps, struct map *map)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 09282aa45c80..0e20749f2c55 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -6,12 +6,10 @@
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
-#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <linux/types.h>
-#include "rwsem.h"
struct dso;
struct ip_callchain;
@@ -48,38 +46,7 @@ struct map {
refcount_t refcnt;
};
-#define KMAP_NAME_LEN 256
-
-struct kmap {
- struct ref_reloc_sym *ref_reloc_sym;
- struct map_groups *kmaps;
- char name[KMAP_NAME_LEN];
-};
-
-struct maps {
- struct rb_root entries;
- struct rb_root names;
- struct rw_semaphore lock;
-};
-
-struct map_groups {
- struct maps maps;
- struct machine *machine;
- refcount_t refcnt;
-};
-
-struct map_groups *map_groups__new(struct machine *machine);
-void map_groups__delete(struct map_groups *mg);
-bool map_groups__empty(struct map_groups *mg);
-
-static inline struct map_groups *map_groups__get(struct map_groups *mg)
-{
- if (mg)
- refcount_inc(&mg->refcnt);
- return mg;
-}
-
-void map_groups__put(struct map_groups *mg);
+struct kmap;
struct kmap *__map__kmap(struct map *map);
struct kmap *map__kmap(struct map *map);
@@ -174,18 +141,7 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp);
-struct srccode_state {
- char *srcfile;
- unsigned line;
-};
-
-static inline void srccode_state_init(struct srccode_state *state)
-{
- state->srcfile = NULL;
- state->line = 0;
-}
-
-void srccode_state_free(struct srccode_state *state);
+struct srccode_state;
int map__fprintf_srccode(struct map *map, u64 addr,
FILE *fp, struct srccode_state *state);
@@ -198,61 +154,9 @@ void map__fixup_end(struct map *map);
void map__reloc_vmlinux(struct map *map);
-void maps__insert(struct maps *maps, struct map *map);
-void maps__remove(struct maps *maps, struct map *map);
-struct map *maps__find(struct maps *maps, u64 addr);
-struct map *maps__first(struct maps *maps);
-struct map *map__next(struct map *map);
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
- struct map **mapp);
-void map_groups__init(struct map_groups *mg, struct machine *machine);
-void map_groups__exit(struct map_groups *mg);
-int map_groups__clone(struct thread *thread,
- struct map_groups *parent);
-size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
-
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
u64 addr);
-static inline void map_groups__insert(struct map_groups *mg, struct map *map)
-{
- maps__insert(&mg->maps, map);
- map->groups = mg;
-}
-
-static inline void map_groups__remove(struct map_groups *mg, struct map *map)
-{
- maps__remove(&mg->maps, map);
-}
-
-static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
-{
- return maps__find(&mg->maps, addr);
-}
-
-struct map *map_groups__first(struct map_groups *mg);
-
-static inline struct map *map_groups__next(struct map *map)
-{
- return map__next(map);
-}
-
-struct symbol *map_groups__find_symbol(struct map_groups *mg,
- u64 addr, struct map **mapp);
-
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
- const char *name,
- struct map **mapp);
-
-struct addr_map_symbol;
-
-int map_groups__find_ams(struct addr_map_symbol *ams);
-
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
- FILE *fp);
-
-struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
-
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
new file mode 100644
index 000000000000..4dcda33e0fdf
--- /dev/null
+++ b/tools/perf/util/map_groups.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_MAP_GROUPS_H
+#define __PERF_MAP_GROUPS_H
+
+#include <linux/refcount.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include "rwsem.h"
+
+struct ref_reloc_sym;
+struct machine;
+struct map;
+struct thread;
+
+struct maps {
+ struct rb_root entries;
+ struct rb_root names;
+ struct rw_semaphore lock;
+};
+
+void maps__insert(struct maps *maps, struct map *map);
+void maps__remove(struct maps *maps, struct map *map);
+struct map *maps__find(struct maps *maps, u64 addr);
+struct map *maps__first(struct maps *maps);
+struct map *map__next(struct map *map);
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
+
+struct map_groups {
+ struct maps maps;
+ struct machine *machine;
+ refcount_t refcnt;
+};
+
+#define KMAP_NAME_LEN 256
+
+struct kmap {
+ struct ref_reloc_sym *ref_reloc_sym;
+ struct map_groups *kmaps;
+ char name[KMAP_NAME_LEN];
+};
+
+struct map_groups *map_groups__new(struct machine *machine);
+void map_groups__delete(struct map_groups *mg);
+bool map_groups__empty(struct map_groups *mg);
+
+static inline struct map_groups *map_groups__get(struct map_groups *mg)
+{
+ if (mg)
+ refcount_inc(&mg->refcnt);
+ return mg;
+}
+
+void map_groups__put(struct map_groups *mg);
+void map_groups__init(struct map_groups *mg, struct machine *machine);
+void map_groups__exit(struct map_groups *mg);
+int map_groups__clone(struct thread *thread, struct map_groups *parent);
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
+
+void map_groups__insert(struct map_groups *mg, struct map *map);
+
+static inline void map_groups__remove(struct map_groups *mg, struct map *map)
+{
+ maps__remove(&mg->maps, map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
+{
+ return maps__find(&mg->maps, addr);
+}
+
+struct map *map_groups__first(struct map_groups *mg);
+
+static inline struct map *map_groups__next(struct map *map)
+{
+ return map__next(map);
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);
+
+struct addr_map_symbol;
+
+int map_groups__find_ams(struct addr_map_symbol *ams);
+
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp);
+
+struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
+
+#endif // __PERF_MAP_GROUPS_H
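With the declarations split out, a file that only manipulates map groups (probe-event.c below does exactly this) can include map_groups.h alone. A minimal sketch of the refcounted usage pattern this header exposes:

#include <stdbool.h>
#include "map_groups.h"

/* Pointer-level knowledge of struct map is all this needs. */
static bool addr_is_mapped(struct map_groups *mg, u64 addr)
{
	bool ret;

	map_groups__get(mg);		/* hold a reference across the lookup */
	ret = map_groups__find(mg, addr) != NULL;
	map_groups__put(mg);
	return ret;
}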
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
new file mode 100644
index 000000000000..5a1aed9f6bb4
--- /dev/null
+++ b/tools/perf/util/map_symbol.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_MAP_SYMBOL
+#define __PERF_MAP_SYMBOL 1
+
+#include <linux/types.h>
+
+struct map;
+struct symbol;
+
+struct map_symbol {
+ struct map *map;
+ struct symbol *sym;
+};
+
+struct addr_map_symbol {
+ struct map *map;
+ struct symbol *sym;
+ u64 addr;
+ u64 al_addr;
+ u64 phys_addr;
+};
+#endif // __PERF_MAP_SYMBOL
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 93f74d8d3cdd..42c3e5a229d2 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -28,7 +28,7 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
static char mem_loads_name[100];
static bool mem_loads_name__init;
-char *perf_mem_events__name(int i)
+char * __weak perf_mem_events__name(int i)
{
if (i == PERF_MEM_EVENTS__LOAD) {
if (!mem_loads_name__init) {
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index a28f9b5cc4ff..b8d864ed4afe 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -270,7 +270,7 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
}
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
- bool raw)
+ bool raw, bool details)
{
struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
@@ -329,6 +329,12 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
if (asprintf(&s, "%s\n%*s%s]",
pe->metric_name, 8, "[", pe->desc) < 0)
return;
+
+ if (details) {
+ if (asprintf(&s, "%s\n%*s%s]",
+ s, 8, "[", pe->metric_expr) < 0)
+ return;
+ }
}
if (!s)
@@ -352,7 +358,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
else if (metrics && !raw)
printf("\nMetrics:\n\n");
- for (node = rb_first(&groups.entries); node; node = next) {
+ for (node = rb_first_cached(&groups.entries); node; node = next) {
struct mep *me = container_of(node, struct mep, nd);
if (metricgroups)
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 8a155dba0581..5c52097a5c63 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -27,6 +27,7 @@ int metricgroup__parse_groups(const struct option *opt,
const char *str,
struct rblist *metric_events);
-void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
+void metricgroup__print(bool metrics, bool groups, char *filter,
+ bool raw, bool details);
bool metricgroup__has_metric(const char *metric);
#endif
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 8fc39311a30d..cdc7740fc181 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -10,6 +10,9 @@
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
+#ifdef HAVE_LIBNUMA_SUPPORT
+#include <numaif.h>
+#endif
#include "debug.h"
#include "event.h"
#include "mmap.h"
@@ -154,9 +157,72 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+{
+ map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (map->aio.data[idx] == MAP_FAILED) {
+ map->aio.data[idx] = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+{
+ if (map->aio.data[idx]) {
+ munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
+ map->aio.data[idx] = NULL;
+ }
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
+{
+ void *data;
+ size_t mmap_len;
+ unsigned long node_mask;
+
+ if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
+ data = map->aio.data[idx];
+ mmap_len = perf_mmap__mmap_len(map);
+ node_mask = 1UL << cpu__get_node(cpu);
+ if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
+ pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
+ data, data + mmap_len, cpu__get_node(cpu));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+#else
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+{
+ map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
+ if (map->aio.data[idx] == NULL)
+ return -1;
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+{
+ zfree(&(map->aio.data[idx]));
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
+ int cpu __maybe_unused, int affinity __maybe_unused)
+{
+ return 0;
+}
+#endif
+
static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
- int delta_max, i, prio;
+ int delta_max, i, prio, ret;
map->aio.nr_cblocks = mp->nr_cblocks;
if (map->aio.nr_cblocks) {
@@ -177,11 +243,14 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
}
delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
for (i = 0; i < map->aio.nr_cblocks; ++i) {
- map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
- if (!map->aio.data[i]) {
+ ret = perf_mmap__aio_alloc(map, i);
+ if (ret == -1) {
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
+ ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
+ if (ret == -1)
+ return -1;
/*
* Use cblock.aio_fildes value different from -1
* to denote started aio write operation on the
@@ -210,7 +279,7 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
int i;
for (i = 0; i < map->aio.nr_cblocks; ++i)
- zfree(&map->aio.data[i]);
+ perf_mmap__aio_free(map, i);
if (map->aio.data)
zfree(&map->aio.data);
zfree(&map->aio.cblocks);
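Under HAVE_LIBNUMA_SUPPORT the AIO buffers are mmap()ed and then mbind()ed to the node of the buffer's CPU, so the kernel backs them with node-local pages; the malloc()/zfree() pair remains the fallback. Note the patch's single-word node_mask only covers nodes 0..63; a general nodemask is an array of words, as in this hedged sketch:

#include <stddef.h>
#include <numaif.h>

/* Sketch: bind a buffer to an arbitrary node with a multi-word
 * nodemask (room for 256 nodes here).
 */
static int bind_to_node(void *buf, size_t len, int node)
{
	unsigned long mask[4] = { 0 };
	int bits_per_word = 8 * sizeof(mask[0]);

	mask[node / bits_per_word] |= 1UL << (node % bits_per_word);
	return mbind(buf, len, MPOL_BIND, mask, 4 * bits_per_word, 0);
}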
@@ -314,6 +383,32 @@ void perf_mmap__munmap(struct perf_mmap *map)
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
+static void build_node_mask(int node, cpu_set_t *mask)
+{
+ int c, cpu, nr_cpus;
+ const struct cpu_map *cpu_map = NULL;
+
+ cpu_map = cpu_map__online();
+ if (!cpu_map)
+ return;
+
+ nr_cpus = cpu_map__nr(cpu_map);
+ for (c = 0; c < nr_cpus; c++) {
+ cpu = cpu_map->map[c]; /* map c index to online cpu index */
+ if (cpu__get_node(cpu) == node)
+ CPU_SET(cpu, mask);
+ }
+}
+
+static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+{
+ CPU_ZERO(&map->affinity_mask);
+ if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
+ build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
+ else if (mp->affinity == PERF_AFFINITY_CPU)
+ CPU_SET(map->cpu, &map->affinity_mask);
+}
+
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
/*
@@ -343,6 +438,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->fd = fd;
map->cpu = cpu;
+ perf_mmap__setup_affinity_mask(map, mp);
+
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
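perf_mmap__setup_affinity_mask() precomputes, per ring buffer, where the consuming thread should run: the buffer's CPU for PERF_AFFINITY_CPU, the CPUs of its NUMA node for PERF_AFFINITY_NODE, and an empty mask for PERF_AFFINITY_SYS. Applying the mask happens on the record side, outside this excerpt; a hedged sketch of that step:

#define _GNU_SOURCE		/* for CPU_COUNT() */
#include <sched.h>
#include <stdio.h>

/* Pin the reader thread to the mmap's precomputed mask before draining,
 * so it runs close to the ring buffer's memory.
 */
static void apply_affinity(const cpu_set_t *mask)
{
	/* PERF_AFFINITY_SYS leaves the mask empty: change nothing */
	if (CPU_COUNT(mask) &&
	    sched_setaffinity(0, sizeof(*mask), mask))
		perror("sched_setaffinity");
}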
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index aeb6942fdb00..e566c19b242b 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -38,6 +38,7 @@ struct perf_mmap {
int nr_cblocks;
} aio;
#endif
+ cpu_set_t affinity_mask;
};
/*
@@ -69,7 +70,7 @@ enum bkw_mmap_state {
};
struct mmap_params {
- int prot, mask, nr_cblocks;
+ int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
};
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 897589507d97..989fed6f43b5 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
"FINAL",
"ROUND",
"HALF ",
+ "TOP ",
+ "TIME ",
};
int err;
bool show_progress = false;
@@ -391,8 +393,10 @@ void ordered_events__free(struct ordered_events *oe)
* Current buffer might not have all the events allocated
* yet, we need to free only allocated ones ...
*/
- list_del(&oe->buffer->list);
- ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
+ if (oe->buffer) {
+ list_del(&oe->buffer->list);
+ ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
+ }
/* ... and continue with the rest */
list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 920e1e6551dd..5ef4939408f2 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2271,6 +2271,7 @@ static bool is_event_supported(u8 type, unsigned config)
perf_evsel__delete(evsel);
}
+ thread_map__put(tmap);
return ret;
}
@@ -2341,6 +2342,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
printf(" %-50s [%s]\n", buf, "SDT event");
free(buf);
}
+ free(path);
} else
printf(" %-50s [%s]\n", nd->s, "SDT event");
if (nd2) {
@@ -2540,7 +2542,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_sdt_events(NULL, NULL, name_only);
- metricgroup__print(true, true, NULL, name_only);
+ metricgroup__print(true, true, NULL, name_only, details_flag);
}
int parse_events__is_hardcoded_term(struct parse_events_term *term)
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index da8fe57691b8..44819bdb037d 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -311,7 +311,7 @@ value_sym '/' event_config '/'
$$ = list;
}
|
-value_sym sep_slash_dc
+value_sym sep_slash_slash_dc
{
struct list_head *list;
int type = $1 >> 16;
@@ -702,7 +702,7 @@ PE_VALUE PE_ARRAY_RANGE PE_VALUE
sep_dc: ':' |
-sep_slash_dc: '/' | ':' |
+sep_slash_slash_dc: '/' '/' | ':' |
%%
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 11a234740632..6199a3174ab9 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -29,8 +29,6 @@ struct perf_pmu_format {
struct list_head list;
};
-#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
-
int perf_pmu_parse(struct list_head *list, char *name);
extern FILE *perf_pmu_in;
@@ -754,6 +752,19 @@ perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
return NULL;
}
+static int pmu_max_precise(const char *name)
+{
+ char path[PATH_MAX];
+ int max_precise = -1;
+
+ scnprintf(path, PATH_MAX,
+ "bus/event_source/devices/%s/caps/max_precise",
+ name);
+
+ sysfs__read_int(path, &max_precise);
+ return max_precise;
+}
+
static struct perf_pmu *pmu_lookup(const char *name)
{
struct perf_pmu *pmu;
@@ -786,6 +797,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->name = strdup(name);
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
+ pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
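pmu_max_precise() reads the optional sysfs cap caps/max_precise and leaves -1 when the file is absent. The obvious consumer clamps a requested precise_ip level; the real clamping logic lives elsewhere in perf, so this is only an illustrative sketch:

/* Clamp a requested precise_ip ("p" modifiers) to the advertised
 * capability; -1 means the cap file was absent, keep the request.
 */
static int clamp_precise_ip(int requested, int max_precise)
{
	if (max_precise < 0)
		return requested;
	return requested < max_precise ? requested : max_precise;
}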
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 76fecec7b3f9..bd9ec2704a57 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -6,9 +6,10 @@
#include <linux/compiler.h>
#include <linux/perf_event.h>
#include <stdbool.h>
-#include "evsel.h"
#include "parse-events.h"
+struct perf_evsel_config_term;
+
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
PERF_PMU_FORMAT_VALUE_CONFIG1,
@@ -16,6 +17,7 @@ enum {
};
#define PERF_PMU_FORMAT_BITS 64
+#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
struct perf_event_attr;
@@ -24,12 +26,12 @@ struct perf_pmu {
__u32 type;
bool selectable;
bool is_uncore;
+ int max_precise;
struct perf_event_attr *default_config;
struct cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
struct list_head list; /* ELEM */
- int (*set_drv_config) (struct perf_evsel_config_term *term);
};
struct perf_pmu_info {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 18a59fba97ff..198e09ff611e 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -35,11 +35,14 @@
#include "util.h"
#include "event.h"
+#include "namespaces.h"
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
#include "cache.h"
#include "color.h"
+#include "map.h"
+#include "map_groups.h"
#include "symbol.h"
#include "thread.h"
#include <api/fs/fs.h>
@@ -157,8 +160,10 @@ static struct map *kernel_get_module_map(const char *module)
if (module && strchr(module, '/'))
return dso__new_map(module);
- if (!module)
- module = "kernel";
+ if (!module) {
+ pos = machine__kernel_map(host_machine);
+ return map__get(pos);
+ }
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
/* short_name is "[module]" */
@@ -469,9 +474,12 @@ static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi,
strcpy(reason, "(unknown)");
} else
dso__strerror_load(dso, reason, STRERR_BUFSIZE);
- if (!silent)
- pr_err("Failed to find the path for %s: %s\n",
- module ?: "kernel", reason);
+ if (!silent) {
+ if (module)
+ pr_err("Module %s is not loaded, please specify its full path name.\n", module);
+ else
+ pr_err("Failed to find the path for the kernel: %s\n", reason);
+ }
return NULL;
}
path = dso->long_name;
@@ -3528,7 +3536,8 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
/* Show all (filtered) symbols */
setup_pager();
- for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&map->dso->symbol_names); nd;
+ nd = rb_next(nd)) {
struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
if (strfilter__compare(_filter, pos->sym.name))
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 15a98c3a2a2f..05c8d571a901 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -4,8 +4,9 @@
#include <linux/compiler.h>
#include <stdbool.h>
-#include "intlist.h"
-#include "namespaces.h"
+
+struct intlist;
+struct nsinfo;
/* Probe related configurations */
struct probe_conf {
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 0b1195cad0e5..4062bc4412a9 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -20,6 +20,7 @@
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
+#include "namespaces.h"
#include "util.h"
#include "event.h"
#include "strlist.h"
diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
index a920f702a74d..376e86cb4c3c 100644
--- a/tools/perf/util/rb_resort.h
+++ b/tools/perf/util/rb_resort.h
@@ -140,12 +140,12 @@ struct __name##_sorted *__name = __name##_sorted__new
/* For 'struct intlist' */
#define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \
- DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \
+ DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \
__ilist->rblist.nr_entries)
/* For 'struct machine->threads' */
-#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
- DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries, \
- __machine->threads[hash_bucket].nr)
+#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
+ DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \
+ __machine->threads[hash_bucket].nr)
#endif /* _PERF_RESORT_RB_H_ */
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
index 0efc3258c648..11e07fab20dc 100644
--- a/tools/perf/util/rblist.c
+++ b/tools/perf/util/rblist.c
@@ -13,8 +13,9 @@
int rblist__add_node(struct rblist *rblist, const void *new_entry)
{
- struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node;
+ bool leftmost = true;
while (*p != NULL) {
int rc;
@@ -24,8 +25,10 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
rc = rblist->node_cmp(parent, new_entry);
if (rc > 0)
p = &(*p)->rb_left;
- else if (rc < 0)
+ else if (rc < 0) {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
else
return -EEXIST;
}
@@ -35,7 +38,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
return -ENOMEM;
rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
+ rb_insert_color_cached(new_node, &rblist->entries, leftmost);
++rblist->nr_entries;
return 0;
@@ -43,7 +46,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
{
- rb_erase(rb_node, &rblist->entries);
+ rb_erase_cached(rb_node, &rblist->entries);
--rblist->nr_entries;
rblist->node_delete(rblist, rb_node);
}
@@ -52,8 +55,9 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
const void *entry,
bool create)
{
- struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node = NULL;
+ bool leftmost = true;
while (*p != NULL) {
int rc;
@@ -63,8 +67,10 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
rc = rblist->node_cmp(parent, entry);
if (rc > 0)
p = &(*p)->rb_left;
- else if (rc < 0)
+ else if (rc < 0) {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
else
return parent;
}
@@ -73,7 +79,8 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
new_node = rblist->node_new(rblist, entry);
if (new_node) {
rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
+ rb_insert_color_cached(new_node,
+ &rblist->entries, leftmost);
++rblist->nr_entries;
}
}
@@ -94,7 +101,7 @@ struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
void rblist__init(struct rblist *rblist)
{
if (rblist != NULL) {
- rblist->entries = RB_ROOT;
+ rblist->entries = RB_ROOT_CACHED;
rblist->nr_entries = 0;
}
@@ -103,7 +110,7 @@ void rblist__init(struct rblist *rblist)
void rblist__exit(struct rblist *rblist)
{
- struct rb_node *pos, *next = rb_first(&rblist->entries);
+ struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
@@ -124,7 +131,8 @@ struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
{
struct rb_node *node;
- for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&rblist->entries); node;
+ node = rb_next(node)) {
if (!idx--)
return node;
}
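rblist is the callback-driven wrapper around an rbtree that several perf structures build on, so every user inherits the cached-leftmost behavior from this conversion. A self-contained sketch of a user, with hypothetical names:

#include <stdlib.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include "rblist.h"

struct str_node {			/* hypothetical payload */
	struct rb_node rb_node;
	char *s;
};

static int str_node__cmp(struct rb_node *rbn, const void *entry)
{
	return strcmp(container_of(rbn, struct str_node, rb_node)->s, entry);
}

static struct rb_node *str_node__new(struct rblist *rblist __maybe_unused,
				     const void *entry)
{
	struct str_node *n = malloc(sizeof(*n));

	if (n)
		n->s = strdup(entry);
	return n ? &n->rb_node : NULL;
}

static void str_node__delete(struct rblist *rblist __maybe_unused,
			     struct rb_node *rbn)
{
	struct str_node *n = container_of(rbn, struct str_node, rb_node);

	free(n->s);
	free(n);
}

/* usage:
 *	struct rblist rl;
 *	rblist__init(&rl);
 *	rl.node_cmp = str_node__cmp;
 *	rl.node_new = str_node__new;
 *	rl.node_delete = str_node__delete;
 *	rblist__add_node(&rl, "hello");
 *	rblist__exit(&rl);
 */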
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
index 76df15c27f5f..14b232a4d0b6 100644
--- a/tools/perf/util/rblist.h
+++ b/tools/perf/util/rblist.h
@@ -20,7 +20,7 @@
*/
struct rblist {
- struct rb_root entries;
+ struct rb_root_cached entries;
unsigned int nr_entries;
int (*node_cmp)(struct rb_node *rbn, const void *entry);
diff --git a/tools/perf/util/s390-cpumcf-kernel.h b/tools/perf/util/s390-cpumcf-kernel.h
new file mode 100644
index 000000000000..d4356030b504
--- /dev/null
+++ b/tools/perf/util/s390-cpumcf-kernel.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for s390 CPU measurement counter set diagnostic facility
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ * Thomas Richter <tmricht@linux.ibm.com>
+ */
+#ifndef S390_CPUMCF_KERNEL_H
+#define S390_CPUMCF_KERNEL_H
+
+#define S390_CPUMCF_DIAG_DEF 0xfeef /* Counter diagnostic entry ID */
+#define PERF_EVENT_CPUM_CF_DIAG 0xBC000 /* Event: Counter sets */
+
+struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int set:16; /* 16-31 Counter set identifier */
+ unsigned int ctr:16; /* 32-47 Number of stored counters */
+ unsigned int res1:16; /* 48-63 Reserved */
+};
+
+struct cf_trailer_entry { /* CPU-M CF trailer for raw traces (64 byte) */
+ /* 0 - 7 */
+ union {
+ struct {
+ unsigned int clock_base:1; /* TOD clock base */
+ unsigned int speed:1; /* CPU speed */
+ /* Measurement alerts */
+ unsigned int mtda:1; /* Loss of MT ctr. data alert */
+ unsigned int caca:1; /* Counter auth. change alert */
+ unsigned int lcda:1; /* Loss of counter data alert */
+ };
+ unsigned long flags; /* 0-63 All indicators */
+ };
+ /* 8 - 15 */
+ unsigned int cfvn:16; /* 64-79 Ctr First Version */
+ unsigned int csvn:16; /* 80-95 Ctr Second Version */
+ unsigned int cpu_speed:32; /* 96-127 CPU speed */
+ /* 16 - 23 */
+ unsigned long timestamp; /* 128-191 Timestamp (TOD) */
+ /* 24 - 55 */
+ union {
+ struct {
+ unsigned long progusage1;
+ unsigned long progusage2;
+ unsigned long progusage3;
+ unsigned long tod_base;
+ };
+ unsigned long progusage[4];
+ };
+ /* 56 - 63 */
+ unsigned int mach_type:16; /* Machine type */
+ unsigned int res1:16; /* Reserved */
+ unsigned int res2:32; /* Reserved */
+};
+
+#define CPUMF_CTR_SET_BASIC 0 /* Basic Counter Set */
+#define CPUMF_CTR_SET_USER 1 /* Problem-State Counter Set */
+#define CPUMF_CTR_SET_CRYPTO 2 /* Crypto-Activity Counter Set */
+#define CPUMF_CTR_SET_EXT 3 /* Extended Counter Set */
+#define CPUMF_CTR_SET_MT_DIAG 4 /* MT-diagnostic Counter Set */
+#endif
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 68b2570304ec..c215704931dc 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -162,6 +162,7 @@
#include "auxtrace.h"
#include "s390-cpumsf.h"
#include "s390-cpumsf-kernel.h"
+#include "s390-cpumcf-kernel.h"
#include "config.h"
struct s390_cpumsf {
@@ -184,8 +185,58 @@ struct s390_cpumsf_queue {
struct auxtrace_buffer *buffer;
int cpu;
FILE *logfile;
+ FILE *logfile_ctr;
};
+/* Check if the raw data should be dumped to file. If this is the case and
+ * the file to dump to has not been opened for writing, do so.
+ *
+ * Return 0 on success and greater than zero on error so processing continues.
+ */
+static int s390_cpumcf_dumpctr(struct s390_cpumsf *sf,
+ struct perf_sample *sample)
+{
+ struct s390_cpumsf_queue *sfq;
+ struct auxtrace_queue *q;
+ int rc = 0;
+
+ if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
+ return rc;
+
+ q = &sf->queues.queue_array[sample->cpu];
+ sfq = q->priv;
+ if (!sfq) /* Queue not yet allocated */
+ return rc;
+
+ if (!sfq->logfile_ctr) {
+ char *name;
+
+ rc = (sf->logdir)
+ ? asprintf(&name, "%s/aux.ctr.%02x",
+ sf->logdir, sample->cpu)
+ : asprintf(&name, "aux.ctr.%02x", sample->cpu);
+ if (rc > 0)
+ sfq->logfile_ctr = fopen(name, "w");
+ if (sfq->logfile_ctr == NULL) {
+ pr_err("Failed to open counter set log file %s, "
+ "continue...\n", name);
+ rc = 1;
+ }
+ free(name);
+ }
+
+ if (sfq->logfile_ctr) {
+ /* The raw data ends in 4 padding bytes, see s390_cpumcfdg_testctr() in s390-sample-raw.c */
+ size_t n = fwrite(sample->raw_data, sample->raw_size - 4, 1,
+ sfq->logfile_ctr);
+ if (n != 1) {
+ pr_err("Failed to write counter set data\n");
+ rc = 1;
+ }
+ }
+ return rc;
+}
+
/* Display s390 CPU measurement facility basic-sampling data entry */
static bool s390_cpumsf_basic_show(const char *color, size_t pos,
struct hws_basic_entry *basic)
@@ -301,6 +352,11 @@ static bool s390_cpumsf_validate(int machine_type,
*dsdes = 85;
*bsdes = 32;
break;
+ case 2964:
+ case 2965:
+ *dsdes = 112;
+ *bsdes = 32;
+ break;
default:
/* Illegal trailer entry */
return false;
@@ -768,7 +824,7 @@ static int s390_cpumsf_process_queues(struct s390_cpumsf *sf, u64 timestamp)
}
static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
char msg[MAX_AUXTRACE_ERROR_MSG];
union perf_event event;
@@ -776,7 +832,7 @@ static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
strncpy(msg, "Lost Auxiliary Trace Buffer", sizeof(msg) - 1);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg);
+ code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(sf->session, &event, NULL);
if (err)
@@ -788,11 +844,12 @@ static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
static int s390_cpumsf_lost(struct s390_cpumsf *sf, struct perf_sample *sample)
{
return s390_cpumsf_synth_error(sf, 1, sample->cpu,
- sample->pid, sample->tid, 0);
+ sample->pid, sample->tid, 0,
+ sample->time);
}
static int
-s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
+s390_cpumsf_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
@@ -801,6 +858,8 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
struct s390_cpumsf,
auxtrace);
u64 timestamp = sample->time;
+ struct perf_evsel *ev_bc000;
+
int err = 0;
if (dump_trace)
@@ -811,6 +870,16 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
return -EINVAL;
}
+ if (event->header.type == PERF_RECORD_SAMPLE &&
+ sample->raw_size) {
+ /* Handle event with raw data */
+ ev_bc000 = perf_evlist__event2evsel(session->evlist, event);
+ if (ev_bc000 &&
+ ev_bc000->attr.config == PERF_EVENT_CPUM_CF_DIAG)
+ err = s390_cpumcf_dumpctr(sf, sample);
+ return err;
+ }
+
if (event->header.type == PERF_RECORD_AUX &&
event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
return s390_cpumsf_lost(sf, sample);
@@ -891,9 +960,15 @@ static void s390_cpumsf_free_queues(struct perf_session *session)
struct s390_cpumsf_queue *sfq = (struct s390_cpumsf_queue *)
queues->queue_array[i].priv;
- if (sfq != NULL && sfq->logfile) {
- fclose(sfq->logfile);
- sfq->logfile = NULL;
+ if (sfq != NULL) {
+ if (sfq->logfile) {
+ fclose(sfq->logfile);
+ sfq->logfile = NULL;
+ }
+ if (sfq->logfile_ctr) {
+ fclose(sfq->logfile_ctr);
+ sfq->logfile_ctr = NULL;
+ }
}
zfree(&queues->queue_array[i].priv);
}
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
new file mode 100644
index 000000000000..6650f599ed9c
--- /dev/null
+++ b/tools/perf/util/s390-sample-raw.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Architecture specific trace_event function. Display the event's bc000
+ * raw data on screen; the data is saved to files named aux.ctr.## (## is
+ * the CPU number the sample was taken from) by s390-cpumsf.c.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <sys/stat.h>
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+
+#include "debug.h"
+#include "util.h"
+#include "auxtrace.h"
+#include "session.h"
+#include "evlist.h"
+#include "config.h"
+#include "color.h"
+#include "sample-raw.h"
+#include "s390-cpumcf-kernel.h"
+#include "pmu-events/pmu-events.h"
+
+static size_t ctrset_size(struct cf_ctrset_entry *set)
+{
+ return sizeof(*set) + set->ctr * sizeof(u64);
+}
+
+static bool ctrset_valid(struct cf_ctrset_entry *set)
+{
+ return set->def == S390_CPUMCF_DIAG_DEF;
+}
+
+/* CPU Measurement Counter Facility raw data is a byte stream. It is 8-byte
+ * aligned and might have trailing padding bytes.
+ * Check the stream for consistency before it is displayed.
+ */
+static bool s390_cpumcfdg_testctr(struct perf_sample *sample)
+{
+ size_t len = sample->raw_size, offset = 0;
+ unsigned char *buf = sample->raw_data;
+ struct cf_trailer_entry *te;
+ struct cf_ctrset_entry *cep, ce;
+
+ if (!len)
+ return false;
+ while (offset < len) {
+ cep = (struct cf_ctrset_entry *)(buf + offset);
+ ce.def = be16_to_cpu(cep->def);
+ ce.set = be16_to_cpu(cep->set);
+ ce.ctr = be16_to_cpu(cep->ctr);
+ ce.res1 = be16_to_cpu(cep->res1);
+
+ if (!ctrset_valid(&ce) || offset + ctrset_size(&ce) > len) {
+ /* Raw data for counter sets is always a multiple
+ * of 8 bytes. Prepending a 4 byte size field to the
+ * raw data block in the sample causes the perf tool
+ * to append 4 padding bytes to make the raw data part
+ * of the sample a multiple of eight bytes again.
+ *
+ * If the last entry (trailer) is 4 bytes off the raw
+ * area data end, all is good.
+ */
+ if (len - offset - sizeof(*te) == 4)
+ break;
+ pr_err("Invalid counter set entry at %zd\n", offset);
+ return false;
+ }
+ offset += ctrset_size(&ce);
+ }
+ return true;
+}
+
+/* Dump event bc000 on screen, already tested for correctness. */
+static void s390_cpumcfdg_dumptrail(const char *color, size_t offset,
+ struct cf_trailer_entry *tep)
+{
+ struct cf_trailer_entry te;
+
+ te.flags = be64_to_cpu(tep->flags);
+ te.cfvn = be16_to_cpu(tep->cfvn);
+ te.csvn = be16_to_cpu(tep->csvn);
+ te.cpu_speed = be32_to_cpu(tep->cpu_speed);
+ te.timestamp = be64_to_cpu(tep->timestamp);
+ te.progusage1 = be64_to_cpu(tep->progusage1);
+ te.progusage2 = be64_to_cpu(tep->progusage2);
+ te.progusage3 = be64_to_cpu(tep->progusage3);
+ te.tod_base = be64_to_cpu(tep->tod_base);
+ te.mach_type = be16_to_cpu(tep->mach_type);
+ te.res1 = be16_to_cpu(tep->res1);
+ te.res2 = be32_to_cpu(tep->res2);
+
+ color_fprintf(stdout, color, " [%#08zx] Trailer:%c%c%c%c%c"
+ " Cfvn:%d Csvn:%d Speed:%d TOD:%#llx\n",
+ offset, te.clock_base ? 'T' : ' ',
+ te.speed ? 'S' : ' ', te.mtda ? 'M' : ' ',
+ te.caca ? 'C' : ' ', te.lcda ? 'L' : ' ',
+ te.cfvn, te.csvn, te.cpu_speed, te.timestamp);
+ color_fprintf(stdout, color, "\t\t1:%lx 2:%lx 3:%lx TOD-Base:%#llx"
+ " Type:%x\n\n",
+ te.progusage1, te.progusage2, te.progusage3,
+ te.tod_base, te.mach_type);
+}
+
+/* Return starting number of a counter set */
+static int get_counterset_start(int setnr)
+{
+ switch (setnr) {
+ case CPUMF_CTR_SET_BASIC: /* Basic counter set */
+ return 0;
+ case CPUMF_CTR_SET_USER: /* Problem state counter set */
+ return 32;
+ case CPUMF_CTR_SET_CRYPTO: /* Crypto counter set */
+ return 64;
+ case CPUMF_CTR_SET_EXT: /* Extended counter set */
+ return 128;
+ case CPUMF_CTR_SET_MT_DIAG: /* Diagnostic counter set */
+ return 448;
+ default:
+ return -1;
+ }
+}
+
+/* Scan the PMU events table and extract the logical name of a counter.
+ * Input is the counter set and the counter number within the set.
+ * Construct the event number and use it as the lookup key; on a match
+ * return the name of this counter.
+ * If no match is found a NULL pointer is returned.
+ */
+static const char *get_counter_name(int set, int nr, struct pmu_events_map *map)
+{
+ int rc, event_nr, wanted = get_counterset_start(set) + nr;
+
+ if (map) {
+ struct pmu_event *evp = map->table;
+
+ for (; evp->name || evp->event || evp->desc; ++evp) {
+ if (evp->name == NULL || evp->event == NULL)
+ continue;
+ rc = sscanf(evp->event, "event=%x", &event_nr);
+ if (rc == 1 && event_nr == wanted)
+ return evp->name;
+ }
+ }
+ return NULL;
+}
+
+static void s390_cpumcfdg_dump(struct perf_sample *sample)
+{
+ size_t i, len = sample->raw_size, offset = 0;
+ unsigned char *buf = sample->raw_data;
+ const char *color = PERF_COLOR_BLUE;
+ struct cf_ctrset_entry *cep, ce;
+ struct pmu_events_map *map;
+ struct perf_pmu pmu;
+ u64 *p;
+
+ memset(&pmu, 0, sizeof(pmu));
+ map = perf_pmu__find_map(&pmu);
+ while (offset < len) {
+ cep = (struct cf_ctrset_entry *)(buf + offset);
+
+ ce.def = be16_to_cpu(cep->def);
+ ce.set = be16_to_cpu(cep->set);
+ ce.ctr = be16_to_cpu(cep->ctr);
+ ce.res1 = be16_to_cpu(cep->res1);
+
+ if (!ctrset_valid(&ce)) { /* Print trailer */
+ s390_cpumcfdg_dumptrail(color, offset,
+ (struct cf_trailer_entry *)cep);
+ return;
+ }
+
+ color_fprintf(stdout, color, " [%#08zx] Counterset:%d"
+ " Counters:%d\n", offset, ce.set, ce.ctr);
+ for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
+ const char *ev_name = get_counter_name(ce.set, i, map);
+
+ color_fprintf(stdout, color,
+ "\tCounter:%03d %s Value:%#018lx\n", i,
+ ev_name ?: "<unknown>", be64_to_cpu(*p));
+ }
+ offset += ctrset_size(&ce);
+ }
+}
+
+/* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events
+ * and, if the event was triggered by a counter set diagnostic event,
+ * display its raw data.
+ * The function is only invoked when the dump flag -D is set.
+ */
+void perf_evlist__s390_sample_raw(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct perf_evsel *ev_bc000;
+
+ if (event->header.type != PERF_RECORD_SAMPLE)
+ return;
+
+ ev_bc000 = perf_evlist__event2evsel(evlist, event);
+ if (ev_bc000 == NULL ||
+ ev_bc000->attr.config != PERF_EVENT_CPUM_CF_DIAG)
+ return;
+
+ /* Display raw data on screen */
+ if (!s390_cpumcfdg_testctr(sample)) {
+ pr_err("Invalid counter set data encountered\n");
+ return;
+ }
+ s390_cpumcfdg_dump(sample);
+}
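get_counter_name() turns (set, index) into the event number keyed in the JSON tables via get_counterset_start(). A worked example, against a hypothetical table entry:

/* Worked example: set CPUMF_CTR_SET_CRYPTO starts at 64, so counter 5
 * maps to event number 69 == 0x45 and matches a hypothetical table
 * entry whose event field is "event=0x45".
 */
static int counter_to_event_nr(int set, int nr)
{
	return get_counterset_start(set) + nr;	/* (2, 5) -> 64 + 5 = 69 */
}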
diff --git a/tools/perf/util/sample-raw.c b/tools/perf/util/sample-raw.c
new file mode 100644
index 000000000000..c21e1311fb0f
--- /dev/null
+++ b/tools/perf/util/sample-raw.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+#include "evlist.h"
+#include "env.h"
+#include "sample-raw.h"
+
+/*
+ * Check which platform the perf data file was created on and set up the
+ * platform specific raw sample interpretation.
+ */
+void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist)
+{
+ const char *arch_pf = perf_env__arch(evlist->env);
+
+ if (arch_pf && !strcmp("s390", arch_pf))
+ evlist->trace_event_sample_raw = perf_evlist__s390_sample_raw;
+}
diff --git a/tools/perf/util/sample-raw.h b/tools/perf/util/sample-raw.h
new file mode 100644
index 000000000000..95d445c87e93
--- /dev/null
+++ b/tools/perf/util/sample-raw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SAMPLE_RAW_H
+#define __SAMPLE_RAW_H 1
+
+struct perf_evlist;
+union perf_event;
+struct perf_sample;
+
+void perf_evlist__s390_sample_raw(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample);
+
+void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist);
+#endif /* __SAMPLE_RAW_H */
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 82d28c67e0f3..7b342ce38d99 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,5 +1,5 @@
-libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
-libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+perf-$(CONFIG_LIBPERL) += trace-event-perl.o
+perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b93f36b887b5..5f06378a482b 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -37,6 +37,8 @@
#include "../../perf.h"
#include "../callchain.h"
#include "../machine.h"
+#include "../map.h"
+#include "../symbol.h"
#include "../thread.h"
#include "../event.h"
#include "../trace-event.h"
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 87ef16a1b17e..09604c6508f0 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -44,6 +44,8 @@
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../call-path.h"
+#include "map.h"
+#include "symbol.h"
#include "thread_map.h"
#include "cpumap.h"
#include "print_binary.h"
@@ -733,8 +735,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
- pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
- (const char *)&evsel->attr, sizeof(evsel->attr)));
+ pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr)));
pydict_set_item_string_decref(dict_sample, "pid",
_PyLong_FromLong(sample->pid));
@@ -1172,7 +1173,7 @@ static int python_export_call_return(struct db_export *dbe,
u64 comm_db_id = cr->comm ? cr->comm->db_id : 0;
PyObject *t;
- t = tuple_new(11);
+ t = tuple_new(12);
tuple_set_u64(t, 0, cr->db_id);
tuple_set_u64(t, 1, cr->thread->db_id);
@@ -1185,6 +1186,7 @@ static int python_export_call_return(struct db_export *dbe,
tuple_set_u64(t, 8, cr->return_ref);
tuple_set_u64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
+ tuple_set_u64(t, 11, cr->parent_db_id);
call_object(tables->call_return_handler, t, "call_return_table");
@@ -1193,11 +1195,12 @@ static int python_export_call_return(struct db_export *dbe,
return 0;
}
-static int python_process_call_return(struct call_return *cr, void *data)
+static int python_process_call_return(struct call_return *cr, u64 *parent_db_id,
+ void *data)
{
struct db_export *dbe = data;
- return db_export__call_return(dbe, cr);
+ return db_export__call_return(dbe, cr, parent_db_id);
}
static void python_process_general_event(struct perf_sample *sample,
@@ -1494,34 +1497,40 @@ static void _free_command_line(wchar_t **command_line, int num)
static int python_start_script(const char *script, int argc, const char **argv)
{
struct tables *tables = &tables_global;
+ PyMODINIT_FUNC (*initfunc)(void);
#if PY_MAJOR_VERSION < 3
const char **command_line;
#else
wchar_t **command_line;
#endif
- char buf[PATH_MAX];
+ /*
+ * Use a non-const name variable to cope with python 2.6's
+ * PyImport_AppendInittab prototype
+ */
+ char buf[PATH_MAX], name[19] = "perf_trace_context";
int i, err = 0;
FILE *fp;
#if PY_MAJOR_VERSION < 3
+ initfunc = initperf_trace_context;
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
#else
+ initfunc = PyInit_perf_trace_context;
command_line = malloc((argc + 1) * sizeof(wchar_t *));
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
#endif
+ PyImport_AppendInittab(name, initfunc);
Py_Initialize();
#if PY_MAJOR_VERSION < 3
- initperf_trace_context();
PySys_SetArgv(argc + 1, (char **)command_line);
#else
- PyInit_perf_trace_context();
PySys_SetArgv(argc + 1, command_line);
#endif
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5456c84c7dd1..b17f1c9bc965 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -13,6 +13,8 @@
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
+#include "map.h"
+#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
@@ -23,6 +25,7 @@
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
+#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"
@@ -129,6 +132,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
ordered_events__init(&session->ordered_events,
ordered_events__deliver_event, NULL);
+ perf_env__init(&session->header.env);
if (data) {
if (perf_data__open(data))
goto out_delete;
@@ -137,7 +141,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
if (perf_data__is_read(data)) {
if (perf_session__open(session) < 0)
- goto out_close;
+ goto out_delete;
/*
* set session attributes that are present in perf.data
@@ -147,6 +151,12 @@ struct perf_session *perf_session__new(struct perf_data *data,
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
+
+ perf_evlist__init_trace_event_sample_raw(session->evlist);
+
+ /* Open the directory data. */
+ if (data->is_dir && perf_data__open_dir(data))
+ goto out_delete;
}
} else {
session->machines.host.env = &perf_env;
@@ -176,8 +186,6 @@ struct perf_session *perf_session__new(struct perf_data *data,
return session;
- out_close:
- perf_data__close(data);
out_delete:
perf_session__delete(session);
out:
@@ -376,6 +384,10 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->itrace_start = perf_event__process_itrace_start;
if (tool->context_switch == NULL)
tool->context_switch = perf_event__process_switch;
+ if (tool->ksymbol == NULL)
+ tool->ksymbol = perf_event__process_ksymbol;
+ if (tool->bpf_event == NULL)
+ tool->bpf_event = perf_event__process_bpf_event;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
@@ -694,7 +706,10 @@ static void perf_event__auxtrace_error_swap(union perf_event *event,
event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
+ event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
+ if (event->auxtrace_error.fmt)
+ event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}
static void perf_event__thread_map_swap(union perf_event *event,
@@ -1065,6 +1080,8 @@ static void dump_event(struct perf_evlist *evlist, union perf_event *event,
file_offset, event->header.size, event->header.type);
trace_event(event);
+ if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
+ evlist->trace_event_sample_raw(evlist, event, sample);
if (sample)
perf_evlist__print_tstamp(evlist, event, sample);
@@ -1188,6 +1205,13 @@ static int deliver_sample_value(struct perf_evlist *evlist,
return 0;
}
+ /*
+ * There's no reason to deliver a sample
+ * with a zero period, so bail out.
+ */
+ if (!sample->period)
+ return 0;
+
return tool->sample(tool, event, sample, sid->evsel, machine);
}
@@ -1305,6 +1329,10 @@ static int machines__deliver_event(struct machines *machines,
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
return tool->context_switch(tool, event, sample, machine);
+ case PERF_RECORD_KSYMBOL:
+ return tool->ksymbol(tool, event, sample, machine);
+ case PERF_RECORD_BPF_EVENT:
+ return tool->bpf_event(tool, event, sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
@@ -1820,38 +1848,42 @@ fetch_mmaped_event(struct perf_session *session,
#define NUM_MMAPS 128
#endif
-static int __perf_session__process_events(struct perf_session *session,
- u64 data_offset, u64 data_size,
- u64 file_size)
+struct reader;
+
+typedef s64 (*reader_cb_t)(struct perf_session *session,
+ union perf_event *event,
+ u64 file_offset);
+
+struct reader {
+ int fd;
+ u64 data_size;
+ u64 data_offset;
+ reader_cb_t process;
+};
+
+static int
+reader__process_events(struct reader *rd, struct perf_session *session,
+ struct ui_progress *prog)
{
- struct ordered_events *oe = &session->ordered_events;
- struct perf_tool *tool = session->tool;
- int fd = perf_data__fd(session->data);
+ u64 data_size = rd->data_size;
u64 head, page_offset, file_offset, file_pos, size;
- int err, mmap_prot, mmap_flags, map_idx = 0;
+ int err = 0, mmap_prot, mmap_flags, map_idx = 0;
size_t mmap_size;
char *buf, *mmaps[NUM_MMAPS];
union perf_event *event;
- struct ui_progress prog;
s64 skip;
- perf_tool__fill_defaults(tool);
-
- page_offset = page_size * (data_offset / page_size);
+ page_offset = page_size * (rd->data_offset / page_size);
file_offset = page_offset;
- head = data_offset - page_offset;
-
- if (data_size == 0)
- goto out;
+ head = rd->data_offset - page_offset;
- if (data_offset + data_size < file_size)
- file_size = data_offset + data_size;
+ ui_progress__init_size(prog, data_size, "Processing events...");
- ui_progress__init_size(&prog, file_size, "Processing events...");
+ data_size += rd->data_offset;
mmap_size = MMAP_SIZE;
- if (mmap_size > file_size) {
- mmap_size = file_size;
+ if (mmap_size > data_size) {
+ mmap_size = data_size;
session->one_mmap = true;
}
@@ -1865,12 +1897,12 @@ static int __perf_session__process_events(struct perf_session *session,
mmap_flags = MAP_PRIVATE;
}
remap:
- buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
+ buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
- goto out_err;
+ goto out;
}
mmaps[map_idx] = buf;
map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
@@ -1897,12 +1929,12 @@ more:
size = event->header.size;
if (size < sizeof(struct perf_event_header) ||
- (skip = perf_session__process_event(session, event, file_pos)) < 0) {
+ (skip = rd->process(session, event, file_pos)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
file_offset + head, event->header.size,
event->header.type);
err = -EINVAL;
- goto out_err;
+ goto out;
}
if (skip)
@@ -1911,15 +1943,48 @@ more:
head += size;
file_pos += size;
- ui_progress__update(&prog, size);
+ ui_progress__update(prog, size);
if (session_done())
goto out;
- if (file_pos < file_size)
+ if (file_pos < data_size)
goto more;
out:
+ return err;
+}
+
+static s64 process_simple(struct perf_session *session,
+ union perf_event *event,
+ u64 file_offset)
+{
+ return perf_session__process_event(session, event, file_offset);
+}
+
+static int __perf_session__process_events(struct perf_session *session)
+{
+ struct reader rd = {
+ .fd = perf_data__fd(session->data),
+ .data_size = session->header.data_size,
+ .data_offset = session->header.data_offset,
+ .process = process_simple,
+ };
+ struct ordered_events *oe = &session->ordered_events;
+ struct perf_tool *tool = session->tool;
+ struct ui_progress prog;
+ int err;
+
+ perf_tool__fill_defaults(tool);
+
+ if (rd.data_size == 0)
+ return -1;
+
+ ui_progress__init_size(&prog, rd.data_size, "Processing events...");
+
+ err = reader__process_events(&rd, session, &prog);
+ if (err)
+ goto out_err;
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
@@ -1944,20 +2009,13 @@ out_err:
int perf_session__process_events(struct perf_session *session)
{
- u64 size = perf_data__size(session->data);
- int err;
-
if (perf_session__register_idle_thread(session) < 0)
return -ENOMEM;
- if (!perf_data__is_pipe(session->data))
- err = __perf_session__process_events(session,
- session->header.data_offset,
- session->header.data_size, size);
- else
- err = __perf_session__process_pipe_events(session);
+ if (perf_data__is_pipe(session->data))
+ return __perf_session__process_pipe_events(session);
- return err;
+ return __perf_session__process_events(session);
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
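
This refactor splits event processing in two: reader__process_events() owns the mmap/remap walk over one region of the file, and everything event-specific goes through the reader_cb_t callback, of which process_simple() is so far the only instance; the struct makes it cheap to add more readers later, for example one per file of a directory-mode perf.data. A minimal standalone sketch of the same walk-plus-callback shape (record layout and names are invented; the real code walks mmapped perf.data and assumes little-endian here for brevity):

#include <stdint.h>
#include <stdio.h>

struct record { uint16_t size; /* total size, payload follows */ };

struct reader;
typedef int64_t (*reader_cb_t)(struct reader *rd, const struct record *rec,
			       uint64_t offset);

struct reader {
	const unsigned char *base;	/* mmapped data in the real code */
	uint64_t data_offset;
	uint64_t data_size;
	reader_cb_t process;
};

static int reader__process_records(struct reader *rd)
{
	uint64_t pos = rd->data_offset, end = rd->data_offset + rd->data_size;

	while (pos + sizeof(struct record) <= end) {
		const struct record *rec =
			(const struct record *)(rd->base + pos);

		/* Malformed size or callback failure aborts the walk,
		 * mirroring the error path in reader__process_events(). */
		if (rec->size < sizeof(*rec) || rd->process(rd, rec, pos) < 0)
			return -1;
		pos += rec->size;
	}
	return 0;
}

static int64_t process_simple(struct reader *rd, const struct record *rec,
			      uint64_t offset)
{
	(void)rd;
	printf("record at %#llx, %u bytes\n",
	       (unsigned long long)offset, rec->size);
	return 0;
}

int main(void)
{
	static const unsigned char buf[] = {
		4, 0, 'h', 'i',			/* record 1: 4 bytes */
		6, 0, 'p', 'e', 'r', 'f',	/* record 2: 6 bytes */
	};
	struct reader rd = {
		.base = buf, .data_offset = 0, .data_size = sizeof(buf),
		.process = process_simple,
	};

	return reader__process_records(&rd);
}
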
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 63f758c655d5..5b5a167b43ce 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
from os import getenv
from subprocess import Popen, PIPE
from re import sub
@@ -17,6 +15,8 @@ if cc == "clang":
vars[var] = sub("-mcet", "", vars[var])
if not clang_has_option("-fcf-protection"):
vars[var] = sub("-fcf-protection", "", vars[var])
+ if not clang_has_option("-fstack-clash-protection"):
+ vars[var] = sub("-fstack-clash-protection", "", vars[var])
from distutils.core import setup, Extension
@@ -53,9 +53,14 @@ ext_sources = [f.strip() for f in open('util/python-ext-sources')
# use full paths with source files
ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources))
+extra_libraries = []
+if '-DHAVE_LIBNUMA_SUPPORT' in cflags:
+ extra_libraries = [ 'numa' ]
+
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
+ libraries = extra_libraries,
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 6c1a83768eb0..5d2518e89fc4 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -3,17 +3,21 @@
#include <inttypes.h>
#include <regex.h>
#include <linux/mman.h>
+#include <linux/time64.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
+#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
+#include "time-utils.h"
#include <linux/kernel.h>
regex_t parent_regex;
@@ -230,8 +234,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
if (sym_l == sym_r)
return 0;
- if (sym_l->inlined || sym_r->inlined)
- return strcmp(sym_l->name, sym_r->name);
+ if (sym_l->inlined || sym_r->inlined) {
+ int ret = strcmp(sym_l->name, sym_r->name);
+
+ if (ret)
+ return ret;
+ if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
+ return 0;
+ }
if (sym_l->start != sym_r->start)
return (int64_t)(sym_r->start - sym_l->start);
@@ -428,8 +438,6 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
{
struct symbol *sym = he->ms.sym;
- struct map *map = he->ms.map;
- struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct annotation *notes;
double ipc = 0.0, coverage = 0.0;
char tmp[64];
@@ -437,11 +445,6 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
if (!sym)
return repsep_snprintf(bf, size, "%-*s", width, "-");
- if (!sym->annotate2 && symbol__annotate2(sym, map, evsel,
- &annotation__default_options, NULL) < 0) {
- return 0;
- }
-
notes = symbol__annotation(sym);
if (notes->hit_cycles)
@@ -654,6 +657,42 @@ struct sort_entry sort_socket = {
.se_width_idx = HISTC_SOCKET,
};
+/* --sort time */
+
+static int64_t
+sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->time - left->time;
+}
+
+static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ unsigned long secs;
+ unsigned long long nsecs;
+ char he_time[32];
+
+ nsecs = he->time;
+ secs = nsecs / NSEC_PER_SEC;
+ nsecs -= secs * NSEC_PER_SEC;
+
+ if (symbol_conf.nanosecs)
+ snprintf(he_time, sizeof he_time, "%5lu.%09llu: ",
+ secs, nsecs);
+ else
+ timestamp__scnprintf_usec(he->time, he_time,
+ sizeof(he_time));
+
+ return repsep_snprintf(bf, size, "%-.*s", width, he_time);
+}
+
+struct sort_entry sort_time = {
+ .se_header = "Time",
+ .se_cmp = sort__time_cmp,
+ .se_snprintf = hist_entry__time_snprintf,
+ .se_width_idx = HISTC_TIME,
+};
+
/* --sort trace */
static char *get_trace_output(struct hist_entry *he)
@@ -1634,6 +1673,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
+ DIM(SORT_TIME, "time", sort_time),
};
#undef DIM
@@ -3068,3 +3108,54 @@ void reset_output_field(void)
reset_dimensions();
perf_hpp__reset_output_field(&perf_hpp_list);
}
+
+#define INDENT (3*8 + 1)
+
+static void add_key(struct strbuf *sb, const char *str, int *llen)
+{
+ if (*llen >= 75) {
+ strbuf_addstr(sb, "\n\t\t\t ");
+ *llen = INDENT;
+ }
+ strbuf_addf(sb, " %s", str);
+ *llen += strlen(str) + 1;
+}
+
+static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
+ int *llen)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ add_key(sb, s[i].name, llen);
+}
+
+static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
+ int *llen)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ add_key(sb, s[i].name, llen);
+}
+
+const char *sort_help(const char *prefix)
+{
+ struct strbuf sb;
+ char *s;
+ int len = strlen(prefix) + INDENT;
+
+ strbuf_init(&sb, 300);
+ strbuf_addstr(&sb, prefix);
+ add_hpp_sort_string(&sb, hpp_sort_dimensions,
+ ARRAY_SIZE(hpp_sort_dimensions), &len);
+ add_sort_string(&sb, common_sort_dimensions,
+ ARRAY_SIZE(common_sort_dimensions), &len);
+ add_sort_string(&sb, bstack_sort_dimensions,
+ ARRAY_SIZE(bstack_sort_dimensions), &len);
+ add_sort_string(&sb, memory_sort_dimensions,
+ ARRAY_SIZE(memory_sort_dimensions), &len);
+ s = strbuf_detach(&sb, NULL);
+ strbuf_release(&sb);
+ return s;
+}
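
sort_help() renders every known sort dimension into one wrapped string for --help output, breaking near column 75 and indenting continuation lines so they align under the option text. The same wrap-and-indent idea as a self-contained sketch, using plain strncat instead of perf's strbuf (the threshold and INDENT mirror the constants above; the key list is an illustrative subset):

#include <stdio.h>
#include <string.h>

#define INDENT (3 * 8 + 1)

static void add_key(char *buf, size_t sz, const char *str, int *llen)
{
	if (*llen >= 75) {
		strncat(buf, "\n\t\t\t ", sz - strlen(buf) - 1);
		*llen = INDENT;
	}
	strncat(buf, " ", sz - strlen(buf) - 1);
	strncat(buf, str, sz - strlen(buf) - 1);
	*llen += strlen(str) + 1;
}

int main(void)
{
	static const char *keys[] = {
		"overhead", "comm", "dso", "symbol", "srcline", "time",
		"cpu", "socket", "trace", "weight", "local_weight",
	};
	char help[1024] = "sort by key(s):";
	int llen = strlen(help) + INDENT;
	size_t i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		add_key(help, sizeof(help), keys[i], &llen);
	puts(help);
	return 0;
}
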
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 130fe37fe2df..ce376a73f964 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -9,7 +9,8 @@
#include <linux/list.h>
#include "cache.h"
#include <linux/rbtree.h>
-#include "symbol.h"
+#include "map_symbol.h"
+#include "symbol_conf.h"
#include "string.h"
#include "callchain.h"
#include "values.h"
@@ -46,6 +47,12 @@ extern struct sort_entry sort_srcline;
extern enum sort_type sort__first_dimension;
extern const char default_mem_sort_order[];
+struct res_sample {
+ u64 time;
+ int cpu;
+ int tid;
+};
+
struct he_stat {
u64 period;
u64 period_sys;
@@ -134,10 +141,13 @@ struct hist_entry {
char *srcfile;
struct symbol *parent;
struct branch_info *branch_info;
+ long time;
struct hists *hists;
struct mem_info *mem_info;
void *raw_data;
u32 raw_size;
+ int num_res;
+ struct res_sample *res_samples;
void *trace_output;
struct perf_hpp_list *hpp_list;
struct hist_entry *parent_he;
@@ -145,8 +155,8 @@ struct hist_entry {
union {
/* this is for hierarchical entry structure */
struct {
- struct rb_root hroot_in;
- struct rb_root hroot_out;
+ struct rb_root_cached hroot_in;
+ struct rb_root_cached hroot_out;
}; /* non-leaf entries */
struct rb_root sorted_chain; /* leaf entry has callchains */
};
@@ -230,6 +240,7 @@ enum sort_type {
SORT_DSO_SIZE,
SORT_CGROUP_ID,
SORT_SYM_IPC_NULL,
+ SORT_TIME,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -285,6 +296,8 @@ void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
+const char *sort_help(const char *prefix);
+
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
bool is_strict_order(const char *order);
diff --git a/tools/perf/util/srccode.h b/tools/perf/util/srccode.h
index e500a746d5f1..1b5ed769779c 100644
--- a/tools/perf/util/srccode.h
+++ b/tools/perf/util/srccode.h
@@ -1,6 +1,19 @@
#ifndef SRCCODE_H
#define SRCCODE_H 1
+struct srccode_state {
+ char *srcfile;
+ unsigned line;
+};
+
+static inline void srccode_state_init(struct srccode_state *state)
+{
+ state->srcfile = NULL;
+ state->line = 0;
+}
+
+void srccode_state_free(struct srccode_state *state);
+
/* Result is not 0 terminated */
char *find_sourceline(char *fn, unsigned line, int *lenp);
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index dc86597d0cc4..10ca1533937e 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
} else {
/* create a fake symbol for the inline frame */
inline_sym = symbol__new(base_sym ? base_sym->start : 0,
- base_sym ? base_sym->end : 0,
+ base_sym ? (base_sym->end - base_sym->start) : 0,
base_sym ? base_sym->binding : 0,
base_sym ? base_sym->type : 0,
funcname);
@@ -594,11 +594,12 @@ struct srcline_node {
struct rb_node rb_node;
};
-void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
+void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline)
{
- struct rb_node **p = &tree->rb_node;
+ struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
struct srcline_node *i, *node;
+ bool leftmost = true;
node = zalloc(sizeof(struct srcline_node));
if (!node) {
@@ -614,16 +615,18 @@ void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
i = rb_entry(parent, struct srcline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&node->rb_node, parent, p);
- rb_insert_color(&node->rb_node, tree);
+ rb_insert_color_cached(&node->rb_node, tree, leftmost);
}
-char *srcline__tree_find(struct rb_root *tree, u64 addr)
+char *srcline__tree_find(struct rb_root_cached *tree, u64 addr)
{
- struct rb_node *n = tree->rb_node;
+ struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct srcline_node *i = rb_entry(n, struct srcline_node,
@@ -640,15 +643,15 @@ char *srcline__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
-void srcline__tree_delete(struct rb_root *tree)
+void srcline__tree_delete(struct rb_root_cached *tree)
{
struct srcline_node *pos;
- struct rb_node *next = rb_first(tree);
+ struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct srcline_node, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, tree);
+ rb_erase_cached(&pos->rb_node, tree);
free_srcline(pos->srcline);
zfree(&pos);
}
@@ -682,28 +685,32 @@ void inline_node__delete(struct inline_node *node)
free(node);
}
-void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines)
+void inlines__tree_insert(struct rb_root_cached *tree,
+ struct inline_node *inlines)
{
- struct rb_node **p = &tree->rb_node;
+ struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 addr = inlines->addr;
struct inline_node *i;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
i = rb_entry(parent, struct inline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&inlines->rb_node, parent, p);
- rb_insert_color(&inlines->rb_node, tree);
+ rb_insert_color_cached(&inlines->rb_node, tree, leftmost);
}
-struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
+struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr)
{
- struct rb_node *n = tree->rb_node;
+ struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct inline_node *i = rb_entry(n, struct inline_node,
@@ -720,15 +727,15 @@ struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
-void inlines__tree_delete(struct rb_root *tree)
+void inlines__tree_delete(struct rb_root_cached *tree)
{
struct inline_node *pos;
- struct rb_node *next = rb_first(tree);
+ struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct inline_node, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, tree);
+ rb_erase_cached(&pos->rb_node, tree);
inline_node__delete(pos);
}
}
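
Every conversion in this file (and the matching ones in symbol.c) follows the same recipe: descend from tree->rb_root.rb_node instead of tree->rb_node, clear a leftmost flag the moment the walk steps right, and pass that flag to rb_insert_color_cached() so rb_first_cached() stays O(1). The skeleton, extracted once (assumes the rbtree API from tools/lib via <linux/rbtree.h>; node type is illustrative):

#include <linux/rbtree.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct rb_node rb_node;
	unsigned long long addr;
};

static void tree_insert(struct rb_root_cached *tree, struct node *node)
{
	struct rb_node **p = &tree->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p != NULL) {
		struct node *i;

		parent = *p;
		i = rb_entry(parent, struct node, rb_node);
		if (node->addr < i->addr) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* stepped right at least once */
		}
	}
	rb_link_node(&node->rb_node, parent, p);
	/* Caching the leftmost node makes rb_first_cached() constant time. */
	rb_insert_color_cached(&node->rb_node, tree, leftmost);
}
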
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 5762212dc342..b11a0aaaa676 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -19,11 +19,11 @@ void free_srcline(char *srcline);
char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line);
/* insert the srcline into the DSO, which will take ownership */
-void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
+void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline);
/* find previously inserted srcline */
-char *srcline__tree_find(struct rb_root *tree, u64 addr);
+char *srcline__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all srclines within the tree */
-void srcline__tree_delete(struct rb_root *tree);
+void srcline__tree_delete(struct rb_root_cached *tree);
#define SRCLINE_UNKNOWN ((char *) "??:0")
@@ -46,10 +46,11 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
void inline_node__delete(struct inline_node *node);
/* insert the inline node list into the DSO, which will take ownership */
-void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines);
+void inlines__tree_insert(struct rb_root_cached *tree,
+ struct inline_node *inlines);
/* find previously inserted inline node list */
-struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr);
+struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all nodes within the tree of inline_node entries */
-void inlines__tree_delete(struct rb_root *tree);
+void inlines__tree_delete(struct rb_root_cached *tree);
#endif /* PERF_SRCLINE_H */
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 665ee374fc01..6d043c78f3c2 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include <linux/time64.h>
#include <math.h>
+#include "color.h"
#include "evlist.h"
#include "evsel.h"
#include "stat.h"
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 3c22c58b3e90..83d8094be4fe 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -168,7 +168,7 @@ static void reset_stat(struct runtime_stat *st)
struct rb_node *pos, *next;
rblist = &st->value_list;
- next = rb_first(&rblist->entries);
+ next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 4d40515307b8..2856cc9d5a31 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
break;
case AGGR_GLOBAL:
aggr->val += count->val;
- if (config->scale) {
- aggr->ena += count->ena;
- aggr->run += count->run;
- }
+ aggr->ena += count->ena;
+ aggr->run += count->run;
case AGGR_UNSET:
default:
break;
@@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr;
struct perf_evsel *leader = evsel->leader;
- if (config->scale) {
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING;
- }
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
/*
* The event is part of non trivial group, let's enable
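
With the config->scale guards removed, every counter is opened with PERF_FORMAT_TOTAL_TIME_ENABLED|RUNNING and the enabled/running times are always accumulated, so whether to apply them becomes purely a presentation choice. Those two fields exist to correct counts for event multiplexing; a hedged raw-syscall sketch of that correction (not perf's own stat path):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct read_format {
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
};

int main(void)
{
	struct perf_event_attr attr;
	struct read_format rf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	/* Count instructions of the current task on any CPU. */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... workload runs here ... */
	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running) {
		/* Estimate the full-period count when the event was
		 * multiplexed and only ran part of the time. */
		double scaled = (double)rf.value *
				rf.time_enabled / rf.time_running;

		printf("raw %llu, scaled %.0f (%.1f%% running)\n",
		       (unsigned long long)rf.value, scaled,
		       100.0 * rf.time_running / rf.time_enabled);
	}
	close(fd);
	return 0;
}
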
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index d58f1e08b170..7e82c71dcc42 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -57,7 +57,7 @@ static inline unsigned int strlist__nr_entries(const struct strlist *slist)
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *slist)
{
- struct rb_node *rn = rb_first(&slist->rblist.entries);
+ struct rb_node *rn = rb_first_cached(&slist->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 66a84d5846c8..4ad106a5f2c0 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -6,6 +6,8 @@
#include <unistd.h>
#include <inttypes.h>
+#include "map.h"
+#include "map_groups.h"
#include "symbol.h"
#include "demangle-java.h"
#include "demangle-rust.h"
@@ -19,6 +21,20 @@
#define EM_AARCH64 183 /* ARM 64 bit */
#endif
+#ifndef ELF32_ST_VISIBILITY
+#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
+#endif
+
+/* For ELF64 the definitions are the same. */
+#ifndef ELF64_ST_VISIBILITY
+#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
+#endif
+
+/* How to extract information held in the st_other field. */
+#ifndef GELF_ST_VISIBILITY
+#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
+#endif
+
typedef Elf64_Nhdr GElf_Nhdr;
#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
@@ -87,6 +103,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
return GELF_ST_TYPE(sym->st_info);
}
+static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
+{
+ return GELF_ST_VISIBILITY(sym->st_other);
+}
+
#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif
@@ -111,7 +132,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
return elf_sym__type(sym) == STT_NOTYPE &&
sym->st_name != 0 &&
sym->st_shndx != SHN_UNDEF &&
- sym->st_shndx != SHN_ABS;
+ sym->st_shndx != SHN_ABS &&
+ elf_sym__visibility(sym) != STV_HIDDEN &&
+ elf_sym__visibility(sym) != STV_INTERNAL;
}
static bool elf_sym__filter(GElf_Sym *sym)
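
The new elf_sym__is_label() checks reject STV_HIDDEN and STV_INTERNAL symbols, and the fallback GELF_ST_VISIBILITY definitions cover old headers: visibility is simply the low two bits of st_other. A hedged libelf sketch that classifies each .symtab entry's visibility the same way (link with -lelf; error handling kept minimal):

#include <err.h>
#include <fcntl.h>
#include <gelf.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	Elf_Scn *scn = NULL;
	GElf_Shdr shdr;
	Elf *elf;
	int fd;

	if (argc != 2)
		errx(1, "usage: %s <elf-file>", argv[0]);
	elf_version(EV_CURRENT);
	fd = open(argv[1], O_RDONLY);
	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		errx(1, "not an ELF file?");

	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		Elf_Data *data;
		size_t i, n;

		gelf_getshdr(scn, &shdr);
		if (shdr.sh_type != SHT_SYMTAB)
			continue;
		data = elf_getdata(scn, NULL);
		n = shdr.sh_size / shdr.sh_entsize;
		for (i = 0; i < n; i++) {
			GElf_Sym sym;
			int vis;

			gelf_getsym(data, i, &sym);
			vis = GELF_ST_VISIBILITY(sym.st_other);
			/* STV_HIDDEN/STV_INTERNAL are what the patch filters */
			printf("%-9s %s\n",
			       vis == STV_HIDDEN    ? "hidden" :
			       vis == STV_INTERNAL  ? "internal" :
			       vis == STV_PROTECTED ? "protected" : "default",
			       elf_strptr(elf, shdr.sh_link, sym.st_name));
		}
	}
	elf_end(elf);
	close(fd);
	return 0;
}
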
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 7119df77dc0b..17edbd4f6f85 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -3,6 +3,7 @@
#include "util.h"
#include <errno.h>
+#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 48efad6d0f90..5cbad55cd99d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -6,6 +6,7 @@
#include <string.h>
#include <linux/kernel.h>
#include <linux/mman.h>
+#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
@@ -17,6 +18,7 @@
#include "util.h"
#include "debug.h"
#include "machine.h"
+#include "map.h"
#include "symbol.h"
#include "strlist.h"
#include "intlist.h"
@@ -38,15 +40,18 @@ int vmlinux_path__nr_entries;
char **vmlinux_path;
struct symbol_conf symbol_conf = {
+ .nanosecs = false,
.use_modules = true,
.try_vmlinux_path = true,
.demangle = true,
.demangle_kernel = false,
.cumulate_callchain = true,
+ .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
.show_hist_headers = true,
.symfs = "",
.event_group = true,
.inline_name = true,
+ .res_sample = 0,
};
static enum dso_binary_type binary_type_symtab[] = {
@@ -163,7 +168,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
return arch__choose_best_symbol(syma, symb);
}
-void symbols__fixup_duplicate(struct rb_root *symbols)
+void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
struct rb_node *nd;
struct symbol *curr, *next;
@@ -171,7 +176,7 @@ void symbols__fixup_duplicate(struct rb_root *symbols)
if (symbol_conf.allow_aliases)
return;
- nd = rb_first(symbols);
+ nd = rb_first_cached(symbols);
while (nd) {
curr = rb_entry(nd, struct symbol, rb_node);
@@ -186,20 +191,20 @@ again:
continue;
if (choose_best_symbol(curr, next) == SYMBOL_A) {
- rb_erase(&next->rb_node, symbols);
+ rb_erase_cached(&next->rb_node, symbols);
symbol__delete(next);
goto again;
} else {
nd = rb_next(&curr->rb_node);
- rb_erase(&curr->rb_node, symbols);
+ rb_erase_cached(&curr->rb_node, symbols);
symbol__delete(curr);
}
}
}
-void symbols__fixup_end(struct rb_root *symbols)
+void symbols__fixup_end(struct rb_root_cached *symbols)
{
- struct rb_node *nd, *prevnd = rb_first(symbols);
+ struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
if (prevnd == NULL)
@@ -282,25 +287,27 @@ void symbol__delete(struct symbol *sym)
free(((void *)sym) - symbol_conf.priv_size);
}
-void symbols__delete(struct rb_root *symbols)
+void symbols__delete(struct rb_root_cached *symbols)
{
struct symbol *pos;
- struct rb_node *next = rb_first(symbols);
+ struct rb_node *next = rb_first_cached(symbols);
while (next) {
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, symbols);
+ rb_erase_cached(&pos->rb_node, symbols);
symbol__delete(pos);
}
}
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
+void __symbols__insert(struct rb_root_cached *symbols,
+ struct symbol *sym, bool kernel)
{
- struct rb_node **p = &symbols->rb_node;
+ struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 ip = sym->start;
struct symbol *s;
+ bool leftmost = true;
if (kernel) {
const char *name = sym->name;
@@ -318,26 +325,28 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
s = rb_entry(parent, struct symbol, rb_node);
if (ip < s->start)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&sym->rb_node, parent, p);
- rb_insert_color(&sym->rb_node, symbols);
+ rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}
-void symbols__insert(struct rb_root *symbols, struct symbol *sym)
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
__symbols__insert(symbols, sym, false);
}
-static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
+static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
struct rb_node *n;
if (symbols == NULL)
return NULL;
- n = symbols->rb_node;
+ n = symbols->rb_root.rb_node;
while (n) {
struct symbol *s = rb_entry(n, struct symbol, rb_node);
@@ -353,9 +362,9 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
return NULL;
}
-static struct symbol *symbols__first(struct rb_root *symbols)
+static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
- struct rb_node *n = rb_first(symbols);
+ struct rb_node *n = rb_first_cached(symbols);
if (n)
return rb_entry(n, struct symbol, rb_node);
@@ -363,9 +372,9 @@ static struct symbol *symbols__first(struct rb_root *symbols)
return NULL;
}
-static struct symbol *symbols__last(struct rb_root *symbols)
+static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
- struct rb_node *n = rb_last(symbols);
+ struct rb_node *n = rb_last(&symbols->rb_root);
if (n)
return rb_entry(n, struct symbol, rb_node);
@@ -383,11 +392,12 @@ static struct symbol *symbols__next(struct symbol *sym)
return NULL;
}
-static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
+static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
- struct rb_node **p = &symbols->rb_node;
+ struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
struct symbol_name_rb_node *symn, *s;
+ bool leftmost = true;
symn = container_of(sym, struct symbol_name_rb_node, sym);
@@ -396,19 +406,21 @@ static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
if (strcmp(sym->name, s->sym.name) < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&symn->rb_node, parent, p);
- rb_insert_color(&symn->rb_node, symbols);
+ rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}
-static void symbols__sort_by_name(struct rb_root *symbols,
- struct rb_root *source)
+static void symbols__sort_by_name(struct rb_root_cached *symbols,
+ struct rb_root_cached *source)
{
struct rb_node *nd;
- for (nd = rb_first(source); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
symbols__insert_by_name(symbols, pos);
}
@@ -431,7 +443,7 @@ int symbol__match_symbol_name(const char *name, const char *str,
return arch__compare_symbol_names(name, str);
}
-static struct symbol *symbols__find_by_name(struct rb_root *symbols,
+static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
const char *name,
enum symbol_tag_include includes)
{
@@ -441,7 +453,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
if (symbols == NULL)
return NULL;
- n = symbols->rb_node;
+ n = symbols->rb_root.rb_node;
while (n) {
int cmp;
@@ -644,7 +656,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
{
struct symbol *sym;
struct dso *dso = arg;
- struct rb_root *root = &dso->symbols;
+ struct rb_root_cached *root = &dso->symbols;
if (!symbol_type__filter(type))
return 0;
@@ -681,14 +693,14 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
struct map *curr_map;
struct symbol *pos;
int count = 0;
- struct rb_root old_root = dso->symbols;
- struct rb_root *root = &dso->symbols;
- struct rb_node *next = rb_first(root);
+ struct rb_root_cached old_root = dso->symbols;
+ struct rb_root_cached *root = &dso->symbols;
+ struct rb_node *next = rb_first_cached(root);
if (!kmaps)
return -1;
- *root = RB_ROOT;
+ *root = RB_ROOT_CACHED;
while (next) {
char *module;
@@ -696,8 +708,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase_init(&pos->rb_node, &old_root);
-
+ rb_erase_cached(&pos->rb_node, &old_root);
+ RB_CLEAR_NODE(&pos->rb_node);
module = strchr(pos->name, '\t');
if (module)
*module = '\0';
@@ -710,6 +722,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
}
pos->start -= curr_map->start - curr_map->pgoff;
+ if (pos->end > curr_map->end)
+ pos->end = curr_map->end;
if (pos->end)
pos->end -= curr_map->start - curr_map->pgoff;
symbols__insert(&curr_map->dso->symbols, pos);
@@ -734,8 +748,8 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
struct map *curr_map = initial_map;
struct symbol *pos;
int count = 0, moved = 0;
- struct rb_root *root = &dso->symbols;
- struct rb_node *next = rb_first(root);
+ struct rb_root_cached *root = &dso->symbols;
+ struct rb_node *next = rb_first_cached(root);
int kernel_range = 0;
bool x86_64;
@@ -849,7 +863,7 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
}
add_symbol:
if (curr_map != initial_map) {
- rb_erase(&pos->rb_node, root);
+ rb_erase_cached(&pos->rb_node, root);
symbols__insert(&curr_map->dso->symbols, pos);
++moved;
} else
@@ -857,7 +871,7 @@ add_symbol:
continue;
discard_symbol:
- rb_erase(&pos->rb_node, root);
+ rb_erase_cached(&pos->rb_node, root);
symbol__delete(pos);
}
@@ -1441,6 +1455,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
return true;
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
default:
return false;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 14d9d438e7e2..9a8fe012910a 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -5,16 +5,13 @@
#include <linux/types.h>
#include <stdbool.h>
#include <stdint.h>
-#include "map.h"
-#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdio.h>
-#include <byteswap.h>
-#include <libgen.h>
-#include "build-id.h"
-#include "event.h"
+#include "map_symbol.h"
+#include "branch.h"
#include "path.h"
+#include "symbol_conf.h"
#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@@ -24,6 +21,10 @@
#include "dso.h"
+struct map;
+struct map_groups;
+struct option;
+
/*
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
@@ -68,7 +69,7 @@ struct symbol {
};
void symbol__delete(struct symbol *sym);
-void symbols__delete(struct rb_root *symbols);
+void symbols__delete(struct rb_root_cached *symbols);
/* symbols__for_each_entry - iterate over symbols (rb_root)
*
@@ -77,7 +78,7 @@ void symbols__delete(struct rb_root *symbols);
* @nd: the 'struct rb_node *' to use as a temporary storage
*/
#define symbols__for_each_entry(symbols, pos, nd) \
- for (nd = rb_first(symbols); \
+ for (nd = rb_first_cached(symbols); \
nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
nd = rb_next(nd))
@@ -89,69 +90,6 @@ static inline size_t symbol__size(const struct symbol *sym)
struct strlist;
struct intlist;
-struct symbol_conf {
- unsigned short priv_size;
- bool try_vmlinux_path,
- init_annotation,
- force,
- ignore_vmlinux,
- ignore_vmlinux_buildid,
- show_kernel_path,
- use_modules,
- allow_aliases,
- sort_by_name,
- show_nr_samples,
- show_total_period,
- use_callchain,
- cumulate_callchain,
- show_branchflag_count,
- exclude_other,
- show_cpu_utilization,
- initialized,
- kptr_restrict,
- event_group,
- demangle,
- demangle_kernel,
- filter_relative,
- show_hist_headers,
- branch_callstack,
- has_filter,
- show_ref_callgraph,
- hide_unresolved,
- raw_trace,
- report_hierarchy,
- inline_name;
- const char *vmlinux_name,
- *kallsyms_name,
- *source_prefix,
- *field_sep,
- *graph_function;
- const char *default_guest_vmlinux_name,
- *default_guest_kallsyms,
- *default_guest_modules;
- const char *guestmount;
- const char *dso_list_str,
- *comm_list_str,
- *pid_list_str,
- *tid_list_str,
- *sym_list_str,
- *col_width_list_str,
- *bt_stop_list_str;
- struct strlist *dso_list,
- *comm_list,
- *sym_list,
- *dso_from_list,
- *dso_to_list,
- *sym_from_list,
- *sym_to_list,
- *bt_stop_list;
- struct intlist *pid_list,
- *tid_list;
- const char *symfs;
-};
-
-extern struct symbol_conf symbol_conf;
-
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
@@ -178,19 +116,6 @@ struct ref_reloc_sym {
u64 unrelocated_addr;
};
-struct map_symbol {
- struct map *map;
- struct symbol *sym;
-};
-
-struct addr_map_symbol {
- struct map *map;
- struct symbol *sym;
- u64 addr;
- u64 al_addr;
- u64 phys_addr;
-};
-
struct branch_info {
struct addr_map_symbol from;
struct addr_map_symbol to;
@@ -310,10 +235,11 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss);
char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name);
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel);
-void symbols__insert(struct rb_root *symbols, struct symbol *sym);
-void symbols__fixup_duplicate(struct rb_root *symbols);
-void symbols__fixup_end(struct rb_root *symbols);
+void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
+ bool kernel);
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
+void symbols__fixup_duplicate(struct rb_root_cached *symbols);
+void symbols__fixup_end(struct rb_root_cached *symbols);
void map_groups__fixup_end(struct map_groups *mg);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
new file mode 100644
index 000000000000..6c55fa6fccec
--- /dev/null
+++ b/tools/perf/util/symbol_conf.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_SYMBOL_CONF
+#define __PERF_SYMBOL_CONF 1
+
+#include <stdbool.h>
+
+struct strlist;
+struct intlist;
+
+struct symbol_conf {
+ bool nanosecs;
+ unsigned short priv_size;
+ bool try_vmlinux_path,
+ init_annotation,
+ force,
+ ignore_vmlinux,
+ ignore_vmlinux_buildid,
+ show_kernel_path,
+ use_modules,
+ allow_aliases,
+ sort_by_name,
+ show_nr_samples,
+ show_total_period,
+ use_callchain,
+ cumulate_callchain,
+ show_branchflag_count,
+ exclude_other,
+ show_cpu_utilization,
+ initialized,
+ kptr_restrict,
+ event_group,
+ demangle,
+ demangle_kernel,
+ filter_relative,
+ show_hist_headers,
+ branch_callstack,
+ has_filter,
+ show_ref_callgraph,
+ hide_unresolved,
+ raw_trace,
+ report_hierarchy,
+ inline_name;
+ const char *vmlinux_name,
+ *kallsyms_name,
+ *source_prefix,
+ *field_sep,
+ *graph_function;
+ const char *default_guest_vmlinux_name,
+ *default_guest_kallsyms,
+ *default_guest_modules;
+ const char *guestmount;
+ const char *dso_list_str,
+ *comm_list_str,
+ *pid_list_str,
+ *tid_list_str,
+ *sym_list_str,
+ *col_width_list_str,
+ *bt_stop_list_str;
+ unsigned long time_quantum;
+ struct strlist *dso_list,
+ *comm_list,
+ *sym_list,
+ *dso_from_list,
+ *dso_to_list,
+ *sym_from_list,
+ *sym_to_list,
+ *bt_stop_list;
+ struct intlist *pid_list,
+ *tid_list;
+ const char *symfs;
+ int res_sample;
+};
+
+extern struct symbol_conf symbol_conf;
+
+#endif // __PERF_SYMBOL_CONF
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index ed0205cc7942..02e89b02c2ce 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -3,6 +3,7 @@
#include <inttypes.h>
#include <stdio.h>
+#include "map.h"
#include "symbol.h"
size_t symbol__fprintf(struct symbol *sym, FILE *fp)
@@ -64,7 +65,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
struct rb_node *nd;
struct symbol_name_rb_node *pos;
- for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
fprintf(fp, "%s\n", pos->sym.name);
}
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index d52f27f373ce..41942c2aaa18 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -20,6 +20,7 @@
#include "thread.h"
#include "event.h"
#include "machine.h"
+#include "env.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
@@ -29,24 +30,41 @@
#define STACK_GROWTH 2048
+/*
+ * State of retpoline detection.
+ *
+ * RETPOLINE_NONE: no retpoline detection
+ * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
+ * X86_RETPOLINE_DETECTED: x86 retpoline detected
+ */
+enum retpoline_state_t {
+ RETPOLINE_NONE,
+ X86_RETPOLINE_POSSIBLE,
+ X86_RETPOLINE_DETECTED,
+};
+
/**
* struct thread_stack_entry - thread stack entry.
* @ret_addr: return address
* @timestamp: timestamp (if known)
* @ref: external reference (e.g. db_id of sample)
* @branch_count: the branch count when the entry was created
+ * @db_id: id used for db-export
* @cp: call path
* @no_call: a 'call' was not seen
* @trace_end: a 'call' but trace ended
+ * @non_call: a branch but not a 'call' to the start of a different symbol
*/
struct thread_stack_entry {
u64 ret_addr;
u64 timestamp;
u64 ref;
u64 branch_count;
+ u64 db_id;
struct call_path *cp;
bool no_call;
bool trace_end;
+ bool non_call;
};
/**
@@ -62,6 +80,7 @@ struct thread_stack_entry {
* @crp: call/return processor
* @comm: current comm
* @arr_sz: size of array if this is the first element of an array
+ * @rstate: used to detect retpolines
*/
struct thread_stack {
struct thread_stack_entry *stack;
@@ -74,6 +93,7 @@ struct thread_stack {
struct call_return_processor *crp;
struct comm *comm;
unsigned int arr_sz;
+ enum retpoline_state_t rstate;
};
/*
@@ -113,10 +133,16 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
if (err)
return err;
- if (thread->mg && thread->mg->machine)
- ts->kernel_start = machine__kernel_start(thread->mg->machine);
- else
+ if (thread->mg && thread->mg->machine) {
+ struct machine *machine = thread->mg->machine;
+ const char *arch = perf_env__arch(machine->env);
+
+ ts->kernel_start = machine__kernel_start(machine);
+ if (!strcmp(arch, "x86"))
+ ts->rstate = X86_RETPOLINE_POSSIBLE;
+ } else {
ts->kernel_start = 1ULL << 63;
+ }
ts->crp = crp;
return 0;
@@ -256,20 +282,31 @@ static int thread_stack__call_return(struct thread *thread,
.comm = ts->comm,
.db_id = 0,
};
+ u64 *parent_db_id;
tse = &ts->stack[idx];
cr.cp = tse->cp;
cr.call_time = tse->timestamp;
cr.return_time = timestamp;
cr.branch_count = ts->branch_count - tse->branch_count;
+ cr.db_id = tse->db_id;
cr.call_ref = tse->ref;
cr.return_ref = ref;
if (tse->no_call)
cr.flags |= CALL_RETURN_NO_CALL;
if (no_return)
cr.flags |= CALL_RETURN_NO_RETURN;
+ if (tse->non_call)
+ cr.flags |= CALL_RETURN_NON_CALL;
+
+ /*
+ * The parent db_id must be assigned before exporting the child. Note
+ * it is not possible to export the parent first: its information is
+ * not complete until its 'return' has been processed.
+ */
+ parent_db_id = idx ? &(tse - 1)->db_id : NULL;
- return crp->process(&cr, crp->data);
+ return crp->process(&cr, parent_db_id, crp->data);
}
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
@@ -458,7 +495,7 @@ void thread_stack__sample(struct thread *thread, int cpu,
}
struct call_return_processor *
-call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data)
{
struct call_return_processor *crp;
@@ -493,6 +530,9 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
struct thread_stack_entry *tse;
int err;
+ if (!cp)
+ return -ENOMEM;
+
if (ts->cnt == ts->sz) {
err = thread_stack__grow(ts);
if (err)
@@ -507,6 +547,8 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
tse->cp = cp;
tse->no_call = no_call;
tse->trace_end = trace_end;
+ tse->non_call = false;
+ tse->db_id = 0;
return 0;
}
@@ -528,14 +570,16 @@ static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
timestamp, ref, false);
}
- if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
+ if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
+ !ts->stack[ts->cnt - 1].non_call) {
return thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
} else {
size_t i = ts->cnt - 1;
while (i--) {
- if (ts->stack[i].ret_addr != ret_addr)
+ if (ts->stack[i].ret_addr != ret_addr ||
+ ts->stack[i].non_call)
continue;
i += 1;
while (ts->cnt > i) {
@@ -576,8 +620,6 @@ static int thread_stack__bottom(struct thread_stack *ts,
cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
true, false);
@@ -590,36 +632,36 @@ static int thread_stack__no_call_return(struct thread *thread,
struct addr_location *to_al, u64 ref)
{
struct call_path_root *cpr = ts->crp->cpr;
+ struct call_path *root = &cpr->call_path;
+ struct symbol *fsym = from_al->sym;
+ struct symbol *tsym = to_al->sym;
struct call_path *cp, *parent;
u64 ks = ts->kernel_start;
+ u64 addr = sample->addr;
+ u64 tm = sample->time;
+ u64 ip = sample->ip;
int err;
- if (sample->ip >= ks && sample->addr < ks) {
+ if (ip >= ks && addr < ks) {
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
- sample->time, ref,
- true);
+ tm, ref, true);
if (err)
return err;
}
/* If the stack is empty, push the userspace address */
if (!ts->cnt) {
- cp = call_path__findnew(cpr, &cpr->call_path,
- to_al->sym, sample->addr,
- ts->kernel_start);
- if (!cp)
- return -ENOMEM;
- return thread_stack__push_cp(ts, 0, sample->time, ref,
- cp, true, false);
+ cp = call_path__findnew(cpr, root, tsym, addr, ks);
+ return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
+ false);
}
- } else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
+ } else if (thread_stack__in_kernel(ts) && ip < ks) {
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
- sample->time, ref,
- true);
+ tm, ref, true);
if (err)
return err;
}
@@ -628,21 +670,59 @@ static int thread_stack__no_call_return(struct thread *thread,
if (ts->cnt)
parent = ts->stack[ts->cnt - 1].cp;
else
- parent = &cpr->call_path;
+ parent = root;
- /* This 'return' had no 'call', so push and pop top of stack */
- cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
- ts->kernel_start);
- if (!cp)
- return -ENOMEM;
+ if (parent->sym == from_al->sym) {
+ /*
+ * At the bottom of the stack, assume the missing 'call' was
+ * before the trace started. So, pop the current symbol and push
+ * the 'to' symbol.
+ */
+ if (ts->cnt == 1) {
+ err = thread_stack__call_return(thread, ts, --ts->cnt,
+ tm, ref, false);
+ if (err)
+ return err;
+ }
+
+ if (!ts->cnt) {
+ cp = call_path__findnew(cpr, root, tsym, addr, ks);
+
+ return thread_stack__push_cp(ts, addr, tm, ref, cp,
+ true, false);
+ }
+
+ /*
+ * Otherwise assume the 'return' is being used as a jump (e.g.
+ * retpoline) and just push the 'to' symbol.
+ */
+ cp = call_path__findnew(cpr, parent, tsym, addr, ks);
+
+ err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
+ if (!err)
+ ts->stack[ts->cnt - 1].non_call = true;
+
+ return err;
+ }
+
+ /*
+ * Assume 'parent' has not yet returned, so push 'to', and then push and
+ * pop 'from'.
+ */
+
+ cp = call_path__findnew(cpr, parent, tsym, addr, ks);
+
+ err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
+ if (err)
+ return err;
- err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
- true, false);
+ cp = call_path__findnew(cpr, cp, fsym, ip, ks);
+
+ err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
if (err)
return err;
- return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
- to_al->sym);
+ return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
}
static int thread_stack__trace_begin(struct thread *thread,
@@ -680,8 +760,6 @@ static int thread_stack__trace_end(struct thread_stack *ts,
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
ret_addr = sample->ip + sample->insn_len;
@@ -689,6 +767,70 @@ static int thread_stack__trace_end(struct thread_stack *ts,
false, true);
}
+static bool is_x86_retpoline(const char *name)
+{
+ const char *p = strstr(name, "__x86_indirect_thunk_");
+
+ return p == name || !strcmp(name, "__indirect_thunk_start");
+}
+
+/*
+ * x86 retpoline functions pollute the call graph. This function removes them.
+ * This does not handle function return thunks, nor is there any improvement
+ * for the handling of inline thunks or extern thunks.
+ */
+static int thread_stack__x86_retpoline(struct thread_stack *ts,
+ struct perf_sample *sample,
+ struct addr_location *to_al)
+{
+ struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
+ struct call_path_root *cpr = ts->crp->cpr;
+ struct symbol *sym = tse->cp->sym;
+ struct symbol *tsym = to_al->sym;
+ struct call_path *cp;
+
+ if (sym && is_x86_retpoline(sym->name)) {
+ /*
+ * This is a x86 retpoline fn. It pollutes the call graph by
+ * showing up everywhere there is an indirect branch, but does
+ * not itself mean anything. Here the top-of-stack is removed,
+ * by decrementing the stack count, and then further down, the
+ * resulting top-of-stack is replaced with the actual target.
+ * The result is that the retpoline functions will no longer
+ * appear in the call graph. Note this only affects the call
+ * graph, since all the original branches are left unchanged.
+ */
+ ts->cnt -= 1;
+ sym = ts->stack[ts->cnt - 2].cp->sym;
+ if (sym && sym == tsym && to_al->addr != tsym->start) {
+ /*
+ * Target is back to the middle of the symbol we came
+ * from so assume it is an indirect jmp and forget it
+ * altogether.
+ */
+ ts->cnt -= 1;
+ return 0;
+ }
+ } else if (sym && sym == tsym) {
+ /*
+ * Target is back to the symbol we came from so assume it is an
+ * indirect jmp and forget it altogether.
+ */
+ ts->cnt -= 1;
+ return 0;
+ }
+
+ cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
+ sample->addr, ts->kernel_start);
+ if (!cp)
+ return -ENOMEM;
+
+ /* Replace the top-of-stack with the actual target */
+ ts->stack[ts->cnt - 1].cp = cp;
+
+ return 0;
+}
+
int thread_stack__process(struct thread *thread, struct comm *comm,
struct perf_sample *sample,
struct addr_location *from_al,
@@ -696,6 +838,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
struct call_return_processor *crp)
{
struct thread_stack *ts = thread__stack(thread, sample->cpu);
+ enum retpoline_state_t rstate;
int err = 0;
if (ts && !ts->crp) {
@@ -711,6 +854,10 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
ts->comm = comm;
}
+ rstate = ts->rstate;
+ if (rstate == X86_RETPOLINE_DETECTED)
+ ts->rstate = X86_RETPOLINE_POSSIBLE;
+
/* Flush stack on exec */
if (ts->comm != comm && thread->pid_ == thread->tid) {
err = __thread_stack__flush(thread, ts);
@@ -745,14 +892,27 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
to_al->sym, sample->addr,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
cp, false, trace_end);
+
+ /*
+ * A call to the same symbol, but not to its start,
+ * may be the start of an x86 retpoline.
+ */
+ if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
+ from_al->sym == to_al->sym &&
+ to_al->addr != to_al->sym->start)
+ ts->rstate = X86_RETPOLINE_DETECTED;
+
} else if (sample->flags & PERF_IP_FLAG_RETURN) {
if (!sample->ip || !sample->addr)
return 0;
+ /* x86 retpoline 'return' doesn't match the stack */
+ if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
+ ts->stack[ts->cnt - 1].ret_addr != sample->addr)
+ return thread_stack__x86_retpoline(ts, sample, to_al);
+
err = thread_stack__pop_cp(thread, ts, sample->addr,
sample->time, ref, from_al->sym);
if (err) {
@@ -765,6 +925,25 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
err = thread_stack__trace_begin(thread, ts, sample->time, ref);
} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
err = thread_stack__trace_end(ts, sample, ref);
+ } else if (sample->flags & PERF_IP_FLAG_BRANCH &&
+ from_al->sym != to_al->sym && to_al->sym &&
+ to_al->addr == to_al->sym->start) {
+ struct call_path_root *cpr = ts->crp->cpr;
+ struct call_path *cp;
+
+ /*
+ * The compiler might optimize a call/ret combination by making
+ * it a jmp. Make that visible by recording on the stack a
+ * branch to the start of a different symbol. Note that this means
+ * when a ret pops the stack, all jmps must be popped off first.
+ */
+ cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
+ to_al->sym, sample->addr,
+ ts->kernel_start);
+ err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
+ false);
+ if (!err)
+ ts->stack[ts->cnt - 1].non_call = true;
}
return err;
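
Taken together, the retpoline handling is a tiny per-thread state machine: on x86 the stack starts in X86_RETPOLINE_POSSIBLE, a call landing mid-way into its own symbol flips it to X86_RETPOLINE_DETECTED for exactly one event, and a 'return' seen in that window whose address does not match the top of stack is rewritten by thread_stack__x86_retpoline() instead of popped. The transition logic, reduced to a hedged standalone sketch (helper names here are invented):

#include <stdbool.h>
#include <stdio.h>

enum retpoline_state_t {
	RETPOLINE_NONE,
	X86_RETPOLINE_POSSIBLE,
	X86_RETPOLINE_DETECTED,
};

/* Call event: a call to sym that does not land on sym's first byte
 * arms detection for the immediately following event. */
static enum retpoline_state_t on_call(enum retpoline_state_t s,
				      bool same_sym_mid_call)
{
	if (s == X86_RETPOLINE_POSSIBLE && same_sym_mid_call)
		return X86_RETPOLINE_DETECTED;
	return s;
}

/* Each new event consumes a DETECTED state, re-arming to POSSIBLE,
 * mirroring the rstate/ts->rstate handover in thread_stack__process(). */
static enum retpoline_state_t consume(enum retpoline_state_t s)
{
	return s == X86_RETPOLINE_DETECTED ? X86_RETPOLINE_POSSIBLE : s;
}

int main(void)
{
	enum retpoline_state_t s = X86_RETPOLINE_POSSIBLE;

	s = on_call(s, true);		/* looks like a retpoline entry */
	printf("after call: %d (2 == detected)\n", s);
	s = consume(s);			/* next event sees it, then re-arms */
	printf("after next event: %d (1 == possible)\n", s);
	return 0;
}
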
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index 1f626f4a1c40..9c45f947f5a9 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -35,10 +35,13 @@ struct call_path;
*
* CALL_RETURN_NO_CALL: 'return' but no matching 'call'
* CALL_RETURN_NO_RETURN: 'call' but no matching 'return'
+ * CALL_RETURN_NON_CALL: a branch but not a 'call' to the start of a different
+ * symbol
*/
enum {
CALL_RETURN_NO_CALL = 1 << 0,
CALL_RETURN_NO_RETURN = 1 << 1,
+ CALL_RETURN_NON_CALL = 1 << 2,
};
/**
@@ -52,6 +55,7 @@ enum {
* @call_ref: external reference to 'call' sample (e.g. db_id)
* @return_ref: external reference to 'return' sample (e.g. db_id)
* @db_id: id used for db-export
+ * @parent_db_id: id of parent call used for db-export
* @flags: Call/Return flags
*/
struct call_return {
@@ -64,6 +68,7 @@ struct call_return {
u64 call_ref;
u64 return_ref;
u64 db_id;
+ u64 parent_db_id;
u32 flags;
};
@@ -76,7 +81,7 @@ struct call_return {
*/
struct call_return_processor {
struct call_path_root *cpr;
- int (*process)(struct call_return *cr, void *data);
+ int (*process)(struct call_return *cr, u64 *parent_db_id, void *data);
void *data;
};
@@ -90,7 +95,7 @@ void thread_stack__free(struct thread *thread);
size_t thread_stack__depth(struct thread *thread, int cpu);
struct call_return_processor *
-call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data);
void call_return_processor__free(struct call_return_processor *crp);
int thread_stack__process(struct thread *thread, struct comm *comm,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index c83372329f89..50678d318185 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -12,6 +12,8 @@
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
+#include "map.h"
+#include "symbol.h"
#include "unwind.h"
#include <api/fs/fs.h>
@@ -392,3 +394,25 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
return machine__find_thread(machine, thread->pid_, thread->pid_);
}
+
+int thread__memcpy(struct thread *thread, struct machine *machine,
+ void *buf, u64 ip, int len, bool *is64bit)
+{
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ struct addr_location al;
+ long offset;
+
+ if (machine__kernel_ip(machine, ip))
+ cpumode = PERF_RECORD_MISC_KERNEL;
+
+ if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
+ al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
+ map__load(al.map) < 0)
+ return -1;
+
+ offset = al.map->map_ip(al.map, ip);
+ if (is64bit)
+ *is64bit = al.map->dso->is_64_bit;
+
+ return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
+}
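
thread__memcpy() wraps a common sequence (resolve the map for ip, refuse DSOs whose data failed to load, translate ip to a DSO file offset, read through the dso data cache) behind one call, for consumers that need raw target bytes, e.g. instruction decoding. A hedged usage sketch against the prototype added above; it compiles standalone, but linking needs perf's objects:

#include <stdbool.h>

typedef unsigned long long u64;	/* local shim; perf gets this elsewhere */
struct thread;
struct machine;

/* Prototype as added in thread.h above. */
int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit);

/* Grab the raw instruction bytes at a sampled ip, e.g. for a
 * disassembler; 16 is the maximum x86 instruction length. */
static int dump_insn_bytes(struct thread *thread, struct machine *machine,
			   u64 ip, unsigned char buf[16])
{
	bool is64bit;
	int len = thread__memcpy(thread, machine, buf, ip, 16, &is64bit);

	if (len <= 0)
		return -1;	/* ip unmapped, or DSO data unreadable */
	/* buf[0..len-1] holds target bytes; is64bit picks decode mode. */
	return len;
}
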
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 712dd48cc0ca..cf8375c017a0 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,14 +5,18 @@
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include <linux/list.h>
+#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
-#include "symbol.h"
-#include "map.h"
+#include "srccode.h"
+#include "symbol_conf.h"
#include <strlist.h>
#include <intlist.h>
#include "rwsem.h"
+struct addr_location;
+struct map;
+struct namespaces_event;
struct thread_stack;
struct unwind_libunwind_ops;
@@ -109,6 +113,9 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
struct addr_location *al);
+int thread__memcpy(struct thread *thread, struct machine *machine,
+ void *buf, u64 ip, int len, bool *is64bit);
+
static inline void *thread__priv(struct thread *thread)
{
return thread->priv;
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 6193b46050a5..20663a460df3 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -11,6 +11,8 @@
#include "perf.h"
#include "debug.h"
#include "time-utils.h"
+#include "session.h"
+#include "evlist.h"
int parse_nsec_time(const char *str, u64 *ptime)
{
@@ -374,7 +376,7 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
struct perf_time_interval *ptime;
int i;
- if ((timestamp == 0) || (num == 0))
+ if ((!ptime_buf) || (timestamp == 0) || (num == 0))
return false;
if (num == 1)
@@ -396,6 +398,53 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
return (i == num) ? true : false;
}
+int perf_time__parse_for_ranges(const char *time_str,
+ struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num)
+{
+ struct perf_time_interval *ptime_range;
+ int size, num, ret;
+
+ ptime_range = perf_time__range_alloc(time_str, &size);
+ if (!ptime_range)
+ return -ENOMEM;
+
+ if (perf_time__parse_str(ptime_range, time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ num = perf_time__percent_parse_str(
+ ptime_range, size,
+ time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (num < 0) {
+ pr_err("Invalid time string\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ num = 1;
+ }
+
+ *range_size = size;
+ *range_num = num;
+ *ranges = ptime_range;
+ return 0;
+
+error:
+ free(ptime_range);
+ return ret;
+}
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
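
The new helper folds the allocate/parse/percent-parse sequence that builtins previously open-coded into one call. A hedged sketch of the intended call pattern (caller names are illustrative):

	/* Illustrative: time_str may be absolute ("0,5-10") or percent-based
	 * ("10%/1"); the latter needs first/last sample times in the header. */
	static int setup_time_filter(const char *time_str,
				     struct perf_session *session,
				     struct perf_time_interval **ranges,
				     int *size, int *num)
	{
		if (!time_str)
			return 0; /* no time filter requested */

		return perf_time__parse_for_ranges(time_str, session,
						   ranges, size, num);
	}

Together with the new NULL check in perf_time__ranges_skip_sample(), a caller that never set up ranges can pass NULL and no samples will be skipped.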
@@ -404,6 +453,14 @@ int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec);
}
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz)
+{
+ u64 sec = timestamp / NSEC_PER_SEC,
+ nsec = timestamp % NSEC_PER_SEC;
+
+ return scnprintf(buf, sz, "%" PRIu64 ".%09" PRIu64, sec, nsec);
+}
+
int fetch_current_timestamp(char *buf, size_t sz)
{
struct timeval tv;
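
A quick sketch of the nanosecond formatter next to its existing microsecond sibling; the expected output is shown as a comment, assuming a raw perf u64 nanosecond timestamp:

	char buf[32];

	/* 1568894269457120999 -> "1568894269.457120999" */
	timestamp__scnprintf_nsec(1568894269457120999ULL, buf, sizeof(buf));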
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index 70b177d2b98c..72a42ea1d513 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -23,7 +23,14 @@ bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp);
bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
int num, u64 timestamp);
+struct perf_session;
+
+int perf_time__parse_for_ranges(const char *str, struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num);
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz);
int fetch_current_timestamp(char *buf, size_t sz);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 56e4ca54020a..250391672f9f 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -53,7 +53,10 @@ struct perf_tool {
itrace_start,
context_switch,
throttle,
- unthrottle;
+ unthrottle,
+ ksymbol,
+ bpf_event;
+
event_attr_op attr;
event_attr_op event_update;
event_op2 tracing_data;
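
The two new members follow the existing event_op convention, so a tool opts in by pointing them at handlers with the usual (tool, event, sample, machine) signature. A hedged sketch with a stub handler (a real builtin would presumably wire these to its PERF_RECORD_KSYMBOL / PERF_RECORD_BPF_EVENT processing):

	/* Illustrative stub; a real handler would record the kernel symbol
	 * or BPF program load/unload described by the event. */
	static int stub_handler(struct perf_tool *tool __maybe_unused,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
	{
		return 0;
	}

	static struct perf_tool tool = {
		.ksymbol	= stub_handler,
		.bpf_event	= stub_handler,	/* same event_op signature */
	};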
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 5eff9bfc5758..407d0167b942 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -8,6 +8,8 @@
#include "unwind.h"
#include "unwind-libdw.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "thread.h"
#include <linux/types.h>
#include "event.h"
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 79f521a552cf..f3c666a84e4d 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -34,6 +34,7 @@
#include "session.h"
#include "perf_regs.h"
#include "unwind.h"
+#include "map.h"
#include "symbol.h"
#include "util.h"
#include "debug.h"
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index b029a5e9ae49..9778b3133b77 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
+#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 093352e93d50..d388f80d8703 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -2,6 +2,7 @@
#include "../perf.h"
#include "util.h"
#include "debug.h"
+#include "namespaces.h"
#include <api/fs/fs.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -20,6 +21,7 @@
#include <linux/time64.h>
#include <unistd.h>
#include "strlist.h"
+#include "string2.h"
/*
* XXX We need to find a better place for these things...
@@ -116,23 +118,67 @@ int mkdir_p(char *path, mode_t mode)
return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}
-int rm_rf(const char *path)
+static bool match_pat(char *file, const char **pat)
+{
+ int i = 0;
+
+ if (!pat)
+ return true;
+
+ while (pat[i]) {
+ if (strglobmatch(file, pat[i]))
+ return true;
+
+ i++;
+ }
+
+ return false;
+}
+
+/*
+ * The depth specifies how deep the removal will go:
+ * 0      - remove only files directly under the 'path' directory
+ * 1 .. x - descend up to x levels below the 'path' directory
+ *
+ * If specified, pat is a NULL-terminated array of string patterns
+ * that every file/directory found is checked against. Only matching
+ * entries are removed.
+ *
+ * The function returns:
+ *  0 on success
+ * -1 on removal failure with errno set
+ * -2 on pattern failure (an entry did not match any pattern)
+ */
+static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
{
DIR *dir;
- int ret = 0;
+ int ret;
struct dirent *d;
char namebuf[PATH_MAX];
+ struct stat statbuf;
+ /* Do not fail if there's no file. */
+ ret = lstat(path, &statbuf);
+ if (ret)
+ return 0;
+
+ /* Try to remove any file we get. */
+ if (!(statbuf.st_mode & S_IFDIR))
+ return unlink(path);
+
+ /* We have a directory in path. */
dir = opendir(path);
if (dir == NULL)
- return 0;
+ return -1;
while ((d = readdir(dir)) != NULL && !ret) {
- struct stat statbuf;
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
+ if (!match_pat(d->d_name, pat)) {
+ closedir(dir);
+ return -2;
+ }
+
scnprintf(namebuf, sizeof(namebuf), "%s/%s",
path, d->d_name);
@@ -144,7 +190,7 @@ int rm_rf(const char *path)
}
if (S_ISDIR(statbuf.st_mode))
- ret = rm_rf(namebuf);
+ ret = depth ? rm_rf_depth_pat(namebuf, depth - 1, pat) : 0;
else
ret = unlink(namebuf);
}
@@ -156,6 +202,22 @@ int rm_rf(const char *path)
return rmdir(path);
}
+int rm_rf_perf_data(const char *path)
+{
+ const char *pat[] = {
+ "header",
+ "data.*",
+ NULL,
+ };
+
+ return rm_rf_depth_pat(path, 0, pat);
+}
+
+int rm_rf(const char *path)
+{
+ return rm_rf_depth_pat(path, INT_MAX, NULL);
+}
+
/* A filter which removes dot files */
bool lsdir_no_dot_filter(const char *name __maybe_unused, struct dirent *d)
{
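
Taken together: rm_rf() keeps its old contract via INT_MAX depth and no pattern list, while rm_rf_perf_data() refuses to touch anything but the expected top-level files of a perf.data directory. An illustrative caller (pr_err assumed available from debug.h):

	static int remove_data_dir(const char *path)
	{
		int err = rm_rf_perf_data(path);

		if (err == -2) /* pattern failure: unexpected entry found */
			pr_err("unexpected file in %s, not removing\n", path);
		return err;
	}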
@@ -506,3 +568,13 @@ out:
return tip;
}
+
+char *perf_exe(char *buf, int len)
+{
+ int n = readlink("/proc/self/exe", buf, len - 1); /* leave room for the NUL */
+ if (n > 0) {
+ buf[n] = 0;
+ return buf;
+ }
+ return strcpy(buf, "perf");
+}
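
perf_exe() is a small convenience around readlink(2) on /proc/self/exe with a fixed fallback. Typical use (sketch; PATH_MAX from limits.h):

	char path[PATH_MAX];

	/* prints "perf" if /proc/self/exe cannot be resolved */
	printf("running binary: %s\n", perf_exe(path, sizeof(path)));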
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index ece040b799f6..09c1b0f91f65 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -31,6 +31,7 @@ struct strlist;
int mkdir_p(char *path, mode_t mode);
int rm_rf(const char *path);
+int rm_rf_perf_data(const char *path);
struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
bool lsdir_no_dot_filter(const char *name, struct dirent *d);
int copyfile(const char *from, const char *to);
@@ -76,6 +77,8 @@ extern bool perf_singlethreaded;
void perf_set_singlethreaded(void);
void perf_set_multithreaded(void);
+char *perf_exe(char *buf, int len);
+
#ifndef O_CLOEXEC
#ifdef __sparc__
#define O_CLOEXEC 0x400000
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 3702cba11d7d..5031b7b22bbd 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -11,6 +11,7 @@
#include "vdso.h"
#include "util.h"
+#include "map.h"
#include "symbol.h"
#include "machine.h"
#include "thread.h"
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 902ce6384f57..512ad7c09b13 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -6,7 +6,6 @@
#include <sys/mman.h>
#include <zlib.h>
#include <linux/compiler.h>
-#include <unistd.h>
#include "util/compress.h"
#include "util/util.h"