Diffstat (limited to 'tools/perf/scripts')
-rw-r--r--  tools/perf/scripts/python/check-perf-trace.py       |  76
-rw-r--r--  tools/perf/scripts/python/compaction-times.py       |   8
-rw-r--r--  tools/perf/scripts/python/event_analyzing_sample.py |  48
-rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py   |  77
-rw-r--r--  tools/perf/scripts/python/export-to-sqlite.py       |  38
-rwxr-xr-x  tools/perf/scripts/python/exported-sql-viewer.py    | 473
-rw-r--r--  tools/perf/scripts/python/failed-syscalls-by-pid.py |  38
-rw-r--r--  tools/perf/scripts/python/futex-contention.py       |  10
-rw-r--r--  tools/perf/scripts/python/intel-pt-events.py        |  60
-rw-r--r--  tools/perf/scripts/python/mem-phys-addr.py          |   7
-rwxr-xr-x  tools/perf/scripts/python/net_dropmonitor.py        |   2
-rw-r--r--  tools/perf/scripts/python/netdev-times.py           |  12
-rw-r--r--  tools/perf/scripts/python/sched-migration.py        |   6
-rw-r--r--  tools/perf/scripts/python/sctop.py                  |  13
-rwxr-xr-x  tools/perf/scripts/python/stackcollapse.py          |   2
-rw-r--r--  tools/perf/scripts/python/syscall-counts-by-pid.py  |  47
-rw-r--r--  tools/perf/scripts/python/syscall-counts.py         |  31
17 files changed, 636 insertions(+), 312 deletions(-)
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 334599c6032c..d2c22954800d 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -7,6 +7,8 @@
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
+from __future__ import print_function
+
import os
import sys
@@ -19,64 +21,64 @@ from perf_trace_context import *
unhandled = autodict()
def trace_begin():
- print "trace_begin"
+ print("trace_begin")
pass
def trace_end():
- print_unhandled()
+ print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, vec):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "vec=%s\n" % \
- (symbol_str("irq__softirq_entry", "vec", vec)),
+ print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))
def kmem__kmalloc(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, call_site, ptr, bytes_req, bytes_alloc,
- gfp_flags):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "call_site=%u, ptr=%u, bytes_req=%u, " \
- "bytes_alloc=%u, gfp_flags=%s\n" % \
+ print("call_site=%u, ptr=%u, bytes_req=%u, "
+ "bytes_alloc=%u, gfp_flags=%s" %
(call_site, ptr, bytes_req, bytes_alloc,
-
- flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))
def trace_unhandled(event_name, context, event_fields_dict):
- try:
- unhandled[event_name] += 1
- except TypeError:
- unhandled[event_name] = 1
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
- print "%-20s %5u %05u.%09u %8u %-20s " % \
- (event_name, cpu, secs, nsecs, pid, comm),
+ print("%-20s %5u %05u.%09u %8u %-20s " %
+ (event_name, cpu, secs, nsecs, pid, comm),
+ end=' ')
# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
- % (common_pc(context), trace_flag_str(common_flags(context)), \
- common_lock_depth(context))
+ print("common_preempt_count=%d, common_flags=%s, "
+ "common_lock_depth=%d, " %
+ (common_pc(context), trace_flag_str(common_flags(context)),
+ common_lock_depth(context)))
def print_unhandled():
- keys = unhandled.keys()
- if not keys:
- return
+ keys = unhandled.keys()
+ if not keys:
+ return
- print "\nunhandled events:\n\n",
+ print("\nunhandled events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
- for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ for event_name in keys:
+ print("%-40s %10d\n" % (event_name, unhandled[event_name]))
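The hunk above is representative of the whole series: from __future__ import print_function makes print a function on Python 2 as well, and the old trailing-comma form (which suppressed the newline) becomes an explicit end=' ' keyword argument. A minimal sketch of the pattern, runnable on either interpreter; the value is hypothetical:

    from __future__ import print_function

    name = "irq__softirq_entry"        # hypothetical example value
    # Python 2 only:  print "%-20s" % (name),   <- trailing comma, no newline
    # Both versions:
    print("%-20s" % name, end=' ')     # end=' ' replaces the trailing comma
    print("(continues on the same line)")
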
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 239cb0568ec3..2560a042dc6f 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -216,15 +216,15 @@ def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
pair(nr_migrated, nr_failed), None, None)
def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)
def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 4e843b9864ec..aa1e2cfa26a6 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -15,6 +15,8 @@
# for a x86 HW PMU event: PEBS with load latency data.
#
+from __future__ import print_function
+
import os
import sys
import math
@@ -37,7 +39,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
- print "In trace_begin:\n"
+ print("In trace_begin:\n")
#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -76,12 +78,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if ("dso" in param_dict):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
- if (param_dict.has_key("symbol")):
+ if ("symbol" in param_dict):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
@@ -102,7 +104,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
- print "In trace_end:\n"
+ print("In trace_end:\n")
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -123,29 +125,29 @@ def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
- print "There is %d records in gen_events table" % t[0]
+ print("There is %d records in gen_events table" % t[0])
if t[0] == 0:
return
- print "Statistics about the general events grouped by thread/symbol/dso: \n"
+ print("Statistics about the general events grouped by thread/symbol/dso: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dso
- print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
- print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))
#
# This function just shows the basic info, and we could do more with the
@@ -156,35 +158,35 @@ def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
- print "There is %d records in pebs_ll table" % t[0]
+ print("There is %d records in pebs_ll table" % t[0])
if t[0] == 0:
return
- print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+ print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
- print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
for row in dseq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
- print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
for row in latq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
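The has_key() conversion earlier in this file matters because dict.has_key() was removed outright in Python 3, whereas the in operator means the same thing on both interpreters. A small sketch with a hypothetical sample dict:

    param_dict = {"dso": "ld-2.19.so"}     # hypothetical sample fields
    if "dso" in param_dict:                # works on Python 2 and 3
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"
    # Equivalent one-liner, also version-neutral:
    dso = param_dict.get("dso", "Unknown_dso")
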
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 30130213da7e..c3eae1d77d36 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -199,6 +201,18 @@ import datetime
from PySide.QtSql import *
+if sys.version_info < (3, 0):
+ def toserverstr(str):
+ return str
+ def toclientstr(str):
+ return str
+else:
+ # Assume UTF-8 server_encoding and client_encoding
+ def toserverstr(str):
+ return bytes(str, "UTF_8")
+ def toclientstr(str):
+ return bytes(str, "UTF_8")
+
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
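The toserverstr()/toclientstr() shims exist because Python 3 str is Unicode text, while the libpq entry points reached through ctypes and the binary COPY stream both want raw bytes; the script assumes UTF-8 encoding on both the server and client side. A minimal illustration of the distinction, with a hypothetical connection string:

    import sys

    s = "dbname = pt_example"          # hypothetical connection string
    if sys.version_info < (3, 0):
        b = s                          # Python 2: str is already bytes
    else:
        b = bytes(s, "UTF_8")          # Python 3: encode explicitly
    # A ctypes char* argument such as PQconnectdb() needs b, not s, on Python 3.
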
@@ -234,12 +248,17 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
+def printerr(*args, **kw_args):
+ print(*args, file=sys.stderr, **kw_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
def usage():
- print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+ printerr("where: columns 'all' or 'branches'")
+ printerr(" calls 'calls' => create calls and call_paths table")
+ printerr(" callchains 'callchains' => create call_paths table")
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -273,7 +292,7 @@ def do_query(q, s):
return
raise Exception("Query failed: " + q.lastError().text())
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database...")
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
@@ -394,7 +413,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
@@ -478,8 +498,9 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
- 'parent_call_path_id'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -504,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
' FROM samples')
-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
- file = open(path_name, "w+")
+ file = open(path_name, "wb+")
file.write(file_header)
return file
@@ -524,13 +545,13 @@ def copy_output_file_direct(file, table_name):
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
- conn = PQconnectdb("dbname = " + dbname)
+ conn = PQconnectdb(toclientstr("dbname = " + dbname))
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
- res = PQexec(conn, sql)
+ res = PQexec(conn, toclientstr(sql))
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
@@ -564,7 +585,7 @@ if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
def trace_begin():
- print datetime.datetime.today(), "Writing to intermediate files..."
+ printdate("Writing to intermediate files...")
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
@@ -575,11 +596,12 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def trace_end():
- print datetime.datetime.today(), "Copying to database..."
+ printdate("Copying to database...")
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
@@ -594,7 +616,7 @@ def trace_end():
if perf_db_export_calls:
copy_output_file(call_file, "calls")
- print datetime.datetime.today(), "Removing intermediate files..."
+ printdate("Removing intermediate files...")
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
@@ -609,7 +631,7 @@ def trace_end():
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
- print datetime.datetime.today(), "Adding primary keys"
+ printdate("Adding primary keys")
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -624,7 +646,7 @@ def trace_end():
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
- print datetime.datetime.today(), "Adding foreign keys"
+ printdate("Adding foreign keys")
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -657,10 +679,11 @@ def trace_end():
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -670,12 +693,14 @@ def sched__sched_switch(*x):
pass
def evsel_table(evsel_id, evsel_name, *x):
+ evsel_name = toserverstr(evsel_name)
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
+ root_dir = toserverstr(root_dir)
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -686,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
+ comm_str = toserverstr(comm_str)
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -697,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+ short_name = toserverstr(short_name)
+ long_name = toserverstr(long_name)
+ build_id = toserverstr(build_id)
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
@@ -705,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+ symbol_name = toserverstr(symbol_name)
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
+ name = toserverstr(name)
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
@@ -728,7 +759,7 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
-def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
- fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
- value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
+def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x):
+ fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq"
+ value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id)
call_file.write(value)
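The widened pack format in call_return_table() follows PostgreSQL's binary COPY row layout: a 16-bit field count, then for every field a 32-bit byte length followed by the raw value, all in network byte order. That is why the count goes from 11 to 12 and the format string gains an extra "iq" (length 8, then the bigint parent_id). A sketch of the row layout with a hypothetical two-field row, not the script's real schema:

    import struct

    # One PGCOPY binary row: int16 field count, then per field an int32
    # byte length and the value itself, big-endian (the leading '!').
    row = struct.pack("!hiqii",
                      2,        # this hypothetical row has 2 fields
                      8, 42,    # field 1: length 8, a bigint value
                      4, 7)     # field 2: length 4, an integer value
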
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index ed237f2ed03f..3b71902a5a21 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -60,11 +62,17 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
def usage():
- print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
+ printerr("where: columns 'all' or 'branches'");
+ printerr(" calls 'calls' => create calls and call_paths table");
+ printerr(" callchains 'callchains' => create call_paths table");
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -100,7 +108,7 @@ def do_query_(q):
return
raise Exception("Query failed: " + q.lastError().text())
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database ...")
db_exists = False
try:
@@ -222,7 +230,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
@@ -321,7 +330,8 @@ if perf_db_export_calls:
'call_id,'
'return_id,'
'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
- 'parent_call_path_id'
+ 'parent_call_path_id,'
+ 'parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -373,10 +383,10 @@ if perf_db_export_calls or perf_db_export_callchains:
call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
call_query = QSqlQuery(db)
- call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+ call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
def trace_begin():
- print datetime.datetime.today(), "Writing records..."
+ printdate("Writing records...")
do_query(query, 'BEGIN TRANSACTION')
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
@@ -388,19 +398,21 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def trace_end():
do_query(query, 'END TRANSACTION')
- print datetime.datetime.today(), "Adding indexes"
+ printdate("Adding indexes")
if perf_db_export_calls:
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -452,4 +464,4 @@ def call_path_table(*x):
bind_exec(call_path_query, 4, x)
def call_return_table(*x):
- bind_exec(call_query, 11, x)
+ bind_exec(call_query, 12, x)
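Besides the extra "?" placeholder, the new parent_id column gets its own index so that looking up the direct children of a call is cheap; that is the access pattern the call tree report added later in this series issues repeatedly. A self-contained sketch of the pattern, using a throwaway in-memory table rather than the script's real schema:

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE calls (id bigint, parent_id bigint, call_time bigint)")
    con.execute("CREATE INDEX pid_idx ON calls (parent_id)")
    con.executemany("INSERT INTO calls VALUES (?, ?, ?)",
                    [(2, 1, 100), (3, 1, 250), (4, 2, 110)])
    # Fetch the direct children of call 1, ordered as the call tree shows them:
    for row in con.execute("SELECT id, call_time FROM calls"
                           " WHERE parent_id = ? ORDER BY call_time, id", (1,)):
        print(row)
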
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 09ce73b07d35..74ef92f1d19a 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -88,20 +88,39 @@
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+from __future__ import print_function
+
import sys
import weakref
import threading
import string
-import cPickle
+try:
+ # Python2
+ import cPickle as pickle
+ # size of pickled integer big enough for record size
+ glb_nsz = 8
+except ImportError:
+ import pickle
+ glb_nsz = 16
import re
import os
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
+pyside_version_1 = True
from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
+# xrange is range in Python3
+try:
+ xrange
+except NameError:
+ xrange = range
+
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
# Data formatting helpers
def tohex(ip):
@@ -167,9 +186,10 @@ class Thread(QThread):
class TreeModel(QAbstractItemModel):
- def __init__(self, root, parent=None):
+ def __init__(self, glb, parent=None):
super(TreeModel, self).__init__(parent)
- self.root = root
+ self.glb = glb
+ self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
@@ -557,24 +577,12 @@ class CallGraphRootItem(CallGraphLevelItemBase):
self.child_items.append(child_item)
self.child_count += 1
-# Context-sensitive call graph data model
+# Context-sensitive call graph data model base
-class CallGraphModel(TreeModel):
+class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
- super(CallGraphModel, self).__init__(CallGraphRootItem(glb), parent)
- self.glb = glb
-
- def columnCount(self, parent=None):
- return 7
-
- def columnHeader(self, column):
- headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
- return headers[column]
-
- def columnAlignment(self, column):
- alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
- return alignment[column]
+ super(CallGraphModelBase, self).__init__(glb, parent)
def FindSelect(self, value, pattern, query):
if pattern:
@@ -594,34 +602,7 @@ class CallGraphModel(TreeModel):
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
- QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
- " FROM calls"
- " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
- " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
- " WHERE symbols.name" + match +
- " GROUP BY comm_id, thread_id, call_path_id"
- " ORDER BY comm_id, thread_id, call_path_id")
-
- def FindPath(self, query):
- # Turn the query result into a list of ids that the tree view can walk
- # to open the tree at the right place.
- ids = []
- parent_id = query.value(0)
- while parent_id:
- ids.insert(0, parent_id)
- q2 = QSqlQuery(self.glb.db)
- QueryExec(q2, "SELECT parent_id"
- " FROM call_paths"
- " WHERE id = " + str(parent_id))
- if not q2.next():
- break
- parent_id = q2.value(0)
- # The call path root is not used
- if ids[0] == 1:
- del ids[0]
- ids.insert(0, query.value(2))
- ids.insert(0, query.value(1))
- return ids
+ self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
@@ -675,6 +656,201 @@ class CallGraphModel(TreeModel):
def FindDone(self, thread, callback, ids):
callback(ids)
+# Context-sensitive call graph data model
+
+class CallGraphModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallGraphRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " GROUP BY comm_id, thread_id, call_path_id"
+ " ORDER BY comm_id, thread_id, call_path_id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM call_paths"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ # The call path root is not used
+ if ids[0] == 1:
+ del ids[0]
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
+# Call tree data model level 2+ item base
+
+class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
+ super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
+ self.comm_id = comm_id
+ self.thread_id = thread_id
+ self.calls_id = calls_id
+ self.branch_count = branch_count
+ self.time = time
+
+ def Select(self):
+ self.query_done = True;
+ if self.calls_id == 0:
+ comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
+ else:
+ comm_thread = ""
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
+ " ORDER BY call_time, calls.id")
+ while query.next():
+ child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model level three item
+
+class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
+ super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
+ dso = dsoname(dso)
+ self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+ self.dbid = calls_id
+
+# Call tree data model level two item
+
+class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
+ super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
+ self.dbid = thread_id
+
+ def Select(self):
+ super(CallTreeLevelTwoItem, self).Select()
+ for child_item in self.child_items:
+ self.time += child_item.time
+ self.branch_count += child_item.branch_count
+ for child_item in self.child_items:
+ child_item.data[4] = PercentToOneDP(child_item.time, self.time)
+ child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
+
+# Call tree data model level one item
+
+class CallTreeLevelOneItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, comm, parent_item):
+ super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
+ self.data = [comm, "", "", "", "", "", ""]
+ self.dbid = comm_id
+
+ def Select(self):
+ self.query_done = True;
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT thread_id, pid, tid"
+ " FROM comm_threads"
+ " INNER JOIN threads ON thread_id = threads.id"
+ " WHERE comm_id = " + str(self.dbid))
+ while query.next():
+ child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model root item
+
+class CallTreeRootItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb):
+ super(CallTreeRootItem, self).__init__(glb, 0, None)
+ self.dbid = 0
+ self.query_done = True;
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms")
+ while query.next():
+ if not query.value(0):
+ continue
+ child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call Tree data model
+
+class CallTreeModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallTreeRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT calls.id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " ORDER BY comm_id, thread_id, call_time, calls.id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM calls"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
# Vertical widget layout
class VBox():
@@ -693,28 +869,16 @@ class VBox():
def Widget(self):
return self.vbox
-# Context-sensitive call graph window
-
-class CallGraphWindow(QMdiSubWindow):
+# Tree window base
- def __init__(self, glb, parent=None):
- super(CallGraphWindow, self).__init__(parent)
-
- self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
-
- self.view = QTreeView()
- self.view.setModel(self.model)
-
- for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
- self.view.setColumnWidth(c, w)
-
- self.find_bar = FindBar(self, self)
+class TreeWindowBase(QMdiSubWindow):
- self.vbox = VBox(self.view, self.find_bar.Widget())
-
- self.setWidget(self.vbox.Widget())
+ def __init__(self, parent=None):
+ super(TreeWindowBase, self).__init__(parent)
- AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+ self.model = None
+ self.view = None
+ self.find_bar = None
def DisplayFound(self, ids):
if not len(ids):
@@ -747,6 +911,53 @@ class CallGraphWindow(QMdiSubWindow):
if not found:
self.find_bar.NotFound()
+
+# Context-sensitive call graph window
+
+class CallGraphWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
+
+ self.view = QTreeView()
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+
+# Call tree window
+
+class CallTreeWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
+
+ self.view = QTreeView()
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
+
# Child data item finder
class ChildDataItemFinder():
@@ -812,10 +1023,6 @@ class ChildDataItemFinder():
glb_chunk_sz = 10000
-# size of pickled integer big enough for record size
-
-glb_nsz = 8
-
# Background process for SQL data fetcher
class SQLFetcherProcess():
@@ -874,7 +1081,7 @@ class SQLFetcherProcess():
return True
if space >= glb_nsz:
# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
- nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
@@ -892,9 +1099,9 @@ class SQLFetcherProcess():
self.wait_event.wait()
def AddToBuffer(self, obj):
- d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+ d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
- nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
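Each record in the shared ring buffer is prefixed by its pickled length, written into a fixed slot of glb_nsz bytes, so glb_nsz must bound the pickled size of an integer; Python 3 pickle protocols can produce longer encodings than cPickle's protocol 2, hence 16 instead of 8. A quick way to see the sizes involved on whichever interpreter is running:

    import pickle

    for n in (0, 255, 10 ** 6, 10 ** 12):
        print(n, len(pickle.dumps(n, pickle.HIGHEST_PROTOCOL)))
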
@@ -1006,12 +1213,12 @@ class SQLFetcher(QObject):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
- n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+ n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
- n = cPickle.loads(self.buffer[0 : glb_nsz])
+ n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
- obj = cPickle.loads(self.buffer[pos : pos + n])
+ obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
@@ -1320,6 +1527,19 @@ def BranchDataPrep(query):
" (" + dsoname(query.value(15)) + ")")
return data
+def BranchDataPrepWA(query):
+ data = []
+ data.append(query.value(0))
+ # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, 8):
+ data.append(query.value(i))
+ data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+ " (" + dsoname(query.value(11)) + ")" + " -> " +
+ tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+ " (" + dsoname(query.value(15)) + ")")
+ return data
+
# Branch data model
class BranchModel(TreeModel):
@@ -1327,8 +1547,7 @@ class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
- super(BranchModel, self).__init__(BranchRootItem(), parent)
- self.glb = glb
+ super(BranchModel, self).__init__(glb, parent)
self.event_id = event_id
self.more = True
self.populated = 0
@@ -1348,10 +1567,17 @@ class BranchModel(TreeModel):
" AND evsel_id = " + str(self.event_id) +
" ORDER BY samples.id"
" LIMIT " + str(glb_chunk_sz))
- self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample)
+ if pyside_version_1 and sys.version_info[0] == 3:
+ prep = BranchDataPrepWA
+ else:
+ prep = BranchDataPrep
+ self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
+ def GetRoot(self):
+ return BranchRootItem()
+
def columnCount(self, parent=None):
return 8
@@ -1863,22 +2089,14 @@ def GetEventList(db):
# Is a table selectable
-def IsSelectable(db, table):
+def IsSelectable(db, table, sql = ""):
query = QSqlQuery(db)
try:
- QueryExec(query, "SELECT * FROM " + table + " LIMIT 1")
+ QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
except:
return False
return True
-# SQL data preparation
-
-def SQLTableDataPrep(query, count):
- data = []
- for i in xrange(count):
- data.append(query.value(i))
- return data
-
# SQL table data model item
class SQLTableItem():
@@ -1902,7 +2120,7 @@ class SQLTableModel(TableModel):
self.more = True
self.populated = 0
self.column_headers = column_headers
- self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
+ self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
@@ -1946,6 +2164,12 @@ class SQLTableModel(TableModel):
def columnHeader(self, column):
return self.column_headers[column]
+ def SQLTableDataPrep(self, query, count):
+ data = []
+ for i in xrange(count):
+ data.append(query.value(i))
+ return data
+
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
@@ -1974,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel):
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
column_headers.append(query.value(0))
+ if pyside_version_1 and sys.version_info[0] == 3:
+ if table_name == "samples_view":
+ self.SQLTableDataPrep = self.samples_view_DataPrep
+ if table_name == "samples":
+ self.SQLTableDataPrep = self.samples_DataPrep
super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
+ def samples_view_DataPrep(self, query, count):
+ data = []
+ data.append(query.value(0))
+ # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, count):
+ data.append(query.value(i))
+ return data
+
+ def samples_DataPrep(self, query, count):
+ data = []
+ for i in xrange(9):
+ data.append(query.value(i))
+ # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(9)))
+ for i in xrange(10, count):
+ data.append(query.value(i))
+ return data
+
# Base class for custom ResizeColumnsToContents
class ResizeColumnsToContentsBase(QObject):
@@ -2275,9 +2523,10 @@ p.c2 {
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
-<p class=c2><a href=#allbranches>1.2 All branches</a></p>
-<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
-<p class=c2><a href=#topcallsbyelapsedtime>1.4 Top calls by elapsed time</a></p>
+<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
+<p class=c2><a href=#allbranches>1.3 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
+<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
@@ -2313,7 +2562,10 @@ v- ls
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
-<h2 id=allbranches>1.2 All branches</h2>
+<h2 id=calltree>1.2 Call Tree</h2>
+The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
+Also the 'Count' column, which would be always 1, is replaced by the 'Call Time'.
+<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
@@ -2339,10 +2591,10 @@ sudo ldconfig
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
-<h2 id=selectedbranches>1.3 Selected branches</h2>
+<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
-<h3>1.3.1 Time ranges</h3>
+<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
@@ -2353,7 +2605,7 @@ ms, us or ns. Also, negative values are relative to the end of trace. Examples:
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
-<h2 id=topcallsbyelapsedtime>1.4 Top calls by elapsed time</h2>
+<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
@@ -2489,6 +2741,9 @@ class MainWindow(QMainWindow):
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+ if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
+ reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
+
self.EventMenu(GetEventList(glb.db), reports_menu)
if IsSelectable(glb.db, "calls"):
@@ -2549,6 +2804,9 @@ class MainWindow(QMainWindow):
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
+ def NewCallTree(self):
+ CallTreeWindow(self.glb, self)
+
def NewTopCalls(self):
dialog = TopCallsDialog(self.glb, self)
ret = dialog.exec_()
@@ -2650,9 +2908,13 @@ class LibXED():
ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
if not ok:
return 0, ""
+ if sys.version_info[0] == 2:
+ result = inst.buffer.value
+ else:
+ result = inst.buffer.value.decode()
# Return instruction length and the disassembled instruction text
# For now, assume the length is in byte 166
- return inst.xedd[166], inst.buffer.value
+ return inst.xedd[166], result
def TryOpen(file_name):
try:
@@ -2668,9 +2930,14 @@ def Is64Bit(f):
header = f.read(7)
f.seek(pos)
magic = header[0:4]
- eclass = ord(header[4])
- encoding = ord(header[5])
- version = ord(header[6])
+ if sys.version_info[0] == 2:
+ eclass = ord(header[4])
+ encoding = ord(header[5])
+ version = ord(header[6])
+ else:
+ eclass = header[4]
+ encoding = header[5]
+ version = header[6]
if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
result = True if eclass == 2 else False
return result
@@ -2769,7 +3036,7 @@ class DBRef():
def Main():
if (len(sys.argv) < 2):
- print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+ printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
raise Exception("Too few arguments")
dbname = sys.argv[1]
@@ -2782,8 +3049,8 @@ def Main():
is_sqlite3 = False
try:
- f = open(dbname)
- if f.read(15) == "SQLite format 3":
+ f = open(dbname, "rb")
+ if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
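Two Python 3 behaviours drive the changes at the end of this file: files must be opened in binary mode so reads compare against bytes literals, and indexing a bytes object yields an int directly, so the ord() calls have to be skipped. A small sketch using a hypothetical ELF identification header:

    import sys

    header = b"\x7fELF\x02\x01\x01"   # hypothetical e_ident bytes: 64-bit LSB v1
    if sys.version_info[0] == 2:
        eclass = ord(header[4])       # Python 2: indexing bytes gives a 1-char str
    else:
        eclass = header[4]            # Python 3: indexing bytes gives an int
    print(eclass)                     # 2 -> 64-bit ELF
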
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index 3648e8b986ec..310efe5e7e23 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -58,22 +58,22 @@ def syscalls__sys_exit(event_name, context, common_cpu,
raw_syscalls__sys_exit(**locals())
def print_error_totals():
- if for_comm is not None:
- print("\nsyscall errors for %s:\n" % (for_comm))
- else:
- print("\nsyscall errors:\n")
-
- print("%-30s %10s" % ("comm [pid]", "count"))
- print("%-30s %10s" % ("------------------------------", "----------"))
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print("\n%s [%d]" % (comm, pid))
- id_keys = syscalls[comm][pid].keys()
- for id in id_keys:
- print(" syscall: %-16s" % syscall_name(id))
- ret_keys = syscalls[comm][pid][id].keys()
- for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
- print(" err = %-20s %10d" % (strerror(ret), val))
+ if for_comm is not None:
+ print("\nsyscall errors for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall errors:\n")
+
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print(" syscall: %-16s" % syscall_name(id))
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 0f5cf437b602..0c4841acf75d 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -10,6 +10,8 @@
#
# Measures futex contention
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
@@ -33,18 +35,18 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
- if thread_blocktime.has_key(tid):
+ if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
- print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ print("%s[%d] lock %x contended %d times, %d avg ns" %
+ (process_names[tid], tid, lock, count, avg))
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index b19172d673af..a73847c8f548 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
#from Core import *
def trace_begin():
- print "Intel PT Power Events and PTWRITE"
+ print("Intel PT Power Events and PTWRITE")
def trace_end():
- print "End"
+ print("End")
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print "IP: %u payload: %#x" % (exact_ip, payload),
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
- print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
- print "hints: %#x extensions: %#x" % (hints, extensions),
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -57,13 +59,14 @@ def print_pwre(raw_buf):
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
- print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')
def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
- print "IP: %u" % (exact_ip),
+ print("IP: %u" % (exact_ip), end=' ')
def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -71,36 +74,39 @@ def print_pwrx(raw_buf):
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
- print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')
def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ end=' ')
def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
- print "%16x %s (%s)" % (ip, symbol, dso)
+ print("%16x %s (%s)" % (ip, symbol, dso))
def process_event(param_dict):
- event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
- comm = param_dict["comm"]
- name = param_dict["ev_name"]
-
- # Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if (param_dict.has_key("symbol")):
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if "dso" in param_dict:
+ dso = param_dict["dso"]
+ else:
+ dso = "[unknown]"
+
+ if "symbol" in param_dict:
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "[unknown]"
if name == "ptwrite":
print_common_start(comm, sample, name)
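The unpacking in this file is unchanged by the conversion; only the printing and dict membership tests are. For reference, a self-contained round trip of the ptwrite layout the script assumes, a little-endian 32-bit flags word followed by a 64-bit payload, with hypothetical values:

    import struct

    raw_buf = struct.pack("<IQ", 1, 0xdeadbeef)   # hypothetical ptwrite packet
    flags, payload = struct.unpack_from("<IQ", raw_buf)
    print("IP: %u payload: %#x" % (flags & 1, payload))
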
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index fb0bbcbfa0f0..1f332e72b9b0 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -44,12 +44,13 @@ def print_memory_type():
print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
- end='');
+ end='');
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
key = lambda kv: (kv[1], kv[0]), reverse = True):
- print("%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
- end='')
+ print("%-40s %10d %10.1f%%\n" %
+ (mem_type, count, 100 * count / total),
+ end='')
def trace_begin():
parse_iomem()
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index 212557a02c50..101059971738 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -7,7 +7,7 @@ import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
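This hunk only re-indents the continuation line. The perf binary exports PERF_EXEC_PATH before invoking a script, which is how the Perf-Trace-Util helpers are located relative to it. A parenthesized continuation (a sketch with identical behavior, assuming that variable is set as perf sets it) avoids the backslash entirely:

    import os
    import sys

    sys.path.append(os.environ['PERF_EXEC_PATH'] +
                    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
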
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 267bda49325d..ea0c8b90a783 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -124,14 +124,16 @@ def print_receive(hunk):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print(PF_NAPI_POLL %
- (diff_msec(base_t, event['event_t']), event['dev']))
+ (diff_msec(base_t, event['event_t']),
+ event['dev']))
if i == len(event_list) - 1:
print("")
else:
print(PF_JOINT)
else:
print(PF_NET_RECV %
- (diff_msec(base_t, event['event_t']), event['skbaddr'],
+ (diff_msec(base_t, event['event_t']),
+ event['skbaddr'],
event['len']))
if 'comm' in event.keys():
print(PF_WJOINT)
@@ -256,7 +258,7 @@ def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, i
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
- dev_name, work=None, budget=None):
+ dev_name, work=None, budget=None):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name, work, budget)
all_event_list.append(event_info)
@@ -353,7 +355,7 @@ def handle_irq_softirq_exit(event_info):
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
- 'irq_list':irq_list, 'event_list':event_list}
+ 'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
@@ -390,7 +392,7 @@ def handle_netif_receive_skb(event_info):
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
- 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
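The receive-path prints above all format diff_msec(base_t, event['event_t']), i.e. a nanosecond delta scaled to milliseconds. A minimal reimplementation of the two helpers, consistent with the nsecs(sec, nsec) timestamps the handlers store (a sketch, not copied from the script):

    def nsecs(sec, nsec):
        # perf hands the script seconds and nanoseconds separately
        return sec * 1000000000 + nsec

    def diff_msec(src_t, dst_t):
        return (dst_t - src_t) / 1000000.0

    base_t = nsecs(10, 0)
    event_t = nsecs(10, 2500000)                 # 2.5 ms later
    print("%f" % diff_msec(base_t, event_t))     # -> 2.500000
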
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 3984bf51f3c5..8196e3087c9e 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -14,10 +14,10 @@ import sys
from collections import defaultdict
try:
- from UserList import UserList
+ from UserList import UserList
except ImportError:
- # Python 3: UserList moved to the collections package
- from collections import UserList
+ # Python 3: UserList moved to the collections package
+ from collections import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
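The try/except ImportError block is the standard single-source idiom for modules that moved between Python 2 and 3; the patch only re-indents it to match the file. The same pattern exercised standalone (the subclass name is invented for illustration):

    try:
        from UserList import UserList       # Python 2 location
    except ImportError:
        from collections import UserList    # Python 3: moved to collections

    class EventList(UserList):              # hypothetical stand-in subclass
        pass

    events = EventList([1, 2, 3])
    print(len(events))                      # -> 3
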
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 987ffae7c8ca..6e0278dcb092 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -13,9 +13,9 @@ from __future__ import print_function
import os, sys, time
try:
- import thread
+ import thread
except ImportError:
- import _thread as thread
+ import _thread as thread
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -75,11 +75,12 @@ def print_syscall_totals(interval):
print("%-40s %10s" % ("event", "count"))
print("%-40s %10s" %
- ("----------------------------------------",
- "----------"))
+ ("----------------------------------------",
+ "----------"))
- for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
- reverse = True):
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]),
+ reverse = True):
try:
print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
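sctop needs a background thread to reprint the totals every interval, hence the thread/_thread compatibility import. A runnable sketch of the pattern, where tick() is a made-up stand-in for the script's print_syscall_totals() loop:

    from __future__ import print_function
    import time

    try:
        import thread                       # Python 2
    except ImportError:
        import _thread as thread            # Python 3: renamed to _thread

    def tick(interval):
        time.sleep(interval)
        print("refresh")

    thread.start_new_thread(tick, (1,))
    time.sleep(2)                           # keep the main thread alive long enough
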
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 5e703efaddcc..b1c4def1410a 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -27,7 +27,7 @@ from collections import defaultdict
from optparse import OptionParser, make_option
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index 42782487b0e9..f254e40c6f0f 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -39,11 +39,10 @@ def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
-
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
- (for_pid and common_pid != for_pid ):
+ (for_pid and common_pid != for_pid):
return
try:
syscalls[common_comm][common_pid][id] += 1
@@ -51,26 +50,26 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print("\nsyscall events for %s:\n" % (for_comm))
- else:
- print("\nsyscall events by comm/pid:\n")
-
- print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
- print("%-40s %10s" % ("----------------------------------------",
- "----------"))
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print("\n%s [%d]" % (comm, pid))
- id_keys = syscalls[comm][pid].keys()
- for id, val in sorted(syscalls[comm][pid].items(), \
- key = lambda kv: (kv[1], kv[0]), reverse = True):
- print(" %-38s %10d" % (syscall_name(id), val))
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events by comm/pid:\n")
+
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index 0ebd89cfd42c..8adb95ff1664 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -36,8 +36,8 @@ def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
@@ -47,20 +47,19 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm, id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print("\nsyscall events for %s:\n" % (for_comm))
- else:
- print("\nsyscall events:\n")
-
- print("%-40s %10s" % ("event", "count"))
- print("%-40s %10s" % ("----------------------------------------",
- "-----------"))
-
- for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \
- reverse = True):
- print("%-40s %10d" % (syscall_name(id), val))
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d" % (syscall_name(id), val))