summaryrefslogtreecommitdiffstats
path: root/lldb/test/benchmarks
diff options
context:
space:
mode:
authorJohnny Chen <johnny.chen@apple.com>2011-10-22 00:57:05 +0000
committerJohnny Chen <johnny.chen@apple.com>2011-10-22 00:57:05 +0000
commitb8da426285eafb6b55ef29ec2f8223e0004332db (patch)
treef950c68757ad81642f40211f26ebc7a6e7ced294 /lldb/test/benchmarks
parentf385f12e178861f219e02784475e39cc73b07325 (diff)
downloadbcm5719-llvm-b8da426285eafb6b55ef29ec2f8223e0004332db.tar.gz
bcm5719-llvm-b8da426285eafb6b55ef29ec2f8223e0004332db.zip
Add bench.py as a driver script to run some benchmarks on lldb.
Add benchmarks for expression evaluations (TestExpressionCmd.py) and disassembly (TestDoAttachThenDisassembly.py). An example: [17:45:55] johnny:/Volumes/data/lldb/svn/trunk/test $ ./bench.py 2>&1 | grep -P '^lldb.*benchmark:' lldb startup delay (create fresh target) benchmark: Avg: 0.104274 (Laps: 30, Total Elapsed Time: 3.128214) lldb startup delay (set first breakpoint) benchmark: Avg: 0.102216 (Laps: 30, Total Elapsed Time: 3.066470) lldb frame variable benchmark: Avg: 1.649162 (Laps: 20, Total Elapsed Time: 32.983245) lldb stepping benchmark: Avg: 0.104409 (Laps: 50, Total Elapsed Time: 5.220461) lldb expr cmd benchmark: Avg: 0.206774 (Laps: 25, Total Elapsed Time: 5.169350) lldb disassembly benchmark: Avg: 0.089086 (Laps: 10, Total Elapsed Time: 0.890859) llvm-svn: 142708
Diffstat (limited to 'lldb/test/benchmarks')
-rw-r--r--lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py66
-rw-r--r--lldb/test/benchmarks/expression/TestExpressionCmd.py76
-rw-r--r--lldb/test/benchmarks/startup/TestStartupDelays.py2
3 files changed, 143 insertions, 1 deletions
diff --git a/lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py b/lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
new file mode 100644
index 00000000000..90cf7a87cae
--- /dev/null
+++ b/lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
@@ -0,0 +1,66 @@
+"""Test lldb's disassembly speed."""
+
+import os, sys
+import unittest2
+import lldb
+import pexpect
+from lldbbench import *
+
+class AttachThenDisassemblyBench(BenchBase):
+
+ mydir = os.path.join("benchmarks", "disassembly")
+
+ def setUp(self):
+ BenchBase.setUp(self)
+
+ @benchmarks_test
+ def test_attach_then_disassembly(self):
+ """Attach to a spawned lldb process then run disassembly benchmarks."""
+ print
+ self.run_lldb_attach_then_disassembly(10)
+ print "lldb disassembly benchmark:", self.stopwatch
+
+ def run_lldb_attach_then_disassembly(self, count):
+ target = self.dbg.CreateTarget(self.lldbHere)
+
+ # Spawn a new process and don't display the stdout if not in TraceOn() mode.
+ import subprocess
+ popen = subprocess.Popen([self.lldbHere, self.lldbOption],
+ stdout = open(os.devnull, 'w') if not self.TraceOn() else None)
+ if self.TraceOn():
+ print "pid of spawned process: %d" % popen.pid
+
+ # Attach to the launched lldb process.
+ listener = lldb.SBListener("my.attach.listener")
+ error = lldb.SBError()
+ process = target.AttachToProcessWithID(listener, popen.pid, error)
+
+ # Set thread0 as the selected thread, followed by the 'MainLoop' frame
+ # as the selected frame. Then do disassembly on the function.
+ thread0 = process.GetThreadAtIndex(0)
+ process.SetSelectedThread(thread0)
+ i = 0
+ found = False
+ for f in thread0:
+ #print "frame#%d %s" % (i, f.GetFunctionName())
+ if "MainLoop" in f.GetFunctionName():
+ found = True
+ thread0.SetSelectedFrame(i)
+ if self.TraceOn():
+ print "Found frame#%d for function 'MainLoop'" % i
+ break
+ i += 1
+
+ # Reset the stopwatch now.
+ self.stopwatch.reset()
+ for i in range(count):
+ with self.stopwatch:
+ # Disassemble the function.
+ self.runCmd("disassemble -f")
+
+
+if __name__ == '__main__':
+ import atexit
+ lldb.SBDebugger.Initialize()
+ atexit.register(lambda: lldb.SBDebugger.Terminate())
+ unittest2.main()
diff --git a/lldb/test/benchmarks/expression/TestExpressionCmd.py b/lldb/test/benchmarks/expression/TestExpressionCmd.py
new file mode 100644
index 00000000000..4993ee00282
--- /dev/null
+++ b/lldb/test/benchmarks/expression/TestExpressionCmd.py
@@ -0,0 +1,76 @@
+"""Test lldb's expression evaluations and collect statistics."""
+
+import os, sys
+import unittest2
+import lldb
+import pexpect
+from lldbbench import *
+
+class ExpressionEvaluationCase(BenchBase):
+
+ mydir = os.path.join("benchmarks", "expression")
+
+ def setUp(self):
+ BenchBase.setUp(self)
+ self.source = 'main.cpp'
+ self.line_to_break = line_number(self.source, '// Set breakpoint here.')
+ self.count = lldb.bmIterationCount
+ if self.count <= 0:
+ self.count = 25
+
+ @benchmarks_test
+ def test_expr_cmd(self):
+ """Test lldb's expression commands and collect statistics."""
+ self.buildDefault()
+ self.exe_name = 'a.out'
+
+ print
+ self.run_lldb_repeated_exprs(self.exe_name, self.count)
+ print "lldb expr cmd benchmark:", self.stopwatch
+
+ def run_lldb_repeated_exprs(self, exe_name, count):
+ exe = os.path.join(os.getcwd(), exe_name)
+
+ # Set self.child_prompt, which is "(lldb) ".
+ self.child_prompt = '(lldb) '
+ prompt = self.child_prompt
+
+ # Reset the stopwatch now.
+ self.stopwatch.reset()
+ for i in range(count):
+ # So that the child gets torn down after the test.
+ self.child = pexpect.spawn('%s %s %s' % (self.lldbExec, self.lldbOption, exe))
+ child = self.child
+
+ # Turn on logging for what the child sends back.
+ if self.TraceOn():
+ child.logfile_read = sys.stdout
+
+ child.expect_exact(prompt)
+ child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
+ child.expect_exact(prompt)
+ child.sendline('run')
+ child.expect_exact(prompt)
+ expr_cmd1 = 'expr ptr[j]->point.x'
+ expr_cmd2 = 'expr ptr[j]->point.y'
+
+ with self.stopwatch:
+ child.sendline(expr_cmd1)
+ child.expect_exact(prompt)
+ child.sendline(expr_cmd2)
+ child.expect_exact(prompt)
+
+ child.sendline('quit')
+ try:
+ self.child.expect(pexpect.EOF)
+ except:
+ pass
+
+ self.child = None
+
+
+if __name__ == '__main__':
+ import atexit
+ lldb.SBDebugger.Initialize()
+ atexit.register(lambda: lldb.SBDebugger.Terminate())
+ unittest2.main()
diff --git a/lldb/test/benchmarks/startup/TestStartupDelays.py b/lldb/test/benchmarks/startup/TestStartupDelays.py
index f062354ac6c..1a15a917cdd 100644
--- a/lldb/test/benchmarks/startup/TestStartupDelays.py
+++ b/lldb/test/benchmarks/startup/TestStartupDelays.py
@@ -26,7 +26,7 @@ class StartupDelaysBench(BenchBase):
self.count = lldb.bmIterationCount
if self.count <= 0:
- self.count = 15
+ self.count = 30
@benchmarks_test
def test_startup_delay(self):
OpenPOWER on IntegriCloud