path: root/lldb/packages/Python/lldbsuite/test/bench.py
author    Zachary Turner <zturner@google.com>    2015-10-28 17:43:26 +0000
committer Zachary Turner <zturner@google.com>    2015-10-28 17:43:26 +0000
commit    c432c8f856e0bd84de980a9d9bb2d31b06fa95b1 (patch)
tree      4efa528e074a6e2df782345e4cd97f5d85d038c4 /lldb/packages/Python/lldbsuite/test/bench.py
parent    a8a3bd210086b50242903ed95048fe5e53897878 (diff)
Move lldb/test to lldb/packages/Python/lldbsuite/test.
This is the conclusion of an effort to get LLDB's Python code structured into a bona-fide Python package. This has a number of benefits, most notably the ability to more easily share Python code between different but related pieces of LLDB's Python infrastructure (for example, `scripts` can now share code with `test`).

llvm-svn: 251532
Diffstat (limited to 'lldb/packages/Python/lldbsuite/test/bench.py')
-rw-r--r-- lldb/packages/Python/lldbsuite/test/bench.py | 73
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/lldb/packages/Python/lldbsuite/test/bench.py b/lldb/packages/Python/lldbsuite/test/bench.py
new file mode 100644
index 00000000000..664aa3a93b8
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/bench.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+"""
+A simple bench runner which delegates to the ./dotest.py test driver to run the
+benchmarks defined in the list named 'benches'.
+
+You need to hand-edit 'benches' to change the command lines passed to the test
+driver.
+
+Use the following to get only the benchmark results in your terminal output:
+
+ ./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'
+
+See also bench-history.
+"""
+
+from __future__ import print_function
+
+import os, sys
+import re
+from optparse import OptionParser
+
+# A dotest.py invocation with no '-e exe-path' uses lldb itself as the inferior
+# program, unless the individual benchmark specifies a custom executable.
+benches = [
+    # Measure startup delays: creating a target, setting a breakpoint, and running to the breakpoint stop.
+ './dotest.py -v +b %E %X -n -p TestStartupDelays.py',
+
+ # Measure 'frame variable' response after stopping at a breakpoint.
+ './dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py',
+
+ # Measure stepping speed after stopping at a breakpoint.
+ './dotest.py -v +b %E %X -n -p TestSteppingSpeed.py',
+
+ # Measure expression cmd response with a simple custom executable program.
+ './dotest.py +b -n -p TestExpressionCmd.py',
+
+ # Attach to a spawned process then run disassembly benchmarks.
+ './dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py'
+]
+
+def main():
+ """Read the items from 'benches' and run the command line one by one."""
+ parser = OptionParser(usage="""\
+%prog [options]
+Run the standard benchmarks defined in the list named 'benches'.\
+""")
+ parser.add_option('-e', '--executable',
+ type='string', action='store',
+ dest='exe',
+ help='The target program launched by lldb.')
+ parser.add_option('-x', '--breakpoint-spec',
+ type='string', action='store',
+ dest='break_spec',
+ help='The lldb breakpoint spec for the target program.')
+
+    # Parse the command-line options, if any.
+ opts, args = parser.parse_args()
+
+ print("Starting bench runner....")
+
+ for item in benches:
+ command = item.replace('%E',
+ '-e "%s"' % opts.exe if opts.exe else '')
+ command = command.replace('%X',
+ '-x "%s"' % opts.break_spec if opts.break_spec else '')
+ print("Running %s" % (command))
+ os.system(command)
+
+ print("Bench runner done.")
+
+if __name__ == '__main__':
+ main()
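
For illustration, the following standalone sketch reproduces the '%E'/'%X' placeholder expansion that bench.py performs before handing each command line to os.system(). The function name expand_command and the sample executable path are hypothetical and not part of the commit; the substitution logic mirrors the loop in main() above.

    #!/usr/bin/env python
    """Hypothetical sketch: expand the '%E'/'%X' placeholders used in 'benches'."""
    from __future__ import print_function


    def expand_command(template, exe=None, break_spec=None):
        # '%E' expands to '-e "<exe>"' when an executable is given, else to ''.
        command = template.replace('%E', ('-e "%s"' % exe) if exe else '')
        # '%X' expands to '-x "<spec>"' when a breakpoint spec is given, else to ''.
        return command.replace('%X', ('-x "%s"' % break_spec) if break_spec else '')


    if __name__ == '__main__':
        template = './dotest.py -v +b %E %X -n -p TestStartupDelays.py'
        print(expand_command(template,
                             exe='/path/to/lldb',
                             break_spec='-F Driver::MainLoop()'))
        # -> ./dotest.py -v +b -e "/path/to/lldb" -x "-F Driver::MainLoop()" -n -p TestStartupDelays.py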