-rw-r--r--  lldb/test/bench-history                                          | 12
-rwxr-xr-x  lldb/test/bench.py                                               | 43
-rw-r--r--  lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py  | 19
-rw-r--r--  lldb/test/benchmarks/startup/TestStartupDelays.py                |  4
-rw-r--r--  lldb/test/benchmarks/stepping/TestSteppingSpeed.py               |  7

5 files changed, 62 insertions(+), 23 deletions(-)
diff --git a/lldb/test/bench-history b/lldb/test/bench-history
index 09f074c66a0..bc8bb181f0a 100644
--- a/lldb/test/bench-history
+++ b/lldb/test/bench-history
@@ -27,3 +27,15 @@ lldb stepping benchmark: Avg: 0.104091 (Laps: 50, Total Elapsed Time: 5.204557)
 lldb expr cmd benchmark: Avg: 0.207095 (Laps: 25, Total Elapsed Time: 5.177363)
 lldb disassembly benchmark: Avg: 0.001531 (Laps: 10, Total Elapsed Time: 0.015311)
 
+r143065 (Oct 26, 2011):
+# Establish a baseline by using a fixed lldb executable as the inferior program
+# for the lldb debugger to operate on. The fixed lldb executable corresponds to
+# r142902.
+[15:50:34] johnny:/Volumes/data/lldb/svn/trunk/test $ ./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'
+lldb startup delay (create fresh target) benchmark: Avg: 0.103774 (Laps: 30, Total Elapsed Time: 3.113226)
+lldb startup delay (set first breakpoint) benchmark: Avg: 0.102230 (Laps: 30, Total Elapsed Time: 3.066896)
+lldb startup delay (run to breakpoint) benchmark: Avg: 0.448635 (Laps: 30, Total Elapsed Time: 13.459048)
+lldb frame variable benchmark: Avg: 1.615647 (Laps: 20, Total Elapsed Time: 32.312934)
+lldb stepping benchmark: Avg: 0.138386 (Laps: 50, Total Elapsed Time: 6.919313)
+lldb expr cmd benchmark: Avg: 0.218967 (Laps: 25, Total Elapsed Time: 5.474171)
+lldb disassembly benchmark: Avg: 0.092677 (Laps: 10, Total Elapsed Time: 0.926766)
diff --git a/lldb/test/bench.py b/lldb/test/bench.py
index 634fb182c2c..05c3a191eb5 100755
--- a/lldb/test/bench.py
+++ b/lldb/test/bench.py
@@ -9,36 +9,59 @@ test driver.
 
 Use the following to get only the benchmark results in your terminal output:
 
-    ./bench.py 2>&1 | grep -P '^lldb.*benchmark:'
+    ./bench.py -e /Volumes/data/lldb/svn/regression/build/Debug/lldb -x '-F Driver::MainLoop()' 2>&1 | grep -P '^lldb.*benchmark:'
+
+See also bench-history.
 """
 
 import os, sys
 import re
+from optparse import OptionParser
 
 # dotest.py invocation with no '-e exe-path' uses lldb as the inferior program,
 # unless there is a mentioning of custom executable program.
 benches = [
-    # Measure startup delays creating a target and setting a breakpoint at main.
-    './dotest.py -v +b -n -p TestStartupDelays.py',
+    # Measure startup delays creating a target, setting a breakpoint, and running to the breakpoint stop.
+    './dotest.py -v +b %E %X -n -p TestStartupDelays.py',
 
-    # Measure 'frame variable' response after stopping at Driver::MainLoop().
-    './dotest.py -v +b -x "-F Driver::MainLoop()" -n -p TestFrameVariableResponse.py',
+    # Measure 'frame variable' response after stopping at a breakpoint.
+    './dotest.py -v +b %E %X -n -p TestFrameVariableResponse.py',
 
-    # Measure stepping speed after stopping at Driver::MainLoop().
-    './dotest.py -v +b -x "-F Driver::MainLoop()" -n -p TestSteppingSpeed.py',
+    # Measure stepping speed after stopping at a breakpoint.
+    './dotest.py -v +b %E %X -n -p TestSteppingSpeed.py',
 
     # Measure expression cmd response with a simple custom executable program.
     './dotest.py +b -n -p TestExpressionCmd.py',
 
-    # Attach to a spawned lldb process then run disassembly benchmarks.
-    './dotest.py -v +b -n -p TestDoAttachThenDisassembly.py'
+    # Attach to a spawned process, then run disassembly benchmarks.
+    './dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py'
 ]
 
 def main():
     """Read the items from 'benches' and run the command line one by one."""
+    parser = OptionParser(usage="""\
+%prog [options]
+Run the standard benchmarks defined in the list named 'benches'.\
+""")
+    parser.add_option('-e', '--executable',
+                      type='string', action='store',
+                      dest='exe',
+                      help='The target program launched by lldb.')
+    parser.add_option('-x', '--breakpoint-spec',
+                      type='string', action='store',
+                      dest='break_spec',
+                      help='The lldb breakpoint spec for the target program.')
+
+    # Parse the options, if any.
+    opts, args = parser.parse_args()
+
     print "Starting bench runner...."
-    for command in benches:
+    for item in benches:
+        command = item.replace('%E',
+                               '-e "%s"' % opts.exe if opts.exe else '')
+        command = command.replace('%X',
+                                  '-x "%s"' % opts.break_spec if opts.break_spec else '')
         print "Running %s" % (command)
         os.system(command)
diff --git a/lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py b/lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
index 90cf7a87cae..0085a144c75 100644
--- a/lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
+++ b/lldb/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
@@ -1,4 +1,6 @@
-"""Test lldb's disassemblt speed."""
+"""Test lldb's disassembly speed. This bench deliberately attaches to an lldb
+inferior and traverses the stack for thread 0 to arrive at the frame with
+function 'MainLoop'. It is important to specify an lldb executable as the inferior."""
 
 import os, sys
 import unittest2
@@ -12,20 +14,27 @@ class AttachThenDisassemblyBench(BenchBase):
 
     def setUp(self):
         BenchBase.setUp(self)
+        if lldb.bmExecutable:
+            self.exe = lldb.bmExecutable
+        else:
+            self.exe = self.lldbHere
+        self.count = lldb.bmIterationCount
+        if self.count <= 0:
+            self.count = 10
 
     @benchmarks_test
     def test_attach_then_disassembly(self):
         """Attach to a spawned lldb process then run disassembly benchmarks."""
         print
-        self.run_lldb_attach_then_disassembly(10)
+        self.run_lldb_attach_then_disassembly(self.exe, self.count)
         print "lldb disassembly benchmark:", self.stopwatch
 
-    def run_lldb_attach_then_disassembly(self, count):
-        target = self.dbg.CreateTarget(self.lldbHere)
+    def run_lldb_attach_then_disassembly(self, exe, count):
+        target = self.dbg.CreateTarget(exe)
 
         # Spawn a new process and don't display the stdout if not in TraceOn() mode.
         import subprocess
-        popen = subprocess.Popen([self.lldbHere, self.lldbOption],
+        popen = subprocess.Popen([exe, self.lldbOption],
                                  stdout = open(os.devnull, 'w') if not self.TraceOn() else None)
         if self.TraceOn():
             print "pid of spawned process: %d" % popen.pid
diff --git a/lldb/test/benchmarks/startup/TestStartupDelays.py b/lldb/test/benchmarks/startup/TestStartupDelays.py
index f770b31dfa3..2c3cad5345b 100644
--- a/lldb/test/benchmarks/startup/TestStartupDelays.py
+++ b/lldb/test/benchmarks/startup/TestStartupDelays.py
@@ -1,4 +1,4 @@
-"""Test lldb's startup delays creating a target and setting a breakpoint."""
+"""Test lldb's startup delays creating a target, setting a breakpoint, and running to the breakpoint stop."""
 
 import os, sys
 import unittest2
@@ -32,7 +32,7 @@ class StartupDelaysBench(BenchBase):
 
     @benchmarks_test
     def test_startup_delay(self):
-        """Test start up delays creating a target and setting a breakpoint."""
+        """Test start up delays creating a target, setting a breakpoint, and running to the breakpoint stop."""
         print
         self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
         print "lldb startup delay (create fresh target) benchmark:", self.stopwatch
diff --git a/lldb/test/benchmarks/stepping/TestSteppingSpeed.py b/lldb/test/benchmarks/stepping/TestSteppingSpeed.py
index e1edadc6c87..780e25384c9 100644
--- a/lldb/test/benchmarks/stepping/TestSteppingSpeed.py
+++ b/lldb/test/benchmarks/stepping/TestSteppingSpeed.py
@@ -14,17 +14,12 @@ class SteppingSpeedBench(BenchBase):
         BenchBase.setUp(self)
         if lldb.bmExecutable:
             self.exe = lldb.bmExecutable
-            bmExecutableDefauled = False
         else:
             self.exe = self.lldbHere
-            bmExecutableDefauled = True
 
         if lldb.bmBreakpointSpec:
             self.break_spec = lldb.bmBreakpointSpec
         else:
-            if bmExecutableDefauled:
-                self.break_spec = '-F Driver::MainLoop()'
-            else:
-                self.break_spec = '-n main'
+            self.break_spec = '-n main'
 
         self.count = lldb.bmIterationCount
         if self.count <= 0:
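
A note on the %E/%X placeholders that bench.py now carries in its 'benches'
entries: each one is expanded by plain string substitution just before the
command is handed to os.system(), and an unset option simply collapses to the
empty string. The sketch below mirrors that loop in a standalone form; the
expand() helper is a name invented here, the list is shortened to two entries,
and the example arguments echo the bench-history invocation above. One
subtlety the patch relies on: '%' string formatting binds tighter than the
conditional expression, so '-e "%s"' % exe if exe else '' parses as
('-e "%s"' % exe) if exe else ''.

    # Minimal standalone sketch of bench.py's placeholder expansion.
    benches = [
        './dotest.py -v +b %E %X -n -p TestStartupDelays.py',
        './dotest.py -v +b -n %E -p TestDoAttachThenDisassembly.py',
    ]

    def expand(item, exe=None, break_spec=None):
        # An empty substitution drops the option from the command line entirely.
        command = item.replace('%E', '-e "%s"' % exe if exe else '')
        return command.replace('%X', '-x "%s"' % break_spec if break_spec else '')

    for item in benches:
        print(expand(item, '/Volumes/data/lldb/svn/regression/build/Debug/lldb',
                     '-F Driver::MainLoop()'))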

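The bench-history lines also follow a simple invariant that makes entries easy
to cross-check by hand: Avg is Total Elapsed Time divided by Laps (assuming the
Stopwatch reports nothing fancier, which the recorded numbers bear out). For
example, against the r143065 stepping entry:

    # Check Avg == Total Elapsed Time / Laps for one recorded line.
    laps = 50
    total_elapsed = 6.919313                   # r143065 stepping entry above
    print('Avg: %f' % (total_elapsed / laps))  # -> Avg: 0.138386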

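Finally, the actual attach in TestDoAttachThenDisassembly happens between
CreateTarget() and the disassembly loop, outside the hunks shown above. For
orientation, this is the general attach-by-pid pattern in lldb's Python API;
it is an illustrative sketch, not the test's code, and the executable path is
a stand-in borrowed from the bench-history entry:

    import os, subprocess
    import lldb

    # Stand-in path; the test uses lldb.bmExecutable or self.lldbHere.
    exe = '/Volumes/data/lldb/svn/regression/build/Debug/lldb'

    debugger = lldb.SBDebugger.Create()
    target = debugger.CreateTarget(exe)

    # Spawn the inferior quietly, then attach to it by pid.
    popen = subprocess.Popen([exe], stdout=open(os.devnull, 'w'))
    error = lldb.SBError()
    process = target.AttachToProcessWithID(debugger.GetListener(),
                                           popen.pid, error)
    if error.Success():
        print('attached to pid %d' % process.GetProcessID())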