Diffstat (limited to 'lldb/test/benchmarks/example/TestRepeatedExprs.py')
-rw-r--r-- | lldb/test/benchmarks/example/TestRepeatedExprs.py | 124 |
1 file changed, 107 insertions, 17 deletions
diff --git a/lldb/test/benchmarks/example/TestRepeatedExprs.py b/lldb/test/benchmarks/example/TestRepeatedExprs.py
index 7e0c3d352b3..4e6b71edf5d 100644
--- a/lldb/test/benchmarks/example/TestRepeatedExprs.py
+++ b/lldb/test/benchmarks/example/TestRepeatedExprs.py
@@ -1,6 +1,6 @@
 """Test evaluating expressions repeatedly comparing lldb against gdb."""
 
-import os
+import os, sys
 import unittest2
 import lldb
 import pexpect
@@ -10,28 +10,118 @@ class RepeatedExprsCase(BenchBase):
 
     mydir = os.path.join("benchmarks", "example")
 
-    @benchmarks_test
-    def test_with_lldb(self):
-        """Test repeated expressions with lldb."""
-        self.buildDefault()
-        self.run_lldb_repeated_exprs()
+    def setUp(self):
+        BenchBase.setUp(self)
+        self.source = 'main.cpp'
+        self.line_to_break = line_number(self.source, '// Set breakpoint here.')
+        self.lldb_avg = None
+        self.gdb_avg = None
 
     @benchmarks_test
-    def test_with_gdb(self):
-        """Test repeated expressions with gdb."""
+    def test_compare_lldb_to_gdb(self):
+        """Test repeated expressions with lldb vs. gdb."""
         self.buildDefault()
-        self.run_gdb_repeated_exprs()
+        self.exe_name = 'a.out'
+
+        print
+        self.run_lldb_repeated_exprs(self.exe_name, 100)
+        self.run_gdb_repeated_exprs(self.exe_name, 100)
+        print "lldb_avg: %f" % self.lldb_avg
+        print "gdb_avg: %f" % self.gdb_avg
+        print "lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg)
+
+    def run_lldb_repeated_exprs(self, exe_name, count):
+        exe = os.path.join(os.getcwd(), exe_name)
+
+        # Set self.child_prompt, which is "(lldb) ".
+        self.child_prompt = '(lldb) '
+        prompt = self.child_prompt
 
-    def run_lldb_repeated_exprs(self):
-        for i in range(1000):
+        # So that the child gets torn down after the test.
+        self.child = pexpect.spawn('%s %s' % (self.lldbExec, exe))
+        child = self.child
+
+        # Turn on logging for what the child sends back.
+        if self.TraceOn():
+            child.logfile_read = sys.stdout
+
+        child.expect_exact(prompt)
+        child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
+        child.expect_exact(prompt)
+        child.sendline('run')
+        child.expect_exact(prompt)
+        expr_cmd1 = 'expr ptr[j]->point.x'
+        expr_cmd2 = 'expr ptr[j]->point.y'
+
+        # Reset the stopwatch now.
+        self.stopwatch.reset()
+        for i in range(count):
             with self.stopwatch:
-                print "running "+self.testMethodName
-        print "benchmarks result for "+self.testMethodName
-        print "stopwatch:", str(self.stopwatch)
+                child.sendline(expr_cmd1)
+                child.expect_exact(prompt)
+                child.sendline(expr_cmd2)
+                child.expect_exact(prompt)
+            child.sendline('process continue')
+            child.expect_exact(prompt)
+
+        child.sendline('quit')
+        try:
+            self.child.expect(pexpect.EOF)
+        except:
+            pass
+
+        self.lldb_avg = self.stopwatch.avg()
+        if self.TraceOn():
+            print "lldb expression benchmark:", str(self.stopwatch)
+        self.child = None
+
+    def run_gdb_repeated_exprs(self, exe_name, count):
+        exe = os.path.join(os.getcwd(), exe_name)
+
+        # Set self.child_prompt, which is "(gdb) ".
+        self.child_prompt = '(gdb) '
+        prompt = self.child_prompt
+
+        # So that the child gets torn down after the test.
+        self.child = pexpect.spawn('gdb %s' % exe)
+        child = self.child
+
+        # Turn on logging for what the child sends back.
+        if self.TraceOn():
+            child.logfile_read = sys.stdout
+
+        child.expect_exact(prompt)
+        child.sendline('break %s:%d' % (self.source, self.line_to_break))
+        child.expect_exact(prompt)
+        child.sendline('run')
+        child.expect_exact(prompt)
+        expr_cmd1 = 'print ptr[j]->point.x'
+        expr_cmd2 = 'print ptr[j]->point.y'
+
+        # Reset the stopwatch now.
+        self.stopwatch.reset()
+        for i in range(count):
+            with self.stopwatch:
+                child.sendline(expr_cmd1)
+                child.expect_exact(prompt)
+                child.sendline(expr_cmd2)
+                child.expect_exact(prompt)
+            child.sendline('continue')
+            child.expect_exact(prompt)
+
+        child.sendline('quit')
+        child.expect_exact('The program is running. Exit anyway?')
+        child.sendline('y')
+        try:
+            self.child.expect(pexpect.EOF)
+        except:
+            pass
+
+        self.gdb_avg = self.stopwatch.avg()
+        if self.TraceOn():
+            print "gdb expression benchmark:", str(self.stopwatch)
+        self.child = None
 
-    def run_gdb_repeated_exprs(self):
-        print "running "+self.testMethodName
-        print "benchmarks result for "+self.testMethodName
 
 if __name__ == '__main__':
     import atexit
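The timing in both helpers relies on the self.stopwatch object that BenchBase (from lldbbench) provides: a context manager that accumulates the elapsed time of each 'with' block and exposes reset() and avg(). The sketch below is a minimal standalone illustration of that interface, written in Python 3; the class name and fields are assumptions for illustration, not the actual lldbbench implementation.

import time


class Stopwatch:
    """Accumulate elapsed wall-clock time over repeated 'with' blocks."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Clear the accumulated time and the number of timed laps.
        self._total = 0.0
        self._laps = 0

    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._total += time.perf_counter() - self._start
        self._laps += 1
        return False  # never swallow exceptions raised inside the block

    def avg(self):
        # Average seconds per timed block; 0.0 if nothing was timed.
        return self._total / self._laps if self._laps else 0.0


if __name__ == '__main__':
    sw = Stopwatch()
    sw.reset()
    for _ in range(100):
        with sw:
            sum(range(10000))  # stand-in for the timed expr/print round-trips
        # work done outside the 'with' block (like 'process continue') is not timed
    print("avg per timed block: %f seconds" % sw.avg())

Keeping the 'process continue'/'continue' commands outside the 'with' block, as the diff above does, means only the expression-evaluation round-trips contribute to lldb_avg and gdb_avg, not the cost of resuming the inferior process.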