-rw-r--r--   lldb/test/benchmarks/example/TestRepeatedExprs.py   124
-rw-r--r--   lldb/test/benchmarks/example/main.cpp                  8
-rw-r--r--   lldb/test/lldbbench.py                                 5
-rw-r--r--   lldb/test/lldbtest.py                                  6
4 files changed, 117 insertions(+), 26 deletions(-)
diff --git a/lldb/test/benchmarks/example/TestRepeatedExprs.py b/lldb/test/benchmarks/example/TestRepeatedExprs.py
index 7e0c3d352b3..4e6b71edf5d 100644
--- a/lldb/test/benchmarks/example/TestRepeatedExprs.py
+++ b/lldb/test/benchmarks/example/TestRepeatedExprs.py
@@ -1,6 +1,6 @@
"""Test evaluating expressions repeatedly comparing lldb against gdb."""
-import os
+import os, sys
import unittest2
import lldb
import pexpect
@@ -10,28 +10,118 @@ class RepeatedExprsCase(BenchBase):
mydir = os.path.join("benchmarks", "example")
- @benchmarks_test
- def test_with_lldb(self):
- """Test repeated expressions with lldb."""
- self.buildDefault()
- self.run_lldb_repeated_exprs()
+ def setUp(self):
+ BenchBase.setUp(self)
+ self.source = 'main.cpp'
+ self.line_to_break = line_number(self.source, '// Set breakpoint here.')
+ self.lldb_avg = None
+ self.gdb_avg = None
@benchmarks_test
- def test_with_gdb(self):
- """Test repeated expressions with gdb."""
+ def test_compare_lldb_to_gdb(self):
+ """Test repeated expressions with lldb vs. gdb."""
self.buildDefault()
- self.run_gdb_repeated_exprs()
+ self.exe_name = 'a.out'
+
+ print
+ self.run_lldb_repeated_exprs(self.exe_name, 100)
+ self.run_gdb_repeated_exprs(self.exe_name, 100)
+ print "lldb_avg: %f" % self.lldb_avg
+ print "gdb_avg: %f" % self.gdb_avg
+ print "lldb_avg/gdb_avg: %f" % (self.lldb_avg/self.gdb_avg)
+
+ def run_lldb_repeated_exprs(self, exe_name, count):
+ exe = os.path.join(os.getcwd(), exe_name)
+
+ # Set self.child_prompt, which is "(lldb) ".
+ self.child_prompt = '(lldb) '
+ prompt = self.child_prompt
- def run_lldb_repeated_exprs(self):
- for i in range(1000):
+ # So that the child gets torn down after the test.
+ self.child = pexpect.spawn('%s %s' % (self.lldbExec, exe))
+ child = self.child
+
+ # Turn on logging for what the child sends back.
+ if self.TraceOn():
+ child.logfile_read = sys.stdout
+
+ child.expect_exact(prompt)
+ child.sendline('breakpoint set -f %s -l %d' % (self.source, self.line_to_break))
+ child.expect_exact(prompt)
+ child.sendline('run')
+ child.expect_exact(prompt)
+ expr_cmd1 = 'expr ptr[j]->point.x'
+ expr_cmd2 = 'expr ptr[j]->point.y'
+
+ # Reset the stopwatch now.
+ self.stopwatch.reset()
+ for i in range(count):
with self.stopwatch:
- print "running "+self.testMethodName
- print "benchmarks result for "+self.testMethodName
- print "stopwatch:", str(self.stopwatch)
+ child.sendline(expr_cmd1)
+ child.expect_exact(prompt)
+ child.sendline(expr_cmd2)
+ child.expect_exact(prompt)
+ child.sendline('process continue')
+ child.expect_exact(prompt)
+
+ child.sendline('quit')
+ try:
+ self.child.expect(pexpect.EOF)
+ except:
+ pass
+
+ self.lldb_avg = self.stopwatch.avg()
+ if self.TraceOn():
+ print "lldb expression benchmark:", str(self.stopwatch)
+ self.child = None
+
+ def run_gdb_repeated_exprs(self, exe_name, count):
+ exe = os.path.join(os.getcwd(), exe_name)
+
+ # Set self.child_prompt, which is "(gdb) ".
+ self.child_prompt = '(gdb) '
+ prompt = self.child_prompt
+
+ # So that the child gets torn down after the test.
+ self.child = pexpect.spawn('gdb %s' % exe)
+ child = self.child
+
+ # Turn on logging for what the child sends back.
+ if self.TraceOn():
+ child.logfile_read = sys.stdout
+
+ child.expect_exact(prompt)
+ child.sendline('break %s:%d' % (self.source, self.line_to_break))
+ child.expect_exact(prompt)
+ child.sendline('run')
+ child.expect_exact(prompt)
+ expr_cmd1 = 'print ptr[j]->point.x'
+ expr_cmd2 = 'print ptr[j]->point.y'
+
+ # Reset the stopwatch now.
+ self.stopwatch.reset()
+ for i in range(count):
+ with self.stopwatch:
+ child.sendline(expr_cmd1)
+ child.expect_exact(prompt)
+ child.sendline(expr_cmd2)
+ child.expect_exact(prompt)
+ child.sendline('continue')
+ child.expect_exact(prompt)
+
+ child.sendline('quit')
+ child.expect_exact('The program is running. Exit anyway?')
+ child.sendline('y')
+ try:
+ self.child.expect(pexpect.EOF)
+ except:
+ pass
+
+ self.gdb_avg = self.stopwatch.avg()
+ if self.TraceOn():
+ print "gdb expression benchmark:", str(self.stopwatch)
+ self.child = None
- def run_gdb_repeated_exprs(self):
- print "running "+self.testMethodName
- print "benchmarks result for "+self.testMethodName
if __name__ == '__main__':
import atexit
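
The new test above times lldb and gdb by driving each debugger's command prompt through pexpect and accumulating per-iteration wall time with the Stopwatch context manager from lldbbench.py. Below is a minimal sketch of that prompt-driven timing pattern; it is not part of the patch, it drives a plain Python REPL instead of a debugger so it can run anywhere pexpect is available, and its Stopwatch is a simplified stand-in for the real class.

    import sys
    import time

    import pexpect


    class Stopwatch(object):
        """Simplified stand-in for lldbbench.Stopwatch: accumulate wall time per lap."""

        def __init__(self):
            self.laps = 0
            self.total = 0.0

        def __enter__(self):
            self._start = time.time()
            return self

        def __exit__(self, *exc):
            self.total += time.time() - self._start
            self.laps += 1
            return False

        def avg(self):
            return self.total / self.laps


    # Drive a plain Python REPL instead of lldb/gdb so the sketch runs anywhere.
    prompt = '>>> '
    child = pexpect.spawn(sys.executable, ['-i'])
    child.expect_exact(prompt)

    sw = Stopwatch()
    for i in range(10):
        with sw:
            # One "lap" = send one command and wait for the next prompt.
            child.sendline('1 + %d' % i)
            child.expect_exact(prompt)

    child.sendline('quit()')
    child.expect(pexpect.EOF)
    print("avg round trip: %f seconds over %d laps" % (sw.avg(), sw.laps))

The same loop shape appears in run_lldb_repeated_exprs() and run_gdb_repeated_exprs() above, with expect_exact() keeping each timed lap synchronized on the debugger prompt.
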
diff --git a/lldb/test/benchmarks/example/main.cpp b/lldb/test/benchmarks/example/main.cpp
index 730a704a7f8..1a095d35022 100644
--- a/lldb/test/benchmarks/example/main.cpp
+++ b/lldb/test/benchmarks/example/main.cpp
@@ -30,13 +30,13 @@ int main(int argc, char const *argv[]) {
}
printf("Finished populating data.\n");
- for (int i = 0; i < 1000; ++i) {
+ for (int j = 0; j < 1000; ++j) {
bool dump = argc > 1; // Set breakpoint here.
// Evaluate a couple of expressions (2*1000 = 2000 exprs):
- // expr ptr[i]->point.x
- // expr ptr[i]->point.y
+ // expr ptr[j]->point.x
+ // expr ptr[j]->point.y
if (dump) {
- printf("data[%d] = %d (%d, %d)\n", i, ptr[i]->id, ptr[i]->point.x, ptr[i]->point.y);
+ printf("data[%d] = %d (%d, %d)\n", j, ptr[j]->id, ptr[j]->point.x, ptr[j]->point.y);
}
}
return 0;
diff --git a/lldb/test/lldbbench.py b/lldb/test/lldbbench.py
index 4bc220b19de..e0c7814f646 100644
--- a/lldb/test/lldbbench.py
+++ b/lldb/test/lldbbench.py
@@ -1,6 +1,7 @@
import time
-from lldbtest import benchmarks_test
from lldbtest import Base
+from lldbtest import benchmarks_test
+from lldbtest import line_number
class Stopwatch(object):
"""Stopwatch provides a simple utility to start/stop your stopwatch multiple
@@ -80,7 +81,7 @@ class Stopwatch(object):
return self.__total_elapsed__ / self.__laps__
def __str__(self):
- return "Avg: %f (Laps: %d, Total Elapsed Time: %d)" % (self.avg(),
+ return "Avg: %f (Laps: %d, Total Elapsed Time: %f)" % (self.avg(),
self.__laps__,
self.__total_elapsed__)
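
The %d-to-%f change in __str__ matters because the Stopwatch total is a sum of time.time() differences, i.e. fractional seconds; a %d conversion truncates a sub-second total to 0 in the printed summary. A small illustration of the difference (the sleep duration here is arbitrary):

    import time

    start = time.time()
    time.sleep(0.25)                            # a sub-second "lap"
    elapsed = time.time() - start

    print("Total Elapsed Time: %d" % elapsed)   # truncates to 0
    print("Total Elapsed Time: %f" % elapsed)   # keeps the fraction, e.g. 0.25xxxx
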
diff --git a/lldb/test/lldbtest.py b/lldb/test/lldbtest.py
index dea1a235cc0..3932dad52e2 100644
--- a/lldb/test/lldbtest.py
+++ b/lldb/test/lldbtest.py
@@ -451,6 +451,9 @@ class Base(unittest2.TestCase):
#import traceback
#traceback.print_stack()
+ if "LLDB_EXEC" in os.environ:
+ self.lldbExec = os.environ["LLDB_EXEC"]
+
# Assign the test method name to self.testMethodName.
#
# For an example of the use of this attribute, look at test/types dir.
@@ -837,9 +840,6 @@ class TestBase(Base):
# Works with the test driver to conditionally skip tests via decorators.
Base.setUp(self)
- if "LLDB_EXEC" in os.environ:
- self.lldbExec = os.environ["LLDB_EXEC"]
-
try:
if lldb.blacklist:
className = self.__class__.__name__
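
The last two hunks move the LLDB_EXEC lookup from TestBase.setUp() up into Base.setUp(). The benchmark test above derives from BenchBase, which (judging from the lldbbench.py imports) builds on Base rather than TestBase, so hoisting the lookup is what lets the test spawn self.lldbExec through pexpect. A condensed sketch of that inheritance follows, with the real class bodies reduced to the relevant line and a hypothetical lldb path used only for the demo:

    import os


    class Base(object):
        def setUp(self):
            # Any test class, benchmark or not, now picks up the lldb binary here.
            if "LLDB_EXEC" in os.environ:
                self.lldbExec = os.environ["LLDB_EXEC"]


    class BenchBase(Base):
        def setUp(self):
            Base.setUp(self)   # benchmarks inherit self.lldbExec directly from Base


    class TestBase(Base):
        def setUp(self):
            Base.setUp(self)   # regular tests still get it, from the same place


    os.environ.setdefault("LLDB_EXEC", "/usr/bin/lldb")  # hypothetical path for the demo
    bench = BenchBase()
    bench.setUp()
    print(bench.lldbExec)
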