Diffstat (limited to 'lldb/packages/Python')
 lldb/packages/Python/lldbsuite/test/basic_results_formatter.py                  |  30
 lldb/packages/Python/lldbsuite/test/configuration.py                            |   1
 lldb/packages/Python/lldbsuite/test/dosep.py                                    | 102
 lldb/packages/Python/lldbsuite/test/dotest.py                                   |   1
 lldb/packages/Python/lldbsuite/test/dotest_args.py                              |  11
 lldb/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park    |  23
 lldb/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park |  22
 lldb/packages/Python/lldbsuite/test/issue_verification/rerun_base.py            |  28
 lldb/packages/Python/lldbsuite/test/result_formatter.py                         |   3
 9 files changed, 195 insertions(+), 26 deletions(-)
diff --git a/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py b/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py
index b2b227ba370..472702d154d 100644
--- a/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py
+++ b/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py
@@ -31,6 +31,11 @@ class BasicResultsFormatter(result_formatter.ResultsFormatter):
action="store_true",
help=('cause unknown test events to generate '
'a python assert. Default is to ignore.'))
+ parser.add_argument(
+ "--dump-results",
+ action="store_true",
+ help=('dump the raw results data after printing '
+ 'the summary output.'))
return parser
def __init__(self, out_file, options):
@@ -56,21 +61,21 @@ class BasicResultsFormatter(result_formatter.ResultsFormatter):
if event_type is None:
return
- if event_type == "terminate":
+ if event_type == EventBuilder.TYPE_SESSION_TERMINATE:
self._finish_output()
- elif event_type == "test_start":
+ elif event_type == EventBuilder.TYPE_TEST_START:
self.track_start_time(
test_event["test_class"],
test_event["test_name"],
test_event["event_time"])
- elif event_type == "test_result":
+ elif event_type == EventBuilder.TYPE_TEST_RESULT:
# Build the test key.
test_key = test_event.get("test_filename", None)
if test_key is None:
raise Exception(
"failed to find test filename for test event {}".format(
test_event))
- test_key += ".{}.{}".format(
+ test_key += ":{}.{}".format(
test_event.get("test_class", ""),
test_event.get("test_name", ""))
@@ -91,14 +96,8 @@ class BasicResultsFormatter(result_formatter.ResultsFormatter):
self.result_status_counts[old_status] -= 1
self.test_method_rerun_count += 1
- if self.options.warn_on_multiple_results:
- print(
- "WARNING: test key {} already has a result: "
- "old:{} new:{}",
- self.result_events[test_key],
- test_event)
self.result_events[test_key] = test_event
- elif event_type == "job_result":
+ elif event_type == EventBuilder.TYPE_JOB_RESULT:
# Build the job key.
test_key = test_event.get("test_filename", None)
if test_key is None:
@@ -336,6 +335,15 @@ class BasicResultsFormatter(result_formatter.ResultsFormatter):
self._print_summary_counts(
categories, result_events_by_status, extra_results)
+ if self.options.dump_results:
+ # Debug dump of the key/result info for all categories.
+ self._print_banner("Results Dump")
+ for status, events_by_key in result_events_by_status.items():
+ print("\nSTATUS: {}".format(status))
+ for key, event in events_by_key:
+ print("key: {}".format(key))
+ print("event: {}".format(event))
+
def _finish_output(self):
"""Prepare and write the results report as all incoming events have
arrived.
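
The hunks above replace bare string literals ("terminate", "test_start", ...) with the shared EventBuilder.TYPE_* constants. A minimal sketch of why that dispatch style is safer; the EventBuilder class below is a stand-in for the real one in lldbsuite/test/result_formatter.py, and handle_event is an illustrative helper, not suite code:

    # Sketch: dispatching on shared constants instead of string literals.
    class EventBuilder(object):
        TYPE_SESSION_TERMINATE = "terminate"
        TYPE_TEST_START = "test_start"
        TYPE_TEST_RESULT = "test_result"

    def handle_event(event_type):
        if event_type == EventBuilder.TYPE_SESSION_TERMINATE:
            return "finish"
        elif event_type == EventBuilder.TYPE_TEST_START:
            return "start"
        # A misspelled constant raises AttributeError immediately;
        # a misspelled string literal would silently never match.
        return None
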
diff --git a/lldb/packages/Python/lldbsuite/test/configuration.py b/lldb/packages/Python/lldbsuite/test/configuration.py
index 2e4dd411f2f..69ed9fc32e2 100644
--- a/lldb/packages/Python/lldbsuite/test/configuration.py
+++ b/lldb/packages/Python/lldbsuite/test/configuration.py
@@ -141,6 +141,7 @@ test_result = None
# Test rerun configuration vars
rerun_all_issues = False
+rerun_max_file_threshold = 0
# The names of all tests. Used to assert we don't have two tests with the same base name.
all_tests = set()
diff --git a/lldb/packages/Python/lldbsuite/test/dosep.py b/lldb/packages/Python/lldbsuite/test/dosep.py
index b3c22519b58..7e259bc3652 100644
--- a/lldb/packages/Python/lldbsuite/test/dosep.py
+++ b/lldb/packages/Python/lldbsuite/test/dosep.py
@@ -49,8 +49,10 @@ import threading
from six.moves import queue
# Our packages and modules
+import lldbsuite
import lldbsuite.support.seven as seven
+from . import configuration
from . import dotest_channels
from . import dotest_args
from . import result_formatter
@@ -416,21 +418,20 @@ def call_with_timeout(
def process_dir(root, files, dotest_argv, inferior_pid_events):
"""Examine a directory for tests, and invoke any found within it."""
results = []
- for name in files:
+ for (base_name, full_test_path) in files:
import __main__ as main
script_file = main.__file__
command = ([sys.executable, script_file] +
dotest_argv +
- ["--inferior", "-p", name, root])
+ ["--inferior", "-p", base_name, root])
- timeout_name = os.path.basename(os.path.splitext(name)[0]).upper()
+ timeout_name = os.path.basename(os.path.splitext(base_name)[0]).upper()
timeout = (os.getenv("LLDB_%s_TIMEOUT" % timeout_name) or
getDefaultTimeout(dotest_options.lldb_platform_name))
- test_filename = os.path.join(root, name)
results.append(call_with_timeout(
- command, timeout, name, inferior_pid_events, test_filename))
+ command, timeout, base_name, inferior_pid_events, full_test_path))
# result = (name, status, passes, failures, unexpected_successes)
timed_out = [name for name, status, _, _, _ in results
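
As the hunk above shows, process_dir derives a per-test timeout override from an environment variable named after the test file. A self-contained sketch of that lookup; the 240-second fallback here merely stands in for getDefaultTimeout(), whose real value is platform-dependent:

    # Sketch: deriving the per-test timeout env var used by process_dir.
    import os

    base_name = "TestRerunTimeout.py"
    timeout_name = os.path.basename(os.path.splitext(base_name)[0]).upper()
    env_var = "LLDB_%s_TIMEOUT" % timeout_name  # LLDB_TESTRERUNTIMEOUT_TIMEOUT
    timeout = os.getenv(env_var) or "240"       # "240" stands in for the platform default
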
@@ -615,8 +616,10 @@ def find_test_files_in_dir_tree(dir_root, found_func):
return (base_filename.startswith("Test") and
base_filename.endswith(".py"))
- tests = [filename for filename in files
- if is_test_filename(root, filename)]
+ tests = [
+ (filename, os.path.join(root, filename))
+ for filename in files
+ if is_test_filename(root, filename)]
if tests:
found_func(root, tests)
@@ -1097,8 +1100,16 @@ def walk_and_invoke(test_files, dotest_argv, num_workers, test_runner_func):
dotest_channels.UnpicklingForwardingListenerChannel(
RUNNER_PROCESS_ASYNC_MAP, "localhost", 0,
2 * num_workers, forwarding_func))
- dotest_argv.append("--results-port")
- dotest_argv.append(str(RESULTS_LISTENER_CHANNEL.address[1]))
+ # Set the results port command line arg. It might have been
+ # inserted previously, so first try to replace it.
+ listener_port = str(RESULTS_LISTENER_CHANNEL.address[1])
+ try:
+ port_value_index = dotest_argv.index("--results-port") + 1
+ dotest_argv[port_value_index] = listener_port
+ except ValueError:
+ # --results-port doesn't exist (yet), add it
+ dotest_argv.append("--results-port")
+ dotest_argv.append(listener_port)
# Build the test work items out of the (dir, file_list) entries passed in.
test_work_items = []
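
The hunk above turns a blind append into replace-or-append, so walk_and_invoke can be called more than once on the same dotest_argv (the rerun phase reuses it). The same pattern extracted as a standalone helper; the function name is illustrative:

    # Sketch: idempotently setting a "--flag value" pair in an argv list.
    def set_flag_value(argv, flag, value):
        try:
            argv[argv.index(flag) + 1] = value  # flag present: replace its value
        except ValueError:
            argv.extend([flag, value])          # flag absent: append both

    argv = ["--results-port", "5000"]
    set_flag_value(argv, "--results-port", "5001")  # replaces; no duplicate flag
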
@@ -1424,6 +1435,58 @@ def default_test_runner_name(num_threads):
return test_runner_name
+def rerun_tests(test_subdir, tests_for_rerun, dotest_argv):
+ # Build the list of test files to rerun. At some future time we'll
+ # enable re-run by test method so we can constrain the rerun set
+ # to just the method(s) that had issues within a file.
+
+ # Sort rerun files into subdirectories.
+ print("\nRerunning the following files:")
+ rerun_files_by_subdir = {}
+ for test_filename in tests_for_rerun.keys():
+ # Print the file we'll be rerunning
+ test_relative_path = os.path.relpath(
+ test_filename, lldbsuite.lldb_test_root)
+ print(" {}".format(test_relative_path))
+
+ # Store test filenames by subdir.
+ test_dir = os.path.dirname(test_filename)
+ test_basename = os.path.basename(test_filename)
+ if test_dir in rerun_files_by_subdir:
+ rerun_files_by_subdir[test_dir].append(
+ (test_basename, test_filename))
+ else:
+ rerun_files_by_subdir[test_dir] = [(test_basename, test_filename)]
+
+ # Break rerun work up by subdirectory. We do this since
+ # we have an invariant that states only one test file can
+ # be run at a time in any given subdirectory (related to
+ # rules around built inferior test program lifecycle).
+ rerun_work = []
+ for files_by_subdir in rerun_files_by_subdir.values():
+ rerun_work.append((test_subdir, files_by_subdir))
+
+ # Run the work with the serial runner.
+ # Do not update the legacy counts; they are being removed,
+ # so there is no point adding complicated merge logic here.
+ rerun_thread_count = 1
+ rerun_runner_name = default_test_runner_name(rerun_thread_count)
+ runner_strategies_by_name = get_test_runner_strategies(rerun_thread_count)
+ rerun_runner_func = runner_strategies_by_name[
+ rerun_runner_name]
+ if rerun_runner_func is None:
+ raise Exception(
+ "failed to find rerun test runner "
+ "function named '{}'".format(rerun_runner_name))
+
+ walk_and_invoke(
+ rerun_work,
+ dotest_argv,
+ rerun_thread_count,
+ rerun_runner_func)
+ print("\nTest rerun complete\n")
+
+
def main(num_threads, test_subdir, test_runner_name, results_formatter):
"""Run dotest.py in inferior mode in parallel.
@@ -1501,11 +1564,13 @@ def main(num_threads, test_subdir, test_runner_name, results_formatter):
list(runner_strategies_by_name.keys())))
test_runner_func = runner_strategies_by_name[test_runner_name]
+ # Collect the files on which we'll run the first test run phase.
test_files = []
find_test_files_in_dir_tree(
test_subdir, lambda tdir, tfiles: test_files.append(
(test_subdir, tfiles)))
+ # Do the first test run phase.
summary_results = walk_and_invoke(
test_files,
dotest_argv,
@@ -1515,15 +1580,24 @@ def main(num_threads, test_subdir, test_runner_name, results_formatter):
(timed_out, passed, failed, unexpected_successes, pass_count,
fail_count) = summary_results
- # Check if we have any tests to rerun.
+ # Check if we have any tests to rerun as phase 2.
if results_formatter is not None:
tests_for_rerun = results_formatter.tests_for_rerun
- results_formatter.tests_for_rerun = None
+ results_formatter.tests_for_rerun = {}
if tests_for_rerun is not None and len(tests_for_rerun) > 0:
- # Here's where we trigger the re-run in a future change.
- # Make sure the rest of the changes don't break anything.
- pass
+ rerun_file_count = len(tests_for_rerun)
+ print("\n{} test files marked for rerun\n".format(
+ rerun_file_count))
+
+ # Check if the number of files exceeds the max cutoff. If so,
+ # we skip the rerun step.
+ if rerun_file_count > configuration.rerun_max_file_threshold:
+ print("Skipping rerun: max rerun file threshold ({}) "
+ "exceeded".format(
+ configuration.rerun_max_file_threshold))
+ else:
+ rerun_tests(test_subdir, tests_for_rerun, dotest_argv)
# The results formatter - if present - is done now. Tell it to
# terminate.
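
rerun_tests above buckets the rerun candidates by directory because the runner's invariant allows only one test file at a time per subdirectory (tied to built inferior test program lifecycle). A minimal sketch of that grouping, using collections.defaultdict instead of the patch's explicit if/else (the behavior is the same; paths are invented):

    # Sketch: bucketing rerun files by their containing directory.
    import collections
    import os

    tests_for_rerun = {"/suite/a/TestOne.py": {}, "/suite/a/TestTwo.py": {},
                       "/suite/b/TestThree.py": {}}
    by_subdir = collections.defaultdict(list)
    for path in tests_for_rerun:
        by_subdir[os.path.dirname(path)].append((os.path.basename(path), path))
    # by_subdir now maps each directory to its (base_name, full_path) pairs,
    # matching the (dir, file_list) work items walk_and_invoke expects.
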
diff --git a/lldb/packages/Python/lldbsuite/test/dotest.py b/lldb/packages/Python/lldbsuite/test/dotest.py
index 2865c639fc1..536aced3bd2 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest.py
@@ -387,6 +387,7 @@ def parseOptionsAndInitTestdirs():
# rerun-related arguments
configuration.rerun_all_issues = args.rerun_all_issues
+ configuration.rerun_max_file_threshold = args.rerun_max_file_threshold
if args.lldb_platform_name:
configuration.lldb_platform_name = args.lldb_platform_name
diff --git a/lldb/packages/Python/lldbsuite/test/dotest_args.py b/lldb/packages/Python/lldbsuite/test/dotest_args.py
index b0fda3506a5..105156df7e8 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest_args.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest_args.py
@@ -160,6 +160,7 @@ def create_parser():
'be specified as VAL:TYPE, where TYPE may be int to convert '
'the value to an int'))
+ # Re-run related arguments
group = parser.add_argument_group('Test Re-run Options')
group.add_argument(
'--rerun-all-issues',
@@ -168,6 +169,16 @@ def create_parser():
'irrespective of the test method\'s marking as flakey. '
'Default behavior is to apply re-runs only to flakey '
'tests that generate issues.'))
+ group.add_argument(
+ '--rerun-max-file-threshold',
+ action='store',
+ type=int,
+ default=50,
+ help=('Maximum number of files requiring a rerun beyond '
+ 'which the rerun will not occur. This is meant to '
+ 'stop a catastrophically failing test suite from forcing '
+ 'all tests to be rerun in the single-worker phase.'))
+
# Remove the reference to our helper function
del X
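
A self-contained sketch of how the new option parses, runnable outside the suite; the parser below is a stand-in for dotest_args.create_parser(), not the real thing:

    # Sketch: the rerun threshold option in isolation.
    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_argument_group('Test Re-run Options')
    group.add_argument('--rerun-all-issues', action='store_true')
    group.add_argument('--rerun-max-file-threshold', action='store',
                       type=int, default=50)

    args = parser.parse_args(['--rerun-max-file-threshold', '25'])
    print(args.rerun_max_file_threshold)  # 25; defaults to 50 when omitted
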
diff --git a/lldb/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park b/lldb/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park
new file mode 100644
index 00000000000..bcd1926d740
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park
@@ -0,0 +1,23 @@
+"""Tests that a flakey fail is rerun, and will pass on the rerun.
+Run this test with --rerun-all-issues specified to test that
+the tests fail on the first run, then pass on the second.
+Do not mark them as flakey as, at this time, flakey tests will
+run twice, thus causing the second run to succeed."""
+
+from __future__ import print_function
+
+import rerun_base
+
+import lldbsuite.test.lldbtest as lldbtest
+
+
+class RerunFailTestCase(rerun_base.RerunBaseTestCase):
+ """Forces test failure on first run, success on rerun."""
+ @lldbtest.no_debug_info_test
+ def test_buildbot_catches_failure(self):
+ """Issues a failing test assertion."""
+ if self.should_generate_issue():
+ self.assertTrue(
+ False,
+ "This will fail on the first call, succeed on rerun, and "
+ "alternate thereafter.")
diff --git a/lldb/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park b/lldb/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park
new file mode 100644
index 00000000000..1cf5373ac49
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park
@@ -0,0 +1,22 @@
+"""Tests that a timeout is detected by the testbot."""
+from __future__ import print_function
+
+import time
+
+import lldbsuite.test.lldbtest as lldbtest
+import rerun_base
+
+
+class RerunTimeoutTestCase(rerun_base.RerunBaseTestCase):
+ @lldbtest.no_debug_info_test
+ def test_timeout_rerun_succeeds(self):
+ """Tests that timeout logic kicks in and is picked up."""
+ if not self.should_generate_issue():
+ # We pass this time.
+ return
+ # We time out this time.
+ while True:
+ try:
+ time.sleep(1)
+ except:
+ print("ignoring exception during sleep")
diff --git a/lldb/packages/Python/lldbsuite/test/issue_verification/rerun_base.py b/lldb/packages/Python/lldbsuite/test/issue_verification/rerun_base.py
new file mode 100644
index 00000000000..2ce775dced1
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/issue_verification/rerun_base.py
@@ -0,0 +1,28 @@
+from __future__ import print_function
+
+import os
+
+import lldbsuite.test.lldbtest as lldbtest
+
+
+# pylint: disable=too-few-public-methods
+class RerunBaseTestCase(lldbtest.TestBase):
+ """Forces test failure."""
+ mydir = lldbtest.TestBase.compute_mydir(__file__)
+
+ def should_generate_issue(self):
+ """Returns whether a test issue should be generated.
+
+ @returns True on the first and every other call via a given
+ test method.
+ """
+ should_pass_filename = "{}.{}.succeed-marker".format(
+ __file__, self.id())
+ fail = not os.path.exists(should_pass_filename)
+ if fail:
+ # Create the marker so that next call to this passes.
+ open(should_pass_filename, 'w').close()
+ else:
+ # Delete the marker so next time we fail.
+ os.remove(should_pass_filename)
+ return fail
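
The succeed-marker file is the whole state machine here: its absence means "generate an issue", its presence means "pass". A standalone sketch of the toggle; the marker path is illustrative:

    # Sketch: the alternating succeed-marker toggle from should_generate_issue().
    import os

    def should_generate_issue(marker_path):
        fail = not os.path.exists(marker_path)
        if fail:
            open(marker_path, 'w').close()  # next call will pass
        else:
            os.remove(marker_path)          # next call will fail again
        return fail

    marker = "demo.succeed-marker"
    print(should_generate_issue(marker))  # True: first run generates the issue
    print(should_generate_issue(marker))  # False: the rerun passes
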
diff --git a/lldb/packages/Python/lldbsuite/test/result_formatter.py b/lldb/packages/Python/lldbsuite/test/result_formatter.py
index ff77c1411ba..e173fd2fda1 100644
--- a/lldb/packages/Python/lldbsuite/test/result_formatter.py
+++ b/lldb/packages/Python/lldbsuite/test/result_formatter.py
@@ -163,6 +163,7 @@ class EventBuilder(object):
TYPE_TEST_RESULT = "test_result"
TYPE_TEST_START = "test_start"
TYPE_MARK_TEST_RERUN_ELIGIBLE = "test_eligible_for_rerun"
+ TYPE_SESSION_TERMINATE = "terminate"
RESULT_TYPES = set([
TYPE_JOB_RESULT,
@@ -687,7 +688,7 @@ class ResultsFormatter(object):
component_count += 1
if "test_class" in test_result_event:
if component_count > 0:
- key += "."
+ key += ":"
key += test_result_event["test_class"]
component_count += 1
if "test_name" in test_result_event: