Diffstat (limited to 'lldb/packages/Python/lldbsuite/test')
-rw-r--r--  lldb/packages/Python/lldbsuite/test/basic_results_formatter.py                             | 51
-rw-r--r--  lldb/packages/Python/lldbsuite/test/configuration.py                                       |  3
-rw-r--r--  lldb/packages/Python/lldbsuite/test/dosep.py                                               | 35
-rw-r--r--  lldb/packages/Python/lldbsuite/test/dotest.py                                              |  3
-rw-r--r--  lldb/packages/Python/lldbsuite/test/dotest_args.py                                         |  9
-rw-r--r--  lldb/packages/Python/lldbsuite/test/issue_verification/TestSignalOutsideTestMethod.py.park | 24
-rw-r--r--  lldb/packages/Python/lldbsuite/test/result_formatter.py                                    | 85
7 files changed, 165 insertions(+), 45 deletions(-)
diff --git a/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py b/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py
index aded67532a3..b2b227ba370 100644
--- a/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py
+++ b/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py
@@ -258,37 +258,34 @@ class BasicResultsFormatter(result_formatter.ResultsFormatter):
if print_matching_tests:
# Sort by test name
for (_, event) in result_events_by_status[result_status_id]:
- extra_info = ""
+ # Convert full test path into test-root-relative.
+ test_relative_path = os.path.relpath(
+ os.path.realpath(event["test_filename"]),
+ lldbsuite.lldb_test_root)
+
+ # Create extra info component (used for exceptional exit info)
if result_status_id == EventBuilder.STATUS_EXCEPTIONAL_EXIT:
- extra_info = "{} ({}) ".format(
+ extra_info = "[EXCEPTIONAL EXIT {} ({})] ".format(
event["exception_code"],
event["exception_description"])
-
- if event["event"] == EventBuilder.TYPE_JOB_RESULT:
- # Jobs status that couldn't be mapped to a test method
- # doesn't have as much detail.
- self.out_file.write(
- "{}: {}{} (no test method running)\n".format(
- detail_label,
- extra_info,
- event["test_filename"]))
else:
- # Figure out the identity we will use for this test.
- if configuration.verbose and ("test_class" in event):
- test_id = "{}.{}".format(
- event["test_class"], event["test_name"])
- else:
- test_id = event["test_name"]
-
- # Test-method events have richer detail, use that here.
- test_relative_path = os.path.relpath(
- os.path.realpath(event["test_filename"]),
- lldbsuite.lldb_test_root)
- self.out_file.write("{}: {}{} ({})\n".format(
- detail_label,
- extra_info,
- test_id,
- test_relative_path))
+ extra_info = ""
+
+ # Figure out the identity we will use for this test.
+ if configuration.verbose and ("test_class" in event):
+ test_id = "{}.{}".format(
+ event["test_class"], event["test_name"])
+ elif "test_name" in event:
+ test_id = event["test_name"]
+ else:
+ test_id = "<no_running_test_method>"
+
+ # Display the info.
+ self.out_file.write("{}: {}{} ({})\n".format(
+ detail_label,
+ extra_info,
+ test_id,
+ test_relative_path))
def _finish_output_no_lock(self):
"""Writes the test result report to the output file."""
diff --git a/lldb/packages/Python/lldbsuite/test/configuration.py b/lldb/packages/Python/lldbsuite/test/configuration.py
index 7fd68b6dd12..d1aaef5cb7d 100644
--- a/lldb/packages/Python/lldbsuite/test/configuration.py
+++ b/lldb/packages/Python/lldbsuite/test/configuration.py
@@ -143,6 +143,9 @@ results_formatter_object = None
results_formatter_options = None
test_result = None
+# Test rerun configuration vars
+rerun_all_issues = False
+
# The names of all tests. Used to assert we don't have two tests with the same base name.
all_tests = set()
diff --git a/lldb/packages/Python/lldbsuite/test/dosep.py b/lldb/packages/Python/lldbsuite/test/dosep.py
index 31cc4b749e7..b3c22519b58 100644
--- a/lldb/packages/Python/lldbsuite/test/dosep.py
+++ b/lldb/packages/Python/lldbsuite/test/dosep.py
@@ -315,7 +315,8 @@ def send_events_to_collector(events, command):
formatter_spec.cleanup_func()
-def send_inferior_post_run_events(command, worker_index, process_driver):
+def send_inferior_post_run_events(
+ command, worker_index, process_driver, test_filename):
"""Sends any test events that should be generated after the inferior runs.
These events would include timeouts and exceptional (i.e. signal-returning)
@@ -326,6 +327,8 @@ def send_inferior_post_run_events(command, worker_index, process_driver):
this process
@param process_driver the ProcessDriver-derived instance that was used
to run the inferior process.
+ @param test_filename the full path to the Python test file that is being
+ run.
"""
if process_driver is None:
raise Exception("process_driver must not be None")
@@ -342,7 +345,6 @@ def send_inferior_post_run_events(command, worker_index, process_driver):
# Handle signal/exceptional exits.
if process_driver.is_exceptional_exit():
(code, desc) = process_driver.exceptional_exit_details()
- test_filename = process_driver.results[0]
post_events.append(
EventBuilder.event_for_job_exceptional_exit(
process_driver.pid,
@@ -354,7 +356,6 @@ def send_inferior_post_run_events(command, worker_index, process_driver):
# Handle timeouts.
if process_driver.is_timeout():
- test_filename = process_driver.results[0]
post_events.append(EventBuilder.event_for_job_timeout(
process_driver.pid,
worker_index,
@@ -365,7 +366,8 @@ def send_inferior_post_run_events(command, worker_index, process_driver):
send_events_to_collector(post_events, command)
-def call_with_timeout(command, timeout, name, inferior_pid_events):
+def call_with_timeout(
+ command, timeout, name, inferior_pid_events, test_filename):
# Add our worker index (if we have one) to all test events
# from this inferior.
worker_index = None
@@ -405,8 +407,8 @@ def call_with_timeout(command, timeout, name, inferior_pid_events):
send_inferior_post_run_events(
command,
worker_index,
- process_driver)
-
+ process_driver,
+ test_filename)
return process_driver.results
@@ -426,8 +428,9 @@ def process_dir(root, files, dotest_argv, inferior_pid_events):
timeout = (os.getenv("LLDB_%s_TIMEOUT" % timeout_name) or
getDefaultTimeout(dotest_options.lldb_platform_name))
+ test_filename = os.path.join(root, name)
results.append(call_with_timeout(
- command, timeout, name, inferior_pid_events))
+ command, timeout, name, inferior_pid_events, test_filename))
# result = (name, status, passes, failures, unexpected_successes)
timed_out = [name for name, status, _, _, _ in results
@@ -436,13 +439,15 @@ def process_dir(root, files, dotest_argv, inferior_pid_events):
if status == ePassed]
failed = [name for name, status, _, _, _ in results
if status != ePassed]
- unexpected_passes = [name for name, _, _, _, unexpected_successes in results
- if unexpected_successes > 0]
+ unexpected_passes = [
+ name for name, _, _, _, unexpected_successes in results
+ if unexpected_successes > 0]
pass_count = sum([result[2] for result in results])
fail_count = sum([result[3] for result in results])
- return (timed_out, passed, failed, unexpected_passes, pass_count, fail_count)
+ return (
+ timed_out, passed, failed, unexpected_passes, pass_count, fail_count)
in_q = None
out_q = None
@@ -1510,6 +1515,16 @@ def main(num_threads, test_subdir, test_runner_name, results_formatter):
(timed_out, passed, failed, unexpected_successes, pass_count,
fail_count) = summary_results
+    # Check whether we have any tests to rerun.
+    if results_formatter is not None:
+        tests_for_rerun = results_formatter.tests_for_rerun
+        results_formatter.tests_for_rerun = None
+
+        if tests_for_rerun is not None and len(tests_for_rerun) > 0:
+            # The actual re-run will be triggered here in a follow-up
+            # change; for now this branch only verifies that the new
+            # rerun-tracking plumbing doesn't break anything.
+            pass
+
# The results formatter - if present - is done now. Tell it to
# terminate.
if results_formatter is not None:
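The block above only harvests the formatter's rerun list; the rerun itself arrives in a later change. A sketch of the data shape main() now receives (file paths and method names invented):

    # tests_for_rerun maps a test file path to the methods that hit a
    # testrun-failing status; an empty list means no method was known.
    tests_for_rerun = {
        "functionalities/signal/TestSendSignal.py": ["test_with_run_command"],
        "lang/c/struct_types/TestStructTypes.py": [],
    }

    if tests_for_rerun is not None and len(tests_for_rerun) > 0:
        for path, methods in sorted(tests_for_rerun.items()):
            if methods:
                print("would rerun {}: {}".format(path, ", ".join(methods)))
            else:
                print("would rerun {} (whole file)".format(path))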
diff --git a/lldb/packages/Python/lldbsuite/test/dotest.py b/lldb/packages/Python/lldbsuite/test/dotest.py
index ed01eccf238..3063a91407b 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest.py
@@ -393,6 +393,9 @@ def parseOptionsAndInitTestdirs():
configuration.results_formatter_name = (
"lldbsuite.test.basic_results_formatter.BasicResultsFormatter")
+ # rerun-related arguments
+ configuration.rerun_all_issues = args.rerun_all_issues
+
if args.lldb_platform_name:
configuration.lldb_platform_name = args.lldb_platform_name
if args.lldb_platform_url:
diff --git a/lldb/packages/Python/lldbsuite/test/dotest_args.py b/lldb/packages/Python/lldbsuite/test/dotest_args.py
index ddb615ef5df..38f65b2521d 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest_args.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest_args.py
@@ -160,6 +160,15 @@ def create_parser():
'pairs to all test events generated by this test run. VAL may '
'be specified as VAL:TYPE, where TYPE may be int to convert '
'the value to an int'))
+
+    group = parser.add_argument_group('Test Re-run Options')
+    group.add_argument(
+        '--rerun-all-issues',
+        action='store_true',
+        help=('Re-run all issues that occurred during the test run '
+              'irrespective of the test method\'s marking as flakey. '
+              'Default behavior is to apply re-runs only to flakey '
+              'tests that generate issues.'))
# Remove the reference to our helper function
del X
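Taken together with the configuration.py and dotest.py hunks above, the new option flows parser -> args -> configuration module. A condensed, self-contained sketch of that flow (the configuration class below is a stand-in for the real lldbsuite.test.configuration module):

    import argparse

    class configuration(object):
        # Stand-in for lldbsuite.test.configuration, which defaults
        # rerun_all_issues to False as added earlier in this diff.
        rerun_all_issues = False

    parser = argparse.ArgumentParser()
    group = parser.add_argument_group('Test Re-run Options')
    group.add_argument('--rerun-all-issues', action='store_true')

    # Equivalent to passing --rerun-all-issues on a dotest.py run.
    args = parser.parse_args(['--rerun-all-issues'])
    configuration.rerun_all_issues = args.rerun_all_issues
    print(configuration.rerun_all_issues)  # True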
diff --git a/lldb/packages/Python/lldbsuite/test/issue_verification/TestSignalOutsideTestMethod.py.park b/lldb/packages/Python/lldbsuite/test/issue_verification/TestSignalOutsideTestMethod.py.park
new file mode 100644
index 00000000000..7a5b2ba99f4
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/issue_verification/TestSignalOutsideTestMethod.py.park
@@ -0,0 +1,24 @@
+"""Tests that an exceptional exit is detected by the testbot."""
+
+from __future__ import print_function
+
+import atexit
+import os
+import signal
+import time
+
+import lldbsuite.test.lldbtest as lldbtest
+
+
+class ExceptionalExitOutOfTestMethodTestCase(lldbtest.TestBase):
+ """Forces exceptional exit."""
+ mydir = lldbtest.TestBase.compute_mydir(__file__)
+
+ @lldbtest.skipIfWindows
+ def test_buildbot_catches_exceptional_exit(self):
+ pass
+
+def cleanup():
+ os.kill(os.getpid(), signal.SIGKILL)
+
+atexit.register(cleanup)
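The .park suffix keeps this file out of normal test discovery; renamed to .py, it SIGKILLs its own inferior after the (passing) test method finishes, so the runner should classify the job as an exceptional exit. A small sketch of the POSIX mechanism such detection presumably relies on, namely that a signal death surfaces as a negative child returncode:

    import signal
    import subprocess
    import sys

    # Spawn a child that SIGKILLs itself, like the atexit hook above.
    # POSIX-only: Windows has no equivalent SIGKILL semantics.
    child = subprocess.Popen([
        sys.executable, "-c",
        "import os, signal; os.kill(os.getpid(), signal.SIGKILL)"])
    child.wait()

    print(child.returncode)                     # -9
    print(child.returncode == -signal.SIGKILL)  # True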
diff --git a/lldb/packages/Python/lldbsuite/test/result_formatter.py b/lldb/packages/Python/lldbsuite/test/result_formatter.py
index cf0a6cf6b31..ff77c1411ba 100644
--- a/lldb/packages/Python/lldbsuite/test/result_formatter.py
+++ b/lldb/packages/Python/lldbsuite/test/result_formatter.py
@@ -28,6 +28,7 @@ import six
from six.moves import cPickle
# LLDB modules
+from . import configuration
# Ignore method count on DTOs.
@@ -165,7 +166,8 @@ class EventBuilder(object):
RESULT_TYPES = set([
TYPE_JOB_RESULT,
- TYPE_TEST_RESULT])
+ TYPE_TEST_RESULT
+ ])
# Test/Job Status Tags
STATUS_EXCEPTIONAL_EXIT = "exceptional_exit"
@@ -178,6 +180,16 @@ class EventBuilder(object):
STATUS_ERROR = "error"
STATUS_TIMEOUT = "timeout"
+    """Test methods or jobs whose status matches any of these
+    values cause a testrun failure, unless the test method is
+    rerun and does not trigger an issue on the rerun."""
+    TESTRUN_ERROR_STATUS_VALUES = set([
+        STATUS_ERROR,
+        STATUS_EXCEPTIONAL_EXIT,
+        STATUS_FAILURE,
+        STATUS_TIMEOUT
+    ])
+
@staticmethod
def _get_test_name_info(test):
"""Returns (test-class-name, test-method-name) from a test case instance.
@@ -655,6 +667,61 @@ class ResultsFormatter(object):
# timeout test status for this.
self.expected_timeouts_by_basename = set()
+        # Keep track of rerun-eligible tests.
+        # This is a set of keys of the form:
+        # {test_filename}.{test_class}.{test_name}
+        self.rerun_eligible_tests = set()
+
+        # A dictionary of test files that had a failing test, in the
+        # format: key = test path, value = list of test methods that
+        # need a rerun
+        self.tests_for_rerun = {}
+
+    @classmethod
+    def _make_rerun_eligibility_key(cls, test_result_event):
+        if test_result_event is None:
+            return None
+        key = ""
+        component_count = 0
+        if "test_filename" in test_result_event:
+            key = test_result_event["test_filename"]
+            component_count += 1
+        if "test_class" in test_result_event:
+            if component_count > 0:
+                key += "."
+            key += test_result_event["test_class"]
+            component_count += 1
+        if "test_name" in test_result_event:
+            if component_count > 0:
+                key += "."
+            key += test_result_event["test_name"]
+            component_count += 1
+        # Guard against an event carrying none of the identifying
+        # keys: without components there is no usable key.
+        if component_count == 0:
+            return None
+        return key
+
+    def _mark_test_for_rerun_eligibility(self, test_result_event):
+        key = self._make_rerun_eligibility_key(test_result_event)
+        if key is not None:
+            self.rerun_eligible_tests.add(key)
+        else:
+            sys.stderr.write(
+                "\nerror: test marked for re-run eligibility but "
+                "failed to create key.\n")
+
+    def _maybe_add_test_to_rerun_list(self, result_event):
+        key = self._make_rerun_eligibility_key(result_event)
+        if key is not None:
+            if (key in self.rerun_eligible_tests or
+                    configuration.rerun_all_issues):
+                test_filename = result_event.get("test_filename", None)
+                if test_filename is not None:
+                    test_name = result_event.get("test_name", None)
+                    if test_filename not in self.tests_for_rerun:
+                        self.tests_for_rerun[test_filename] = []
+                    if test_name is not None:
+                        self.tests_for_rerun[test_filename].append(test_name)
+        else:
+            sys.stderr.write(
+                "\nerror: couldn't add testrun-failing test to rerun "
+                "list because no eligibility key could be created.\n")
+
def _maybe_remap_job_result_event(self, test_event):
"""Remaps timeout/exceptional exit job results to last test method running.
@@ -684,13 +751,6 @@ class ResultsFormatter(object):
if start_key not in test_event:
test_event[start_key] = start_value
- # Always take the value of test_filename from test_start,
- # as it was gathered by class introspections. Job status
- # has less refined info available to it, so might be missing
- # path info.
- if "test_filename" in test_start:
- test_event["test_filename"] = test_start["test_filename"]
-
def _maybe_remap_expected_timeout(self, event):
if event is None:
return
@@ -749,12 +809,21 @@ class ResultsFormatter(object):
worker_index = test_event.get("worker_index", None)
if worker_index is not None:
self.started_tests_by_worker.pop(worker_index, None)
+
+                if status in EventBuilder.TESTRUN_ERROR_STATUS_VALUES:
+                    # A test/job status in any of these values causes a
+                    # testrun failure. If the failing test is eligible
+                    # for a rerun, add it to the rerun list.
+                    self._maybe_add_test_to_rerun_list(test_event)
elif event_type == EventBuilder.TYPE_TEST_START:
# Keep track of the most recent test start event
# for the related worker.
worker_index = test_event.get("worker_index", None)
if worker_index is not None:
self.started_tests_by_worker[worker_index] = test_event
+            elif event_type == EventBuilder.TYPE_MARK_TEST_RERUN_ELIGIBLE:
+                self._mark_test_for_rerun_eligibility(test_event)
def set_expected_timeouts_by_basename(self, basenames):
"""Specifies a list of test file basenames that are allowed to timeout
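For reference, the eligibility key built by _make_rerun_eligibility_key above dot-joins whichever of the three event components are present. A compact equivalent of the fully-populated case, with a hypothetical event:

    event = {
        "test_filename": "functionalities/foo/TestFoo.py",
        "test_class": "FooTestCase",
        "test_name": "test_bar",
    }
    # Components absent from the event are simply skipped.
    key = ".".join(event[k]
                   for k in ("test_filename", "test_class", "test_name")
                   if k in event)
    print(key)  # functionalities/foo/TestFoo.py.FooTestCase.test_bar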