Diffstat (limited to 'lldb/packages/Python/lldbsuite/test')
 lldb/packages/Python/lldbsuite/test/basic_results_formatter.py | 259 +++
 lldb/packages/Python/lldbsuite/test/curses_results.py          |  72 +-
 lldb/packages/Python/lldbsuite/test/dosep.py                   |  92 +-
 lldb/packages/Python/lldbsuite/test/test_results.py            |  97 +-
 4 files changed, 432 insertions(+), 88 deletions(-)
diff --git a/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py b/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py
new file mode 100644
index 00000000000..8571e601710
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/basic_results_formatter.py
@@ -0,0 +1,259 @@
+"""
+ The LLVM Compiler Infrastructure
+
+This file is distributed under the University of Illinois Open Source
+License. See LICENSE.TXT for details.
+
+Provides basic test result output. This is intended to be suitable for
+normal LLDB test run output when no other option is specified.
+"""
+from __future__ import print_function
+
+from . import test_results
+
+
+class BasicResultsFormatter(test_results.ResultsFormatter):
+ """Provides basic test result output."""
+ @classmethod
+ def arg_parser(cls):
+ """@return arg parser used to parse formatter-specific options."""
+ parser = super(BasicResultsFormatter, cls).arg_parser()
+
+ parser.add_argument(
+ "--assert-on-unknown-events",
+ action="store_true",
+ help=('cause unknown test events to raise an '
+ 'exception. Default is to ignore them.'))
+ return parser
+
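
For context, a quick sketch of exercising this parser on its own; the standalone invocation is illustrative (dotest wires this up differently), and it assumes the base class parser defines no required arguments:

    # Illustrative only: drive the formatter's argument parser directly.
    parser = BasicResultsFormatter.arg_parser()
    options = parser.parse_args(["--assert-on-unknown-events"])
    # argparse derives the attribute name from the option string.
    assert options.assert_on_unknown_events
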
+ def __init__(self, out_file, options):
+ """Initializes the BasicResultsFormatter instance.
+ @param out_file file-like object where formatted output is written.
+ @param options specifies the parsed options object for the
+ formatter.
+ """
+ # Initialize the parent
+ super(BasicResultsFormatter, self).__init__(out_file, options)
+
+ # self.result_events stores the most recent test_result event
+ # for each test method key
+ self.result_events = {}
+ self.test_method_rerun_count = 0
+
+ def handle_event(self, test_event):
+ super(BasicResultsFormatter, self).handle_event(test_event)
+ if test_event is None:
+ return
+
+ event_type = test_event.get("event", None)
+ if event_type is None:
+ return
+
+ if event_type == "terminate":
+ self._finish_output()
+ elif event_type == "test_start":
+ self.track_start_time(
+ test_event["test_class"],
+ test_event["test_name"],
+ test_event["event_time"])
+ elif event_type == "test_result":
+ # Build the test key.
+ test_key = test_event.get("test_filename", None)
+ if test_key is None:
+ raise Exception(
+ "failed to find test filename for test event {}".format(
+ test_event))
+ test_key += ".{}.{}".format(
+ test_event.get("test_class", ""),
+ test_event.get("test_name", ""))
+
+ # Save the most recent test event for the test key.
+ # This allows a second test phase to overwrite the most
+ # recent result for the test key (unique per method).
+ # We do final reporting at the end, so we'll report based
+ # on final results.
+ # This way, a rerun (e.g. a follow-up low-load,
+ # single-worker pass) always has its final results
+ # reported rather than the first attempt's.
+ if test_key in self.result_events:
+ # We are replacing the result of something that was
+ # already counted by the base class. Remove the double
+ # counting by reducing by one the count for the test
+ # result status.
+ old_status = self.result_events[test_key]["status"]
+ self.result_status_counts[old_status] -= 1
+
+ self.test_method_rerun_count += 1
+ if self.options.warn_on_multiple_results:
+ print(
+ "WARNING: test key {} already has a result: "
+ "old:{} new:{}".format(
+ test_key,
+ self.result_events[test_key],
+ test_event))
+ self.result_events[test_key] = test_event
+ else:
+ # This is an unknown event.
+ if self.options.assert_on_unknown_events:
+ raise Exception("unknown event type {} from {}\n".format(
+ event_type, test_event))
+
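
To make the keying above concrete, here is a hypothetical test_result event and the key handle_event() derives from it (all field values invented for illustration):

    # Hypothetical event as emitted by the test event system.
    test_event = {
        "event": "test_result",
        "status": "success",
        "test_filename": "TestFoo.py",
        "test_class": "FooTestCase",
        "test_name": "test_foo_works",
    }
    # Yields: "TestFoo.py.FooTestCase.test_foo_works"
    test_key = "{}.{}.{}".format(
        test_event["test_filename"],
        test_event.get("test_class", ""),
        test_event.get("test_name", ""))
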
+ def _partition_results_by_status(self, categories):
+ """Partitions the captured test results by event status.
+
+ This permits processing test results by the category ids.
+
+ @param categories the list of categories on which to partition.
+ Follows the format described in _report_category_details().
+
+ @return a dictionary where each key is the test result status,
+ and each entry is a list containing all the test result events
+ that matched that test result status. Result status IDs with
+ no matching entries will have a zero-length list.
+ """
+ partitioned_events = {}
+ for category in categories:
+ result_status_id = category[0]
+ matching_events = [
+ [key, event] for (key, event) in self.result_events.items()
+ if event.get("status", "") == result_status_id]
+ partitioned_events[result_status_id] = sorted(
+ matching_events,
+ key=lambda x: x[1]["test_name"])
+ return partitioned_events
+
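
Continuing the example, if the run recorded the success above and nothing else, partitioning over just the success and failure categories would produce a structure like this sketch (statuses with no matches map to empty lists):

    partitioned_events = {
        "success": [
            ["TestFoo.py.FooTestCase.test_foo_works", test_event],
        ],
        "failure": [],  # no matching events recorded
    }
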
+ def _print_summary_counts(self, categories, result_events_by_status):
+ """Prints summary counts for all categories.
+
+ @param categories the list of categories on which to partition.
+ Follows the format described in _report_category_details().
+
+ @param result_events_by_status the partitioned list of test
+ result events in a dictionary, with the key set to the test
+ result status id and the value set to the list of test method
+ results that match the status id.
+ """
+
+ # Get max length for category printed name
+ category_with_max_printed_name = max(
+ categories, key=lambda x: len(x[1]))
+ max_category_name_length = len(category_with_max_printed_name[1])
+
+ banner_text = "Test Result Summary"
+ banner_separator = "".ljust(len(banner_text), "=")
+
+ self.out_file.write("\n{}\n{}\n{}\n".format(
+ banner_separator,
+ banner_text,
+ banner_separator))
+
+ for category in categories:
+ result_status_id = category[0]
+ result_label = "{}:".format(category[1]).ljust(
+ max_category_name_length + 1)
+ count = len(result_events_by_status[result_status_id])
+ self.out_file.write("{} {:4}\n".format(
+ result_label,
+ count))
+
+ @classmethod
+ def _has_printable_details(cls, categories, result_events_by_status):
+ """Returns whether there are any test result details that need to be printed.
+
+ This scans the results to see whether any category that is
+ marked printable has at least one matching result.
+
+ @param categories the list of categories on which to partition.
+ Follows the format described in _report_category_details().
+
+ @param result_events_by_status the partitioned list of test
+ result events in a dictionary, with the key set to the test
+ result status id and the value set to the list of test method
+ results that match the status id.
+
+ @return True if there are any details (i.e. test results
+ for failures, errors, unexpected successes); False otherwise.
+ """
+ for category in categories:
+ result_status_id = category[0]
+ print_matching_tests = category[2]
+ if print_matching_tests:
+ if len(result_events_by_status[result_status_id]) > 0:
+ # We found a printable details test result status
+ # that has details to print.
+ return True
+ # We didn't find any test result category with printable
+ # details.
+ return False
+
+ def _report_category_details(self, category, result_events_by_status):
+ """Reports all test results matching the given category spec.
+
+ @param category a category spec of the format [result_status_id,
+ printed_category_name, print_matching_entries?, detail_label]
+
+ @param result_events_by_status the partitioned list of test
+ result events in a dictionary, with the key set to the test
+ result status id and the value set to the list of test method
+ results that match the status id.
+ """
+ result_status_id = category[0]
+ print_matching_tests = category[2]
+ detail_label = category[3]
+
+ if print_matching_tests:
+ # Results were already sorted by test name during partitioning.
+ for (_, event) in result_events_by_status[result_status_id]:
+ self.out_file.write("{}: {}.{} ({})\n".format(
+ detail_label,
+ event["test_class"],
+ event["test_name"],
+ event["test_filename"]))
+
+ def _finish_output_no_lock(self):
+ """Writes the test result report to the output file."""
+ self.out_file.write("\nTest Results\n")
+ self.out_file.write(
+ "Total Test Methods Run (excluding reruns): {}\n".format(
+ len(self.result_events)))
+ self.out_file.write("Test Method rerun count: {}\n".format(
+ self.test_method_rerun_count))
+
+ # Output each of the test result entries.
+ categories = [
+ # result id, printed name, print matching tests?, detail label
+ [test_results.EventBuilder.STATUS_SUCCESS,
+ "Success", False, None],
+ [test_results.EventBuilder.STATUS_EXPECTED_FAILURE,
+ "Expected Failure", False, None],
+ [test_results.EventBuilder.STATUS_FAILURE,
+ "Failure", True, "FAIL"],
+ [test_results.EventBuilder.STATUS_ERROR, "Error", True, "ERROR"],
+ [test_results.EventBuilder.STATUS_UNEXPECTED_SUCCESS,
+ "Unexpected Success", True, "UNEXPECTED SUCCESS"],
+ [test_results.EventBuilder.STATUS_SKIP, "Skip", False, None]]
+
+ # Partition all the events by test result status
+ result_events_by_status = self._partition_results_by_status(
+ categories)
+
+ # Print the summary
+ self._print_summary_counts(categories, result_events_by_status)
+
+ # Print the details
+ have_details = self._has_printable_details(
+ categories, result_events_by_status)
+ if have_details:
+ self.out_file.write("\nDetails:\n")
+ for category in categories:
+ self._report_category_details(
+ category, result_events_by_status)
+
+ def _finish_output(self):
+ """Prepare and write the results report as all incoming events have
+ arrived.
+ """
+ with self.lock:
+ self._finish_output_no_lock()
+
+ def replaces_summary(self):
+ return True
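
Taken together, _finish_output_no_lock() produces a report shaped roughly like the following; the counts and test names here are invented for illustration, and exact column padding depends on the longest category name:

    Test Results
    Total Test Methods Run (excluding reruns): 152
    Test Method rerun count: 1

    ===================
    Test Result Summary
    ===================
    Success:            147
    Expected Failure:     2
    Failure:              1
    Error:                1
    Unexpected Success:   0
    Skip:                 1

    Details:
    FAIL: FooTestCase.test_foo_works (TestFoo.py)
    ERROR: BarTestCase.test_bar_breaks (TestBar.py)
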
diff --git a/lldb/packages/Python/lldbsuite/test/curses_results.py b/lldb/packages/Python/lldbsuite/test/curses_results.py
index 157bae4156b..a1dee1d8f51 100644
--- a/lldb/packages/Python/lldbsuite/test/curses_results.py
+++ b/lldb/packages/Python/lldbsuite/test/curses_results.py
@@ -24,6 +24,8 @@ import time
# LLDB modules
from . import lldbcurses
from . import test_results
+from .test_results import EventBuilder
+
class Curses(test_results.ResultsFormatter):
"""Receives live results from tests that are running and reports them to the terminal in a curses GUI"""
@@ -38,7 +40,7 @@ class Curses(test_results.ResultsFormatter):
self.job_tests = [None] * 64
self.results = list()
try:
- self.main_window = lldbcurses.intialize_curses()
+ self.main_window = lldbcurses.intialize_curses()
self.main_window.add_key_action('\t', self.main_window.select_next_first_responder, "Switch between views that can respond to keyboard input")
self.main_window.refresh()
self.job_panel = None
@@ -53,26 +55,25 @@ class Curses(test_results.ResultsFormatter):
self.using_terminal = False
print("Unexpected error:", sys.exc_info()[0])
raise
-
-
+
self.line_dict = dict()
- #self.events_file = open("/tmp/events.txt", "w")
+ # self.events_file = open("/tmp/events.txt", "w")
# self.formatters = list()
# if tee_results_formatter:
# self.formatters.append(tee_results_formatter)
def status_to_short_str(self, status):
- if status == 'success':
+ if status == EventBuilder.STATUS_SUCCESS:
return '.'
- elif status == 'failure':
+ elif status == EventBuilder.STATUS_FAILURE:
return 'F'
- elif status == 'unexpected_success':
+ elif status == EventBuilder.STATUS_UNEXPECTED_SUCCESS:
return '?'
- elif status == 'expected_failure':
+ elif status == EventBuilder.STATUS_EXPECTED_FAILURE:
return 'X'
- elif status == 'skip':
+ elif status == EventBuilder.STATUS_SKIP:
return 'S'
- elif status == 'error':
+ elif status == EventBuilder.STATUS_ERROR:
return 'E'
else:
return status
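
The chain above maps one status constant to one character; the same mapping could be expressed as a table lookup, shown here as a sketch that preserves the fallthrough of returning the raw status string for unknown values:

    # Equivalent lookup-table form of status_to_short_str() (sketch).
    _STATUS_TO_CHAR = {
        EventBuilder.STATUS_SUCCESS: '.',
        EventBuilder.STATUS_FAILURE: 'F',
        EventBuilder.STATUS_UNEXPECTED_SUCCESS: '?',
        EventBuilder.STATUS_EXPECTED_FAILURE: 'X',
        EventBuilder.STATUS_SKIP: 'S',
        EventBuilder.STATUS_ERROR: 'E',
    }

    def status_to_short_str(self, status):
        # Unknown statuses fall through to the raw status string.
        return self._STATUS_TO_CHAR.get(status, status)
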
@@ -88,7 +89,7 @@ class Curses(test_results.ResultsFormatter):
self.info_panel.top()
else:
self.info_panel.show()
-
+
self.main_window.push_first_responder(self.info_panel)
test_start = self.results[selected_idx][0]
test_result = self.results[selected_idx][1]
@@ -99,9 +100,9 @@ class Curses(test_results.ResultsFormatter):
def hide_info_panel(self):
self.main_window.pop_first_responder(self.info_panel)
- self.info_panel.hide()
+ self.info_panel.hide()
self.main_window.refresh()
-
+
def toggle_status(self, status):
if status:
# Toggle showing and hiding results whose status matches "status" in "Results" window
@@ -123,8 +124,8 @@ class Curses(test_results.ResultsFormatter):
name = test_result['test_class'] + '.' + test_result['test_name']
self.results_panel.append_line('%s (%6.2f sec) %s' % (self.status_to_short_str(status), test_result['elapsed_time'], name))
if update:
- self.main_window.refresh()
-
+ self.main_window.refresh()
+
def handle_event(self, test_event):
with self.lock:
super(Curses, self).handle_event(test_event)
@@ -137,7 +138,7 @@ class Curses(test_results.ResultsFormatter):
if 'event' in test_event:
check_for_one_key = True
#print(str(test_event), file=self.events_file)
- event = test_event['event']
+ event = test_event['event']
if self.status_panel:
self.status_panel.update_status('time', str(datetime.timedelta(seconds=math.floor(time.time() - self.start_time))))
if event == 'test_start':
@@ -176,7 +177,7 @@ class Curses(test_results.ResultsFormatter):
elif event == 'job_end':
self.jobs[worker_index] = ''
self.job_panel.set_line(worker_index, '')
- elif event == 'initialize':
+ elif event == 'initialize':
self.initialize_event = test_event
num_jobs = test_event['worker_count']
job_frame = self.main_window.get_contained_rect(height=num_jobs+2)
@@ -184,41 +185,40 @@ class Curses(test_results.ResultsFormatter):
status_frame = self.main_window.get_contained_rect(height=1, top_inset=self.main_window.get_size().h-1)
self.job_panel = lldbcurses.BoxedPanel(frame=job_frame, title="Jobs")
self.results_panel = lldbcurses.BoxedPanel(frame=results_frame, title="Results")
-
+
self.results_panel.add_key_action(curses.KEY_UP, self.results_panel.select_prev , "Select the previous list entry")
self.results_panel.add_key_action(curses.KEY_DOWN, self.results_panel.select_next , "Select the next list entry")
self.results_panel.add_key_action(curses.KEY_HOME, self.results_panel.scroll_begin , "Scroll to the start of the list")
self.results_panel.add_key_action(curses.KEY_END, self.results_panel.scroll_end , "Scroll to the end of the list")
self.results_panel.add_key_action(curses.KEY_ENTER, self.show_info_panel , "Display info for the selected result item")
- self.results_panel.add_key_action('.', lambda : self.toggle_status('success') , "Toggle showing/hiding tests whose status is 'success'")
- self.results_panel.add_key_action('e', lambda : self.toggle_status('error') , "Toggle showing/hiding tests whose status is 'error'")
- self.results_panel.add_key_action('f', lambda : self.toggle_status('failure') , "Toggle showing/hiding tests whose status is 'failure'")
- self.results_panel.add_key_action('s', lambda : self.toggle_status('skip') , "Toggle showing/hiding tests whose status is 'skip'")
- self.results_panel.add_key_action('x', lambda : self.toggle_status('expected_failure') , "Toggle showing/hiding tests whose status is 'expected_failure'")
- self.results_panel.add_key_action('?', lambda : self.toggle_status('unexpected_success'), "Toggle showing/hiding tests whose status is 'unexpected_success'")
+ self.results_panel.add_key_action('.', lambda : self.toggle_status(EventBuilder.STATUS_SUCCESS) , "Toggle showing/hiding tests whose status is 'success'")
+ self.results_panel.add_key_action('e', lambda : self.toggle_status(EventBuilder.STATUS_ERROR) , "Toggle showing/hiding tests whose status is 'error'")
+ self.results_panel.add_key_action('f', lambda : self.toggle_status(EventBuilder.STATUS_FAILURE) , "Toggle showing/hiding tests whose status is 'failure'")
+ self.results_panel.add_key_action('s', lambda : self.toggle_status(EventBuilder.STATUS_SKIP) , "Toggle showing/hiding tests whose status is 'skip'")
+ self.results_panel.add_key_action('x', lambda : self.toggle_status(EventBuilder.STATUS_EXPECTED_FAILURE) , "Toggle showing/hiding tests whose status is 'expected_failure'")
+ self.results_panel.add_key_action('?', lambda : self.toggle_status(EventBuilder.STATUS_UNEXPECTED_SUCCESS), "Toggle showing/hiding tests whose status is 'unexpected_success'")
self.status_panel = lldbcurses.StatusPanel(frame=status_frame)
-
+
self.main_window.add_child(self.job_panel)
self.main_window.add_child(self.results_panel)
self.main_window.add_child(self.status_panel)
self.main_window.set_first_responder(self.results_panel)
-
+
self.status_panel.add_status_item(name="time", title="Elapsed", format="%s", width=20, value="0:00:00", update=False)
- self.status_panel.add_status_item(name="success", title="Success", format="%u", width=20, value=0, update=False)
- self.status_panel.add_status_item(name="failure", title="Failure", format="%u", width=20, value=0, update=False)
- self.status_panel.add_status_item(name="error", title="Error", format="%u", width=20, value=0, update=False)
- self.status_panel.add_status_item(name="skip", title="Skipped", format="%u", width=20, value=0, update=True)
- self.status_panel.add_status_item(name="expected_failure", title="Expected Failure", format="%u", width=30, value=0, update=False)
- self.status_panel.add_status_item(name="unexpected_success", title="Unexpected Success", format="%u", width=30, value=0, update=False)
+ self.status_panel.add_status_item(name=EventBuilder.STATUS_SUCCESS, title="Success", format="%u", width=20, value=0, update=False)
+ self.status_panel.add_status_item(name=EventBuilder.STATUS_FAILURE, title="Failure", format="%u", width=20, value=0, update=False)
+ self.status_panel.add_status_item(name=EventBuilder.STATUS_ERROR, title="Error", format="%u", width=20, value=0, update=False)
+ self.status_panel.add_status_item(name=EventBuilder.STATUS_SKIP, title="Skipped", format="%u", width=20, value=0, update=True)
+ self.status_panel.add_status_item(name=EventBuilder.STATUS_EXPECTED_FAILURE, title="Expected Failure", format="%u", width=30, value=0, update=False)
+ self.status_panel.add_status_item(name=EventBuilder.STATUS_UNEXPECTED_SUCCESS, title="Unexpected Success", format="%u", width=30, value=0, update=False)
self.main_window.refresh()
elif event == 'terminate':
#self.main_window.key_event_loop()
lldbcurses.terminate_curses()
check_for_one_key = False
self.using_terminal = False
- # Check for 1 keypress with no delay
-
+ # Check for 1 keypress with no delay
+
# Check for 1 keypress with no delay
if check_for_one_key:
- self.main_window.key_event_loop(0, 1)
-
+ self.main_window.key_event_loop(0, 1)
diff --git a/lldb/packages/Python/lldbsuite/test/dosep.py b/lldb/packages/Python/lldbsuite/test/dosep.py
index b4d2208845a..c98136f3634 100644
--- a/lldb/packages/Python/lldbsuite/test/dosep.py
+++ b/lldb/packages/Python/lldbsuite/test/dosep.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
"""
Run the test suite using a separate process for each test file.
@@ -55,6 +53,7 @@ import lldbsuite.support.seven as seven
from . import dotest_channels
from . import dotest_args
+from . import test_results
# Todo: Convert this folder layout to be relative-import friendly and don't hack up
# sys.path like this
@@ -1406,33 +1405,68 @@ def main(print_details_on_success, num_threads, test_subdir,
test_name = os.path.splitext(xtime)[0]
touch(os.path.join(session_dir, "{}-{}".format(result, test_name)))
- print()
- sys.stdout.write("Ran %d test suites" % num_test_files)
- if num_test_files > 0:
- sys.stdout.write(" (%d failed) (%f%%)" % (
- len(failed), 100.0 * len(failed) / num_test_files))
- print()
- sys.stdout.write("Ran %d test cases" % num_test_cases)
- if num_test_cases > 0:
- sys.stdout.write(" (%d failed) (%f%%)" % (
- fail_count, 100.0 * fail_count / num_test_cases))
- print()
- exit_code = 0
-
- if len(failed) > 0:
- failed.sort()
- print("Failing Tests (%d)" % len(failed))
- for f in failed:
- print("%s: LLDB (suite) :: %s (%s)" % (
- "TIMEOUT" if f in timed_out else "FAIL", f, system_info
- ))
- exit_code = 1
-
- if len(unexpected_successes) > 0:
- unexpected_successes.sort()
- print("\nUnexpected Successes (%d)" % len(unexpected_successes))
- for u in unexpected_successes:
- print("UNEXPECTED SUCCESS: LLDB (suite) :: %s (%s)" % (u, system_info))
+ # Only run the old summary logic if we don't have a results formatter
+ # that already prints the summary.
+ if results_formatter is None or not results_formatter.replaces_summary():
+ print_legacy_summary = True
+ else:
+ print_legacy_summary = False
+
+ if not print_legacy_summary:
+ # Remove this timeout handling once
+ # https://llvm.org/bugs/show_bug.cgi?id=25703
+ # is addressed.
+ #
+ # Use non-event-based structures to count timeouts.
+ timeout_count = len(timed_out)
+ if timeout_count > 0:
+ failed.sort()
+ print("Timed out test files: {}".format(len(timed_out)))
+ for f in failed:
+ if f in timed_out:
+ print("TIMEOUT: %s (%s)" % (f, system_info))
+
+ # Figure out exit code by count of test result types.
+ issue_count = (
+ results_formatter.counts_by_test_result_status(
+ test_results.EventBuilder.STATUS_ERROR) +
+ results_formatter.counts_by_test_result_status(
+ test_results.EventBuilder.STATUS_FAILURE) +
+ timeout_count)
+ # Return with appropriate result code
+ if issue_count > 0:
+ sys.exit(1)
+ else:
+ sys.exit(0)
+ else:
+ # Print the legacy test results summary.
+ print()
+ sys.stdout.write("Ran %d test suites" % num_test_files)
+ if num_test_files > 0:
+ sys.stdout.write(" (%d failed) (%f%%)" % (
+ len(failed), 100.0 * len(failed) / num_test_files))
+ print()
+ sys.stdout.write("Ran %d test cases" % num_test_cases)
+ if num_test_cases > 0:
+ sys.stdout.write(" (%d failed) (%f%%)" % (
+ fail_count, 100.0 * fail_count / num_test_cases))
+ print()
+ exit_code = 0
+
+ if len(failed) > 0:
+ failed.sort()
+ print("Failing Tests (%d)" % len(failed))
+ for f in failed:
+ print("%s: LLDB (suite) :: %s (%s)" % (
+ "TIMEOUT" if f in timed_out else "FAIL", f, system_info
+ ))
+ exit_code = 1
+
+ if len(unexpected_successes) > 0:
+ unexpected_successes.sort()
+ print("\nUnexpected Successes (%d)" % len(unexpected_successes))
+ for u in unexpected_successes:
+ print("UNEXPECTED SUCCESS: LLDB (suite) :: %s (%s)" % (u, system_info))
sys.exit(exit_code)
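
Reduced to its essentials, the formatter-driven path above decides the process exit code from the event counts plus the timeout list; a condensed sketch of the same logic:

    # Fail the run when any errors, failures, or timeouts were seen.
    issue_count = (
        results_formatter.counts_by_test_result_status(
            test_results.EventBuilder.STATUS_ERROR) +
        results_formatter.counts_by_test_result_status(
            test_results.EventBuilder.STATUS_FAILURE) +
        len(timed_out))
    sys.exit(1 if issue_count > 0 else 0)
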
diff --git a/lldb/packages/Python/lldbsuite/test/test_results.py b/lldb/packages/Python/lldbsuite/test/test_results.py
index 6f9cefe31d6..12fb2e51d77 100644
--- a/lldb/packages/Python/lldbsuite/test/test_results.py
+++ b/lldb/packages/Python/lldbsuite/test/test_results.py
@@ -29,11 +29,20 @@ from six.moves import cPickle
# LLDB modules
+
class EventBuilder(object):
"""Helper class to build test result event dictionaries."""
BASE_DICTIONARY = None
+ # Test Status Tags
+ STATUS_SUCCESS = "success"
+ STATUS_FAILURE = "failure"
+ STATUS_EXPECTED_FAILURE = "expected_failure"
+ STATUS_UNEXPECTED_SUCCESS = "unexpected_success"
+ STATUS_SKIP = "skip"
+ STATUS_ERROR = "error"
+
@staticmethod
def _get_test_name_info(test):
"""Returns (test-class-name, test-method-name) from a test case instance.
@@ -182,7 +191,8 @@ class EventBuilder(object):
@return the event dictionary
"""
- return EventBuilder._event_dictionary_test_result(test, "success")
+ return EventBuilder._event_dictionary_test_result(
+ test, EventBuilder.STATUS_SUCCESS)
@staticmethod
def event_for_unexpected_success(test, bugnumber):
@@ -199,7 +209,7 @@ class EventBuilder(object):
"""
event = EventBuilder._event_dictionary_test_result(
- test, "unexpected_success")
+ test, EventBuilder.STATUS_UNEXPECTED_SUCCESS)
if bugnumber:
event["bugnumber"] = str(bugnumber)
return event
@@ -216,7 +226,7 @@ class EventBuilder(object):
@return the event dictionary
"""
return EventBuilder._event_dictionary_issue(
- test, "failure", error_tuple)
+ test, EventBuilder.STATUS_FAILURE, error_tuple)
@staticmethod
def event_for_expected_failure(test, error_tuple, bugnumber):
@@ -234,7 +244,7 @@ class EventBuilder(object):
"""
event = EventBuilder._event_dictionary_issue(
- test, "expected_failure", error_tuple)
+ test, EventBuilder.STATUS_EXPECTED_FAILURE, error_tuple)
if bugnumber:
event["bugnumber"] = str(bugnumber)
return event
@@ -249,7 +259,8 @@ class EventBuilder(object):
@return the event dictionary
"""
- event = EventBuilder._event_dictionary_test_result(test, "skip")
+ event = EventBuilder._event_dictionary_test_result(
+ test, EventBuilder.STATUS_SKIP)
event["skip_reason"] = reason
return event
@@ -264,7 +275,8 @@ class EventBuilder(object):
@return the event dictionary
"""
- return EventBuilder._event_dictionary_issue(test, "error", error_tuple)
+ return EventBuilder._event_dictionary_issue(
+ test, EventBuilder.STATUS_ERROR, error_tuple)
@staticmethod
def event_for_cleanup_error(test, error_tuple):
@@ -279,7 +291,7 @@ class EventBuilder(object):
@return the event dictionary
"""
event = EventBuilder._event_dictionary_issue(
- test, "error", error_tuple)
+ test, EventBuilder.STATUS_ERROR, error_tuple)
event["issue_phase"] = "cleanup"
return event
@@ -376,7 +388,6 @@ class ResultsFormatter(object):
expectations about when the call should be chained.
"""
-
@classmethod
def arg_parser(cls):
"""@return arg parser used to parse formatter-specific options."""
@@ -396,6 +407,16 @@ class ResultsFormatter(object):
self.start_time_by_test = {}
self.terminate_called = False
+ # Store counts of test_result events by status.
+ self.result_status_counts = {
+ EventBuilder.STATUS_SUCCESS: 0,
+ EventBuilder.STATUS_EXPECTED_FAILURE: 0,
+ EventBuilder.STATUS_SKIP: 0,
+ EventBuilder.STATUS_UNEXPECTED_SUCCESS: 0,
+ EventBuilder.STATUS_FAILURE: 0,
+ EventBuilder.STATUS_ERROR: 0
+ }
+
# Lock that we use while mutating inner state, like the
# total test count and the elements. We minimize how
# long we hold the lock just to keep inner state safe, not
@@ -417,14 +438,19 @@ class ResultsFormatter(object):
# atexit() cleanup can call the "terminate if it hasn't been
# called yet".
if test_event is not None:
- if test_event.get("event", "") == "terminate":
+ event_type = test_event.get("event", "")
+ if event_type == "terminate":
self.terminate_called = True
+ elif event_type == "test_result":
+ # Keep track of event counts per test result status type
+ status = test_event["status"]
+ self.result_status_counts[status] += 1
def track_start_time(self, test_class, test_name, start_time):
- """Tracks the start time of a test so elapsed time can be computed.
+ """tracks the start time of a test so elapsed time can be computed.
- This alleviates the need for test results to be processed serially
- by test. It will save the start time for the test so that
+ this alleviates the need for test results to be processed serially
+ by test. it will save the start time for the test so that
elapsed_time_for_test() can compute the elapsed time properly.
"""
if test_class is None or test_name is None:
@@ -435,9 +461,9 @@ class ResultsFormatter(object):
self.start_time_by_test[test_key] = start_time
def elapsed_time_for_test(self, test_class, test_name, end_time):
- """Returns the elapsed time for a test.
+ """returns the elapsed time for a test.
- This function can only be called once per test and requires that
+ this function can only be called once per test and requires that
the track_start_time() method be called sometime prior to calling
this method.
"""
@@ -454,16 +480,38 @@ class ResultsFormatter(object):
return end_time - start_time
def is_using_terminal(self):
- """Returns True if this results formatter is using the terminal and
+ """returns true if this results formatter is using the terminal and
output should be avoided."""
return self.using_terminal
def send_terminate_as_needed(self):
- """Sends the terminate event if it hasn't been received yet."""
+ """sends the terminate event if it hasn't been received yet."""
if not self.terminate_called:
terminate_event = EventBuilder.bare_event("terminate")
self.handle_event(terminate_event)
+ # Derived classes may require self access
+ # pylint: disable=no-self-use
+ def replaces_summary(self):
+ """Returns whether the results formatter includes a summary
+ suitable to replace the old lldb test run results.
+
+ @return True if the lldb test runner can skip its summary
+ generation when using this results formatter; False otherwise.
+ """
+ return False
+
+ def counts_by_test_result_status(self, status):
+ """Returns number of test method results for the given status.
+
+ @param status a test result status (e.g. success, fail, skip)
+ as defined by the EventBuilder.STATUS_* class members.
+
+ @return an integer returning the number of test methods matching
+ the given test result status.
+ """
+ return self.result_status_counts[status]
+
class XunitFormatter(ResultsFormatter):
"""Provides xUnit-style formatted output.
@@ -527,7 +575,8 @@ class XunitFormatter(ResultsFormatter):
unicode_content = str_or_unicode.decode('utf-8')
else:
unicode_content = str_or_unicode
- return self.invalid_xml_re.sub(six.u('?'), unicode_content).encode('utf-8')
+ return self.invalid_xml_re.sub(
+ six.u('?'), unicode_content).encode('utf-8')
@classmethod
def arg_parser(cls):
@@ -622,12 +671,14 @@ class XunitFormatter(ResultsFormatter):
}
self.status_handlers = {
- "success": self._handle_success,
- "failure": self._handle_failure,
- "error": self._handle_error,
- "skip": self._handle_skip,
- "expected_failure": self._handle_expected_failure,
- "unexpected_success": self._handle_unexpected_success
+ EventBuilder.STATUS_SUCCESS: self._handle_success,
+ EventBuilder.STATUS_FAILURE: self._handle_failure,
+ EventBuilder.STATUS_ERROR: self._handle_error,
+ EventBuilder.STATUS_SKIP: self._handle_skip,
+ EventBuilder.STATUS_EXPECTED_FAILURE:
+ self._handle_expected_failure,
+ EventBuilder.STATUS_UNEXPECTED_SUCCESS:
+ self._handle_unexpected_success
}
def handle_event(self, test_event):
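
As a usage illustration of the new ResultsFormatter hooks, here is a hypothetical minimal formatter that opts in to replacing the legacy dosep.py summary and reads the counts tallied by the base class (the class name and its one-line output are invented):

    class OneLineSummaryFormatter(ResultsFormatter):
        """Hypothetical formatter built on the new hooks."""

        def replaces_summary(self):
            # Tells dosep.py to skip its legacy summary block.
            return True

        def handle_event(self, test_event):
            # The base class tallies result_status_counts for
            # test_result events before we act on them here.
            super(OneLineSummaryFormatter, self).handle_event(test_event)
            if test_event and test_event.get("event", "") == "terminate":
                self.out_file.write("failures: {} errors: {}\n".format(
                    self.counts_by_test_result_status(
                        EventBuilder.STATUS_FAILURE),
                    self.counts_by_test_result_status(
                        EventBuilder.STATUS_ERROR)))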