Diffstat (limited to 'poky/meta/lib/oeqa/core/runner.py')
-rw-r--r--  poky/meta/lib/oeqa/core/runner.py  186
1 file changed, 96 insertions(+), 90 deletions(-)
diff --git a/poky/meta/lib/oeqa/core/runner.py b/poky/meta/lib/oeqa/core/runner.py
index 13cdf5ba5..f8bb23f34 100644
--- a/poky/meta/lib/oeqa/core/runner.py
+++ b/poky/meta/lib/oeqa/core/runner.py
@@ -6,17 +6,10 @@ import time
import unittest
import logging
import re
+import json
-xmlEnabled = False
-try:
- import xmlrunner
- from xmlrunner.result import _XMLTestResult as _TestResult
- from xmlrunner.runner import XMLTestRunner as _TestRunner
- xmlEnabled = True
-except ImportError:
- # use the base runner instead
- from unittest import TextTestResult as _TestResult
- from unittest import TextTestRunner as _TestRunner
+from unittest import TextTestResult as _TestResult
+from unittest import TextTestRunner as _TestRunner
class OEStreamLogger(object):
def __init__(self, logger):
@@ -42,8 +35,12 @@ class OETestResult(_TestResult):
def __init__(self, tc, *args, **kwargs):
super(OETestResult, self).__init__(*args, **kwargs)
+ self.successes = []
+
+ # Inject into tc so that TestDepends decorator can see results
+ tc.results = self
+
self.tc = tc
- self._tc_map_results()
def startTest(self, test):
# Allow us to trigger the testcase buffer mode on a per test basis
@@ -53,12 +50,6 @@ class OETestResult(_TestResult):
self.buffer = test.buffer
super(OETestResult, self).startTest(test)
- def _tc_map_results(self):
- self.tc._results['failures'] = self.failures
- self.tc._results['errors'] = self.errors
- self.tc._results['skipped'] = self.skipped
- self.tc._results['expectedFailures'] = self.expectedFailures
-
def logSummary(self, component, context_msg=''):
elapsed_time = self.tc._run_end_time - self.tc._run_start_time
self.tc.logger.info("SUMMARY:")
@@ -70,67 +61,60 @@ class OETestResult(_TestResult):
msg = "%s - OK - All required tests passed" % component
else:
msg = "%s - FAIL - Required tests failed" % component
- skipped = len(self.tc._results['skipped'])
- if skipped:
- msg += " (skipped=%d)" % skipped
+ msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
self.tc.logger.info(msg)
- def _getDetailsNotPassed(self, case, type, desc):
- found = False
+ def _getTestResultDetails(self, case):
+ result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
+ 'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED'}
- for (scase, msg) in self.tc._results[type]:
- # XXX: When XML reporting is enabled scase is
- # xmlrunner.result._TestInfo instance instead of
- # string.
- if xmlEnabled:
- if case.id() == scase.test_id:
- found = True
- break
- scase_str = scase.test_id
- else:
- if case == scase:
+ for rtype in result_types:
+ found = False
+ for (scase, msg) in getattr(self, rtype):
+ if case.id() == scase.id():
found = True
break
- scase_str = str(scase)
+ scase_str = str(scase.id())
- # When fails at module or class level the class name is passed as string
- # so figure out to see if match
- m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
- if m:
- if case.__class__.__module__ == m.group('module_name'):
- found = True
- break
+ # When the failure is at module or class level, the class name
+ # is passed as a string, so check whether it matches this case
+ m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
+ if m:
+ if case.__class__.__module__ == m.group('module_name'):
+ found = True
+ break
- m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
- if m:
- class_name = "%s.%s" % (case.__class__.__module__,
- case.__class__.__name__)
+ m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
+ if m:
+ class_name = "%s.%s" % (case.__class__.__module__,
+ case.__class__.__name__)
- if class_name == m.group('class_name'):
- found = True
- break
+ if class_name == m.group('class_name'):
+ found = True
+ break
+
+ if found:
+ return result_types[rtype], msg
- if found:
- return (found, msg)
+ return 'UNKNOWN', None
- return (found, None)
+ def addSuccess(self, test):
+ # Added so we can keep track of successes too
+ self.successes.append((test, None))
+ super(OETestResult, self).addSuccess(test)
- def logDetails(self):
+ def logDetails(self, json_file_dir=None, configuration=None, result_id=None):
self.tc.logger.info("RESULTS:")
+
+ result = {}
+ logs = {}
+ if hasattr(self.tc, "extraresults"):
+ result = self.tc.extraresults
+
for case_name in self.tc._registry['cases']:
case = self.tc._registry['cases'][case_name]
- result_types = ['failures', 'errors', 'skipped', 'expectedFailures']
- result_desc = ['FAILED', 'ERROR', 'SKIPPED', 'EXPECTEDFAIL']
-
- fail = False
- desc = None
- for idx, name in enumerate(result_types):
- (fail, msg) = self._getDetailsNotPassed(case, result_types[idx],
- result_desc[idx])
- if fail:
- desc = result_desc[idx]
- break
+ (status, log) = self._getTestResultDetails(case)
oeid = -1
if hasattr(case, 'decorators'):
@@ -138,12 +122,27 @@ class OETestResult(_TestResult):
if hasattr(d, 'oeid'):
oeid = d.oeid
- if fail:
- self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(),
- oeid, desc))
+ if status not in logs:
+ logs[status] = []
+ logs[status].append("RESULTS - %s - Testcase %s: %s" % (case.id(), oeid, status))
+ if log:
+ result[case.id()] = {'status': status, 'log': log}
else:
- self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(),
- oeid, 'PASSED'))
+ result[case.id()] = {'status': status}
+
+ for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
+ if i not in logs:
+ continue
+ for l in logs[i]:
+ self.tc.logger.info(l)
+
+ if json_file_dir:
+ tresultjsonhelper = OETestResultJSONHelper()
+ tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
+
+ def wasSuccessful(self):
+ # Override since unexpected successes aren't failures for us
+ return (len(self.failures) == len(self.errors) == 0)
class OEListTestsResult(object):
def wasSuccessful(self):
@@ -153,33 +152,14 @@ class OETestRunner(_TestRunner):
streamLoggerClass = OEStreamLogger
def __init__(self, tc, *args, **kwargs):
- if xmlEnabled:
- if not kwargs.get('output'):
- kwargs['output'] = os.path.join(os.getcwd(),
- 'TestResults_%s_%s' % (time.strftime("%Y%m%d%H%M%S"), os.getpid()))
-
kwargs['stream'] = self.streamLoggerClass(tc.logger)
super(OETestRunner, self).__init__(*args, **kwargs)
self.tc = tc
self.resultclass = OETestResult
- # XXX: The unittest-xml-reporting package defines _make_result method instead
- # of _makeResult standard on unittest.
- if xmlEnabled:
- def _make_result(self):
- """
- Creates a TestResult object which will be used to store
- information about the executed tests.
- """
- # override in subclasses if necessary.
- return self.resultclass(self.tc,
- self.stream, self.descriptions, self.verbosity, self.elapsed_times
- )
- else:
- def _makeResult(self):
- return self.resultclass(self.tc, self.stream, self.descriptions,
- self.verbosity)
-
+ def _makeResult(self):
+ return self.resultclass(self.tc, self.stream, self.descriptions,
+ self.verbosity)
def _walk_suite(self, suite, func):
for obj in suite:
@@ -275,3 +255,29 @@ class OETestRunner(_TestRunner):
self._list_tests_module(suite)
return OEListTestsResult()
+
+class OETestResultJSONHelper(object):
+
+ testresult_filename = 'testresults.json'
+
+ def _get_existing_testresults_if_available(self, write_dir):
+ testresults = {}
+ file = os.path.join(write_dir, self.testresult_filename)
+ if os.path.exists(file):
+ with open(file, "r") as f:
+ testresults = json.load(f)
+ return testresults
+
+ def _write_file(self, write_dir, file_name, file_content):
+ file_path = os.path.join(write_dir, file_name)
+ with open(file_path, 'w') as the_file:
+ the_file.write(file_content)
+
+ def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
+ bb.utils.mkdirhier(write_dir)
+ lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
+ test_results = self._get_existing_testresults_if_available(write_dir)
+ test_results[result_id] = {'configuration': configuration, 'result': test_result}
+ json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
+ self._write_file(write_dir, self.testresult_filename, json_testresults)
+ bb.utils.unlockfile(lf)
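
The new OETestResultJSONHelper serializes the accumulated results into a single
testresults.json file, merging with whatever is already on disk and keying each
run by result_id. A minimal standalone sketch of that merge-and-dump flow, using
os.makedirs in place of the bitbake-only bb.utils.mkdirhier/lockfile helpers and
with hypothetical directory, configuration, and result_id values, could look like:

    import json
    import os

    def dump_testresult_file(write_dir, configuration, result_id, test_result):
        # Load any existing testresults.json so results from earlier runs are kept
        os.makedirs(write_dir, exist_ok=True)
        path = os.path.join(write_dir, 'testresults.json')
        results = {}
        if os.path.exists(path):
            with open(path, 'r') as f:
                results = json.load(f)
        # Each run is stored under its result_id alongside its configuration
        results[result_id] = {'configuration': configuration, 'result': test_result}
        with open(path, 'w') as f:
            f.write(json.dumps(results, sort_keys=True, indent=4))

    # Hypothetical usage: record one passed case for a qemux86 runtime run
    dump_testresult_file('/tmp/oeqa-results',
                         {'TEST_TYPE': 'runtime', 'MACHINE': 'qemux86'},
                         'runtime_qemux86_example',
                         {'example.ExampleTest.test_case': {'status': 'PASSED'}})

Note that the patched helper additionally takes a lock file around the
read-merge-write cycle, so concurrent test runs writing into the same
directory do not clobber each other's entries.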