Diffstat (limited to 'poky/meta/lib/oeqa/core')
-rw-r--r--  poky/meta/lib/oeqa/core/context.py | 1
-rw-r--r--  poky/meta/lib/oeqa/core/decorator/depends.py | 9
-rw-r--r--  poky/meta/lib/oeqa/core/decorator/oetimeout.py | 40
-rw-r--r--  poky/meta/lib/oeqa/core/loader.py | 2
-rw-r--r--  poky/meta/lib/oeqa/core/runner.py | 186
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded.py | 12
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_alone.py | 8
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_depends.py | 10
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_module.py | 12
-rw-r--r--  poky/meta/lib/oeqa/core/tests/common.py | 10
-rwxr-xr-x  poky/meta/lib/oeqa/core/tests/test_data.py | 4
-rwxr-xr-x  poky/meta/lib/oeqa/core/tests/test_decorators.py | 12
-rwxr-xr-x  poky/meta/lib/oeqa/core/tests/test_loader.py | 30
-rw-r--r--  poky/meta/lib/oeqa/core/threaded.py | 275
14 files changed, 111 insertions(+), 500 deletions(-)
diff --git a/poky/meta/lib/oeqa/core/context.py b/poky/meta/lib/oeqa/core/context.py
index acd547416..ef008454f 100644
--- a/poky/meta/lib/oeqa/core/context.py
+++ b/poky/meta/lib/oeqa/core/context.py
@@ -27,7 +27,6 @@ class OETestContext(object):
self.logger = logger
self._registry = {}
self._registry['cases'] = collections.OrderedDict()
- self._results = {}
def _read_modules_from_manifest(self, manifest):
if not os.path.exists(manifest):
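The context no longer keeps its own _results dictionary; instead the runner's OETestResult instance is injected into the context (tc.results, see the runner.py change below), and consumers read the standard unittest result attributes from it. A minimal sketch of the new access pattern, using unittest.TestResult as a stand-in for OETestResult:

    # Sketch only: tc is any object the runner attaches its result to.
    import unittest

    class Context:
        pass

    tc = Context()
    tc.results = unittest.TestResult()     # runner.py now does: tc.results = self

    # old: tc._results['failures'] -> list of (test, traceback) tuples
    # new: tc.results.failures     -> same data via the standard attribute
    for test, traceback in tc.results.failures:
        print(test.id(), traceback)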
diff --git a/poky/meta/lib/oeqa/core/decorator/depends.py b/poky/meta/lib/oeqa/core/decorator/depends.py
index baa04341c..69c604d8f 100644
--- a/poky/meta/lib/oeqa/core/decorator/depends.py
+++ b/poky/meta/lib/oeqa/core/decorator/depends.py
@@ -3,7 +3,6 @@
from unittest import SkipTest
-from oeqa.core.threaded import OETestRunnerThreaded
from oeqa.core.exception import OEQADependency
from . import OETestDiscover, registerDecorator
@@ -64,16 +63,10 @@ def _order_test_case_by_depends(cases, depends):
return [cases[case_id] for case_id in cases_ordered]
def _skipTestDependency(case, depends):
- if isinstance(case.tc.runner, OETestRunnerThreaded):
- import threading
- results = case.tc._results[threading.get_ident()]
- else:
- results = case.tc._results
-
skipReasons = ['errors', 'failures', 'skipped']
for reason in skipReasons:
- for test, _ in results[reason]:
+ for test, _ in getattr(case.tc.results, reason):
if test.id() in depends:
raise SkipTest("Test case %s depends on %s and was in %s." \
% (case.id(), test.id(), reason))
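For reference, a hedged sketch of the reworked dependency check above: instead of indexing a per-thread results dictionary, it reads the errors/failures/skipped lists straight off the results object by attribute name (the function and argument names below are illustrative, not the upstream signature):

    from unittest import SkipTest

    def skip_if_dependency_unmet(case_id, depends, results):
        # results is the OETestResult (or any unittest result) attached to the context
        for reason in ('errors', 'failures', 'skipped'):
            for test, _ in getattr(results, reason):
                if test.id() in depends:
                    raise SkipTest("Test case %s depends on %s and was in %s."
                                   % (case_id, test.id(), reason))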
diff --git a/poky/meta/lib/oeqa/core/decorator/oetimeout.py b/poky/meta/lib/oeqa/core/decorator/oetimeout.py
index f85e7d979..a247583f7 100644
--- a/poky/meta/lib/oeqa/core/decorator/oetimeout.py
+++ b/poky/meta/lib/oeqa/core/decorator/oetimeout.py
@@ -1,12 +1,8 @@
# Copyright (C) 2016 Intel Corporation
# Released under the MIT license (see COPYING.MIT)
-from . import OETestDecorator, registerDecorator
-
import signal
-from threading import Timer
-
-from oeqa.core.threaded import OETestRunnerThreaded
+from . import OETestDecorator, registerDecorator
from oeqa.core.exception import OEQATimeoutError
@registerDecorator
@@ -14,32 +10,16 @@ class OETimeout(OETestDecorator):
attrs = ('oetimeout',)
def setUpDecorator(self):
- self.logger.debug("Setting up a %d second(s) timeout" % self.oetimeout)
-
- if isinstance(self.case.tc.runner, OETestRunnerThreaded):
- self.timeouted = False
- def _timeoutHandler():
- self.timeouted = True
-
- self.timer = Timer(self.oetimeout, _timeoutHandler)
- self.timer.start()
- else:
- timeout = self.oetimeout
- def _timeoutHandler(signum, frame):
- raise OEQATimeoutError("Timed out after %s "
+ timeout = self.oetimeout
+ def _timeoutHandler(signum, frame):
+ raise OEQATimeoutError("Timed out after %s "
"seconds of execution" % timeout)
- self.alarmSignal = signal.signal(signal.SIGALRM, _timeoutHandler)
- signal.alarm(self.oetimeout)
+ self.logger.debug("Setting up a %d second(s) timeout" % self.oetimeout)
+ self.alarmSignal = signal.signal(signal.SIGALRM, _timeoutHandler)
+ signal.alarm(self.oetimeout)
def tearDownDecorator(self):
- if isinstance(self.case.tc.runner, OETestRunnerThreaded):
- self.timer.cancel()
- self.logger.debug("Removed Timer handler")
- if self.timeouted:
- raise OEQATimeoutError("Timed out after %s "
- "seconds of execution" % self.oetimeout)
- else:
- signal.alarm(0)
- signal.signal(signal.SIGALRM, self.alarmSignal)
- self.logger.debug("Removed SIGALRM handler")
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.alarmSignal)
+ self.logger.debug("Removed SIGALRM handler")
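With the threaded Timer branch removed, OETimeout always uses the SIGALRM pattern shown above. A self-contained sketch of that pattern outside the oeqa classes (POSIX only; the built-in TimeoutError stands in for OEQATimeoutError):

    import signal
    import time

    def run_with_timeout(func, seconds):
        def _handler(signum, frame):
            raise TimeoutError("Timed out after %s seconds of execution" % seconds)

        previous = signal.signal(signal.SIGALRM, _handler)   # setUpDecorator
        signal.alarm(seconds)
        try:
            return func()
        finally:
            signal.alarm(0)                                   # tearDownDecorator
            signal.signal(signal.SIGALRM, previous)

    # run_with_timeout(lambda: time.sleep(2), 1) raises TimeoutError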
diff --git a/poky/meta/lib/oeqa/core/loader.py b/poky/meta/lib/oeqa/core/loader.py
index 98fc0f696..2255cf1dc 100644
--- a/poky/meta/lib/oeqa/core/loader.py
+++ b/poky/meta/lib/oeqa/core/loader.py
@@ -24,7 +24,7 @@ from oeqa.core.decorator import decoratorClasses, OETestDecorator, \
# Generate the function definition because this differs across Python versions.
# Python >= 3.4.4 uses three parameters instead of four, but for example Python 3.5.3
# uses four parameters, so the change isn't incremental.
-_failed_test_args = inspect.getargspec(unittest.loader._make_failed_test).args
+_failed_test_args = inspect.getfullargspec(unittest.loader._make_failed_test).args
exec("""def _make_failed_test(%s): raise exception""" % ', '.join(_failed_test_args))
unittest.loader._make_failed_test = _make_failed_test
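The loader change replaces inspect.getargspec (deprecated and later removed in Python 3) with inspect.getfullargspec; both report positional parameter names via .args, which is all the generated shim needs. A quick illustration with a hypothetical four-parameter function:

    import inspect

    def _example_failed_test(classname, methodname, exception, suiteClass):
        pass

    spec = inspect.getfullargspec(_example_failed_test)
    print(spec.args)            # ['classname', 'methodname', 'exception', 'suiteClass']
    print(', '.join(spec.args)) # usable directly in the exec'd "def ..." string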
diff --git a/poky/meta/lib/oeqa/core/runner.py b/poky/meta/lib/oeqa/core/runner.py
index 13cdf5ba5..f8bb23f34 100644
--- a/poky/meta/lib/oeqa/core/runner.py
+++ b/poky/meta/lib/oeqa/core/runner.py
@@ -6,17 +6,10 @@ import time
import unittest
import logging
import re
+import json
-xmlEnabled = False
-try:
- import xmlrunner
- from xmlrunner.result import _XMLTestResult as _TestResult
- from xmlrunner.runner import XMLTestRunner as _TestRunner
- xmlEnabled = True
-except ImportError:
- # use the base runner instead
- from unittest import TextTestResult as _TestResult
- from unittest import TextTestRunner as _TestRunner
+from unittest import TextTestResult as _TestResult
+from unittest import TextTestRunner as _TestRunner
class OEStreamLogger(object):
def __init__(self, logger):
@@ -42,8 +35,12 @@ class OETestResult(_TestResult):
def __init__(self, tc, *args, **kwargs):
super(OETestResult, self).__init__(*args, **kwargs)
+ self.successes = []
+
+ # Inject into tc so that TestDepends decorator can see results
+ tc.results = self
+
self.tc = tc
- self._tc_map_results()
def startTest(self, test):
# Allow us to trigger the testcase buffer mode on a per test basis
@@ -53,12 +50,6 @@ class OETestResult(_TestResult):
self.buffer = test.buffer
super(OETestResult, self).startTest(test)
- def _tc_map_results(self):
- self.tc._results['failures'] = self.failures
- self.tc._results['errors'] = self.errors
- self.tc._results['skipped'] = self.skipped
- self.tc._results['expectedFailures'] = self.expectedFailures
-
def logSummary(self, component, context_msg=''):
elapsed_time = self.tc._run_end_time - self.tc._run_start_time
self.tc.logger.info("SUMMARY:")
@@ -70,67 +61,60 @@ class OETestResult(_TestResult):
msg = "%s - OK - All required tests passed" % component
else:
msg = "%s - FAIL - Required tests failed" % component
- skipped = len(self.tc._results['skipped'])
- if skipped:
- msg += " (skipped=%d)" % skipped
+ msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
self.tc.logger.info(msg)
- def _getDetailsNotPassed(self, case, type, desc):
- found = False
+ def _getTestResultDetails(self, case):
+ result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
+ 'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED'}
- for (scase, msg) in self.tc._results[type]:
- # XXX: When XML reporting is enabled scase is
- # xmlrunner.result._TestInfo instance instead of
- # string.
- if xmlEnabled:
- if case.id() == scase.test_id:
- found = True
- break
- scase_str = scase.test_id
- else:
- if case == scase:
+ for rtype in result_types:
+ found = False
+ for (scase, msg) in getattr(self, rtype):
+ if case.id() == scase.id():
found = True
break
- scase_str = str(scase)
+ scase_str = str(scase.id())
- # When fails at module or class level the class name is passed as string
- # so figure out to see if match
- m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
- if m:
- if case.__class__.__module__ == m.group('module_name'):
- found = True
- break
+            # When it fails at module or class level the class name is passed
+            # as a string, so check whether it matches
+ m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
+ if m:
+ if case.__class__.__module__ == m.group('module_name'):
+ found = True
+ break
- m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
- if m:
- class_name = "%s.%s" % (case.__class__.__module__,
- case.__class__.__name__)
+ m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
+ if m:
+ class_name = "%s.%s" % (case.__class__.__module__,
+ case.__class__.__name__)
- if class_name == m.group('class_name'):
- found = True
- break
+ if class_name == m.group('class_name'):
+ found = True
+ break
+
+ if found:
+ return result_types[rtype], msg
- if found:
- return (found, msg)
+ return 'UNKNOWN', None
- return (found, None)
+ def addSuccess(self, test):
+ #Added so we can keep track of successes too
+ self.successes.append((test, None))
+ super(OETestResult, self).addSuccess(test)
- def logDetails(self):
+ def logDetails(self, json_file_dir=None, configuration=None, result_id=None):
self.tc.logger.info("RESULTS:")
+
+ result = {}
+ logs = {}
+ if hasattr(self.tc, "extraresults"):
+ result = self.tc.extraresults
+
for case_name in self.tc._registry['cases']:
case = self.tc._registry['cases'][case_name]
- result_types = ['failures', 'errors', 'skipped', 'expectedFailures']
- result_desc = ['FAILED', 'ERROR', 'SKIPPED', 'EXPECTEDFAIL']
-
- fail = False
- desc = None
- for idx, name in enumerate(result_types):
- (fail, msg) = self._getDetailsNotPassed(case, result_types[idx],
- result_desc[idx])
- if fail:
- desc = result_desc[idx]
- break
+ (status, log) = self._getTestResultDetails(case)
oeid = -1
if hasattr(case, 'decorators'):
@@ -138,12 +122,27 @@ class OETestResult(_TestResult):
if hasattr(d, 'oeid'):
oeid = d.oeid
- if fail:
- self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(),
- oeid, desc))
+ if status not in logs:
+ logs[status] = []
+ logs[status].append("RESULTS - %s - Testcase %s: %s" % (case.id(), oeid, status))
+ if log:
+ result[case.id()] = {'status': status, 'log': log}
else:
- self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(),
- oeid, 'PASSED'))
+ result[case.id()] = {'status': status}
+
+ for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
+ if i not in logs:
+ continue
+ for l in logs[i]:
+ self.tc.logger.info(l)
+
+ if json_file_dir:
+ tresultjsonhelper = OETestResultJSONHelper()
+ tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
+
+ def wasSuccessful(self):
+        # Override as unexpected successes aren't failures for us
+ return (len(self.failures) == len(self.errors) == 0)
class OEListTestsResult(object):
def wasSuccessful(self):
@@ -153,33 +152,14 @@ class OETestRunner(_TestRunner):
streamLoggerClass = OEStreamLogger
def __init__(self, tc, *args, **kwargs):
- if xmlEnabled:
- if not kwargs.get('output'):
- kwargs['output'] = os.path.join(os.getcwd(),
- 'TestResults_%s_%s' % (time.strftime("%Y%m%d%H%M%S"), os.getpid()))
-
kwargs['stream'] = self.streamLoggerClass(tc.logger)
super(OETestRunner, self).__init__(*args, **kwargs)
self.tc = tc
self.resultclass = OETestResult
- # XXX: The unittest-xml-reporting package defines _make_result method instead
- # of _makeResult standard on unittest.
- if xmlEnabled:
- def _make_result(self):
- """
- Creates a TestResult object which will be used to store
- information about the executed tests.
- """
- # override in subclasses if necessary.
- return self.resultclass(self.tc,
- self.stream, self.descriptions, self.verbosity, self.elapsed_times
- )
- else:
- def _makeResult(self):
- return self.resultclass(self.tc, self.stream, self.descriptions,
- self.verbosity)
-
+ def _makeResult(self):
+ return self.resultclass(self.tc, self.stream, self.descriptions,
+ self.verbosity)
def _walk_suite(self, suite, func):
for obj in suite:
@@ -275,3 +255,29 @@ class OETestRunner(_TestRunner):
self._list_tests_module(suite)
return OEListTestsResult()
+
+class OETestResultJSONHelper(object):
+
+ testresult_filename = 'testresults.json'
+
+ def _get_existing_testresults_if_available(self, write_dir):
+ testresults = {}
+ file = os.path.join(write_dir, self.testresult_filename)
+ if os.path.exists(file):
+ with open(file, "r") as f:
+ testresults = json.load(f)
+ return testresults
+
+ def _write_file(self, write_dir, file_name, file_content):
+ file_path = os.path.join(write_dir, file_name)
+ with open(file_path, 'w') as the_file:
+ the_file.write(file_content)
+
+ def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
+ bb.utils.mkdirhier(write_dir)
+ lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
+ test_results = self._get_existing_testresults_if_available(write_dir)
+ test_results[result_id] = {'configuration': configuration, 'result': test_result}
+ json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
+ self._write_file(write_dir, self.testresult_filename, json_testresults)
+ bb.utils.unlockfile(lf)
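Based on dump_testresult_file above, testresults.json maps each result_id to its configuration plus a per-case status/log dictionary. A sketch of the resulting layout with made-up identifiers and values:

    import json

    example = {
        "oeselftest_EXAMPLE_20180101000000": {          # hypothetical result_id
            "configuration": {"MACHINE": "qemux86"},    # whatever dict the caller passes
            "result": {
                "module.Class.test_pass": {"status": "PASSED"},
                "module.Class.test_skip": {"status": "SKIPPED", "log": "skip reason"},
                "module.Class.test_fail": {"status": "FAILED", "log": "traceback text"},
            },
        },
    }
    print(json.dumps(example, sort_keys=True, indent=4))

Note that dump_testresult_file itself calls bb.utils.mkdirhier and bb.utils.lockfile, so it only runs where BitBake's bb module is importable; the sketch above sidesteps that dependency.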
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded.py b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded.py
deleted file mode 100644
index 0fe4cb3f1..000000000
--- a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
-
-from oeqa.core.case import OETestCase
-
-class ThreadedTest(OETestCase):
- def test_threaded_no_depends(self):
- self.assertTrue(True, msg='How is this possible?')
-
-class ThreadedTest2(OETestCase):
- def test_threaded_same_module(self):
- self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_alone.py b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_alone.py
deleted file mode 100644
index 905f39784..000000000
--- a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_alone.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
-
-from oeqa.core.case import OETestCase
-
-class ThreadedTestAlone(OETestCase):
- def test_threaded_alone(self):
- self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_depends.py b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_depends.py
deleted file mode 100644
index 0c158d3ba..000000000
--- a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_depends.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
-
-from oeqa.core.case import OETestCase
-from oeqa.core.decorator.depends import OETestDepends
-
-class ThreadedTest3(OETestCase):
- @OETestDepends(['threaded.ThreadedTest.test_threaded_no_depends'])
- def test_threaded_depends(self):
- self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_module.py b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_module.py
deleted file mode 100644
index 63d17e040..000000000
--- a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_module.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
-
-from oeqa.core.case import OETestCase
-
-class ThreadedTestModule(OETestCase):
- def test_threaded_module(self):
- self.assertTrue(True, msg='How is this possible?')
-
-class ThreadedTestModule2(OETestCase):
- def test_threaded_module2(self):
- self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/common.py b/poky/meta/lib/oeqa/core/tests/common.py
index 193232340..52b18a1c3 100644
--- a/poky/meta/lib/oeqa/core/tests/common.py
+++ b/poky/meta/lib/oeqa/core/tests/common.py
@@ -33,13 +33,3 @@ class TestBase(unittest.TestCase):
tc.loadTests(self.cases_path, modules=modules, tests=tests,
filters=filters)
return tc
-
- def _testLoaderThreaded(self, d={}, modules=[],
- tests=[], filters={}):
- from oeqa.core.threaded import OETestContextThreaded
-
- tc = OETestContextThreaded(d, self.logger)
- tc.loadTests(self.cases_path, modules=modules, tests=tests,
- filters=filters)
-
- return tc
diff --git a/poky/meta/lib/oeqa/core/tests/test_data.py b/poky/meta/lib/oeqa/core/tests/test_data.py
index 320468cbe..21b6c68b8 100755
--- a/poky/meta/lib/oeqa/core/tests/test_data.py
+++ b/poky/meta/lib/oeqa/core/tests/test_data.py
@@ -21,7 +21,7 @@ class TestData(TestBase):
tc = self._testLoader(modules=self.modules)
self.assertEqual(False, tc.runTests().wasSuccessful())
- for test, data in tc._results['errors']:
+ for test, data in tc.errors:
expect = False
if expectedException in data:
expect = True
@@ -34,7 +34,7 @@ class TestData(TestBase):
tc = self._testLoader(d=d, modules=self.modules)
self.assertEqual(False, tc.runTests().wasSuccessful())
- for test, data in tc._results['failures']:
+ for test, data in tc.failures:
expect = False
if expectedError in data:
expect = True
diff --git a/poky/meta/lib/oeqa/core/tests/test_decorators.py b/poky/meta/lib/oeqa/core/tests/test_decorators.py
index cf99e0d72..f7d11e885 100755
--- a/poky/meta/lib/oeqa/core/tests/test_decorators.py
+++ b/poky/meta/lib/oeqa/core/tests/test_decorators.py
@@ -131,17 +131,5 @@ class TestTimeoutDecorator(TestBase):
msg = "OETestTimeout didn't restore SIGALRM"
self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)
- def test_timeout_thread(self):
- tests = ['timeout.TimeoutTest.testTimeoutPass']
- msg = 'Failed to run test using OETestTimeout'
- tc = self._testLoaderThreaded(modules=self.modules, tests=tests)
- self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)
-
- def test_timeout_threaded_fail(self):
- tests = ['timeout.TimeoutTest.testTimeoutFail']
- msg = "OETestTimeout test didn't timeout as expected"
- tc = self._testLoaderThreaded(modules=self.modules, tests=tests)
- self.assertFalse(tc.runTests().wasSuccessful(), msg=msg)
-
if __name__ == '__main__':
unittest.main()
diff --git a/poky/meta/lib/oeqa/core/tests/test_loader.py b/poky/meta/lib/oeqa/core/tests/test_loader.py
index e0d917d31..b79b8bad4 100755
--- a/poky/meta/lib/oeqa/core/tests/test_loader.py
+++ b/poky/meta/lib/oeqa/core/tests/test_loader.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
-# Copyright (C) 2016-2017 Intel Corporation
+# Copyright (C) 2016 Intel Corporation
# Released under the MIT license (see COPYING.MIT)
import os
@@ -82,33 +82,5 @@ class TestLoader(TestBase):
msg = 'Expected modules from two different paths'
self.assertEqual(modules, expected_modules, msg=msg)
- def test_loader_threaded(self):
- cases_path = self.cases_path
-
- self.cases_path = [os.path.join(self.cases_path, 'loader', 'threaded')]
-
- tc = self._testLoaderThreaded()
- self.assertEqual(len(tc.suites), 3, "Expected to be 3 suites")
-
- case_ids = ['threaded.ThreadedTest.test_threaded_no_depends',
- 'threaded.ThreadedTest2.test_threaded_same_module',
- 'threaded_depends.ThreadedTest3.test_threaded_depends']
- for case in tc.suites[0]._tests:
- self.assertEqual(case.id(),
- case_ids[tc.suites[0]._tests.index(case)])
-
- case_ids = ['threaded_alone.ThreadedTestAlone.test_threaded_alone']
- for case in tc.suites[1]._tests:
- self.assertEqual(case.id(),
- case_ids[tc.suites[1]._tests.index(case)])
-
- case_ids = ['threaded_module.ThreadedTestModule.test_threaded_module',
- 'threaded_module.ThreadedTestModule2.test_threaded_module2']
- for case in tc.suites[2]._tests:
- self.assertEqual(case.id(),
- case_ids[tc.suites[2]._tests.index(case)])
-
- self.cases_path = cases_path
-
if __name__ == '__main__':
unittest.main()
diff --git a/poky/meta/lib/oeqa/core/threaded.py b/poky/meta/lib/oeqa/core/threaded.py
deleted file mode 100644
index 2cafe03a2..000000000
--- a/poky/meta/lib/oeqa/core/threaded.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
-
-import threading
-import multiprocessing
-import queue
-import time
-
-from unittest.suite import TestSuite
-
-from oeqa.core.loader import OETestLoader
-from oeqa.core.runner import OEStreamLogger, OETestResult, OETestRunner
-from oeqa.core.context import OETestContext
-
-class OETestLoaderThreaded(OETestLoader):
- def __init__(self, tc, module_paths, modules, tests, modules_required,
- filters, process_num=0, *args, **kwargs):
- super(OETestLoaderThreaded, self).__init__(tc, module_paths, modules,
- tests, modules_required, filters, *args, **kwargs)
-
- self.process_num = process_num
-
- def discover(self):
- suite = super(OETestLoaderThreaded, self).discover()
-
- if self.process_num <= 0:
- self.process_num = min(multiprocessing.cpu_count(),
- len(suite._tests))
-
- suites = []
- for _ in range(self.process_num):
- suites.append(self.suiteClass())
-
- def _search_for_module_idx(suites, case):
- """
- Cases in the same module needs to be run
- in the same thread because PyUnit keeps track
- of setUp{Module, Class,} and tearDown{Module, Class,}.
- """
-
- for idx in range(self.process_num):
- suite = suites[idx]
- for c in suite._tests:
- if case.__module__ == c.__module__:
- return idx
-
- return -1
-
- def _search_for_depend_idx(suites, depends):
- """
- Dependency cases needs to be run in the same
- thread, because OEQA framework look at the state
- of dependant test to figure out if skip or not.
- """
-
- for idx in range(self.process_num):
- suite = suites[idx]
-
- for case in suite._tests:
- if case.id() in depends:
- return idx
- return -1
-
- def _get_best_idx(suites):
- sizes = [len(suite._tests) for suite in suites]
- return sizes.index(min(sizes))
-
- def _fill_suites(suite):
- idx = -1
- for case in suite:
- if isinstance(case, TestSuite):
- _fill_suites(case)
- else:
- idx = _search_for_module_idx(suites, case)
-
- depends = {}
- if 'depends' in self.tc._registry:
- depends = self.tc._registry['depends']
-
- if idx == -1 and case.id() in depends:
- case_depends = depends[case.id()]
- idx = _search_for_depend_idx(suites, case_depends)
-
- if idx == -1:
- idx = _get_best_idx(suites)
-
- suites[idx].addTest(case)
- _fill_suites(suite)
-
- suites_tmp = suites
- suites = []
- for suite in suites_tmp:
- if len(suite._tests) > 0:
- suites.append(suite)
-
- return suites
-
-class OEStreamLoggerThreaded(OEStreamLogger):
- _lock = threading.Lock()
- buffers = {}
-
- def write(self, msg):
- tid = threading.get_ident()
-
- if not tid in self.buffers:
- self.buffers[tid] = ""
-
- if msg:
- self.buffers[tid] += msg
-
- def finish(self):
- tid = threading.get_ident()
-
- self._lock.acquire()
- self.logger.info('THREAD: %d' % tid)
- self.logger.info('-' * 70)
- for line in self.buffers[tid].split('\n'):
- self.logger.info(line)
- self._lock.release()
-
-class OETestResultThreadedInternal(OETestResult):
- def _tc_map_results(self):
- tid = threading.get_ident()
-
- # PyUnit generates a result for every test module run, test
- # if the thread already has an entry to avoid lose the previous
- # test module results.
- if not tid in self.tc._results:
- self.tc._results[tid] = {}
- self.tc._results[tid]['failures'] = self.failures
- self.tc._results[tid]['errors'] = self.errors
- self.tc._results[tid]['skipped'] = self.skipped
- self.tc._results[tid]['expectedFailures'] = self.expectedFailures
-
-class OETestResultThreaded(object):
- _results = {}
- _lock = threading.Lock()
-
- def __init__(self, tc):
- self.tc = tc
-
- def _fill_tc_results(self):
- tids = list(self.tc._results.keys())
- fields = ['failures', 'errors', 'skipped', 'expectedFailures']
-
- for tid in tids:
- result = self.tc._results[tid]
- for field in fields:
- if not field in self.tc._results:
- self.tc._results[field] = []
- self.tc._results[field].extend(result[field])
-
- def addResult(self, result, run_start_time, run_end_time):
- tid = threading.get_ident()
-
- self._lock.acquire()
- self._results[tid] = {}
- self._results[tid]['result'] = result
- self._results[tid]['run_start_time'] = run_start_time
- self._results[tid]['run_end_time'] = run_end_time
- self._results[tid]['result'] = result
- self._lock.release()
-
- def wasSuccessful(self):
- wasSuccessful = True
- for tid in self._results.keys():
- wasSuccessful = wasSuccessful and \
- self._results[tid]['result'].wasSuccessful()
- return wasSuccessful
-
- def stop(self):
- for tid in self._results.keys():
- self._results[tid]['result'].stop()
-
- def logSummary(self, component, context_msg=''):
- elapsed_time = (self.tc._run_end_time - self.tc._run_start_time)
-
- self.tc.logger.info("SUMMARY:")
- self.tc.logger.info("%s (%s) - Ran %d tests in %.3fs" % (component,
- context_msg, len(self.tc._registry['cases']), elapsed_time))
- if self.wasSuccessful():
- msg = "%s - OK - All required tests passed" % component
- else:
- msg = "%s - FAIL - Required tests failed" % component
- self.tc.logger.info(msg)
-
- def logDetails(self):
- if list(self._results):
- tid = list(self._results)[0]
- result = self._results[tid]['result']
- result.logDetails()
-
-class _Worker(threading.Thread):
- """Thread executing tasks from a given tasks queue"""
- def __init__(self, tasks, result, stream):
- threading.Thread.__init__(self)
- self.tasks = tasks
-
- self.result = result
- self.stream = stream
-
- def run(self):
- while True:
- try:
- func, args, kargs = self.tasks.get(block=False)
- except queue.Empty:
- break
-
- try:
- run_start_time = time.time()
- rc = func(*args, **kargs)
- run_end_time = time.time()
- self.result.addResult(rc, run_start_time, run_end_time)
- self.stream.finish()
- except Exception as e:
- print(e)
- finally:
- self.tasks.task_done()
-
-class _ThreadedPool:
- """Pool of threads consuming tasks from a queue"""
- def __init__(self, num_workers, num_tasks, stream=None, result=None):
- self.tasks = queue.Queue(num_tasks)
- self.workers = []
-
- for _ in range(num_workers):
- worker = _Worker(self.tasks, result, stream)
- self.workers.append(worker)
-
- def start(self):
- for worker in self.workers:
- worker.start()
-
- def add_task(self, func, *args, **kargs):
- """Add a task to the queue"""
- self.tasks.put((func, args, kargs))
-
- def wait_completion(self):
- """Wait for completion of all the tasks in the queue"""
- self.tasks.join()
- for worker in self.workers:
- worker.join()
-
-class OETestRunnerThreaded(OETestRunner):
- streamLoggerClass = OEStreamLoggerThreaded
-
- def __init__(self, tc, *args, **kwargs):
- super(OETestRunnerThreaded, self).__init__(tc, *args, **kwargs)
- self.resultclass = OETestResultThreadedInternal # XXX: XML reporting overrides at __init__
-
- def run(self, suites):
- result = OETestResultThreaded(self.tc)
-
- pool = _ThreadedPool(len(suites), len(suites), stream=self.stream,
- result=result)
- for s in suites:
- pool.add_task(super(OETestRunnerThreaded, self).run, s)
- pool.start()
- pool.wait_completion()
- result._fill_tc_results()
-
- return result
-
-class OETestContextThreaded(OETestContext):
- loaderClass = OETestLoaderThreaded
- runnerClass = OETestRunnerThreaded
-
- def loadTests(self, module_paths, modules=[], tests=[],
- modules_manifest="", modules_required=[], filters={}, process_num=0):
- if modules_manifest:
- modules = self._read_modules_from_manifest(modules_manifest)
-
- self.loader = self.loaderClass(self, module_paths, modules, tests,
- modules_required, filters, process_num)
- self.suites = self.loader.discover()