Diffstat (limited to 'test/py/conftest.py')
-rw-r--r--  test/py/conftest.py  189
1 file changed, 130 insertions(+), 59 deletions(-)
diff --git a/test/py/conftest.py b/test/py/conftest.py
index 3e162cafcc..449f98bee3 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -17,10 +17,10 @@ import atexit
import errno
import os
import os.path
-import pexpect
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
+import re
import StringIO
import sys
@@ -71,6 +71,9 @@ def pytest_addoption(parser):
help='U-Boot board identity/instance')
parser.addoption('--build', default=False, action='store_true',
help='Compile U-Boot before running tests')
+ parser.addoption('--gdbserver', default=None,
+ help='Run sandbox under gdbserver. The argument is the channel '+
+ 'over which gdbserver should communicate, e.g. localhost:1234')
def pytest_configure(config):
"""pytest hook: Perform custom initialization at startup time.
@@ -110,6 +113,10 @@ def pytest_configure(config):
persistent_data_dir = build_dir + '/persistent-data'
mkdir_p(persistent_data_dir)
+ gdbserver = config.getoption('gdbserver')
+ if gdbserver and board_type != 'sandbox':
+ raise Exception('--gdbserver only supported with sandbox')
+
import multiplexed_log
log = multiplexed_log.Logfile(result_dir + '/test-log.html')
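Note: the --gdbserver option is only validated here; the code that actually consumes ubconfig.gdbserver lives in the sandbox console module and is not part of this diff. As a rough, hedged sketch of the intent (the function and attribute names below are illustrative, not the real u_boot_console_sandbox API), the sandbox spawn command can be wrapped in gdbserver on the requested channel:

    # Illustrative sketch only; the real handling is in the sandbox console
    # code. 'config' stands in for the ubconfig object populated below.
    def sandbox_spawn_cmd(config):
        cmd = [config.build_dir + '/u-boot']
        if config.gdbserver:
            # Prefix the command so a debugger can attach remotely, e.g.
            # 'target remote localhost:1234' for --gdbserver=localhost:1234.
            cmd = ['gdbserver', config.gdbserver] + cmd
        return cmd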
@@ -122,10 +129,12 @@ def pytest_configure(config):
['make', o_opt, '-s', board_type + '_defconfig'],
['make', o_opt, '-s', '-j8'],
)
- runner = log.get_runner('make', sys.stdout)
- for cmd in cmds:
- runner.run(cmd, cwd=source_dir)
- runner.close()
+ with log.section('make'):
+ runner = log.get_runner('make', sys.stdout)
+ for cmd in cmds:
+ runner.run(cmd, cwd=source_dir)
+ runner.close()
+ log.status_pass('OK')
class ArbitraryAttributeContainer(object):
pass
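Note: the build commands are now wrapped in log.section('make'), which groups the make output under its own heading in the HTML log and records a pass status for the step. A minimal sketch of what such a context-manager method could look like, assuming start_section()/end_section() primitives on the Logfile class (the actual multiplexed_log implementation may differ):

    import contextlib

    # Hedged sketch of a Logfile method; not the actual multiplexed_log code.
    @contextlib.contextmanager
    def section(self, marker):
        # Open the named section, run the body, and always close the section
        # again even if the body raises.
        self.start_section(marker)
        try:
            yield
        finally:
            self.end_section(marker)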
@@ -169,6 +178,7 @@ def pytest_configure(config):
ubconfig.persistent_data_dir = persistent_data_dir
ubconfig.board_type = board_type
ubconfig.board_identity = board_identity
+ ubconfig.gdbserver = gdbserver
env_vars = (
'board_type',
@@ -189,8 +199,42 @@ def pytest_configure(config):
import u_boot_console_exec_attach
console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
-def pytest_generate_tests(metafunc):
- """pytest hook: parameterize test functions based on custom rules.
+re_ut_test_list = re.compile(r'_u_boot_list_2_(dm|env)_test_2_\1_test_(.*)\s*$')
+def generate_ut_subtest(metafunc, fixture_name):
+ """Provide parametrization for a ut_subtest fixture.
+
+ Determines the set of unit tests built into a U-Boot binary by parsing the
+ list of symbols generated by the build process. Provides this information
+ to test functions by parameterizing their ut_subtest fixture parameter.
+
+ Args:
+ metafunc: The pytest test function.
+ fixture_name: The fixture name to test.
+
+ Returns:
+ Nothing.
+ """
+
+ fn = console.config.build_dir + '/u-boot.sym'
+ try:
+ with open(fn, 'rt') as f:
+ lines = f.readlines()
+ except:
+ lines = []
+ lines.sort()
+
+ vals = []
+ for l in lines:
+ m = re_ut_test_list.search(l)
+ if not m:
+ continue
+ vals.append(m.group(1) + ' ' + m.group(2))
+
+ ids = ['ut_' + s.replace(' ', '_') for s in vals]
+ metafunc.parametrize(fixture_name, vals, ids=ids)
+
+def generate_config(metafunc, fixture_name):
+ """Provide parametrization for {env,brd}__ fixtures.
If a test function takes parameter(s) (fixture names) of the form brd__xxx
or env__xxx, the brd and env configuration dictionaries are consulted to
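Note: the regular expression above pulls the test suite ('dm' or 'env') and the individual test name out of linker-list symbols found in u-boot.sym. The symbol lines below are made-up examples, purely to show what the regex captures:

    import re

    re_ut_test_list = re.compile(r'_u_boot_list_2_(dm|env)_test_2_\1_test_(.*)\s*$')

    for line in (
            '0000000000123400 D _u_boot_list_2_dm_test_2_dm_test_example',
            '0000000000123440 D _u_boot_list_2_env_test_2_env_test_example',
            '0000000000123480 D _u_boot_list_2_unrelated_symbol'):
        m = re_ut_test_list.search(line)
        if m:
            # Yields values such as 'dm example' and 'env example'; the
            # generated test IDs become ut_dm_example and ut_env_example.
            print(m.group(1) + ' ' + m.group(2))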
@@ -199,6 +243,7 @@ def pytest_generate_tests(metafunc):
Args:
metafunc: The pytest test function.
+ fixture_name: The fixture name to test.
Returns:
Nothing.
@@ -208,30 +253,49 @@ def pytest_generate_tests(metafunc):
'brd': console.config.brd,
'env': console.config.env,
}
+ parts = fixture_name.split('__')
+ if len(parts) < 2:
+ return
+ if parts[0] not in subconfigs:
+ return
+ subconfig = subconfigs[parts[0]]
+ vals = []
+ val = subconfig.get(fixture_name, [])
+ # If that exact name is a key in the data source:
+ if val:
+ # ... use the dict value as a single parameter value.
+ vals = (val, )
+ else:
+ # ... otherwise, see if there's a key that contains a list of
+ # values to use instead.
+        vals = subconfig.get(fixture_name + 's', [])
+ def fixture_id(index, val):
+ try:
+ return val['fixture_id']
+ except:
+ return fixture_name + str(index)
+ ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
+ metafunc.parametrize(fixture_name, vals, ids=ids)
+
+def pytest_generate_tests(metafunc):
+ """pytest hook: parameterize test functions based on custom rules.
+
+ Check each test function parameter (fixture name) to see if it is one of
+ our custom names, and if so, provide the correct parametrization for that
+ parameter.
+
+ Args:
+ metafunc: The pytest test function.
+
+ Returns:
+ Nothing.
+ """
+
for fn in metafunc.fixturenames:
- parts = fn.split('__')
- if len(parts) < 2:
+ if fn == 'ut_subtest':
+ generate_ut_subtest(metafunc, fn)
continue
- if parts[0] not in subconfigs:
- continue
- subconfig = subconfigs[parts[0]]
- vals = []
- val = subconfig.get(fn, [])
- # If that exact name is a key in the data source:
- if val:
- # ... use the dict value as a single parameter value.
- vals = (val, )
- else:
- # ... otherwise, see if there's a key that contains a list of
- # values to use instead.
- vals = subconfig.get(fn + 's', [])
- def fixture_id(index, val):
- try:
- return val["fixture_id"]
- except:
- return fn + str(index)
- ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
- metafunc.parametrize(fn, vals, ids=ids)
+ generate_config(metafunc, fn)
@pytest.fixture(scope='function')
def u_boot_console(request):
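Note: to make the brd__/env__ parametrization concrete, here is an illustrative, made-up fragment of a board environment module and a pair of tests that consume it. generate_config() above turns the exact-match env__foo dict into a single parameter value, and the plural env__bars entry into one value per list element, using each element's fixture_id as the test ID:

    # Hypothetical entries in a board environment module; env__foo, env__bars
    # and the tests below are examples, not real U-Boot tests.
    env__foo = {
        'fixture_id': 'default',
        'addr': 0x1000,
    }

    env__bars = (
        {'fixture_id': 'small', 'size': 16},
        {'fixture_id': 'large', 'size': 4096},
    )

    def test_foo(u_boot_console, env__foo):
        # Runs once, with the env__foo dict above as the parameter value.
        assert env__foo['addr'] >= 0

    def test_bar(u_boot_console, env__bar):
        # Runs twice, with IDs 'small' and 'large' taken from fixture_id.
        assert env__bar['size'] > 0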
@@ -247,12 +311,13 @@ def u_boot_console(request):
console.ensure_spawned()
return console
-tests_not_run = set()
-tests_failed = set()
-tests_xpassed = set()
-tests_xfailed = set()
-tests_skipped = set()
-tests_passed = set()
+anchors = {}
+tests_not_run = []
+tests_failed = []
+tests_xpassed = []
+tests_xfailed = []
+tests_skipped = []
+tests_passed = []
def pytest_itemcollected(item):
"""pytest hook: Called once for each test found during collection.
@@ -267,7 +332,7 @@ def pytest_itemcollected(item):
Nothing.
"""
- tests_not_run.add(item.name)
+ tests_not_run.append(item.name)
def cleanup():
"""Clean up all global state.
@@ -286,27 +351,33 @@ def cleanup():
if console:
console.close()
if log:
- log.status_pass('%d passed' % len(tests_passed))
- if tests_skipped:
- log.status_skipped('%d skipped' % len(tests_skipped))
- for test in tests_skipped:
- log.status_skipped('... ' + test)
- if tests_xpassed:
- log.status_xpass('%d xpass' % len(tests_xpassed))
- for test in tests_xpassed:
- log.status_xpass('... ' + test)
- if tests_xfailed:
- log.status_xfail('%d xfail' % len(tests_xfailed))
- for test in tests_xfailed:
- log.status_xfail('... ' + test)
- if tests_failed:
- log.status_fail('%d failed' % len(tests_failed))
- for test in tests_failed:
- log.status_fail('... ' + test)
- if tests_not_run:
- log.status_fail('%d not run' % len(tests_not_run))
- for test in tests_not_run:
- log.status_fail('... ' + test)
+ with log.section('Status Report', 'status_report'):
+ log.status_pass('%d passed' % len(tests_passed))
+ if tests_skipped:
+ log.status_skipped('%d skipped' % len(tests_skipped))
+ for test in tests_skipped:
+ anchor = anchors.get(test, None)
+ log.status_skipped('... ' + test, anchor)
+ if tests_xpassed:
+ log.status_xpass('%d xpass' % len(tests_xpassed))
+ for test in tests_xpassed:
+ anchor = anchors.get(test, None)
+ log.status_xpass('... ' + test, anchor)
+ if tests_xfailed:
+ log.status_xfail('%d xfail' % len(tests_xfailed))
+ for test in tests_xfailed:
+ anchor = anchors.get(test, None)
+ log.status_xfail('... ' + test, anchor)
+ if tests_failed:
+ log.status_fail('%d failed' % len(tests_failed))
+ for test in tests_failed:
+ anchor = anchors.get(test, None)
+ log.status_fail('... ' + test, anchor)
+ if tests_not_run:
+ log.status_fail('%d not run' % len(tests_not_run))
+ for test in tests_not_run:
+ anchor = anchors.get(test, None)
+ log.status_fail('... ' + test, anchor)
log.close()
atexit.register(cleanup)
@@ -372,7 +443,7 @@ def pytest_runtest_setup(item):
Nothing.
"""
- log.start_section(item.name)
+ anchors[item.name] = log.start_section(item.name)
setup_boardspec(item)
setup_buildconfigspec(item)
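Note: each per-test log section now records an anchor (start_section() is taken to return one), and the status report passes it back so every summary line can link to the matching output. A small, hedged sketch of that flow, mirroring the code above:

    # Illustration of how the anchors dict ties the status report to each
    # test's log section; names here mirror the code above.
    anchors = {}

    def record_start(log, test_name):
        anchors[test_name] = log.start_section(test_name)

    def report_failures(log, failed_tests):
        for test in failed_tests:
            # Passing the anchor back lets the summary line hyperlink to the
            # failing test's section in the HTML log.
            log.status_fail('... ' + test, anchors.get(test, None))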
@@ -422,7 +493,7 @@ def pytest_runtest_protocol(item, nextitem):
if failure_cleanup:
console.drain_console()
- test_list.add(item.name)
+ test_list.append(item.name)
tests_not_run.remove(item.name)
try: