Diffstat (limited to 'tools/testing/selftests/tc-testing')
-rw-r--r--  tools/testing/selftests/tc-testing/.gitignore                     |   3
-rw-r--r--  tools/testing/selftests/tc-testing/README                         |   2
-rw-r--r--  tools/testing/selftests/tc-testing/TdcPlugin.py                   |   3
-rw-r--r--  tools/testing/selftests/tc-testing/TdcResults.py                  | 132
-rw-r--r--  tools/testing/selftests/tc-testing/bpf/Makefile                   |  30
-rw-r--r--  tools/testing/selftests/tc-testing/bpf/action.c                   |  23
-rw-r--r--  tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py  |  66
-rw-r--r--  tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py   |  22
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json      |  16
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/gact.json     |  24
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/police.json   |  24
-rwxr-xr-x  tools/testing/selftests/tc-testing/tdc.py                         | 147
-rw-r--r--  tools/testing/selftests/tc-testing/tdc_config.py                  |   6
13 files changed, 428 insertions, 70 deletions
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
index 7a60b85e148f..c5cc160948b3 100644
--- a/tools/testing/selftests/tc-testing/.gitignore
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -1,2 +1,5 @@
__pycache__/
*.pyc
+plugins/
+*.xml
+*.tap
diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README
index 49a6f8c3fdae..f9281e8aa313 100644
--- a/tools/testing/selftests/tc-testing/README
+++ b/tools/testing/selftests/tc-testing/README
@@ -232,6 +232,8 @@ directory:
and the other is a test whether the command leaked memory or not.
(This one is a preliminary version, it may not work quite right yet,
but the overall template is there and it should only need tweaks.)
+ - buildebpfPlugin.py:
+ builds all programs in $EBPFDIR.
ACKNOWLEDGEMENTS
diff --git a/tools/testing/selftests/tc-testing/TdcPlugin.py b/tools/testing/selftests/tc-testing/TdcPlugin.py
index 3ee9a6dacb52..1d9e279331eb 100644
--- a/tools/testing/selftests/tc-testing/TdcPlugin.py
+++ b/tools/testing/selftests/tc-testing/TdcPlugin.py
@@ -18,11 +18,12 @@ class TdcPlugin:
if self.args.verbose > 1:
print(' -- {}.post_suite'.format(self.sub_class))
- def pre_case(self, test_ordinal, testid):
+ def pre_case(self, test_ordinal, testid, test_name):
'''run commands before test_runner does one test'''
if self.args.verbose > 1:
print(' -- {}.pre_case'.format(self.sub_class))
self.args.testid = testid
+ self.args.test_name = test_name
self.args.test_ordinal = test_ordinal
def post_case(self):
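For reference, a minimal sketch of a plugin written against the updated three-argument hook; the file name, class body and print statement below are illustrative only and are not part of this patch:

    from TdcPlugin import TdcPlugin

    class SubPlugin(TdcPlugin):
        def __init__(self):
            self.sub_class = 'example/SubPlugin'
            super().__init__()

        def pre_case(self, test_ordinal, testid, test_name):
            # The new third argument carries the human-readable test name,
            # so per-test output (such as the valgrind "-mem" results
            # further down in this diff) can be labelled without
            # re-reading the test case JSON.
            super().pre_case(test_ordinal, testid, test_name)
            print('starting {} ({})'.format(testid, test_name))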
diff --git a/tools/testing/selftests/tc-testing/TdcResults.py b/tools/testing/selftests/tc-testing/TdcResults.py
new file mode 100644
index 000000000000..1e4d95fdf8d0
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/TdcResults.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+
+from enum import Enum
+
+class ResultState(Enum):
+ noresult = -1
+ skip = 0
+ success = 1
+ fail = 2
+
+class TestResult:
+ def __init__(self, test_id="", test_name=""):
+ self.test_id = test_id
+ self.test_name = test_name
+ self.result = ResultState.noresult
+ self.failmsg = ""
+ self.errormsg = ""
+ self.steps = []
+
+ def set_result(self, result):
+ if (isinstance(result, ResultState)):
+ self.result = result
+ return True
+ else:
+ raise TypeError('Unknown result type, must be type ResultState')
+
+ def get_result(self):
+ return self.result
+
+ def set_errormsg(self, errormsg):
+ self.errormsg = errormsg
+ return True
+
+ def append_errormsg(self, errormsg):
+ self.errormsg = '{}\n{}'.format(self.errormsg, errormsg)
+
+ def get_errormsg(self):
+ return self.errormsg
+
+ def set_failmsg(self, failmsg):
+ self.failmsg = failmsg
+ return True
+
+ def append_failmsg(self, failmsg):
+ self.failmsg = '{}\n{}'.format(self.failmsg, failmsg)
+
+ def get_failmsg(self):
+ return self.failmsg
+
+ def add_steps(self, newstep):
+ if type(newstep) == list:
+ self.steps.extend(newstep)
+ elif type(newstep) == str:
+ self.steps.append(newstep)
+ else:
+ raise TypeError('TdcResults.add_steps() requires a list or str')
+
+ def get_executed_steps(self):
+ return self.steps
+
+class TestSuiteReport():
+ _testsuite = []
+
+ def add_resultdata(self, result_data):
+ if isinstance(result_data, TestResult):
+ self._testsuite.append(result_data)
+ return True
+
+ def count_tests(self):
+ return len(self._testsuite)
+
+ def count_failures(self):
+ return sum(1 for t in self._testsuite if t.result == ResultState.fail)
+
+ def count_skips(self):
+ return sum(1 for t in self._testsuite if t.result == ResultState.skip)
+
+ def find_result(self, test_id):
+ return next((tr for tr in self._testsuite if tr.test_id == test_id), None)
+
+ def update_result(self, result_data):
+ orig = self.find_result(result_data.test_id)
+ if orig != None:
+ idx = self._testsuite.index(orig)
+ self._testsuite[idx] = result_data
+ else:
+ self.add_resultdata(result_data)
+
+ def format_tap(self):
+ ftap = ""
+ ftap += '1..{}\n'.format(self.count_tests())
+ index = 1
+ for t in self._testsuite:
+ if t.result == ResultState.fail:
+ ftap += 'not '
+ ftap += 'ok {} {} - {}'.format(str(index), t.test_id, t.test_name)
+ if t.result == ResultState.skip or t.result == ResultState.noresult:
+ ftap += ' # skipped - {}\n'.format(t.errormsg)
+ elif t.result == ResultState.fail:
+ if len(t.steps) > 0:
+ ftap += '\tCommands executed in this test case:'
+ for step in t.steps:
+ ftap += '\n\t\t{}'.format(step)
+ ftap += '\n\t{}'.format(t.failmsg)
+ ftap += '\n'
+ index += 1
+ return ftap
+
+ def format_xunit(self):
+ from xml.sax.saxutils import escape
+ xunit = "<testsuites>\n"
+ xunit += '\t<testsuite tests=\"{}\" skips=\"{}\">\n'.format(self.count_tests(), self.count_skips())
+ for t in self._testsuite:
+ xunit += '\t\t<testcase classname=\"{}\" '.format(escape(t.test_id))
+ xunit += 'name=\"{}\">\n'.format(escape(t.test_name))
+ if t.failmsg:
+ xunit += '\t\t\t<failure>\n'
+ if len(t.steps) > 0:
+ xunit += 'Commands executed in this test case:\n'
+ for step in t.steps:
+ xunit += '\t{}\n'.format(escape(step))
+ xunit += 'FAILURE: {}\n'.format(escape(t.failmsg))
+ xunit += '\t\t\t</failure>\n'
+ if t.errormsg:
+ xunit += '\t\t\t<error>\n{}\n'.format(escape(t.errormsg))
+ xunit += '\t\t\t</error>\n'
+ if t.result == ResultState.skip:
+ xunit += '\t\t\t<skipped/>\n'
+ xunit += '\t\t</testcase>\n'
+ xunit += '\t</testsuite>\n'
+ xunit += '</testsuites>\n'
+ return xunit
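A short sketch of how the new results API is driven; the test ids and messages here are made up for illustration (tdc.py and valgrindPlugin.py, below, are the real consumers):

    from TdcResults import TestResult, TestSuiteReport, ResultState

    tsr = TestSuiteReport()

    # a passing test
    res = TestResult('1234', 'example passing case')
    res.add_steps(['$TC actions flush action gact'])
    res.set_result(ResultState.success)
    tsr.add_resultdata(res)

    # a failing test
    res = TestResult('abcd', 'example failing case')
    res.set_result(ResultState.fail)
    res.set_failmsg('Command exited with 255, expected 0')
    tsr.add_resultdata(res)

    print(tsr.format_tap())      # TAP, as tdc.py writes to test-results.tap
    # print(tsr.format_xunit())  # xUnit XML, as written to test-results.xml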
diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile
new file mode 100644
index 000000000000..be5a5e542804
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/bpf/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+APIDIR := ../../../../include/uapi
+TEST_GEN_FILES = action.o
+
+top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
+include ../../lib.mk
+
+CLANG ?= clang
+LLC ?= llc
+PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+
+ifeq ($(PROBE),)
+ CPU ?= probe
+else
+ CPU ?= generic
+endif
+
+CLANG_SYS_INCLUDES := $(shell $(CLANG) -v -E - </dev/null 2>&1 \
+ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
+
+CLANG_FLAGS = -I. -I$(APIDIR) \
+ $(CLANG_SYS_INCLUDES) \
+ -Wno-compare-distinct-pointer-types
+
+$(OUTPUT)/%.o: %.c
+ $(CLANG) $(CLANG_FLAGS) \
+ -O2 -target bpf -emit-llvm -c $< -o - | \
+ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
diff --git a/tools/testing/selftests/tc-testing/bpf/action.c b/tools/testing/selftests/tc-testing/bpf/action.c
new file mode 100644
index 000000000000..c32b99b80e19
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/bpf/action.c
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Davide Caratti, Red Hat inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+
+__attribute__((section("action-ok"),used)) int action_ok(struct __sk_buff *s)
+{
+ return TC_ACT_OK;
+}
+
+__attribute__((section("action-ko"),used)) int action_ko(struct __sk_buff *s)
+{
+ s->data = 0x0;
+ return TC_ACT_OK;
+}
+
+char _license[] __attribute__((section("license"),used)) = "GPL";
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
new file mode 100644
index 000000000000..9f0ba10c44b4
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
@@ -0,0 +1,66 @@
+'''
+build ebpf program
+'''
+
+import os
+import signal
+from string import Template
+import subprocess
+import time
+from TdcPlugin import TdcPlugin
+from tdc_config import *
+
+class SubPlugin(TdcPlugin):
+ def __init__(self):
+ self.sub_class = 'buildebpf/SubPlugin'
+ self.tap = ''
+ super().__init__()
+
+ def pre_suite(self, testcount, testidlist):
+ super().pre_suite(testcount, testidlist)
+
+ if self.args.buildebpf:
+ self._ebpf_makeall()
+
+ def post_suite(self, index):
+ super().post_suite(index)
+
+ self._ebpf_makeclean()
+
+ def add_args(self, parser):
+ super().add_args(parser)
+
+ self.argparser_group = self.argparser.add_argument_group(
+ 'buildebpf',
+ 'options for buildebpfPlugin')
+ self.argparser_group.add_argument(
+ '-B', '--buildebpf', action='store_true',
+ help='build eBPF programs')
+
+ return self.argparser
+
+ def _ebpf_makeall(self):
+ if self.args.buildebpf:
+ self._make('all')
+
+ def _ebpf_makeclean(self):
+ if self.args.buildebpf:
+ self._make('clean')
+
+ def _make(self, target):
+ command = 'make -C {} {}'.format(self.args.NAMES['EBPFDIR'], target)
+ proc = subprocess.Popen(command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=ENVIR)
+ (rawout, serr) = proc.communicate()
+
+ if proc.returncode != 0 and len(serr) > 0:
+ foutput = serr.decode("utf-8")
+ else:
+ foutput = rawout.decode("utf-8")
+
+ proc.stdout.close()
+ proc.stderr.close()
+ return proc, foutput
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
index 477a7bd7d7fb..e00c798de0bb 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
@@ -11,6 +11,7 @@ from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin
+from TdcResults import *
from tdc_config import *
@@ -21,6 +22,7 @@ class SubPlugin(TdcPlugin):
def __init__(self):
self.sub_class = 'valgrind/SubPlugin'
self.tap = ''
+ self._tsr = TestSuiteReport()
super().__init__()
def pre_suite(self, testcount, testidlist):
@@ -34,10 +36,14 @@ class SubPlugin(TdcPlugin):
def post_suite(self, index):
'''run commands after test_runner goes into a test loop'''
super().post_suite(index)
- self._add_to_tap('\n|---\n')
if self.args.verbose > 1:
print('{}.post_suite'.format(self.sub_class))
- print('{}'.format(self.tap))
+ #print('{}'.format(self.tap))
+ for xx in range(index - 1, self.testcount):
+ res = TestResult('{}-mem'.format(self.testidlist[xx]), 'Test skipped')
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Skipped because of prior setup/teardown failure')
+ self._add_results(res)
if self.args.verbose < 4:
subprocess.check_output('rm -f vgnd-*.log', shell=True)
@@ -128,8 +134,17 @@ class SubPlugin(TdcPlugin):
nle_num = int(nle_mo.group(1))
mem_results = ''
+ res = TestResult('{}-mem'.format(self.args.testid),
+ '{} memory leak check'.format(self.args.test_name))
if (def_num > 0) or (ind_num > 0) or (pos_num > 0) or (nle_num > 0):
mem_results += 'not '
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Memory leak detected')
+ res.append_failmsg(content)
+ else:
+ res.set_result(ResultState.success)
+
+ self._add_results(res)
mem_results += 'ok {} - {}-mem # {}\n'.format(
self.args.test_ordinal, self.args.testid, 'memory leak check')
@@ -138,5 +153,8 @@ class SubPlugin(TdcPlugin):
print('{}'.format(content))
self._add_to_tap(content)
+ def _add_results(self, res):
+ self._tsr.add_resultdata(res)
+
def _add_to_tap(self, more_tap_output):
self.tap += more_tap_output
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
index 6f289a49e5ec..5970cee6d05f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
@@ -55,7 +55,6 @@
"bpf"
],
"setup": [
- "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { return 2; }' | clang -O2 -x c -c - -target bpf -o _b.o",
[
"$TC action flush action bpf",
0,
@@ -63,14 +62,13 @@
255
]
],
- "cmdUnderTest": "$TC action add action bpf object-file _b.o index 667",
+ "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667",
"expExitCode": "0",
"verifyCmd": "$TC action get action bpf index 667",
- "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c( jited)? default-action pipe.*index 667 ref",
+ "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
"matchCount": "1",
"teardown": [
- "$TC action flush action bpf",
- "rm -f _b.o"
+ "$TC action flush action bpf"
]
},
{
@@ -81,7 +79,6 @@
"bpf"
],
"setup": [
- "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { s->data = 0x0; return 2; }' | clang -O2 -x c -c - -target bpf -o _c.o",
[
"$TC action flush action bpf",
0,
@@ -89,10 +86,10 @@
255
]
],
- "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667",
+ "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ko index 667",
"expExitCode": "255",
"verifyCmd": "$TC action get action bpf index 667",
- "matchPattern": "action order [0-9]*: bpf _c.o:\\[action\\] id [0-9].*index 667 ref",
+ "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ko\\] id [0-9].*index 667 ref",
"matchCount": "0",
"teardown": [
[
@@ -100,8 +97,7 @@
0,
1,
255
- ],
- "rm -f _c.o"
+ ]
]
},
{
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index 68c91023cdb9..89189a03ce3d 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -536,5 +536,29 @@
"matchPattern": "^[ \t]+index [0-9]+ ref",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "8e47",
+ "name": "Add gact action with random determ goto chain control action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pass random determ goto chain 1 2 index 90",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pass random type determ goto chain 1 val 2.*index 90 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index 30f9b54bd666..4086a50a670e 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -715,5 +715,29 @@
"teardown": [
"$TC actions flush action police"
]
+ },
+ {
+ "id": "b48b",
+ "name": "Add police action with exceed goto chain control action",
+ "category": [
+ "actions",
+ "police"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action police",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action police rate 1mbit burst 1k conform-exceed pass / goto chain 42",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions ls action police",
+ "matchPattern": "action order [0-9]*: police 0x1 rate 1Mbit burst 1Kb mtu 2Kb action pass/goto chain 42",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action police"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index 87a04a8a5945..e6e4ce80a726 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -23,6 +23,7 @@ from tdc_config import *
from tdc_helper import *
import TdcPlugin
+from TdcResults import *
class PluginMgrTestFail(Exception):
@@ -60,10 +61,10 @@ class PluginMgr:
for pgn_inst in reversed(self.plugin_instances):
pgn_inst.post_suite(index)
- def call_pre_case(self, test_ordinal, testid):
+ def call_pre_case(self, test_ordinal, testid, test_name):
for pgn_inst in self.plugin_instances:
try:
- pgn_inst.pre_case(test_ordinal, testid)
+ pgn_inst.pre_case(test_ordinal, testid, test_name)
except Exception as ee:
print('exception {} in call to pre_case for {} plugin'.
format(ee, pgn_inst.__class__))
@@ -102,7 +103,6 @@ class PluginMgr:
self.argparser = argparse.ArgumentParser(
description='Linux TC unit tests')
-
def replace_keywords(cmd):
"""
For a given executable command, substitute any known
@@ -131,12 +131,16 @@ def exec_cmd(args, pm, stage, command):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=ENVIR)
- (rawout, serr) = proc.communicate()
- if proc.returncode != 0 and len(serr) > 0:
- foutput = serr.decode("utf-8")
- else:
- foutput = rawout.decode("utf-8")
+ try:
+ (rawout, serr) = proc.communicate(timeout=NAMES['TIMEOUT'])
+ if proc.returncode != 0 and len(serr) > 0:
+ foutput = serr.decode("utf-8", errors="ignore")
+ else:
+ foutput = rawout.decode("utf-8", errors="ignore")
+ except subprocess.TimeoutExpired:
+ foutput = "Command \"{}\" timed out\n".format(command)
+ proc.returncode = 255
proc.stdout.close()
proc.stderr.close()
@@ -169,6 +173,8 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
file=sys.stderr)
print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
file=sys.stderr)
+ print("returncode {}; expected {}".format(proc.returncode,
+ exit_codes))
print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
@@ -181,6 +187,7 @@ def run_one_test(pm, args, index, tidx):
result = True
tresult = ""
tap = ""
+ res = TestResult(tidx['id'], tidx['name'])
if args.verbose > 0:
print("\t====================\n=====> ", end="")
print("Test " + tidx["id"] + ": " + tidx["name"])
@@ -188,19 +195,26 @@ def run_one_test(pm, args, index, tidx):
# populate NAMES with TESTID for this test
NAMES['TESTID'] = tidx['id']
- pm.call_pre_case(index, tidx['id'])
+ pm.call_pre_case(index, tidx['id'], tidx['name'])
prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])
if (args.verbose > 0):
print('-----> execute stage')
pm.call_pre_execute()
(p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
- exit_code = p.returncode
+ if p:
+ exit_code = p.returncode
+ else:
+ exit_code = None
+
pm.call_post_execute()
- if (exit_code != int(tidx["expExitCode"])):
- result = False
- print("exit:", exit_code, int(tidx["expExitCode"]))
+ if (exit_code is None or exit_code != int(tidx["expExitCode"])):
+ print("exit: {!r}".format(exit_code))
+ print("exit: {}".format(int(tidx["expExitCode"])))
+ #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Command exited with {}, expected {}\n{}'.format(exit_code, tidx["expExitCode"], procout))
print(procout)
else:
if args.verbose > 0:
@@ -211,20 +225,15 @@ def run_one_test(pm, args, index, tidx):
if procout:
match_index = re.findall(match_pattern, procout)
if len(match_index) != int(tidx["matchCount"]):
- result = False
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout))
+ else:
+ res.set_result(ResultState.success)
elif int(tidx["matchCount"]) != 0:
- result = False
-
- if not result:
- tresult += 'not '
- tresult += 'ok {} - {} # {}\n'.format(str(index), tidx['id'], tidx['name'])
- tap += tresult
-
- if result == False:
- if procout:
- tap += procout
+ res.set_result(ResultState.fail)
+ res.set_failmsg('No output generated by verify command.')
else:
- tap += 'No output!\n'
+ res.set_result(ResultState.success)
prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
pm.call_post_case()
@@ -233,7 +242,7 @@ def run_one_test(pm, args, index, tidx):
# remove TESTID from NAMES
del(NAMES['TESTID'])
- return tap
+ return res
def test_runner(pm, args, filtered_tests):
"""
@@ -253,25 +262,15 @@ def test_runner(pm, args, filtered_tests):
emergency_exit = False
emergency_exit_message = ''
- if args.notap:
- if args.verbose:
- tap = 'notap requested: omitting test plan\n'
- else:
- tap = str(index) + ".." + str(tcount) + "\n"
+ tsr = TestSuiteReport()
+
try:
pm.call_pre_suite(tcount, [tidx['id'] for tidx in testlist])
except Exception as ee:
ex_type, ex, ex_tb = sys.exc_info()
print('Exception {} {} (caught in pre_suite).'.
format(ex_type, ex))
- # when the extra print statements are uncommented,
- # the traceback does not appear between them
- # (it appears way earlier in the tdc.py output)
- # so don't bother ...
- # print('--------------------(')
- # print('traceback')
traceback.print_tb(ex_tb)
- # print('--------------------)')
emergency_exit_message = 'EMERGENCY EXIT, call_pre_suite failed with exception {} {}\n'.format(ex_type, ex)
emergency_exit = True
stage = 'pre-SUITE'
@@ -287,15 +286,26 @@ def test_runner(pm, args, filtered_tests):
if args.verbose > 1:
print('Not executing test {} {} because DEV2 not defined'.
format(tidx['id'], tidx['name']))
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Not executed because DEV2 is not defined')
+ tsr.add_resultdata(res)
continue
try:
badtest = tidx # in case it goes bad
- tap += run_one_test(pm, args, index, tidx)
+ res = run_one_test(pm, args, index, tidx)
+ tsr.add_resultdata(res)
except PluginMgrTestFail as pmtf:
ex_type, ex, ex_tb = sys.exc_info()
stage = pmtf.stage
message = pmtf.message
output = pmtf.output
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ res.set_errormsg(pmtf.message)
+ res.set_failmsg(pmtf.output)
+ tsr.add_resultdata(res)
+ index += 1
print(message)
print('Exception {} {} (caught in test_runner, running test {} {} {} stage {})'.
format(ex_type, ex, index, tidx['id'], tidx['name'], stage))
@@ -314,16 +324,16 @@ def test_runner(pm, args, filtered_tests):
# if we failed in setup or teardown,
# fill in the remaining tests with ok-skipped
count = index
- if not args.notap:
- tap += 'about to flush the tap output if tests need to be skipped\n'
- if tcount + 1 != index:
- for tidx in testlist[index - 1:]:
- msg = 'skipped - previous {} failed'.format(stage)
- tap += 'ok {} - {} # {} {} {}\n'.format(
- count, tidx['id'], msg, index, badtest.get('id', '--Unknown--'))
- count += 1
- tap += 'done flushing skipped test tap output\n'
+ if tcount + 1 != count:
+ for tidx in testlist[count - 1:]:
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ msg = 'skipped - previous {} failed {} {}'.format(stage,
+ index, badtest.get('id', '--Unknown--'))
+ res.set_errormsg(msg)
+ tsr.add_resultdata(res)
+ count += 1
if args.pause:
print('Want to pause\nPress enter to continue ...')
@@ -332,7 +342,7 @@ def test_runner(pm, args, filtered_tests):
pm.call_post_suite(index)
- return tap
+ return tsr
def has_blank_ids(idlist):
"""
@@ -373,6 +383,10 @@ def set_args(parser):
Set the command line arguments for tdc.
"""
parser.add_argument(
+ '--outfile', type=str,
+ help='Path to the file in which results should be saved. ' +
+ 'Default target is the current directory.')
+ parser.add_argument(
'-p', '--path', type=str,
help='The full path to the tc executable to use')
sg = parser.add_argument_group(
@@ -408,8 +422,9 @@ def set_args(parser):
'-v', '--verbose', action='count', default=0,
help='Show the commands that are being run')
parser.add_argument(
- '-N', '--notap', action='store_true',
- help='Suppress tap results for command under test')
+ '--format', default='tap', const='tap', nargs='?',
+ choices=['none', 'xunit', 'tap'],
+ help='Specify the format for test results. (Default: TAP)')
parser.add_argument('-d', '--device',
help='Execute the test case in flower category')
parser.add_argument(
@@ -430,6 +445,8 @@ def check_default_settings(args, remaining, pm):
NAMES['TC'] = args.path
if args.device != None:
NAMES['DEV2'] = args.device
+ if 'TIMEOUT' not in NAMES:
+ NAMES['TIMEOUT'] = None
if not os.path.isfile(NAMES['TC']):
print("The specified tc path " + NAMES['TC'] + " does not exist.")
exit(1)
@@ -624,12 +641,30 @@ def set_operation_mode(pm, args):
if len(alltests):
catresults = test_runner(pm, args, alltests)
+ if args.format == 'none':
+ print('Test results output suppression requested\n')
+ else:
+ print('\nAll test results: \n')
+ if args.format == 'xunit':
+ suffix = 'xml'
+ res = catresults.format_xunit()
+ elif args.format == 'tap':
+ suffix = 'tap'
+ res = catresults.format_tap()
+ print(res)
+ print('\n\n')
+ if not args.outfile:
+ fname = 'test-results.{}'.format(suffix)
+ else:
+ fname = args.outfile
+ with open(fname, 'w') as fh:
+ fh.write(res)
+ fh.close()
+ if os.getenv('SUDO_UID') is not None:
+ os.chown(fname, uid=int(os.getenv('SUDO_UID')),
+ gid=int(os.getenv('SUDO_GID')))
else:
- catresults = 'No tests found\n'
- if args.notap:
- print('Tap output suppression requested\n')
- else:
- print('All test results: \n\n{}'.format(catresults))
+ print('No tests found\n')
def main():
"""
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index a023d0d62b25..6d91e48c2625 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -15,8 +15,12 @@ NAMES = {
'DEV1': 'v0p1',
'DEV2': '',
'BATCH_FILE': './batch.txt',
+ # Length of time in seconds to wait before terminating a command
+ 'TIMEOUT': 12,
# Name of the namespace to use
- 'NS': 'tcut'
+ 'NS': 'tcut',
+ # Directory containing eBPF test programs
+ 'EBPFDIR': './bpf'
}