Diffstat (limited to 'llvm/utils/lit/tests')
-rw-r--r--  llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py   52
-rw-r--r--  llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg           10
-rw-r--r--  llvm/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini   16
-rw-r--r--  llvm/utils/lit/tests/test-data-micro.py                       21
-rw-r--r--  llvm/utils/lit/tests/test-output-micro.py                     51
5 files changed, 150 insertions, 0 deletions
diff --git a/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py b/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
new file mode 100644
index 00000000000..5842f5a5ba3
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
@@ -0,0 +1,52 @@
+import os
+try:
+    import ConfigParser
+except ImportError:
+    import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+    def execute(self, test, lit_config):
+        # In this dummy format, each test file is expected to be a .ini
+        # format dump of the results to report.
+
+        source_path = test.getSourcePath()
+
+        cfg = ConfigParser.ConfigParser()
+        cfg.read(source_path)
+
+        # Create the basic test result.
+        result_code = cfg.get('global', 'result_code')
+        result_output = cfg.get('global', 'result_output')
+        result = lit.Test.Result(getattr(lit.Test, result_code),
+                                 result_output)
+
+        # Load additional metrics.
+        for key, value_str in cfg.items('results'):
+            value = eval(value_str)
+            if isinstance(value, int):
+                metric = lit.Test.IntMetricValue(value)
+            elif isinstance(value, float):
+                metric = lit.Test.RealMetricValue(value)
+            else:
+                raise RuntimeError("unsupported result type")
+            result.addMetric(key, metric)
+
+        # Create the micro test results.
+        for key, micro_name in cfg.items('micro-tests'):
+            micro_result = lit.Test.Result(getattr(lit.Test, result_code), '')
+            # Load additional metrics for each micro test.
+            for metric_key, value_str in cfg.items('micro-results'):
+                value = eval(value_str)
+                if isinstance(value, int):
+                    metric = lit.Test.IntMetricValue(value)
+                elif isinstance(value, float):
+                    metric = lit.Test.RealMetricValue(value)
+                else:
+                    raise RuntimeError("unsupported result type")
+                micro_result.addMetric(metric_key, metric)
+            result.addMicroResult(micro_name, micro_result)
+
+        return result
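The format above amounts to a section-to-result mapping: [global] supplies the top-level result code and output, [results] its metrics, and each entry in [micro-tests] becomes a named micro result carrying the [micro-results] metrics. A minimal standalone sketch of that mapping, stdlib only, with ast.literal_eval standing in for the format's eval call (the function name is illustrative):

    import configparser
    from ast import literal_eval

    def parse_micro_ini(path):
        cfg = configparser.ConfigParser()
        cfg.read(path)
        # [global] carries the top-level result code and output.
        result = {
            'code': cfg.get('global', 'result_code'),
            'output': cfg.get('global', 'result_output'),
            # [results] values parse as Python literals (int or float).
            'metrics': {k: literal_eval(v) for k, v in cfg.items('results')},
            'micro': {},
        }
        # Every name in [micro-tests] shares the same [micro-results] metrics.
        micro_metrics = {k: literal_eval(v)
                         for k, v in cfg.items('micro-results')}
        for _, name in cfg.items('micro-tests'):
            result['micro'][name] = dict(micro_metrics)
        return result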
diff --git a/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg b/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg
new file mode 100644
index 00000000000..3fc1e859736
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg
@@ -0,0 +1,10 @@
+import site
+site.addsitedir(os.path.dirname(__file__))
+import dummy_format
+
+config.name = 'test-data-micro'
+config.suffixes = ['.ini']
+config.test_format = dummy_format.DummyFormat()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = None
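For context, a lit format only has to provide an execute method that returns a lit.Test.Result; everything else in dummy_format.py is the .ini decoding. A bare-bones skeleton against the same API (the class name is illustrative):

    import lit.formats
    import lit.Test

    class SkeletonFormat(lit.formats.FileBasedTest):
        def execute(self, test, lit_config):
            # The code must be one of the lit.Test result codes, e.g. PASS.
            return lit.Test.Result(lit.Test.PASS, 'output goes here')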
diff --git a/llvm/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini b/llvm/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini
new file mode 100644
index 00000000000..1e5d76ac7ae
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini
@@ -0,0 +1,16 @@
+[global]
+result_code = PASS
+result_output = Test passed.
+
+[results]
+value0 = 1
+value1 = 2.3456
+
+[micro-tests]
+microtest0 = test0
+microtest1 = test1
+microtest2 = test2
+
+[micro-results]
+micro_value0 = 4
+micro_value1 = 1.3
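Because the dummy format evaluates each value and dispatches on its Python type, value0 and micro_value0 above become lit.Test.IntMetricValue while value1 and micro_value1 become lit.Test.RealMetricValue. A quick check of that dispatch, with ast.literal_eval standing in for the format's eval:

    from ast import literal_eval
    import lit.Test

    for raw in ('1', '2.3456', '4', '1.3'):
        value = literal_eval(raw)
        metric = (lit.Test.IntMetricValue(value) if isinstance(value, int)
                  else lit.Test.RealMetricValue(value))
        print(raw, '->', type(metric).__name__)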
diff --git a/llvm/utils/lit/tests/test-data-micro.py b/llvm/utils/lit/tests/test-data-micro.py
new file mode 100644
index 00000000000..634139e233f
--- /dev/null
+++ b/llvm/utils/lit/tests/test-data-micro.py
@@ -0,0 +1,21 @@
+# Test features related to formats which support reporting additional test
+# data and multiple test results.
+
+# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro | FileCheck %s
+
+# CHECK: -- Testing:
+
+# CHECK: PASS: test-data-micro :: micro-tests.ini
+# CHECK-NEXT: *** TEST 'test-data-micro :: micro-tests.ini' RESULTS ***
+# CHECK-NEXT: value0: 1
+# CHECK-NEXT: value1: 2.3456
+# CHECK-NEXT: ***
+# CHECK-NEXT: *** MICRO-TEST: test0
+# CHECK-NEXT: micro_value0: 4
+# CHECK-NEXT: micro_value1: 1.3
+# CHECK-NEXT: *** MICRO-TEST: test1
+# CHECK-NEXT: micro_value0: 4
+# CHECK-NEXT: micro_value1: 1.3
+# CHECK-NEXT: *** MICRO-TEST: test2
+# CHECK-NEXT: micro_value0: 4
+# CHECK-NEXT: micro_value1: 1.3
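In these RUN lines, %{lit} and %{inputs} are substitutions for the in-tree lit and its Inputs directory. Outside the harness, roughly the same run can be driven from Python; the paths below assume an llvm-project checkout with lit importable and are illustrative:

    import sys
    from lit.main import main

    sys.argv = ['lit', '-j', '1', '-v',
                'llvm/utils/lit/tests/Inputs/test-data-micro']
    main()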
diff --git a/llvm/utils/lit/tests/test-output-micro.py b/llvm/utils/lit/tests/test-output-micro.py
new file mode 100644
index 00000000000..4357fe88f90
--- /dev/null
+++ b/llvm/utils/lit/tests/test-output-micro.py
@@ -0,0 +1,51 @@
+# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out
+# RUN: FileCheck < %t.results.out %s
+# RUN: rm %t.results.out
+
+
+# CHECK: {
+# CHECK: "__version__"
+# CHECK: "elapsed"
+# CHECK-NEXT: "tests": [
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": null,
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "micro_value0": 4,
+# CHECK-NEXT: "micro_value1": 1.3
+# CHECK-NEXT: },
+# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
+# CHECK-NEXT: "output": ""
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": null,
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "micro_value0": 4,
+# CHECK-NEXT: "micro_value1": 1.3
+# CHECK-NEXT: },
+# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
+# CHECK-NEXT: "output": ""
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": null,
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "micro_value0": 4,
+# CHECK-NEXT: "micro_value1": 1.3
+# CHECK-NEXT: },
+# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
+# CHECK-NEXT: "output": ""
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": {{[0-9.]+}},
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "value0": 1,
+# CHECK-NEXT: "value1": 2.3456
+# CHECK-NEXT: },
+# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini",
+# CHECK-NEXT: "output": "Test passed."
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }
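The CHECK lines pin down the shape of the --output JSON: each micro test appears as its own entry under "tests", named '<parent name>:<micro name>' with its own metrics and a null elapsed, followed by the parent test, which reports the elapsed time and the top-level metrics. A short sketch that inspects such a file with the stdlib (the path is illustrative):

    import json

    with open('results.out') as f:
        data = json.load(f)

    for t in data['tests']:
        # Micro tests carry 'elapsed': null; the parent test reports a time.
        print(t['name'], t['code'], t['metrics'])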