diff options
| author | Brian Homerding <homerdin@gmail.com> | 2018-03-13 16:37:59 +0000 |
|---|---|---|
| committer | Brian Homerding <homerdin@gmail.com> | 2018-03-13 16:37:59 +0000 |
| commit | d5c558ff216c94c5acf292047597736543bf7113 (patch) | |
| tree | f25d9f288de6af2adcfad219818f2b54a7abb525 /llvm/utils/lit/tests | |
| parent | 5182113f07baf2403801df8684391b096e356775 (diff) | |
| download | bcm5719-llvm-d5c558ff216c94c5acf292047597736543bf7113.tar.gz bcm5719-llvm-d5c558ff216c94c5acf292047597736543bf7113.zip | |
[lit] - Allow 1 test to report multiple micro-test results to provide support for microbenchmarks.
Summary:
These changes allow a Result object to have nested Result objects in
order to support microbenchmarks. Currently lit is restricted to reporting one
result object for one test; this change provides support for tests that want to
report individual timings for individual kernels.
This revision is the result of the discussions in
https://reviews.llvm.org/D32272#794759,
https://reviews.llvm.org/D37421#f8003b27 and https://reviews.llvm.org/D38496.
It is a separation of the changes proposed in https://reviews.llvm.org/D40077.
This change will enable adding LCALS (Livermore Compiler Analysis Loop Suite)
collection of loop kernels to the llvm test suite using the google benchmark
library (https://reviews.llvm.org/D43319) with tracking of individual kernel
timings.
Previously microbenchmarks had been handled by using macros to section groups
of microbenchmarks together and building many executables while still getting a
grouped timing (MultiSource/TSVC). Recently the google benchmark library was
added to the test suite and utilized with a litsupport plugin. However the
limitation of 1 test 1 result limited its use to passing a runtime option to
run only 1 microbenchmark with several hand-written tests
(MicroBenchmarks/XRay). This runs the same executable many times with different
hand-written tests. I will update the litsupport plugin to utilize the new
functionality (https://reviews.llvm.org/D43316).
These changes allow lit to report micro test results if desired in order to get
many precise timing results from 1 run of 1 test executable.
Reviewers: MatzeB, hfinkel, rengolin, delcypher
Differential Revision: https://reviews.llvm.org/D43314
llvm-svn: 327422
Diffstat (limited to 'llvm/utils/lit/tests')
| -rw-r--r-- | llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py | 52 | ||||
| -rw-r--r-- | llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg | 10 | ||||
| -rw-r--r-- | llvm/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini | 16 | ||||
| -rw-r--r-- | llvm/utils/lit/tests/test-data-micro.py | 21 | ||||
| -rw-r--r-- | llvm/utils/lit/tests/test-output-micro.py | 51 |
5 files changed, 150 insertions, 0 deletions
diff --git a/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py b/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py new file mode 100644 index 00000000000..5842f5a5ba3 --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py @@ -0,0 +1,52 @@ +import os +try: + import ConfigParser +except ImportError: + import configparser as ConfigParser + +import lit.formats +import lit.Test + +class DummyFormat(lit.formats.FileBasedTest): + def execute(self, test, lit_config): + # In this dummy format, expect that each test file is actually just a + # .ini format dump of the results to report. + + source_path = test.getSourcePath() + + cfg = ConfigParser.ConfigParser() + cfg.read(source_path) + + # Create the basic test result. + result_code = cfg.get('global', 'result_code') + result_output = cfg.get('global', 'result_output') + result = lit.Test.Result(getattr(lit.Test, result_code), + result_output) + + # Load additional metrics. + for key,value_str in cfg.items('results'): + value = eval(value_str) + if isinstance(value, int): + metric = lit.Test.IntMetricValue(value) + elif isinstance(value, float): + metric = lit.Test.RealMetricValue(value) + else: + raise RuntimeError("unsupported result type") + result.addMetric(key, metric) + + # Create micro test results + for key,micro_name in cfg.items('micro-tests'): + micro_result = lit.Test.Result(getattr(lit.Test, result_code, '')) + # Load micro test additional metrics + for key,value_str in cfg.items('micro-results'): + value = eval(value_str) + if isinstance(value, int): + metric = lit.Test.IntMetricValue(value) + elif isinstance(value, float): + metric = lit.Test.RealMetricValue(value) + else: + raise RuntimeError("unsupported result type") + micro_result.addMetric(key, metric) + result.addMicroResult(micro_name, micro_result) + + return result diff --git a/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg b/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg new file mode 100644 
index 00000000000..3fc1e859736 --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg @@ -0,0 +1,10 @@ +import site +site.addsitedir(os.path.dirname(__file__)) +import dummy_format + +config.name = 'test-data-micro' +config.suffixes = ['.ini'] +config.test_format = dummy_format.DummyFormat() +config.test_source_root = None +config.test_exec_root = None +config.target_triple = None diff --git a/llvm/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini b/llvm/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini new file mode 100644 index 00000000000..1e5d76ac7ae --- /dev/null +++ b/llvm/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini @@ -0,0 +1,16 @@ +[global] +result_code = PASS +result_output = Test passed. + +[results] +value0 = 1 +value1 = 2.3456 + +[micro-tests] +microtest0 = test0 +microtest1 = test1 +microtest2 = test2 + +[micro-results] +micro_value0 = 4 +micro_value1 = 1.3 diff --git a/llvm/utils/lit/tests/test-data-micro.py b/llvm/utils/lit/tests/test-data-micro.py new file mode 100644 index 00000000000..634139e233f --- /dev/null +++ b/llvm/utils/lit/tests/test-data-micro.py @@ -0,0 +1,21 @@ +# Test features related to formats which support reporting additional test data. +# and multiple test results. 
+ +# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro | FileCheck %s + +# CHECK: -- Testing: + +# CHECK: PASS: test-data-micro :: micro-tests.ini +# CHECK-NEXT: *** TEST 'test-data-micro :: micro-tests.ini' RESULTS *** +# CHECK-NEXT: value0: 1 +# CHECK-NEXT: value1: 2.3456 +# CHECK-NEXT: *** +# CHECK-NEXT: *** MICRO-TEST: test0 +# CHECK-NEXT: micro_value0: 4 +# CHECK-NEXT: micro_value1: 1.3 +# CHECK-NEXT: *** MICRO-TEST: test1 +# CHECK-NEXT: micro_value0: 4 +# CHECK-NEXT: micro_value1: 1.3 +# CHECK-NEXT: *** MICRO-TEST: test2 +# CHECK-NEXT: micro_value0: 4 +# CHECK-NEXT: micro_value1: 1.3 diff --git a/llvm/utils/lit/tests/test-output-micro.py b/llvm/utils/lit/tests/test-output-micro.py new file mode 100644 index 00000000000..4357fe88f90 --- /dev/null +++ b/llvm/utils/lit/tests/test-output-micro.py @@ -0,0 +1,51 @@ +# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out +# RUN: FileCheck < %t.results.out %s +# RUN: rm %t.results.out + + +# CHECK: { +# CHECK: "__version__" +# CHECK: "elapsed" +# CHECK-NEXT: "tests": [ +# CHECK-NEXT: { +# CHECK-NEXT: "code": "PASS", +# CHECK-NEXT: "elapsed": null, +# CHECK-NEXT: "metrics": { +# CHECK-NEXT: "micro_value0": 4, +# CHECK-NEXT: "micro_value1": 1.3 +# CHECK-NEXT: }, +# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}", +# CHECK-NEXT: "output": "" +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "code": "PASS", +# CHECK-NEXT: "elapsed": null, +# CHECK-NEXT: "metrics": { +# CHECK-NEXT: "micro_value0": 4, +# CHECK-NEXT: "micro_value1": 1.3 +# CHECK-NEXT: }, +# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}", +# CHECK-NEXT: "output": "" +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "code": "PASS", +# CHECK-NEXT: "elapsed": null, +# CHECK-NEXT: "metrics": { +# CHECK-NEXT: "micro_value0": 4, +# CHECK-NEXT: "micro_value1": 1.3 +# CHECK-NEXT: }, +# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}", +# CHECK-NEXT: "output": "" +# CHECK-NEXT: }, +# 
CHECK-NEXT: { +# CHECK-NEXT: "code": "PASS", +# CHECK-NEXT: "elapsed": {{[0-9.]+}}, +# CHECK-NEXT: "metrics": { +# CHECK-NEXT: "value0": 1, +# CHECK-NEXT: "value1": 2.3456 +# CHECK-NEXT: }, +# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini", +# CHECK-NEXT: "output": "Test passed." +# CHECK-NEXT: } +# CHECK-NEXT: ] +# CHECK-NEXT: } |

