| author | Kirill Bobyrev <kbobyrev.opensource@gmail.com> | 2018-08-28 09:42:41 +0000 |
|---|---|---|
| committer | Kirill Bobyrev <kbobyrev.opensource@gmail.com> | 2018-08-28 09:42:41 +0000 |
| commit | 0addd170ab0880941fa4089c2717f3f3a0e4e25a (patch) | |
| tree | 71d7a249b508800e1be898aa7cb789d4ed7c8f2b /llvm/utils/benchmark/tools | |
| parent | 0c4b84e2df541b42238b1e15d2abb5ee4b262402 (diff) | |
Pull google/benchmark library to the LLVM tree
This patch pulls google/benchmark v1.4.1 into the LLVM tree so that any
project can use it for benchmark generation. A dummy benchmark,
`llvm/benchmarks/DummyYAML.cpp`, is added to validate that the build
process works correctly.
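For reference, a standalone benchmark executable like `DummyYAML.cpp` is typically hooked up to the vendored library along the following lines. This is a minimal sketch, assuming `add_subdirectory` has already made google/benchmark's `benchmark` target available; it is not the patch's actual CMake code.

```cmake
# Minimal sketch (assumed wiring, not taken from the patch): build one
# benchmark executable and link it against the vendored google/benchmark.
add_executable(DummyYAML DummyYAML.cpp)
target_link_libraries(DummyYAML PRIVATE benchmark)
```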
The current version does not use LLVM LNT or the LLVM CMake
infrastructure, but it should still be sufficient for most users. Two new
CMake variables are introduced (a configuration sketch follows the list below):
* `LLVM_INCLUDE_BENCHMARKS` (`ON` by default) generates the benchmark
targets
* `LLVM_BUILD_BENCHMARKS` (`OFF` by default) adds the generated
benchmark targets to the list of default LLVM targets, i.e. if `ON`,
benchmarks are built by a standard build invocation such as `ninja` or
`make` with no specific targets
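A rough sketch of how two such variables are commonly wired is shown below. The variable names come from this commit, but the gating logic itself is an assumption, not a quote of LLVM's CMake files.

```cmake
# Sketch only: typical include/build toggles for optional targets.
# EXCLUDE_FROM_ALL keeps the targets out of the default `ninja`/`make` build.
option(LLVM_INCLUDE_BENCHMARKS "Generate benchmark targets." ON)
option(LLVM_BUILD_BENCHMARKS "Build benchmarks as part of the default targets." OFF)

if(LLVM_INCLUDE_BENCHMARKS)
  if(LLVM_BUILD_BENCHMARKS)
    add_subdirectory(benchmarks)                  # built by a plain `ninja` or `make`
  else()
    add_subdirectory(benchmarks EXCLUDE_FROM_ALL) # built only when requested explicitly
  endif()
endif()
```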
List of modifications to the vendored benchmark (see the sketch after the list):
* `BENCHMARK_ENABLE_TESTING` is disabled
* `BENCHMARK_ENABLE_EXCEPTIONS` is disabled
* `BENCHMARK_ENABLE_INSTALL` is disabled
* `BENCHMARK_ENABLE_GTEST_TESTS` is disabled
* `BENCHMARK_DOWNLOAD_DEPENDENCIES` is disabled
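In a typical vendoring setup these switches are pinned from the parent CMakeLists.txt before the library is added. A minimal sketch, assuming the library lives under `utils/benchmark` and that the cache is forced from the parent project (the actual patch may instead change the defaults inside the vendored CMake):

```cmake
# Sketch: force the vendored library's cache options off before including it.
# Option names are the ones listed above; the path and the use of FORCE are
# assumptions about how the parent project pulls the library in.
set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
set(BENCHMARK_ENABLE_INSTALL OFF CACHE BOOL "" FORCE)
set(BENCHMARK_ENABLE_GTEST_TESTS OFF CACHE BOOL "" FORCE)
set(BENCHMARK_DOWNLOAD_DEPENDENCIES OFF CACHE BOOL "" FORCE)
add_subdirectory(utils/benchmark)
```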
Original discussion can be found here:
http://lists.llvm.org/pipermail/llvm-dev/2018-August/125023.html
Reviewed by: dberris, lebedev.ri
Subscribers: ilya-biryukov, ioeric, EricWF, lebedev.ri, srhines,
dschuff, mgorny, krytarowski, fedor.sergeev, mgrang, jfb, llvm-commits
Differential Revision: https://reviews.llvm.org/D50894
llvm-svn: 340809
Diffstat (limited to 'llvm/utils/benchmark/tools')
| -rw-r--r-- | llvm/utils/benchmark/tools/compare.py | 316 |
| -rw-r--r-- | llvm/utils/benchmark/tools/gbench/Inputs/test1_run1.json | 102 |
| -rw-r--r-- | llvm/utils/benchmark/tools/gbench/Inputs/test1_run2.json | 102 |
| -rw-r--r-- | llvm/utils/benchmark/tools/gbench/Inputs/test2_run.json | 81 |
| -rw-r--r-- | llvm/utils/benchmark/tools/gbench/__init__.py | 8 |
| -rw-r--r-- | llvm/utils/benchmark/tools/gbench/report.py | 208 |
| -rw-r--r-- | llvm/utils/benchmark/tools/gbench/util.py | 159 |
| -rw-r--r-- | llvm/utils/benchmark/tools/strip_asm.py | 151 |
8 files changed, 1127 insertions, 0 deletions
diff --git a/llvm/utils/benchmark/tools/compare.py b/llvm/utils/benchmark/tools/compare.py new file mode 100644 index 00000000000..f0a4455f5fb --- /dev/null +++ b/llvm/utils/benchmark/tools/compare.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python + +""" +compare.py - versatile benchmark output compare tool +""" + +import argparse +from argparse import ArgumentParser +import sys +import gbench +from gbench import util, report +from gbench.util import * + + +def check_inputs(in1, in2, flags): + """ + Perform checking on the user provided inputs and diagnose any abnormalities + """ + in1_kind, in1_err = classify_input_file(in1) + in2_kind, in2_err = classify_input_file(in2) + output_file = find_benchmark_flag('--benchmark_out=', flags) + output_type = find_benchmark_flag('--benchmark_out_format=', flags) + if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file: + print(("WARNING: '--benchmark_out=%s' will be passed to both " + "benchmarks causing it to be overwritten") % output_file) + if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0: + print("WARNING: passing optional flags has no effect since both " + "inputs are JSON") + if output_type is not None and output_type != 'json': + print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`" + " is not supported.") % output_type) + sys.exit(1) + + +def create_parser(): + parser = ArgumentParser( + description='versatile benchmark output compare tool') + subparsers = parser.add_subparsers( + help='This tool has multiple modes of operation:', + dest='mode') + + parser_a = subparsers.add_parser( + 'benchmarks', + help='The most simple use-case, compare all the output of these two benchmarks') + baseline = parser_a.add_argument_group( + 'baseline', 'The benchmark baseline') + baseline.add_argument( + 'test_baseline', + metavar='test_baseline', + type=argparse.FileType('r'), + nargs=1, + help='A benchmark executable or JSON output file') + contender = parser_a.add_argument_group( + 'contender', 'The benchmark that will be compared against the baseline') + contender.add_argument( + 'test_contender', + metavar='test_contender', + type=argparse.FileType('r'), + nargs=1, + help='A benchmark executable or JSON output file') + parser_a.add_argument( + 'benchmark_options', + metavar='benchmark_options', + nargs=argparse.REMAINDER, + help='Arguments to pass when running benchmark executables') + + parser_b = subparsers.add_parser( + 'filters', help='Compare filter one with the filter two of benchmark') + baseline = parser_b.add_argument_group( + 'baseline', 'The benchmark baseline') + baseline.add_argument( + 'test', + metavar='test', + type=argparse.FileType('r'), + nargs=1, + help='A benchmark executable or JSON output file') + baseline.add_argument( + 'filter_baseline', + metavar='filter_baseline', + type=str, + nargs=1, + help='The first filter, that will be used as baseline') + contender = parser_b.add_argument_group( + 'contender', 'The benchmark that will be compared against the baseline') + contender.add_argument( + 'filter_contender', + metavar='filter_contender', + type=str, + nargs=1, + help='The second filter, that will be compared against the baseline') + parser_b.add_argument( + 'benchmark_options', + metavar='benchmark_options', + nargs=argparse.REMAINDER, + help='Arguments to pass when running benchmark executables') + + parser_c = subparsers.add_parser( + 'benchmarksfiltered', + help='Compare filter one of first benchmark with filter two of the second benchmark') + baseline = 
parser_c.add_argument_group( + 'baseline', 'The benchmark baseline') + baseline.add_argument( + 'test_baseline', + metavar='test_baseline', + type=argparse.FileType('r'), + nargs=1, + help='A benchmark executable or JSON output file') + baseline.add_argument( + 'filter_baseline', + metavar='filter_baseline', + type=str, + nargs=1, + help='The first filter, that will be used as baseline') + contender = parser_c.add_argument_group( + 'contender', 'The benchmark that will be compared against the baseline') + contender.add_argument( + 'test_contender', + metavar='test_contender', + type=argparse.FileType('r'), + nargs=1, + help='The second benchmark executable or JSON output file, that will be compared against the baseline') + contender.add_argument( + 'filter_contender', + metavar='filter_contender', + type=str, + nargs=1, + help='The second filter, that will be compared against the baseline') + parser_c.add_argument( + 'benchmark_options', + metavar='benchmark_options', + nargs=argparse.REMAINDER, + help='Arguments to pass when running benchmark executables') + + return parser + + +def main(): + # Parse the command line flags + parser = create_parser() + args, unknown_args = parser.parse_known_args() + if args.mode is None: + parser.print_help() + exit(1) + assert not unknown_args + benchmark_options = args.benchmark_options + + if args.mode == 'benchmarks': + test_baseline = args.test_baseline[0].name + test_contender = args.test_contender[0].name + filter_baseline = '' + filter_contender = '' + + # NOTE: if test_baseline == test_contender, you are analyzing the stdev + + description = 'Comparing %s to %s' % (test_baseline, test_contender) + elif args.mode == 'filters': + test_baseline = args.test[0].name + test_contender = args.test[0].name + filter_baseline = args.filter_baseline[0] + filter_contender = args.filter_contender[0] + + # NOTE: if filter_baseline == filter_contender, you are analyzing the + # stdev + + description = 'Comparing %s to %s (from %s)' % ( + filter_baseline, filter_contender, args.test[0].name) + elif args.mode == 'benchmarksfiltered': + test_baseline = args.test_baseline[0].name + test_contender = args.test_contender[0].name + filter_baseline = args.filter_baseline[0] + filter_contender = args.filter_contender[0] + + # NOTE: if test_baseline == test_contender and + # filter_baseline == filter_contender, you are analyzing the stdev + + description = 'Comparing %s (from %s) to %s (from %s)' % ( + filter_baseline, test_baseline, filter_contender, test_contender) + else: + # should never happen + print("Unrecognized mode of operation: '%s'" % args.mode) + parser.print_help() + exit(1) + + check_inputs(test_baseline, test_contender, benchmark_options) + + options_baseline = [] + options_contender = [] + + if filter_baseline and filter_contender: + options_baseline = ['--benchmark_filter=%s' % filter_baseline] + options_contender = ['--benchmark_filter=%s' % filter_contender] + + # Run the benchmarks and report the results + json1 = json1_orig = gbench.util.run_or_load_benchmark( + test_baseline, benchmark_options + options_baseline) + json2 = json2_orig = gbench.util.run_or_load_benchmark( + test_contender, benchmark_options + options_contender) + + # Now, filter the benchmarks so that the difference report can work + if filter_baseline and filter_contender: + replacement = '[%s vs. 
%s]' % (filter_baseline, filter_contender) + json1 = gbench.report.filter_benchmark( + json1_orig, filter_baseline, replacement) + json2 = gbench.report.filter_benchmark( + json2_orig, filter_contender, replacement) + + # Diff and output + output_lines = gbench.report.generate_difference_report(json1, json2) + print(description) + for ln in output_lines: + print(ln) + + +import unittest + + +class TestParser(unittest.TestCase): + def setUp(self): + self.parser = create_parser() + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'gbench', + 'Inputs') + self.testInput0 = os.path.join(testInputs, 'test1_run1.json') + self.testInput1 = os.path.join(testInputs, 'test1_run2.json') + + def test_benchmarks_basic(self): + parsed = self.parser.parse_args( + ['benchmarks', self.testInput0, self.testInput1]) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_with_remainder(self): + parsed = self.parser.parse_args( + ['benchmarks', self.testInput0, self.testInput1, 'd']) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.benchmark_options, ['d']) + + def test_benchmarks_with_remainder_after_doubleminus(self): + parsed = self.parser.parse_args( + ['benchmarks', self.testInput0, self.testInput1, '--', 'e']) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.benchmark_options, ['e']) + + def test_filters_basic(self): + parsed = self.parser.parse_args( + ['filters', self.testInput0, 'c', 'd']) + self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.test[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_contender[0], 'd') + self.assertFalse(parsed.benchmark_options) + + def test_filters_with_remainder(self): + parsed = self.parser.parse_args( + ['filters', self.testInput0, 'c', 'd', 'e']) + self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.test[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_contender[0], 'd') + self.assertEqual(parsed.benchmark_options, ['e']) + + def test_filters_with_remainder_after_doubleminus(self): + parsed = self.parser.parse_args( + ['filters', self.testInput0, 'c', 'd', '--', 'f']) + self.assertEqual(parsed.mode, 'filters') + self.assertEqual(parsed.test[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.filter_contender[0], 'd') + self.assertEqual(parsed.benchmark_options, ['f']) + + def test_benchmarksfiltered_basic(self): + parsed = self.parser.parse_args( + ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e']) + self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.filter_contender[0], 'e') + self.assertFalse(parsed.benchmark_options) + + def test_benchmarksfiltered_with_remainder(self): + parsed = self.parser.parse_args( + 
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f']) + self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.filter_contender[0], 'e') + self.assertEqual(parsed.benchmark_options[0], 'f') + + def test_benchmarksfiltered_with_remainder_after_doubleminus(self): + parsed = self.parser.parse_args( + ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g']) + self.assertEqual(parsed.mode, 'benchmarksfiltered') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.filter_baseline[0], 'c') + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertEqual(parsed.filter_contender[0], 'e') + self.assertEqual(parsed.benchmark_options[0], 'g') + + +if __name__ == '__main__': + # unittest.main() + main() + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 +# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; +# kate: indent-mode python; remove-trailing-spaces modified; diff --git a/llvm/utils/benchmark/tools/gbench/Inputs/test1_run1.json b/llvm/utils/benchmark/tools/gbench/Inputs/test1_run1.json new file mode 100644 index 00000000000..d7ec6a9c8f6 --- /dev/null +++ b/llvm/utils/benchmark/tools/gbench/Inputs/test1_run1.json @@ -0,0 +1,102 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_SameTimes", + "iterations": 1000, + "real_time": 10, + "cpu_time": 10, + "time_unit": "ns" + }, + { + "name": "BM_2xFaster", + "iterations": 1000, + "real_time": 50, + "cpu_time": 50, + "time_unit": "ns" + }, + { + "name": "BM_2xSlower", + "iterations": 1000, + "real_time": 50, + "cpu_time": 50, + "time_unit": "ns" + }, + { + "name": "BM_1PercentFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_1PercentSlower", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_10PercentFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_10PercentSlower", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_100xSlower", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_100xFaster", + "iterations": 1000, + "real_time": 10000, + "cpu_time": 10000, + "time_unit": "ns" + }, + { + "name": "BM_10PercentCPUToTime", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_ThirdFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_BadTimeUnit", + "iterations": 1000, + "real_time": 0.4, + "cpu_time": 0.5, + "time_unit": "s" + }, + { + "name": "BM_DifferentTimeUnit", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "s" + } + ] +} diff --git a/llvm/utils/benchmark/tools/gbench/Inputs/test1_run2.json b/llvm/utils/benchmark/tools/gbench/Inputs/test1_run2.json new file mode 100644 index 00000000000..59a5ffaca4d --- /dev/null +++ b/llvm/utils/benchmark/tools/gbench/Inputs/test1_run2.json @@ -0,0 +1,102 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + 
"mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_SameTimes", + "iterations": 1000, + "real_time": 10, + "cpu_time": 10, + "time_unit": "ns" + }, + { + "name": "BM_2xFaster", + "iterations": 1000, + "real_time": 25, + "cpu_time": 25, + "time_unit": "ns" + }, + { + "name": "BM_2xSlower", + "iterations": 20833333, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_1PercentFaster", + "iterations": 1000, + "real_time": 98.9999999, + "cpu_time": 98.9999999, + "time_unit": "ns" + }, + { + "name": "BM_1PercentSlower", + "iterations": 1000, + "real_time": 100.9999999, + "cpu_time": 100.9999999, + "time_unit": "ns" + }, + { + "name": "BM_10PercentFaster", + "iterations": 1000, + "real_time": 90, + "cpu_time": 90, + "time_unit": "ns" + }, + { + "name": "BM_10PercentSlower", + "iterations": 1000, + "real_time": 110, + "cpu_time": 110, + "time_unit": "ns" + }, + { + "name": "BM_100xSlower", + "iterations": 1000, + "real_time": 1.0000e+04, + "cpu_time": 1.0000e+04, + "time_unit": "ns" + }, + { + "name": "BM_100xFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_10PercentCPUToTime", + "iterations": 1000, + "real_time": 110, + "cpu_time": 90, + "time_unit": "ns" + }, + { + "name": "BM_ThirdFaster", + "iterations": 1000, + "real_time": 66.665, + "cpu_time": 66.664, + "time_unit": "ns" + }, + { + "name": "BM_BadTimeUnit", + "iterations": 1000, + "real_time": 0.04, + "cpu_time": 0.6, + "time_unit": "s" + }, + { + "name": "BM_DifferentTimeUnit", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "ns" + } + ] +} diff --git a/llvm/utils/benchmark/tools/gbench/Inputs/test2_run.json b/llvm/utils/benchmark/tools/gbench/Inputs/test2_run.json new file mode 100644 index 00000000000..15bc6980304 --- /dev/null +++ b/llvm/utils/benchmark/tools/gbench/Inputs/test2_run.json @@ -0,0 +1,81 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_Hi", + "iterations": 1234, + "real_time": 42, + "cpu_time": 24, + "time_unit": "ms" + }, + { + "name": "BM_Zero", + "iterations": 1000, + "real_time": 10, + "cpu_time": 10, + "time_unit": "ns" + }, + { + "name": "BM_Zero/4", + "iterations": 4000, + "real_time": 40, + "cpu_time": 40, + "time_unit": "ns" + }, + { + "name": "Prefix/BM_Zero", + "iterations": 2000, + "real_time": 20, + "cpu_time": 20, + "time_unit": "ns" + }, + { + "name": "Prefix/BM_Zero/3", + "iterations": 3000, + "real_time": 30, + "cpu_time": 30, + "time_unit": "ns" + }, + { + "name": "BM_One", + "iterations": 5000, + "real_time": 5, + "cpu_time": 5, + "time_unit": "ns" + }, + { + "name": "BM_One/4", + "iterations": 2000, + "real_time": 20, + "cpu_time": 20, + "time_unit": "ns" + }, + { + "name": "Prefix/BM_One", + "iterations": 1000, + "real_time": 10, + "cpu_time": 10, + "time_unit": "ns" + }, + { + "name": "Prefix/BM_One/3", + "iterations": 1500, + "real_time": 15, + "cpu_time": 15, + "time_unit": "ns" + }, + { + "name": "BM_Bye", + "iterations": 5321, + "real_time": 11, + "cpu_time": 63, + "time_unit": "ns" + } + ] +} diff --git a/llvm/utils/benchmark/tools/gbench/__init__.py b/llvm/utils/benchmark/tools/gbench/__init__.py new file mode 100644 index 00000000000..fce1a1acfbb --- /dev/null +++ b/llvm/utils/benchmark/tools/gbench/__init__.py @@ -0,0 +1,8 @@ +"""Google Benchmark tooling""" 
+ +__author__ = 'Eric Fiselier' +__email__ = 'eric@efcs.ca' +__versioninfo__ = (0, 5, 0) +__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' + +__all__ = [] diff --git a/llvm/utils/benchmark/tools/gbench/report.py b/llvm/utils/benchmark/tools/gbench/report.py new file mode 100644 index 00000000000..0c090981a83 --- /dev/null +++ b/llvm/utils/benchmark/tools/gbench/report.py @@ -0,0 +1,208 @@ +"""report.py - Utilities for reporting statistics about benchmark results +""" +import os +import re +import copy + +class BenchmarkColor(object): + def __init__(self, name, code): + self.name = name + self.code = code + + def __repr__(self): + return '%s%r' % (self.__class__.__name__, + (self.name, self.code)) + + def __format__(self, format): + return self.code + +# Benchmark Colors Enumeration +BC_NONE = BenchmarkColor('NONE', '') +BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') +BC_CYAN = BenchmarkColor('CYAN', '\033[96m') +BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') +BC_HEADER = BenchmarkColor('HEADER', '\033[92m') +BC_WARNING = BenchmarkColor('WARNING', '\033[93m') +BC_WHITE = BenchmarkColor('WHITE', '\033[97m') +BC_FAIL = BenchmarkColor('FAIL', '\033[91m') +BC_ENDC = BenchmarkColor('ENDC', '\033[0m') +BC_BOLD = BenchmarkColor('BOLD', '\033[1m') +BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') + +def color_format(use_color, fmt_str, *args, **kwargs): + """ + Return the result of 'fmt_str.format(*args, **kwargs)' after transforming + 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' + is False then all color codes in 'args' and 'kwargs' are replaced with + the empty string. + """ + assert use_color is True or use_color is False + if not use_color: + args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE + for arg in args] + kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE + for key, arg in kwargs.items()} + return fmt_str.format(*args, **kwargs) + + +def find_longest_name(benchmark_list): + """ + Return the length of the longest benchmark name in a given list of + benchmark JSON objects + """ + longest_name = 1 + for bc in benchmark_list: + if len(bc['name']) > longest_name: + longest_name = len(bc['name']) + return longest_name + + +def calculate_change(old_val, new_val): + """ + Return a float representing the decimal change between old_val and new_val. + """ + if old_val == 0 and new_val == 0: + return 0.0 + if old_val == 0: + return float(new_val - old_val) / (float(old_val + new_val) / 2) + return float(new_val - old_val) / abs(old_val) + + +def filter_benchmark(json_orig, family, replacement=""): + """ + Apply a filter to the json, and only leave the 'family' of benchmarks. + """ + regex = re.compile(family) + filtered = {} + filtered['benchmarks'] = [] + for be in json_orig['benchmarks']: + if not regex.search(be['name']): + continue + filteredbench = copy.deepcopy(be) # Do NOT modify the old name! + filteredbench['name'] = regex.sub(replacement, filteredbench['name']) + filtered['benchmarks'].append(filteredbench) + return filtered + + +def generate_difference_report(json1, json2, use_color=True): + """ + Calculate and report the difference between each test of two benchmarks + runs specified as 'json1' and 'json2'. 
+ """ + first_col_width = find_longest_name(json1['benchmarks']) + def find_test(name): + for b in json2['benchmarks']: + if b['name'] == name: + return b + return None + first_col_width = max(first_col_width, len('Benchmark')) + first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format( + 'Benchmark', 12 + first_col_width) + output_strs = [first_line, '-' * len(first_line)] + + gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn) + for bn in gen: + other_bench = find_test(bn['name']) + if not other_bench: + continue + + if bn['time_unit'] != other_bench['time_unit']: + continue + + def get_color(res): + if res > 0.05: + return BC_FAIL + elif res > -0.07: + return BC_WHITE + else: + return BC_CYAN + fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" + tres = calculate_change(bn['real_time'], other_bench['real_time']) + cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) + output_strs += [color_format(use_color, fmt_str, + BC_HEADER, bn['name'], first_col_width, + get_color(tres), tres, get_color(cpures), cpures, + bn['real_time'], other_bench['real_time'], + bn['cpu_time'], other_bench['cpu_time'], + endc=BC_ENDC)] + return output_strs + +############################################################################### +# Unit tests + +import unittest + +class TestReportDifference(unittest.TestCase): + def load_results(self): + import json + testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') + testOutput1 = os.path.join(testInputs, 'test1_run1.json') + testOutput2 = os.path.join(testInputs, 'test1_run2.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 + + def test_basic(self): + expect_lines = [ + ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], + ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], + ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], + ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], + ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], + ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], + ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], + ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'], + ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'], + ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'], + ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], + ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], + ] + json1, json2 = self.load_results() + output_lines_with_header = generate_difference_report(json1, json2, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(len(parts), 7) + self.assertEqual(parts, expect_lines[i]) + + +class TestReportDifferenceBetweenFamilies(unittest.TestCase): + def load_result(self): + import json + testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') + testOutput = os.path.join(testInputs, 'test2_run.json') + with open(testOutput, 'r') as f: + json = json.load(f) + return json + + def test_basic(self): + expect_lines = [ + ['.', '-0.5000', 
'-0.5000', '10', '5', '10', '5'], + ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], + ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], + ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], + ] + json = self.load_result() + json1 = filter_benchmark(json, "BM_Z.ro", ".") + json2 = filter_benchmark(json, "BM_O.e", ".") + output_lines_with_header = generate_difference_report(json1, json2, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(len(parts), 7) + self.assertEqual(parts, expect_lines[i]) + + +if __name__ == '__main__': + unittest.main() + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 +# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; +# kate: indent-mode python; remove-trailing-spaces modified; diff --git a/llvm/utils/benchmark/tools/gbench/util.py b/llvm/utils/benchmark/tools/gbench/util.py new file mode 100644 index 00000000000..07c23772754 --- /dev/null +++ b/llvm/utils/benchmark/tools/gbench/util.py @@ -0,0 +1,159 @@ +"""util.py - General utilities for running, loading, and processing benchmarks +""" +import json +import os +import tempfile +import subprocess +import sys + +# Input file type enumeration +IT_Invalid = 0 +IT_JSON = 1 +IT_Executable = 2 + +_num_magic_bytes = 2 if sys.platform.startswith('win') else 4 +def is_executable_file(filename): + """ + Return 'True' if 'filename' names a valid file which is likely + an executable. A file is considered an executable if it starts with the + magic bytes for a EXE, Mach O, or ELF file. + """ + if not os.path.isfile(filename): + return False + with open(filename, mode='rb') as f: + magic_bytes = f.read(_num_magic_bytes) + if sys.platform == 'darwin': + return magic_bytes in [ + b'\xfe\xed\xfa\xce', # MH_MAGIC + b'\xce\xfa\xed\xfe', # MH_CIGAM + b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 + b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 + b'\xca\xfe\xba\xbe', # FAT_MAGIC + b'\xbe\xba\xfe\xca' # FAT_CIGAM + ] + elif sys.platform.startswith('win'): + return magic_bytes == b'MZ' + else: + return magic_bytes == b'\x7FELF' + + +def is_json_file(filename): + """ + Returns 'True' if 'filename' names a valid JSON output file. + 'False' otherwise. + """ + try: + with open(filename, 'r') as f: + json.load(f) + return True + except: + pass + return False + + +def classify_input_file(filename): + """ + Return a tuple (type, msg) where 'type' specifies the classified type + of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable + string represeting the error. + """ + ftype = IT_Invalid + err_msg = None + if not os.path.exists(filename): + err_msg = "'%s' does not exist" % filename + elif not os.path.isfile(filename): + err_msg = "'%s' does not name a file" % filename + elif is_executable_file(filename): + ftype = IT_Executable + elif is_json_file(filename): + ftype = IT_JSON + else: + err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename + return ftype, err_msg + + +def check_input_file(filename): + """ + Classify the file named by 'filename' and return the classification. + If the file is classified as 'IT_Invalid' print an error message and exit + the program. 
+ """ + ftype, msg = classify_input_file(filename) + if ftype == IT_Invalid: + print("Invalid input file: %s" % msg) + sys.exit(1) + return ftype + +def find_benchmark_flag(prefix, benchmark_flags): + """ + Search the specified list of flags for a flag matching `<prefix><arg>` and + if it is found return the arg it specifies. If specified more than once the + last value is returned. If the flag is not found None is returned. + """ + assert prefix.startswith('--') and prefix.endswith('=') + result = None + for f in benchmark_flags: + if f.startswith(prefix): + result = f[len(prefix):] + return result + +def remove_benchmark_flags(prefix, benchmark_flags): + """ + Return a new list containing the specified benchmark_flags except those + with the specified prefix. + """ + assert prefix.startswith('--') and prefix.endswith('=') + return [f for f in benchmark_flags if not f.startswith(prefix)] + +def load_benchmark_results(fname): + """ + Read benchmark output from a file and return the JSON object. + REQUIRES: 'fname' names a file containing JSON benchmark output. + """ + with open(fname, 'r') as f: + return json.load(f) + + +def run_benchmark(exe_name, benchmark_flags): + """ + Run a benchmark specified by 'exe_name' with the specified + 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve + real time console output. + RETURNS: A JSON object representing the benchmark output + """ + output_name = find_benchmark_flag('--benchmark_out=', + benchmark_flags) + is_temp_output = False + if output_name is None: + is_temp_output = True + thandle, output_name = tempfile.mkstemp() + os.close(thandle) + benchmark_flags = list(benchmark_flags) + \ + ['--benchmark_out=%s' % output_name] + + cmd = [exe_name] + benchmark_flags + print("RUNNING: %s" % ' '.join(cmd)) + exitCode = subprocess.call(cmd) + if exitCode != 0: + print('TEST FAILED...') + sys.exit(exitCode) + json_res = load_benchmark_results(output_name) + if is_temp_output: + os.unlink(output_name) + return json_res + + +def run_or_load_benchmark(filename, benchmark_flags): + """ + Get the results for a specified benchmark. If 'filename' specifies + an executable benchmark then the results are generated by running the + benchmark. Otherwise 'filename' must name a valid JSON output file, + which is loaded and the result returned. + """ + ftype = check_input_file(filename) + if ftype == IT_JSON: + return load_benchmark_results(filename) + elif ftype == IT_Executable: + return run_benchmark(filename, benchmark_flags) + else: + assert False # This branch is unreachable
\ No newline at end of file diff --git a/llvm/utils/benchmark/tools/strip_asm.py b/llvm/utils/benchmark/tools/strip_asm.py new file mode 100644 index 00000000000..9030550b43b --- /dev/null +++ b/llvm/utils/benchmark/tools/strip_asm.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python + +""" +strip_asm.py - Cleanup ASM output for the specified file +""" + +from argparse import ArgumentParser +import sys +import os +import re + +def find_used_labels(asm): + found = set() + label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") + for l in asm.splitlines(): + m = label_re.match(l) + if m: + found.add('.L%s' % m.group(1)) + return found + + +def normalize_labels(asm): + decls = set() + label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") + for l in asm.splitlines(): + m = label_decl.match(l) + if m: + decls.add(m.group(0)) + if len(decls) == 0: + return asm + needs_dot = next(iter(decls))[0] != '.' + if not needs_dot: + return asm + for ld in decls: + asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm) + return asm + + +def transform_labels(asm): + asm = normalize_labels(asm) + used_decls = find_used_labels(asm) + new_asm = '' + label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") + for l in asm.splitlines(): + m = label_decl.match(l) + if not m or m.group(0) in used_decls: + new_asm += l + new_asm += '\n' + return new_asm + + +def is_identifier(tk): + if len(tk) == 0: + return False + first = tk[0] + if not first.isalpha() and first != '_': + return False + for i in range(1, len(tk)): + c = tk[i] + if not c.isalnum() and c != '_': + return False + return True + +def process_identifiers(l): + """ + process_identifiers - process all identifiers and modify them to have + consistent names across all platforms; specifically across ELF and MachO. + For example, MachO inserts an additional understore at the beginning of + names. This function removes that. 
+ """ + parts = re.split(r'([a-zA-Z0-9_]+)', l) + new_line = '' + for tk in parts: + if is_identifier(tk): + if tk.startswith('__Z'): + tk = tk[1:] + elif tk.startswith('_') and len(tk) > 1 and \ + tk[1].isalpha() and tk[1] != 'Z': + tk = tk[1:] + new_line += tk + return new_line + + +def process_asm(asm): + """ + Strip the ASM of unwanted directives and lines + """ + new_contents = '' + asm = transform_labels(asm) + + # TODO: Add more things we want to remove + discard_regexes = [ + re.compile("\s+\..*$"), # directive + re.compile("\s*#(NO_APP|APP)$"), #inline ASM + re.compile("\s*#.*$"), # comment line + re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive + re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), + ] + keep_regexes = [ + + ] + fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") + for l in asm.splitlines(): + # Remove Mach-O attribute + l = l.replace('@GOTPCREL', '') + add_line = True + for reg in discard_regexes: + if reg.match(l) is not None: + add_line = False + break + for reg in keep_regexes: + if reg.match(l) is not None: + add_line = True + break + if add_line: + if fn_label_def.match(l) and len(new_contents) != 0: + new_contents += '\n' + l = process_identifiers(l) + new_contents += l + new_contents += '\n' + return new_contents + +def main(): + parser = ArgumentParser( + description='generate a stripped assembly file') + parser.add_argument( + 'input', metavar='input', type=str, nargs=1, + help='An input assembly file') + parser.add_argument( + 'out', metavar='output', type=str, nargs=1, + help='The output file') + args, unknown_args = parser.parse_known_args() + input = args.input[0] + output = args.out[0] + if not os.path.isfile(input): + print(("ERROR: input file '%s' does not exist") % input) + sys.exit(1) + contents = None + with open(input, 'r') as f: + contents = f.read() + new_contents = process_asm(contents) + with open(output, 'w') as f: + f.write(new_contents) + + +if __name__ == '__main__': + main() + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 +# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; +# kate: indent-mode python; remove-trailing-spaces modified; |

