Diffstat (limited to 'lldb/packages/Python/lldbsuite/test/dotest.py')
-rw-r--r-- | lldb/packages/Python/lldbsuite/test/dotest.py | 855
1 file changed, 232 insertions(+), 623 deletions(-)
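Most of the 623 deletions below are the module-level globals that used to sit at the top of dotest.py; the commit moves them into the new lldbsuite.test.configuration module and rewrites every reader and writer to qualify the names with "configuration.". A minimal sketch of the pattern (the variable names mirror the diff; the file layout is illustrative, not the commit itself):

    # configuration.py -- shared, mutable suite settings (sketch).
    # A plain module acts as a natural singleton: every importer
    # sees the same namespace object.
    verbose = 1
    parsable = False
    skipCategories = []

    # dotest.py -- import the module, not the individual names.
    from . import configuration

    def quiet_mode():
        # Assigning through the module object mutates the one shared
        # namespace; 'from .configuration import verbose' would only
        # have copied the value, and 'global' declarations cannot
        # reach across module boundaries.
        configuration.verbose = 0
        configuration.parsable = True

This is why nearly every 'global foo' statement disappears in the hunks below: attribute assignment on the imported module replaces rebinding of per-module globals.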
diff --git a/lldb/packages/Python/lldbsuite/test/dotest.py b/lldb/packages/Python/lldbsuite/test/dotest.py
index 2be8ac9c99f..ce14fd803c7 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest.py
@@ -40,6 +40,7 @@ import unittest2
 
 # LLDB Modules
 import lldbsuite
+from . import configuration
 from . import dotest_args
 from . import lldbtest_config
 from . import test_categories
@@ -82,173 +83,9 @@ class _WritelnDecorator(object):
 #
 # Global variables:
 #
-
-# The test suite.
-suite = unittest2.TestSuite()
-
-# By default, benchmarks tests are not run.
-just_do_benchmarks_test = False
-
-dont_do_dsym_test = False
-dont_do_dwarf_test = False
-dont_do_dwo_test = False
-
-# The blacklist is optional (-b blacklistFile) and allows a central place to skip
-# testclass's and/or testclass.testmethod's.
-blacklist = None
-
-# The dictionary as a result of sourcing blacklistFile.
-blacklistConfig = {}
-
-# The list of categories we said we care about
-categoriesList = None
-# set to true if we are going to use categories for cherry-picking test cases
-useCategories = False
-# Categories we want to skip
-skipCategories = []
-# use this to track per-category failures
-failuresPerCategory = {}
-
-# The path to LLDB.framework is optional.
-lldbFrameworkPath = None
-
-# The config file is optional.
-configFile = None
-
-# Test suite repeat count. Can be overwritten with '-# count'.
-count = 1
-
-# The dictionary as a result of sourcing configFile.
-config = {}
-# The pre_flight and post_flight functions come from reading a config file.
-pre_flight = None
-post_flight = None
-# So do the lldbtest_remote_sandbox and lldbtest_remote_shell_template variables.
-lldbtest_remote_sandbox = None
-lldbtest_remote_shell_template = None
-
-# The 'archs' and 'compilers' can be specified via either command line or configFile,
-# with the command line overriding the configFile. The corresponding options can be
-# specified more than once. For example, "-A x86_64 -A i386" => archs=['x86_64', 'i386']
-# and "-C gcc -C clang" => compilers=['gcc', 'clang'].
-archs = None        # Must be initialized after option parsing
-compilers = None    # Must be initialized after option parsing
-
-# The arch might dictate some specific CFLAGS to be passed to the toolchain to build
-# the inferior programs. The global variable cflags_extras provides a hook to do
-# just that.
-cflags_extras = ''
-
-# Dump the Python sys.path variable. Use '-D' to dump sys.path.
-dumpSysPath = False
-
-# Full path of the benchmark executable, as specified by the '-e' option.
-bmExecutable = None
-# The breakpoint specification of bmExecutable, as specified by the '-x' option.
-bmBreakpointSpec = None
-# The benchmark iteration count, as specified by the '-y' option.
-bmIterationCount = -1
-
-# By default, don't exclude any directories. Use '-X' to add one excluded directory.
-excluded = set(['.svn', '.git'])
-
-# By default, failfast is False. Use '-F' to overwrite it.
-failfast = False
-
-# The filters (testclass.testmethod) used to admit tests into our test suite.
-filters = []
-
-# The runhooks is a list of lldb commands specifically for the debugger.
-# Use '-k' to specify a runhook.
-runHooks = []
-
-# If '-g' is specified, the filterspec is not exclusive. If a test module does
-# not contain testclass.testmethod which matches the filterspec, the whole test
-# module is still admitted into our test suite. fs4all flag defaults to True.
-fs4all = True
-
-# Ignore the build search path relative to this script to locate the lldb.py module.
-ignore = False
-
-# By default, we do not skip build and cleanup. Use '-S' option to override.
-skip_build_and_cleanup = False
-
-# By default, we skip long running test case. Use '-l' option to override.
-skip_long_running_test = True
-
-# By default, we print the build dir, lldb version, and svn info. Use '-n' option to
-# turn it off.
-noHeaders = False
-
-# Parsable mode silences headers, and any other output this script might generate, and instead
-# prints machine-readable output similar to what clang tests produce.
-parsable = False
-
-# The regular expression pattern to match against eligible filenames as our test cases.
-regexp = None
-
-# By default, tests are executed in place and cleanups are performed afterwards.
-# Use '-r dir' option to relocate the tests and their intermediate files to a
-# different directory and to forgo any cleanups. The directory specified must
-# not exist yet.
-rdir = None
-
-# By default, recorded session info for errored/failed test are dumped into its
-# own file under a session directory named after the timestamp of the test suite
-# run. Use '-s session-dir-name' to specify a specific dir name.
-sdir_name = None
-
-# Set this flag if there is any session info dumped during the test run.
-sdir_has_content = False
-
-# svn_info stores the output from 'svn info lldb.base.dir'.
-svn_info = ''
-
-# svn_silent means do not try to obtain svn status
-svn_silent = True
-
-# Default verbosity is 0.
-verbose = 1
-
-# Set to True only if verbose is 0 and LLDB trace mode is off.
-progress_bar = False
-
-# By default, search from the script directory.
-# We can't use sys.path[0] to determine the script directory
-# because it doesn't work under a debugger
-testdirs = [ os.path.dirname(os.path.realpath(__file__)) ]
-
-# Separator string.
-separator = '-' * 70
-
-failed = False
-
-# LLDB Remote platform setting
-lldb_platform_name = None
-lldb_platform_url = None
-lldb_platform_working_dir = None
-
-# Parallel execution settings
-is_inferior_test_runner = False
-multiprocess_test_subdir = None
-num_threads = None
-output_on_success = False
-no_multiprocess_test_runner = False
-test_runner_name = None
-
-# Test results handling globals
-results_filename = None
-results_port = None
-results_formatter_name = None
-results_formatter_object = None
-results_formatter_options = None
-
-# The names of all tests. Used to assert we don't have two tests with the same base name.
-all_tests = set()
-
 def usage(parser):
     parser.print_help()
-    if verbose > 0:
+    if configuration.verbose > 0:
         print("""
 Examples:
 
@@ -360,63 +197,12 @@ o GDB_REMOTE_LOG: if defined, specifies the log file pathname for the
 """)
     sys.exit(0)
 
-
-def setCrashInfoHook_Mac(text):
-    from . import crashinfo
-    crashinfo.setCrashReporterDescription(text)
-
-# implement this in some suitable way for your platform, and then bind it
-# to setCrashInfoHook
-def setCrashInfoHook_NonMac(text):
-    pass
-
-setCrashInfoHook = None
-
-def deleteCrashInfoDylib(dylib_path):
-    try:
-        # Need to modify this to handle multiple tests running at the same time. If we move this
-        # to the test's real dir, all should be we run sequentially within a test directory.
-        # os.remove(dylib_path)
-        None
-    finally:
-        pass
-
-def setupCrashInfoHook():
-    global setCrashInfoHook
-    setCrashInfoHook = setCrashInfoHook_NonMac # safe default
-    if platform.system() == "Darwin":
-        from . import lock
-        test_dir = os.environ['LLDB_TEST']
-        if not test_dir or not os.path.exists(test_dir):
-            return
-        dylib_lock = os.path.join(test_dir,"crashinfo.lock")
-        dylib_src = os.path.join(test_dir,"crashinfo.c")
-        dylib_dst = os.path.join(test_dir,"crashinfo.so")
-        try:
-            compile_lock = lock.Lock(dylib_lock)
-            compile_lock.acquire()
-            if not os.path.isfile(dylib_dst) or os.path.getmtime(dylib_dst) < os.path.getmtime(dylib_src):
-                # we need to compile
-                cmd = "SDKROOT= xcrun clang %s -o %s -framework Python -Xlinker -dylib -iframework /System/Library/Frameworks/ -Xlinker -F /System/Library/Frameworks/" % (dylib_src,dylib_dst)
-                if subprocess.call(cmd,shell=True) != 0 or not os.path.isfile(dylib_dst):
-                    raise Exception('command failed: "{}"'.format(cmd))
-        finally:
-            compile_lock.release()
-            del compile_lock
-
-        setCrashInfoHook = setCrashInfoHook_Mac
-
-    else:
-        pass
-
 def shouldSkipBecauseOfCategories(test_categories):
-    global useCategories, categoriesList, skipCategories
-
-    if useCategories:
-        if len(test_categories) == 0 or len(categoriesList & set(test_categories)) == 0:
+    if configuration.useCategories:
+        if len(test_categories) == 0 or len(configuration.categoriesList & set(test_categories)) == 0:
             return True
 
-    for category in skipCategories:
+    for category in configuration.skipCategories:
         if category in test_categories:
            return True
 
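The rewritten shouldSkipBecauseOfCategories() keeps the original set-intersection logic and only changes where its inputs live. A standalone sketch of the same gate (parameter names are illustrative):

    def should_skip(test_categories, wanted, skipped, use_categories):
        # With category cherry-picking enabled, a test runs only if it
        # declares at least one of the requested categories.
        if use_categories:
            if not test_categories or not (set(wanted) & set(test_categories)):
                return True
        # An explicit skip list applies even without cherry-picking.
        return any(category in test_categories for category in skipped)

    # should_skip(["dwarf"], {"dsym"}, [], True)  -> True
    # should_skip(["dwarf"], {"dwarf"}, [], True) -> False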
@@ -428,56 +214,6 @@ def parseOptionsAndInitTestdirs():
 
     '-h/--help as the first option prints out usage info and exit the program.
     """
-    global just_do_benchmarks_test
-    global dont_do_dsym_test
-    global dont_do_dwarf_test
-    global dont_do_dwo_test
-    global blacklist
-    global blacklistConfig
-    global categoriesList
-    global validCategories
-    global useCategories
-    global skipCategories
-    global lldbFrameworkPath
-    global configFile
-    global archs
-    global compilers
-    global count
-    global dumpSysPath
-    global bmExecutable
-    global bmBreakpointSpec
-    global bmIterationCount
-    global failfast
-    global filters
-    global fs4all
-    global ignore
-    global progress_bar
-    global runHooks
-    global skip_build_and_cleanup
-    global skip_long_running_test
-    global noHeaders
-    global parsable
-    global regexp
-    global rdir
-    global sdir_name
-    global svn_silent
-    global verbose
-    global testdirs
-    global lldb_platform_name
-    global lldb_platform_url
-    global lldb_platform_working_dir
-    global setCrashInfoHook
-    global is_inferior_test_runner
-    global multiprocess_test_subdir
-    global num_threads
-    global output_on_success
-    global no_multiprocess_test_runner
-    global test_runner_name
-    global results_filename
-    global results_formatter_name
-    global results_formatter_options
-    global results_port
-
     do_help = False
 
     platform_system = platform.system()
@@ -510,17 +246,17 @@ def parseOptionsAndInitTestdirs():
         do_help = True
 
     if args.compilers:
-        compilers = args.compilers
+        configuration.compilers = args.compilers
     else:
         # Use a compiler appropriate appropriate for the Apple SDK if one was specified
         if platform_system == 'Darwin' and args.apple_sdk:
-            compilers = [seven.get_command_output('xcrun -sdk "%s" -find clang 2> /dev/null' % (args.apple_sdk))]
+            configuration.compilers = [seven.get_command_output('xcrun -sdk "%s" -find clang 2> /dev/null' % (args.apple_sdk))]
         else:
             # 'clang' on ubuntu 14.04 is 3.4 so we try clang-3.5 first
             candidateCompilers = ['clang-3.5', 'clang', 'gcc']
             for candidate in candidateCompilers:
                 if which(candidate):
-                    compilers = [candidate]
+                    configuration.compilers = [candidate]
                     break
 
     if args.channels:
@@ -534,26 +270,26 @@ def parseOptionsAndInitTestdirs():
         os.environ['SDKROOT'] = seven.get_command_output('xcrun --sdk "%s" --show-sdk-path 2> /dev/null' % (args.apple_sdk))
 
     if args.archs:
-        archs = args.archs
-        for arch in archs:
+        configuration.archs = args.archs
+        for arch in configuration.archs:
             if arch.startswith('arm') and platform_system == 'Darwin' and not args.apple_sdk:
                 os.environ['SDKROOT'] = seven.get_command_output('xcrun --sdk iphoneos.internal --show-sdk-path 2> /dev/null')
                 if not os.path.exists(os.environ['SDKROOT']):
                     os.environ['SDKROOT'] = seven.get_command_output('xcrun --sdk iphoneos --show-sdk-path 2> /dev/null')
     else:
-        archs = [platform_machine]
+        configuration.archs = [platform_machine]
 
     if args.categoriesList:
-        categoriesList = set(test_categories.validate(args.categoriesList, False))
-        useCategories = True
+        configuration.categoriesList = set(test_categories.validate(args.categoriesList, False))
+        configuration.useCategories = True
     else:
-        categoriesList = []
+        configuration.categoriesList = []
 
     if args.skipCategories:
-        skipCategories = test_categories.validate(args.skipCategories, False)
+        configuration.skipCategories = test_categories.validate(args.skipCategories, False)
 
     if args.D:
-        dumpSysPath = True
+        configuration.dumpSysPath = True
 
     if args.E:
         cflags_extras = args.E
@@ -561,11 +297,11 @@ def parseOptionsAndInitTestdirs():
 
     # argparse makes sure we have correct options
     if args.N == 'dwarf':
-        dont_do_dwarf_test = True
+        configuration.dont_do_dwarf_test = True
     elif args.N == 'dwo':
-        dont_do_dwo_test = True
+        configuration.dont_do_dwo_test = True
     elif args.N == 'dsym':
-        dont_do_dsym_test = True
+        configuration.dont_do_dsym_test = True
 
     if args.a or args.plus_a:
         print("Options '-a' and '+a' have been deprecated. Please use the test category\n"
@@ -578,7 +314,7 @@ def parseOptionsAndInitTestdirs():
         sys.exit(1)
 
     if args.plus_b:
-        just_do_benchmarks_test = True
+        lldbsuite.test.just_do_benchmarks_test = True
 
     if args.b:
         if args.b.startswith('-'):
@@ -588,15 +324,15 @@ def parseOptionsAndInitTestdirs():
             print('Blacklist file:', blacklistFile, 'does not exist!')
             usage(parser)
         # Now read the blacklist contents and assign it to blacklist.
-        execfile(blacklistFile, globals(), blacklistConfig)
-        blacklist = blacklistConfig.get('blacklist')
+        execfile(blacklistFile, globals(), configuration.blacklistConfig)
+        configuration.blacklist = configuration.blacklistConfig.get('blacklist')
 
     if args.c:
         if args.c.startswith('-'):
             usage(parser)
-        configFile = args.c
-        if not os.path.isfile(configFile):
-            print('Config file:', configFile, 'does not exist!')
+        configuration.configFile = args.c
+        if not os.path.isfile(configuration.configFile):
+            print('Config file:', configuration.configFile, 'does not exist!')
             usage(parser)
 
     if args.d:
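Both the blacklist file and the config file are sourced with Python 2's execfile(), passing a plain dict as the local namespace so that assignments made by the sourced file can be read back afterwards. A portable sketch of the same idea (the exec_file helper and the file path are illustrative, not part of the diff):

    def exec_file(path, global_ns, local_ns):
        # Python 3 replacement for execfile(): sourcing the file fills
        # local_ns as a side effect, just like the calls above.
        with open(path) as f:
            exec(compile(f.read(), path, 'exec'), global_ns, local_ns)

    blacklist_config = {}
    exec_file('blacklist.py', globals(), blacklist_config)  # path illustrative
    blacklist = blacklist_config.get('blacklist')           # None if not defined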
@@ -607,17 +343,17 @@ def parseOptionsAndInitTestdirs():
 
     if args.e:
         if args.e.startswith('-'):
             usage(parser)
-        bmExecutable = args.e
-        if not is_exe(bmExecutable):
+        configuration.bmExecutable = args.e
+        if not is_exe(configuration.bmExecutable):
             usage(parser)
 
     if args.F:
-        failfast = True
+        configuration.failfast = True
 
     if args.f:
         if any([x.startswith('-') for x in args.f]):
             usage(parser)
-        filters.extend(args.f)
+        configuration.filters.extend(args.f)
         # Shut off multiprocessing mode when additional filters are specified.
         # The rational is that the user is probably going after a very specific
         # test and doesn't need a bunch of parallel test runners all looking for
@@ -628,22 +364,22 @@ def parseOptionsAndInitTestdirs():
         # it in a frenzy. Also, '-v' now spits out all test run output even
         # on success, so the standard recipe for redoing a failing test (with -v
         # and a repeat of the test name) isn't impacted by a parallel test run
        # manner. If we really need filtered parallel runs in the future, consider
        # adding a --no-output-on-success that prevents -v from setting
        # output-on-success.
-        no_multiprocess_test_runner = True
+        configuration.no_multiprocess_test_runner = True
 
     if args.g:
-        fs4all = False
+        configuration.fs4all = False
 
     if args.i:
-        ignore = True
+        configuration.ignore = True
 
     if args.k:
-        runHooks.extend(args.k)
+        configuration.runHooks.extend(args.k)
 
     if args.l:
-        skip_long_running_test = False
+        configuration.skip_long_running_test = False
 
     if args.framework:
-        lldbFrameworkPath = args.framework
+        configuration.lldbFrameworkPath = args.framework
 
     if args.executable:
         lldbtest_config.lldbExec = args.executable
@@ -652,54 +388,54 @@ def parseOptionsAndInitTestdirs():
         os.environ["LIBCXX_PATH"] = args.libcxx
 
     if args.n:
-        noHeaders = True
+        configuration.noHeaders = True
 
     if args.p:
         if args.p.startswith('-'):
             usage(parser)
-        regexp = args.p
+        configuration.regexp = args.p
 
     if args.q:
-        noHeaders = True
-        parsable = True
+        configuration.noHeaders = True
+        configuration.parsable = True
 
     if args.P and not args.v:
-        progress_bar = True
-        verbose = 0
+        configuration.progress_bar = True
+        configuration.verbose = 0
 
     if args.R:
         if args.R.startswith('-'):
             usage(parser)
-        rdir = os.path.abspath(args.R)
-        if os.path.exists(rdir):
+        configuration.rdir = os.path.abspath(args.R)
+        if os.path.exists(configuration.rdir):
             import shutil
-            print('Removing tree:', rdir)
-            shutil.rmtree(rdir)
+            print('Removing tree:', configuration.rdir)
+            shutil.rmtree(configuration.rdir)
 
     if args.r:
         if args.r.startswith('-'):
             usage(parser)
-        rdir = os.path.abspath(args.r)
-        if os.path.exists(rdir):
-            print('Relocated directory:', rdir, 'must not exist!')
+        configuration.rdir = os.path.abspath(args.r)
+        if os.path.exists(configuration.rdir):
+            print('Relocated directory:', configuration.rdir, 'must not exist!')
             usage(parser)
 
     if args.S:
-        skip_build_and_cleanup = True
+        configuration.skip_build_and_cleanup = True
 
     if args.s:
         if args.s.startswith('-'):
             usage(parser)
-        sdir_name = args.s
+        configuration.sdir_name = args.s
 
     if args.t:
         os.environ['LLDB_COMMAND_TRACE'] = 'YES'
 
     if args.T:
-        svn_silent = False
+        configuration.svn_silent = False
 
     if args.v:
-        verbose = 2
+        configuration.verbose = 2
 
     if args.w:
         os.environ['LLDB_WAIT_BETWEEN_TEST_CASES'] = 'YES'
@@ -707,20 +443,20 @@ def parseOptionsAndInitTestdirs():
 
     if args.X:
         if args.X.startswith('-'):
             usage(parser)
-        excluded.add(args.X)
+        configuration.excluded.add(args.X)
 
     if args.x:
         if args.x.startswith('-'):
             usage(parser)
-        bmBreakpointSpec = args.x
+        configuration.bmBreakpointSpec = args.x
 
     # argparse makes sure we have a number
     if args.y:
-        bmIterationCount = args.y
+        configuration.bmIterationCount = args.y
 
     # argparse makes sure we have a number
     if args.sharp:
-        count = args.sharp
+        configuration.count = args.sharp
 
     if sys.platform.startswith('win32'):
         os.environ['LLDB_DISABLE_CRASH_DIALOG'] = str(args.disable_crash_dialog)
@@ -730,23 +466,23 @@ def parseOptionsAndInitTestdirs():
         usage(parser)
 
     if args.no_multiprocess:
-        no_multiprocess_test_runner = True
+        configuration.no_multiprocess_test_runner = True
 
     if args.inferior:
-        is_inferior_test_runner = True
+        configuration.is_inferior_test_runner = True
 
     # Turn on output_on_sucess if either explicitly added or -v specified.
     if args.output_on_success or args.v:
-        output_on_success = True
+        configuration.output_on_success = True
 
     if args.num_threads:
-        num_threads = args.num_threads
+        configuration.num_threads = args.num_threads
 
     if args.test_subdir:
-        multiprocess_test_subdir = args.test_subdir
+        configuration.multiprocess_test_subdir = args.test_subdir
 
     if args.test_runner_name:
-        test_runner_name = args.test_runner_name
+        configuration.test_runner_name = args.test_runner_name
 
     # Capture test results-related args.
     if args.curses and not args.inferior:
@@ -755,10 +491,10 @@ def parseOptionsAndInitTestdirs():
         args.results_file = "stdout"
 
     if args.results_file:
-        results_filename = args.results_file
+        configuration.results_filename = args.results_file
 
     if args.results_port:
-        results_port = args.results_port
+        configuration.results_port = args.results_port
 
     if args.results_file and args.results_port:
         sys.stderr.write(
@@ -767,16 +503,16 @@ def parseOptionsAndInitTestdirs():
         usage(args)
 
     if args.results_formatter:
-        results_formatter_name = args.results_formatter
+        configuration.results_formatter_name = args.results_formatter
     if args.results_formatter_options:
-        results_formatter_options = args.results_formatter_options
+        configuration.results_formatter_options = args.results_formatter_options
 
     if args.lldb_platform_name:
-        lldb_platform_name = args.lldb_platform_name
+        configuration.lldb_platform_name = args.lldb_platform_name
     if args.lldb_platform_url:
-        lldb_platform_url = args.lldb_platform_url
+        configuration.lldb_platform_url = args.lldb_platform_url
     if args.lldb_platform_working_dir:
-        lldb_platform_working_dir = args.lldb_platform_working_dir
+        configuration.lldb_platform_working_dir = args.lldb_platform_working_dir
 
     if args.event_add_entries and len(args.event_add_entries) > 0:
         entries = {}
@@ -798,18 +534,18 @@ def parseOptionsAndInitTestdirs():
 
     # Gather all the dirs passed on the command line.
     if len(args.args) > 0:
-        testdirs = list(map(os.path.abspath, args.args))
+        configuration.testdirs = list(map(os.path.abspath, args.args))
         # Shut off multiprocessing mode when test directories are specified.
-        no_multiprocess_test_runner = True
+        configuration.no_multiprocess_test_runner = True
 
     # If '-r dir' is specified, the tests should be run under the relocated
     # directory. Let's copy the testdirs over.
-    if rdir:
+    if configuration.rdir:
         from shutil import copytree, ignore_patterns
 
         tmpdirs = []
-        orig_testdirs = testdirs[:]
-        for srcdir in testdirs:
+        orig_testdirs = configuration.testdirs[:]
+        for srcdir in configuration.testdirs:
            # For example, /Volumes/data/lldb/svn/ToT/test/functionalities/watchpoint/hello_watchpoint
            # shall be split into ['/Volumes/data/lldb/svn/ToT/', 'functionalities/watchpoint/hello_watchpoint'].
            # Utilize the relative path to the 'test' directory to make our destination dir path.
@@ -817,14 +553,14 @@ def parseOptionsAndInitTestdirs():
                 to_split_on = "test" + os.sep
             else:
                 to_split_on = "test"
-            dstdir = os.path.join(rdir, srcdir.split(to_split_on)[1])
+            dstdir = os.path.join(configuration.rdir, srcdir.split(to_split_on)[1])
             dstdir = dstdir.rstrip(os.sep)
             # Don't copy the *.pyc and .svn stuffs.
             copytree(srcdir, dstdir, ignore=ignore_patterns('*.pyc', '.svn'))
             tmpdirs.append(dstdir)
 
         # This will be our modified testdirs.
-        testdirs = tmpdirs
+        configuration.testdirs = tmpdirs
 
         # With '-r dir' specified, there's no cleanup of intermediate test files.
os.environ["LLDB_DO_CLEANUP"] = 'NO' @@ -837,7 +573,7 @@ def parseOptionsAndInitTestdirs(): # array_types conditional_break make # # where the make directory contains the Makefile.rules file. - if len(testdirs) != 1 or os.path.basename(orig_testdirs[0]) != 'test': + if len(configuration.testdirs) != 1 or os.path.basename(orig_testdirs[0]) != 'test': scriptdir = os.path.dirname(__file__) # Don't copy the .svn stuffs. copytree(os.path.join(scriptdir, 'make'), os.path.join(rdir, 'make'), @@ -856,25 +592,24 @@ def parseOptionsAndInitTestdirs(): # respectively. # # See also lldb-trunk/examples/test/usage-config. - global config, pre_flight, post_flight, lldbtest_remote_sandbox, lldbtest_remote_shell_template - if configFile: + if configuration.configFile: # Pass config (a dictionary) as the locals namespace for side-effect. - execfile(configFile, globals(), config) + execfile(configuration.configFile, globals(), configuration.config) #print("config:", config) - if "pre_flight" in config: - pre_flight = config["pre_flight"] - if not six.callable(pre_flight): + if "pre_flight" in configuration.config: + configuration.pre_flight = configuration.config["pre_flight"] + if not six.callable(configuration.pre_flight): print("fatal error: pre_flight is not callable, exiting.") sys.exit(1) - if "post_flight" in config: - post_flight = config["post_flight"] - if not six.callable(post_flight): + if "post_flight" in configuration.config: + configuration.post_flight = configuration.config["post_flight"] + if not six.callable(configuration.post_flight): print("fatal error: post_flight is not callable, exiting.") sys.exit(1) - if "lldbtest_remote_sandbox" in config: - lldbtest_remote_sandbox = config["lldbtest_remote_sandbox"] - if "lldbtest_remote_shell_template" in config: - lldbtest_remote_shell_template = config["lldbtest_remote_shell_template"] + if "lldbtest_remote_sandbox" in configuration.config: + configuration.lldbtest_remote_sandbox = configuration.config["lldbtest_remote_sandbox"] + if "lldbtest_remote_shell_template" in configuration.config: + configuration.lldbtest_remote_shell_template = configuration.config["lldbtest_remote_shell_template"] #print("sys.stderr:", sys.stderr) #print("sys.stdout:", sys.stdout) @@ -908,33 +643,21 @@ def createSocketToLocalPort(port): def setupTestResults(): """Sets up test results-related objects based on arg settings.""" - global results_filename - global results_formatter_name - global results_formatter_object - global results_formatter_options - global results_port - - # Setup the results formatter configuration. - config = result_formatter.FormatterConfig() - config.filename = results_filename - config.formatter_name = results_formatter_name - config.formatter_options = results_formatter_options - config.port = results_port # Create the results formatter. - formatter_spec = result_formatter.create_results_formatter(config) + formatter_spec = result_formatter.create_results_formatter() if formatter_spec is not None and formatter_spec.formatter is not None: - results_formatter_object = formatter_spec.formatter + configuration.results_formatter_object = formatter_spec.formatter # Send an intialize message to the formatter. initialize_event = EventBuilder.bare_event("initialize") if isMultiprocessTestRunner(): - if test_runner_name is not None and test_runner_name == "serial": + if configuration.test_runner_name is not None and configuration.test_runner_name == "serial": # Only one worker queue here. 
@@ -908,33 +643,21 @@ def createSocketToLocalPort(port):
 
 def setupTestResults():
     """Sets up test results-related objects based on arg settings."""
-    global results_filename
-    global results_formatter_name
-    global results_formatter_object
-    global results_formatter_options
-    global results_port
-
-    # Setup the results formatter configuration.
-    config = result_formatter.FormatterConfig()
-    config.filename = results_filename
-    config.formatter_name = results_formatter_name
-    config.formatter_options = results_formatter_options
-    config.port = results_port
 
     # Create the results formatter.
-    formatter_spec = result_formatter.create_results_formatter(config)
+    formatter_spec = result_formatter.create_results_formatter()
     if formatter_spec is not None and formatter_spec.formatter is not None:
-        results_formatter_object = formatter_spec.formatter
+        configuration.results_formatter_object = formatter_spec.formatter
 
         # Send an intialize message to the formatter.
         initialize_event = EventBuilder.bare_event("initialize")
         if isMultiprocessTestRunner():
-            if test_runner_name is not None and test_runner_name == "serial":
+            if configuration.test_runner_name is not None and configuration.test_runner_name == "serial":
                 # Only one worker queue here.
                 worker_count = 1
             else:
                 # Workers will be the number of threads specified.
-                worker_count = num_threads
+                worker_count = configuration.num_threads
         else:
             worker_count = 1
         initialize_event["worker_count"] = worker_count
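setupTestResults() now reads all of its settings from configuration and stamps the chosen worker count into the formatter's "initialize" event. The selection logic above reduces to a small function (the parameterized form is illustrative; the diff reads these values from configuration directly):

    def choose_worker_count(is_multiprocess, runner_name, num_threads):
        if not is_multiprocess:
            return 1        # inferior or non-parallel runs get one worker
        if runner_name == "serial":
            return 1        # the serial runner uses a single worker queue
        return num_threads  # as requested via the thread-count option

    # choose_worker_count(True, "serial", 8) -> 1
    # choose_worker_count(True, None, 8)     -> 8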
@@ -976,14 +699,6 @@ def setupSysPath():
 
     As a side effect, we also discover the 'lldb' executable and export it here.
     """
-    global rdir
-    global testdirs
-    global dumpSysPath
-    global noHeaders
-    global svn_info
-    global svn_silent
-    global lldbFrameworkPath
-
     # Get the directory containing the current script.
     if "DOTEST_PROFILE" in os.environ and "DOTEST_SCRIPT_DIR" in os.environ:
         scriptPath = os.environ["DOTEST_SCRIPT_DIR"]
@@ -993,15 +708,15 @@ def setupSysPath():
         print("This script expects to reside in lldb's test directory.")
         sys.exit(-1)
 
-    if rdir:
+    if configuration.rdir:
         # Set up the LLDB_TEST environment variable appropriately, so that the
         # individual tests can be located relatively.
         #
         # See also lldbtest.TestBase.setUpClass(cls).
-        if len(testdirs) == 1 and os.path.basename(testdirs[0]) == 'test':
-            os.environ["LLDB_TEST"] = os.path.join(rdir, 'test')
+        if len(configuration.testdirs) == 1 and os.path.basename(configuration.testdirs[0]) == 'test':
+            os.environ["LLDB_TEST"] = os.path.join(configuration.rdir, 'test')
         else:
-            os.environ["LLDB_TEST"] = rdir
+            os.environ["LLDB_TEST"] = configuration.rdir
     else:
         os.environ["LLDB_TEST"] = scriptPath
@@ -1061,7 +776,7 @@ def setupSysPath():
     os.environ["LLDB_LIB_DIR"] = lldbLibDir
     lldbImpLibDir = os.path.join(lldbLibDir, '..', 'lib') if sys.platform.startswith('win32') else lldbLibDir
     os.environ["LLDB_IMPLIB_DIR"] = lldbImpLibDir
-    if not noHeaders:
+    if not configuration.noHeaders:
         print("LLDB library dir:", os.environ["LLDB_LIB_DIR"])
         print("LLDB import library dir:", os.environ["LLDB_IMPLIB_DIR"])
     os.system('%s -v' % lldbtest_config.lldbExec)
@@ -1074,34 +789,32 @@ def setupSysPath():
     if not lldbMiExec:
         if not shouldSkipBecauseOfCategories(["lldb-mi"]):
             print("The 'lldb-mi' executable cannot be located. The lldb-mi tests can not be run as a result.")
-            skipCategories.append("lldb-mi")
+            configuration.skipCategories.append("lldb-mi")
     else:
         os.environ["LLDBMI_EXEC"] = lldbMiExec
 
     # Skip printing svn/git information when running in parsable (lit-test compatibility) mode
-    if not svn_silent and not parsable:
+    if not configuration.svn_silent and not configuration.parsable:
         if os.path.isdir(os.path.join(lldbRootDirectory, '.svn')) and which("svn") is not None:
             pipe = subprocess.Popen([which("svn"), "info", lldbRootDirectory], stdout = subprocess.PIPE)
-            svn_info = pipe.stdout.read()
+            configuration.svn_info = pipe.stdout.read()
         elif os.path.isdir(os.path.join(lldbRootDirectory, '.git')) and which("git") is not None:
             pipe = subprocess.Popen([which("git"), "svn", "info", lldbRootDirectory], stdout = subprocess.PIPE)
-            svn_info = pipe.stdout.read()
-        if not noHeaders:
-            print(svn_info)
-
-    global ignore
+            configuration.svn_info = pipe.stdout.read()
+        if not configuration.noHeaders:
+            print(configuration.svn_info)
 
     lldbPythonDir = None # The directory that contains 'lldb/__init__.py'
-    if lldbFrameworkPath:
-        candidatePath = os.path.join(lldbFrameworkPath, 'Resources', 'Python')
+    if configuration.lldbFrameworkPath:
+        candidatePath = os.path.join(configuration.lldbFrameworkPath, 'Resources', 'Python')
         if os.path.isfile(os.path.join(candidatePath, 'lldb/__init__.py')):
             lldbPythonDir = candidatePath
         if not lldbPythonDir:
-            print('Resources/Python/lldb/__init__.py was not found in ' + lldbFrameworkPath)
+            print('Resources/Python/lldb/__init__.py was not found in ' + configuration.lldbFrameworkPath)
             sys.exit(-1)
     else:
         # The '-i' option is used to skip looking for lldb.py in the build tree.
-        if ignore:
+        if configuration.ignore:
             return
 
         # If our lldb supports the -P option, use it to find the python path:
@@ -1172,34 +885,27 @@ def setupSysPath():
 
     # This is to locate the lldb.py module. Insert it right after sys.path[0].
     sys.path[1:1] = [lldbPythonDir]
-    if dumpSysPath:
+    if configuration.dumpSysPath:
         print("sys.path:", sys.path)
 
 
 def visit(prefix, dir, names):
     """Visitor function for os.path.walk(path, visit, arg)."""
-    global suite
-    global regexp
-    global filters
-    global fs4all
-    global excluded
-    global all_tests
-
-    if set(dir.split(os.sep)).intersection(excluded):
+    if set(dir.split(os.sep)).intersection(configuration.excluded):
         #print("Detected an excluded dir component: %s" % dir)
         return
 
     for name in names:
         if '.py' == os.path.splitext(name)[1] and name.startswith(prefix):
-            if name in all_tests:
+            if name in configuration.all_tests:
                 raise Exception("Found multiple tests with the name %s" % name)
-            all_tests.add(name)
+            configuration.all_tests.add(name)
 
             # Try to match the regexp pattern, if specified.
-            if regexp:
+            if configuration.regexp:
                 import re
-                if re.search(regexp, name):
+                if re.search(configuration.regexp, name):
                     #print("Filename: '%s' matches pattern: '%s'" % (name, regexp))
                     pass
                 else:
@@ -1216,7 +922,7 @@ def visit(prefix, dir, names):
             # Thoroughly check the filterspec against the base module and admit
             # the (base, filterspec) combination only when it makes sense.
             filterspec = None
-            for filterspec in filters:
+            for filterspec in configuration.filters:
                 # Optimistically set the flag to True.
                 filtered = True
                 module = __import__(base)
@@ -1233,13 +939,13 @@ def visit(prefix, dir, names):
             # If filtered, we have a good filterspec. Add it.
             if filtered:
                 #print("adding filter spec %s to module %s" % (filterspec, module))
-                suite.addTests(
+                configuration.suite.addTests(
                     unittest2.defaultTestLoader.loadTestsFromName(filterspec, module))
                 continue
 
             # Forgo this module if the (base, filterspec) combo is invalid
             # and no '-g' option is specified
-            if filters and fs4all and not filtered:
+            if configuration.filters and configuration.fs4all and not filtered:
                 continue
 
             # Add either the filtered test case(s) (which is done before) or the entire test class.
@@ -1247,7 +953,7 @@ def visit(prefix, dir, names):
                 # A simple case of just the module name. Also the failover case
                 # from the filterspec branch when the (base, filterspec) combo
                 # doesn't make sense.
-                suite.addTests(unittest2.defaultTestLoader.loadTestsFromName(base))
+                configuration.suite.addTests(unittest2.defaultTestLoader.loadTestsFromName(base))
 
 
 def disabledynamics():
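visit() is invoked for every directory that run_suite() walks (see the os.walk loop later in this diff); it admits files matching the Test*.py naming convention, refuses duplicate base names, and hands matches to the unittest2 loader. A condensed, self-contained sketch of that discovery pass (using the stock unittest naming logic; helper names are illustrative):

    import os
    import re

    _seen = set()   # stands in for configuration.all_tests

    def collect_test_files(testdir, prefix="Test", regexp=None):
        # Mirrors visit(): one entry per Test*.py file, duplicates rejected.
        matches = []
        for dirpath, dirnames, filenames in os.walk(testdir):
            for name in filenames:
                if not (name.startswith(prefix) and name.endswith(".py")):
                    continue
                if name in _seen:
                    raise Exception("Found multiple tests with the name %s" % name)
                _seen.add(name)
                # An optional '-p'-style regexp narrows the selection further.
                if regexp and not re.search(regexp, name):
                    continue
                matches.append(os.path.join(dirpath, name))
        return matches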
@@ -1336,7 +1042,7 @@ def isMultiprocessTestRunner():
     # the inferior (as specified by the multiprocess test
     # runner) OR we've been told to skip using the multiprocess
     # test runner
-    return not (is_inferior_test_runner or no_multiprocess_test_runner)
+    return not (configuration.is_inferior_test_runner or configuration.no_multiprocess_test_runner)
 
 def getVersionForSDK(sdk):
     sdk = str.lower(sdk)
@@ -1354,64 +1060,13 @@ def getPathForSDK(sdk):
     return None
 
 def setDefaultTripleForPlatform():
-    if lldb_platform_name == 'ios-simulator':
+    if configuration.lldb_platform_name == 'ios-simulator':
         triple_str = 'x86_64-apple-ios%s' % (getVersionForSDK('iphonesimulator'))
         os.environ['TRIPLE'] = triple_str
         return {'TRIPLE':triple_str}
     return {}
 
 def run_suite():
-    global just_do_benchmarks_test
-    global dont_do_dsym_test
-    global dont_do_dwarf_test
-    global dont_do_dwo_test
-    global blacklist
-    global blacklistConfig
-    global categoriesList
-    global validCategories
-    global useCategories
-    global skipCategories
-    global lldbFrameworkPath
-    global configFile
-    global archs
-    global compilers
-    global count
-    global dumpSysPath
-    global bmExecutable
-    global bmBreakpointSpec
-    global bmIterationCount
-    global failed
-    global failfast
-    global filters
-    global fs4all
-    global ignore
-    global progress_bar
-    global runHooks
-    global skip_build_and_cleanup
-    global skip_long_running_test
-    global noHeaders
-    global parsable
-    global regexp
-    global rdir
-    global sdir_name
-    global svn_silent
-    global verbose
-    global testdirs
-    global lldb_platform_name
-    global lldb_platform_url
-    global lldb_platform_working_dir
-    global setCrashInfoHook
-    global is_inferior_test_runner
-    global multiprocess_test_subdir
-    global num_threads
-    global output_on_success
-    global no_multiprocess_test_runner
-    global test_runner_name
-    global results_filename
-    global results_formatter_name
-    global results_formatter_options
-    global results_port
-
     # On MacOS X, check to make sure that domain for com.apple.DebugSymbols defaults
     # does not exist before proceeding to running the test suite.
     if sys.platform.startswith("darwin"):
@@ -1431,20 +1086,20 @@ def run_suite():
     # multiprocess test runner here.
     if isMultiprocessTestRunner():
         from . import dosep
-        dosep.main(output_on_success, num_threads, multiprocess_test_subdir,
-                   test_runner_name, results_formatter_object)
+        dosep.main(configuration.output_on_success, configuration.num_threads,
+                   configuration.multiprocess_test_subdir,
+                   configuration.test_runner_name, configuration.results_formatter_object)
         raise Exception("should never get here")
-    elif is_inferior_test_runner:
+    elif configuration.is_inferior_test_runner:
         # Shut off Ctrl-C processing in inferiors. The parallel
         # test runner handles this more holistically.
         signal.signal(signal.SIGINT, signal.SIG_IGN)
 
     setupSysPath()
-    setupCrashInfoHook()
 
     #
     # If '-l' is specified, do not skip the long running tests.
-    if not skip_long_running_test:
+    if not configuration.skip_long_running_test:
         os.environ["LLDB_SKIP_LONG_RUNNING_TEST"] = "NO"
 
     # For the time being, let's bracket the test runner within the
@@ -1454,26 +1109,24 @@ def run_suite():
 
     # Create a singleton SBDebugger in the lldb namespace.
     lldb.DBG = lldb.SBDebugger.Create()
 
-    if lldb_platform_name:
-        print("Setting up remote platform '%s'" % (lldb_platform_name))
-        lldb.remote_platform = lldb.SBPlatform(lldb_platform_name)
-        lldb.remote_platform_name = lldb_platform_name
+    if configuration.lldb_platform_name:
+        print("Setting up remote platform '%s'" % (configuration.lldb_platform_name))
+        lldb.remote_platform = lldb.SBPlatform(configuration.lldb_platform_name)
         if not lldb.remote_platform.IsValid():
-            print("error: unable to create the LLDB platform named '%s'." % (lldb_platform_name))
+            print("error: unable to create the LLDB platform named '%s'." % (configuration.lldb_platform_name))
             exitTestSuite(1)
-        if lldb_platform_url:
+        if configuration.lldb_platform_url:
             # We must connect to a remote platform if a LLDB platform URL was specified
-            print("Connecting to remote platform '%s' at '%s'..." % (lldb_platform_name, lldb_platform_url))
-            lldb.platform_url = lldb_platform_url
-            platform_connect_options = lldb.SBPlatformConnectOptions(lldb_platform_url)
+            print("Connecting to remote platform '%s' at '%s'..." % (configuration.lldb_platform_name, configuration.lldb_platform_url))
+            platform_connect_options = lldb.SBPlatformConnectOptions(configuration.lldb_platform_url)
             err = lldb.remote_platform.ConnectRemote(platform_connect_options)
             if err.Success():
                 print("Connected.")
             else:
-                print("error: failed to connect to remote platform using URL '%s': %s" % (lldb_platform_url, err))
+                print("error: failed to connect to remote platform using URL '%s': %s" % (configuration.lldb_platform_url, err))
                 exitTestSuite(1)
         else:
-            lldb.platform_url = None
+            configuration.lldb_platform_url = None
 
     platform_changes = setDefaultTripleForPlatform()
     first = True
@@ -1483,15 +1136,14 @@ def run_suite():
             first = False
         print("%s = %s" % (key,platform_changes[key]))
 
-    if lldb_platform_working_dir:
-        print("Setting remote platform working directory to '%s'..." % (lldb_platform_working_dir))
-        lldb.remote_platform.SetWorkingDirectory(lldb_platform_working_dir)
-        lldb.remote_platform_working_dir = lldb_platform_working_dir
+    if configuration.lldb_platform_working_dir:
+        print("Setting remote platform working directory to '%s'..."
+              % (configuration.lldb_platform_working_dir))
+        lldb.remote_platform.SetWorkingDirectory(configuration.lldb_platform_working_dir)
         lldb.DBG.SetSelectedPlatform(lldb.remote_platform)
     else:
         lldb.remote_platform = None
-        lldb.remote_platform_working_dir = None
-        lldb.platform_url = None
+        configuration.lldb_platform_working_dir = None
+        configuration.lldb_platform_url = None
 
     target_platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]
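run_suite() now takes its remote-platform parameters from configuration, but the SB API calls themselves are unchanged. Condensed from the hunks above, with placeholder values standing in for the platform name, URL, and working directory options:

    import lldb  # requires the built lldb.py on sys.path (see setupSysPath)

    lldb.DBG = lldb.SBDebugger.Create()
    platform = lldb.SBPlatform("remote-linux")  # placeholder platform name
    if platform.IsValid():
        # Placeholder URL; the suite uses the --platform-url argument.
        opts = lldb.SBPlatformConnectOptions("connect://localhost:5432")
        if platform.ConnectRemote(opts).Success():
            platform.SetWorkingDirectory("/tmp/lldb-tests")  # placeholder dir
            lldb.DBG.SetSelectedPlatform(platform)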
@@ -1499,19 +1151,21 @@ def run_suite():
     # Use @dsym_test, @dwarf_test or @dwo_test decorators, defined in lldbtest.py, to mark a test as
     # a dsym, dwarf or dwo test. Use '-N dsym', '-N dwarf' or '-N dwo' to exclude dsym, dwarf or
     # dwo tests from running.
-    dont_do_dsym_test = dont_do_dsym_test or any(platform in target_platform for platform in ["linux", "freebsd", "windows"])
-    dont_do_dwo_test = dont_do_dwo_test or any(platform in target_platform for platform in ["darwin", "macosx", "ios"])
+    configuration.dont_do_dsym_test = configuration.dont_do_dsym_test \
+        or any(platform in target_platform for platform in ["linux", "freebsd", "windows"])
+    configuration.dont_do_dwo_test = configuration.dont_do_dwo_test \
+        or any(platform in target_platform for platform in ["darwin", "macosx", "ios"])
 
     # Don't do debugserver tests on everything except OS X.
-    dont_do_debugserver_test = "linux" in target_platform or "freebsd" in target_platform or "windows" in target_platform
+    configuration.dont_do_debugserver_test = "linux" in target_platform or "freebsd" in target_platform or "windows" in target_platform
 
     # Don't do lldb-server (llgs) tests on anything except Linux.
-    dont_do_llgs_test = not ("linux" in target_platform)
+    configuration.dont_do_llgs_test = not ("linux" in target_platform)
 
     #
     # Walk through the testdirs while collecting tests.
     #
-    for testdir in testdirs:
+    for testdir in configuration.testdirs:
         for (dirpath, dirnames, filenames) in os.walk(testdir):
             visit('Test', dirpath, filenames)
 
@@ -1519,12 +1173,7 @@ def run_suite():
     # Now that we have loaded all the test cases, run the whole test suite.
     #
 
-    # Put the blacklist in the lldb namespace, to be used by lldb.TestBase.
-    lldb.blacklist = blacklist
-
-    # The pre_flight and post_flight come from reading a config file.
-    lldb.pre_flight = pre_flight
-    lldb.post_flight = post_flight
 
     def getsource_if_available(obj):
         """
         Return the text of the source code for an object if available. Otherwise,
@@ -1536,39 +1185,12 @@ def run_suite():
         a print representation is returned.
         """
         except:
             return repr(obj)
 
-    if not noHeaders:
-        print("lldb.pre_flight:", getsource_if_available(lldb.pre_flight))
-        print("lldb.post_flight:", getsource_if_available(lldb.post_flight))
-
-    # If either pre_flight or post_flight is defined, set lldb.test_remote to True.
-    if lldb.pre_flight or lldb.post_flight:
-        lldb.test_remote = True
-    else:
-        lldb.test_remote = False
-
-    # So do the lldbtest_remote_sandbox and lldbtest_remote_shell_template variables.
-    lldb.lldbtest_remote_sandbox = lldbtest_remote_sandbox
-    lldb.lldbtest_remote_sandboxed_executable = None
-    lldb.lldbtest_remote_shell_template = lldbtest_remote_shell_template
-
-    # Put all these test decorators in the lldb namespace.
-    lldb.just_do_benchmarks_test = just_do_benchmarks_test
-    lldb.dont_do_dsym_test = dont_do_dsym_test
-    lldb.dont_do_dwarf_test = dont_do_dwarf_test
-    lldb.dont_do_dwo_test = dont_do_dwo_test
-    lldb.dont_do_debugserver_test = dont_do_debugserver_test
-    lldb.dont_do_llgs_test = dont_do_llgs_test
-
-    # Do we need to skip build and cleanup?
-    lldb.skip_build_and_cleanup = skip_build_and_cleanup
-
-    # Put bmExecutable, bmBreakpointSpec, and bmIterationCount into the lldb namespace, too.
-    lldb.bmExecutable = bmExecutable
-    lldb.bmBreakpointSpec = bmBreakpointSpec
-    lldb.bmIterationCount = bmIterationCount
+    if not configuration.noHeaders:
+        print("configuration.pre_flight:", getsource_if_available(configuration.pre_flight))
+        print("configuration.post_flight:", getsource_if_available(configuration.post_flight))
 
-    # And don't forget the runHooks!
-    lldb.runHooks = runHooks
+    # If either pre_flight or post_flight is defined, set configuration.test_remote to True.
+    configuration.test_remote = configuration.pre_flight or configuration.post_flight
 
     # Turn on lldb loggings if necessary.
     lldbLoggings()
@@ -1588,26 +1210,26 @@ def run_suite():
     import datetime
     # The windows platforms don't like ':' in the pathname.
     timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
-    if not sdir_name:
-        sdir_name = timestamp_started
-    os.environ["LLDB_SESSION_DIRNAME"] = os.path.join(os.getcwd(), sdir_name)
+    if not configuration.sdir_name:
+        configuration.sdir_name = timestamp_started
+    os.environ["LLDB_SESSION_DIRNAME"] = os.path.join(os.getcwd(), configuration.sdir_name)
 
-    if not noHeaders:
+    if not configuration.noHeaders:
         sys.stderr.write("\nSession logs for test failures/errors/unexpected successes"
-                         " will go into directory '%s'\n" % sdir_name)
+                         " will go into directory '%s'\n" % configuration.sdir_name)
         sys.stderr.write("Command invoked: %s\n" % getMyCommandLine())
 
-    if not os.path.isdir(sdir_name):
+    if not os.path.isdir(configuration.sdir_name):
         try:
-            os.mkdir(sdir_name)
+            os.mkdir(configuration.sdir_name)
         except OSError as exception:
             if exception.errno != errno.EEXIST:
                 raise
     where_to_save_session = os.getcwd()
-    fname = os.path.join(sdir_name, "TestStarted-%d" % os.getpid())
+    fname = os.path.join(configuration.sdir_name, "TestStarted-%d" % os.getpid())
     with open(fname, "w") as f:
         print("Test started at: %s\n" % timestamp_started, file=f)
-        print(svn_info, file=f)
+        print(configuration.svn_info, file=f)
         print("Command invoked: %s\n" % getMyCommandLine(), file=f)
 
     #
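The session-directory logic is unchanged apart from the configuration prefix: a timestamp (underscores instead of ':', since Windows paths reject colons) names the directory unless -s supplied one, and the mkdir tolerates a concurrent creator. As a standalone sketch:

    import datetime
    import errno
    import os

    timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
    sdir_name = timestamp_started  # or the value of the -s option
    if not os.path.isdir(sdir_name):
        try:
            os.mkdir(sdir_name)
        except OSError as exception:
            # A racing runner may have created it first; that is fine.
            if exception.errno != errno.EEXIST:
                raise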
@@ -1618,20 +1240,20 @@ def run_suite():
     iterArchs = False
     iterCompilers = False
 
-    if not archs and "archs" in config:
-        archs = config["archs"]
+    if not configuration.archs and "archs" in configuration.config:
+        configuration.archs = configuration.config["archs"]
 
-    if isinstance(archs, list) and len(archs) >= 1:
+    if isinstance(configuration.archs, list) and len(configuration.archs) >= 1:
         iterArchs = True
 
-    if not compilers and "compilers" in config:
-        compilers = config["compilers"]
+    if not configuration.compilers and "compilers" in configuration.config:
+        configuration.compilers = configuration.config["compilers"]
 
     #
     # Add some intervention here to sanity check that the compilers requested are sane.
     # If found not to be an executable program, the invalid one is dropped from the list.
-    for i in range(len(compilers)):
-        c = compilers[i]
+    for i in range(len(configuration.compilers)):
+        c = configuration.compilers[i]
         if which(c):
             continue
         else:
@@ -1641,24 +1263,24 @@ def run_suite():
             if cmd_output:
                 if "not found" in cmd_output:
                     print("dropping %s from the compilers used" % c)
-                    compilers.remove(i)
+                    configuration.compilers.remove(i)
                 else:
-                    compilers[i] = cmd_output.split('\n')[0]
-                    print("'xcrun -find %s' returning %s" % (c, compilers[i]))
+                    configuration.compilers[i] = cmd_output.split('\n')[0]
+                    print("'xcrun -find %s' returning %s" % (c, configuration.compilers[i]))
 
-    if not parsable:
-        print("compilers=%s" % str(compilers))
+    if not configuration.parsable:
+        print("compilers=%s" % str(configuration.compilers))
 
-    if not compilers or len(compilers) == 0:
+    if not configuration.compilers or len(configuration.compilers) == 0:
         print("No eligible compiler found, exiting.")
         exitTestSuite(1)
 
-    if isinstance(compilers, list) and len(compilers) >= 1:
+    if isinstance(configuration.compilers, list) and len(configuration.compilers) >= 1:
         iterCompilers = True
 
     # Make a shallow copy of sys.path, we need to manipulate the search paths later.
     # This is only necessary if we are relocated and with different configurations.
-    if rdir:
+    if configuration.rdir:
         old_sys_path = sys.path[:]
     # If we iterate on archs or compilers, there is a chance we want to split stderr/stdout.
     if iterArchs or iterCompilers:
@@ -1668,15 +1290,15 @@ def run_suite():
         new_stdout = None
 
     # Iterating over all possible architecture and compiler combinations.
-    for ia in range(len(archs) if iterArchs else 1):
+    for ia in range(len(configuration.archs) if iterArchs else 1):
         archConfig = ""
         if iterArchs:
-            os.environ["ARCH"] = archs[ia]
-            archConfig = "arch=%s" % archs[ia]
-        for ic in range(len(compilers) if iterCompilers else 1):
+            os.environ["ARCH"] = configuration.archs[ia]
+            archConfig = "arch=%s" % configuration.archs[ia]
+        for ic in range(len(configuration.compilers) if iterCompilers else 1):
             if iterCompilers:
-                os.environ["CC"] = compilers[ic]
-                configString = "%s compiler=%s" % (archConfig, compilers[ic])
+                os.environ["CC"] = configuration.compilers[ic]
+                configString = "%s compiler=%s" % (archConfig, configuration.compilers[ic])
             else:
                 configString = archConfig
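The arch/compiler matrix drives one full pass of the suite per combination, communicating the current choice to the Makefile-based test builds through the ARCH and CC environment variables. A trimmed sketch of that protocol (sample values are illustrative):

    import os

    archs = ["x86_64", "i386"]    # e.g. accumulated from -A options
    compilers = ["clang", "gcc"]  # e.g. accumulated from -C options

    for arch in archs:
        os.environ["ARCH"] = arch  # read by the test Makefiles
        for compiler in compilers:
            os.environ["CC"] = compiler
            config_string = "arch=%s compiler=%s" % (arch, compiler)
            # ... run the collected suite once for this combination ...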
@@ -1691,12 +1313,12 @@ def run_suite():
 
             # Check whether we need to split stderr/stdout into configuration
             # specific files.
-            if old_stderr.name != '<stderr>' and config.get('split_stderr'):
+            if old_stderr.name != '<stderr>' and configuration.config.get('split_stderr'):
                 if new_stderr:
                     new_stderr.close()
                 new_stderr = open("%s.%s" % (old_stderr.name, configPostfix), "w")
                 sys.stderr = new_stderr
-            if old_stdout.name != '<stdout>' and config.get('split_stdout'):
+            if old_stdout.name != '<stdout>' and configuration.config.get('split_stdout'):
                 if new_stdout:
                     new_stdout.close()
                 new_stdout = open("%s.%s" % (old_stdout.name, configPostfix), "w")
                 sys.stdout = new_stdout
 
             # If we specified a relocated directory to run the test suite, do
             # the extra housekeeping to copy the testdirs to a configStringified
@@ -1707,7 +1329,7 @@ def run_suite():
             # directory and to update sys.path before invoking the test runner.
             # The purpose is to separate the configuration-specific directories
             # from each other.
-            if rdir:
+            if configuration.rdir:
                 from shutil import copytree, rmtree, ignore_patterns
 
                 newrdir = "%s.%s" % (rdir, configPostfix)
@@ -1721,7 +1343,7 @@ def run_suite():
                 # level test directory.
                 #
                 # See also lldbtest.TestBase.setUpClass(cls).
-                if len(testdirs) == 1 and os.path.basename(testdirs[0]) == 'test':
+                if len(configuration.testdirs) == 1 and os.path.basename(configuration.testdirs[0]) == 'test':
                     os.environ["LLDB_TEST"] = os.path.join(newrdir, 'test')
                 else:
                     os.environ["LLDB_TEST"] = newrdir
@@ -1730,18 +1352,18 @@ def run_suite():
                 sys.path = [x.replace(rdir, newrdir, 1) for x in old_sys_path]
 
             # Output the configuration.
-            if not parsable:
+            if not configuration.parsable:
                 sys.stderr.write("\nConfiguration: " + configString + "\n")
 
             #print("sys.stderr name is", sys.stderr.name)
             #print("sys.stdout name is", sys.stdout.name)
 
             # First, write out the number of collected test cases.
-            if not parsable:
+            if not configuration.parsable:
                 sys.stderr.write(separator + "\n")
                 sys.stderr.write("Collected %d test%s\n\n"
-                                 % (suite.countTestCases(),
-                                    suite.countTestCases() != 1 and "s" or ""))
+                                 % (configuration.suite.countTestCases(),
+                                    configuration.suite.countTestCases() != 1 and "s" or ""))
 
             class LLDBTestResult(unittest2.TextTestResult):
                 """
@@ -1785,23 +1407,25 @@ def run_suite():
                     super(LLDBTestResult, self).__init__(*args)
                     LLDBTestResult.__singleton__ = self
                     # Now put this singleton into the lldb module namespace.
-                    lldb.test_result = self
+                    configuration.test_result = self
                     # Computes the format string for displaying the counter.
-                    global suite
-                    counterWidth = len(str(suite.countTestCases()))
+                    counterWidth = len(str(configuration.suite.countTestCases()))
                     self.fmt = "%" + str(counterWidth) + "d: "
                     self.indentation = ' ' * (counterWidth + 2)
                     # This counts from 1 .. suite.countTestCases().
                     self.counter = 0
                     (width, height) = LLDBTestResult.getTerminalSize()
                     self.progressbar = None
-                    global progress_bar
-                    if width > 10 and not parsable and progress_bar:
+                    if width > 10 and not configuration.parsable and configuration.progress_bar:
                         try:
-                            self.progressbar = progress.ProgressWithEvents(stdout=self.stream,start=0,end=suite.countTestCases(),width=width-10)
+                            self.progressbar = progress.ProgressWithEvents(
+                                stdout=self.stream,
+                                start=0,
+                                end=configuration.suite.countTestCases(),
+                                width=width-10)
                         except:
                             self.progressbar = None
-                    self.results_formatter = results_formatter_object
+                    self.results_formatter = configuration.results_formatter_object
 
                 def _config_string(self, test):
                     compiler = getattr(test, "getCompiler", None)
@@ -1852,8 +1476,7 @@ def run_suite():
                 def startTest(self, test):
                     if shouldSkipBecauseOfCategories(self.getCategoriesForTest(test)):
                         self.hardMarkAsSkipped(test)
-                    global setCrashInfoHook
-                    setCrashInfoHook("%s at %s" % (str(test),inspect.getfile(test.__class__)))
+                    configuration.setCrashInfoHook("%s at %s" % (str(test),inspect.getfile(test.__class__)))
                     self.counter += 1
                     #if self.counter == 4:
                     #    import crashinfo
@@ -1867,37 +1490,32 @@ def run_suite():
                             EventBuilder.event_for_start(test))
 
                 def addSuccess(self, test):
-                    global parsable
                     super(LLDBTestResult, self).addSuccess(test)
-                    if parsable:
+                    if configuration.parsable:
                         self.stream.write("PASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
                     if self.results_formatter:
                         self.results_formatter.handle_event(
                             EventBuilder.event_for_success(test))
 
                 def addError(self, test, err):
-                    global sdir_has_content
-                    global parsable
-                    sdir_has_content = True
+                    configuration.sdir_has_content = True
                     super(LLDBTestResult, self).addError(test, err)
                     method = getattr(test, "markError", None)
                     if method:
                         method()
-                    if parsable:
+                    if configuration.parsable:
                         self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
                     if self.results_formatter:
                         self.results_formatter.handle_event(
                             EventBuilder.event_for_error(test, err))
 
                 def addCleanupError(self, test, err):
-                    global sdir_has_content
-                    global parsable
-                    sdir_has_content = True
+                    configuration.sdir_has_content = True
                     super(LLDBTestResult, self).addCleanupError(test, err)
                     method = getattr(test, "markCleanupError", None)
                     if method:
                         method()
-                    if parsable:
+                    if configuration.parsable:
                         self.stream.write("CLEANUP ERROR: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
                     if self.results_formatter:
                         self.results_formatter.handle_event(
                             EventBuilder.event_for_cleanup_error(
@@ -1905,37 +1523,32 @@ def run_suite():
                                 test, err))
 
                 def addFailure(self, test, err):
-                    global sdir_has_content
-                    global failuresPerCategory
-                    global parsable
-                    sdir_has_content = True
+                    configuration.sdir_has_content = True
                     super(LLDBTestResult, self).addFailure(test, err)
                     method = getattr(test, "markFailure", None)
                     if method:
                         method()
-                    if parsable:
+                    if configuration.parsable:
                         self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
-                    if useCategories:
+                    if configuration.useCategories:
                         test_categories = self.getCategoriesForTest(test)
                         for category in test_categories:
-                            if category in failuresPerCategory:
-                                failuresPerCategory[category] = failuresPerCategory[category] + 1
+                            if category in configuration.failuresPerCategory:
+                                configuration.failuresPerCategory[category] = configuration.failuresPerCategory[category] + 1
                             else:
-                                failuresPerCategory[category] = 1
+                                configuration.failuresPerCategory[category] = 1
                     if self.results_formatter:
                         self.results_formatter.handle_event(
                             EventBuilder.event_for_failure(test, err))
 
                 def addExpectedFailure(self, test, err, bugnumber):
-                    global sdir_has_content
-                    global parsable
-                    sdir_has_content = True
+                    configuration.sdir_has_content = True
                     super(LLDBTestResult, self).addExpectedFailure(test, err, bugnumber)
                     method = getattr(test, "markExpectedFailure", None)
                     if method:
                         method(err, bugnumber)
-                    if parsable:
+                    if configuration.parsable:
                         self.stream.write("XFAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
                     if self.results_formatter:
                         self.results_formatter.handle_event(
@@ -1943,28 +1556,24 @@ def run_suite():
                                 test, err, bugnumber))
 
                 def addSkip(self, test, reason):
-                    global sdir_has_content
-                    global parsable
-                    sdir_has_content = True
+                    configuration.sdir_has_content = True
                     super(LLDBTestResult, self).addSkip(test, reason)
                     method = getattr(test, "markSkippedTest", None)
                     if method:
                         method()
-                    if parsable:
+                    if configuration.parsable:
                         self.stream.write("UNSUPPORTED: LLDB (%s) :: %s (%s) \n" % (self._config_string(test), str(test), reason))
                     if self.results_formatter:
                         self.results_formatter.handle_event(
                             EventBuilder.event_for_skip(test, reason))
 
                 def addUnexpectedSuccess(self, test, bugnumber):
-                    global sdir_has_content
-                    global parsable
-                    sdir_has_content = True
+                    configuration.sdir_has_content = True
                     super(LLDBTestResult, self).addUnexpectedSuccess(test, bugnumber)
                     method = getattr(test, "markUnexpectedSuccess", None)
                     if method:
                         method(bugnumber)
-                    if parsable:
+                    if configuration.parsable:
                         self.stream.write("XPASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
                     if self.results_formatter:
                         self.results_formatter.handle_event(
@@ -1972,19 +1581,19 @@ def run_suite():
                                 test, bugnumber))
 
-            if parsable:
+            if configuration.parsable:
                 v = 0
-            elif progress_bar:
+            elif configuration.progress_bar:
                 v = 1
             else:
-                v = verbose
+                v = configuration.verbose
 
             # Invoke the test runner.
-            if count == 1:
+            if configuration.count == 1:
                 result = unittest2.TextTestRunner(stream=sys.stderr,
                                                   verbosity=v,
-                                                  failfast=failfast,
-                                                  resultclass=LLDBTestResult).run(suite)
+                                                  failfast=configuration.failfast,
+                                                  resultclass=LLDBTestResult).run(configuration.suite)
             else:
                 # We are invoking the same test suite more than once. In this case,
                 # mark __ignore_singleton__ flag as True so the signleton pattern is
@@ -1994,22 +1603,22 @@ def run_suite():
                     result = unittest2.TextTestRunner(stream=sys.stderr,
                                                       verbosity=v,
-                                                      failfast=failfast,
-                                                      resultclass=LLDBTestResult).run(suite)
+                                                      failfast=configuration.failfast,
+                                                      resultclass=LLDBTestResult).run(configuration.suite)
 
-        failed = failed or not result.wasSuccessful()
+        configuration.failed = configuration.failed or not result.wasSuccessful()
 
-    if sdir_has_content and not parsable:
+    if configuration.sdir_has_content and not configuration.parsable:
         sys.stderr.write("Session logs for test failures/errors/unexpected successes"
-                         " can be found in directory '%s'\n" % sdir_name)
+                         " can be found in directory '%s'\n" % configuration.sdir_name)
 
-    if useCategories and len(failuresPerCategory) > 0:
+    if configuration.useCategories and len(configuration.failuresPerCategory) > 0:
         sys.stderr.write("Failures per category:\n")
-        for category in failuresPerCategory:
-            sys.stderr.write("%s - %d\n" % (category,failuresPerCategory[category]))
+        for category in configuration.failuresPerCategory:
+            sys.stderr.write("%s - %d\n" % (category, configuration.failuresPerCategory[category]))
 
     os.chdir(where_to_save_session)
-    fname = os.path.join(sdir_name, "TestFinished-%d" % os.getpid())
+    fname = os.path.join(configuration.sdir_name, "TestFinished-%d" % os.getpid())
     with open(fname, "w") as f:
         print("Test finished at: %s\n" % datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S"), file=f)
 
@@ -2020,7 +1629,7 @@ def run_suite():
         subprocess.Popen(["/bin/sh", "-c", "kill %s; exit 0" % (os.getpid())])
 
     # Exiting.
-    exitTestSuite(failed)
+    exitTestSuite(configuration.failed)
 
 if __name__ == "__main__":
     print(__file__ + " is for use as a module only. It should not be run as a standalone script.")
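For reference, the LLDBTestResult class patched throughout the second half of this diff follows the standard unittest result-object protocol: override the add* callbacks, do the extra bookkeeping, and always delegate to super so the base counting still works. A minimal model of that shape (using stock unittest rather than the bundled unittest2; names are illustrative):

    import unittest

    class CountingResult(unittest.TextTestResult):
        def __init__(self, *args, **kwargs):
            super(CountingResult, self).__init__(*args, **kwargs)
            self.counter = 0
            self.failures_per_category = {}

        def startTest(self, test):
            # LLDBTestResult numbers tests with a fmt/counter pair here.
            self.counter += 1
            super(CountingResult, self).startTest(test)

        def addFailure(self, test, err):
            super(CountingResult, self).addFailure(test, err)
            # LLDBTestResult additionally tallies per-category failures
            # into configuration.failuresPerCategory.
            category = getattr(test, "category", "default")  # hypothetical attr
            counts = self.failures_per_category
            counts[category] = counts.get(category, 0) + 1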