1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
|
#!/usr/bin/env python
"""
Run the test suite using a separate process for each test file.
Each test will run with a time limit of 10 minutes by default.
Override the default time limit of 10 minutes by setting
the environment variable LLDB_TEST_TIMEOUT.
E.g., export LLDB_TEST_TIMEOUT=10m
Override the time limit for individual tests by setting
the environment variable LLDB_[TEST NAME]_TIMEOUT.
E.g., export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=2m
Set to "0" to run without time limit.
E.g., export LLDB_TEST_TIMEOUT=0
or export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=0
To collect core files for timed out tests, do the following before running dosep.py
OS X:
ulimit -c unlimited
sudo sysctl -w kern.corefile=core.%P
Linux:
ulimit -c unlimited
echo core.%p | sudo tee /proc/sys/kernel/core_pattern
"""
import multiprocessing
import os
import fnmatch
import platform
import re
import dotest_args
import shlex
import subprocess
import sys
from optparse import OptionParser
def get_timeout_command():
    """Search for a suitable timeout command.

    Returns "timeout" (GNU coreutils), "gtimeout" (the Homebrew/BSD
    alias), or None when neither is runnable or we are on Windows.
    """
    if sys.platform.startswith("win32"):
        return None
    # Probe each candidate by invoking it; an OSError means the binary
    # is not on PATH, so fall through to the next one.
    for candidate in ("timeout", "gtimeout"):
        try:
            subprocess.call(candidate, stderr=subprocess.PIPE)
            return candidate
        except OSError:
            pass
    return None
# Resolved once at import time; None when no timeout helper is available.
timeout_command = get_timeout_command()
# Status codes for running command with timeout.
# 124 is the exit status GNU timeout(1) uses when the time limit is hit.
eTimedOut, ePassed, eFailed = 124, 0, 1
# Shared state for worker processes; populated either by
# setup_global_variables() (via the multiprocessing.Pool initializer)
# or directly by walk_and_invoke() in single-threaded mode.
output_lock = None
test_counter = None
total_tests = None
dotest_options = None
def setup_global_variables(lock, counter, total, options):
    """Initializer for multiprocessing.Pool worker processes.

    Installs the shared output lock, progress counter, total work-item
    count, and parsed dotest options as module-level globals so the
    worker-side code can reach them.
    """
    global output_lock, test_counter, total_tests, dotest_options
    dotest_options = options
    total_tests = total
    test_counter = counter
    output_lock = lock
def update_status(name = None, command = None, output = None):
    """Report a suite failure (if any) and redraw the progress line.

    Called once with no arguments to draw the initial progress line,
    then once per completed test suite.  When output is given it is the
    (stdout, stderr) pair of a failing run.
    NOTE: this file is Python 2 — print-chevron syntax below.
    """
    global output_lock, test_counter, total_tests
    with output_lock:
        if output is not None:
            # Failing suite: dump the command line and both streams.
            print >> sys.stderr
            print >> sys.stderr, "Failed test suite: %s" % name
            print >> sys.stderr, "Command invoked: %s" % ' '.join(command)
            print >> sys.stderr, "stdout:\n%s" % output[0]
            print >> sys.stderr, "stderr:\n%s" % output[1]
        # \r rewrites the progress line in place; the counter is padded
        # to the width of the final total so the line never shrinks.
        sys.stderr.write("\r%*d out of %d test suites processed" %
            (len(str(total_tests)), test_counter.value, total_tests))
        test_counter.value += 1
def parse_test_results(output):
    """Extract pass/failure totals from test driver output.

    output -- iterable of text streams (e.g. the (stdout, stderr) pair
    returned by subprocess.communicate()); each stream is scanned for
    "RESULT:" summary lines.

    Returns (passes, failures); reported errors are folded into the
    failure count.
    """
    passes = 0
    failures = 0
    for result in output:
        # The ".*?" must be non-greedy: with a greedy ".*" the capture
        # group is left with only the LAST digit of the count (e.g.
        # "25 passes" was previously counted as 5).
        pass_count = re.search(r"^RESULT:.*?([0-9]+) passes",
                               result, re.MULTILINE)
        fail_count = re.search(r"^RESULT:.*?([0-9]+) failures",
                               result, re.MULTILINE)
        error_count = re.search(r"^RESULT:.*?([0-9]+) errors",
                                result, re.MULTILINE)
        if pass_count is not None:
            passes += int(pass_count.group(1))
        if fail_count is not None:
            failures += int(fail_count.group(1))
        if error_count is not None:
            failures += int(error_count.group(1))
    return passes, failures
def call_with_timeout(command, timeout, name):
    """Run a test driver command, under a time limit when possible.

    command -- argv list for the dotest.py invocation
    timeout -- duration string understood by timeout(1), e.g. "4m";
               "0" disables the time limit
    name    -- test file name, used only for status reporting

    Returns (exit_status, passes, failures); exit_status is eTimedOut
    (124) when the timeout helper killed the run.
    """
    if timeout_command and timeout != "0":
        # -s QUIT will create a coredump if they are enabled on your
        # system (see the module docstring for how to enable them).
        command = [timeout_command, '-s', 'QUIT', timeout] + command
    # Specifying a value for close_fds is unsupported on Windows when
    # using subprocess.PIPE, so only pass it on POSIX; this also folds
    # the two near-identical Popen calls into one.
    popen_kwargs = {}
    if os.name != "nt":
        popen_kwargs['close_fds'] = True
    process = subprocess.Popen(command, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               **popen_kwargs)
    output = process.communicate()
    exit_status = process.returncode
    passes, failures = parse_test_results(output)
    # Only attach the captured output when the suite did not pass.
    update_status(name, command, output if exit_status != 0 else None)
    return exit_status, passes, failures
def process_dir(root, files, test_root, dotest_argv):
    """Examine a directory for tests, and invoke any found within it.

    Returns (timed_out, failed, passed, fail_sub_count, pass_sub_count)
    where the first three are lists of test file names and the counts
    are per-test-case totals.
    """
    timed_out, failed, passed = [], [], []
    pass_sub_count = 0
    fail_sub_count = 0
    script_file = os.path.join(test_root, "dotest.py")
    for name in files:
        # Only files matching the "Test*.py" naming pattern are test
        # drivers, and symbolically linked files are skipped.
        if not (name.startswith("Test") and name.endswith(".py")):
            continue
        if os.path.islink(os.path.join(root, name)):
            continue
        command = ([sys.executable, script_file] +
                   dotest_argv +
                   ["-p", name, root])
        # A per-test LLDB_<NAME>_TIMEOUT env var beats the default.
        timeout_name = os.path.basename(os.path.splitext(name)[0]).upper()
        timeout = (os.getenv("LLDB_%s_TIMEOUT" % timeout_name) or
                   getDefaultTimeout(dotest_options.lldb_platform_name))
        exit_status, pass_count, fail_count = call_with_timeout(
            command, timeout, name)
        pass_sub_count += pass_count
        fail_sub_count += fail_count
        if exit_status == ePassed:
            passed.append(name)
        else:
            # Timed-out suites are recorded in both lists.
            failed.append(name)
            if exit_status == eTimedOut:
                timed_out.append(name)
    return (timed_out, failed, passed, fail_sub_count, pass_sub_count)
# NOTE(review): these two queue placeholders appear to be unused in this
# file — candidates for removal once it is confirmed nothing imports them.
in_q = None
out_q = None
def process_dir_worker(arg_tuple):
    """Worker thread main loop when in multithreaded mode.

    Takes one (root, files, test_root, dotest_argv) work item at a time
    and delegates it to process_dir().
    """
    return process_dir(*arg_tuple)
def walk_and_invoke(test_directory, test_subdir, dotest_argv, num_threads):
    """Look for matched files and invoke test driver on each one.
    In single-threaded mode, each test driver is invoked directly.
    In multi-threaded mode, submit each test driver to a worker
    queue, and then wait for all to complete.
    test_directory - lldb/test/ directory
    test_subdir - lldb/test/ or a subfolder with the tests we're interested in running

    Returns (timed_out, failed, passed, fail_sub_count, pass_sub_count)
    aggregated across all directories.
    """
    # Collect the test files that we'll run.
    # One work item per directory; files in a directory run serially.
    test_work_items = []
    for root, dirs, files in os.walk(test_subdir, topdown=False):
        test_work_items.append((root, files, test_directory, dotest_argv))
    global output_lock, test_counter, total_tests
    output_lock = multiprocessing.Lock()
    # NOTE(review): this is the number of directories, not individual
    # tests, despite the "%d tests" wording in the message below.
    total_tests = len(test_work_items)
    test_counter = multiprocessing.Value('i', 0)
    print >> sys.stderr, "Testing: %d tests, %d threads" % (total_tests, num_threads)
    update_status()
    # Run the items, either in a pool (for multicore speedup) or
    # calling each individually.
    if num_threads > 1:
        # The initializer re-installs the shared state as globals inside
        # each spawned worker process.
        pool = multiprocessing.Pool(num_threads,
            initializer = setup_global_variables,
            initargs = (output_lock, test_counter, total_tests, dotest_options))
        test_results = pool.map(process_dir_worker, test_work_items)
    else:
        test_results = []
        for work_item in test_work_items:
            test_results.append(process_dir_worker(work_item))
    # Flatten the per-directory result tuples into overall totals.
    timed_out = []
    failed = []
    passed = []
    fail_sub_count = 0
    pass_sub_count = 0
    for test_result in test_results:
        (dir_timed_out, dir_failed, dir_passed, dir_fail_sub_count, dir_pass_sub_count) = test_result
        timed_out += dir_timed_out
        failed += dir_failed
        passed += dir_passed
        fail_sub_count = fail_sub_count + dir_fail_sub_count
        pass_sub_count = pass_sub_count + dir_pass_sub_count
    return (timed_out, failed, passed, fail_sub_count, pass_sub_count)
def getExpectedTimeouts(platform_name):
    """Return the set of test filenames that might time out on a target.

    platform_name -- lldb platform name such as "remote-linux", or None
    to use the host platform (sys.platform).
    """
    # Are we running against a remote target?
    if platform_name is None:
        target = sys.platform
    else:
        # Strip a "remote-" prefix; previously a platform name without
        # that prefix crashed with AttributeError on m.group(1), so fall
        # back to the raw name instead.
        m = re.search(r'remote-(\w+)', platform_name)
        target = m.group(1) if m else platform_name
    expected_timeout = set()
    if target.startswith("linux"):
        expected_timeout |= {
            "TestAttachDenied.py",
            "TestAttachResume.py",
            "TestConnectRemote.py",
            "TestCreateAfterAttach.py",
            "TestEvents.py",
            "TestExitDuringStep.py",
            "TestHelloWorld.py", # Times out in ~10% of the times on the build bot
            "TestMultithreaded.py",
            "TestRegisters.py", # ~12/600 dosep runs (build 3120-3122)
            "TestThreadStepOut.py",
        }
    elif target.startswith("android"):
        expected_timeout |= {
            "TestExitDuringStep.py",
            "TestHelloWorld.py",
        }
    elif target.startswith("freebsd"):
        expected_timeout |= {
            "TestBreakpointConditions.py",
            "TestChangeProcessGroup.py",
            "TestValueObjectRecursion.py",
            "TestWatchpointConditionAPI.py",
        }
    elif target.startswith("darwin"):
        expected_timeout |= {
            "TestThreadSpecificBreakpoint.py", # times out on MBP Retina, Mid 2012
        }
    return expected_timeout
def getDefaultTimeout(platform_name):
    """Return the default per-suite timeout string.

    The LLDB_TEST_TIMEOUT environment variable wins when set; otherwise
    remote platforms get 10 minutes and local runs get 4 minutes.
    """
    override = os.getenv("LLDB_TEST_TIMEOUT")
    if override:
        return override
    if (platform_name or sys.platform).startswith("remote-"):
        return "10m"
    return "4m"
def touch(fname, times=None):
    """Create fname if missing and update its timestamps.

    times -- (atime, mtime) pair, or None to use the current time.
    """
    # Opening in append mode creates the file without truncating it.
    open(fname, 'a').close()
    os.utime(fname, times)
def find(pattern, path):
    """Recursively collect files under *path* whose basename matches the
    fnmatch-style *pattern*; returns a list of full paths."""
    matches = []
    for root, _dirs, files in os.walk(path):
        matches.extend(os.path.join(root, f)
                       for f in fnmatch.filter(files, pattern))
    return matches
def main():
    """Driver: parse options, fan test directories out across worker
    processes, aggregate results, and exit 1 if any suite failed.
    NOTE: this file is Python 2 (print statements below)."""
    # We can't use sys.path[0] to determine the script directory
    # because it doesn't work under a debugger
    test_directory = os.path.dirname(os.path.realpath(__file__))
    parser = OptionParser(usage="""\
Run lldb test suite using a separate process for each test file.
Each test will run with a time limit of 10 minutes by default.
Override the default time limit of 10 minutes by setting
the environment variable LLDB_TEST_TIMEOUT.
E.g., export LLDB_TEST_TIMEOUT=10m
Override the time limit for individual tests by setting
the environment variable LLDB_[TEST NAME]_TIMEOUT.
E.g., export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=2m
Set to "0" to run without time limit.
E.g., export LLDB_TEST_TIMEOUT=0
or export LLDB_TESTCONCURRENTEVENTS_TIMEOUT=0
""")
    parser.add_option('-o', '--options',
                      type='string', action='store',
                      dest='dotest_options',
                      help="""The options passed to 'dotest.py' if specified.""")
    parser.add_option('-t', '--threads',
                      type='int',
                      dest='num_threads',
                      help="""The number of threads to use when running tests separately.""")
    opts, args = parser.parse_args()
    dotest_option_string = opts.dotest_options
    # Windows argument strings are not POSIX shell syntax, so tell shlex.
    is_posix = (os.name == "posix")
    dotest_argv = shlex.split(dotest_option_string, posix=is_posix) if dotest_option_string else []
    # Re-parse the forwarded options with dotest's own parser so we can
    # read fields like lldb_platform_name and the session dir (-s).
    parser = dotest_args.create_parser()
    global dotest_options
    dotest_options = dotest_args.parse_args(parser, dotest_argv)
    if not dotest_options.s:
        # no session log directory, we need to add this to prevent
        # every dotest invocation from creating its own directory
        import datetime
        # The windows platforms don't like ':' in the pathname.
        timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
        dotest_argv.append('-s')
        dotest_argv.append(timestamp_started)
        dotest_options.s = timestamp_started
    session_dir = os.path.join(os.getcwd(), dotest_options.s)
    # The root directory was specified on the command line
    if len(args) == 0:
        test_subdir = test_directory
    else:
        test_subdir = os.path.join(test_directory, args[0])
    # clean core files in test tree from previous runs (Linux)
    cores = find('core.*', test_subdir)
    for core in cores:
        os.unlink(core)
    # Thread count: -t option, then LLDB_TEST_THREADS, then CPU count.
    if opts.num_threads:
        num_threads = opts.num_threads
    else:
        num_threads_str = os.environ.get("LLDB_TEST_THREADS")
        if num_threads_str:
            num_threads = int(num_threads_str)
        else:
            num_threads = multiprocessing.cpu_count()
    if num_threads < 1:
        num_threads = 1
    system_info = " ".join(platform.uname())
    (timed_out, failed, passed, all_fails, all_passes) = walk_and_invoke(test_directory, test_subdir, dotest_argv, num_threads)
    # timed_out entries also appear in failed (see process_dir).
    timed_out = set(timed_out)
    num_test_files = len(failed) + len(passed)
    num_tests = all_fails + all_passes
    # move core files into session dir
    cores = find('core.*', test_subdir)
    for core in cores:
        # Flatten the relative path into a dash-separated file name.
        dst = core.replace(test_directory, "")[1:]
        dst = dst.replace(os.path.sep, "-")
        os.rename(core, os.path.join(session_dir, dst))
    # remove expected timeouts from failures
    expected_timeout = getExpectedTimeouts(dotest_options.lldb_platform_name)
    for xtime in expected_timeout:
        if xtime in timed_out:
            timed_out.remove(xtime)
            failed.remove(xtime)
            result = "ExpectedTimeout"
        elif xtime in passed:
            result = "UnexpectedCompletion"
        else:
            result = None # failed
        if result:
            # Drop a marker file in the session dir for post-processing.
            test_name = os.path.splitext(xtime)[0]
            touch(os.path.join(session_dir, "{}-{}".format(result, test_name)))
    print
    print "Ran %d test suites (%d failed) (%f%%)" % (num_test_files, len(failed),
        (100.0 * len(failed) / num_test_files) if num_test_files > 0 else float('NaN'))
    print "Ran %d test cases (%d failed) (%f%%)" % (num_tests, all_fails,
        (100.0 * all_fails / num_tests) if num_tests > 0 else float('NaN'))
    if len(failed) > 0:
        failed.sort()
        print "Failing Tests (%d)" % len(failed)
        for f in failed:
            print "%s: LLDB (suite) :: %s (%s)" % (
                "TIMEOUT" if f in timed_out else "FAIL", f, system_info
            )
        sys.exit(1)
    sys.exit(0)
# Script entry point; allows the file to also be imported as a module.
if __name__ == '__main__':
    main()
|