llvm.org GIT mirror llvm / ff2dcd3
lit: Rename main lit module to main.py, lit/lit/lit.py was a bit too, err, alliterate. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@113922 91177308-0d34-0410-b5e6-96231b3b80d8 Daniel Dunbar 9 years ago
3 changed file(s) with 649 addition(s) and 649 deletion(s). Raw diff Collapse all Expand all
0 """'lit' Testing Tool"""
11
2 from lit import main
2 from main import main
33
44 __author__ = 'Daniel Dunbar'
55 __email__ = 'daniel@zuster.org'
+0
-648
utils/lit/lit/lit.py less more
None #!/usr/bin/env python
1
2 """
3 lit - LLVM Integrated Tester.
4
5 See lit.pod for more information.
6 """
7
8 import math, os, platform, random, re, sys, time, threading, traceback
9
10 import ProgressBar
11 import TestRunner
12 import Util
13
14 from TestingConfig import TestingConfig
15 import LitConfig
16 import Test
17
18 # Configuration files to look for when discovering test suites. These can be
19 # overridden with --config-prefix.
20 #
21 # FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
22 gConfigName = 'lit.cfg'
23 gSiteConfigName = 'lit.site.cfg'
24
25 kLocalConfigName = 'lit.local.cfg'
26
class TestingProgressDisplay:
    """Serialized console reporter for test results.

    Tracks the completion count and writes per-test status lines (and an
    optional curses progress bar) as Tester worker threads report results.
    """

    def __init__(self, opts, numTests, progressBar=None):
        # opts: parsed command line options (quiet/succinct/showOutput).
        # numTests: total test count, used for the "(N of M)" output.
        # progressBar: optional ProgressBar-like object, or None.
        self.opts = opts
        self.numTests = numTests
        self.current = None
        # Serializes console output from multiple Tester threads.
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode
        # NOTE(review): this increment happens without the lock; relies on
        # CPython's GIL for correctness of the counter — confirm acceptable.
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock.
        self.lock.acquire()
        try:
            self.handleUpdate(test)
        finally:
            self.lock.release()

    def finish(self):
        # Leave the terminal in a clean state once all tests are done.
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            # Succinct mode wrote partial-line output; terminate the line.
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        # Called with self.lock held (see update()).
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        # In succinct mode only failures get a full status line.
        if self.opts.succinct and not test.result.isFailure:
            return

        # Clear the bar so the status line doesn't interleave with it.
        if self.progressBar:
            self.progressBar.clear()

        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests)

        if test.result.isFailure and self.opts.showOutput:
            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20)
            print test.output
            print "*" * 20

        sys.stdout.flush()
79
class TestProvider:
    """Thread-safe source of tests with an optional global deadline.

    Worker threads call get() until it returns None, which signals either
    iterator exhaustion or that maxTime seconds have elapsed since
    construction.
    """

    def __init__(self, tests, maxTime):
        # maxTime: wall-clock budget in seconds, or None for no limit.
        self.maxTime = maxTime
        self.iter = iter(tests)
        # Guards self.iter; plain iterators are not safe for concurrent
        # advancement from multiple threads.
        self.lock = threading.Lock()
        self.startTime = time.time()

    def get(self):
        """Return the next test, or None when exhausted or out of time."""
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.  FIX: the lock is now released in a
        # finally block; previously the release sat after a try/except that
        # only caught StopIteration, so any other exception raised while
        # advancing the iterator left the lock held and deadlocked every
        # other worker.  Also uses the next() builtin instead of the
        # Python 2-only iter.next() method (NOTE: next() needs Python 2.6+
        # — confirm against the minimum supported interpreter).
        self.lock.acquire()
        try:
            try:
                item = next(self.iter)
            except StopIteration:
                item = None
        finally:
            self.lock.release()
        return item
101
class Tester(threading.Thread):
    """Worker thread: pulls tests from a TestProvider, runs each via its
    suite's test format, and reports results to the display."""

    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        # Loop until the provider signals exhaustion (or timeout) with None.
        while 1:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
        # Execute one test, timing it and converting exceptions from the
        # test format into an UNRESOLVED result.
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            # Hard-kill the whole process group with SIGKILL (9).
            os.kill(0,9)
        except:
            # With --debug, propagate so the developer sees the traceback;
            # otherwise fold the traceback into the test output.
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)
138
def dirContainsTestSuite(path):
    """Return the path of the suite config file inside *path*, or None.

    The site config (gSiteConfigName) takes precedence over the plain
    config (gConfigName); falls through to an implicit None when neither
    file exists.
    """
    for config_name in (gSiteConfigName, gConfigName):
        candidate = os.path.join(path, config_name)
        if os.path.exists(candidate):
            return candidate
146
def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            if parent == path:
                # Reached the filesystem root without finding a config.
                return (None, ())

            # Recurse upward and append this component to the relative path.
            ts, relative = search(parent)
            return (ts, relative + (base,))

        # We found a config file, load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
        # Roots default to the config's directory when not set by the config.
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        res = cache.get(path)
        if res is None:
            cache[path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.realpath(item)

    # Skip files and virtual components.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        if parent == item:
            # Degenerate path; no containing suite.
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))
200
def getLocalConfig(ts, path_in_suite, litConfig, cache):
    """Return the local (lit.local.cfg) configuration for a directory.

    Configurations chain: each directory's local config is built on a
    clone of its parent's, rooted at the suite's own config.  Results are
    memoized in *cache* keyed by (suite, path) so shared ancestors are
    loaded only once.
    """
    def lookup(subpath):
        # Memoized wrapper around load().
        key = (ts, subpath)
        cached = cache.get(key)
        if cached is None:
            cached = cache[key] = load(subpath)
        return cached

    def load(subpath):
        # The root of the chain is the test suite's own configuration.
        if not subpath:
            parent = ts.config
        else:
            parent = lookup(subpath[:-1])

        # Overlay this directory's local config file, if present, onto a
        # clone of the parent configuration.
        source_path = ts.getSourcePath(subpath)
        cfgpath = os.path.join(source_path, kLocalConfigName)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        return TestingConfig.frompath(cfgpath, parent, litConfig,
                                      mustExist = False,
                                      config = parent.clone(cfgpath))

    return lookup(path_in_suite)
226
def getTests(path, litConfig, testSuiteCache, localConfigCache):
    """Resolve *path* to its owning suite and enumerate the tests under it.

    Returns (suite, iterable-of-tests); ((), ()) when no suite contains
    the path (a warning is emitted in that case).
    """
    # Map the input path to (suite, relative-path-within-suite).
    suite, rel_path = getTestSuite(path, litConfig, testSuiteCache)
    if suite is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return (),()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, suite.name,
                                                        rel_path))

    tests = getTestsInSuite(suite, rel_path, litConfig,
                            testSuiteCache, localConfigCache)
    return suite, tests
240
def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
    """Generate Test objects under *path_in_suite* within suite *ts*,
    recursing into subdirectories and switching suites when a nested
    test suite configuration is discovered."""
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        # Use the local config of the containing directory.
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    if lc.test_format is not None:
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                      litConfig, lc):
            yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename in ('Output', '.svn') or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        file_execpath = ts.getExecPath(path_in_suite + (filename,))
        if dirContainsTestSuite(file_execpath):
            sub_ts, subiter = getTests(file_execpath, litConfig,
                                       testSuiteCache, localConfigCache)
        elif dirContainsTestSuite(file_sourcepath):
            sub_ts, subiter = getTests(file_sourcepath, litConfig,
                                       testSuiteCache, localConfigCache)
        else:
            # Otherwise, continue loading from inside this test suite.
            subiter = getTestsInSuite(ts, path_in_suite + (filename,),
                                      litConfig, testSuiteCache,
                                      localConfigCache)
            sub_ts = None

        # Yield the subdirectory's tests, counting them so an empty nested
        # suite can be diagnosed with a warning.
        N = 0
        for res in subiter:
            N += 1
            yield res
        if sub_ts and not N:
            litConfig.warning('test suite %r contained no tests' % sub_ts.name)
298
def runTests(numThreads, litConfig, provider, display):
    """Drain *provider* using *numThreads* Tester threads.

    With a single thread the tests run inline on the calling thread, which
    keeps profiling (among other things) usable.  Exits with status 2 on
    KeyboardInterrupt while waiting for workers.
    """
    if numThreads == 1:
        Tester(litConfig, provider, display).run()
        return

    # Spin up the worker threads, then wait for all of them to finish.
    workers = []
    for _ in range(numThreads):
        worker = Tester(litConfig, provider, display)
        workers.append(worker)
        worker.start()
    try:
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        sys.exit(2)
317
def load_test_suite(inputs):
    """Return a unittest.TestSuite running the lit tests under *inputs*.

    Allows embedding lit tests in a standard unittest runner.  A default,
    non-configurable LitConfig is used for discovery and execution; exits
    with status 2 if discovery reports errors.
    """
    import unittest

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = 'lit',
                                    path = [],
                                    quiet = False,
                                    useValgrind = False,
                                    valgrindLeakCheck = False,
                                    valgrindArgs = [],
                                    useTclAsSh = False,
                                    noExecute = False,
                                    debug = False,
                                    isWindows = (platform.system()=='Windows'),
                                    params = {})

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    # Return a unittest test suite which just runs the tests in order.
    # (FIX: removed the unused inner helper get_test_fn — it was defined
    # but never called; LitTestCase already adapts tests to unittest.)
    from LitTestCase import LitTestCase
    return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])
359
def main(builtinParameters = {}):
    """Command line entry point for lit.

    builtinParameters: seed values for the user-defined test parameters,
    extended/overridden by --param.  NOTE(review): a mutable default
    argument is normally a bug; it is benign here because the dict is only
    read via dict(builtinParameters), never mutated.
    """
    # Bump the GIL check interval, its more important to get any one thread to a
    # blocking operation (hopefully exec) than to try and unblock other threads.
    #
    # FIXME: This is a hack.
    import sys
    sys.setcheckinterval(1000)

    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                     help="Enable debugging (for 'lit' development)",
                     action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                     help="Show discovered test suites",
                     action="store_true", default=False)
    group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh",
                     help="Don't run Tcl scripts using 'sh'",
                     action="store_false", default=True)
    group.add_option("", "--repeat", dest="repeatTests", metavar="N",
                     help="Repeat tests N times (for timing)",
                     action="store", default=None, type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    # --config-prefix rewrites the module-level config file names used by
    # test suite discovery.
    if opts.configPrefix is not None:
        global gConfigName, gSiteConfigName
        gConfigName = '%s.cfg' % opts.configPrefix
        gSiteConfigName = '%s.site.cfg' % opts.configPrefix

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with numThreads>1
        # http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
        # threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = Util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        # A bare NAME means NAME=''.
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindLeakCheck = opts.valgrindLeakCheck,
                                    valgrindArgs = opts.valgrindArgs,
                                    useTclAsSh = opts.useTclAsSh,
                                    noExecute = opts.noExecute,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'),
                                    params = userParams)

    # Expand '@...' form in inputs.
    actual_inputs = []
    for input in inputs:
        if os.path.exists(input) or not input.startswith('@'):
            actual_inputs.append(input)
        else:
            # '@file' is a response file naming one input per line.
            f = open(input[1:])
            try:
                for ln in f:
                    ln = ln.strip()
                    if ln:
                        actual_inputs.append(ln)
            finally:
                f.close()


    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in actual_inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    if opts.showSuites:
        # Group the discovered tests by their owning suite for display.
        suitesAndTests = dict([(ts,[])
                               for ts,_ in testSuiteCache.values()
                               if ts])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)
        for ts,ts_tests in suitesAndTests:
            print ' %s - %d tests' %(ts.name, len(ts_tests))
            print ' Source Root: %s' % ts.source_root
            print ' Exec Root : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    # --repeat duplicates every test N times (used for timing runs).
    if opts.repeatTests:
        tests = [t.copyWithIndex(i)
                 for t in tests
                 for i in range(opts.repeatTests)]

    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                # Terminal lacks the needed capabilities; fall back to a
                # simple textual bar.
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs'%(time.time() - startTime)

    # Update results for any tests which weren't run.
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print ' %s' % t.getFullName()
        print

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key = lambda (name,elapsed): elapsed)
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    # Summary counts; in quiet mode only failure kinds are shown.
    for name,code in (('Expected Passes ', Test.PASS),
                      ('Expected Failures ', Test.XFAIL),
                      ('Unsupported Tests ', Test.UNSUPPORTED),
                      ('Unresolved Tests ', Test.UNRESOLVED),
                      ('Unexpected Passes ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print ' %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)
645
# Script entry point: run the lit driver when executed directly.
if __name__=='__main__':
    main()
0 #!/usr/bin/env python
1
2 """
3 lit - LLVM Integrated Tester.
4
5 See lit.pod for more information.
6 """
7
8 import math, os, platform, random, re, sys, time, threading, traceback
9
10 import ProgressBar
11 import TestRunner
12 import Util
13
14 from TestingConfig import TestingConfig
15 import LitConfig
16 import Test
17
18 # Configuration files to look for when discovering test suites. These can be
19 # overridden with --config-prefix.
20 #
21 # FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ?
22 gConfigName = 'lit.cfg'
23 gSiteConfigName = 'lit.site.cfg'
24
25 kLocalConfigName = 'lit.local.cfg'
26
class TestingProgressDisplay:
    """Serialized console reporter for test results.

    Tracks the completion count and writes per-test status lines (and an
    optional curses progress bar) as Tester worker threads report results.
    """

    def __init__(self, opts, numTests, progressBar=None):
        # opts: parsed command line options (quiet/succinct/showOutput).
        # numTests: total test count, used for the "(N of M)" output.
        # progressBar: optional ProgressBar-like object, or None.
        self.opts = opts
        self.numTests = numTests
        self.current = None
        # Serializes console output from multiple Tester threads.
        self.lock = threading.Lock()
        self.progressBar = progressBar
        self.completed = 0

    def update(self, test):
        # Avoid locking overhead in quiet mode
        # NOTE(review): this increment happens without the lock; relies on
        # CPython's GIL for correctness of the counter — confirm acceptable.
        if self.opts.quiet and not test.result.isFailure:
            self.completed += 1
            return

        # Output lock.
        self.lock.acquire()
        try:
            self.handleUpdate(test)
        finally:
            self.lock.release()

    def finish(self):
        # Leave the terminal in a clean state once all tests are done.
        if self.progressBar:
            self.progressBar.clear()
        elif self.opts.quiet:
            pass
        elif self.opts.succinct:
            # Succinct mode wrote partial-line output; terminate the line.
            sys.stdout.write('\n')

    def handleUpdate(self, test):
        # Called with self.lock held (see update()).
        self.completed += 1
        if self.progressBar:
            self.progressBar.update(float(self.completed)/self.numTests,
                                    test.getFullName())

        # In succinct mode only failures get a full status line.
        if self.opts.succinct and not test.result.isFailure:
            return

        # Clear the bar so the status line doesn't interleave with it.
        if self.progressBar:
            self.progressBar.clear()

        print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
                                     self.completed, self.numTests)

        if test.result.isFailure and self.opts.showOutput:
            print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20)
            print test.output
            print "*" * 20

        sys.stdout.flush()
79
class TestProvider:
    """Thread-safe source of tests with an optional global deadline.

    Worker threads call get() until it returns None, which signals either
    iterator exhaustion or that maxTime seconds have elapsed since
    construction.
    """

    def __init__(self, tests, maxTime):
        # maxTime: wall-clock budget in seconds, or None for no limit.
        self.maxTime = maxTime
        self.iter = iter(tests)
        # Guards self.iter; plain iterators are not safe for concurrent
        # advancement from multiple threads.
        self.lock = threading.Lock()
        self.startTime = time.time()

    def get(self):
        """Return the next test, or None when exhausted or out of time."""
        # Check if we have run out of time.
        if self.maxTime is not None:
            if time.time() - self.startTime > self.maxTime:
                return None

        # Otherwise take the next test.  FIX: the lock is now released in a
        # finally block; previously the release sat after a try/except that
        # only caught StopIteration, so any other exception raised while
        # advancing the iterator left the lock held and deadlocked every
        # other worker.  Also uses the next() builtin instead of the
        # Python 2-only iter.next() method (NOTE: next() needs Python 2.6+
        # — confirm against the minimum supported interpreter).
        self.lock.acquire()
        try:
            try:
                item = next(self.iter)
            except StopIteration:
                item = None
        finally:
            self.lock.release()
        return item
101
class Tester(threading.Thread):
    """Worker thread: pulls tests from a TestProvider, runs each via its
    suite's test format, and reports results to the display."""

    def __init__(self, litConfig, provider, display):
        threading.Thread.__init__(self)
        self.litConfig = litConfig
        self.provider = provider
        self.display = display

    def run(self):
        # Loop until the provider signals exhaustion (or timeout) with None.
        while 1:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, test):
        # Execute one test, timing it and converting exceptions from the
        # test format into an UNRESOLVED result.
        result = None
        startTime = time.time()
        try:
            result, output = test.config.test_format.execute(test,
                                                             self.litConfig)
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            # Hard-kill the whole process group with SIGKILL (9).
            os.kill(0,9)
        except:
            # With --debug, propagate so the developer sees the traceback;
            # otherwise fold the traceback into the test output.
            if self.litConfig.debug:
                raise
            result = Test.UNRESOLVED
            output = 'Exception during script execution:\n'
            output += traceback.format_exc()
            output += '\n'
        elapsed = time.time() - startTime

        test.setResult(result, output, elapsed)
        self.display.update(test)
138
def dirContainsTestSuite(path):
    """Return the path of the suite config file inside *path*, or None.

    The site config (gSiteConfigName) takes precedence over the plain
    config (gConfigName); falls through to an implicit None when neither
    file exists.
    """
    for config_name in (gSiteConfigName, gConfigName):
        candidate = os.path.join(path, config_name)
        if os.path.exists(candidate):
            return candidate
146
def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            if parent == path:
                # Reached the filesystem root without finding a config.
                return (None, ())

            # Recurse upward and append this component to the relative path.
            ts, relative = search(parent)
            return (ts, relative + (base,))

        # We found a config file, load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
        # Roots default to the config's directory when not set by the config.
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        res = cache.get(path)
        if res is None:
            cache[path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.realpath(item)

    # Skip files and virtual components.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        if parent == item:
            # Degenerate path; no containing suite.
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))
200
def getLocalConfig(ts, path_in_suite, litConfig, cache):
    """Return the local (lit.local.cfg) configuration for a directory.

    Configurations chain: each directory's local config is built on a
    clone of its parent's, rooted at the suite's own config.  Results are
    memoized in *cache* keyed by (suite, path) so shared ancestors are
    loaded only once.
    """
    def lookup(subpath):
        # Memoized wrapper around load().
        key = (ts, subpath)
        cached = cache.get(key)
        if cached is None:
            cached = cache[key] = load(subpath)
        return cached

    def load(subpath):
        # The root of the chain is the test suite's own configuration.
        if not subpath:
            parent = ts.config
        else:
            parent = lookup(subpath[:-1])

        # Overlay this directory's local config file, if present, onto a
        # clone of the parent configuration.
        source_path = ts.getSourcePath(subpath)
        cfgpath = os.path.join(source_path, kLocalConfigName)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        return TestingConfig.frompath(cfgpath, parent, litConfig,
                                      mustExist = False,
                                      config = parent.clone(cfgpath))

    return lookup(path_in_suite)
226
def getTests(path, litConfig, testSuiteCache, localConfigCache):
    """Resolve *path* to its owning suite and enumerate the tests under it.

    Returns (suite, iterable-of-tests); ((), ()) when no suite contains
    the path (a warning is emitted in that case).
    """
    # Map the input path to (suite, relative-path-within-suite).
    suite, rel_path = getTestSuite(path, litConfig, testSuiteCache)
    if suite is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return (),()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, suite.name,
                                                        rel_path))

    tests = getTestsInSuite(suite, rel_path, litConfig,
                            testSuiteCache, localConfigCache)
    return suite, tests
240
def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
    """Generate Test objects under *path_in_suite* within suite *ts*,
    recursing into subdirectories and switching suites when a nested
    test suite configuration is discovered."""
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        # Use the local config of the containing directory.
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    if lc.test_format is not None:
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                      litConfig, lc):
            yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename in ('Output', '.svn') or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        file_execpath = ts.getExecPath(path_in_suite + (filename,))
        if dirContainsTestSuite(file_execpath):
            sub_ts, subiter = getTests(file_execpath, litConfig,
                                       testSuiteCache, localConfigCache)
        elif dirContainsTestSuite(file_sourcepath):
            sub_ts, subiter = getTests(file_sourcepath, litConfig,
                                       testSuiteCache, localConfigCache)
        else:
            # Otherwise, continue loading from inside this test suite.
            subiter = getTestsInSuite(ts, path_in_suite + (filename,),
                                      litConfig, testSuiteCache,
                                      localConfigCache)
            sub_ts = None

        # Yield the subdirectory's tests, counting them so an empty nested
        # suite can be diagnosed with a warning.
        N = 0
        for res in subiter:
            N += 1
            yield res
        if sub_ts and not N:
            litConfig.warning('test suite %r contained no tests' % sub_ts.name)
298
def runTests(numThreads, litConfig, provider, display):
    """Drain *provider* using *numThreads* Tester threads.

    With a single thread the tests run inline on the calling thread, which
    keeps profiling (among other things) usable.  Exits with status 2 on
    KeyboardInterrupt while waiting for workers.
    """
    if numThreads == 1:
        Tester(litConfig, provider, display).run()
        return

    # Spin up the worker threads, then wait for all of them to finish.
    workers = []
    for _ in range(numThreads):
        worker = Tester(litConfig, provider, display)
        workers.append(worker)
        worker.start()
    try:
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        sys.exit(2)
317
def load_test_suite(inputs):
    """Return a unittest.TestSuite running the lit tests under *inputs*.

    Allows embedding lit tests in a standard unittest runner.  A default,
    non-configurable LitConfig is used for discovery and execution; exits
    with status 2 if discovery reports errors.
    """
    import unittest

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = 'lit',
                                    path = [],
                                    quiet = False,
                                    useValgrind = False,
                                    valgrindLeakCheck = False,
                                    valgrindArgs = [],
                                    useTclAsSh = False,
                                    noExecute = False,
                                    debug = False,
                                    isWindows = (platform.system()=='Windows'),
                                    params = {})

    # Load the tests from the inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    # Return a unittest test suite which just runs the tests in order.
    # (FIX: removed the unused inner helper get_test_fn — it was defined
    # but never called; LitTestCase already adapts tests to unittest.)
    from LitTestCase import LitTestCase
    return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])
359
def main(builtinParameters = {}):
    """Command line entry point: parse options, discover tests, run them in
    parallel, and print a summary.

    builtinParameters seeds the user-defined parameter dictionary before any
    --param NAME=VAL command line options are merged in (it is copied, not
    mutated).  Exit status: 2 on discovery/internal errors, 1 if any test
    failed, 0 otherwise.
    """
    # Bump the GIL check interval, it's more important to get any one thread
    # to a blocking operation (hopefully exec) than to try and unblock other
    # threads.
    #
    # FIXME: This is a hack.
    import sys
    sys.setcheckinterval(1000)

    # NOTE(review): 'options' is declared global but never assigned in this
    # function -- looks vestigial; confirm before removing.
    global options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser("usage: %prog [options] {file-or-path}")

    parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
                      help="Number of testing threads",
                      type=int, action="store", default=None)
    parser.add_option("", "--config-prefix", dest="configPrefix",
                      metavar="NAME", help="Prefix for 'lit' config files",
                      action="store", default=None)
    parser.add_option("", "--param", dest="userParameters",
                      metavar="NAME=VAL",
                      help="Add 'NAME' = 'VAL' to the user defined parameters",
                      type=str, action="append", default=[])

    group = OptionGroup(parser, "Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    group.add_option("-q", "--quiet", dest="quiet",
                     help="Suppress no error output",
                     action="store_true", default=False)
    group.add_option("-s", "--succinct", dest="succinct",
                     help="Reduce amount of output",
                     action="store_true", default=False)
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Execution")
    group.add_option("", "--path", dest="path",
                     help="Additional paths to add to testing environment",
                     action="append", type=str, default=[])
    group.add_option("", "--vg", dest="useValgrind",
                     help="Run tests under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
                     help="Check for memory leaks under valgrind",
                     action="store_true", default=False)
    group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
                     help="Specify an extra argument for valgrind",
                     type=str, action="append", default=[])
    group.add_option("", "--time-tests", dest="timeTests",
                     help="Track elapsed wall time for each test",
                     action="store_true", default=False)
    group.add_option("", "--no-execute", dest="noExecute",
                     help="Don't execute any tests (assume PASS)",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Test Selection")
    group.add_option("", "--max-tests", dest="maxTests", metavar="N",
                     help="Maximum number of tests to run",
                     action="store", type=int, default=None)
    group.add_option("", "--max-time", dest="maxTime", metavar="N",
                     help="Maximum time to spend testing (in seconds)",
                     action="store", type=float, default=None)
    group.add_option("", "--shuffle", dest="shuffle",
                     help="Run tests in random order",
                     action="store_true", default=False)
    parser.add_option_group(group)

    group = OptionGroup(parser, "Debug and Experimental Options")
    group.add_option("", "--debug", dest="debug",
                      help="Enable debugging (for 'lit' development)",
                      action="store_true", default=False)
    group.add_option("", "--show-suites", dest="showSuites",
                      help="Show discovered test suites",
                      action="store_true", default=False)
    group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh",
                      help="Don't run Tcl scripts using 'sh'",
                      action="store_false", default=True)
    group.add_option("", "--repeat", dest="repeatTests", metavar="N",
                      help="Repeat tests N times (for timing)",
                      action="store", default=None, type=int)
    parser.add_option_group(group)

    (opts, args) = parser.parse_args()

    if not args:
        parser.error('No inputs specified')

    # Override the module-level config file names read during test discovery.
    if opts.configPrefix is not None:
        global gConfigName, gSiteConfigName
        gConfigName = '%s.cfg' % opts.configPrefix
        gSiteConfigName = '%s.site.cfg' % opts.configPrefix

    if opts.numThreads is None:
        # Python <2.5 has a race condition causing lit to always fail with numThreads>1
        # http://bugs.python.org/issue1731717
        # I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
        # threads by default there.
        if sys.hexversion >= 0x2050200:
            opts.numThreads = Util.detectCPUs()
        else:
            opts.numThreads = 1

    inputs = args

    # Create the user defined parameters, starting from the builtin ones
    # (copied, so the caller's dict is never mutated).
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val

    # Create the global config object.
    litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
                                    path = opts.path,
                                    quiet = opts.quiet,
                                    useValgrind = opts.useValgrind,
                                    valgrindLeakCheck = opts.valgrindLeakCheck,
                                    valgrindArgs = opts.valgrindArgs,
                                    useTclAsSh = opts.useTclAsSh,
                                    noExecute = opts.noExecute,
                                    debug = opts.debug,
                                    isWindows = (platform.system()=='Windows'),
                                    params = userParams)

    # Expand '@...' form in inputs: '@file' (when no such path exists) names a
    # response file containing one input per line.
    actual_inputs = []
    for input in inputs:
        if os.path.exists(input) or not input.startswith('@'):
            actual_inputs.append(input)
        else:
            f = open(input[1:])
            try:
                for ln in f:
                    ln = ln.strip()
                    if ln:
                        actual_inputs.append(ln)
            finally:
                f.close()


    # Load the tests from the inputs; caches avoid re-reading shared config
    # files across inputs.
    tests = []
    testSuiteCache = {}
    localConfigCache = {}
    for input in actual_inputs:
        prev = len(tests)
        tests.extend(getTests(input, litConfig,
                              testSuiteCache, localConfigCache)[1])
        if prev == len(tests):
            litConfig.warning('input %r contained no tests' % input)

    # If there were any errors during test discovery, exit now.
    if litConfig.numErrors:
        print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
        sys.exit(2)

    # Optionally dump the discovered suites and their test counts.
    if opts.showSuites:
        suitesAndTests = dict([(ts,[])
                               for ts,_ in testSuiteCache.values()
                               if ts])
        for t in tests:
            suitesAndTests[t.suite].append(t)

        print '-- Test Suites --'
        suitesAndTests = suitesAndTests.items()
        suitesAndTests.sort(key = lambda (ts,_): ts.name)
        for ts,ts_tests in suitesAndTests:
            print '  %s - %d tests' %(ts.name, len(ts_tests))
            print '    Source Root: %s' % ts.source_root
            print '    Exec Root  : %s' % ts.exec_root

    # Select and order the tests.
    numTotalTests = len(tests)
    if opts.shuffle:
        random.shuffle(tests)
    else:
        tests.sort(key = lambda t: t.getFullName())
    if opts.maxTests is not None:
        tests = tests[:opts.maxTests]

    extra = ''
    if len(tests) != numTotalTests:
        extra = ' of %d' % numTotalTests
    header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
                                                      opts.numThreads)

    # For timing runs, duplicate each test N times (each copy gets an index).
    if opts.repeatTests:
        tests = [t.copyWithIndex(i)
                 for t in tests
                 for i in range(opts.repeatTests)]

    # Pick a progress display: curses bar if the terminal supports it,
    # otherwise a simple textual one; none in quiet mode.
    progressBar = None
    if not opts.quiet:
        if opts.succinct and opts.useProgressBar:
            try:
                tc = ProgressBar.TerminalController()
                progressBar = ProgressBar.ProgressBar(tc, header)
            except ValueError:
                print header
                progressBar = ProgressBar.SimpleProgressBar('Testing: ')
        else:
            print header

    # Don't create more threads than tests.
    opts.numThreads = min(len(tests), opts.numThreads)

    startTime = time.time()
    display = TestingProgressDisplay(opts, len(tests), progressBar)
    provider = TestProvider(tests, opts.maxTime)
    runTests(opts.numThreads, litConfig, provider, display)
    display.finish()

    if not opts.quiet:
        print 'Testing Time: %.2fs'%(time.time() - startTime)

    # Update results for any tests which weren't run (e.g. --max-time cutoff).
    for t in tests:
        if t.result is None:
            t.setResult(Test.UNRESOLVED, '', 0.0)

    # List test results organized by kind.
    hasFailures = False
    byCode = {}
    for t in tests:
        if t.result not in byCode:
            byCode[t.result] = []
        byCode[t.result].append(t)
        if t.result.isFailure:
            hasFailures = True

    # FIXME: Show unresolved and (optionally) unsupported tests.
    for title,code in (('Unexpected Passing Tests', Test.XPASS),
                       ('Failing Tests', Test.FAIL)):
        elts = byCode.get(code)
        if not elts:
            continue
        print '*'*20
        print '%s (%d):' % (title, len(elts))
        for t in elts:
            print '    %s' % t.getFullName()
        print

    if opts.timeTests:
        # Collate, in case we repeated tests.
        times = {}
        for t in tests:
            key = t.getFullName()
            times[key] = times.get(key, 0.) + t.elapsed

        byTime = list(times.items())
        byTime.sort(key = lambda (name,elapsed): elapsed)
        if byTime:
            Util.printHistogram(byTime, title='Tests')

    # Print per-result-code counts (names padded so the numbers line up).
    for name,code in (('Expected Passes    ', Test.PASS),
                      ('Expected Failures  ', Test.XFAIL),
                      ('Unsupported Tests  ', Test.UNSUPPORTED),
                      ('Unresolved Tests   ', Test.UNRESOLVED),
                      ('Unexpected Passes  ', Test.XPASS),
                      ('Unexpected Failures', Test.FAIL),):
        if opts.quiet and not code.isFailure:
            continue
        N = len(byCode.get(code,[]))
        if N:
            print '  %s: %d' % (name,N)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
        print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
        sys.exit(2)

    # Warn about warnings.
    if litConfig.numWarnings:
        print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings

    if hasFailures:
        sys.exit(1)
    sys.exit(0)
645
# Script entry point.
if __name__ == '__main__':
    main()