llvm.org GIT mirror: llvm / ff058f0
[lit] Add support for attaching arbitrary metrics to test results.

This is a work in progress and all details are subject to change, but I am trying to build up support for allowing lit to be used as a driver for performance tests (or other tests which might want to record information beyond simple PASS/FAIL).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@190535 91177308-0d34-0410-b5e6-96231b3b80d8

Daniel Dunbar
5 changed files with 119 additions and 2 deletions.
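To make the intent concrete, here is a rough sketch of how a performance-oriented test format could use the API added below; it is not part of this commit, and the format name, metric name, and timing logic are invented for illustration:

import time

import lit.formats
import lit.Test

class ExamplePerfFormat(lit.formats.FileBasedTest):
    """Hypothetical format: times each test and attaches the wall time to the
    result as a metric, alongside the usual result code."""

    def execute(self, test, lit_config):
        start = time.time()
        # ... run the actual test or benchmark here ...
        elapsed = time.time() - start

        result = lit.Test.Result(lit.Test.PASS, '')
        result.addMetric('exec_time', lit.Test.RealMetricValue(elapsed))
        return result

The patch itself follows, starting with the new result and metric classes in lit.Test.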
import os

# Test result codes.

class ResultCode(object):
    """Test result codes."""

    ...

UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)

# Test metric values.

class MetricValue(object):
    def format(self):
        raise RuntimeError("abstract method")

class IntMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

class RealMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

# Test results.

class Result(object):
    """Wrapper for the results of executing an individual test."""

        ...

        self.output = output
        # The wall timing to execute the test, if timing.
        self.elapsed = elapsed
        # The metrics reported by this test.
        self.metrics = {}
    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and
        value. It is an error to attempt to attach a metric with the same
        name multiple times.

        Each value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

# Test classes.
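As a quick illustration of the Result API above (the metric names here are invented; the behavior follows directly from addMetric as written, and the two-argument Result construction matches its use in the test config further down):

import lit.Test

result = lit.Test.Result(lit.Test.PASS, 'Test passed.')
result.addMetric('num_fixups', lit.Test.IntMetricValue(7))
result.addMetric('compile_time', lit.Test.RealMetricValue(0.1234))

result.metrics['num_fixups'].format()      # '7'
result.metrics['compile_time'].format()    # '0.1234'

# Reusing a name is an error, and values must be MetricValue instances:
#   result.addMetric('compile_time', lit.Test.RealMetricValue(0.2))  # ValueError
#   result.addMetric('compile_time', 0.2)                            # TypeError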
        if self.progressBar:
            self.progressBar.clear()

        # Show the test result line.
        test_name = test.getFullName()
        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
                                     self.completed, self.numTests))

        # Show the test failure output, if requested.
        if test.result.code.isFailure and self.opts.showOutput:
            print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
                                              '*'*20))
            print(test.result.output)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
                                               '*'*10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Ensure the output is flushed.
        sys.stdout.flush()

def main(builtinParameters = {}):
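For reference, when a test reports metrics, the progress-display code above renders them right after the result line; a verbose run of the regression test added below prints roughly the following (the counts depend on the run):

PASS: test-data :: metrics.ini (1 of 1)
********** TEST 'test-data :: metrics.ini' RESULTS **********
value0: 1
value1: 2.3456
**********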
import os
try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser

import lit.formats
import lit.Test

class DummyFormat(lit.formats.FileBasedTest):
    def execute(self, test, lit_config):
        # In this dummy format, expect that each test file is actually just a
        # .ini format dump of the results to report.

        source_path = test.getSourcePath()

        cfg = ConfigParser.ConfigParser()
        cfg.read(source_path)

        # Create the basic test result.
        result_code = cfg.get('global', 'result_code')
        result_output = cfg.get('global', 'result_output')
        result = lit.Test.Result(getattr(lit.Test, result_code),
                                 result_output)

        # Load additional metrics.
        for key, value_str in cfg.items('results'):
            value = eval(value_str)
            if isinstance(value, int):
                metric = lit.Test.IntMetricValue(value)
            elif isinstance(value, float):
                metric = lit.Test.RealMetricValue(value)
            else:
                raise RuntimeError("unsupported result type")
            result.addMetric(key, metric)

        return result

config.name = 'test-data'
config.suffixes = ['.ini']
config.test_format = DummyFormat()
config.test_source_root = None
config.test_exec_root = None
config.target_triple = None
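Given the metrics.ini input that follows, the metric-loading loop above amounts to the sketch below: ConfigParser hands each value back as a string, eval('1') yields an int and eval('2.3456') a float, so the two entries map to the two MetricValue subclasses (ConfigParser also keeps the quotes in the result_output value verbatim):

import lit.Test

# Roughly what DummyFormat.execute() builds for the metrics.ini below.
result = lit.Test.Result(lit.Test.PASS, "'Test passed.'")
result.addMetric('value0', lit.Test.IntMetricValue(1))        # eval('1') -> int
result.addMetric('value1', lit.Test.RealMetricValue(2.3456))  # eval('2.3456') -> float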
[global]
result_code = PASS
result_output = 'Test passed.'

[results]
value0 = 1
value1 = 2.3456
# Test features related to formats which support reporting additional test data.

# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
# RUN: FileCheck < %t.out %s

# CHECK: -- Testing:

# CHECK: PASS: test-data :: metrics.ini
# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
# CHECK-NEXT: value0: 1
# CHECK-NEXT: value1: 2.3456
# CHECK-NEXT: ***