[llvm] r190535 - [lit] Add support for attaching arbitrary metrics to test results.
Daniel Dunbar
daniel at zuster.org
Wed Sep 11 10:45:12 PDT 2013
Author: ddunbar
Date: Wed Sep 11 12:45:11 2013
New Revision: 190535
URL: http://llvm.org/viewvc/llvm-project?rev=190535&view=rev
Log:
[lit] Add support for attaching arbitrary metrics to test results.
- This is a work in progress and all details are subject to change, but I am
trying to build up support for using lit as a driver for performance tests
(or other tests that want to record information beyond simple PASS/FAIL).
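As a rough sketch of the intended usage (the HypotheticalPerfFormat class and
its numbers are illustrative only; Result, PASS, addMetric, and the
MetricValue subclasses are the APIs touched by this commit), a custom test
format could attach metrics from its execute() method:

    import lit.Test
    import lit.formats

    class HypotheticalPerfFormat(lit.formats.FileBasedTest):
        def execute(self, test, lit_config):
            # Run the test and collect measurements (elided here), then
            # attach them to the result as named metric values.
            result = lit.Test.Result(lit.Test.PASS, 'Test passed.')
            result.addMetric('iterations', lit.Test.IntMetricValue(100))
            result.addMetric('exec_time', lit.Test.RealMetricValue(0.1234))
            return result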
Added:
llvm/trunk/utils/lit/tests/Inputs/test-data/
llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg
llvm/trunk/utils/lit/tests/Inputs/test-data/metrics.ini
llvm/trunk/utils/lit/tests/test-data.py
Modified:
llvm/trunk/utils/lit/lit/Test.py
llvm/trunk/utils/lit/lit/main.py
Modified: llvm/trunk/utils/lit/lit/Test.py
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/lit/lit/Test.py?rev=190535&r1=190534&r2=190535&view=diff
==============================================================================
--- llvm/trunk/utils/lit/lit/Test.py (original)
+++ llvm/trunk/utils/lit/lit/Test.py Wed Sep 11 12:45:11 2013
@@ -1,6 +1,6 @@
import os
-# Test results.
+# Test result codes.
class ResultCode(object):
"""Test result codes."""
@@ -31,6 +31,28 @@ XPASS = ResultCode('XPASS', True)
UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
+# Test metric values.
+
+class MetricValue(object):
+ def format(self):
+ raise RuntimeError("abstract method")
+
+class IntMetricValue(MetricValue):
+ def __init__(self, value):
+ self.value = value
+
+ def format(self):
+ return str(self.value)
+
+class RealMetricValue(MetricValue):
+ def __init__(self, value):
+ self.value = value
+
+ def format(self):
+ return '%.4f' % self.value
+
+# Test results.
+
class Result(object):
"""Wrapper for the results of executing an individual test."""
@@ -41,6 +63,25 @@ class Result(object):
self.output = output
# The wall timing to execute the test, if timing.
self.elapsed = elapsed
+ # The metrics reported by this test.
+ self.metrics = {}
+
+ def addMetric(self, name, value):
+ """
+ addMetric(name, value)
+
+ Attach a test metric to the test result, with the given name and
+ value. It is an error to attempt to attach a metric with the same
+ name multiple times.
+
+ Each value must be an instance of a MetricValue subclass.
+ """
+ if name in self.metrics:
+ raise ValueError("result already includes metrics for %r" % (
+ name,))
+ if not isinstance(value, MetricValue):
+ raise TypeError("unexpected metric value: %r" % (value,))
+ self.metrics[name] = value
# Test classes.
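Because MetricValue leaves format() abstract, formats can in principle define
their own value kinds beyond the two added here. A minimal sketch, assuming a
hypothetical BytesMetricValue that is not part of this commit:

    import lit.Test

    class BytesMetricValue(lit.Test.MetricValue):
        """Hypothetical metric value rendering a byte count readably."""
        def __init__(self, value):
            self.value = value

        def format(self):
            # Render, e.g., 2048 as '2.0 KiB', without mutating self.value.
            value = float(self.value)
            for unit in ('B', 'KiB', 'MiB'):
                if value < 1024.0:
                    return '%.1f %s' % (value, unit)
                value /= 1024.0
            return '%.1f GiB' % value

Since addMetric() only checks isinstance(value, MetricValue), such a subclass
can be attached to a Result just like the built-in kinds.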
Modified: llvm/trunk/utils/lit/lit/main.py
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/lit/lit/main.py?rev=190535&r1=190534&r2=190535&view=diff
==============================================================================
--- llvm/trunk/utils/lit/lit/main.py (original)
+++ llvm/trunk/utils/lit/lit/main.py Wed Sep 11 12:45:11 2013
@@ -45,15 +45,28 @@ class TestingProgressDisplay(object):
if self.progressBar:
self.progressBar.clear()
- print('%s: %s (%d of %d)' % (test.result.code.name, test.getFullName(),
+ # Show the test result line.
+ test_name = test.getFullName()
+ print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
self.completed, self.numTests))
+ # Show the test failure output, if requested.
if test.result.code.isFailure and self.opts.showOutput:
print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20))
print(test.result.output)
print("*" * 20)
+ # Report test metrics, if present.
+ if test.result.metrics:
+ print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
+ '*'*10))
+ items = sorted(test.result.metrics.items())
+ for metric_name, value in items:
+ print('%s: %s ' % (metric_name, value.format()))
+ print("*" * 10)
+
+ # Ensure the output is flushed.
sys.stdout.flush()
def main(builtinParameters = {}):
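With this change, a metrics-reporting test run under -v prints a block along
these lines (reconstructed from the metrics.ini example and the CHECK lines
in test-data.py below):

    PASS: test-data :: metrics.ini (1 of 1)
    ********** TEST 'test-data :: metrics.ini' RESULTS **********
    value0: 1
    value1: 2.3456
    **********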
Added: llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg?rev=190535&view=auto
==============================================================================
--- llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg (added)
+++ llvm/trunk/utils/lit/tests/Inputs/test-data/lit.cfg Wed Sep 11 12:45:11 2013
@@ -0,0 +1,44 @@
+import os
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+ def execute(self, test, lit_config):
+ # In this dummy format, expect that each test file is actually just a
+ # .ini format dump of the results to report.
+
+ source_path = test.getSourcePath()
+
+ cfg = ConfigParser.ConfigParser()
+ cfg.read(source_path)
+
+ # Create the basic test result.
+ result_code = cfg.get('global', 'result_code')
+ result_output = cfg.get('global', 'result_output')
+ result = lit.Test.Result(getattr(lit.Test, result_code),
+ result_output)
+
+ # Load additional metrics.
+ for key,value_str in cfg.items('results'):
+ value = eval(value_str)
+ if isinstance(value, int):
+ metric = lit.Test.IntMetricValue(value)
+ elif isinstance(value, float):
+ metric = lit.Test.RealMetricValue(value)
+ else:
+ raise RuntimeError("unsupported result type")
+ result.addMetric(key, metric)
+
+ return result
+
+config.name = 'test-data'
+config.suffixes = ['.ini']
+config.test_format = DummyFormat()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = None
Added: llvm/trunk/utils/lit/tests/Inputs/test-data/metrics.ini
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/lit/tests/Inputs/test-data/metrics.ini?rev=190535&view=auto
==============================================================================
--- llvm/trunk/utils/lit/tests/Inputs/test-data/metrics.ini (added)
+++ llvm/trunk/utils/lit/tests/Inputs/test-data/metrics.ini Wed Sep 11 12:45:11 2013
@@ -0,0 +1,7 @@
+[global]
+result_code = PASS
+result_output = 'Test passed.'
+
+[results]
+value0 = 1
+value1 = 2.3456
\ No newline at end of file
Added: llvm/trunk/utils/lit/tests/test-data.py
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/lit/tests/test-data.py?rev=190535&view=auto
==============================================================================
--- llvm/trunk/utils/lit/tests/test-data.py (added)
+++ llvm/trunk/utils/lit/tests/test-data.py Wed Sep 11 12:45:11 2013
@@ -0,0 +1,12 @@
+# Test features related to formats which support reporting additional test data.
+
+# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
+# RUN: FileCheck < %t.out %s
+
+# CHECK: -- Testing:
+
+# CHECK: PASS: test-data :: metrics.ini
+# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
+# CHECK-NEXT: value0: 1
+# CHECK-NEXT: value1: 2.3456
+# CHECK-NEXT: ***
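For reference, the new self-test can be run directly from an in-tree checkout
(the paths here are assumed, not spelled out in this commit):

    python llvm/utils/lit/lit.py -sv llvm/utils/lit/tests/test-data.py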