[LNT] r282888 - xml XUnit test reports for LNT

Chris Matthews via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 30 11:08:01 PDT 2016


Author: cmatthews
Date: Fri Sep 30 13:08:00 2016
New Revision: 282888

URL: http://llvm.org/viewvc/llvm-project?rev=282888&view=rev
Log:
xml XUnit test reports for LNT

Jenkins and other test runners like to read XUnit test reports. This
commit makes LNT generate those test reports in the build directory.

Reports are useful because they allow systems like Jenkins to know the
difference between failures, and present useful output, like why a test
failed.

Modified:
    lnt/trunk/lnt/tests/test_suite.py
    lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-profile.json
    lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results.json
    lnt/trunk/tests/runtest/test_suite.py

Modified: lnt/trunk/lnt/tests/test_suite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/test_suite.py?rev=282888&r1=282887&r2=282888&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/test_suite.py (original)
+++ lnt/trunk/lnt/tests/test_suite.py Fri Sep 30 13:08:00 2016
@@ -12,6 +12,10 @@ import re
 import multiprocessing
 import getpass
 
+import datetime
+import jinja2
+from collections import defaultdict
+
 from optparse import OptionParser, OptionGroup
 
 import lnt.testing
@@ -30,6 +34,32 @@ from lnt.tests.builtintest import Builti
 TEST_SUITE_KNOWN_ARCHITECTURES = ['ARM', 'AArch64', 'Mips', 'X86']
 KNOWN_SAMPLE_KEYS = ['compile', 'exec', 'hash', 'score']
 
+# Jinja2 template that renders an XUnit-style XML report: one <testsuite>
+# element per suite, one <testcase> per test.  NOEXE results are emitted as
+# <error>, FAIL results as <failure>; passing tests get an empty <testcase>.
+# NOTE(review): the template reads suite.num_skipped, but the suite dicts
+# built in _lit_json_to_xunit_xml never set that key, so the skip=""
+# attribute renders empty -- confirm whether skipped tests should be counted.
+XML_REPORT_TEMPLATE = """
+<?xml version="1.0" encoding="UTF-8"?>
+{%  for suite in suites %}
+<testsuite name="{{ suite.name }}"
+           tests="{{ suite.num_tests }}"
+           errors="{{ suite.num_errors }}"
+           failures="{{ suite.num_failures }}"
+           skip="{{ suite.num_skipped }}"
+           timestamp="{{suite.timestamp}}">
+    {% for test in suite.tests %}
+    <testcase classname="{{ test.path }}"
+              name="{{ test.name }}" time="{{ test.time }}">
+        {% if test.code == "NOEXE"%}
+            <error>
+            {{ test.output }}
+            </error>
+        {% endif %}
+        {% if test.code == "FAIL"%}
+            <failure>
+            {{ test.output }}
+            </failure>
+        {% endif %}
+    </testcase>
+    {% endfor %}
+</testsuite>
+{% endfor %}"""
 
 # _importProfile imports a single profile. It must be at the top level (and
 # not within TestSuiteTest) so that multiprocessing can import it correctly.
@@ -52,6 +82,49 @@ def _importProfile(name_filename):
                                    str)
 
 
+def _lit_json_to_xunit_xml(json_reports):
+    # type: (list) -> str
+    """Convert lit's JSON report dicts into an XUnit XML report string
+    for CI systems (such as Jenkins) to digest.
+
+    Only the first report in json_reports is rendered; multisample runs
+    collect one report per run, but the extra runs are ignored here.
+    """
+    template_engine = jinja2.Template(XML_REPORT_TEMPLATE)
+    # For now, only show first runs report.
+    json_report = json_reports[0]
+    # Group test entries by their top-level suite directory name.
+    tests_by_suite = defaultdict(list)
+    for tests in json_report['tests']:
+        name = tests['name']
+        code = tests['code']
+        time = tests['elapsed']
+        # Some results carry no captured output; substitute a stub message.
+        output = tests.get('output', 'No output collected for this test.')
+
+        # lit names look like "<runner> :: <path>/<test>"; take the part
+        # after "::", using its first path component as the suite name and
+        # the last as the test name.  Assumes "::" is always present --
+        # TODO(review): an IndexError is raised otherwise; confirm lit
+        # always emits this format.
+        x = name.split("::")
+        suite_name = x[1].strip().split("/")[0]
+        test_name = x[1].strip().split("/")[-1]
+        path = x[1].strip().split("/")[:-1]
+
+        entry = {'name': test_name,
+                 'path': '.'.join(path),
+                 'time': time,
+                 'code': code}
+        # Only attach (potentially large) output for non-passing tests;
+        # the template only renders it for NOEXE and FAIL codes anyway.
+        if code != "PASS":
+            entry['output'] = output
+
+        tests_by_suite[suite_name].append(entry)
+    suites = []
+    for suite in tests_by_suite:
+        tests = tests_by_suite[suite]
+        # NOTE(review): XML_REPORT_TEMPLATE also reads 'num_skipped', which
+        # is never set here, so skip="" renders empty in the output.  The
+        # timestamp is naive local time, not UTC -- confirm that is intended
+        # for CI consumers.
+        entry = {'name': suite,
+                 'tests': tests,
+                 'timestamp': datetime.datetime.now().isoformat(),
+                 'num_tests': len(tests),
+                 'num_failures': len(
+                     [x for x in tests if x['code'] == 'FAIL']),
+                 'num_errors': len(
+                     [x for x in tests if x['code'] == 'NOEXE'])}
+        suites.append(entry)
+    str_template = template_engine.render(suites=suites)
+    return str_template
+
+
 class TestSuiteTest(BuiltinTest):
     def __init__(self):
         super(TestSuiteTest, self).__init__()
@@ -363,11 +436,14 @@ class TestSuiteTest(BuiltinTest):
             return self.diagnose()
         # Now do the actual run.
         reports = []
+        json_reports = []
         for i in range(max(opts.exec_multisample, opts.compile_multisample)):
             c = i < opts.compile_multisample
             e = i < opts.exec_multisample
-            reports.append(self.run(self.nick, compile=c, test=e))
-            
+            run_report, json_data = self.run(self.nick, compile=c, test=e)
+            reports.append(run_report)
+            json_reports.append(json_data)
+
         report = self._create_merged_report(reports)
 
         # Write the report out so it can be read by the submission tool.
@@ -375,23 +451,30 @@ class TestSuiteTest(BuiltinTest):
         with open(report_path, 'w') as fd:
             fd.write(report.render())
 
+        xml_report_path = os.path.join(self._base_path,
+                                       'test-results.xunit.xml')
+
+        str_template = _lit_json_to_xunit_xml(json_reports)
+        with open(xml_report_path, 'w') as fd:
+            fd.write(str_template)
+
         return self.submit(report_path, self.opts, commit=True)
-    
+
     def run(self, nick, compile=True, test=True):
         path = self._base_path
-        
+
         if not os.path.exists(path):
             mkdir_p(path)
-            
+
         if self.opts.pgo:
             self._collect_pgo(path)
             self.trained = True
-            
+
         if not self.configured and self._need_to_configure(path):
             self._configure(path)
             self._clean(path)
             self.configured = True
-            
+
         if self.compiled and compile:
             self._clean(path)
         if not self.compiled or compile:
@@ -399,7 +482,7 @@ class TestSuiteTest(BuiltinTest):
             self.compiled = True
 
         data = self._lit(path, test)
-        return self._parse_lit_output(path, data)
+        return self._parse_lit_output(path, data), data
 
     def _create_merged_report(self, reports):
         if len(reports) == 1:

Modified: lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-profile.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-profile.json?rev=282888&r1=282887&r2=282888&view=diff
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-profile.json (original)
+++ lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-profile.json Fri Sep 30 13:08:00 2016
@@ -3,6 +3,7 @@
         {
             "name": "test-suite :: foo",
             "code": "PASS",
+            "elapsed": "1.0",
             "metrics": {
                 "compile_time": 1.3,
                 "exec_time": 1.4,

Modified: lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results.json?rev=282888&r1=282887&r2=282888&view=diff
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results.json (original)
+++ lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results.json Fri Sep 30 13:08:00 2016
@@ -2,6 +2,7 @@
     "tests": [
         {
             "name": "test-suite :: foo",
+            "elapsed": "1.0",
             "code": "PASS",
             "metrics": {
                 "compile_time": 1.3,

Modified: lnt/trunk/tests/runtest/test_suite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/test_suite.py?rev=282888&r1=282887&r2=282888&view=diff
==============================================================================
--- lnt/trunk/tests/runtest/test_suite.py (original)
+++ lnt/trunk/tests/runtest/test_suite.py Fri Sep 30 13:08:00 2016
@@ -15,6 +15,7 @@
 # RUN: FileCheck  --check-prefix CHECK-STDOUT < %t.log %s
 # RUN: FileCheck  --check-prefix CHECK-BASIC < %t.err %s
 # RUN: FileCheck  --check-prefix CHECK-REPORT < %t.SANDBOX/build/report.json %s
+# RUN: FileCheck  --check-prefix CHECK-XML < %t.SANDBOX/build/test-results.xunit.xml %s
 
 # CHECK-REPORT: "run_order": "154331"
 # CHECK-REPORT: "Name": "nts.{{[^.]+}}.compile"
@@ -32,6 +33,18 @@
 # CHECK-BASIC: submitting result to dummy instance
 # CHECK-BASIC: Successfully created db_None/v4/nts/1
 
+# CHECK-XML: <?xml version="1.0" encoding="UTF-8"?>
+# CHECK-XML: <testsuite name="foo"
+# CHECK-XML:            tests="{{\d+}}"
+# CHECK-XML:            errors="0"
+# CHECK-XML:            failures="0"
+# CHECK-XML:            skip=""
+# CHECK-XML:            timestamp="2
+# CHECK-XML:     <testcase classname=""
+# CHECK-XML:               name="foo" time="1.0">
+# CHECK-XML:     </testcase>
+# CHECK-XML: </testsuite>
+
 # Use the same sandbox again with --no-configure
 # RUN: lnt runtest test-suite \
 # RUN:     --sandbox %t.SANDBOX \




More information about the llvm-commits mailing list