[llvm] 253cb50 - [lit] Add the option to output test result as resultdb json format

Haowei Wu via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 23 17:04:03 PDT 2021


Author: Haowei Wu
Date: 2021-08-23T17:00:50-07:00
New Revision: 253cb50c60991f155e49f1d76cd01f8ba66d3524

URL: https://github.com/llvm/llvm-project/commit/253cb50c60991f155e49f1d76cd01f8ba66d3524
DIFF: https://github.com/llvm/llvm-project/commit/253cb50c60991f155e49f1d76cd01f8ba66d3524.diff

LOG: [lit] Add the option to output test result as resultdb json format

This change adds the option --resultdb-output=path, which allows
llvm-lit to generate LuCI ResultDB JSON output for the test results.
This output can be integrated more easily with certain CI/CQ
frameworks.
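
A minimal usage sketch in Python (the llvm-lit path and the test
directory are assumptions, not part of this change); a sketch for
reading the resulting JSON follows the reports.py diff below:

    import subprocess

    # Assumed locations; adjust to your own checkout and build tree.
    subprocess.run([
        "build/bin/llvm-lit",
        "-v",
        "llvm/test/tools",                 # any lit test directory
        "--resultdb-output=results.json",  # option added by this change
    ])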

Differential Revision: https://reviews.llvm.org/D108238

Added: 
    llvm/utils/lit/tests/test-output-micro-resultdb.py
    llvm/utils/lit/tests/test-output-resultdb.py

Modified: 
    llvm/utils/lit/lit/cl_arguments.py
    llvm/utils/lit/lit/reports.py

Removed: 
    


################################################################################
diff --git a/llvm/utils/lit/lit/cl_arguments.py b/llvm/utils/lit/lit/cl_arguments.py
index 70e0c8d6a17ec..aa83c7ddad4e6 100644
--- a/llvm/utils/lit/lit/cl_arguments.py
+++ b/llvm/utils/lit/lit/cl_arguments.py
@@ -115,6 +115,9 @@ def parse_args():
     execution_group.add_argument("--xunit-xml-output",
             type=lit.reports.XunitReport,
             help="Write XUnit-compatible XML test reports to the specified file")
+    execution_group.add_argument("--resultdb-output",
+            type=lit.reports.ResultDBReport,
+            help="Write LuCI ResultDB compatible JSON to the specified file")
     execution_group.add_argument("--time-trace-output",
             type=lit.reports.TimeTraceReport,
             help="Write Chrome tracing compatible JSON to the specified file")
@@ -229,7 +232,7 @@ def parse_args():
     else:
         opts.shard = None
 
-    opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.time_trace_output])
+    opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.resultdb_output, opts.time_trace_output])
 
     return opts
 

diff --git a/llvm/utils/lit/lit/reports.py b/llvm/utils/lit/lit/reports.py
index c7eed2220a4d1..c8add0e5e593b 100755
--- a/llvm/utils/lit/lit/reports.py
+++ b/llvm/utils/lit/lit/reports.py
@@ -1,3 +1,5 @@
+import base64
+import datetime
 import itertools
 import json
 
@@ -153,6 +155,90 @@ def _get_skip_reason(self, test):
         return 'Unsupported configuration'
 
 
+def gen_resultdb_test_entry(
+    test_name, start_time, elapsed_time, test_output, result_code, is_expected
+):
+    test_data = {
+        'testId': test_name,
+        'start_time': datetime.datetime.fromtimestamp(start_time).isoformat() + 'Z',
+        'duration': '%.9fs' % elapsed_time,
+        'summary_html': '<p><text-artifact artifact-id="artifact-content-in-request"></p>',
+        'artifacts': {
+            'artifact-content-in-request': {
+                'contents': base64.b64encode(test_output.encode('utf-8')).decode(
+                    'utf-8'
+                ),
+            },
+        },
+        'expected': is_expected,
+    }
+    if (
+        result_code == lit.Test.PASS
+        or result_code == lit.Test.XPASS
+        or result_code == lit.Test.FLAKYPASS
+    ):
+        test_data['status'] = 'PASS'
+    elif result_code == lit.Test.FAIL or result_code == lit.Test.XFAIL:
+        test_data['status'] = 'FAIL'
+    elif (
+        result_code == lit.Test.UNSUPPORTED
+        or result_code == lit.Test.SKIPPED
+        or result_code == lit.Test.EXCLUDED
+    ):
+        test_data['status'] = 'SKIP'
+    elif result_code == lit.Test.UNRESOLVED or result_code == lit.Test.TIMEOUT:
+        test_data['status'] = 'ABORT'
+    return test_data
+
+
+class ResultDBReport(object):
+    def __init__(self, output_file):
+        self.output_file = output_file
+
+    def write_results(self, tests, elapsed):
+        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
+        tests = [t for t in tests if t.result.code not in unexecuted_codes]
+        data = {}
+        data['__version__'] = lit.__versioninfo__
+        data['elapsed'] = elapsed
+        # Encode the tests.
+        data['tests'] = tests_data = []
+        for test in tests:
+            tests_data.append(
+                gen_resultdb_test_entry(
+                    test_name=test.getFullName(),
+                    start_time=test.result.start,
+                    elapsed_time=test.result.elapsed,
+                    test_output=test.result.output,
+                    result_code=test.result.code,
+                    is_expected=not test.result.code.isFailure,
+                )
+            )
+            if test.result.microResults:
+                for key, micro_test in test.result.microResults.items():
+                    # Expand parent test name with micro test name
+                    parent_name = test.getFullName()
+                    micro_full_name = parent_name + ':' + key + 'microres'
+                    tests_data.append(
+                        gen_resultdb_test_entry(
+                            test_name=micro_full_name,
+                            start_time=micro_test.start
+                            if micro_test.start
+                            else test.result.start,
+                            elapsed_time=micro_test.elapsed
+                            if micro_test.elapsed
+                            else test.result.elapsed,
+                            test_output=micro_test.output,
+                            result_code=micro_test.code,
+                            is_expected=not micro_test.code.isFailure,
+                        )
+                    )
+
+        with open(self.output_file, 'w') as file:
+            json.dump(data, file, indent=2, sort_keys=True)
+            file.write('\n')
+
+
 class TimeTraceReport(object):
     def __init__(self, output_file):
         self.output_file = output_file

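For reference, a minimal sketch of consuming the report written by
ResultDBReport above (the file name results.json is an assumption):
each entry under "tests" carries the test output base64-encoded in
artifacts["artifact-content-in-request"]["contents"], so it can be
recovered with base64.b64decode.

    import base64
    import json

    # Assumed output file produced via --resultdb-output=results.json.
    with open("results.json") as f:
        report = json.load(f)

    for entry in report["tests"]:
        blob = entry["artifacts"]["artifact-content-in-request"]["contents"]
        output = base64.b64decode(blob).decode("utf-8")  # inverse of the b64encode above
        if entry["status"] != "PASS":
            print(entry["testId"], entry["status"])
            print(output)
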
diff --git a/llvm/utils/lit/tests/test-output-micro-resultdb.py b/llvm/utils/lit/tests/test-output-micro-resultdb.py
new file mode 100644
index 0000000000000..a67a17916f237
--- /dev/null
+++ b/llvm/utils/lit/tests/test-output-micro-resultdb.py
@@ -0,0 +1,63 @@
+# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --resultdb-output %t.results.out
+# RUN: FileCheck < %t.results.out %s
+# RUN: rm %t.results.out
+
+
+# CHECK: {
+# CHECK: "__version__"
+# CHECK: "elapsed"
+# CHECK-NEXT: "tests": [
+# CHECK-NEXT:    {
+# CHECK-NEXT:      "artifacts": {
+# CHECK-NEXT:        "artifact-content-in-request": {
+# CHECK-NEXT:          "contents": "VGVzdCBwYXNzZWQu"
+# CHECK-NEXT:        }
+# CHECK-NEXT:      },
+# CHECK-NEXT:      "duration"
+# CHECK-NEXT:      "expected": true,
+# CHECK-NEXT:      "start_time"
+# CHECK-NEXT:      "status": "PASS",
+# CHECK-NEXT:      "summary_html": "<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>",
+# CHECK-NEXT:      "testId": "test-data-micro :: micro-tests.ini"
+# CHECK-NEXT:    },
+# CHECK-NEXT:    {
+# CHECK-NEXT:      "artifacts": {
+# CHECK-NEXT:        "artifact-content-in-request": {
+# CHECK-NEXT:          "contents": ""
+# CHECK-NEXT:        }
+# CHECK-NEXT:      },
+# CHECK-NEXT:      "duration"
+# CHECK-NEXT:      "expected": true,
+# CHECK-NEXT:      "start_time"
+# CHECK-NEXT:      "status": "PASS",
+# CHECK-NEXT:      "summary_html": "<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>",
+# CHECK-NEXT:      "testId": "test-data-micro :: micro-tests.ini:test0microres"
+# CHECK-NEXT:    },
+# CHECK-NEXT:    {
+# CHECK-NEXT:      "artifacts": {
+# CHECK-NEXT:        "artifact-content-in-request": {
+# CHECK-NEXT:          "contents": ""
+# CHECK-NEXT:        }
+# CHECK-NEXT:      },
+# CHECK-NEXT:      "duration"
+# CHECK-NEXT:      "expected": true,
+# CHECK-NEXT:      "start_time"
+# CHECK-NEXT:      "status": "PASS",
+# CHECK-NEXT:      "summary_html": "<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>",
+# CHECK-NEXT:      "testId": "test-data-micro :: micro-tests.ini:test1microres"
+# CHECK-NEXT:    },
+# CHECK-NEXT:    {
+# CHECK-NEXT:      "artifacts": {
+# CHECK-NEXT:        "artifact-content-in-request": {
+# CHECK-NEXT:          "contents": ""
+# CHECK-NEXT:        }
+# CHECK-NEXT:      },
+# CHECK-NEXT:      "duration"
+# CHECK-NEXT:      "expected": true,
+# CHECK-NEXT:      "start_time"
+# CHECK-NEXT:      "status": "PASS",
+# CHECK-NEXT:      "summary_html": "<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>",
+# CHECK-NEXT:      "testId": "test-data-micro :: micro-tests.ini:test2microres"
+# CHECK-NEXT:    }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }

diff --git a/llvm/utils/lit/tests/test-output-resultdb.py b/llvm/utils/lit/tests/test-output-resultdb.py
new file mode 100644
index 0000000000000..04d7e626adbe1
--- /dev/null
+++ b/llvm/utils/lit/tests/test-output-resultdb.py
@@ -0,0 +1,22 @@
+# RUN: %{lit} -j 1 -v %{inputs}/test-data --resultdb-output %t.results.out > %t.out
+# RUN: FileCheck < %t.results.out %s
+
+# CHECK: {
+# CHECK: "__version__"
+# CHECK: "elapsed"
+# CHECK-NEXT: "tests": [
+# CHECK-NEXT:   {
+# CHECK-NEXT:      "artifacts": {
+# CHECK-NEXT:        "artifact-content-in-request": {
+# CHECK-NEXT:          "contents": "VGVzdCBwYXNzZWQu"
+# CHECK-NEXT:        }
+# CHECK-NEXT:      },
+# CHECK-NEXT:      "duration"
+# CHECK-NEXT:      "expected": true,
+# CHECK-NEXT:      "start_time"
+# CHECK-NEXT:      "status": "PASS",
+# CHECK-NEXT:      "summary_html": "<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>",
+# CHECK-NEXT:      "testId": "test-data :: metrics.ini"
+# CHECK-NEXT:    }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }
