[llvm] 4147450 - [lit] Improve test summary output
Julian Lettner via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 7 22:22:17 PDT 2020
Author: Julian Lettner
Date: 2020-04-07T22:19:50-07:00
New Revision: 414745026ca78a3f4f773d61aaababe6a0971af1
URL: https://github.com/llvm/llvm-project/commit/414745026ca78a3f4f773d61aaababe6a0971af1
DIFF: https://github.com/llvm/llvm-project/commit/414745026ca78a3f4f773d61aaababe6a0971af1.diff
LOG: [lit] Improve test summary output
This change aligns the test summary output along the longest category
label that is actually printed, instead of padding every label to a
hard-coded width. We also right-align the test counts. (A standalone
sketch of the formatting logic follows the examples below.)
Before:
```
Testing Time: 10.30s
  Unsupported Tests  : 1
  Expected Passes    : 30
```
After:
```
Testing Time: 10.29s
  Unsupported Tests:  1
  Expected Passes  : 30
```
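The formatting rule is plain str.ljust/str.rjust over the categories
that are actually printed. Here is a minimal standalone sketch of that
logic (not lit's code itself; the labels and counts are sample data):
```
# Pad labels to the widest printed label; right-justify counts to the
# widest printed count. Sample data mirrors the example above.
rows = [('Unsupported Tests', 1), ('Expected Passes', 30)]

label_width = max(len(label) for label, _ in rows)
count_width = max(len(str(count)) for _, count in rows)

for label, count in rows:
    print('  %s: %s' % (label.ljust(label_width), str(count).rjust(count_width)))
```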
Added:

Modified:
    llvm/utils/lit/lit/main.py
    llvm/utils/lit/tests/allow-retries.py
    llvm/utils/lit/tests/lit-opts.py
    llvm/utils/lit/tests/parallelism-groups.py
    llvm/utils/lit/tests/shtest-inject.py

Removed:
################################################################################
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
index 2930555269de..80eb1d57e291 100755
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -91,7 +91,7 @@ def main(builtin_params={}):
 
     executed_tests = [t for t in filtered_tests if t.result]
 
-    print_summary(executed_tests, elapsed, opts)
+    print_results(executed_tests, elapsed, opts)
 
     if opts.output_path:
         #TODO(yln): pass in discovered_tests
@@ -239,35 +239,35 @@ def execute_in_tmp_dir(run, lit_config):
                 lit_config.warning("Failed to delete temp directory '%s'" % tmp_dir)
 
 
-def print_summary(tests, elapsed, opts):
-    # Status code, summary label, group label
-    groups = [
-        # Successes
-        (lit.Test.UNSUPPORTED, 'Unsupported Tests  ', 'Unsupported'),
-        (lit.Test.PASS,        'Expected Passes    ', ''),
-        (lit.Test.FLAKYPASS,   'Passes With Retry  ', ''),
-        (lit.Test.XFAIL,       'Expected Failures  ', 'Expected Failing'),
-        # Failures
-        (lit.Test.UNRESOLVED,  'Unresolved Tests   ', 'Unresolved'),
-        (lit.Test.TIMEOUT,     'Individual Timeouts', 'Timed Out'),
-        (lit.Test.FAIL,        'Unexpected Failures', 'Failing'),
-        (lit.Test.XPASS,       'Unexpected Passes  ', 'Unexpected Passing')]
-
-    by_code = {code: [] for (code, _, _) in groups}
+# Status code, summary label, group label
+failure_codes = [
+    (lit.Test.UNRESOLVED, 'Unresolved Tests', 'Unresolved'),
+    (lit.Test.TIMEOUT, 'Individual Timeouts', 'Timed Out'),
+    (lit.Test.FAIL, 'Unexpected Failures', 'Failing'),
+    (lit.Test.XPASS, 'Unexpected Passes', 'Unexpected Passing')
+]
+
+all_codes = [
+    (lit.Test.UNSUPPORTED, 'Unsupported Tests', 'Unsupported'),
+    (lit.Test.PASS, 'Expected Passes', ''),
+    (lit.Test.FLAKYPASS, 'Passes With Retry', ''),
+    (lit.Test.XFAIL, 'Expected Failures', 'Expected Failing'),
+] + failure_codes
+
+
+def print_results(tests, elapsed, opts):
+    tests_by_code = {code: [] for (code, _, _) in all_codes}
     for test in tests:
-        by_code[test.result.code].append(test)
+        tests_by_code[test.result.code].append(test)
 
-    for (code, _, group_label) in groups:
-        print_group(code, group_label, by_code[code], opts)
+    for (code, _, group_label) in all_codes:
+        print_group(code, group_label, tests_by_code[code], opts)
 
     if opts.timeTests and tests:
         test_times = [(t.getFullName(), t.result.elapsed) for t in tests]
         lit.util.printHistogram(test_times, title='Tests')
 
-    if not opts.quiet:
-        print('\nTesting Time: %.2fs' % elapsed)
-    for (code, summary_label, _) in groups:
-        print_group_summary(code, summary_label, by_code[code], opts.quiet)
+    print_summary(tests_by_code, opts.quiet, elapsed)
 
 
 def print_group(code, label, tests, opts):
@@ -286,10 +286,23 @@ def print_group(code, label, tests, opts):
     sys.stdout.write('\n')
 
 
-def print_group_summary(code, label, tests, quiet):
-    count = len(tests)
-    if count and (code.isFailure or not quiet):
-        print('  %s: %d' % (label, count))
+def print_summary(tests_by_code, quiet, elapsed):
+    if not quiet:
+        print('\nTesting Time: %.2fs' % elapsed)
+
+    codes = failure_codes if quiet else all_codes
+    groups = [(label, len(tests_by_code[code])) for code, label, _ in codes]
+    groups = [(label, count) for label, count in groups if count]
+    if not groups:
+        return
+
+    max_label_len = max(len(label) for label, _ in groups)
+    max_count_len = max(len(str(count)) for _, count in groups)
+
+    for (label, count) in groups:
+        label = label.ljust(max_label_len)
+        count = str(count).rjust(max_count_len)
+        print('  %s: %s' % (label, count))
 
 
 def write_test_results(tests, lit_config, elapsed, output_path):
diff --git a/llvm/utils/lit/tests/allow-retries.py b/llvm/utils/lit/tests/allow-retries.py
index 3f6cf8f1faa5..a4c963a7e205 100644
--- a/llvm/utils/lit/tests/allow-retries.py
+++ b/llvm/utils/lit/tests/allow-retries.py
@@ -5,13 +5,13 @@
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
-# CHECK-TEST1: Passes With Retry : 1
+# CHECK-TEST1: Passes With Retry: 1
 
 # Test that a per-file ALLOW_RETRIES overwrites the config-wide test_retry_attempts property, if any.
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
-# CHECK-TEST2: Passes With Retry : 1
+# CHECK-TEST2: Passes With Retry: 1
 
 # This test does not succeed within the allowed retry limit
 #
@@ -38,4 +38,4 @@
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
-# CHECK-TEST6: Passes With Retry : 1
+# CHECK-TEST6: Passes With Retry: 1
diff --git a/llvm/utils/lit/tests/lit-opts.py b/llvm/utils/lit/tests/lit-opts.py
index 4c068dd8ac2b..281d4f0b5c4f 100644
--- a/llvm/utils/lit/tests/lit-opts.py
+++ b/llvm/utils/lit/tests/lit-opts.py
@@ -24,10 +24,10 @@
 
 # CHECK: Testing: 1 tests
 # CHECK-NOT: PASS
-# CHECK: Expected Passes : 1
+# CHECK: Expected Passes: 1
 
 # SHOW-ALL: Testing: 1 tests
 # SHOW-ALL: PASS: lit-opts :: test.txt (1 of 1)
 # SHOW-ALL: {{^}}[[VAR]]
 # SHOW-ALL-NOT: PASS
-# SHOW-ALL: Expected Passes : 1
+# SHOW-ALL: Expected Passes: 1
diff --git a/llvm/utils/lit/tests/parallelism-groups.py b/llvm/utils/lit/tests/parallelism-groups.py
index bd0cf37ab0f0..4d137b1f139a 100644
--- a/llvm/utils/lit/tests/parallelism-groups.py
+++ b/llvm/utils/lit/tests/parallelism-groups.py
@@ -15,4 +15,4 @@
 # CHECK: -- Testing: 2 tests, 2 workers --
 # CHECK-DAG: PASS: parallelism-groups :: test1.txt
 # CHECK-DAG: PASS: parallelism-groups :: test2.txt
-# CHECK: Expected Passes : 2
+# CHECK: Expected Passes: 2
diff --git a/llvm/utils/lit/tests/shtest-inject.py b/llvm/utils/lit/tests/shtest-inject.py
index 9f9ff6097921..0986614c1cab 100644
--- a/llvm/utils/lit/tests/shtest-inject.py
+++ b/llvm/utils/lit/tests/shtest-inject.py
@@ -11,7 +11,7 @@
 # CHECK-TEST1: THIS WAS
 # CHECK-TEST1: INJECTED
 #
-# CHECK-TEST1: Expected Passes : 1
+# CHECK-TEST1: Expected Passes: 1
 
 # RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-one.txt --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
 #
@@ -26,7 +26,7 @@
 # CHECK-TEST2: INJECTED
 # CHECK-TEST2: IN THE FILE
 #
-# CHECK-TEST2: Expected Passes : 1
+# CHECK-TEST2: Expected Passes: 1
 
 # RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-many.txt --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
 #
@@ -45,4 +45,4 @@
 # CHECK-TEST3: IF IT WORKS
 # CHECK-TEST3: AS EXPECTED
 #
-# CHECK-TEST3: Expected Passes : 1
+# CHECK-TEST3: Expected Passes: 1
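One behavioral detail of the new print_summary worth noting: in quiet
mode only the failure categories are reported (codes = failure_codes if
quiet else all_codes), and categories with a zero count never print. A
standalone sketch of that filtering, with plain (is_failure, label)
tuples standing in for lit.Test result codes (not lit's actual code):
```
# (is_failure, label) pairs stand in for lit.Test result codes.
ALL_CODES = [
    (False, 'Expected Passes'),
    (False, 'Unsupported Tests'),
    (True, 'Unexpected Failures'),
    (True, 'Unresolved Tests'),
]

def summary_rows(counts, quiet):
    # Quiet mode drops the success categories; zero counts never print.
    codes = [c for c in ALL_CODES if c[0]] if quiet else ALL_CODES
    return [(label, counts[label]) for _, label in codes if counts.get(label)]

counts = {'Expected Passes': 30, 'Unexpected Failures': 2}
print(summary_rows(counts, quiet=True))   # [('Unexpected Failures', 2)]
print(summary_rows(counts, quiet=False))  # passes and failures both
```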