[libcxx-commits] [compiler-rt] [libcxx] [llvm] [utils] revamp options controlling lit's output (PR #167192)

via libcxx-commits libcxx-commits at lists.llvm.org
Sat Nov 8 16:41:47 PST 2025


github-actions[bot] wrote:

<!--LLVM CODE FORMAT COMMENT: {darker}-->


:warning: The Python code formatter `darker` found issues in your code. :warning:

<details>
<summary>
You can test this locally with the following command:
</summary>

``````````bash
darker --check --diff -r origin/main...HEAD llvm/utils/lit/tests/verbosity.py compiler-rt/test/lit.common.cfg.py libcxx/utils/libcxx/test/config.py llvm/utils/lit/lit/LitConfig.py llvm/utils/lit/lit/LitTestCase.py llvm/utils/lit/lit/TestingConfig.py llvm/utils/lit/lit/cl_arguments.py llvm/utils/lit/lit/discovery.py llvm/utils/lit/lit/display.py llvm/utils/lit/lit/llvm/config.py llvm/utils/lit/lit/main.py llvm/utils/lit/tests/lit-opts.py llvm/utils/lit/tests/shtest-env-positive.py llvm/utils/lit/tests/shtest-not.py llvm/utils/lit/tests/unit/TestRunner.py
``````````

:warning:
If you are using a stacked-PR workflow, the reproduction command above may
report results for more than one PR in the stack. You can limit the results by
changing `origin/main` to the specific base branch or commit you want to
compare against.
:warning:

</details>

<details>
<summary>
View the diff from darker here.
</summary>

``````````diff
--- llvm/utils/lit/lit/LitConfig.py	2025-11-09 00:35:03.000000 +0000
+++ llvm/utils/lit/lit/LitConfig.py	2025-11-09 00:41:14.562843 +0000
@@ -237,11 +237,13 @@
             )
 
     def diagnostic_level_enabled(self, kind):
         if kind == "debug":
             return self.debug
-        return DiagnosticLevel.create(self.diagnostic_level) >= DiagnosticLevel.create(kind)
+        return DiagnosticLevel.create(self.diagnostic_level) >= DiagnosticLevel.create(
+            kind
+        )
 
     def dbg(self, message):
         self._write_message("debug", message)
 
     def note(self, message):
@@ -256,10 +258,11 @@
         self.numErrors += 1
 
     def fatal(self, message):
         self._write_message("fatal", message)
         sys.exit(2)
+
 
 @enum.unique
 class DiagnosticLevel(enum.IntEnum):
     FATAL = 0
     ERROR = 1
@@ -274,6 +277,8 @@
             return cls.ERROR
         if value == "warning":
             return cls.WARNING
         if value == "note":
             return cls.NOTE
-        raise ValueError(f"invalid diagnostic level {repr(value)} of type {type(value)}")
+        raise ValueError(
+            f"invalid diagnostic level {repr(value)} of type {type(value)}"
+        )
--- llvm/utils/lit/lit/cl_arguments.py	2025-11-09 00:35:03.000000 +0000
+++ llvm/utils/lit/lit/cl_arguments.py	2025-11-09 00:41:14.775503 +0000
@@ -40,16 +40,20 @@
         TestOutputAction.setOutputLevel(namespace, self.dest, value)
 
     @classmethod
     def setOutputLevel(cls, namespace, dest, value):
         setattr(namespace, dest, value)
-        #print(dest, value)
-        if dest == "test_output" and TestOutputLevel.create(namespace.print_result_after) < TestOutputLevel.create(value):
-            #print("print_result_after", value)
+        # print(dest, value)
+        if dest == "test_output" and TestOutputLevel.create(
+            namespace.print_result_after
+        ) < TestOutputLevel.create(value):
+            # print("print_result_after", value)
             setattr(namespace, "print_result_after", value)
-        elif dest == "print_result_after" and TestOutputLevel.create(namespace.test_output) > TestOutputLevel.create(value):
-            #print("test_output", value)
+        elif dest == "print_result_after" and TestOutputLevel.create(
+            namespace.test_output
+        ) > TestOutputLevel.create(value):
+            # print("test_output", value)
             setattr(namespace, "test_output", value)
 
 
 class AliasAction(argparse.Action):
     def __init__(self, option_strings, dest, nargs=None, **kwargs):
@@ -139,55 +143,70 @@
         action="store_false",
         dest="terse_summary",
     )
     parser.set_defaults(terse_summary=False)
     format_group.add_argument(
-        "-q", "--quiet", help="Alias for '--diagnostic-level=error --test-output=off --terse-summary'", action=AliasAction,
+        "-q",
+        "--quiet",
+        help="Alias for '--diagnostic-level=error --test-output=off --terse-summary'",
+        action=AliasAction,
         alias=[
-            lambda namespace: TestOutputAction.setOutputLevel(namespace, "print_result_after", "failed"),
-            lambda namespace: TestOutputAction.setOutputLevel(namespace, "test_output", "off"),
+            lambda namespace: TestOutputAction.setOutputLevel(
+                namespace, "print_result_after", "failed"
+            ),
+            lambda namespace: TestOutputAction.setOutputLevel(
+                namespace, "test_output", "off"
+            ),
             ("diagnostic_level", "error"),
             ("terse_summary", True),
-            ],
+        ],
     )
     format_group.add_argument(
         "-s",
         "--succinct",
         help="Alias for '--progress-bar --print-result-after=failed'",
         action=AliasAction,
         alias=[
             ("useProgressBar", True),
-            lambda namespace: TestOutputAction.setOutputLevel(namespace, "print_result_after", "failed"),
+            lambda namespace: TestOutputAction.setOutputLevel(
+                namespace, "print_result_after", "failed"
+            ),
         ],
     )
     format_group.add_argument(
         "-v",
         "--verbose",
         help="For failed tests, show all output. For example, each command is"
         " printed before it is executed, so the last printed command is the one"
         " that failed. Alias for '--test-output=failed'",
         action=AliasAction,
         alias=[
-            lambda namespace: TestOutputAction.setOutputLevel(namespace, "test_output", "failed"),
+            lambda namespace: TestOutputAction.setOutputLevel(
+                namespace, "test_output", "failed"
+            ),
         ],
     )
     format_group.add_argument(
         "-vv",
         "--echo-all-commands",
         help="Deprecated alias for -v.",
         action=AliasAction,
         alias=[
-            lambda namespace: TestOutputAction.setOutputLevel(namespace, "test_output", "all"),
+            lambda namespace: TestOutputAction.setOutputLevel(
+                namespace, "test_output", "all"
+            ),
         ],
     )
     format_group.add_argument(
         "-a",
         "--show-all",
         help="Enable -v, but for all tests not just failed tests. Alias for '--test-output=all'",
         action=AliasAction,
         alias=[
-            lambda namespace: TestOutputAction.setOutputLevel(namespace, "test_output", "all"),
+            lambda namespace: TestOutputAction.setOutputLevel(
+                namespace, "test_output", "all"
+            ),
         ],
     )
     format_group.add_argument(
         "-r",
         "--relative-paths",
@@ -496,12 +515,12 @@
         )
     )
 
     for report in opts.reports:
         report.use_unique_output_file_name = opts.use_unique_output_file_name
-    #print("test_output", opts.test_output)
-    #print("print_result_after", opts.print_result_after)
+    # print("test_output", opts.test_output)
+    # print("print_result_after", opts.print_result_after)
 
     return opts
 
 
 def _positive_int(arg):
--- llvm/utils/lit/lit/display.py	2025-11-09 00:35:03.000000 +0000
+++ llvm/utils/lit/lit/display.py	2025-11-09 00:41:14.907901 +0000
@@ -93,11 +93,12 @@
 
     def update(self, test):
         self.completed += 1
 
         show_result = (
-            test.isFailure() and self.opts.print_result_after == "failed"
+            test.isFailure()
+            and self.opts.print_result_after == "failed"
             or self.opts.print_result_after == "all"
         )
         if show_result:
             if self.progress_bar:
                 self.progress_bar.clear(interrupted=False)
@@ -131,12 +132,14 @@
                 extra_info,
             )
         )
 
         # Show the test failure output, if requested.
-        #print("test_output: ", self.opts.test_output)
-        if (test.isFailure() and self.opts.test_output == "failed") or self.opts.test_output == "all":
+        # print("test_output: ", self.opts.test_output)
+        if (
+            test.isFailure() and self.opts.test_output == "failed"
+        ) or self.opts.test_output == "all":
             if test.isFailure():
                 print("%s TEST '%s' FAILED %s" % ("*" * 20, test_name, "*" * 20))
             out = test.result.output
             # Encode/decode so that, when using Python 3.6.5 in Windows 10,
             # print(out) doesn't raise UnicodeEncodeError if out contains
--- llvm/utils/lit/lit/llvm/config.py	2025-11-09 00:39:55.000000 +0000
+++ llvm/utils/lit/lit/llvm/config.py	2025-11-09 00:41:15.207780 +0000
@@ -51,11 +51,14 @@
                 ["SystemDrive", "SystemRoot", "TEMP", "TMP", "PLATFORM"]
             )
             self.use_lit_shell = True
 
             global lit_path_displayed
-            if self.lit_config.diagnostic_level_enabled("note") and lit_path_displayed is False:
+            if (
+                self.lit_config.diagnostic_level_enabled("note")
+                and lit_path_displayed is False
+            ):
                 self.lit_config.note("using lit tools: {}".format(path))
                 lit_path_displayed = True
 
         if platform.system() == "OS/390":
             self.with_environment("_BPXK_AUTOCVT", "ON")

``````````

</details>


https://github.com/llvm/llvm-project/pull/167192


More information about the libcxx-commits mailing list