[libcxx-commits] [libcxx] [libcxx] Use %{temp} instead of %T (PR #162323)

via libcxx-commits libcxx-commits at lists.llvm.org
Tue Oct 7 09:55:26 PDT 2025


github-actions[bot] wrote:



:warning: The Python code formatter, darker, found issues in your code. :warning:

<details>
<summary>
You can test this locally with the following command:
</summary>

``````````bash
darker --check --diff -r origin/main...HEAD libcxx/test/benchmarks/spec.gen.py libcxx/test/selftest/dsl/dsl.sh.py libcxx/utils/libcxx/test/dsl.py libcxx/utils/libcxx/test/format.py libcxx/utils/ssh.py
``````````

:warning:
The reproduction instructions above might return results for more than one PR
in a stack if you are using a stacked PR workflow. You can limit the results by
changing `origin/main` to the base branch/commit you want to compare against.
:warning:
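
For instance, in a stacked PR workflow you could compare against the parent PR's branch rather than `origin/main`; the branch name below is only a placeholder:

``````````bash
darker --check --diff -r origin/my-parent-branch...HEAD libcxx/utils/libcxx/test/dsl.py
``````````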

</details>
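
If the suggested formatting looks right, one way to apply it locally (assuming `darker` is installed in your environment) is to drop `--check --diff` so darker rewrites the files in place, e.g. for the two files touched by the diff below:

``````````bash
darker -r origin/main...HEAD libcxx/test/benchmarks/spec.gen.py libcxx/utils/libcxx/test/format.py
``````````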

<details>
<summary>
View the diff from darker here.
</summary>

``````````diff
--- test/benchmarks/spec.gen.py	2025-10-07 16:50:51.000000 +0000
+++ test/benchmarks/spec.gen.py	2025-10-07 16:54:56.773733 +0000
@@ -63,21 +63,29 @@
 with open(spec_dir / 'benchspec' / 'CPU' / 'no_fortran.bset', 'r') as f:
     no_fortran.update(json.load(f)['benchmarks'])
 spec_benchmarks &= no_fortran
 
 for benchmark in spec_benchmarks:
-    print(f'#--- {benchmark}.sh.test')
-    print(f'RUN: rm -rf %{temp}') # clean up any previous (potentially incomplete) run
-    print(f'RUN: mkdir %{temp}')
-    print(f'RUN: cp {spec_config} %{temp}/spec-config.cfg')
-    print(f'RUN: %{{spec_dir}}/bin/runcpu --config %{temp}/spec-config.cfg --size train --output-root %{temp} --rebuild {benchmark}')
-    print(f'RUN: rm -rf %{temp}/benchspec') # remove the temporary directory, which can become quite large
+    print(f"#--- {benchmark}.sh.test")
+    print(f"RUN: rm -rf %{temp}")  # clean up any previous (potentially incomplete) run
+    print(f"RUN: mkdir %{temp}")
+    print(f"RUN: cp {spec_config} %{temp}/spec-config.cfg")
+    print(
+        f"RUN: %{{spec_dir}}/bin/runcpu --config %{temp}/spec-config.cfg --size train --output-root %{temp} --rebuild {benchmark}"
+    )
+    print(
+        f"RUN: rm -rf %{temp}/benchspec"
+    )  # remove the temporary directory, which can become quite large
 
     # The `runcpu` command above doesn't fail even if the benchmark fails to run. To determine failure, parse the CSV
     # results and ensure there are no compilation errors or runtime errors in the status row. Also print the logs and
     # fail if there are no CSV files at all, which implies a SPEC error.
-    print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results --extract "Base Status" --keep-failed %{temp}/result/*.train.csv > %{temp}/status || ! cat %{temp}/result/*.log')
+    print(
+        f'RUN: %{{libcxx-dir}}/utils/parse-spec-results --extract "Base Status" --keep-failed %{temp}/result/*.train.csv > %{temp}/status || ! cat %{temp}/result/*.log'
+    )
     print(f'RUN: ! grep -E "CE|RE" %{temp}/status || ! cat %{temp}/result/*.log')
 
     # If there were no errors, parse the results into LNT-compatible format and print them.
-    print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results %{temp}/result/*.train.csv --output-format=lnt > %{temp}/results.lnt')
-    print(f'RUN: cat %{temp}/results.lnt')
+    print(
+        f"RUN: %{{libcxx-dir}}/utils/parse-spec-results %{temp}/result/*.train.csv --output-format=lnt > %{temp}/results.lnt"
+    )
+    print(f"RUN: cat %{temp}/results.lnt")
--- utils/libcxx/test/format.py	2025-10-07 16:50:51.000000 +0000
+++ utils/libcxx/test/format.py	2025-10-07 16:54:57.567103 +0000
@@ -176,15 +176,18 @@
             script.insert(
                 0,
                 "%dbg(MODULE std.compat) %{cxx} %{flags} "
                 f"{compileFlags} "
                 "-Wno-reserved-module-identifier -Wno-reserved-user-defined-literal "
-                "-fmodule-file=std=%{temp}/std.pcm " # The std.compat module imports std.
+                "-fmodule-file=std=%{temp}/std.pcm "  # The std.compat module imports std.
                 "--precompile -o %{temp}/std.compat.pcm -c %{module-dir}/std.compat.cppm",
             )
             moduleCompileFlags.extend(
-                ["-fmodule-file=std.compat=%{temp}/std.compat.pcm", "%{temp}/std.compat.pcm"]
+                [
+                    "-fmodule-file=std.compat=%{temp}/std.compat.pcm",
+                    "%{temp}/std.compat.pcm",
+                ]
             )
 
         # Make sure the std module is built before std.compat. Libc++'s
         # std.compat module depends on the std module. It is not
         # known whether the compiler expects the modules in the order of
@@ -195,11 +198,13 @@
             "%dbg(MODULE std) %{cxx} %{flags} "
             f"{compileFlags} "
             "-Wno-reserved-module-identifier -Wno-reserved-user-defined-literal "
             "--precompile -o %{temp}/std.pcm -c %{module-dir}/std.cppm",
         )
-        moduleCompileFlags.extend(["-fmodule-file=std=%{temp}/std.pcm", "%{temp}/std.pcm"])
+        moduleCompileFlags.extend(
+            ["-fmodule-file=std=%{temp}/std.pcm", "%{temp}/std.pcm"]
+        )
 
         # Add compile flags required for the modules.
         substitutions = config._appendToSubstitution(
             substitutions, "%{compile_flags}", " ".join(moduleCompileFlags)
         )
@@ -353,13 +358,19 @@
                 )
             steps = [
                 "%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{benchmark_flags} %{link_flags} -o %t.exe",
             ]
             if "enable-benchmarks=run" in test.config.available_features:
-                steps += ["%dbg(EXECUTED AS) %{exec} %t.exe --benchmark_out=%{temp}/benchmark-result.json --benchmark_out_format=json"]
-                parse_results = os.path.join(LIBCXX_UTILS, 'parse-google-benchmark-results')
-                steps += [f"{parse_results} %{temp}/benchmark-result.json --output-format=lnt > %{temp}/results.lnt"]
+                steps += [
+                    "%dbg(EXECUTED AS) %{exec} %t.exe --benchmark_out=%{temp}/benchmark-result.json --benchmark_out_format=json"
+                ]
+                parse_results = os.path.join(
+                    LIBCXX_UTILS, "parse-google-benchmark-results"
+                )
+                steps += [
+                    f"{parse_results} %{temp}/benchmark-result.json --output-format=lnt > %{temp}/results.lnt"
+                ]
             return self._executeShTest(test, litConfig, steps)
         elif re.search('[.]gen[.][^.]+$', filename): # This only happens when a generator test is not supported
             return self._executeShTest(test, litConfig, [])
         else:
             return lit.Test.Result(

``````````

</details>


https://github.com/llvm/llvm-project/pull/162323


More information about the libcxx-commits mailing list