[libcxx-commits] [libcxx] [libc++] Parse Google Benchmark results into LNT-compatible format (PR #157466)

via libcxx-commits libcxx-commits at lists.llvm.org
Mon Sep 8 06:56:11 PDT 2025


github-actions[bot] wrote:

<!--LLVM CODE FORMAT COMMENT: {darker}-->


:warning: The Python code formatter, darker, found issues in your code. :warning:

<details>
<summary>
You can test this locally with the following command:
</summary>

``````````bash
darker --check --diff -r origin/main...HEAD libcxx/test/benchmarks/spec.gen.py libcxx/utils/libcxx/test/format.py
``````````

:warning:
The reproduction instructions above might return results for more than one PR
in a stack if you are using a stacked PR workflow. You can limit the results by
changing `origin/main` to the base branch/commit you want to compare against.
:warning:

</details>

<details>
<summary>
View the diff from darker here.
</summary>

``````````diff
--- test/benchmarks/spec.gen.py	2025-09-08 13:53:20.000000 +0000
+++ test/benchmarks/spec.gen.py	2025-09-08 13:55:45.875775 +0000
@@ -72,7 +72,9 @@
     print(f'RUN: %{{spec_dir}}/bin/runcpu --config %T/spec-config.cfg --size train --output-root %T --rebuild {benchmark}')
     print(f'RUN: rm -rf %T/benchspec') # remove the temporary directory, which can become quite large
 
     # Parse the results into a LNT-compatible format. This also errors out if there are no CSV files, which
     # means that the benchmark didn't run properly (the `runcpu` command above never reports a failure).
-    print(f'RUN: %{{libcxx-dir}}/utils/parse-spec-results %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt')
-    print(f'RUN: cat %T/results.lnt')
+    print(
+        f"RUN: %{{libcxx-dir}}/utils/parse-spec-results %T/result/CPUv8.001.*.train.csv --output-format=lnt > %T/results.lnt"
+    )
+    print(f"RUN: cat %T/results.lnt")
--- utils/libcxx/test/format.py	2025-09-08 13:53:20.000000 +0000
+++ utils/libcxx/test/format.py	2025-09-08 13:55:45.983884 +0000
@@ -352,13 +352,19 @@
                 )
             steps = [
                 "%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{benchmark_flags} %{link_flags} -o %t.exe",
             ]
             if "enable-benchmarks=run" in test.config.available_features:
-                steps += ["%dbg(EXECUTED AS) %{exec} %t.exe --benchmark_out=%T/benchmark-result.json --benchmark_out_format=json"]
-                parse_results = os.path.join(LIBCXX_UTILS, 'parse-google-benchmark-results')
-                steps += [f"{parse_results} %T/benchmark-result.json --output-format=lnt > %T/results.lnt"]
+                steps += [
+                    "%dbg(EXECUTED AS) %{exec} %t.exe --benchmark_out=%T/benchmark-result.json --benchmark_out_format=json"
+                ]
+                parse_results = os.path.join(
+                    LIBCXX_UTILS, "parse-google-benchmark-results"
+                )
+                steps += [
+                    f"{parse_results} %T/benchmark-result.json --output-format=lnt > %T/results.lnt"
+                ]
             return self._executeShTest(test, litConfig, steps)
         elif re.search('[.]gen[.][^.]+$', filename): # This only happens when a generator test is not supported
             return self._executeShTest(test, litConfig, [])
         else:
             return lit.Test.Result(

``````````

</details>


https://github.com/llvm/llvm-project/pull/157466


More information about the libcxx-commits mailing list