[libcxx-commits] [libcxx] [libc++] Add scripts to run benchmarks and submit to LNT on a regular basis (PR #180849)
Louis Dionne via libcxx-commits
libcxx-commits at lists.llvm.org
Wed Feb 11 07:01:05 PST 2026
https://github.com/ldionne updated https://github.com/llvm/llvm-project/pull/180849
>From 66ea06de708e8a91a7d2b9505d816a23fca0c0ab Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Tue, 10 Feb 2026 13:57:17 -0500
Subject: [PATCH 1/4] [libc++] Add scripts to run benchmarks and submit to LNT
on a regular basis
Also add a libc++ schema that can be used to create a test suite capable
of storing libc++ benchmarking data on any LNT instance.
---
libcxx/utils/ci/benchmark-for-lnt.py | 133 --------------------
libcxx/utils/ci/lnt/README.md | 29 +++++
libcxx/utils/ci/lnt/commit-watch | 116 +++++++++++++++++
libcxx/utils/ci/lnt/run-benchmarks | 182 +++++++++++++++++++++++++++
libcxx/utils/ci/lnt/schema.yaml | 44 +++++++
5 files changed, 371 insertions(+), 133 deletions(-)
delete mode 100755 libcxx/utils/ci/benchmark-for-lnt.py
create mode 100644 libcxx/utils/ci/lnt/README.md
create mode 100755 libcxx/utils/ci/lnt/commit-watch
create mode 100755 libcxx/utils/ci/lnt/run-benchmarks
create mode 100644 libcxx/utils/ci/lnt/schema.yaml
diff --git a/libcxx/utils/ci/benchmark-for-lnt.py b/libcxx/utils/ci/benchmark-for-lnt.py
deleted file mode 100755
index a139c58bb366f..0000000000000
--- a/libcxx/utils/ci/benchmark-for-lnt.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env python
-# ===----------------------------------------------------------------------===##
-#
-# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-#
-# ===----------------------------------------------------------------------===##
-
-import argparse
-import os
-import pathlib
-import subprocess
-import sys
-import tempfile
-
-def step(message: str) -> None:
- print(message, file=sys.stderr)
-
-def directory_path(string):
- if os.path.isdir(string):
- return pathlib.Path(string)
- else:
- raise NotADirectoryError(string)
-
-def main(argv):
- parser = argparse.ArgumentParser(
- prog='benchmark-for-lnt',
- description='Benchmark libc++ at the given commit for submitting to LNT.')
- parser.add_argument('-o', '--output', type=argparse.FileType('w'), default='-',
- help='Path to the file where the resulting LNT report containing benchmark results is written. '
- 'By default, stdout.')
- parser.add_argument('--benchmark-commit', type=str, required=True,
- help='The SHA representing the version of the library to benchmark.')
- parser.add_argument('--test-suite-commit', type=str, required=True,
- help='The SHA representing the version of the test suite to use for benchmarking.')
- parser.add_argument('--machine', type=str, required=True,
- help='The name of the machine for reporting LNT results.')
- parser.add_argument('--spec-dir', type=pathlib.Path, required=False,
- help='Optional path to a SPEC installation to use for benchmarking.')
- parser.add_argument('--git-repo', type=directory_path, default=os.getcwd(),
- help='Optional path to the Git repository to use. By default, the current working directory is used.')
- parser.add_argument('--dry-run', action='store_true',
- help='Only print what would be executed.')
- parser.add_argument('-v', '--verbose', action='store_true',
- help='Print the output of all subcommands.')
- args = parser.parse_args(argv)
-
- def run(command, *posargs, enforce_success=True, **kwargs):
- command = [str(c) for c in command]
- if args.dry_run:
- print(f'$ {" ".join(command)}')
- else:
- # If we're running with verbose, print everything but redirect output to stderr since
- # we already output the json to stdout in some cases. Otherwise, hush everything.
- if args.verbose:
- if 'stdout' not in kwargs:
- kwargs.update({'stdout': sys.stderr})
- else:
- if 'stdout' not in kwargs:
- kwargs.update({'stdout': subprocess.DEVNULL})
- if 'stderr' not in kwargs:
- kwargs.update({'stderr': subprocess.DEVNULL})
- if enforce_success:
- subprocess.check_call(command, *posargs, **kwargs)
- else:
- subprocess.call(command, *posargs, **kwargs)
-
- with tempfile.TemporaryDirectory() as build_dir:
- build_dir = pathlib.Path(build_dir)
-
- step(f'Building libc++ at commit {args.benchmark_commit}')
- run([args.git_repo / 'libcxx/utils/build-at-commit',
- '--git-repo', args.git_repo,
- '--install-dir', build_dir / 'install',
- '--commit', args.benchmark_commit,
- '--', '-DCMAKE_BUILD_TYPE=RelWithDebInfo'])
-
- if args.spec_dir is not None:
- step(f'Running SPEC benchmarks from {args.test_suite_commit} against libc++ {args.benchmark_commit}')
- run([args.git_repo / 'libcxx/utils/test-at-commit',
- '--git-repo', args.git_repo,
- '--build-dir', build_dir / 'spec',
- '--test-suite-commit', args.test_suite_commit,
- '--libcxx-installation', build_dir / 'install',
- '--',
- '-j1', '--time-tests',
- '--param', 'optimization=speed',
- '--param', 'std=c++17',
- '--param', f'spec_dir={args.spec_dir}',
- build_dir / 'spec/libcxx/test',
- '--filter', 'benchmarks/spec.gen.py'],
- enforce_success=False)
-
- # TODO: For now, we run only a subset of the benchmarks because running the whole test suite is too slow.
- # Run the whole test suite once https://github.com/llvm/llvm-project/issues/173032 is resolved.
- step(f'Running microbenchmarks from {args.test_suite_commit} against libc++ {args.benchmark_commit}')
- run([args.git_repo / 'libcxx/utils/test-at-commit',
- '--git-repo', args.git_repo,
- '--build-dir', build_dir / 'micro',
- '--test-suite-commit', args.test_suite_commit,
- '--libcxx-installation', build_dir / 'install',
- '--',
- '-j1', '--time-tests',
- '--param', 'optimization=speed',
- '--param', 'std=c++26',
- build_dir / 'micro/libcxx/test',
- '--filter', 'benchmarks/(algorithms|containers|iterators|locale|memory|streams|numeric|utility)'],
- enforce_success=False)
-
- step('Installing LNT')
- run(['python', '-m', 'venv', build_dir / '.venv'])
- run([build_dir / '.venv/bin/pip', 'install', 'llvm-lnt'])
-
- step('Consolidating benchmark results and creating JSON report')
- if args.spec_dir is not None:
- with open(build_dir / 'benchmarks.lnt', 'w') as f:
- run([args.git_repo / 'libcxx/utils/consolidate-benchmarks', build_dir / 'spec'], stdout=f)
- with open(build_dir / 'benchmarks.lnt', 'a') as f:
- run([args.git_repo / 'libcxx/utils/consolidate-benchmarks', build_dir / 'micro'], stdout=f)
- order = len(subprocess.check_output(['git', '-C', args.git_repo, 'rev-list', args.benchmark_commit]).splitlines())
- commit_info = subprocess.check_output(['git', '-C', args.git_repo, 'show', args.benchmark_commit, '--no-patch']).decode()
- run([build_dir / '.venv/bin/lnt', 'importreport', '--order', str(order), '--machine', args.machine,
- '--run-info', f'commit_info={commit_info}',
- build_dir / 'benchmarks.lnt', build_dir / 'benchmarks.json'])
-
- if not args.dry_run:
- with open(build_dir / 'benchmarks.json', 'r') as f:
- args.output.write(f.read())
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
diff --git a/libcxx/utils/ci/lnt/README.md b/libcxx/utils/ci/lnt/README.md
new file mode 100644
index 0000000000000..f0fe4524e7bd4
--- /dev/null
+++ b/libcxx/utils/ci/lnt/README.md
@@ -0,0 +1,29 @@
+This directory contains utilities for continuous benchmarking of libc++ with LNT.
+This can be done locally using a local instance, or using a public instance like http://lnt.llvm.org.
+
+Example for running locally:
+
+```
+# Create an instance and run a server
+lnt create my-instance
+echo "api_auth_token = 'example_token'" >> my-instance/lnt.cfg
+lnt runserver my-instance
+
+# In another terminal, create the libcxx test suite on the locally-running server
+cat <<EOF > lnt-admin-config.yaml
+lnt_url: "http://localhost:8000"
+database: default
+auth_token: example_token
+EOF
+lnt admin --config lnt-admin-config.yaml --testsuite libcxx test-suite add libcxx/utils/ci/lnt/schema.yaml
+
+# Then, watch for libc++ commits and submit benchmark results to the locally-running instance
+libcxx/utils/ci/lnt/commit-watch --machine my-laptop --test-suite libcxx --lnt-url http://localhost:8000 -- \
+ libcxx/utils/ci/lnt/run-benchmarks \
+ --test-suite-commit abcdef09 \
+ --lnt-url http://localhost:8000 \
+ --machine my-laptop \
+ --test-suite libcxx \
+ --compiler clang++ \
+ --benchmark-commit
+```
diff --git a/libcxx/utils/ci/lnt/commit-watch b/libcxx/utils/ci/lnt/commit-watch
new file mode 100755
index 0000000000000..f4200a3b7eeff
--- /dev/null
+++ b/libcxx/utils/ci/lnt/commit-watch
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# ===----------------------------------------------------------------------===##
+#
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# ===----------------------------------------------------------------------===##
+
+from typing import List, Set
+import argparse
+import json
+import logging
+import os
+import pathlib
+import subprocess
+import sys
+
+
+def directory_path(string):
+ if os.path.isdir(string):
+ return pathlib.Path(string)
+ else:
+ raise NotADirectoryError(string)
+
+def api(lnt_url: str, test_suite: str, endpoint: str):
+ url = f'{lnt_url}/api/db_default/v4/{test_suite}{endpoint}'
+ logging.debug(f'Querying {url}')
+ result = json.loads(subprocess.check_output(['curl', '-sS', url]).decode())
+ return result
+
+def get_benchmarked_commits(lnt_url: str, test_suite: str, machine: str) -> Set[str]:
+ """
+ Return the set of commits that have already been benchmarked on the given LNT
+ instance, test suite and machine.
+ """
+ result = api(lnt_url, test_suite, f'/machines/{machine}')
+ commits = set()
+ if 'runs' not in result: # there is no such machine
+ return set()
+ for run in result['runs']:
+ if 'git_sha' not in run:
+ raise ValueError(f'Found run without a git_sha field: are you using the right LNT test suite? {run}')
+ commits.add(run['git_sha'])
+ return commits
+
+def git_rev_list(git_repo: str, paths: List[str] = []) -> List[str]:
+ """
+ Return the list of all revisions in the given Git repository. Older commits are earlier in the list.
+ """
+ cli = ['git', '-C', git_repo, 'rev-list', 'origin', '--', *paths]
+ rev_list = subprocess.check_output(cli).decode().strip().splitlines()
+ return list(reversed(rev_list))
+
+def git_sorted_revlist(git_repo: str, commits: List[str]) -> List[str]:
+ """
+ Return the list of commits sorted by their chronological order (from oldest to newest) in the
+ provided Git repository. Items earlier in the list are older than items later in the list.
+ """
+ revlist_cmd = ['git', '-C', git_repo, 'rev-list', '--no-walk'] + list(commits)
+ revlist = subprocess.check_output(revlist_cmd, text=True).strip().splitlines()
+ return list(reversed(revlist))
+
+def get_all_libcxx_commits(git_repo: str) -> Set[str]:
+ """
+ Return the set of commits available to benchmark for libc++: this is the list of
+ commits that touch code in libc++ that we care to benchmark.
+ """
+ logging.debug(f'Fetching {git_repo}')
+ subprocess.check_call(['git', '-C', git_repo, 'fetch', '--quiet', 'origin'])
+ return set(git_rev_list(git_repo, paths=['libcxx/include', 'libcxx/src']))
+
+
+def main(argv):
+ parser = argparse.ArgumentParser(
+ prog='commit-watch',
+ description='Watch for libc++ commits to run benchmarks on, and run the specified command on them.')
+ parser.add_argument('--machine', type=str, required=True,
+ help='The name of the machine that we are producing results for on the LNT instance.')
+ parser.add_argument('--test-suite', type=str, required=True,
+ help='The name of the test suite that we are producing results for on the LNT instance.')
+ parser.add_argument('--lnt-url', type=str, required=True,
+ help='The URL of the LNT instance to use as the source of truth for finding already-benchmarked commits.')
+ parser.add_argument('--git-repo', type=directory_path, default=os.getcwd(),
+ help='Optional path to the Git repository to use. By default, the current working directory is used.')
+ parser.add_argument('callback', nargs=argparse.REMAINDER,
+ help='The command to run on each commit that we determine to require benchmarking. Should be provided last and '
+ 'separated from other arguments with a `--`.')
+ args = parser.parse_args(argv)
+
+ logging.basicConfig(level=logging.INFO)
+
+ # Gather callback command
+ if not args.callback:
+ raise ValueError('A callback must be provided')
+ else:
+ if args.callback[0] != '--':
+ raise ValueError('For clarity, the callback must be separated from other options by --')
+ callback = args.callback[1:]
+
+ while True:
+ logging.info(f'Getting benchmarked commits for {args.machine}')
+ benchmarked_commits = get_benchmarked_commits(args.lnt_url, args.test_suite, args.machine)
+
+ logging.info(f'Getting all libc++ commits in {args.git_repo}')
+ all_commits = get_all_libcxx_commits(args.git_repo)
+
+ # Invoke the callback, processing most recent commits first
+ commits_to_benchmark = git_sorted_revlist(args.git_repo, list(all_commits - benchmarked_commits))
+ most_recent = commits_to_benchmark[-1]
+ logging.info(f'Benchmarking libc++ at {most_recent}')
+ subprocess.check_call(callback + [most_recent])
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/libcxx/utils/ci/lnt/run-benchmarks b/libcxx/utils/ci/lnt/run-benchmarks
new file mode 100755
index 0000000000000..65b07e97b8982
--- /dev/null
+++ b/libcxx/utils/ci/lnt/run-benchmarks
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+# ===----------------------------------------------------------------------===##
+#
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# ===----------------------------------------------------------------------===##
+
+import argparse
+import json
+import logging
+import os
+import pathlib
+import platform
+import subprocess
+import sys
+import tempfile
+
+
+def directory_path(string):
+ if os.path.isdir(string):
+ return pathlib.Path(string)
+ else:
+ raise NotADirectoryError(string)
+
+def gather_machine_information(args):
+ """
+ Gather the machine information to upload to LNT as part of the submission.
+ """
+ info = {}
+ if platform.system() == 'Darwin':
+ profiler_info = json.loads(subprocess.check_output(['system_profiler', 'SPHardwareDataType', 'SPSoftwareDataType', '-json']).decode())
+ info['hardware'] = profiler_info['SPHardwareDataType'][0]['chip_type']
+ info['os'] = profiler_info['SPSoftwareDataType'][0]['os_version']
+ info['sdk'] = subprocess.check_output(['xcrun', '--show-sdk-version']).decode().strip()
+
+ info['compiler'] = subprocess.check_output([args.compiler, '--version']).decode().strip().splitlines()[0]
+ info['test_suite_commit'] = subprocess.check_output(['git', '-C', args.git_repo, 'rev-parse', args.test_suite_commit]).decode().strip()
+
+ return info
+
+def gather_run_information(args):
+ """
+ Gather the run information to upload to LNT as part of the submission.
+ """
+ info = {}
+ info['commit_info'] = subprocess.check_output(['git', '-C', args.git_repo, 'show', args.benchmark_commit, '--no-patch']).decode()
+ info['git_sha'] = subprocess.check_output(['git', '-C', args.git_repo, 'rev-parse', args.benchmark_commit]).decode().strip()
+ return info
+
+def dict_to_params(d):
+ """
+ Return a list of 'key=value' strings from a dictionary.
+ """
+ res = []
+ for (k, v) in d.items():
+ res.append(f'{k}={v}')
+ return res
+
+def main(argv):
+ parser = argparse.ArgumentParser(
+ prog='run-benchmarks',
+ description='Benchmark libc++ at the given commit and submit to LNT.')
+ parser.add_argument('--benchmark-commit', type=str, required=True,
+ help='The SHA representing the version of the library to benchmark.')
+ parser.add_argument('--test-suite-commit', type=str, required=True,
+ help='The SHA representing the version of the test suite to use for benchmarking.')
+ parser.add_argument('--compiler', type=str, required=True,
+ help='Path to the compiler to use for testing.')
+ parser.add_argument('--machine', type=str, required=True,
+ help='The name of the machine for reporting LNT results.')
+ parser.add_argument('--test-suite', type=str, required=True,
+ help='The name of the test suite for reporting LNT results.')
+ parser.add_argument('--lnt-url', type=str, required=True,
+ help='The URL of the LNT instance to submit results to.')
+ parser.add_argument('--disable-microbenchmarks', action='store_true',
+ help="Do not run the microbenchmarks, only run SPEC (if possible).")
+ parser.add_argument('--spec-dir', type=pathlib.Path, required=False,
+ help='Optional path to a SPEC installation to use for benchmarking.')
+ parser.add_argument('--git-repo', type=directory_path, default=os.getcwd(),
+ help='Optional path to the Git repository to use. By default, the current working directory is used.')
+ parser.add_argument('--dry-run', action='store_true',
+ help='Only print what would be executed.')
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help='Print the output of all subcommands.')
+ args = parser.parse_args(argv)
+
+ logging.basicConfig(level=logging.INFO)
+
+ do_spec = args.spec_dir is not None
+ do_micro = not args.disable_microbenchmarks
+ if not (do_spec or do_micro):
+ raise ValueError("You must run at least the microbenchmarks or SPEC")
+
+ def run(command, *posargs, enforce_success=True, **kwargs):
+ command = [str(c) for c in command]
+ if args.dry_run or args.verbose:
+ print(f'$ {" ".join(command)}')
+
+ if not args.dry_run:
+ # If we're running with verbose, print everything; otherwise, hush everything.
+ if not args.verbose:
+ if 'stdout' not in kwargs:
+ kwargs.update({'stdout': subprocess.DEVNULL})
+ if 'stderr' not in kwargs:
+ kwargs.update({'stderr': subprocess.DEVNULL})
+ if enforce_success:
+ subprocess.check_call(command, *posargs, **kwargs)
+ else:
+ subprocess.call(command, *posargs, **kwargs)
+
+ with tempfile.TemporaryDirectory() as build_dir:
+ build_dir = pathlib.Path(build_dir)
+
+ logging.info('Installing LNT')
+ run(['python', '-m', 'venv', build_dir / '.venv'])
+ run([build_dir / '.venv/bin/pip', 'install', 'llvm-lnt'])
+
+ logging.info(f'Building libc++ at commit {args.benchmark_commit}')
+ run([args.git_repo / 'libcxx/utils/build-at-commit',
+ '--git-repo', args.git_repo,
+ '--install-dir', build_dir / 'install',
+ '--commit', args.benchmark_commit,
+ '--', '-DCMAKE_BUILD_TYPE=RelWithDebInfo', f'-DCMAKE_CXX_COMPILER={args.compiler}'])
+
+ if do_spec:
+ logging.info(f'Running SPEC benchmarks from {args.test_suite_commit} against libc++ {args.benchmark_commit}')
+ run([args.git_repo / 'libcxx/utils/test-at-commit',
+ '--git-repo', args.git_repo,
+ '--build-dir', build_dir / 'spec',
+ '--test-suite-commit', args.test_suite_commit,
+ '--libcxx-installation', build_dir / 'install',
+ '--',
+ '-j1', '--time-tests',
+ '--param', f'compiler={args.compiler}',
+ '--param', 'optimization=speed',
+ '--param', 'std=c++17',
+ '--param', f'spec_dir={args.spec_dir}',
+ build_dir / 'spec/libcxx/test',
+ '--filter', 'benchmarks/spec.gen.py'],
+ enforce_success=False)
+ with open(build_dir / 'benchmarks.lnt', 'a') as f:
+ run([args.git_repo / 'libcxx/utils/consolidate-benchmarks', build_dir / 'spec'], stdout=f)
+
+ # TODO: For now, we run only a subset of the benchmarks because running the whole test suite is too slow.
+ # Run the whole test suite once https://github.com/llvm/llvm-project/issues/173032 is resolved.
+ if do_micro:
+ logging.info(f'Running microbenchmarks from {args.test_suite_commit} against libc++ {args.benchmark_commit}')
+ run([args.git_repo / 'libcxx/utils/test-at-commit',
+ '--git-repo', args.git_repo,
+ '--build-dir', build_dir / 'micro',
+ '--test-suite-commit', args.test_suite_commit,
+ '--libcxx-installation', build_dir / 'install',
+ '--',
+ '-j1', '--time-tests',
+ '--param', f'compiler={args.compiler}',
+ '--param', 'optimization=speed',
+ '--param', 'std=c++26',
+ build_dir / 'micro/libcxx/test',
+ '--filter', 'benchmarks/(algorithms|containers|iterators|locale|memory|streams|numeric|utility)'],
+ enforce_success=False)
+ with open(build_dir / 'benchmarks.lnt', 'a') as f:
+ run([args.git_repo / 'libcxx/utils/consolidate-benchmarks', build_dir / 'micro'], stdout=f)
+
+ logging.info('Creating JSON report for LNT')
+ order = len(subprocess.check_output(['git', '-C', args.git_repo, 'rev-list', args.benchmark_commit]).splitlines())
+ importreport = [build_dir / '.venv/bin/lnt', 'importreport', '--order', str(order), '--machine', args.machine]
+ for arg in dict_to_params(gather_run_information(args)):
+ importreport += ['--run-info', arg]
+ for arg in dict_to_params(gather_machine_information(args)):
+ importreport += ['--machine-info', arg]
+ importreport += [build_dir / 'benchmarks.lnt', build_dir / 'benchmarks.json']
+ run(importreport)
+
+ logging.info(f'Submitting results to {args.lnt_url}')
+ submission_url = f'{args.lnt_url}/db_default/v4/{args.test_suite}/submitRun'
+ run([build_dir / '.venv/bin/lnt', 'submit', '--ignore-regressions', submission_url, build_dir / 'benchmarks.json'])
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/libcxx/utils/ci/lnt/schema.yaml b/libcxx/utils/ci/lnt/schema.yaml
new file mode 100644
index 0000000000000..7b4e3da42635e
--- /dev/null
+++ b/libcxx/utils/ci/lnt/schema.yaml
@@ -0,0 +1,44 @@
+# Schema definition for the libc++ benchmark suite when running a LNT
+# instance. The metrics in this schema correspond to the metrics gathered
+# by libc++'s benchmark suite, and the "run fields" correspond to the
+# information gathered by the script that runs those benchmarks for
+# uploading with LNT.
+format_version: '2'
+name: libcxx
+metrics:
+- name: execution_time
+ type: Real
+ display_name: Execution Time
+ unit: seconds
+ unit_abbrev: s
+- name: instructions
+ type: Real
+ display_name: Instructions Retired
+ unit: instructions
+ unit_abbrev: instr
+- name: max_rss
+ type: Real
+ display_name: Maximum Resident Set Size
+ unit: megabytes
+ unit_abbrev: mb
+- name: cycles
+ type: Real
+ display_name: Cycles Elapsed
+ unit: cycles
+ unit_abbrev: cycles
+- name: peak_memory
+ type: Real
+ display_name: Peak Memory Footprint
+ unit: megabytes
+ unit_abbrev: mb
+run_fields:
+- name: llvm_project_revision
+ order: true
+- name: commit_info
+- name: git_sha
+machine_fields:
+- name: hardware
+- name: os
+- name: test_suite_commit
+- name: compiler
+- name: sdk
>From ab841db060e2b4ac45d4f66d52cacf57e3f3b756 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Wed, 11 Feb 2026 09:21:44 -0500
Subject: [PATCH 2/4] Rename git_sorted_revlist
---
libcxx/utils/ci/lnt/commit-watch | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/libcxx/utils/ci/lnt/commit-watch b/libcxx/utils/ci/lnt/commit-watch
index f4200a3b7eeff..d27e028324d4a 100755
--- a/libcxx/utils/ci/lnt/commit-watch
+++ b/libcxx/utils/ci/lnt/commit-watch
@@ -52,7 +52,7 @@ def git_rev_list(git_repo: str, paths: List[str] = []) -> List[str]:
rev_list = subprocess.check_output(cli).decode().strip().splitlines()
return list(reversed(rev_list))
-def git_sorted_revlist(git_repo: str, commits: List[str]) -> List[str]:
+def git_sort_revlist(git_repo: str, commits: List[str]) -> List[str]:
"""
Return the list of commits sorted by their chronological order (from oldest to newest) in the
provided Git repository. Items earlier in the list are older than items later in the list.
@@ -106,7 +106,7 @@ def main(argv):
all_commits = get_all_libcxx_commits(args.git_repo)
# Invoke the callback, processing most recent commits first
- commits_to_benchmark = git_sorted_revlist(args.git_repo, list(all_commits - benchmarked_commits))
+ commits_to_benchmark = git_sort_revlist(args.git_repo, list(all_commits - benchmarked_commits))
most_recent = commits_to_benchmark[-1]
logging.info(f'Benchmarking libc++ at {most_recent}')
subprocess.check_call(callback + [most_recent])
>From e1cd0b9b839d2195a8b9804d7dbe6a2901b0e8a4 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Wed, 11 Feb 2026 09:23:07 -0500
Subject: [PATCH 3/4] Use elif
---
libcxx/utils/ci/lnt/commit-watch | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/libcxx/utils/ci/lnt/commit-watch b/libcxx/utils/ci/lnt/commit-watch
index d27e028324d4a..9d5157eeb4c37 100755
--- a/libcxx/utils/ci/lnt/commit-watch
+++ b/libcxx/utils/ci/lnt/commit-watch
@@ -93,10 +93,9 @@ def main(argv):
# Gather callback command
if not args.callback:
raise ValueError('A callback must be provided')
- else:
- if args.callback[0] != '--':
- raise ValueError('For clarity, the callback must be separated from other options by --')
- callback = args.callback[1:]
+ elif args.callback[0] != '--':
+ raise ValueError('For clarity, the callback must be separated from other options by --')
+ callback = args.callback[1:]
while True:
logging.info(f'Getting benchmarked commits for {args.machine}')
>From dea12f29f639ba525c15c4315993be5ec81b9167 Mon Sep 17 00:00:00 2001
From: Louis Dionne <ldionne.2 at gmail.com>
Date: Wed, 11 Feb 2026 09:59:53 -0500
Subject: [PATCH 4/4] Switch to a more fswatch-like API
---
libcxx/utils/ci/lnt/README.md | 18 ++++++------
libcxx/utils/ci/lnt/commit-watch | 49 ++++++++++++++++++--------------
2 files changed, 37 insertions(+), 30 deletions(-)
diff --git a/libcxx/utils/ci/lnt/README.md b/libcxx/utils/ci/lnt/README.md
index f0fe4524e7bd4..2f2f42e89d6db 100644
--- a/libcxx/utils/ci/lnt/README.md
+++ b/libcxx/utils/ci/lnt/README.md
@@ -18,12 +18,14 @@ EOF
lnt admin --config lnt-admin-config.yaml --testsuite libcxx test-suite add libcxx/utils/ci/lnt/schema.yaml
# Then, watch for libc++ commits and submit benchmark results to the locally-running instance
-libcxx/utils/ci/lnt/commit-watch --machine my-laptop --test-suite libcxx --lnt-url http://localhost:8000 -- \
- libcxx/utils/ci/lnt/run-benchmarks \
- --test-suite-commit abcdef09 \
- --lnt-url http://localhost:8000 \
- --machine my-laptop \
- --test-suite libcxx \
- --compiler clang++ \
- --benchmark-commit
+libcxx/utils/ci/lnt/commit-watch --lnt-url http://localhost:8000 --test-suite libcxx --machine my-laptop | \
+ while read commit; do \
+ libcxx/utils/ci/lnt/run-benchmarks \
+ --test-suite-commit abcdef09 \
+ --lnt-url http://localhost:8000 \
+ --machine my-laptop \
+ --test-suite libcxx \
+ --compiler clang++ \
+ --benchmark-commit ${commit}; \
+ done
```
diff --git a/libcxx/utils/ci/lnt/commit-watch b/libcxx/utils/ci/lnt/commit-watch
index 9d5157eeb4c37..19cefa1f4b894 100755
--- a/libcxx/utils/ci/lnt/commit-watch
+++ b/libcxx/utils/ci/lnt/commit-watch
@@ -15,6 +15,7 @@ import os
import pathlib
import subprocess
import sys
+import time
def directory_path(string):
@@ -74,41 +75,45 @@ def get_all_libcxx_commits(git_repo: str) -> Set[str]:
def main(argv):
parser = argparse.ArgumentParser(
prog='commit-watch',
- description='Watch for libc++ commits to run benchmarks on, and run the specified command on them.')
- parser.add_argument('--machine', type=str, required=True,
- help='The name of the machine that we are producing results for on the LNT instance.')
- parser.add_argument('--test-suite', type=str, required=True,
- help='The name of the test suite that we are producing results for on the LNT instance.')
+ description='Watch for libc++ commits to run benchmarks on and print them to standard output.')
parser.add_argument('--lnt-url', type=str, required=True,
help='The URL of the LNT instance to use as the source of truth for finding already-benchmarked commits.')
+ parser.add_argument('--test-suite', type=str, required=True,
+ help='The name of the test suite that we are producing results for on the LNT instance.')
+ parser.add_argument('--machine', type=str, required=True,
+ help='The name of the machine that we are producing results for on the LNT instance.')
parser.add_argument('--git-repo', type=directory_path, default=os.getcwd(),
help='Optional path to the Git repository to use. By default, the current working directory is used.')
- parser.add_argument('callback', nargs=argparse.REMAINDER,
- help='The command to run on each commit that we determine to require benchmarking. Should be provided last and '
- 'separated from other arguments with a `--`.')
+ parser.add_argument('-v', '--verbose', action='count', default=0,
+ help='Verbosity level: passing the option multiple times increases the level.')
args = parser.parse_args(argv)
- logging.basicConfig(level=logging.INFO)
-
- # Gather callback command
- if not args.callback:
- raise ValueError('A callback must be provided')
- elif args.callback[0] != '--':
- raise ValueError('For clarity, the callback must be separated from other options by --')
- callback = args.callback[1:]
+ VERY_VERBOSE = logging.DEBUG - 1
+ if args.verbose == 1:
+ logging.basicConfig(level=logging.INFO)
+ elif args.verbose == 2:
+ logging.basicConfig(level=logging.DEBUG)
+ elif args.verbose > 2:
+ logging.basicConfig(level=VERY_VERBOSE)
while True:
- logging.info(f'Getting benchmarked commits for {args.machine}')
- benchmarked_commits = get_benchmarked_commits(args.lnt_url, args.test_suite, args.machine)
-
logging.info(f'Getting all libc++ commits in {args.git_repo}')
all_commits = get_all_libcxx_commits(args.git_repo)
+ logging.log(VERY_VERBOSE, f'Found all libc++ commits: {all_commits}')
- # Invoke the callback, processing most recent commits first
+ logging.info(f'Getting benchmarked commits for {args.machine}')
+ benchmarked_commits = get_benchmarked_commits(args.lnt_url, args.test_suite, args.machine)
+ logging.debug(f'Found benchmarked commits: {benchmarked_commits}')
+
+ # Print the next commit that should be benchmarked
commits_to_benchmark = git_sort_revlist(args.git_repo, list(all_commits - benchmarked_commits))
most_recent = commits_to_benchmark[-1]
- logging.info(f'Benchmarking libc++ at {most_recent}')
- subprocess.check_call(callback + [most_recent])
+ logging.info(f'Selected {most_recent}')
+ print(most_recent, flush=True)
+
+ # Sleep between runs to ensure that the process reading data has time to make the
+ # submission, and also to avoid busy-polling.
+ time.sleep(5)
if __name__ == '__main__':
More information about the libcxx-commits
mailing list