[llvm] Update Benchmark (PR #83488)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 29 13:41:47 PST 2024
github-actions[bot] wrote:
:warning: The Python code formatter, darker, found issues in your code. :warning:
You can test this locally with the following command:
``````````bash
darker --check --diff -r cff36bb198759c4fe557adc594eabc097cf7d565...1c69b4c1d7a413cd9872a18dfe1ae1753b2f2f54 third-party/benchmark/.ycm_extra_conf.py third-party/benchmark/bindings/python/google_benchmark/__init__.py third-party/benchmark/bindings/python/google_benchmark/example.py third-party/benchmark/setup.py third-party/benchmark/tools/compare.py third-party/benchmark/tools/gbench/__init__.py third-party/benchmark/tools/gbench/report.py third-party/benchmark/tools/gbench/util.py third-party/benchmark/tools/strip_asm.py
``````````
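If darker is not yet available, it can be installed from PyPI before running the check. A minimal sketch, assuming a standard pip setup and a local checkout of llvm/llvm-project (only one file is spelled out here; the remaining files from the command above are passed the same way):

``````````bash
# Install the formatter (darker is published on PyPI).
python -m pip install darker

# Re-run the same check against the commit range from this PR,
# from the root of the llvm-project checkout.
darker --check --diff \
    -r cff36bb198759c4fe557adc594eabc097cf7d565...1c69b4c1d7a413cd9872a18dfe1ae1753b2f2f54 \
    third-party/benchmark/tools/compare.py  # ...plus the other files listed above
``````````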
View the diff from darker here.
``````````diff
--- .ycm_extra_conf.py 2024-02-29 20:59:50.000000 +0000
+++ .ycm_extra_conf.py 2024-02-29 21:41:38.794232 +0000
@@ -90,13 +90,11 @@
if IsHeaderFile(filename):
basename = os.path.splitext(filename)[0]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists(replacement_file):
- compilation_info = database.GetCompilationInfoForFile(
- replacement_file
- )
+ compilation_info = database.GetCompilationInfoForFile(replacement_file)
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile(filename)
--- bindings/python/google_benchmark/example.py 2024-02-29 20:59:50.000000 +0000
+++ bindings/python/google_benchmark/example.py 2024-02-29 21:41:38.865129 +0000
@@ -84,13 +84,11 @@
# Automatic Counter from numbers.
state.counters["foo"] = num_foo
# Set a counter as a rate.
state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
# Set a counter as an inverse of rate.
- state.counters["foo_inv_rate"] = Counter(
- num_foo, Counter.kIsRate | Counter.kInvert
- )
+ state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
# Set a counter as a thread-average quantity.
state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
# There's also a combined flag:
state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)
--- setup.py 2024-02-29 20:59:50.000000 +0000
+++ setup.py 2024-02-29 21:41:38.909700 +0000
@@ -102,13 +102,11 @@
self.spawn(bazel_argv)
shared_lib_suffix = ".dll" if IS_WINDOWS else ".so"
ext_name = ext.target_name + shared_lib_suffix
- ext_bazel_bin_path = (
- temp_path / "bazel-bin" / ext.relpath / ext_name
- )
+ ext_bazel_bin_path = temp_path / "bazel-bin" / ext.relpath / ext_name
ext_dest_path = Path(self.get_ext_fullpath(ext.name))
shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
--- tools/compare.py 2024-02-29 20:59:50.000000 +0000
+++ tools/compare.py 2024-02-29 21:41:39.084265 +0000
@@ -40,12 +40,11 @@
if in1_kind == util.IT_JSON and in2_kind == util.IT_JSON:
# When both sides are JSON the only supported flag is
# --benchmark_filter=
for flag in util.remove_benchmark_flags("--benchmark_filter=", flags):
print(
- "WARNING: passing %s has no effect since both "
- "inputs are JSON" % flag
+ "WARNING: passing %s has no effect since both " "inputs are JSON" % flag
)
if output_type is not None and output_type != "json":
print(
(
"ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
@@ -55,13 +54,11 @@
)
sys.exit(1)
def create_parser():
- parser = ArgumentParser(
- description="versatile benchmark output compare tool"
- )
+ parser = ArgumentParser(description="versatile benchmark output compare tool")
parser.add_argument(
"-a",
"--display_aggregates_only",
dest="display_aggregates_only",
@@ -303,13 +300,11 @@
)
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
replacement = "[%s vs. %s]" % (filter_baseline, filter_contender)
- json1 = gbench.report.filter_benchmark(
- json1_orig, filter_baseline, replacement
- )
+ json1 = gbench.report.filter_benchmark(json1_orig, filter_baseline, replacement)
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement
)
diff_report = gbench.report.get_difference_report(json1, json2, args.utest)
@@ -434,13 +429,11 @@
self.assertEqual(parsed.filter_baseline[0], "c")
self.assertEqual(parsed.filter_contender[0], "d")
self.assertFalse(parsed.benchmark_options)
def test_filters_with_remainder(self):
- parsed = self.parser.parse_args(
- ["filters", self.testInput0, "c", "d", "e"]
- )
+ parsed = self.parser.parse_args(["filters", self.testInput0, "c", "d", "e"])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, "filters")
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], "c")
--- tools/gbench/report.py 2024-02-29 20:59:50.000000 +0000
+++ tools/gbench/report.py 2024-02-29 21:41:39.555223 +0000
@@ -59,14 +59,11 @@
is False then all color codes in 'args' and 'kwargs' are replaced with
the empty string.
"""
assert use_color is True or use_color is False
if not use_color:
- args = [
- arg if not isinstance(arg, BenchmarkColor) else BC_NONE
- for arg in args
- ]
+ args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE for arg in args]
kwargs = {
key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for key, arg in kwargs.items()
}
return fmt_str.format(*args, **kwargs)
@@ -297,16 +294,12 @@
{
"real_time": bn["real_time"],
"cpu_time": bn["cpu_time"],
"real_time_other": other_bench["real_time"],
"cpu_time_other": other_bench["cpu_time"],
- "time": calculate_change(
- bn["real_time"], other_bench["real_time"]
- ),
- "cpu": calculate_change(
- bn["cpu_time"], other_bench["cpu_time"]
- ),
+ "time": calculate_change(bn["real_time"], other_bench["real_time"]),
+ "cpu": calculate_change(bn["cpu_time"], other_bench["cpu_time"]),
}
)
# After processing the whole partition, if requested, do the U test.
if utest:
@@ -328,18 +321,15 @@
# E.g. partition_benchmarks will filter out the benchmarks having
# time units which are not compatible with other time units in the
# benchmark suite.
if measurements:
run_type = (
- partition[0][0]["run_type"]
- if "run_type" in partition[0][0]
- else ""
+ partition[0][0]["run_type"] if "run_type" in partition[0][0] else ""
)
aggregate_name = (
partition[0][0]["aggregate_name"]
- if run_type == "aggregate"
- and "aggregate_name" in partition[0][0]
+ if run_type == "aggregate" and "aggregate_name" in partition[0][0]
else ""
)
diff_report.append(
{
"name": benchmark_name,
@@ -458,13 +448,11 @@
class TestGetUniqueBenchmarkNames(unittest.TestCase):
def load_results(self):
import json
- testInputs = os.path.join(
- os.path.dirname(os.path.realpath(__file__)), "Inputs"
- )
+ testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), "Inputs")
testOutput = os.path.join(testInputs, "test3_run0.json")
with open(testOutput, "r") as f:
json = json.load(f)
return json
@@ -1195,13 +1183,11 @@
self.assertEqual(out["time_unit"], expected["time_unit"])
assert_utest(self, out, expected)
assert_measurements(self, out, expected)
-class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
- unittest.TestCase
-):
+class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(unittest.TestCase):
@classmethod
def setUpClass(cls):
def load_results():
import json
@@ -1480,21 +1466,17 @@
"88 family 1 instance 1 aggregate",
]
for n in range(len(self.json["benchmarks"]) ** 2):
random.shuffle(self.json["benchmarks"])
- sorted_benchmarks = util.sort_benchmark_results(self.json)[
- "benchmarks"
- ]
+ sorted_benchmarks = util.sort_benchmark_results(self.json)["benchmarks"]
self.assertEqual(len(expected_names), len(sorted_benchmarks))
for out, expected in zip(sorted_benchmarks, expected_names):
self.assertEqual(out["name"], expected)
-class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(
- unittest.TestCase
-):
+class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly2(unittest.TestCase):
@classmethod
def setUpClass(cls):
def load_results():
import json
@@ -1503,18 +1485,14 @@
)
testOutput1 = os.path.join(testInputs, "test5_run0.json")
testOutput2 = os.path.join(testInputs, "test5_run1.json")
with open(testOutput1, "r") as f:
json1 = json.load(f)
- json1["benchmarks"] = [
- json1["benchmarks"][0] for i in range(1000)
- ]
+ json1["benchmarks"] = [json1["benchmarks"][0] for i in range(1000)]
with open(testOutput2, "r") as f:
json2 = json.load(f)
- json2["benchmarks"] = [
- json2["benchmarks"][0] for i in range(1000)
- ]
+ json2["benchmarks"] = [json2["benchmarks"][0] for i in range(1000)]
return json1, json2
json1, json2 = load_results()
cls.json_diff_report = get_difference_report(json1, json2, utest=True)
--- tools/gbench/util.py 2024-02-29 20:59:50.000000 +0000
+++ tools/gbench/util.py 2024-02-29 21:41:39.667233 +0000
@@ -70,12 +70,11 @@
ftype = IT_Executable
elif is_json_file(filename):
ftype = IT_JSON
else:
err_msg = (
- "'%s' does not name a valid benchmark executable or JSON file"
- % filename
+ "'%s' does not name a valid benchmark executable or JSON file" % filename
)
return ftype, err_msg
def check_input_file(filename):
@@ -195,13 +194,11 @@
is_temp_output = False
if output_name is None:
is_temp_output = True
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
- benchmark_flags = list(benchmark_flags) + [
- "--benchmark_out=%s" % output_name
- ]
+ benchmark_flags = list(benchmark_flags) + ["--benchmark_out=%s" % output_name]
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % " ".join(cmd))
exitCode = subprocess.call(cmd)
if exitCode != 0:
@@ -220,12 +217,10 @@
benchmark. Otherwise 'filename' must name a valid JSON output file,
which is loaded and the result returned.
"""
ftype = check_input_file(filename)
if ftype == IT_JSON:
- benchmark_filter = find_benchmark_flag(
- "--benchmark_filter=", benchmark_flags
- )
+ benchmark_filter = find_benchmark_flag("--benchmark_filter=", benchmark_flags)
return load_benchmark_results(filename, benchmark_filter)
if ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
raise ValueError("Unknown file type %s" % ftype)
--- tools/strip_asm.py 2024-02-29 20:59:50.000000 +0000
+++ tools/strip_asm.py 2024-02-29 21:41:39.726606 +0000
@@ -75,14 +75,11 @@
for tk in parts:
if is_identifier(tk):
if tk.startswith("__Z"):
tk = tk[1:]
elif (
- tk.startswith("_")
- and len(tk) > 1
- and tk[1].isalpha()
- and tk[1] != "Z"
+ tk.startswith("_") and len(tk) > 1 and tk[1].isalpha() and tk[1] != "Z"
):
tk = tk[1:]
new_line += tk
return new_line
@@ -97,13 +94,11 @@
# TODO: Add more things we want to remove
discard_regexes = [
re.compile(r"\s+\..*$"), # directive
re.compile(r"\s*#(NO_APP|APP)$"), # inline ASM
re.compile(r"\s*#.*$"), # comment line
- re.compile(
- r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"
- ), # global directive
+ re.compile(r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), # global directive
re.compile(
r"\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"
),
]
keep_regexes: list[re.Pattern] = []
``````````
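To apply these fixes locally rather than just report them, darker can be run without `--check` and `--diff`, in which case it rewrites the listed files in place. A minimal sketch, again assuming a checkout of llvm/llvm-project:

``````````bash
# Same revision range as the check above; without --check/--diff,
# darker reformats the files in place instead of printing a diff.
darker -r cff36bb198759c4fe557adc594eabc097cf7d565...1c69b4c1d7a413cd9872a18dfe1ae1753b2f2f54 \
    third-party/benchmark/tools/gbench/report.py  # ...and the remaining files from the command above
``````````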
https://github.com/llvm/llvm-project/pull/83488