[PATCH] D60205: [test-suite] Update test-suite microbenchmarks to use JSON (fix bug 41327)
Brian Homerding via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 3 08:15:00 PDT 2019
homerdin created this revision.
homerdin added reviewers: MatzeB, hfinkel, lebedev.ri.
Google Benchmark plans to drop the CSV output format. This updates the microbenchmark module to use the JSON output format instead.
This fixes PR41327.
https://reviews.llvm.org/D60205
Files:
litsupport/modules/microbenchmark.py
Index: litsupport/modules/microbenchmark.py
===================================================================
--- litsupport/modules/microbenchmark.py
+++ litsupport/modules/microbenchmark.py
@@ -1,17 +1,17 @@
'''Test module to collect google benchmark results.'''
from litsupport import shellcommand
from litsupport import testplan
-import csv
+import json
import lit.Test
def _mutateCommandLine(context, commandline):
cmd = shellcommand.parse(commandline)
- cmd.arguments.append("--benchmark_format=csv")
+ cmd.arguments.append("--benchmark_format=json")
# We need stdout ourselves to get the benchmark JSON data.
if cmd.stdout is not None:
raise Exception("Rerouting stdout not allowed for microbenchmarks")
- benchfile = context.tmpBase + '.bench.csv'
+ benchfile = context.tmpBase + '.bench.json'
cmd.stdout = benchfile
context.microbenchfiles.append(benchfile)
@@ -25,18 +25,18 @@
def _collectMicrobenchmarkTime(context, microbenchfiles):
for f in microbenchfiles:
content = context.read_result_file(context, f)
- lines = csv.reader(content.splitlines())
- # First line: "name,iterations,real_time,cpu_time,time_unit..."
- for line in lines:
- if line[0] == 'name':
- continue
+ data = json.loads(content)
+
+ # Create a micro_result for each benchmark
+ for benchmark in data['benchmarks']:
# Name for MicroBenchmark
- name = line[0]
+ name = benchmark['name']
+
# Create Result object with PASS
microBenchmark = lit.Test.Result(lit.Test.PASS)
- # Index 3 is cpu_time
- exec_time_metric = lit.Test.toMetricValue(float(line[3]))
+ # Add the exec_time metric for this result
+ exec_time_metric = lit.Test.toMetricValue(benchmark['cpu_time'])
microBenchmark.addMetric('exec_time', exec_time_metric)
# Add Micro Result
-------------- next part --------------
A non-text attachment was scrubbed...
Name: D60205.193497.patch
Type: text/x-patch
Size: 1996 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20190403/e396ff9d/attachment.bin>
More information about the llvm-commits
mailing list