[test-suite] r357704 - [test-suite] Update test-suite microbenchmarks to use JSON (fix bug 41327)

Brian Homerding via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 4 08:12:08 PDT 2019


Author: homerdin
Date: Thu Apr  4 08:12:08 2019
New Revision: 357704

URL: http://llvm.org/viewvc/llvm-project?rev=357704&view=rev
Log:
[test-suite] Update test-suite microbenchmarks to use JSON (fix bug 41327)

Google Benchmark is planning to drop the CSV output format. This updates the microbenchmark module to use the JSON output format instead.

This fixes PR41327

Reviewers: lebedev.ri

https://reviews.llvm.org/D60205
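
For reference, Google Benchmark's --benchmark_format=json output is a single JSON object with a 'benchmarks' array; the field names used below ('benchmarks', 'name', 'cpu_time') are the ones the updated module reads. The sample values and benchmark name here are made up for illustration only; a minimal sketch of the parsing:

    import json

    # Hypothetical sample of --benchmark_format=json output (values are invented).
    sample = '''
    {
      "context": {"date": "2019-04-04", "num_cpus": 4},
      "benchmarks": [
        {"name": "BM_example/64", "iterations": 1000,
         "real_time": 12.3, "cpu_time": 12.1, "time_unit": "ns"}
      ]
    }
    '''

    data = json.loads(sample)
    # Each entry in 'benchmarks' carries its own cpu_time, so no column
    # indexing is needed as it was with the CSV format.
    for benchmark in data['benchmarks']:
        print(benchmark['name'], benchmark['cpu_time'])
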

Modified:
    test-suite/trunk/litsupport/modules/microbenchmark.py

Modified: test-suite/trunk/litsupport/modules/microbenchmark.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/microbenchmark.py?rev=357704&r1=357703&r2=357704&view=diff
==============================================================================
--- test-suite/trunk/litsupport/modules/microbenchmark.py (original)
+++ test-suite/trunk/litsupport/modules/microbenchmark.py Thu Apr  4 08:12:08 2019
@@ -1,17 +1,17 @@
 '''Test module to collect google benchmark results.'''
 from litsupport import shellcommand
 from litsupport import testplan
-import csv
+import json
 import lit.Test
 
 
 def _mutateCommandLine(context, commandline):
     cmd = shellcommand.parse(commandline)
-    cmd.arguments.append("--benchmark_format=csv")
+    cmd.arguments.append("--benchmark_format=json")
     # We need stdout ourselves to get the benchmark JSON data.
     if cmd.stdout is not None:
         raise Exception("Rerouting stdout not allowed for microbenchmarks")
-    benchfile = context.tmpBase + '.bench.csv'
+    benchfile = context.tmpBase + '.bench.json'
     cmd.stdout = benchfile
     context.microbenchfiles.append(benchfile)
 
@@ -25,18 +25,18 @@ def _mutateScript(context, script):
 def _collectMicrobenchmarkTime(context, microbenchfiles):
     for f in microbenchfiles:
         content = context.read_result_file(context, f)
-        lines = csv.reader(content.splitlines())
-        # First line: "name,iterations,real_time,cpu_time,time_unit..."
-        for line in lines:
-            if line[0] == 'name':
-                continue
+        data = json.loads(content)
+
+        # Create a micro_result for each benchmark
+        for benchmark in data['benchmarks']:
             # Name for MicroBenchmark
-            name = line[0]
+            name = benchmark['name']
+
             # Create Result object with PASS
             microBenchmark = lit.Test.Result(lit.Test.PASS)
 
-            # Index 3 is cpu_time
-            exec_time_metric = lit.Test.toMetricValue(float(line[3]))
+            # Add the exec_time metric for this result
+            exec_time_metric = lit.Test.toMetricValue(benchmark['cpu_time'])
             microBenchmark.addMetric('exec_time', exec_time_metric)
 
             # Add Micro Result



