[test-suite] r317310 - add microbenchmark litsupport plugin

Eizan Miyamoto via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 3 03:01:13 PDT 2017


Author: eizan
Date: Fri Nov  3 03:01:12 2017
New Revision: 317310

URL: http://llvm.org/viewvc/llvm-project?rev=317310&view=rev
Log:
add microbenchmark litsupport plugin
use microbenchmark litsupport plugin for XRay benchmarks

The litsupport module was originally written by @MatzeB and provided in
https://reviews.llvm.org/D37421; it is committed here with some minor edits
to the test output metric name (note: s/exec_time/microbenchmark_time_ns/).

Approved in https://reviews.llvm.org/D38496
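
For context, the renamed metric surfaces through lit's test-result metrics.
Below is a minimal sketch of the equivalent lit API call, assuming litsupport
attaches collected metrics roughly like this (the actual call site lives
inside the litsupport framework, not in this patch, and the value is made up):

    import lit.Test

    # Hypothetical illustration only: the time summed by the new module would
    # end up on the test result under the renamed key.
    result = lit.Test.Result(lit.Test.PASS)
    result.addMetric('microbenchmark_time_ns', lit.Test.toMetricValue(470.6))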

Added:
    test-suite/trunk/litsupport/modules/microbenchmark.py
Modified:
    test-suite/trunk/MicroBenchmarks/XRay/lit.local.cfg

Modified: test-suite/trunk/MicroBenchmarks/XRay/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/MicroBenchmarks/XRay/lit.local.cfg?rev=317310&r1=317309&r2=317310&view=diff
==============================================================================
--- test-suite/trunk/MicroBenchmarks/XRay/lit.local.cfg (original)
+++ test-suite/trunk/MicroBenchmarks/XRay/lit.local.cfg Fri Nov  3 03:01:12 2017
@@ -1 +1,8 @@
 config.environment['XRAY_OPTIONS'] = 'patch_premain=false xray_naive_log=false'
+test_modules = config.test_modules
+if 'run' in test_modules:
+    # Insert the microbenchmark module directly after 'run'
+    test_modules.insert(test_modules.index('run')+1, 'microbenchmark')
+    # Timeit results are not useful for microbenchmarks
+    if 'timeit' in test_modules:
+        test_modules.remove('timeit')
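
For illustration, here is the same insertion logic applied to a hypothetical
starting list (the real test_modules value comes from the test-suite's
top-level lit configuration, so the exact contents below are assumptions):

    # Hypothetical starting configuration; the actual list is defined by the
    # top-level lit.cfg.
    test_modules = ['compile', 'run', 'timeit', 'stats']
    if 'run' in test_modules:
        # Place 'microbenchmark' directly after 'run'.
        test_modules.insert(test_modules.index('run') + 1, 'microbenchmark')
        # Whole-binary wall-clock timing is not useful here, so drop it.
        if 'timeit' in test_modules:
            test_modules.remove('timeit')
    # test_modules is now ['compile', 'run', 'microbenchmark', 'stats']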

Added: test-suite/trunk/litsupport/modules/microbenchmark.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/microbenchmark.py?rev=317310&view=auto
==============================================================================
--- test-suite/trunk/litsupport/modules/microbenchmark.py (added)
+++ test-suite/trunk/litsupport/modules/microbenchmark.py Fri Nov  3 03:01:12 2017
@@ -0,0 +1,46 @@
+'''Test module to collect Google Benchmark results.'''
+from litsupport import shellcommand
+from litsupport import testplan
+import csv
+import lit.Test
+
+
+def _mutateCommandLine(context, commandline):
+    cmd = shellcommand.parse(commandline)
+    cmd.arguments.append("--benchmark_format=csv")
+    # We need stdout ourselves to get the benchmark CSV data.
+    if cmd.stdout is not None:
+        raise Exception("Rerouting stdout not allowed for microbenchmarks")
+    benchfile = context.tmpBase + '.bench.csv'
+    cmd.stdout = benchfile
+    context.microbenchfiles.append(benchfile)
+
+    return cmd.toCommandline()
+
+
+def _mutateScript(context, script):
+    return testplan.mutateScript(context, script, _mutateCommandLine)
+
+
+def _collectMicrobenchmarkTime(context, microbenchfiles):
+    result = 0.0
+    for f in microbenchfiles:
+        with open(f) as inp:
+            lines = csv.reader(inp)
+            # First line: "name,iterations,real_time,cpu_time,time_unit..."
+            for line in lines:
+                if line[0] == 'name':
+                    continue
+                # Note that we cannot create new tests here, so for now we
+                # just add up the cpu_time values (column 3) of every row.
+                result += float(line[3])
+    return {'microbenchmark_time_ns': lit.Test.toMetricValue(result)}
+
+
+def mutatePlan(context, plan):
+    context.microbenchfiles = []
+    plan.runscript = _mutateScript(context, plan.runscript)
+    plan.metric_collectors.append(
+        lambda context: _collectMicrobenchmarkTime(context,
+                                                   context.microbenchfiles)
+    )
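
To illustrate what the collector consumes, here is a small self-contained
example of parsing CSV rows in the shape the module expects (the column names
match the comment in the code above; the benchmark names and timings are
made up):

    import csv

    # Made-up rows in the documented format; real data is written to
    # <tmpBase>.bench.csv by the mutated run command.
    sample_rows = [
        "name,iterations,real_time,cpu_time,time_unit",
        "BM_patching,1000,152.3,151.9,ns",
        "BM_logging,500,320.1,318.7,ns",
    ]

    total_ns = 0.0
    for row in csv.reader(sample_rows):
        if row[0] == 'name':
            continue
        # Column 3 is cpu_time, the same field _collectMicrobenchmarkTime sums.
        total_ns += float(row[3])

    print(total_ns)  # 470.6, reported as 'microbenchmark_time_ns'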



