[test-suite] r269246 - litsupport: Introduce a PREPARE: part for test files.
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Wed May 11 15:22:30 PDT 2016
Author: matze
Date: Wed May 11 17:22:30 2016
New Revision: 269246
URL: http://llvm.org/viewvc/llvm-project?rev=269246&view=rev
Log:
litsupport: Introduce a PREPARE: part for test files.
PREPARE: allows specifying steps that should happen before the benchmark
runs. While you should generally try to keep preparation steps in the
build phase of the benchmark, it is sometimes necessary to remove the
previous results of a benchmark before it can be run again.
Putting such steps into a separate PREPARE: section ensures that we
neither measure their runtime nor attempt to merge profile data for them.
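
For illustration, a test file using the new keyword could look like this
sketch (the commands and paths are made up; %o is the output-file
substitution set up in testfile.py, and %S is lit's source-directory
substitution from getDefaultSubstitutions):

    PREPARE: rm -rf %S/Output/results.dir
    RUN: %S/benchmark --results %S/Output/results.dir > %o
    VERIFY: diff %S/expected_output %o

The PREPARE: commands must succeed before the RUN: commands execute, but
their runtime is not measured and no profile data is merged for them.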
Modified:
test-suite/trunk/litsupport/remote.py
test-suite/trunk/litsupport/run.py
test-suite/trunk/litsupport/testfile.py
test-suite/trunk/litsupport/testplan.py
Modified: test-suite/trunk/litsupport/remote.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/remote.py?rev=269246&r1=269245&r2=269246&view=diff
==============================================================================
--- test-suite/trunk/litsupport/remote.py (original)
+++ test-suite/trunk/litsupport/remote.py Wed May 11 17:22:30 2016
@@ -27,4 +27,5 @@ def mutateScript(context, script):
 def mutatePlan(context, plan):
+    plan.preparescript = mutateScript(context, plan.preparescript)
     plan.runscript = mutateScript(context, plan.runscript)
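
For context, mutateScript in remote.py rewrites each command of a script
so that it executes on the remote device; the added line merely applies
the same rewriting to the new PREPARE: script. A minimal sketch of such
a mutator, assuming an ssh-based wrapper and a remote_host config field
(both illustrative assumptions, not the actual remote.py implementation):

    def mutateCommandLine(context, commandline):
        # Hypothetical: run the command on a remote host via ssh; the
        # real module takes its connection details from the lit config.
        return "ssh %s '%s'" % (context.config.remote_host, commandline)

    def mutateScript(context, script):
        return [mutateCommandLine(context, line) for line in script]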
Modified: test-suite/trunk/litsupport/run.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/run.py?rev=269246&r1=269245&r2=269246&view=diff
==============================================================================
--- test-suite/trunk/litsupport/run.py (original)
+++ test-suite/trunk/litsupport/run.py Wed May 11 17:22:30 2016
@@ -1,6 +1,7 @@
 def mutatePlan(context, plan):
     """The most basic test module: Execute the RUN:, VERIFY: and METRIC:
     scripts"""
+    plan.preparescript = context.parsed_preparescript
     plan.runscript = context.parsed_runscript
     plan.verifyscript = context.parsed_verifyscript
     plan.metricscripts = context.parsed_metricscripts
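
The plan object is a plain mutable container that this basic module
fills in and that later modules (such as remote.py above) may rewrite.
A minimal sketch of such a container with the fields used by this
commit (an assumption about its shape; the real class lives in
testplan.py):

    class TestPlan(object):
        def __init__(self):
            self.preparescript = []
            self.runscript = []
            self.verifyscript = []
            self.metricscripts = {}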
Modified: test-suite/trunk/litsupport/testfile.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/testfile.py?rev=269246&r1=269245&r2=269246&view=diff
==============================================================================
--- test-suite/trunk/litsupport/testfile.py (original)
+++ test-suite/trunk/litsupport/testfile.py Wed May 11 17:22:30 2016
@@ -23,13 +23,16 @@ def parse(context, filename):
     specifying shell commands to run the benchmark and verifying the result.
     Returns a tuple with two arrays for the run and verify commands."""
     # Collect the test lines from the script.
+    preparescript = []
     runscript = []
     verifyscript = []
     metricscripts = {}
-    keywords = ['RUN:', 'VERIFY:', 'METRIC:']
+    keywords = ['PREPARE:', 'RUN:', 'VERIFY:', 'METRIC:']
     for line_number, command_type, ln in \
             parseIntegratedTestScriptCommands(filename, keywords):
-        if command_type == 'RUN':
+        if command_type == 'PREPARE':
+            _parseShellCommand(preparescript, ln)
+        elif command_type == 'RUN':
             _parseShellCommand(runscript, ln)
         elif command_type == 'VERIFY':
             _parseShellCommand(verifyscript, ln)
@@ -46,7 +49,7 @@ def parse(context, filename):
         raise ValueError("Test has no RUN: line!")

     # Check for unterminated run lines.
-    for script in runscript, verifyscript:
+    for script in preparescript, runscript, verifyscript:
         if script and script[-1][-1] == '\\':
             raise ValueError("Test has unterminated RUN/VERIFY lines " +
                              "(ending with '\\')")
@@ -56,12 +59,14 @@ def parse(context, filename):
     substitutions = getDefaultSubstitutions(context.test, context.tmpDir,
                                             context.tmpBase)
     substitutions += [('%o', outfile)]
+    preparescript = applySubstitutions(preparescript, substitutions)
     runscript = applySubstitutions(runscript, substitutions)
     verifyscript = applySubstitutions(verifyscript, substitutions)
     metricscripts = {k: applySubstitutions(v, substitutions)
                      for k, v in metricscripts.items()}

     # Put things into the context
+    context.parsed_preparescript = preparescript
     context.parsed_runscript = runscript
     context.parsed_verifyscript = verifyscript
     context.parsed_metricscripts = metricscripts
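
parseIntegratedTestScriptCommands comes from lit and yields one
(line_number, keyword, line) tuple per keyword line it finds, which is
why extending the keywords list is all the scanner needs. A rough,
self-contained stand-in for that scanning loop (not lit's actual
implementation):

    import re

    def scan_keywords(text, keywords):
        # Yield (line_number, keyword_without_colon, rest_of_line) for
        # every line containing one of the given keywords.
        pattern = re.compile('(%s)(.*)$'
                             % '|'.join(re.escape(k) for k in keywords))
        for number, line in enumerate(text.splitlines(), start=1):
            match = pattern.search(line)
            if match:
                yield number, match.group(1)[:-1], match.group(2).strip()

    text = ("PREPARE: rm -rf Output/data\n"
            "RUN: ./benchmark > out.txt\n"
            "VERIFY: diff expected out.txt\n")
    for number, kind, command in scan_keywords(
            text, ['PREPARE:', 'RUN:', 'VERIFY:', 'METRIC:']):
        print(number, kind, command)
    # 1 PREPARE rm -rf Output/data
    # 2 RUN ./benchmark > out.txt
    # 3 VERIFY diff expected out.txt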
Modified: test-suite/trunk/litsupport/testplan.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/testplan.py?rev=269246&r1=269245&r2=269246&view=diff
==============================================================================
--- test-suite/trunk/litsupport/testplan.py (original)
+++ test-suite/trunk/litsupport/testplan.py Wed May 11 17:22:30 2016
@@ -87,12 +87,17 @@ def check_call(commandline, *aargs, **da
 def executePlan(context, plan):
     """This is the main driver for executing a benchmark."""
-    # Execute RUN: part of the test file.
+    # Execute PREPARE: part of the test.
+    _, _, exitCode, _ = executeScript(context, plan.preparescript)
+    if exitCode != 0:
+        return lit.Test.FAIL
+
+    # Execute RUN: part of the test.
     _, _, exitCode, _ = executeScript(context, plan.runscript)
     if exitCode != 0:
         return lit.Test.FAIL

-    # Execute VERIFY: part of the test file.
+    # Execute VERIFY: part of the test.
     _, _, exitCode, _ = executeScript(context, plan.verifyscript)
     if exitCode != 0:
         # The question here is whether to still collects metrics if the
@@ -110,7 +115,7 @@ def executePlan(context, plan):
             logging.error("Could not collect metric with %s", metric_collector,
                           exc_info=e)

-    # Execute the METRIC: part of the test file.
+    # Execute the METRIC: part of the test.
     for metric, metricscript in plan.metricscripts.items():
         out, err, exitCode, timeoutInfo = executeScript(context, metricscript)
         if exitCode != 0:
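
To make the short-circuit behavior concrete: PREPARE:, RUN: and VERIFY:
each abort the test on the first failing phase, so preparation work can
never leak into the measured run. A toy driver with a stubbed
executeScript (illustrative only; the real executeScript runs the shell
commands and returns out, err, exitCode and timeout information):

    def executeScript(context, script):
        # Stub: pretend every command succeeds.
        for command in script:
            print("would run:", command)
        return "", "", 0, None

    def executePlan(context, plan):
        for script in (plan["preparescript"], plan["runscript"],
                       plan["verifyscript"]):
            _, _, exitCode, _ = executeScript(context, script)
            if exitCode != 0:
                return "FAIL"
        return "PASS"

    plan = {"preparescript": ["rm -rf Output/data"],
            "runscript": ["./benchmark > out.txt"],
            "verifyscript": ["diff expected out.txt"]}
    print(executePlan(None, plan))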