[PATCH] D28668: [test-suite] Make all lit scripts have a unique name.
Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 16 06:59:55 PST 2017
This revision was automatically updated to reflect the committed changes.
Closed by commit rL292129: Make all lit scripts have a unique name. (authored by kbeyls).
Changed prior to commit:
https://reviews.llvm.org/D28668?vs=84263&id=84558#toc
Repository:
rL LLVM
https://reviews.llvm.org/D28668
Files:
test-suite/trunk/litsupport/testplan.py
Index: test-suite/trunk/litsupport/testplan.py
===================================================================
--- test-suite/trunk/litsupport/testplan.py
+++ test-suite/trunk/litsupport/testplan.py
@@ -35,7 +35,7 @@
return mutated_script
-def executeScript(context, script, useExternalSh=True):
+def executeScript(context, script, scriptBaseName, useExternalSh=True):
if len(script) == 0:
return "", "", 0, None
@@ -47,8 +47,8 @@
executeFunc = lit.TestRunner.executeScriptInternal
logging.info("\n".join(script))
- res = executeFunc(context.test, context.litConfig, context.tmpBase, script,
- execdir)
+ res = executeFunc(context.test, context.litConfig,
+ context.tmpBase + "_" + scriptBaseName, script, execdir)
# The executeScript() functions return lit.Test.Result in some error
# conditions instead of the normal tuples. Having different return types is
# really annoying so we transform it back to the usual tuple.
@@ -89,17 +89,17 @@
def executePlan(context, plan):
"""This is the main driver for executing a benchmark."""
# Execute PREPARE: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.preparescript)
+ _, _, exitCode, _ = executeScript(context, plan.preparescript, "prepare")
if exitCode != 0:
return lit.Test.FAIL
# Execute RUN: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.runscript)
+ _, _, exitCode, _ = executeScript(context, plan.runscript, "run")
if exitCode != 0:
return lit.Test.FAIL
# Execute VERIFY: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.verifyscript)
+ _, _, exitCode, _ = executeScript(context, plan.verifyscript, "verify")
if exitCode != 0:
# The question here is whether to still collect metrics if the
# benchmark results are invalid. I choose to avoid getting potentially
@@ -118,7 +118,8 @@
# Execute the METRIC: part of the test.
for metric, metricscript in plan.metricscripts.items():
- out, err, exitCode, timeoutInfo = executeScript(context, metricscript)
+ out, err, exitCode, timeoutInfo = executeScript(context, metricscript,
+ "metric")
if exitCode != 0:
logging.warning("Metric script for '%s' failed", metric)
continue
@@ -130,7 +131,7 @@
metric, out)
# Execute additional profile gathering actions setup by testing modules.
- _, _, exitCode, _ = executeScript(context, plan.profilescript)
+ _, _, exitCode, _ = executeScript(context, plan.profilescript, "profile")
if exitCode != 0:
logging.warning("Profile script '%s' failed", plan.profilescript)