[test-suite] r313709 - litsupport: Mark file-local functions as such
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 19 20:00:03 PDT 2017
Author: matze
Date: Tue Sep 19 20:00:03 2017
New Revision: 313709
URL: http://llvm.org/viewvc/llvm-project?rev=313709&view=rev
Log:
litsupport: Mark file-local functions as such
Start local functions with an underscore `_` for clarity and so that
other modules cannot (easily) import them: leading-underscore names are
skipped by `from module import *` and are conventionally private.
Modified:
test-suite/trunk/litsupport/perf.py
test-suite/trunk/litsupport/profilegen.py
test-suite/trunk/litsupport/remote.py
test-suite/trunk/litsupport/run_under.py
test-suite/trunk/litsupport/testplan.py
test-suite/trunk/litsupport/timeit.py
Modified: test-suite/trunk/litsupport/perf.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/perf.py?rev=313709&r1=313708&r2=313709&view=diff
==============================================================================
--- test-suite/trunk/litsupport/perf.py (original)
+++ test-suite/trunk/litsupport/perf.py Tue Sep 19 20:00:03 2017
@@ -6,7 +6,7 @@ from litsupport import run_under
import lit.Test
-def mutateCommandLine(context, commandline):
+def _mutateCommandLine(context, commandline):
profilefile = context.tmpBase + ".perf_data"
cmd = shellcommand.parse(commandline)
cmd.wrap('perf', [
@@ -31,7 +31,7 @@ def mutatePlan(context, plan):
if context.config.run_under:
script = testplan.mutateScript(context, script,
run_under.mutateCommandLine)
- script = testplan.mutateScript(context, script, mutateCommandLine)
+ script = testplan.mutateScript(context, script, _mutateCommandLine)
plan.profilescript += script
plan.metric_collectors.append(
lambda context: {
Modified: test-suite/trunk/litsupport/profilegen.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/profilegen.py?rev=313709&r1=313708&r2=313709&view=diff
==============================================================================
--- test-suite/trunk/litsupport/profilegen.py (original)
+++ test-suite/trunk/litsupport/profilegen.py Tue Sep 19 20:00:03 2017
@@ -3,7 +3,7 @@ from litsupport import shellcommand
from litsupport import testplan
-def mutateCommandline(context, commandline):
+def _mutateCommandline(context, commandline):
"""Adjust runscript to set a different value to the LLVM_PROFILE_FILE
environment variable for each execution."""
profilefile = context.tmpBase + ".profraw"
@@ -12,14 +12,14 @@ def mutateCommandline(context, commandli
return prefix + commandline
-def mutateScript(context, script):
- return testplan.mutateScript(context, script, mutateCommandline)
+def _mutateScript(context, script):
+ return testplan.mutateScript(context, script, _mutateCommandline)
def mutatePlan(context, plan):
context.profilefiles = []
# Adjust run steps to set LLVM_PROFILE_FILE
- plan.runscript = mutateScript(context, plan.runscript)
+ plan.runscript = _mutateScript(context, plan.runscript)
# Run profdata merge at the end
profdatafile = context.executable + ".profdata"
args = ['merge', '-output=%s' % profdatafile] + context.profilefiles
Modified: test-suite/trunk/litsupport/remote.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/remote.py?rev=313709&r1=313708&r2=313709&view=diff
==============================================================================
--- test-suite/trunk/litsupport/remote.py (original)
+++ test-suite/trunk/litsupport/remote.py Tue Sep 19 20:00:03 2017
@@ -5,7 +5,7 @@ from litsupport import testplan
import logging
-def mutateCommandline(context, commandline, suffix=""):
+def _mutateCommandline(context, commandline, suffix=""):
shfilename = context.tmpBase + suffix + ".sh"
shfile = open(shfilename, "w")
shfile.write(commandline + "\n")
@@ -25,11 +25,11 @@ def mutateCommandline(context, commandli
return remote_commandline
-def mutateScript(context, script, suffix=""):
- mutate = lambda c, cmd: mutateCommandline(c, cmd, suffix)
+def _mutateScript(context, script, suffix=""):
+ mutate = lambda c, cmd: _mutateCommandline(c, cmd, suffix)
return testplan.mutateScript(context, script, mutate)
def mutatePlan(context, plan):
- plan.preparescript = mutateScript(context, plan.preparescript, "-prepare")
- plan.runscript = mutateScript(context, plan.runscript)
+ plan.preparescript = _mutateScript(context, plan.preparescript, "-prepare")
+ plan.runscript = _mutateScript(context, plan.runscript)
Modified: test-suite/trunk/litsupport/run_under.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/run_under.py?rev=313709&r1=313708&r2=313709&view=diff
==============================================================================
--- test-suite/trunk/litsupport/run_under.py (original)
+++ test-suite/trunk/litsupport/run_under.py Tue Sep 19 20:00:03 2017
@@ -4,7 +4,7 @@ from litsupport import shellcommand
from litsupport import testplan
-def mutateCommandLine(context, commandline):
+def _mutateCommandLine(context, commandline):
cmd = shellcommand.parse(commandline)
run_under_cmd = shellcommand.parse(context.config.run_under)
@@ -24,4 +24,4 @@ def mutatePlan(context, plan):
run_under = context.config.run_under
if run_under:
plan.runscript = testplan.mutateScript(context, plan.runscript,
- mutateCommandLine)
+ _mutateCommandLine)
Modified: test-suite/trunk/litsupport/testplan.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/testplan.py?rev=313709&r1=313708&r2=313709&view=diff
==============================================================================
--- test-suite/trunk/litsupport/testplan.py (original)
+++ test-suite/trunk/litsupport/testplan.py Tue Sep 19 20:00:03 2017
@@ -35,7 +35,7 @@ def mutateScript(context, script, mutato
return mutated_script
-def executeScript(context, script, scriptBaseName, useExternalSh=True):
+def _executeScript(context, script, scriptBaseName, useExternalSh=True):
if len(script) == 0:
return "", "", 0, None
@@ -86,20 +86,20 @@ def check_call(commandline, *aargs, **da
return subprocess.check_call(commandline, *aargs, **dargs)
-def executePlan(context, plan):
+def _executePlan(context, plan):
"""This is the main driver for executing a benchmark."""
# Execute PREPARE: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.preparescript, "prepare")
+ _, _, exitCode, _ = _executeScript(context, plan.preparescript, "prepare")
if exitCode != 0:
return lit.Test.FAIL
# Execute RUN: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.runscript, "run")
+ _, _, exitCode, _ = _executeScript(context, plan.runscript, "run")
if exitCode != 0:
return lit.Test.FAIL
# Execute VERIFY: part of the test.
- _, _, exitCode, _ = executeScript(context, plan.verifyscript, "verify")
+ _, _, exitCode, _ = _executeScript(context, plan.verifyscript, "verify")
if exitCode != 0:
# The question here is whether to still collects metrics if the
# benchmark results are invalid. I choose to avoid getting potentially
@@ -107,7 +107,7 @@ def executePlan(context, plan):
return lit.Test.FAIL
# Execute additional profile gathering actions setup by testing modules.
- _, _, exitCode, _ = executeScript(context, plan.profilescript, "profile")
+ _, _, exitCode, _ = _executeScript(context, plan.profilescript, "profile")
if exitCode != 0:
logging.warning("Profile script '%s' failed", plan.profilescript)
@@ -123,8 +123,8 @@ def executePlan(context, plan):
# Execute the METRIC: part of the test.
for metric, metricscript in plan.metricscripts.items():
- out, err, exitCode, timeoutInfo = executeScript(context, metricscript,
- "metric")
+ out, err, exitCode, timeoutInfo = _executeScript(context, metricscript,
+ "metric")
if exitCode != 0:
logging.warning("Metric script for '%s' failed", metric)
continue
@@ -139,12 +139,12 @@ def executePlan(context, plan):
def executePlanTestResult(context, testplan):
- """Convenience function to invoke executePlan() and construct a
+ """Convenience function to invoke _executePlan() and construct a
lit.test.Result() object for the results."""
context.result_output = ""
context.result_metrics = {}
- result_code = executePlan(context, testplan)
+ result_code = _executePlan(context, testplan)
# Build test result object
result = lit.Test.Result(result_code, context.result_output)
Modified: test-suite/trunk/litsupport/timeit.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/timeit.py?rev=313709&r1=313708&r2=313709&view=diff
==============================================================================
--- test-suite/trunk/litsupport/timeit.py (original)
+++ test-suite/trunk/litsupport/timeit.py Tue Sep 19 20:00:03 2017
@@ -4,7 +4,7 @@ import lit.Test
import re
-def mutateCommandLine(context, commandline):
+def _mutateCommandLine(context, commandline):
outfile = context.tmpBase + ".out"
timefile = context.tmpBase + ".time"
config = context.config
@@ -47,10 +47,10 @@ def mutateCommandLine(context, commandli
return cmd.toCommandline()
-def mutateScript(context, script):
+def _mutateScript(context, script):
if not hasattr(context, "timefiles"):
context.timefiles = []
- return testplan.mutateScript(context, script, mutateCommandLine)
+ return testplan.mutateScript(context, script, _mutateCommandLine)
def _collectTime(context, timefiles, metric_name='exec_time'):
@@ -64,7 +64,7 @@ def mutatePlan(context, plan):
if len(plan.runscript) == 0:
return
context.timefiles = []
- plan.runscript = mutateScript(context, plan.runscript)
+ plan.runscript = _mutateScript(context, plan.runscript)
plan.metric_collectors.append(
lambda context: _collectTime(context, context.timefiles)
)
More information about the llvm-commits
mailing list