[test-suite] r259753 - lit: Split lit.cfg into separate files
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 3 21:15:01 PST 2016
Author: matze
Date: Wed Feb 3 23:15:01 2016
New Revision: 259753
URL: http://llvm.org/viewvc/llvm-project?rev=259753&view=rev
Log:
lit: Split lit.cfg into separate files
Added:
test-suite/trunk/litsupport/
test-suite/trunk/litsupport/__init__.py
test-suite/trunk/litsupport/compiletime.py
test-suite/trunk/litsupport/perf.py
test-suite/trunk/litsupport/runsafely.py
test-suite/trunk/litsupport/test.py
test-suite/trunk/litsupport/testscript.py
test-suite/trunk/litsupport/timeit.py
Modified:
test-suite/trunk/.gitignore
test-suite/trunk/lit.cfg
Modified: test-suite/trunk/.gitignore
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/.gitignore?rev=259753&r1=259752&r2=259753&view=diff
==============================================================================
--- test-suite/trunk/.gitignore (original)
+++ test-suite/trunk/.gitignore Wed Feb 3 23:15:01 2016
@@ -1,3 +1,4 @@
# This is the default location to checkout sourcecode for the benchmarks in
# External/*
/test-suite-externals
+*.pyc
Modified: test-suite/trunk/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/lit.cfg?rev=259753&r1=259752&r2=259753&view=diff
==============================================================================
--- test-suite/trunk/lit.cfg (original)
+++ test-suite/trunk/lit.cfg Wed Feb 3 23:15:01 2016
@@ -1,272 +1,10 @@
-import glob
-import lit
-import lit.formats
-import lit.util
+import site
import os
-import pipes
-import re
-import shlex
-from lit.formats import FileBasedTest
-from lit.TestRunner import executeScript, executeScriptInternal, \
- parseIntegratedTestScriptCommands, getDefaultSubstitutions, \
- applySubstitutions, getTempPaths
-from lit import Test
-from lit.util import to_bytes, to_string
-try:
- from shlex import quote # python 3.3 and above
-except:
- from pipes import quote # python 3.2 and earlier
-
-
-def parseBenchmarkScript(test):
- """Scan a llvm-testsuite like benchmark .test script."""
- def parseShellCommand(script, ln):
- # Trim trailing whitespace.
- ln = ln.rstrip()
-
- # Collapse lines with trailing '\\'.
- if script and script[-1][-1] == '\\':
- script[-1] = script[-1][:-1] + ln
- else:
- script.append(ln)
-
- # Collect the test lines from the script.
- sourcepath = test.getSourcePath()
- runscript = []
- verifyscript = []
- keywords = ['RUN:', 'VERIFY:']
- for line_number, command_type, ln in \
- parseIntegratedTestScriptCommands(sourcepath, keywords):
- if command_type == 'RUN':
- parseShellCommand(runscript, ln)
- elif command_type == 'VERIFY':
- parseShellCommand(verifyscript, ln)
- else:
- raise ValueError("unknown script command type: %r" % (
- command_type,))
-
- # Verify the script contains a run line.
- if runscript == []:
- return lit.Test.Result(Test.UNRESOLVED, "Test has no RUN: line!")
-
- # Check for unterminated run lines.
- for script in runscript, verifyscript:
- if script and script[-1][-1] == '\\':
- return lit.Test.Result(Test.UNRESOLVED,
- "Test has unterminated RUN/VERIFY lines (with '\\')")
-
- return runscript, verifyscript
-
-
-def getUserTimeFromTimeOutput(f):
- with open(f) as fd:
- l = [l for l in fd.readlines()
- if l.startswith('user')]
- assert len(l) == 1
-
- m = re.match(r'user\s+([0-9.]+)', l[0])
- return float(m.group(1))
-
-
-def collectCompileTime(test):
- # TODO: This is not correct yet as the directory may contain .o.time files
- # of multiple benchmarks in the case of SingleSource tests.
- compile_time = 0.0
- basepath = os.path.dirname(test.getFilePath())
- for path, subdirs, files in os.walk(basepath):
- for file in files:
- if file.endswith('.o.time'):
- fullpath = os.path.join(path, file)
- compile_time += getUserTimeFromTimeOutput(fullpath)
- return compile_time
-
-
-def runScript(test, litConfig, script, tmpBase, useExternalSh=True):
- execdir = os.path.dirname(test.getExecPath())
- if useExternalSh:
- res = executeScript(test, litConfig, tmpBase, script, execdir)
- else:
- res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
- return res
-
-
-def prepareRunSafely(config, commandline, outfile):
- stdin = None
- stdout = None
- stderr = None
- workdir = None
- tokens = shlex.split(commandline)
- # Parse "< INPUTFILE", "> OUTFILE", "2> OUTFILE" patterns
- i = 0
- while i < len(tokens):
- if tokens[i] == "<" and i+1 < len(tokens):
- stdin = tokens[i+1]
- del tokens[i+1]
- del tokens[i]
- continue
- elif tokens[i] == ">" and i+1 < len(tokens):
- stdout = tokens[i+1]
- del tokens[i+1]
- del tokens[i]
- continue
- elif tokens[i] == "2>" and i+1 < len(tokens):
- stderr = tokens[i+1]
- del tokens[i+1]
- del tokens[i]
- continue
- if i+2 < len(tokens) and tokens[i] == "cd" and tokens[i+2] == ";":
- workdir = tokens[i+1]
- del tokens[i+2]
- del tokens[i+1]
- del tokens[i]
- continue
- i += 1
-
- runsafely = "%s/RunSafely.sh" % config.test_suite_root
- runsafely_prefix = [runsafely]
- if workdir is not None:
- runsafely_prefix += ["-d", workdir]
- timeit = "%s/tools/timeit" % config.test_source_root
- if config.remote_host:
- timeit = "%s/tools/timeit-target" % config.test_source_root
- runsafely_prefix += ["-r", config.remote_host]
- if config.remote_user:
- runsafely_prefix += ["-l", config.remote_user]
- if config.remote_client:
- runsafely_prefix += ["-rc", config.remote_client]
- if config.remote_port:
- runsafely_prefix += ["-rp", config.remote_port]
- if config.run_under:
- runsafely_prefix += ["-u", config.run_under]
- if not config.traditional_output:
- runsafely_prefix += ["-n"]
- if stdout is not None:
- runsafely_prefix += ["-o", stdout]
- if stderr is not None:
- runsafely_prefix += ["-e", stderr]
- else:
- if stdout is not None or stderr is not None:
- raise Exception("separate stdout/stderr redirection not possible with traditional output")
- timeout = "7200"
- if stdin is None:
- stdin = "/dev/null"
- runsafely_prefix += ["-t", timeit, timeout, stdin, outfile]
-
- new_commandline = " ".join(map(quote, runsafely_prefix + tokens))
- return new_commandline
-
-
-def wrapScriptInRunSafely(config, script, outfile):
- adjusted_script = []
- for line in script:
- line = prepareRunSafely(config, line, outfile)
- adjusted_script.append(line)
- return adjusted_script
-
-
-def wrapScriptInPerf(config, runscript, tmpBase):
- profilefile = tmpBase + ".perf_data"
- profilescript = []
- for line in runscript:
- profilescript.append(
- ' '.join(
- ['perf record -e cycles,cache-misses,branch-misses -o',
- profilefile, line]))
- return profilescript
-
-
-class TestSuiteTest(FileBasedTest):
- def __init__(self):
- super(TestSuiteTest, self).__init__()
-
- def execute(self, test, litConfig):
- config = test.config
- if config.unsupported:
- return (Test.UNSUPPORTED, 'Test is unsupported')
-
- # Parse benchmark script
- res = parseBenchmarkScript(test)
- if isinstance(res, lit.Test.Result):
- return res
- if litConfig.noExecute:
- return lit.Test.Result(Test.PASS)
- runscript, verifyscript = res
-
- tmpDir, tmpBase = getTempPaths(test)
- outfile = tmpBase + ".out"
- substitutions = getDefaultSubstitutions(test, tmpDir, tmpBase)
- substitutions += [('%o', outfile)]
- runscript = applySubstitutions(runscript, substitutions)
- verifyscript = applySubstitutions(verifyscript, substitutions)
-
- if litConfig.params.get('profile') == 'perf':
- profilescript = wrapScriptInPerf(config, runscript, tmpBase)
- profilescript = wrapScriptInRunSafely(config, profilescript,
- outfile=tmpBase+".perf.out")
-
- runscript = wrapScriptInRunSafely(config, runscript, outfile)
-
- # Create the output directory if it does not already exist.
- lit.util.mkdir_p(os.path.dirname(tmpBase))
-
- # Execute runscript (the "RUN:" part)
- output = ""
- n_runs = 1
- runtimes = []
- for n in range(n_runs):
- res = runScript(test, litConfig, runscript, tmpBase)
- if isinstance(res, lit.Test.Result):
- return res
-
- output += "\n" + "\n".join(runscript)
-
- out, err, exitCode, timeoutInfo = res
- if exitCode == Test.FAIL:
- # Only show command output in case of errors
- output += "\n" + out
- output += "\n" + err
- return lit.Test.Result(Test.FAIL, output)
-
- timefile = "%s.time" % (outfile,)
- try:
- runtime = getUserTimeFromTimeOutput(timefile)
- runtimes.append(runtime)
- except IOError:
- pass
-
- if litConfig.params.get('profile') == 'perf':
- res = runScript(test, litConfig, profilescript, tmpBase)
- out, err, exitCode, timeoutInfo = res
-
- # Run verification script (the "VERIFY:" part)
- if len(verifyscript) > 0:
- res = runScript(test, litConfig, verifyscript, tmpBase)
- if isinstance(res, lit.Test.Result):
- return res
- out, err, exitCode, timeoutInfo = res
-
- output += "\n" + "\n".join(verifyscript)
- if exitCode != 0:
- output += "\n" + out
- output += "\n" + err
- return lit.Test.Result(Test.FAIL, output)
-
- # Put metrics into the test result.
- result = lit.Test.Result(Test.PASS, output)
- if len(runtimes) > 0:
- result.addMetric('exec_time', lit.Test.toMetricValue(runtimes[0]))
- try:
- compile_time = collectCompileTime(test)
- result.addMetric('compile_time',
- lit.Test.toMetricValue(compile_time))
- except IOError:
- pass
-
- return result
-
+site.addsitedir(os.path.dirname(__file__))
+from litsupport import test
config.name = 'test-suite'
-config.test_format = TestSuiteTest()
+config.test_format = test.TestSuiteTest()
config.suffixes = ['.test']
config.excludes = ['ABI-Testsuite']
config.traditional_output = True
Added: test-suite/trunk/litsupport/__init__.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/__init__.py?rev=259753&view=auto
==============================================================================
(empty)
Added: test-suite/trunk/litsupport/compiletime.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/compiletime.py?rev=259753&view=auto
==============================================================================
--- test-suite/trunk/litsupport/compiletime.py (added)
+++ test-suite/trunk/litsupport/compiletime.py Wed Feb 3 23:15:01 2016
@@ -0,0 +1,22 @@
+import glob
+import lit.Test
+import logging
+import os
+import timeit
+
+
+def collect(test, result):
+ # TODO: This is not correct yet as the directory may contain .o.time files
+ # of multiple benchmarks in the case of SingleSource tests.
+ try:
+ compile_time = 0.0
+ basepath = os.path.dirname(test.getFilePath())
+ for path, subdirs, files in os.walk(basepath):
+ for file in files:
+ if file.endswith('.o.time'):
+ fullpath = os.path.join(path, file)
+ compile_time += timeit.getUserTime(fullpath)
+ except IOError:
+ logging.info("Could not find compiletime for %s" % test.getFullName())
+ return
+ result.addMetric('compile_time', lit.Test.toMetricValue(compile_time))
Added: test-suite/trunk/litsupport/perf.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/perf.py?rev=259753&view=auto
==============================================================================
--- test-suite/trunk/litsupport/perf.py (added)
+++ test-suite/trunk/litsupport/perf.py Wed Feb 3 23:15:01 2016
@@ -0,0 +1,9 @@
+def wrapScript(config, runscript, tmpBase):
+ profilefile = tmpBase + ".perf_data"
+ profilescript = []
+ for line in runscript:
+ profilescript.append(
+ ' '.join(
+ ['perf record -e cycles,cache-misses,branch-misses -o',
+ profilefile, line]))
+ return profilescript
Added: test-suite/trunk/litsupport/runsafely.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/runsafely.py?rev=259753&view=auto
==============================================================================
--- test-suite/trunk/litsupport/runsafely.py (added)
+++ test-suite/trunk/litsupport/runsafely.py Wed Feb 3 23:15:01 2016
@@ -0,0 +1,79 @@
+import shlex
+try:
+ from shlex import quote # python 3.3 and above
+except:
+ from pipes import quote # python 3.2 and earlier
+
+
+def prepareRunSafely(config, commandline, outfile):
+ stdin = None
+ stdout = None
+ stderr = None
+ workdir = None
+ tokens = shlex.split(commandline)
+ # Parse "< INPUTFILE", "> OUTFILE", "2> OUTFILE" patterns
+ i = 0
+ while i < len(tokens):
+ if tokens[i] == "<" and i+1 < len(tokens):
+ stdin = tokens[i+1]
+ del tokens[i+1]
+ del tokens[i]
+ continue
+ elif tokens[i] == ">" and i+1 < len(tokens):
+ stdout = tokens[i+1]
+ del tokens[i+1]
+ del tokens[i]
+ continue
+ elif tokens[i] == "2>" and i+1 < len(tokens):
+ stderr = tokens[i+1]
+ del tokens[i+1]
+ del tokens[i]
+ continue
+ if i+2 < len(tokens) and tokens[i] == "cd" and tokens[i+2] == ";":
+ workdir = tokens[i+1]
+ del tokens[i+2]
+ del tokens[i+1]
+ del tokens[i]
+ continue
+ i += 1
+
+ runsafely = "%s/RunSafely.sh" % config.test_suite_root
+ runsafely_prefix = [runsafely]
+ if workdir is not None:
+ runsafely_prefix += ["-d", workdir]
+ timeit = "%s/tools/timeit" % config.test_source_root
+ if config.remote_host:
+ timeit = "%s/tools/timeit-target" % config.test_source_root
+ runsafely_prefix += ["-r", config.remote_host]
+ if config.remote_user:
+ runsafely_prefix += ["-l", config.remote_user]
+ if config.remote_client:
+ runsafely_prefix += ["-rc", config.remote_client]
+ if config.remote_port:
+ runsafely_prefix += ["-rp", config.remote_port]
+ if config.run_under:
+ runsafely_prefix += ["-u", config.run_under]
+ if not config.traditional_output:
+ runsafely_prefix += ["-n"]
+ if stdout is not None:
+ runsafely_prefix += ["-o", stdout]
+ if stderr is not None:
+ runsafely_prefix += ["-e", stderr]
+ else:
+ if stdout is not None or stderr is not None:
+ raise Exception("separate stdout/stderr redirection not possible with traditional output")
+ timeout = "7200"
+ if stdin is None:
+ stdin = "/dev/null"
+ runsafely_prefix += ["-t", timeit, timeout, stdin, outfile]
+
+ new_commandline = " ".join(map(quote, runsafely_prefix + tokens))
+ return new_commandline
+
+
+def wrapScript(config, script, outfile):
+ adjusted_script = []
+ for line in script:
+ line = prepareRunSafely(config, line, outfile)
+ adjusted_script.append(line)
+ return adjusted_script
Added: test-suite/trunk/litsupport/test.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/test.py?rev=259753&view=auto
==============================================================================
--- test-suite/trunk/litsupport/test.py (added)
+++ test-suite/trunk/litsupport/test.py Wed Feb 3 23:15:01 2016
@@ -0,0 +1,108 @@
+import os
+import lit
+import lit.util
+from lit.formats import FileBasedTest
+from lit.TestRunner import executeScript, executeScriptInternal, \
+ parseIntegratedTestScriptCommands, getDefaultSubstitutions, \
+ applySubstitutions, getTempPaths
+from lit import Test
+from lit.util import to_bytes, to_string
+import testscript
+import runsafely
+import perf
+import compiletime
+import timeit
+
+
+def runScript(test, litConfig, script, tmpBase, useExternalSh=True):
+ execdir = os.path.dirname(test.getExecPath())
+ if useExternalSh:
+ res = executeScript(test, litConfig, tmpBase, script, execdir)
+ else:
+ res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
+ return res
+
+
+class TestSuiteTest(FileBasedTest):
+ def __init__(self):
+ super(TestSuiteTest, self).__init__()
+
+ def execute(self, test, litConfig):
+ config = test.config
+ if config.unsupported:
+ return (Test.UNSUPPORTED, 'Test is unsupported')
+
+ # Parse benchmark script
+ res = testscript.parse(test.getSourcePath())
+ if litConfig.noExecute:
+ return lit.Test.Result(Test.PASS)
+ runscript, verifyscript = res
+
+ # Apply the usual lit substitutions (%s, %S, %p, %T, ...)
+ tmpDir, tmpBase = getTempPaths(test)
+ outfile = tmpBase + ".out"
+ substitutions = getDefaultSubstitutions(test, tmpDir, tmpBase)
+ substitutions += [('%o', outfile)]
+ runscript = applySubstitutions(runscript, substitutions)
+ verifyscript = applySubstitutions(verifyscript, substitutions)
+
+ profilescript = None
+ if litConfig.params.get('profile') == 'perf':
+ profilescript = perf.wrapScript(config, runscript, tmpBase)
+ profilescript = runsafely.wrapScript(config, profilescript,
+ outfile=tmpBase+".perf.out")
+
+ runscript = runsafely.wrapScript(config, runscript, outfile)
+
+ # Create the output directory if it does not already exist.
+ lit.util.mkdir_p(os.path.dirname(tmpBase))
+
+ # Execute runscript (the "RUN:" part)
+ output = ""
+ n_runs = 1
+ runtimes = []
+ for n in range(n_runs):
+ res = runScript(test, litConfig, runscript, tmpBase)
+ if isinstance(res, lit.Test.Result):
+ return res
+
+ output += "\n" + "\n".join(runscript)
+
+ out, err, exitCode, timeoutInfo = res
+ if exitCode == Test.FAIL:
+ # Only show command output in case of errors
+ output += "\n" + out
+ output += "\n" + err
+ return lit.Test.Result(Test.FAIL, output)
+
+ timefile = "%s.time" % (outfile,)
+ try:
+ runtime = timeit.getUserTime(timefile)
+ runtimes.append(runtime)
+ except IOError:
+ pass
+
+ if profilescript:
+ res = runScript(test, litConfig, profilescript, tmpBase)
+ out, err, exitCode, timeoutInfo = res
+
+ # Run verification script (the "VERIFY:" part)
+ if len(verifyscript) > 0:
+ res = runScript(test, litConfig, verifyscript, tmpBase)
+ if isinstance(res, lit.Test.Result):
+ return res
+ out, err, exitCode, timeoutInfo = res
+
+ output += "\n" + "\n".join(verifyscript)
+ if exitCode != 0:
+ output += "\n" + out
+ output += "\n" + err
+ return lit.Test.Result(Test.FAIL, output)
+
+ # Put metrics into the test result.
+ result = lit.Test.Result(Test.PASS, output)
+ if len(runtimes) > 0:
+ result.addMetric('exec_time', lit.Test.toMetricValue(runtimes[0]))
+ compiletime.collect(test, result)
+
+ return result
Added: test-suite/trunk/litsupport/testscript.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/testscript.py?rev=259753&view=auto
==============================================================================
--- test-suite/trunk/litsupport/testscript.py (added)
+++ test-suite/trunk/litsupport/testscript.py Wed Feb 3 23:15:01 2016
@@ -0,0 +1,46 @@
+# Code to parse .test files
+from lit.TestRunner import parseIntegratedTestScriptCommands
+import lit.Test
+
+
+def _parseShellCommand(script, ln):
+ # Trim trailing whitespace.
+ ln = ln.rstrip()
+
+ # Collapse lines with trailing '\\'.
+ if script and script[-1][-1] == '\\':
+ script[-1] = script[-1][:-1] + ln
+ else:
+ script.append(ln)
+
+
+def parse(filename):
+ """Parse a .test file as used in the llvm test-suite.
+    The file comprises a number of lines starting with RUN: and VERIFY:
+    specifying shell commands to run the benchmark and verify the result.
+ Returns a tuple with two arrays for the run and verify commands."""
+ # Collect the test lines from the script.
+ runscript = []
+ verifyscript = []
+ keywords = ['RUN:', 'VERIFY:']
+ for line_number, command_type, ln in \
+ parseIntegratedTestScriptCommands(filename, keywords):
+ if command_type == 'RUN':
+ _parseShellCommand(runscript, ln)
+ elif command_type == 'VERIFY':
+ _parseShellCommand(verifyscript, ln)
+ else:
+ raise ValueError("unknown script command type: %r" % (
+ command_type,))
+
+ # Verify the script contains a run line.
+ if runscript == []:
+ return ValueError("Test has no RUN: line!")
+
+ # Check for unterminated run lines.
+ for script in runscript, verifyscript:
+ if script and script[-1][-1] == '\\':
+ return ValueError("Test has unterminated RUN/VERIFY lines " +
+ "(ending with '\\')")
+
+ return runscript, verifyscript
Added: test-suite/trunk/litsupport/timeit.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/timeit.py?rev=259753&view=auto
==============================================================================
--- test-suite/trunk/litsupport/timeit.py (added)
+++ test-suite/trunk/litsupport/timeit.py Wed Feb 3 23:15:01 2016
@@ -0,0 +1,12 @@
+import re
+
+
+def getUserTime(filename):
+    """Extract the user time from a .time file produced by timeit"""
+ with open(filename) as fd:
+ l = [l for l in fd.readlines()
+ if l.startswith('user')]
+ assert len(l) == 1
+
+ m = re.match(r'user\s+([0-9.]+)', l[0])
+ return float(m.group(1))
More information about the llvm-commits
mailing list