[test-suite] r251602 - Introduce RUN: and VERIFY: lines in .test files

Matthias Braun via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 28 20:46:29 PDT 2015


Author: matze
Date: Wed Oct 28 22:46:29 2015
New Revision: 251602

URL: http://llvm.org/viewvc/llvm-project?rev=251602&view=rev
Log:
Introduce RUN: and VERIFY: lines in .test files

This split allows the lit test runner to abstract away the benchmark-running
machinery (RunSafely.sh, timeit) behind the RUN: lines, streamlining
execution and simplifying the tests.
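
For illustration, a configured .test file under the new scheme looks
something like this (the paths, options, and plain-cat output filter are
hypothetical); lit now wraps the RUN: command with the RunSafely.sh/timeit
machinery itself and substitutes %o with the captured program output before
executing the VERIFY: lines:

  ; RUN: /build/SingleSource/Benchmarks/Foo/foo --size 100 < /src/foo.in
  ; VERIFY: cat %o
  ; VERIFY: diff %o /src/foo.reference_output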

Differential Revision: http://reviews.llvm.org/D14106

Modified:
    test-suite/trunk/cmake/lit-test-template.in
    test-suite/trunk/lit.cfg

Modified: test-suite/trunk/cmake/lit-test-template.in
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/cmake/lit-test-template.in?rev=251602&r1=251601&r2=251602&view=diff
==============================================================================
--- test-suite/trunk/cmake/lit-test-template.in (original)
+++ test-suite/trunk/cmake/lit-test-template.in Wed Oct 28 22:46:29 2015
@@ -1,3 +1,3 @@
-; RUN: ${RUNUNDER} ${CMAKE_SOURCE_DIR}/RunSafely.sh -t ${TIMEIT} 7200 ${STDIN_FILENAME} %t ${CMAKE_CURRENT_BINARY_DIR}/${exename} ${RUN_OPTIONS}
-; RUN: ${PROGRAM_OUTPUT_FILTER} %t
-; RUN: ${DIFFPROG} %t ${REFERENCE_OUTPUT}
+; RUN: ${CMAKE_CURRENT_BINARY_DIR}/${exename} ${RUN_OPTIONS} < ${STDIN_FILENAME}
+; VERIFY: ${PROGRAM_OUTPUT_FILTER} %o
+; VERIFY: ${DIFFPROG} %o ${REFERENCE_OUTPUT}

Modified: test-suite/trunk/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/lit.cfg?rev=251602&r1=251601&r2=251602&view=diff
==============================================================================
--- test-suite/trunk/lit.cfg (original)
+++ test-suite/trunk/lit.cfg Wed Oct 28 22:46:29 2015
@@ -1,45 +1,171 @@
 import lit.formats
+import lit.util
 import lit
 import os, glob, re
+import shlex
+import pipes
+from lit.formats import FileBasedTest
+from lit.TestRunner import executeScript, executeScriptInternal, parseIntegratedTestScriptCommands, getDefaultSubstitutions, applySubstitutions, getTempPaths
+from lit import Test
+from lit.util import to_bytes, to_string
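+# 'quote' shell-escapes a single argument when we reassemble command lines.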
+try:
+    from shlex import quote # python 3.3 and above
+except ImportError:
+    from pipes import quote # python 3.2 and earlier
+
+def parseBenchmarkScript(test):
+    """Scan an llvm test-suite style benchmark .test script.
+
+    Returns a (runscript, verifyscript) pair on success, or a
+    lit.Test.Result describing the problem."""
+    def parseShellCommand(script, ln):
+        # Trim trailing whitespace.
+        ln = ln.rstrip()
+
+        # Collapse lines with trailing '\\'.
+        if script and script[-1][-1] == '\\':
+            script[-1] = script[-1][:-1] + ln
+        else:
+            script.append(ln)
+
+    # Collect the test lines from the script.
+    sourcepath = test.getSourcePath()
+    runscript = []
+    verifyscript = []
+    keywords = ['RUN:', 'VERIFY:']
+    for line_number, command_type, ln in \
+            parseIntegratedTestScriptCommands(sourcepath, keywords):
+        if command_type == 'RUN':
+            parseShellCommand(runscript, ln)
+        elif command_type == 'VERIFY':
+            parseShellCommand(verifyscript, ln)
+        else:
+            raise ValueError("unknown script command type: %r" % (
+                    command_type,))
+
+    # Verify the script contains a run line.
+    if not runscript:
+        return lit.Test.Result(Test.UNRESOLVED, "Test has no RUN: line!")
+
+    # Check for unterminated run lines.
+    for script in runscript, verifyscript:
+        if script and script[-1][-1] == '\\':
+            return lit.Test.Result(Test.UNRESOLVED,
+                                   "Test has unterminated RUN/VERIFY lines (with '\\')")
+
+    return runscript, verifyscript
 
 def getUserTimeFromTimeOutput(f):
     with open(f) as fd:
         l = [l for l in fd.readlines()
              if l.startswith('user')]
     assert len(l) == 1
-        
+
     m = re.match(r'user\s+([0-9.]+)', l[0])
     return float(m.group(1))
 
-class TestSuiteTest(lit.formats.ShTest):
+def collectTimes(test, timefile, result):
+    time = getUserTimeFromTimeOutput(timefile)
+    result.addMetric('exec_time', lit.Test.toMetricValue(time))
+
+    # For completeness, attempt to find compile time information too.
+    compile_time = 0.0
+    basepath = os.path.dirname(test.getFilePath())
+    for path, subdirs, files in os.walk(basepath):
+        for file in files:
+            if file.endswith('.o.time'):
+                compile_time += getUserTimeFromTimeOutput(os.path.join(path, file))
+    result.addMetric('compile_time', lit.Test.toMetricValue(compile_time))
+
+def runScript(test, litConfig, script, tmpBase, useExternalSh = False):
+    # Create the output directory if it does not already exist.
+    lit.util.mkdir_p(os.path.dirname(tmpBase))
+
+    execdir = os.path.dirname(test.getExecPath())
+    if useExternalSh:
+        res = executeScript(test, litConfig, tmpBase, script, execdir)
+    else:
+        res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
+    if isinstance(res, lit.Test.Result):
+        return res
+
+    out, err, exitCode = res
+    # Form the output log.
+    output = """Script:\n--\n%s\n--\nExit Code: %d\n\n""" % (
+        '\n'.join(script), exitCode)
+
+    # Append the outputs, if present.
+    if out:
+        output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
+    if err:
+        output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)
+
+    if exitCode == 0:
+        status = Test.PASS
+    else:
+        status = Test.FAIL
+
+    result = lit.Test.Result(status, output)
+    return result
+
+class TestSuiteTest(FileBasedTest):
     def __init__(self):
-        lit.formats.ShTest.__init__(self, False)
+        super(TestSuiteTest, self).__init__()
         
     def execute(self, test, litConfig):
-        result = lit.formats.ShTest.execute(self, test, litConfig)
-        basepath = os.path.dirname(test.getFilePath())
-        
-        if not result.code.isFailure:
-            # Collect the timing information.
-            timeglob = os.path.join(basepath, 'Output', '*.time')
-            times = glob.glob(timeglob)
-            assert len(times) == 1
-            time = getUserTimeFromTimeOutput(times[0])
-
-            result.addMetric('exec_time', lit.Test.toMetricValue(time))
-
-        # For completeness, attempt to find compile time information too.
-        compile_time = 0.0
-        for path, subdirs, files in os.walk(basepath):
-            for file in files:
-                if file.endswith('.o.time'):
-                    compile_time += getUserTimeFromTimeOutput(os.path.join(path, file))
-        result.addMetric('compile_time', lit.Test.toMetricValue(compile_time))
-        
-        return result
+        if test.config.unsupported:
+            return lit.Test.Result(Test.UNSUPPORTED, 'Test is unsupported')
 
-config.name = 'test-suite'
+        # Parse benchmark script
+        res = parseBenchmarkScript(test)
+        if isinstance(res, lit.Test.Result):
+            return res
+        if litConfig.noExecute:
+            return lit.Test.Result(Test.PASS)
+        runscript, verifyscript = res
+
+        tmpDir, tmpBase = getTempPaths(test)
+        outfile = tmpBase + ".out"
+        substitutions = getDefaultSubstitutions(test, tmpDir, tmpBase)
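+        # '%o' names the file that receives the program's output; the
+        # VERIFY: lines filter and diff this file.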
+        substitutions += [('%o', outfile)]
+        runscript = applySubstitutions(runscript, substitutions)
+        verifyscript = applySubstitutions(verifyscript, substitutions)
+
+        # Wrap each RUN: line with the RunSafely.sh/timeit machinery.
+        def prependRunSafely(line):
+            # Search for "< INPUTFILE" in the line and use that for stdin
+            stdin = "/dev/null"
+            commandline = shlex.split(line)
+            for i in range(len(commandline)):
+                if commandline[i] == "<" and i+1 < len(commandline):
+                    stdin = commandline[i+1]
+                    del commandline[i+1]
+                    del commandline[i]
+                    break
+            timeit = config.test_source_root + "/tools/timeit"
+            timeout = "7200"
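+            # RunSafely.sh runs the command under timeit with the given
+            # timeout (in seconds), feeding it 'stdin' and capturing the
+            # program output in 'outfile' (timings land in 'outfile'.time).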
+            runsafely_prefix = ["%s/RunSafely.sh" % config.test_suite_root,
+                                "-t", timeit, timeout, stdin, outfile]
+
+            line = " ".join(map(quote, runsafely_prefix + commandline))
+            return line
+        # Materialize the list; runScript both executes and logs it.
+        runscript = list(map(prependRunSafely, runscript))
+
+        # Execute the RUN: part of the script n_runs times.
+        n_runs = 1
+        for n in range(n_runs):
+            runresult = runScript(test, litConfig, runscript, tmpBase)
+            if runresult.code == Test.FAIL:
+                return runresult
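+            # Harvest the user time that timeit recorded for this run.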
+            timefile = "%s.time" % (outfile,)
+            collectTimes(test, timefile, runresult)
+            # TODO: aggregate times of multiple runs
+
+        # Run verification of results
+        verifyresult = runScript(test, litConfig, verifyscript, tmpBase)
+        if verifyresult.code == Test.FAIL:
+            return verifyresult
 
+        return runresult
+
+config.name = 'test-suite'
 config.test_format = TestSuiteTest()
 config.suffixes = ['.test']
 config.excludes = ['ABI-Testsuite']
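
As a usage sketch (the command line and output shape below are assumptions,
not part of this commit), running a single benchmark through lit exercises
the new RUN:/VERIFY: flow and attaches the collected numbers as metrics:

  $ llvm-lit -v SingleSource/Benchmarks/Foo/foo.test
  PASS: test-suite :: SingleSource/Benchmarks/Foo/foo.test (1 of 1)

with 'exec_time' and 'compile_time' reported on the test result.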