[test-suite] r263532 - lit: Big refactoring, introduce TestPlan object

Matthias Braun via llvm-commits <llvm-commits at lists.llvm.org>
Mon Mar 14 22:12:22 PDT 2016


Author: matze
Date: Tue Mar 15 00:12:22 2016
New Revision: 263532

URL: http://llvm.org/viewvc/llvm-project?rev=263532&view=rev
Log:
lit: Big refactoring, introduce TestPlan object

The TestPlan object describes the sequence of steps that are executed to
run a benchmark and collect its metrics. This allows for a cleaner
separation of the different aspects into modules.

For example, the codesize and compiletime modules can add a metric collection
step to the test plan, while the profilegen module can modify every command
line of the runscript and add a later step that merges the profile data. All
of these aspects are hooked in through a unified 'module.mutatePlan()' call
from test.py; a minimal sketch of this pattern follows below.

This also unifies a number of code paths that deal with error handling and
reporting, so errors in metric collection steps should now be displayed
properly.
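
To make the module/TestPlan interaction concrete, here is a minimal,
self-contained sketch of the pattern. It intentionally drops the lit
dependency, returns a plain number instead of lit.Test.toMetricValue(), and
FakeContext is a hypothetical stand-in for the real TestContext:

    import os


    class TestPlan(object):
        """Mirrors litsupport/testplan.py: run/verify scripts plus a list of
        pluggable metric collectors."""
        def __init__(self, runscript, verifyscript, metricscripts):
            self.runscript = runscript
            self.verifyscript = verifyscript
            self.metricscripts = metricscripts
            self.metric_collectors = []
            self.profilescript = []


    # A module contributes a metric collector through mutatePlan(), in the
    # same shape as codesize.py below (plain int instead of a lit metric).
    def _getCodeSize(context):
        return {'size': os.path.getsize(context.executable)}


    def mutatePlan(context, plan):
        plan.metric_collectors.append(_getCodeSize)


    class FakeContext(object):
        """Hypothetical stand-in for the real TestContext."""
        executable = __file__


    if __name__ == '__main__':
        context = FakeContext()
        plan = TestPlan(runscript=['echo run-the-benchmark'],
                        verifyscript=[], metricscripts={})
        mutatePlan(context, plan)                  # test.py does this per module
        metrics = {}
        for collector in plan.metric_collectors:   # as in executePlan()
            metrics.update(collector(context))
        print(metrics)

The real executePlan() in testplan.py below additionally runs the RUN:/VERIFY:
scripts, the METRIC: scripts and the profilescript before the collected
metrics are turned into a lit.Test.Result.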

Added:
    test-suite/trunk/litsupport/testplan.py
Removed:
    test-suite/trunk/litsupport/testscript.py
Modified:
    test-suite/trunk/litsupport/codesize.py
    test-suite/trunk/litsupport/compiletime.py
    test-suite/trunk/litsupport/hash.py
    test-suite/trunk/litsupport/perf.py
    test-suite/trunk/litsupport/profilegen.py
    test-suite/trunk/litsupport/runsafely.py
    test-suite/trunk/litsupport/test.py

Modified: test-suite/trunk/litsupport/codesize.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/codesize.py?rev=263532&r1=263531&r2=263532&view=diff
==============================================================================
--- test-suite/trunk/litsupport/codesize.py (original)
+++ test-suite/trunk/litsupport/codesize.py Tue Mar 15 00:12:22 2016
@@ -1,13 +1,12 @@
 import lit.Test
 import logging
-import shellcommand
 import os.path
 
 
-def collect(context, result):
-    try:
-        size = os.path.getsize(context.executable)
-        result.addMetric('size', lit.Test.toMetricValue(size))
-    except:
-        logging.info('Could not calculate filesize for %s' %
-                     context.executable)
+def _getCodeSize(context):
+    size = os.path.getsize(context.executable)
+    return {'size': lit.Test.toMetricValue(size)}
+
+
+def mutatePlan(context, plan):
+    plan.metric_collectors.append(_getCodeSize)

Modified: test-suite/trunk/litsupport/compiletime.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/compiletime.py?rev=263532&r1=263531&r2=263532&view=diff
==============================================================================
--- test-suite/trunk/litsupport/compiletime.py (original)
+++ test-suite/trunk/litsupport/compiletime.py Tue Mar 15 00:12:22 2016
@@ -1,23 +1,20 @@
-import glob
 import lit.Test
-import logging
 import os
 import timeit
 
 
-def collect(context, result):
+def _getCompileTime(context):
     # TODO: This is not correct yet as the directory may contain .o.time files
     # of multiple benchmarks in the case of SingleSource tests.
-    try:
-        compile_time = 0.0
-        basepath = os.path.dirname(context.test.getFilePath())
-        for path, subdirs, files in os.walk(basepath):
-            for file in files:
-                if file.endswith('.o.time'):
-                    fullpath = os.path.join(path, file)
-                    compile_time += timeit.getUserTime(fullpath)
-    except IOError:
-        logging.info("Could not find compiletime for %s" %
-                     context.test.getFullName())
-        return
-    result.addMetric('compile_time', lit.Test.toMetricValue(compile_time))
+    compile_time = 0.0
+    basepath = os.path.dirname(context.test.getFilePath())
+    for path, subdirs, files in os.walk(basepath):
+        for file in files:
+            if file.endswith('.o.time'):
+                fullpath = os.path.join(path, file)
+                compile_time += timeit.getUserTime(fullpath)
+    return {'compile_time': lit.Test.toMetricValue(compile_time)}
+
+
+def mutatePlan(context, plan):
+    plan.metric_collectors.append(_getCompileTime)

Modified: test-suite/trunk/litsupport/hash.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/hash.py?rev=263532&r1=263531&r2=263532&view=diff
==============================================================================
--- test-suite/trunk/litsupport/hash.py (original)
+++ test-suite/trunk/litsupport/hash.py Tue Mar 15 00:12:22 2016
@@ -1,4 +1,5 @@
 from lit.Test import toMetricValue
+import lit.Test
 import hashlib
 import logging
 import subprocess
@@ -45,6 +46,10 @@ def same_as_previous(context):
     return False
 
 
-def collect(context, result):
+def _getHash(context):
+    return {'hash': lit.Test.toMetricValue(context.executable_hash)}
+
+
+def mutatePlan(context, plan):
     if context.executable_hash is not None:
-        result.addMetric('hash', toMetricValue(context.executable_hash))
+        plan.metric_collectors.append(_getHash)

Modified: test-suite/trunk/litsupport/perf.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/perf.py?rev=263532&r1=263531&r2=263532&view=diff
==============================================================================
--- test-suite/trunk/litsupport/perf.py (original)
+++ test-suite/trunk/litsupport/perf.py Tue Mar 15 00:12:22 2016
@@ -1,9 +1,19 @@
-def wrapScript(context, runscript):
+import shellcommand
+import testplan
+
+
+def mutateCommandLine(context, commandline):
     profilefile = context.tmpBase + ".perf_data"
-    profilescript = []
-    for line in runscript:
-        profilescript.append(
-            ' '.join(
-                ['perf record -e cycles,cache-misses,branch-misses -o',
-                 profilefile, line]))
-    return profilescript
+    cmd = shellcommand.parse(commandline)
+    cmd.wrap('perf', [
+        'record',
+        '-e', 'cycles,cache-misses,branch-misses',
+        '-o', profilefile
+    ])
+    return cmd.toCommandline()
+
+
+def mutatePlan(context, plan):
+    script = testplan.mutateScript(context, context.original_runscript,
+                                   mutateCommandLine)
+    plan.profilescript += script

Modified: test-suite/trunk/litsupport/profilegen.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/profilegen.py?rev=263532&r1=263531&r2=263532&view=diff
==============================================================================
--- test-suite/trunk/litsupport/profilegen.py (original)
+++ test-suite/trunk/litsupport/profilegen.py Tue Mar 15 00:12:22 2016
@@ -1,31 +1,26 @@
 import shellcommand
-try:
-    from shlex import quote  # python 3.3 and above
-except:
-    from pipes import quote  # python 3.2 and earlier
+import testplan
 
 
-def wrapScript(context, script):
+def mutateCommandline(context, commandline):
     """Adjust runscript to set a different value to the LLVM_PROFILE_FILE
     environment variable for each execution."""
-    i = 0
-    adjusted_script = []
+    profilefile = context.tmpBase + ".profraw"
+    prefix = "env LLVM_PROFILE_FILE=%s " % profilefile
+    context.profilefiles.append(profilefile)
+    return prefix + commandline
+
+
+def mutateScript(context, script):
+    return testplan.mutateScript(context, script, mutateCommandline)
+
+
+def mutatePlan(context, plan):
     context.profilefiles = []
-    for line in script:
-        number = ""
-        if len(script) > 1:
-            number = "-%s" % (i,)
-            i += 1
-        profilefile = "%s%s.profraw" % (context.tmpBase, number)
-        prefix = "LLVM_PROFILE_FILE=%s " % quote(profilefile)
-        context.profilefiles.append(profilefile)
-        adjusted_script.append(prefix + line)
-    return adjusted_script
-
-
-def getMergeProfilesScript(context):
-    datafile = context.executable + ".profdata"
-    mergecmd = [context.config.llvm_profdata, 'merge', '-output=%s' % datafile]
-    mergecmd += context.profilefiles
-    cmdline = " ".join(map(quote, mergecmd))
-    return [cmdline]
+    # Adjust run steps to set LLVM_PROFILE_FILE
+    plan.runscript = mutateScript(context, plan.runscript)
+    # Run profdata merge at the end
+    profdatafile = context.executable + ".profdata"
+    args = ['merge', '-output=%s' % profdatafile] + context.profilefiles
+    mergecmd = shellcommand.ShellCommand(context.config.llvm_profdata, args)
+    plan.profilescript += [mergecmd.toCommandline()]

Modified: test-suite/trunk/litsupport/runsafely.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/runsafely.py?rev=263532&r1=263531&r2=263532&view=diff
==============================================================================
--- test-suite/trunk/litsupport/runsafely.py (original)
+++ test-suite/trunk/litsupport/runsafely.py Tue Mar 15 00:12:22 2016
@@ -1,20 +1,20 @@
-import shlex
-import timeit
+import lit.Test
 import shellcommand
-try:
-    from shlex import quote  # python 3.3 and above
-except:
-    from pipes import quote  # python 3.2 and earlier
+import testplan
+import timeit
 
 
-def prepareRunSafely(context, commandline, outfile):
+def mutateCommandline(context, commandline):
+    outfile = context.tmpBase + ".out"
+    timefile = outfile + ".time"
     config = context.config
     cmd = shellcommand.parse(commandline)
 
     runsafely = "%s/RunSafely.sh" % config.test_suite_root
-    runsafely_prefix = [runsafely]
+    runsafely_prefix = []
     if cmd.workdir is not None:
         runsafely_prefix += ["-d", cmd.workdir]
+        cmd.workdir = None
     timeit = "%s/tools/timeit" % config.test_source_root
     if config.remote_host:
         timeit = "%s/tools/timeit-target" % config.test_source_root
@@ -31,8 +31,10 @@ def prepareRunSafely(context, commandlin
         runsafely_prefix += ["-n"]
         if cmd.stdout is not None:
             runsafely_prefix += ["-o", cmd.stdout]
+            cmd.stdout = None
         if cmd.stderr is not None:
             runsafely_prefix += ["-e", cmd.stderr]
+            cmd.stderr = None
     else:
         if cmd.stdout is not None or cmd.stderr is not None:
             raise Exception("Separate stdout/stderr redirection not " +
@@ -40,36 +42,31 @@ def prepareRunSafely(context, commandlin
     timeout = "7200"
     if cmd.stdin is not None:
         stdin = cmd.stdin
+        cmd.stdin = None
     else:
         stdin = "/dev/null"
     runsafely_prefix += ["-t", timeit, timeout, stdin, outfile]
 
-    complete_command = runsafely_prefix + [cmd.executable] + cmd.arguments
-    new_commandline = " ".join(map(quote, complete_command))
-    return new_commandline
+    context.timefiles.append(outfile + ".time")
 
+    cmd.wrap(runsafely, runsafely_prefix)
+    return cmd.toCommandline()
 
-def wrapScript(context, script, suffix):
-    adjusted_script = []
-    outfile = context.tmpBase + suffix
-    # Set name of timefile so getTime() can use it
-    context.timefiles = []
-    i = 0
-    for line in script:
-        number = ""
-        if len(script) > 1:
-            number = "-%s" % (i,)
-            i += 1
-        outfile = context.tmpBase + number + suffix
-        context.timefiles.append(outfile + ".time")
-
-        line = prepareRunSafely(context, line, outfile)
-        adjusted_script.append(line)
-    return adjusted_script
+
+def mutateScript(context, script):
+    return testplan.mutateScript(context, script, mutateCommandline)
 
 
-def getTime(context):
+def _getTime(context, timefiles, metric_name='exec_time'):
     time = 0.0
-    for timefile in context.timefiles:
+    for timefile in timefiles:
         time += timeit.getUserTime(timefile)
-    return time
+    return {metric_name: lit.Test.toMetricValue(time)}
+
+
+def mutatePlan(context, plan):
+    context.timefiles = []
+    plan.runscript = mutateScript(context, plan.runscript)
+    plan.metric_collectors.append(
+        lambda context: _getTime(context, context.timefiles)
+    )

Modified: test-suite/trunk/litsupport/test.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/test.py?rev=263532&r1=263531&r2=263532&view=diff
==============================================================================
--- test-suite/trunk/litsupport/test.py (original)
+++ test-suite/trunk/litsupport/test.py Tue Mar 15 00:12:22 2016
@@ -1,10 +1,10 @@
 import os
 import lit
 import lit.util
+import logging
 from lit.formats import FileBasedTest
-from lit.TestRunner import executeScript, executeScriptInternal, \
-    parseIntegratedTestScriptCommands, getDefaultSubstitutions, \
-    applySubstitutions, getTempPaths
+from lit.TestRunner import getDefaultSubstitutions, applySubstitutions, \
+    getTempPaths
 from lit import Test
 from lit.util import to_bytes, to_string
 
@@ -15,8 +15,7 @@ import perf
 import profilegen
 import runsafely
 import shellcommand
-import testscript
-import timeit
+import testplan
 
 
 SKIPPED = lit.Test.ResultCode('SKIPPED', False)
@@ -38,16 +37,6 @@ class TestContext:
         self.tmpBase = tmpBase
 
 
-def runScript(context, script, useExternalSh=True):
-    execdir = os.path.dirname(context.test.getExecPath())
-    if useExternalSh:
-        res = executeScript(context.test, context.litConfig, context.tmpBase,
-                            script, execdir)
-    else:
-        res = executeScriptInternal(context.test, context.litConfig,
-                                    context.tmpBase, script, execdir)
-    return res
-
 
 class TestSuiteTest(FileBasedTest):
     def __init__(self):
@@ -59,22 +48,21 @@ class TestSuiteTest(FileBasedTest):
             return lit.Test.Result(Test.UNSUPPORTED, 'Test is unsupported')
 
         # Parse benchmark script
-        res = testscript.parse(test.getSourcePath())
+        plan = testplan.parse(test.getSourcePath())
         if litConfig.noExecute:
             return lit.Test.Result(Test.PASS)
-        runscript, verifyscript, metricscripts = res
 
         # Apply the usual lit substitutions (%s, %S, %p, %T, ...)
         tmpDir, tmpBase = getTempPaths(test)
         outfile = tmpBase + ".out"
         substitutions = getDefaultSubstitutions(test, tmpDir, tmpBase)
         substitutions += [('%o', outfile)]
-        runscript = applySubstitutions(runscript, substitutions)
-        verifyscript = applySubstitutions(verifyscript, substitutions)
-        metricscripts = {k: applySubstitutions(v, substitutions)
-                         for k, v in metricscripts.items()}
-        context = TestContext(test, litConfig, runscript, verifyscript, tmpDir,
-                              tmpBase)
+        plan.runscript = applySubstitutions(plan.runscript, substitutions)
+        plan.verifyscript = applySubstitutions(plan.verifyscript, substitutions)
+        plan.metricscripts = {k: applySubstitutions(v, substitutions)
+                         for k, v in plan.metricscripts.items()}
+        context = TestContext(test, litConfig, plan.runscript,
+                              plan.verifyscript, tmpDir, tmpBase)
         context.executable = shellcommand.getMainExecutable(context)
         if context.executable is None:
             return lit.Test.Result(Test.UNSUPPORTED,
@@ -84,81 +72,20 @@ class TestSuiteTest(FileBasedTest):
             return lit.Test.Result(SKIPPED,
                                    'Executable identical to previous run')
 
-        runscript = runsafely.wrapScript(context, runscript, suffix=".out")
-
-        if config.profile_generate:
-            runscript = profilegen.wrapScript(context, runscript)
-
         # Create the output directory if it does not already exist.
         lit.util.mkdir_p(os.path.dirname(tmpBase))
 
-        # Execute runscript (the "RUN:" part)
-        output = ""
-        n_runs = 1
-        runtimes = []
-        metrics = {}
-        for n in range(n_runs):
-            res = runScript(context, runscript)
-            if isinstance(res, lit.Test.Result):
-                return res
-
-            output += "\n" + "\n".join(runscript)
-
-            out, err, exitCode, timeoutInfo = res
-            if exitCode != 0:
-                # Only show command output in case of errors
-                output += "\n" + out
-                output += "\n" + err
-                return lit.Test.Result(Test.FAIL, output)
-
-            # Execute metric extraction scripts.
-            for metric, script in metricscripts.items():
-                res = runScript(context, script)
-                if isinstance(res, lit.Test.Result):
-                    return res
-
-                out, err, exitCode, timeoutInfo = res
-                metrics.setdefault(metric, list()).append(float(out))
-
-            try:
-                runtime = runsafely.getTime(context)
-                runtimes.append(runtime)
-            except IOError:
-                pass
-
-        if litConfig.params.get('profile') == 'perf':
-            profilescript = perf.wrapScript(context,
-                                            context.original_runscript)
-            profilescript = runsafely.wrapScript(context, profilescript,
-                                                 suffix=".perf.out")
-            runScript(context, context.profilescript)  # ignore result
-
-        # Merge llvm profile data
+        # Prepare test plan
+        runsafely.mutatePlan(context, plan)
+        compiletime.mutatePlan(context, plan)
+        codesize.mutatePlan(context, plan)
+        hash.mutatePlan(context, plan)
         if config.profile_generate:
-            mergescript = profilegen.getMergeProfilesScript(context)
-            runScript(context, mergescript)  # ignore result
+            profilegen.mutatePlan(context, plan)
+        if litConfig.params.get('profile') == 'perf':
+            perf.mutatePlan(context, plan)
 
-        # Run verification script (the "VERIFY:" part)
-        if len(verifyscript) > 0:
-            res = runScript(context, verifyscript)
-            if isinstance(res, lit.Test.Result):
-                return res
-            out, err, exitCode, timeoutInfo = res
-
-            output += "\n" + "\n".join(verifyscript)
-            if exitCode != 0:
-                output += "\n" + out
-                output += "\n" + err
-                return lit.Test.Result(Test.FAIL, output)
-
-        # Put metrics into the test result.
-        result = lit.Test.Result(Test.PASS, output)
-        if len(runtimes) > 0:
-            result.addMetric('exec_time', lit.Test.toMetricValue(runtimes[0]))
-        for metric, values in metrics.items():
-            result.addMetric(metric, lit.Test.toMetricValue(values[0]))
-        compiletime.collect(context, result)
-        hash.collect(context, result)
-        codesize.collect(context, result)
+        # Execute Test plan
+        result = testplan.executePlanTestResult(context, plan)
 
         return result

Added: test-suite/trunk/litsupport/testplan.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/testplan.py?rev=263532&view=auto
==============================================================================
--- test-suite/trunk/litsupport/testplan.py (added)
+++ test-suite/trunk/litsupport/testplan.py Tue Mar 15 00:12:22 2016
@@ -0,0 +1,175 @@
+"""
+Datastructures for test plans; Parsing of .test files; Executing test plans.
+"""
+from lit.TestRunner import parseIntegratedTestScriptCommands
+import lit.Test
+import lit.TestRunner
+import logging
+import os
+import shellcommand
+
+
+class TestPlan(object):
+    def __init__(self, runscript, verifyscript, metricscripts):
+        self.runscript = runscript
+        self.verifyscript = verifyscript
+        self.metricscripts = metricscripts
+        self.metric_collectors = []
+        self.profilescript = []
+
+
+def mutateScript(context, script, mutator):
+    previous_tmpbase = context.tmpBase
+    i = 0
+    mutated_script = []
+    for line in script:
+        number = ""
+        if len(script) > 1:
+            number = "-%s" % (i,)
+            i += 1
+        context.tmpBase = previous_tmpbase + number
+
+        mutated_line = mutator(context, line)
+        mutated_script.append(mutated_line)
+    return mutated_script
+
+
+def _parseShellCommand(script, ln):
+    # Trim trailing whitespace.
+    ln = ln.rstrip()
+
+    # Collapse lines with trailing '\\'.
+    if script and script[-1][-1] == '\\':
+        script[-1] = script[-1][:-1] + ln
+    else:
+        script.append(ln)
+
+
+def parse(filename):
+    """Parse a .test file as used in the llvm test-suite.
+    The file comporises of a number of lines starting with RUN: and VERIFY:
+    specifying shell commands to run the benchmark and verifying the result.
+    Returns a tuple with two arrays for the run and verify commands."""
+    # Collect the test lines from the script.
+    runscript = []
+    verifyscript = []
+    metricscripts = {}
+    keywords = ['RUN:', 'VERIFY:', 'METRIC:']
+    for line_number, command_type, ln in \
+            parseIntegratedTestScriptCommands(filename, keywords):
+        if command_type == 'RUN':
+            _parseShellCommand(runscript, ln)
+        elif command_type == 'VERIFY':
+            _parseShellCommand(verifyscript, ln)
+        elif command_type == 'METRIC':
+            metric, ln = ln.split(':', 1)
+            metricscript = metricscripts.setdefault(metric.strip(), list())
+            _parseShellCommand(metricscript, ln)
+        else:
+            raise ValueError("unknown script command type: %r" % (
+                             command_type,))
+
+    # Verify the script contains a run line.
+    if runscript == []:
+        raise ValueError("Test has no RUN: line!")
+
+    # Check for unterminated run lines.
+    for script in runscript, verifyscript:
+        if script and script[-1][-1] == '\\':
+            raise ValueError("Test has unterminated RUN/VERIFY lines " +
+                             "(ending with '\\')")
+
+    return TestPlan(runscript, verifyscript, metricscripts)
+
+
+def executeScript(context, script, useExternalSh=True):
+    if len(script) == 0:
+        return "", "", 0, None
+
+    execdir = os.path.dirname(context.test.getExecPath())
+    executeFunc = lit.TestRunner.executeScriptInternal
+    if useExternalSh:
+        executeFunc = lit.TestRunner.executeScript
+
+    res = executeFunc(context.test, context.litConfig, context.tmpBase, script,
+                      execdir)
+    # The executeScript() functions return lit.Test.Result in some error
+    # conditions instead of the normal tuples. Having different return types is
+    # really annoying so we transform it back to the usual tuple.
+    if isinstance(res, lit.Test.Result):
+        out = ""
+        err = res.output
+        exitCode = 1
+        timeoutInfo = None
+    else:
+        (out, err, exitCode, timeoutInfo) = res
+
+    # Log script in test output
+    context.result_output += "\n" + "\n".join(script)
+    # In case of an exitCode != 0 also log stdout/stderr
+    if exitCode != 0:
+        context.result_output += "\n" + out
+        context.result_output += "\n" + err
+
+    return (out, err, exitCode, timeoutInfo)
+
+
+def executePlan(context, plan):
+    """This is the main driver for executing a benchmark."""
+    # Execute RUN: part of the test file.
+    _, _, exitCode, _ = executeScript(context, plan.runscript)
+    if exitCode != 0:
+        return lit.Test.FAIL
+
+    # Execute VERIFY: part of the test file.
+    _, _, exitCode, _ = executeScript(context, plan.verifyscript)
+    if exitCode != 0:
+        # The question here is whether to still collects metrics if the
+        # benchmark results are invalid. I choose to avoid getting potentially
+        # broken metric values as well for a broken test.
+        return lit.Test.FAIL
+
+    # Perform various metric extraction steps setup by testing modules.
+    for metric_collector in plan.metric_collectors:
+        try:
+            additional_metrics = metric_collector(context)
+            for metric, value in additional_metrics.items():
+                context.result_metrics[metric] = value
+        except Exception as e:
+            logging.error("Could not collect metric with %s", metric_collector,
+                          exc_info=e)
+
+    # Execute the METRIC: part of the test file.
+    for metric, metricscript in plan.metricscripts.items():
+        out, err, exitCode, timeoutInfo = executeScript(context, metricscript)
+        if exitCode != 0:
+            logging.warning("Metric script for '%s' failed", metric)
+            continue
+        try:
+            value = float(out)
+            context.result_metrics[metric] = value
+        except ValueError:
+            logging.warning("Metric reported for '%s' is not a float: '%s'",
+                            metric, out)
+
+    # Execute additional profile gathering actions setup by testing modules.
+    _, _, exitCode, _ = executeScript(context, plan.profilescript) 
+    if exitCode != 0:
+        logging.warning("Profile script '%s' failed", plan.profilescript)
+
+    return lit.Test.PASS
+
+
+def executePlanTestResult(context, testplan):
+    """Convenience function to invoke executePlan() and construct a
+    lit.test.Result() object for the results."""
+    context.result_output = ""
+    context.result_metrics = {}
+
+    result_code = executePlan(context, testplan)
+
+    # Build test result object
+    result = lit.Test.Result(result_code, context.result_output)
+    for key, value in context.result_metrics.items():
+        result.addMetric(key, value)
+    return result

Removed: test-suite/trunk/litsupport/testscript.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/testscript.py?rev=263531&view=auto
==============================================================================
--- test-suite/trunk/litsupport/testscript.py (original)
+++ test-suite/trunk/litsupport/testscript.py (removed)
@@ -1,51 +0,0 @@
-# Code to parse .test files
-from lit.TestRunner import parseIntegratedTestScriptCommands
-import lit.Test
-
-
-def _parseShellCommand(script, ln):
-    # Trim trailing whitespace.
-    ln = ln.rstrip()
-
-    # Collapse lines with trailing '\\'.
-    if script and script[-1][-1] == '\\':
-        script[-1] = script[-1][:-1] + ln
-    else:
-        script.append(ln)
-
-
-def parse(filename):
-    """Parse a .test file as used in the llvm test-suite.
-    The file comporises of a number of lines starting with RUN: and VERIFY:
-    specifying shell commands to run the benchmark and verifying the result.
-    Returns a tuple with two arrays for the run and verify commands."""
-    # Collect the test lines from the script.
-    runscript = []
-    verifyscript = []
-    metricscripts = {}
-    keywords = ['RUN:', 'VERIFY:', 'METRIC:']
-    for line_number, command_type, ln in \
-            parseIntegratedTestScriptCommands(filename, keywords):
-        if command_type == 'RUN':
-            _parseShellCommand(runscript, ln)
-        elif command_type == 'VERIFY':
-            _parseShellCommand(verifyscript, ln)
-        elif command_type == 'METRIC':
-            metric, ln = ln.split(':', 1)
-            metricscript = metricscripts.setdefault(metric.strip(), list())
-            _parseShellCommand(metricscript, ln)
-        else:
-            raise ValueError("unknown script command type: %r" % (
-                             command_type,))
-
-    # Verify the script contains a run line.
-    if runscript == []:
-        raise ValueError("Test has no RUN: line!")
-
-    # Check for unterminated run lines.
-    for script in runscript, verifyscript:
-        if script and script[-1][-1] == '\\':
-            raise ValueError("Test has unterminated RUN/VERIFY lines " +
-                             "(ending with '\\')")
-
-    return runscript, verifyscript, metricscripts



