[test-suite] r314239 - litsupport: Rework test module support
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 26 12:48:04 PDT 2017
Author: matze
Date: Tue Sep 26 12:48:04 2017
New Revision: 314239
URL: http://llvm.org/viewvc/llvm-project?rev=314239&view=rev
Log:
litsupport: Rework test module support
- Moved modules to litsupport.modules
- Import all modules on startup
- Only apply modules in config.test_modules to a benchmark. This makes it
  possible to modify the module list per-directory in `lit.local.cfg` (see
  the sketch below).
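For illustration, a per-directory override can now be written in a
`lit.local.cfg` along these lines (a minimal sketch; 'stats' and 'perf'
stand in for whichever modules a directory wants to drop or add):

    # lit.local.cfg (sketch): adjust the module list for this directory only.
    # Rebind instead of mutating in place, in case the list object is
    # shared with the parent configuration.
    config.test_modules = [m for m in config.test_modules if m != 'stats']
    config.test_modules = config.test_modules + ['perf']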
Added:
test-suite/trunk/litsupport/modules/
test-suite/trunk/litsupport/modules/__init__.py
test-suite/trunk/litsupport/modules/codesize.py
- copied, changed from r313711, test-suite/trunk/litsupport/codesize.py
test-suite/trunk/litsupport/modules/compiletime.py
- copied, changed from r313711, test-suite/trunk/litsupport/compiletime.py
test-suite/trunk/litsupport/modules/hash.py
- copied, changed from r313711, test-suite/trunk/litsupport/hash.py
test-suite/trunk/litsupport/modules/perf.py
- copied, changed from r313711, test-suite/trunk/litsupport/perf.py
test-suite/trunk/litsupport/modules/profilegen.py
- copied, changed from r313711, test-suite/trunk/litsupport/profilegen.py
test-suite/trunk/litsupport/modules/remote.py
- copied, changed from r313711, test-suite/trunk/litsupport/remote.py
test-suite/trunk/litsupport/modules/run.py
- copied, changed from r313711, test-suite/trunk/litsupport/run.py
test-suite/trunk/litsupport/modules/run_under.py
- copied, changed from r313711, test-suite/trunk/litsupport/run_under.py
test-suite/trunk/litsupport/modules/stats.py
- copied, changed from r313711, test-suite/trunk/litsupport/stats.py
test-suite/trunk/litsupport/modules/timeit.py
- copied, changed from r313711, test-suite/trunk/litsupport/timeit.py
Removed:
test-suite/trunk/litsupport-tests/run/check/test.log
test-suite/trunk/litsupport/codesize.py
test-suite/trunk/litsupport/compiletime.py
test-suite/trunk/litsupport/hash.py
test-suite/trunk/litsupport/perf.py
test-suite/trunk/litsupport/profilegen.py
test-suite/trunk/litsupport/remote.py
test-suite/trunk/litsupport/run.py
test-suite/trunk/litsupport/run_under.py
test-suite/trunk/litsupport/stats.py
test-suite/trunk/litsupport/timeit.py
Modified:
test-suite/trunk/lit.cfg
test-suite/trunk/litsupport/README.md
test-suite/trunk/litsupport/test.py
Modified: test-suite/trunk/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/lit.cfg?rev=314239&r1=314238&r2=314239&view=diff
==============================================================================
--- test-suite/trunk/lit.cfg (original)
+++ test-suite/trunk/lit.cfg Tue Sep 26 12:48:04 2017
@@ -2,11 +2,24 @@ import logging
import os
import site
import sys
+
+# Setup logging.
+logger = logging.getLogger()
+logger.setLevel(logging.DEBUG)
+file_log = logging.FileHandler("%s/test.log" % config.test_exec_root,
+ mode="w")
+file_log.setLevel(logging.DEBUG)
+logger.addHandler(file_log)
+console_log = logging.StreamHandler()
+console_log.setLevel(logging.WARNING)
+logger.addHandler(console_log)
+
+# Load test-suite litsupport code
site.addsitedir(os.path.dirname(__file__))
-from litsupport import test
+import litsupport.test
config.name = 'test-suite'
-config.test_format = test.TestSuiteTest()
+config.test_format = litsupport.test.TestSuiteTest()
config.suffixes = ['.test']
config.excludes = ['ABI-Testsuite']
config.remote_flags = ""
@@ -23,17 +36,6 @@ if previous_results_file:
else:
config.previous_results = None
-# Setup logging
-logger = logging.getLogger()
-logger.setLevel(logging.DEBUG)
-file_log = logging.FileHandler("%s/test.log" % config.test_exec_root,
- mode="w")
-file_log.setLevel(logging.DEBUG)
-logger.addHandler(file_log)
-console_log = logging.StreamHandler()
-console_log.setLevel(logging.WARNING)
-logger.addHandler(console_log)
-
# Pass on some options to context object:
config.perf_profile_events = "cycles,cache-misses,branch-misses,instructions"
if lit_config.params.get('perf_profile_events'):
@@ -42,4 +44,3 @@ if lit_config.params.get('perf_profile_e
# Find and initialize lit modules.
if lit_config.params.get('profile') == 'perf':
config.test_modules += ['perf']
-test.load_modules(config.test_modules)
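Note the ordering in the new version: the logging handlers are installed
before `import litsupport.test`, presumably so that the "Loaded test
module ..." messages now emitted at import time (see `modules/__init__.py`
below) land in `test.log`. The pattern as a standalone sketch, using only
the standard library (`mypackage` is hypothetical):

    import logging

    # Install handlers first; records logged while importing other modules
    # would otherwise never reach test.log.
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.FileHandler("test.log", mode="w"))

    import mypackage  # hypothetical package that calls logging.info() on import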
Removed: test-suite/trunk/litsupport-tests/run/check/test.log
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport-tests/run/check/test.log?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport-tests/run/check/test.log (original)
+++ test-suite/trunk/litsupport-tests/run/check/test.log (removed)
@@ -1 +0,0 @@
-CHECK: Loaded test module {{.*}}run.py
Modified: test-suite/trunk/litsupport/README.md
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/README.md?rev=314239&r1=314238&r2=314239&view=diff
==============================================================================
--- test-suite/trunk/litsupport/README.md (original)
+++ test-suite/trunk/litsupport/README.md Tue Sep 26 12:48:04 2017
@@ -72,6 +72,8 @@ code; typical examples are:
`-fprofile-instr-generate` and enables the `profilegen` module that runs
`llvm-profdata` after running the benchmarks.
+Available modules are found in the `litsupport/modules` directory.
+
Developing New Modules
----------------------
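A new module is now just a file dropped into `litsupport/modules/` that
defines a `mutatePlan` function. A hypothetical minimal module, mirroring
the metric-collector pattern of codesize.py/compiletime.py elsewhere in
this commit (the file and metric names are invented):

    """Hypothetical litsupport/modules/mymodule.py."""


    def _collectMyMetric(context):
        # Metric collectors take the test context and return a dict of
        # metric name -> value.
        return {'my_metric': 42}


    def mutatePlan(context, plan):
        plan.metric_collectors.append(_collectMyMetric)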
Removed: test-suite/trunk/litsupport/codesize.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/codesize.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/codesize.py (original)
+++ test-suite/trunk/litsupport/codesize.py (removed)
@@ -1,46 +0,0 @@
-"""Test module to collect code size metrics of the benchmark executable."""
-from litsupport import testplan
-import logging
-import os.path
-
-
-def _getCodeSize(context):
- # First get the filesize: This should always work.
- metrics = {}
- metrics['size'] = os.path.getsize(context.executable)
-
- # If we have the llvm-size tool available get the size per segment.
- llvm_size = context.config.llvm_size
- if llvm_size:
- # -format=sysv is easier to parse than darwin/berkeley.
- cmdline = [llvm_size, '-format=sysv', context.executable]
- out = testplan.check_output(cmdline).decode('utf-8', errors='ignore')
- lines = out.splitlines()
- # First line contains executable name, second line should be a
- # "section size addr" header, numbers start after that.
- if "section" not in lines[1] or "size" not in lines[1]:
- logging.warning("Unexpected output from llvm-size on '%s'",
- context.executable)
- else:
- for l in lines[2:]:
- l = l.strip()
- if l == "":
- continue
- values = l.split()
- if len(values) < 2:
- logging.info("Ignoring malformed output line: %s", l)
- continue
- if values[0] == 'Total':
- continue
- try:
- name = values[0]
- val = int(values[1])
- metrics['size.%s' % name] = val
- except ValueError as e:
- logging.info("Ignoring malformed output line: %s", l)
-
- return metrics
-
-
-def mutatePlan(context, plan):
- plan.metric_collectors.append(_getCodeSize)
Removed: test-suite/trunk/litsupport/compiletime.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/compiletime.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/compiletime.py (original)
+++ test-suite/trunk/litsupport/compiletime.py (removed)
@@ -1,32 +0,0 @@
-"""Test module to collect compile time metrics. This just finds and summarizes
-the *.time files generated by the build."""
-from litsupport import timeit
-import os
-
-
-def _getCompileTime(context):
- # We compile multiple benchmarks in the same directory in SingleSource
- # mode. Only look at compiletime files starting with the name of our test.
- prefix = ""
- if context.config.single_source:
- prefix = "%s." % os.path.basename(context.executable)
-
- compile_time = 0.0
- link_time = 0.0
- dir = os.path.dirname(context.test.getFilePath())
- for path, subdirs, files in os.walk(dir):
- for file in files:
- if file.endswith('.o.time') and file.startswith(prefix):
- fullpath = os.path.join(path, file)
- compile_time += timeit.getUserTime(fullpath)
- if file.endswith('.link.time') and file.startswith(prefix):
- fullpath = os.path.join(path, file)
- link_time += timeit.getUserTime(fullpath)
- return {
- 'compile_time': compile_time,
- 'link_time': link_time,
- }
-
-
-def mutatePlan(context, plan):
- plan.metric_collectors.append(_getCompileTime)
Removed: test-suite/trunk/litsupport/hash.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/hash.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/hash.py (original)
+++ test-suite/trunk/litsupport/hash.py (removed)
@@ -1,55 +0,0 @@
-"""Test module to collect test executable hashsum."""
-from litsupport import shellcommand
-from litsupport import testplan
-import hashlib
-import logging
-import platform
-
-
-def compute(context):
- if hasattr(context, 'executable_hash'):
- return
- executable = context.executable
- try:
- # Darwin's "strip" doesn't support these arguments.
- if platform.system() != 'Darwin':
- stripped_executable = executable + '.stripped'
- testplan.check_call([context.config.strip_tool,
- '--remove-section=.comment',
- "--remove-section='.note*'",
- '-o', stripped_executable,
- executable])
- executable = stripped_executable
-
- h = hashlib.md5()
- h.update(open(executable, 'rb').read())
- context.executable_hash = h.hexdigest()
- except:
- logging.info('Could not calculate hash for %s' % executable)
- context.executable_hash = ''
-
-
-def same_as_previous(context):
- """Check whether hash has changed compared to the results in
- config.previous_results."""
- previous_results = context.config.previous_results
- testname = context.test.getFullName()
- executable_hash = context.executable_hash
- if previous_results and "tests" in previous_results:
- for test in previous_results["tests"]:
- if "name" not in test or test["name"] != testname:
- continue
- if "metrics" not in test:
- continue
- metrics = test["metrics"]
- return "hash" in metrics and metrics["hash"] == executable_hash
- return False
-
-
-def _getHash(context):
- compute(context)
- return {'hash': context.executable_hash}
-
-
-def mutatePlan(context, plan):
- plan.metric_collectors.append(_getHash)
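For reference, `same_as_previous` above walks a results dictionary of
roughly this shape (reconstructed from the code; the test name and hash
value are invented):

    # Sketch of config.previous_results as consumed by same_as_previous().
    previous_results = {
        "tests": [
            {
                "name": "test-suite :: SingleSource/Benchmarks/Misc/pi.test",
                "metrics": {"hash": "d41d8cd98f00b204e9800998ecf8427e"},
            },
        ],
    }

It returns True only when an entry with the same full test name carries a
matching 'hash' metric.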
Added: test-suite/trunk/litsupport/modules/__init__.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/__init__.py?rev=314239&view=auto
==============================================================================
--- test-suite/trunk/litsupport/modules/__init__.py (added)
+++ test-suite/trunk/litsupport/modules/__init__.py Tue Sep 26 12:48:04 2017
@@ -0,0 +1,16 @@
+import importlib
+import logging
+import pkgutil
+
+# Load all modules
+modules = dict()
+for importer, modname, ispkg in pkgutil.walk_packages(path=__path__,
+ prefix=__name__+'.'):
+ module = importlib.import_module(modname)
+ if not hasattr(module, 'mutatePlan'):
+ logging.error('Skipping %s: No mutatePlan function' % modname)
+ continue
+ assert modname.startswith('litsupport.modules.')
+ shortname = modname[len('litsupport.modules.'):]
+ modules[shortname] = module
+ logging.info("Loaded test module %s" % module.__file__)
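Once this package is imported, module lookup is a plain dictionary access
keyed by the short name. A sketch, assuming the test-suite checkout is on
`sys.path`:

    import litsupport.modules

    # 'timeit' is the short name derived from litsupport/modules/timeit.py.
    timeit = litsupport.modules.modules['timeit']
    # Every registered entry exposes mutatePlan; files without it are
    # skipped with an error at import time.
    assert hasattr(timeit, 'mutatePlan')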
Copied: test-suite/trunk/litsupport/modules/codesize.py (from r313711, test-suite/trunk/litsupport/codesize.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/codesize.py?p2=test-suite/trunk/litsupport/modules/codesize.py&p1=test-suite/trunk/litsupport/codesize.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
(empty)
Copied: test-suite/trunk/litsupport/modules/compiletime.py (from r313711, test-suite/trunk/litsupport/compiletime.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/compiletime.py?p2=test-suite/trunk/litsupport/modules/compiletime.py&p1=test-suite/trunk/litsupport/compiletime.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
--- test-suite/trunk/litsupport/compiletime.py (original)
+++ test-suite/trunk/litsupport/modules/compiletime.py Tue Sep 26 12:48:04 2017
@@ -1,6 +1,6 @@
"""Test module to collect compile time metrics. This just finds and summarizes
the *.time files generated by the build."""
-from litsupport import timeit
+from litsupport.modules import timeit
import os
Copied: test-suite/trunk/litsupport/modules/hash.py (from r313711, test-suite/trunk/litsupport/hash.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/hash.py?p2=test-suite/trunk/litsupport/modules/hash.py&p1=test-suite/trunk/litsupport/hash.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
(empty)
Copied: test-suite/trunk/litsupport/modules/perf.py (from r313711, test-suite/trunk/litsupport/perf.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/perf.py?p2=test-suite/trunk/litsupport/modules/perf.py&p1=test-suite/trunk/litsupport/perf.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
--- test-suite/trunk/litsupport/perf.py (original)
+++ test-suite/trunk/litsupport/modules/perf.py Tue Sep 26 12:48:04 2017
@@ -2,7 +2,7 @@
perf tool."""
from litsupport import shellcommand
from litsupport import testplan
-from litsupport import run_under
+from litsupport.modules import run_under
def _mutateCommandLine(context, commandline):
Copied: test-suite/trunk/litsupport/modules/profilegen.py (from r313711, test-suite/trunk/litsupport/profilegen.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/profilegen.py?p2=test-suite/trunk/litsupport/modules/profilegen.py&p1=test-suite/trunk/litsupport/profilegen.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
(empty)
Copied: test-suite/trunk/litsupport/modules/remote.py (from r313711, test-suite/trunk/litsupport/remote.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/remote.py?p2=test-suite/trunk/litsupport/modules/remote.py&p1=test-suite/trunk/litsupport/remote.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
(empty)
Copied: test-suite/trunk/litsupport/modules/run.py (from r313711, test-suite/trunk/litsupport/run.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/run.py?p2=test-suite/trunk/litsupport/modules/run.py&p1=test-suite/trunk/litsupport/run.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
(empty)
Copied: test-suite/trunk/litsupport/modules/run_under.py (from r313711, test-suite/trunk/litsupport/run_under.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/run_under.py?p2=test-suite/trunk/litsupport/modules/run_under.py&p1=test-suite/trunk/litsupport/run_under.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
--- test-suite/trunk/litsupport/run_under.py (original)
+++ test-suite/trunk/litsupport/modules/run_under.py Tue Sep 26 12:48:04 2017
@@ -8,11 +8,11 @@ def _mutateCommandLine(context, commandl
cmd = shellcommand.parse(commandline)
run_under_cmd = shellcommand.parse(context.config.run_under)
- if (run_under_cmd.stdin is not None or
- run_under_cmd.stdout is not None or
- run_under_cmd.stderr is not None or
- run_under_cmd.workdir is not None or
- run_under_cmd.envvars):
+ if run_under_cmd.stdin is not None or \
+ run_under_cmd.stdout is not None or \
+ run_under_cmd.stderr is not None or \
+ run_under_cmd.workdir is not None or \
+ run_under_cmd.envvars:
raise Exception("invalid run_under argument!")
cmd.wrap(run_under_cmd.executable, run_under_cmd.arguments)
Copied: test-suite/trunk/litsupport/modules/stats.py (from r313711, test-suite/trunk/litsupport/stats.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/stats.py?p2=test-suite/trunk/litsupport/modules/stats.py&p1=test-suite/trunk/litsupport/stats.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
(empty)
Copied: test-suite/trunk/litsupport/modules/timeit.py (from r313711, test-suite/trunk/litsupport/timeit.py)
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/modules/timeit.py?p2=test-suite/trunk/litsupport/modules/timeit.py&p1=test-suite/trunk/litsupport/timeit.py&r1=313711&r2=314239&rev=314239&view=diff
==============================================================================
(empty)
Removed: test-suite/trunk/litsupport/perf.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/perf.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/perf.py (original)
+++ test-suite/trunk/litsupport/perf.py (removed)
@@ -1,36 +0,0 @@
-"""Test Module to perform an extra execution of the benchmark in the linux
-perf tool."""
-from litsupport import shellcommand
-from litsupport import testplan
-from litsupport import run_under
-
-
-def _mutateCommandLine(context, commandline):
- profilefile = context.tmpBase + ".perf_data"
- cmd = shellcommand.parse(commandline)
- cmd.wrap('perf', [
- 'record',
- '-e', context.config.perf_profile_events,
- '-o', profilefile,
- '--'
- ])
- if cmd.stdout is None:
- cmd.stdout = "/dev/null"
- else:
- cmd.stdout += ".perfrecord"
- if cmd.stderr is None:
- cmd.stderr = "/dev/null"
- else:
- cmd.stderr += ".perfrecord"
- return cmd.toCommandline()
-
-
-def mutatePlan(context, plan):
- script = context.parsed_runscript
- if context.config.run_under:
- script = testplan.mutateScript(context, script,
- run_under.mutateCommandLine)
- script = testplan.mutateScript(context, script, _mutateCommandLine)
- plan.profilescript += script
- plan.metric_collectors.append(
- lambda context: {'profile': context.tmpBase + '.perf_data'})
Removed: test-suite/trunk/litsupport/profilegen.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/profilegen.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/profilegen.py (original)
+++ test-suite/trunk/litsupport/profilegen.py (removed)
@@ -1,27 +0,0 @@
-"""Test module that runs llvm-profdata merge after executing the benchmark."""
-from litsupport import shellcommand
-from litsupport import testplan
-
-
-def _mutateCommandline(context, commandline):
- """Adjust runscript to set a different value to the LLVM_PROFILE_FILE
- environment variable for each execution."""
- profilefile = context.tmpBase + ".profraw"
- prefix = "env LLVM_PROFILE_FILE=%s " % profilefile
- context.profilefiles.append(profilefile)
- return prefix + commandline
-
-
-def _mutateScript(context, script):
- return testplan.mutateScript(context, script, _mutateCommandline)
-
-
-def mutatePlan(context, plan):
- context.profilefiles = []
- # Adjust run steps to set LLVM_PROFILE_FILE environment variable.
- plan.runscript = _mutateScript(context, plan.runscript)
- # Run profdata merge at the end
- profdatafile = context.executable + ".profdata"
- args = ['merge', '-output=%s' % profdatafile] + context.profilefiles
- mergecmd = shellcommand.ShellCommand(context.config.llvm_profdata, args)
- plan.profilescript += [mergecmd.toCommandline()]
Removed: test-suite/trunk/litsupport/remote.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/remote.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/remote.py (original)
+++ test-suite/trunk/litsupport/remote.py (removed)
@@ -1,35 +0,0 @@
-"""Test module to execute a benchmark through ssh on a remote device.
-This assumes all relevant directories and files are present on the remote
-device (typically shared by NFS)."""
-from litsupport import testplan
-import logging
-
-
-def _mutateCommandline(context, commandline, suffix=""):
- shfilename = context.tmpBase + suffix + ".sh"
- shfile = open(shfilename, "w")
- shfile.write(commandline + "\n")
- logging.info("Created shfile '%s'", shfilename)
- shfile.close()
-
- config = context.config
- remote_commandline = config.remote_client
- if config.remote_user:
- remote_commandline += " -l %s" % config.remote_user
- if config.remote_port:
- remote_commandline += " -p %s" % config.remote_port
- if config.remote_flags:
- remote_commandline += config.remote_flags
- remote_commandline += " %s" % config.remote_host
- remote_commandline += " /bin/sh %s" % shfilename
- return remote_commandline
-
-
-def _mutateScript(context, script, suffix=""):
- mutate = lambda c, cmd: _mutateCommandline(c, cmd, suffix)
- return testplan.mutateScript(context, script, mutate)
-
-
-def mutatePlan(context, plan):
- plan.preparescript = _mutateScript(context, plan.preparescript, "-prepare")
- plan.runscript = _mutateScript(context, plan.runscript)
Removed: test-suite/trunk/litsupport/run.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/run.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/run.py (original)
+++ test-suite/trunk/litsupport/run.py (removed)
@@ -1,10 +0,0 @@
-"""Test module to just run the benchmark. Without this module the benchmark is
-not executed. This may be interesting when just collecting compile time and
-code size."""
-
-
-def mutatePlan(context, plan):
- plan.preparescript = context.parsed_preparescript
- plan.runscript = context.parsed_runscript
- plan.verifyscript = context.parsed_verifyscript
- plan.metricscripts = context.parsed_metricscripts
Removed: test-suite/trunk/litsupport/run_under.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/run_under.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/run_under.py (original)
+++ test-suite/trunk/litsupport/run_under.py (removed)
@@ -1,27 +0,0 @@
-"""Test module to run benchmarks in a wrapper application. This is typically
-used to prefix the benchmark command with simulator commands."""
-from litsupport import shellcommand
-from litsupport import testplan
-
-
-def _mutateCommandLine(context, commandline):
- cmd = shellcommand.parse(commandline)
- run_under_cmd = shellcommand.parse(context.config.run_under)
-
- if (run_under_cmd.stdin is not None or
- run_under_cmd.stdout is not None or
- run_under_cmd.stderr is not None or
- run_under_cmd.workdir is not None or
- run_under_cmd.envvars):
- raise Exception("invalid run_under argument!")
-
- cmd.wrap(run_under_cmd.executable, run_under_cmd.arguments)
-
- return cmd.toCommandline()
-
-
-def mutatePlan(context, plan):
- run_under = context.config.run_under
- if run_under:
- plan.runscript = testplan.mutateScript(context, plan.runscript,
- _mutateCommandLine)
Removed: test-suite/trunk/litsupport/stats.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/stats.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/stats.py (original)
+++ test-suite/trunk/litsupport/stats.py (removed)
@@ -1,46 +0,0 @@
-"""test-suite/lit plugin to collect internal llvm json statistics.
-
-This assumes the benchmarks were built with the -save-stats=obj flag."""
-import json
-import logging
-import os
-from collections import defaultdict
-
-
-def _mergeStats(global_stats, statsfilename):
- try:
- f = open(statsfilename, "rt")
- stats = json.load(f)
- except Exception as e:
- logging.warning("Could not read '%s'", statsfilename, exc_info=e)
- return
- for name, value in stats.iteritems():
- global_stats[name] += value
-
-
-def _getStats(context):
- # We compile multiple benchmarks in the same directory in SingleSource
- # mode. Only look at compiletime files starting with the name of our test.
- prefix = ""
- if context.config.single_source:
- prefix = "%s." % os.path.basename(context.executable)
-
- stats = defaultdict(lambda: 0.0)
- dir = os.path.dirname(context.test.getFilePath())
- for path, subdirs, files in os.walk(dir):
- for file in files:
- if file.endswith('.stats') and file.startswith(prefix):
- fullpath = os.path.join(path, file)
- _mergeStats(stats, fullpath)
-
- if len(stats) == 0:
- logging.warning("No stats for '%s'", context.test.getFullName())
-
- result = dict()
- for key, value in stats.iteritems():
- result[key] = value
- return result
-
-
-def mutatePlan(context, plan):
- plan.metric_collectors.append(_getStats)
Modified: test-suite/trunk/litsupport/test.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/test.py?rev=314239&r1=314238&r2=314239&view=diff
==============================================================================
--- test-suite/trunk/litsupport/test.py (original)
+++ test-suite/trunk/litsupport/test.py Tue Sep 26 12:48:04 2017
@@ -2,32 +2,20 @@
Main integration for llvm-lit: This defines a lit test format.
Also contains logic to load benchmark modules.
"""
-import importlib
import lit
+import lit.TestRunner
import lit.util
+import lit.formats
+import litsupport.modules
+import litsupport.modules.hash
+import litsupport.testfile
+import litsupport.testplan
import logging
import os
-from lit.formats import ShTest
-from lit.TestRunner import getTempPaths
-from lit import Test
-from lit.util import to_bytes, to_string
-
-from litsupport import codesize
-from litsupport import compiletime
-from litsupport import hash
-from litsupport import perf
-from litsupport import profilegen
-from litsupport import remote
-from litsupport import run
-from litsupport import run_under
-from litsupport import testfile
-from litsupport import testplan
-from litsupport import timeit
SKIPPED = lit.Test.ResultCode('SKIPPED', False)
NOEXE = lit.Test.ResultCode('NOEXE', True)
-modules = []
class TestContext:
@@ -43,23 +31,7 @@ class TestContext:
self.tmpBase = tmpBase
-def load_modules(test_modules):
- for name in test_modules:
- modulename = 'litsupport.%s' % name
- try:
- module = importlib.import_module(modulename)
- except ImportError as e:
- logging.error("Could not import module '%s'" % modulename)
- sys.exit(1)
- if not hasattr(module, 'mutatePlan'):
- logging.error("Invalid test module '%s': No mutatePlan() function."
- % modulename)
- sys.exit(1)
- logging.info("Loaded test module %s" % module.__file__)
- modules.append(module)
-
-
-class TestSuiteTest(ShTest):
+class TestSuiteTest(lit.formats.ShTest):
def __init__(self):
super(TestSuiteTest, self).__init__()
@@ -71,11 +43,11 @@ class TestSuiteTest(ShTest):
return lit.Test.Result(Test.PASS)
# Parse .test file and initialize context
- tmpDir, tmpBase = getTempPaths(test)
+ tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
lit.util.mkdir_p(os.path.dirname(tmpBase))
context = TestContext(test, litConfig, tmpDir, tmpBase)
- testfile.parse(context, test.getSourcePath())
- plan = testplan.TestPlan()
+ litsupport.testfile.parse(context, test.getSourcePath())
+ plan = litsupport.testplan.TestPlan()
# Report missing test executables.
if not os.path.exists(context.executable):
@@ -84,8 +56,8 @@ class TestSuiteTest(ShTest):
# Skip unchanged tests
if config.previous_results:
- hash.compute(context)
- if hash.same_as_previous(context):
+ litsupport.modules.hash.compute(context)
+ if litsupport.modules.hash.same_as_previous(context):
result = lit.Test.Result(
SKIPPED, 'Executable identical to previous run')
val = lit.Test.toMetricValue(context.executable_hash)
@@ -93,10 +65,13 @@ class TestSuiteTest(ShTest):
return result
# Let test modules modify the test plan.
- for module in modules:
+ for modulename in config.test_modules:
+ module = litsupport.modules.modules.get(modulename)
+ if module is None:
+ raise Exception("Unknown testmodule '%s'" % modulename)
module.mutatePlan(context, plan)
# Execute Test plan
- result = testplan.executePlanTestResult(context, plan)
+ result = litsupport.testplan.executePlanTestResult(context, plan)
return result
Removed: test-suite/trunk/litsupport/timeit.py
URL: http://llvm.org/viewvc/llvm-project/test-suite/trunk/litsupport/timeit.py?rev=314238&view=auto
==============================================================================
--- test-suite/trunk/litsupport/timeit.py (original)
+++ test-suite/trunk/litsupport/timeit.py (removed)
@@ -1,80 +0,0 @@
-from litsupport import shellcommand
-from litsupport import testplan
-import re
-
-
-def _mutateCommandLine(context, commandline):
- outfile = context.tmpBase + ".out"
- timefile = context.tmpBase + ".time"
- config = context.config
- cmd = shellcommand.parse(commandline)
-
- timeit = "%s/tools/timeit" % config.test_source_root
- if config.remote_host:
- timeit = "%s/tools/timeit-target" % config.test_source_root
- args = ["--limit-core", "0"]
- args += ["--limit-cpu", "7200"]
- args += ["--timeout", "7200"]
- args += ["--limit-file-size", "104857600"]
- args += ["--limit-rss-size", "838860800"]
- if cmd.workdir is not None:
- args += ["--chdir", cmd.workdir]
- cmd.workdir = None
- if not config.traditional_output:
- if cmd.stdout is not None:
- args += ["--redirect-stdout", cmd.stdout]
- cmd.stdout = None
- if cmd.stderr is not None:
- args += ["--redirect-stderr", cmd.stderr]
- cmd.stderr = None
- else:
- if cmd.stdout is not None or cmd.stderr is not None:
- raise Exception("Separate stdout/stderr redirection not " +
- "possible with traditional output")
- args += ["--append-exitstatus"]
- args += ["--redirect-output", outfile]
- if cmd.stdin is not None:
- args += ["--redirect-input", cmd.stdin]
- cmd.stdin = None
- else:
- args += ["--redirect-input", "/dev/null"]
- args += ["--summary", timefile]
- # Remember timefilename for later
- context.timefiles.append(timefile)
-
- cmd.wrap(timeit, args)
- return cmd.toCommandline()
-
-
-def _mutateScript(context, script):
- if not hasattr(context, "timefiles"):
- context.timefiles = []
- return testplan.mutateScript(context, script, _mutateCommandLine)
-
-
-def _collectTime(context, timefiles, metric_name='exec_time'):
- time = 0.0
- for timefile in timefiles:
- time += getUserTime(timefile)
- return {metric_name: time}
-
-
-def mutatePlan(context, plan):
- if len(plan.runscript) == 0:
- return
- context.timefiles = []
- plan.runscript = _mutateScript(context, plan.runscript)
- plan.metric_collectors.append(
- lambda context: _collectTime(context, context.timefiles)
- )
-
-
-def getUserTime(filename):
- """Extract the user time form a .time file produced by timeit"""
- with open(filename) as fd:
- l = [l for l in fd.readlines()
- if l.startswith('user')]
- assert len(l) == 1
-
- m = re.match(r'user\s+([0-9.]+)', l[0])
- return float(m.group(1))
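For context, `getUserTime` above expects a summary file written by the
timeit tool containing exactly one line that starts with 'user'. A usage
sketch (file name and contents invented; the import path reflects the new
litsupport/modules layout):

    # pi.o.time, as written by timeit --summary, contains lines such as:
    #   user 1.2345
    from litsupport.modules import timeit

    seconds = timeit.getUserTime('pi.o.time')  # -> 1.2345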