[LNT] r309063 - lnt/tests: Add 'no_errors' to run_info; add lnt check-no-errors
Matthias Braun via llvm-commits
llvm-commits@lists.llvm.org
Tue Jul 25 17:07:52 PDT 2017
Author: matze
Date: Tue Jul 25 17:07:52 2017
New Revision: 309063
URL: http://llvm.org/viewvc/llvm-project?rev=309063&view=rev
Log:
lnt/tests: Add 'no_errors' to run_info; add lnt check-no-errors
This is motivated by the problem that you often want your CI scripts to
continue collecting additional data even if some intermediate steps or
some of the benchmarks failed. Thus `lnt runtest` usually returns with a
returncode of 0. However, you still want to communicate this error to
the CI system.

This change makes `lnt runtest` add a 'no_errors' field to the JSON
report and adds a new `lnt check-no-errors` command that checks for the
presence of this field. This way you can put an `lnt check-no-errors`
invocation at the very end of your CI scripts to have them fail after
they have finished measuring, reporting, submitting, analyzing, etc.
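
To illustrate the intended CI usage (a minimal sketch in Python; the
report path and runtest options are placeholders, not part of this
change):

    #!/usr/bin/env python
    # CI driver sketch: keep collecting/submitting data even when
    # benchmarks fail; only turn the recorded error into a failing exit
    # status at the very end.
    import subprocess
    import sys

    REPORT = 'report.json'  # placeholder path for this sketch

    # `lnt runtest` usually exits 0 even when benchmarks fail; failures
    # are recorded as "no_errors": false in the report instead.
    subprocess.check_call(['lnt', 'runtest', 'test-suite'])  # + options

    # ... submit results, run analyses, archive logs, etc. ...

    # Final step: `lnt check-no-errors` exits 1 unless the report's run
    # section contains "no_errors": true.
    sys.exit(subprocess.call(['lnt', 'check-no-errors', REPORT]))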
Added:
lnt/trunk/tests/lnttool/Inputs/error_0.json
lnt/trunk/tests/lnttool/Inputs/error_1.json
lnt/trunk/tests/lnttool/Inputs/error_2.json
lnt/trunk/tests/lnttool/Inputs/error_3.json
lnt/trunk/tests/lnttool/Inputs/error_4.json
lnt/trunk/tests/lnttool/Inputs/no_error_0.json
lnt/trunk/tests/lnttool/Inputs/no_error_1.json
lnt/trunk/tests/lnttool/check_no_errors.shtest
Modified:
lnt/trunk/docs/tools.rst
lnt/trunk/lnt/lnttool/main.py
lnt/trunk/lnt/testing/__init__.py
lnt/trunk/lnt/tests/compile.py
lnt/trunk/lnt/tests/nt.py
lnt/trunk/lnt/tests/test_suite.py
lnt/trunk/tests/runtest/test_suite.py
Modified: lnt/trunk/docs/tools.rst
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/docs/tools.rst?rev=309063&r1=309062&r2=309063&view=diff
==============================================================================
--- lnt/trunk/docs/tools.rst (original)
+++ lnt/trunk/docs/tools.rst Tue Jul 25 17:07:52 2017
@@ -44,6 +44,12 @@ Client-Side Tools
     Run a built-in test. See the :ref:`tests` documentation for more
     details on this tool.
 
+  ``lnt check-no-errors <file>+``
+     Check that the report files contain `"no_errors": true` in their run
+     sections; otherwise exit with returncode 1. This is useful for
+     continuous integration scripts which want to report an error if any
+     of the benchmarks didn't compile or run correctly.
+
 Server Administration
 ~~~~~~~~~~~~~~~~~~~~~
Modified: lnt/trunk/lnt/lnttool/main.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/lnttool/main.py?rev=309063&r1=309062&r2=309063&view=diff
==============================================================================
--- lnt/trunk/lnt/lnttool/main.py (original)
+++ lnt/trunk/lnt/lnttool/main.py Tue Jul 25 17:07:52 2017
@@ -98,6 +98,36 @@ def action_checkformat(files, testsuite)
                              sys.stderr, verbose=True)
 
 
+@click.command("check-no-errors")
+@click.argument("files", nargs=-1, type=click.Path(exists=True))
+def action_check_no_errors(files):
+    '''Check that reports contain "no_errors": true.'''
+    import json
+    error_msg = None
+    for file in files:
+        try:
+            data = json.load(open(file))
+        except Exception as e:
+            error_msg = 'Could not read report: %s' % e
+            break
+        # Get 'run' or 'Run' { 'Info' } section (old/new format)
+        run_info = data.get('run', None)
+        if run_info is None:
+            run_info = data.get('Run', None)
+            if run_info is not None:
+                run_info = run_info.get('Info', None)
+        if run_info is None:
+            error_msg = 'Could not find run section'
+            break
+        no_errors = run_info.get('no_errors', False)
+        if no_errors is not True:
+            error_msg = 'run section does not specify "no_errors": true'
+            break
+    if error_msg is not None:
+        sys.stderr.write("%s: %s\n" % (file, error_msg))
+        sys.exit(1)
+
+
 def _print_result_url(results, verbose):
     result_url = results.get('result_url')
     if result_url is not None:
@@ -465,6 +495,7 @@ def cli():
     \b
     Use ``lnt <command> --help`` for more information on a specific command.
     """
+cli.add_command(action_check_no_errors)
 cli.add_command(action_checkformat)
 cli.add_command(action_convert)
 cli.add_command(action_create)
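
For reference, the run-section lookup above accepts both report layouts
(they are exercised by the test inputs added below); the same logic as a
standalone sketch, where find_run_info is a hypothetical helper name:

    def find_run_info(data):
        """Return the run-info dict of a report in either format:
        old: {"run": {..., "no_errors": true}}
        new: {"Run": {"Info": {..., "no_errors": true}}}
        """
        run_info = data.get('run', None)
        if run_info is None:
            run_info = data.get('Run', None)
            if run_info is not None:
                run_info = run_info.get('Info', None)
        return run_info

    # Both layouts yield the same run-info dict:
    assert find_run_info({'run': {'no_errors': True}}) == \
        {'no_errors': True}
    assert find_run_info({'Run': {'Info': {'no_errors': True}}}) == \
        {'no_errors': True}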
Modified: lnt/trunk/lnt/testing/__init__.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/testing/__init__.py?rev=309063&r1=309062&r2=309063&view=diff
==============================================================================
--- lnt/trunk/lnt/testing/__init__.py (original)
+++ lnt/trunk/lnt/testing/__init__.py Tue Jul 25 17:07:52 2017
@@ -109,8 +109,16 @@ class Run:
         self.start_time = normalize_time(start_time)
         self.end_time = normalize_time(end_time)
 
-        self.info = dict((str(key), str(value))
-                         for key, value in info.items())
+        self.info = dict()
+        # Convert keys/values that are not json encodable to strings.
+        for key, value in info.items():
+            key = str(key)
+            # Keep True, False, None as they are trivially json encodable.
+            # I would love to do the same for numbers but I fear that will
+            # break compatibility...
+            if value is not True and value is not False and value is not None:
+                value = str(value)
+            self.info[key] = value
         if '__report_version__' in self.info:
             raise ValueError("'__report_version__' key is reserved")
         # TODO: Convert to version 2
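
The point of the Run.info change above is that 'no_errors' survives as a
real JSON boolean while other values keep being stringified; distilled
into a standalone sketch, where normalize_info is a hypothetical name:

    def normalize_info(info):
        # Mirror Run.__init__'s normalization: keep True/False/None
        # as-is, stringify every other key and value.
        result = {}
        for key, value in info.items():
            key = str(key)
            if value is not True and value is not False and value is not None:
                value = str(value)
            result[key] = value
        return result

    print(normalize_info({'no_errors': True, 'run_order': 154331}))
    # -> {'no_errors': True, 'run_order': '154331'}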
Modified: lnt/trunk/lnt/tests/compile.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/compile.py?rev=309063&r1=309062&r2=309063&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/compile.py (original)
+++ lnt/trunk/lnt/tests/compile.py Tue Jul 25 17:07:52 2017
@@ -918,6 +918,7 @@ class CompileTest(builtintest.BuiltinTes
         g_log.info('run started')
         g_log.info('using CC: %r' % opts.cc)
         g_log.info('using CXX: %r' % opts.cxx)
+        no_errors = True
         for basename, test_fn in tests_to_run:
             for success, name, samples in test_fn(basename, run_info,
                                                   variables):
@@ -935,9 +936,11 @@ class CompileTest(builtintest.BuiltinTes
                 if not success:
                     testsamples.append(lnt.testing.TestSamples(
                         test_name + '.status', [lnt.testing.FAIL]))
+                    no_errors = False
 
                 if samples:
                     testsamples.append(lnt.testing.TestSamples(
                         test_name, samples))
+        run_info['no_errors'] = no_errors
 
         end_time = datetime.utcnow()
         g_log.info('run complete')
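
The same accumulator pattern recurs in nt.py and test_suite.py below:
start with no_errors = True, clear it whenever a FAIL status sample is
emitted, and store the result in run_info. A distilled sketch with
hypothetical names:

    def collect(results):
        # results: iterable of (test_name, success, samples) tuples.
        no_errors = True
        samples = []
        for name, success, values in results:
            if not success:
                samples.append((name + '.status', 'FAIL'))
                no_errors = False  # one failure flips the run-level flag
            if values:
                samples.append((name, values))
        return samples, no_errors

    _, ok = collect([('a', True, [1.0]), ('b', False, [])])
    assert ok is False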
Modified: lnt/trunk/lnt/tests/nt.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/nt.py?rev=309063&r1=309062&r2=309063&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/nt.py (original)
+++ lnt/trunk/lnt/tests/nt.py Tue Jul 25 17:07:52 2017
@@ -770,6 +770,7 @@ def load_nt_report_file(report_path, con
     # We don't use the test info, currently.
     test_info = {}
     test_samples = []
+    no_errors = True
 
     for row in reader_it:
         record = dict(zip(header, row))
@@ -819,6 +820,7 @@ def load_nt_report_file(report_path, con
                 if is_subtest:
                     continue
                 status_value = lnt.testing.FAIL
+                no_errors = False
             elif success_value == 'xfail':
                 status_value = lnt.testing.XFAIL
             else:
@@ -842,7 +844,7 @@ def load_nt_report_file(report_path, con
     report_file.close()
 
-    return test_samples
+    return test_samples, no_errors
 
 
 def prepare_report_dir(config):
     # Set up the sandbox.
@@ -1033,9 +1035,11 @@ def run_test(nick_prefix, iteration, con
         if not os.path.exists(build_report_path):
             fatal('nightly test failed, no report generated')
 
-        test_samples = load_nt_report_file(build_report_path, config)
+        test_samples, no_errors = \
+            load_nt_report_file(build_report_path, config)
     else:
         test_samples = []
+        no_errors = True
 
     # Merge in the test samples from all of the test modules.
     existing_tests = set(s.name for s in test_samples)
@@ -1063,6 +1067,7 @@ def run_test(nick_prefix, iteration, con
     # FIXME: We aren't getting the LLCBETA options.
     run_info = {}
     run_info['tag'] = test_namespace
+    run_info['no_errors'] = no_errors
     run_info.update(config.cc_info)
 
     # Capture sw_vers if this looks like Darwin.
@@ -1172,19 +1177,18 @@ def rerun_test(config, name, num_times):
         test_full_path
 
     results = []
+    no_errors = True
     for _ in xrange(0, num_times):
-        test_results = _execute_test_again(config,
-                                           test_name,
-                                           test_full_path,
-                                           relative_test_path,
-                                           logfile)
+        test_results, t_no_errors = _execute_test_again(
+            config, test_name, test_full_path, relative_test_path, logfile)
+        no_errors &= t_no_errors
         results.extend(test_results)
 
     # Check we got an exec and status from each run.
     assert len(results) >= num_times, "Did not get all the runs?" + str(results)
     logfile.close()
 
-    return results
+    return results, no_errors
 
 
 def _prepare_testsuite_for_rerun(test_name, test_full_path, config):
@@ -1258,9 +1262,9 @@ def _execute_test_again(config, test_nam
     assert returncode == 0, "command failed"
     assert os.path.exists(result_path), "Missing results file."
 
-    results = load_nt_report_file(result_path, config)
+    results, no_errors = load_nt_report_file(result_path, config)
     assert len(results) > 0
 
-    return results
+    return results, no_errors
 
 
 def _unix_quote_args(s):
     return map(pipes.quote, shlex.split(s))
@@ -1452,9 +1456,8 @@ def _process_reruns(config, server_reply
                                  i + 1,
                                  len(rerunable_benches)))
 
-        fresh_samples = rerun_test(config,
-                                   bench.name,
-                                   NUMBER_OF_RERUNS)
+        fresh_samples, t_no_errors = rerun_test(config,
+                                                bench.name, NUMBER_OF_RERUNS)
         rerun_results.extend(fresh_samples)
 
     return rerun_results
Modified: lnt/trunk/lnt/tests/test_suite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/test_suite.py?rev=309063&r1=309062&r2=309063&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/test_suite.py (original)
+++ lnt/trunk/lnt/tests/test_suite.py Tue Jul 25 17:07:52 2017
@@ -695,6 +695,7 @@ class TestSuiteTest(BuiltinTest):
             ignore.append('compile')
 
         profiles_to_import = []
+        no_errors = True
 
         for test_data in data['tests']:
             raw_name = test_data['name'].split(' :: ', 1)[1]
@@ -738,12 +739,14 @@ class TestSuiteTest(BuiltinTest):
                     lnt.testing.TestSamples(name + '.compile.status',
                                             [lnt.testing.FAIL],
                                             test_info))
+                no_errors = False
 
             elif not is_pass:
                 test_samples.append(
                     lnt.testing.TestSamples(name + '.exec.status',
                                             [self._get_lnt_code(test_data['code'])],
                                             test_info))
+                no_errors = False
 
         # Now import the profiles in parallel.
         if profiles_to_import:
@@ -769,7 +772,8 @@ class TestSuiteTest(BuiltinTest):
         # FIXME: Add more machine info!
         run_info = {
-            'tag': 'nts'
+            'tag': 'nts',
+            'no_errors': no_errors,
         }
         run_info.update(self._get_cc_info(cmake_vars))
         run_info['run_order'] = run_info['inferred_run_order']
Added: lnt/trunk/tests/lnttool/Inputs/error_0.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/Inputs/error_0.json?rev=309063&view=auto
==============================================================================
--- lnt/trunk/tests/lnttool/Inputs/error_0.json (added)
+++ lnt/trunk/tests/lnttool/Inputs/error_0.json Tue Jul 25 17:07:52 2017
@@ -0,0 +1 @@
+NOT A JSON FILE
Added: lnt/trunk/tests/lnttool/Inputs/error_1.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/Inputs/error_1.json?rev=309063&view=auto
==============================================================================
--- lnt/trunk/tests/lnttool/Inputs/error_1.json (added)
+++ lnt/trunk/tests/lnttool/Inputs/error_1.json Tue Jul 25 17:07:52 2017
@@ -0,0 +1,4 @@
+{
+    "there is no run section": {
+    }
+}
Added: lnt/trunk/tests/lnttool/Inputs/error_2.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/Inputs/error_2.json?rev=309063&view=auto
==============================================================================
--- lnt/trunk/tests/lnttool/Inputs/error_2.json (added)
+++ lnt/trunk/tests/lnttool/Inputs/error_2.json Tue Jul 25 17:07:52 2017
@@ -0,0 +1,7 @@
+{
+    "run": {
+        "foo": "bar",
+        "no_errors": false,
+        "Baz": 42
+    }
+}
Added: lnt/trunk/tests/lnttool/Inputs/error_3.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/Inputs/error_3.json?rev=309063&view=auto
==============================================================================
--- lnt/trunk/tests/lnttool/Inputs/error_3.json (added)
+++ lnt/trunk/tests/lnttool/Inputs/error_3.json Tue Jul 25 17:07:52 2017
@@ -0,0 +1,7 @@
+{
+    "Run": {
+        "foo": "bar",
+        "no_errors": false,
+        "Baz": 42
+    }
+}
Added: lnt/trunk/tests/lnttool/Inputs/error_4.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/Inputs/error_4.json?rev=309063&view=auto
==============================================================================
--- lnt/trunk/tests/lnttool/Inputs/error_4.json (added)
+++ lnt/trunk/tests/lnttool/Inputs/error_4.json Tue Jul 25 17:07:52 2017
@@ -0,0 +1,9 @@
+{
+    "Run": {
+        "foo": "bar",
+        "Info": {
+            "no_errors": false
+        },
+        "Baz": 42
+    }
+}
Added: lnt/trunk/tests/lnttool/Inputs/no_error_0.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/Inputs/no_error_0.json?rev=309063&view=auto
==============================================================================
--- lnt/trunk/tests/lnttool/Inputs/no_error_0.json (added)
+++ lnt/trunk/tests/lnttool/Inputs/no_error_0.json Tue Jul 25 17:07:52 2017
@@ -0,0 +1,7 @@
+{
+    "run": {
+        "foo": "bar",
+        "no_errors": true,
+        "Baz": 42
+    }
+}
Added: lnt/trunk/tests/lnttool/Inputs/no_error_1.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/Inputs/no_error_1.json?rev=309063&view=auto
==============================================================================
--- lnt/trunk/tests/lnttool/Inputs/no_error_1.json (added)
+++ lnt/trunk/tests/lnttool/Inputs/no_error_1.json Tue Jul 25 17:07:52 2017
@@ -0,0 +1,9 @@
+{
+    "Run": {
+        "foo": "bar",
+        "Info": {
+            "no_errors": true
+        },
+        "Baz": 42
+    }
+}
Added: lnt/trunk/tests/lnttool/check_no_errors.shtest
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/lnttool/check_no_errors.shtest?rev=309063&view=auto
==============================================================================
--- lnt/trunk/tests/lnttool/check_no_errors.shtest (added)
+++ lnt/trunk/tests/lnttool/check_no_errors.shtest Tue Jul 25 17:07:52 2017
@@ -0,0 +1,8 @@
+# RUN: lnt check-no-errors %S/Inputs/no_error_0.json
+# RUN: lnt check-no-errors %S/Inputs/no_error_1.json
+# RUN: not lnt check-no-errors %S/Inputs/error_0.json
+# RUN: not lnt check-no-errors %S/Inputs/error_1.json
+# RUN: not lnt check-no-errors %S/Inputs/error_2.json
+# RUN: not lnt check-no-errors %S/Inputs/error_3.json
+# RUN: not lnt check-no-errors %S/Inputs/error_4.json
+# RUN: not lnt check-no-errors %S/Inputs/error_5.json
Modified: lnt/trunk/tests/runtest/test_suite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/test_suite.py?rev=309063&r1=309062&r2=309063&view=diff
==============================================================================
--- lnt/trunk/tests/runtest/test_suite.py (original)
+++ lnt/trunk/tests/runtest/test_suite.py Tue Jul 25 17:07:52 2017
@@ -20,6 +20,7 @@
 # RUN: FileCheck --check-prefix CHECK-CSV < %t.SANDBOX/build/test-results.csv %s
 # RUN: FileCheck --check-prefix CHECK-CHECKFORMAT < %t.checkformat %s
 
+# CHECK-REPORT: "no_errors": false,
 # CHECK-REPORT: "run_order": "154331"
 # CHECK-REPORT: "Name": "nts.{{[^.]+}}.compile"
 # CHECK-REPORT: "Name": "nts.{{[^.]+}}.compile.status"