[LNT] r315099 - runtest/test_suite: Make lit result parsing more robust
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 6 13:22:55 PDT 2017
Author: matze
Date: Fri Oct 6 13:22:55 2017
New Revision: 315099
URL: http://llvm.org/viewvc/llvm-project?rev=315099&view=rev
Log:
runtest/test_suite: Make lit result parsing more robust
- Check lit NOEXE status instead of using a heuristic to search for the
executable. This fixes invalid failure reports in situations in which
the executable name does not correspond to the test name.
- Sort metrics to get deterministic output
- Make name parsing logic more robust
Added:
lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-compile
- copied, changed from r314962, lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails
lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-exec
- copied, changed from r314962, lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails
lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-compile.json
lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-exec.json
Removed:
lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails
Modified:
lnt/trunk/lnt/tests/test_suite.py
lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-cmake
lnt/trunk/tests/runtest/test_suite.py
Modified: lnt/trunk/lnt/tests/test_suite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/lnt/tests/test_suite.py?rev=315099&r1=315098&r2=315099&view=diff
==============================================================================
--- lnt/trunk/lnt/tests/test_suite.py (original)
+++ lnt/trunk/lnt/tests/test_suite.py Fri Oct 6 13:22:55 2017
@@ -610,16 +610,12 @@ class TestSuiteTest(BuiltinTest):
def _get_lnt_code(self, code):
return {'PASS': lnt.testing.PASS,
'FAIL': lnt.testing.FAIL,
+ 'NOEXE': lnt.testing.FAIL,
'XFAIL': lnt.testing.XFAIL,
'XPASS': lnt.testing.FAIL,
'UNRESOLVED': lnt.testing.FAIL
}[code]
- def _test_failed_to_compile(self, raw_name, path):
- # FIXME: Do we need to add ".exe" in windows?
- name = raw_name.rsplit('.test', 1)[0]
- return not os.path.exists(os.path.join(path, name))
-
def _extract_cmake_vars_from_cache(self):
assert self.configured is True
cmake_lah_output = self._check_output(
@@ -699,11 +695,21 @@ class TestSuiteTest(BuiltinTest):
no_errors = True
for test_data in data['tests']:
- raw_name = test_data['name'].split(' :: ', 1)[1]
- name = 'nts.' + raw_name.rsplit('.test', 1)[0]
- is_pass = self._is_pass_code(test_data['code'])
+ code = test_data['code']
+ raw_name = test_data['name']
+
+ split_name = raw_name.split(' :: ', 1)
+ if len(split_name) > 1:
+ name = split_name[1]
+ else:
+ name = split_name[0]
+
+ if name.endswith('.test'):
+ name = name[:-5]
+ name = 'nts.' + name
# If --single-result is given, exit based on --single-result-predicate
+ is_pass = self._is_pass_code(code)
if self.opts.single_result and \
raw_name == self.opts.single_result + '.test':
env = {'status': is_pass}
@@ -716,7 +722,7 @@ class TestSuiteTest(BuiltinTest):
sys.exit(0 if status else 1)
if 'metrics' in test_data:
- for k, v in test_data['metrics'].items():
+ for k, v in sorted(test_data['metrics'].items()):
if k == 'profile':
profiles_to_import.append((name, v))
continue
@@ -735,7 +741,7 @@ class TestSuiteTest(BuiltinTest):
test_info,
LIT_METRIC_CONV_FN[k]))
- if self._test_failed_to_compile(raw_name, path):
+ if code == 'NOEXE':
test_samples.append(
lnt.testing.TestSamples(name + '.compile.status',
[lnt.testing.FAIL],
Modified: lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-cmake
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-cmake?rev=315099&r1=315098&r2=315099&view=diff
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-cmake (original)
+++ lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-cmake Fri Oct 6 13:22:55 2017
@@ -52,7 +52,13 @@ then
echo CMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -g
echo CMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g
else
- cp $CMAKE_SRC_DIR/fake-test $CMAKE_SRC_DIR/fake-results.json $CMAKE_SRC_DIR/fake-results-profile.json .
+ cp \
+ $CMAKE_SRC_DIR/fake-test \
+ $CMAKE_SRC_DIR/fake-results.json \
+ $CMAKE_SRC_DIR/fake-results-fail-compile.json \
+ $CMAKE_SRC_DIR/fake-results-fail-exec.json \
+ $CMAKE_SRC_DIR/fake-results-profile.json \
+ .
echo "Dummy" > CMakeCache.txt
echo CMAKE_C_COMPILER:FILEPATH=$CMAKE_C_COMPILER >> CMakeCache.txt
echo CMAKE_CXX_COMPILER:FILEPATH=$CMAKE_CXX_COMPILER >> CMakeCache.txt
@@ -60,6 +66,12 @@ else
echo CMAKE_C_COMPILER_TARGET:STRING=$CMAKE_C_COMPILER_TARGET >> CMakeCache.txt
fi
mkdir subtest
- cp $CMAKE_SRC_DIR/fake-test $CMAKE_SRC_DIR/fake-results.json subtest
+ cp \
+ $CMAKE_SRC_DIR/fake-test \
+ $CMAKE_SRC_DIR/fake-results.json \
+ $CMAKE_SRC_DIR/fake-results-fail-compile.json \
+ $CMAKE_SRC_DIR/fake-results-fail-exec.json \
+ $CMAKE_SRC_DIR/fake-results-profile.json \
+ subtest
fi
exit 0
Removed: lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails?rev=315098&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails (original)
+++ lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails (removed)
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-import argparse, shutil, sys
-parser = argparse.ArgumentParser(description='dummy lit')
-parser.add_argument('-o')
-parser.add_argument('-j', type=int)
-parser.add_argument('bar')
-args, _ = parser.parse_known_args()
-
-shutil.copyfile(args.bar + '/fake-results.json', args.o)
-sys.exit(1)
\ No newline at end of file
Copied: lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-compile (from r314962, lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails)
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-compile?p2=lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-compile&p1=lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails&r1=314962&r2=315099&rev=315099&view=diff
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails (original)
+++ lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-compile Fri Oct 6 13:22:55 2017
@@ -7,5 +7,5 @@ parser.add_argument('-j', type=int)
parser.add_argument('bar')
args, _ = parser.parse_known_args()
-shutil.copyfile(args.bar + '/fake-results.json', args.o)
-sys.exit(1)
\ No newline at end of file
+shutil.copyfile(args.bar + '/fake-results-fail-compile.json', args.o)
+sys.exit(1)
Copied: lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-exec (from r314962, lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails)
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-exec?p2=lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-exec&p1=lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails&r1=314962&r2=315099&rev=315099&view=diff
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails (original)
+++ lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-lit-fails-exec Fri Oct 6 13:22:55 2017
@@ -7,5 +7,5 @@ parser.add_argument('-j', type=int)
parser.add_argument('bar')
args, _ = parser.parse_known_args()
-shutil.copyfile(args.bar + '/fake-results.json', args.o)
-sys.exit(1)
\ No newline at end of file
+shutil.copyfile(args.bar + '/fake-results-fail-exec.json', args.o)
+sys.exit(1)
Added: lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-compile.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-compile.json?rev=315099&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-compile.json (added)
+++ lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-compile.json Fri Oct 6 13:22:55 2017
@@ -0,0 +1,23 @@
+{
+ "tests": [
+ {
+ "name": "test-suite :: bar",
+ "elapsed": "1.0",
+ "code": "NOEXE",
+ "metrics": {
+ }
+ },
+ {
+ "name": "test-suite :: foo",
+ "elapsed": "1.0",
+ "code": "PASS",
+ "metrics": {
+ "compile_time": 1.3,
+ "exec_time": 1.4,
+ "score": 1.5,
+ "hash": "xyz",
+ "unknown": "unknown"
+ }
+ }
+ ]
+}
Added: lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-exec.json
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-exec.json?rev=315099&view=auto
==============================================================================
--- lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-exec.json (added)
+++ lnt/trunk/tests/runtest/Inputs/test-suite-cmake/fake-results-fail-exec.json Fri Oct 6 13:22:55 2017
@@ -0,0 +1,28 @@
+{
+ "tests": [
+ {
+ "name": "test-suite :: baz",
+ "elapsed": "1.0",
+ "code": "FAIL",
+ "metrics": {
+ "compile_time": 2.3,
+ "exec_time": 3.4,
+ "score": 7.1,
+ "hash": "xyz",
+ "unknown": "unknown"
+ }
+ },
+ {
+ "name": "test-suite :: foo",
+ "elapsed": "1.0",
+ "code": "PASS",
+ "metrics": {
+ "compile_time": 1.3,
+ "exec_time": 1.4,
+ "score": 1.5,
+ "hash": "xyz",
+ "unknown": "unknown"
+ }
+ }
+ ]
+}
Modified: lnt/trunk/tests/runtest/test_suite.py
URL: http://llvm.org/viewvc/llvm-project/lnt/trunk/tests/runtest/test_suite.py?rev=315099&r1=315098&r2=315099&view=diff
==============================================================================
--- lnt/trunk/tests/runtest/test_suite.py (original)
+++ lnt/trunk/tests/runtest/test_suite.py Fri Oct 6 13:22:55 2017
@@ -20,10 +20,12 @@
# RUN: FileCheck --check-prefix CHECK-CSV < %t.SANDBOX/build/test-results.csv %s
# RUN: FileCheck --check-prefix CHECK-CHECKFORMAT < %t.checkformat %s
-# CHECK-REPORT: "no_errors": "False",
+# CHECK-REPORT: "no_errors": "True",
# CHECK-REPORT: "run_order": "154331"
-# CHECK-REPORT: "Name": "nts.{{[^.]+}}.compile"
-# CHECK-REPORT: "Name": "nts.{{[^.]+}}.compile.status"
+# CHECK-REPORT: "Name": "nts.foo.compile"
+# CHECK-REPORT: "Name": "nts.foo.exec"
+# CHECK-REPORT: "Name": "nts.foo.hash"
+# CHECK-REPORT: "Name": "nts.foo.score"
#
# CHECK-BASIC: Inferred C++ compiler under test
# CHECK-BASIC: Configuring
@@ -320,10 +322,29 @@
# RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \
# RUN: --use-cmake %S/Inputs/test-suite-cmake/fake-cmake \
# RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \
-# RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit-fails \
+# RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit-fails-compile \
# RUN: --run-order=123 > %t.log 2> %t.err
-# RUN: FileCheck --check-prefix CHECK-RESULTS-FAIL < %t.SANDBOX/build/report.json %s
-# CHECK-RESULTS-FAIL: "run_order": "123"
+# RUN: FileCheck --check-prefix CHECK-RESULTS-FAIL-COMPILE < %t.SANDBOX/build/report.json %s
+# CHECK-RESULTS-FAIL-COMPILE: "no_errors": "False"
+# CHECK-RESULTS-FAIL-COMPILE: "run_order": "123"
+# CHECK-RESULTS-FAIL-COMPILE: "Name": "nts.bar.compile.status"
+
+# Check that with a test failing at execution time (exit code FAIL), a report is still produced.
+# RUN: rm -f %t.SANDBOX/build/report.json
+# RUN: lnt runtest test-suite \
+# RUN: --sandbox %t.SANDBOX \
+# RUN: --no-timestamp \
+# RUN: --no-configure \
+# RUN: --test-suite %S/Inputs/test-suite-cmake \
+# RUN: --cc %{shared_inputs}/FakeCompilers/clang-r154331 \
+# RUN: --use-cmake %S/Inputs/test-suite-cmake/fake-cmake \
+# RUN: --use-make %S/Inputs/test-suite-cmake/fake-make \
+# RUN: --use-lit %S/Inputs/test-suite-cmake/fake-lit-fails-exec \
+# RUN: --run-order=123 > %t.log 2> %t.err
+# RUN: FileCheck --check-prefix CHECK-RESULTS-FAIL-EXEC < %t.SANDBOX/build/report.json %s
+# CHECK-RESULTS-FAIL-EXEC: "no_errors": "False"
+# CHECK-RESULTS-FAIL-EXEC: "run_order": "123"
+# CHECK-RESULTS-FAIL-EXEC: "Name": "nts.baz.exec.status"
# Check a run of test-suite using a cmake cache
# Also make sure we get: compiler defines, cache, other defines on the cmake
More information about the llvm-commits
mailing list