[Lldb-commits] [lldb] r282990 - test infra: clear file-charged issues on rerun of file
Todd Fiala via lldb-commits
lldb-commits at lists.llvm.org
Fri Sep 30 17:17:08 PDT 2016
Author: tfiala
Date: Fri Sep 30 19:17:08 2016
New Revision: 282990
URL: http://llvm.org/viewvc/llvm-project?rev=282990&view=rev
Log:
test infra: clear file-charged issues on rerun of file
This change addresses a corner-case bug in the test
infrastructure where a test file times out *outside*
of any running test method. In that case, the issue
is charged to the file rather than to a test method
within it, so when the file is later rerun
successfully, none of the test-method-level successes
clears the file-level issue.
This change fixes that: for every test file being
rerun (whether because it is marked flaky or via the
--rerun-all-issues flag), the test infrastructure now
searches that file's results for file-level issues
and clears each one it finds.
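In outline, the fix behaves like the following minimal sketch (a
paraphrase of the clear_file_level_issues() method added below, not
the verbatim implementation; the result_events key format is inferred
from the FILE_LEVEL_KEY_RE regex in the patch):

    import os
    import re

    # File-level keys end at ".py"; method-level keys carry a
    # ":Class.method" suffix (see FILE_LEVEL_KEY_RE below).
    FILE_LEVEL_KEY_RE = re.compile(r"^(.+\.py)[^.:]*$")

    def clear_file_level_issues(result_events, tests_for_rerun,
                                error_statuses):
        """Drop file-charged issues for files about to be rerun."""
        rerun_basenames = set(
            os.path.basename(path) for path in tests_for_rerun)
        cleared = 0
        for key in list(result_events):
            event = result_events[key]
            if (FILE_LEVEL_KEY_RE.match(key) is not None
                    and event.get("status") in error_statuses
                    and os.path.basename(key) in rerun_basenames):
                del result_events[key]
                cleared += 1
        return cleared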
A test of this feature is added to issue_verification,
using that directory's technique of moving the
*.py.park file to *.py to get end-to-end validation.
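The .park suffix keeps these intentionally failing tests out of
normal runs; to exercise one end to end, the parked file is renamed
into place before running the suite, along these lines (the bare
filename here is illustrative):

    import shutil

    # Activate the parked test so test discovery picks it up.
    shutil.move("TestRerunFileLevelTimeout.py.park",
                "TestRerunFileLevelTimeout.py")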
This change also adds a .gitignore entry for pyenv
project-level files and fixes a few minor PEP 8
formatting violations in the files I touched.
Fixes:
llvm.org/pr27423
Added:
lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFileLevelTimeout.py.park
Modified:
lldb/trunk/.gitignore
lldb/trunk/packages/Python/lldbsuite/test/dosep.py
lldb/trunk/packages/Python/lldbsuite/test_event/formatter/results_formatter.py
Modified: lldb/trunk/.gitignore
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/.gitignore?rev=282990&r1=282989&r2=282990&view=diff
==============================================================================
--- lldb/trunk/.gitignore (original)
+++ lldb/trunk/.gitignore Fri Sep 30 19:17:08 2016
@@ -16,6 +16,8 @@
# Byte compiled python modules.
*.pyc
*.pyproj
+# pyenv settings
+.python-version
*.sln
*.suo
# vim swap files
Modified: lldb/trunk/packages/Python/lldbsuite/test/dosep.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dosep.py?rev=282990&r1=282989&r2=282990&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/dosep.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/dosep.py Fri Sep 30 19:17:08 2016
@@ -1151,8 +1151,11 @@ def inprocess_exec_test_runner(test_work
runner_context)
# We're always worker index 0
+ def get_single_worker_index():
+ return 0
+
global GET_WORKER_INDEX
- GET_WORKER_INDEX = lambda: 0
+ GET_WORKER_INDEX = get_single_worker_index
# Run the listener and related channel maps in a separate thread.
# global RUNNER_PROCESS_ASYNC_MAP
@@ -1443,7 +1446,8 @@ def adjust_inferior_options(dotest_argv)
# every dotest invocation from creating its own directory
import datetime
# The windows platforms don't like ':' in the pathname.
- timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
+ timestamp_started = (datetime.datetime.now()
+ .strftime("%Y-%m-%d-%H_%M_%S"))
dotest_argv.append('-s')
dotest_argv.append(timestamp_started)
dotest_options.s = timestamp_started
@@ -1627,7 +1631,8 @@ def main(num_threads, test_subdir, test_
test_subdir = os.path.join(test_directory, test_subdir)
if not os.path.isdir(test_subdir):
print(
- 'specified test subdirectory {} is not a valid directory\n'.format(test_subdir))
+ 'specified test subdirectory {} is not a valid directory\n'
+ .format(test_subdir))
else:
test_subdir = test_directory
@@ -1696,6 +1701,12 @@ def main(num_threads, test_subdir, test_
print("\n{} test files marked for rerun\n".format(
rerun_file_count))
+ # Clear errors charged to any of the files of the tests that
+ # we are rerunning.
+ # https://llvm.org/bugs/show_bug.cgi?id=27423
+ results_formatter.clear_file_level_issues(tests_for_rerun,
+ sys.stdout)
+
# Check if the number of files exceeds the max cutoff. If so,
# we skip the rerun step.
if rerun_file_count > configuration.rerun_max_file_threshold:
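For context, the new call sits just before the rerun-threshold check
in main(); a condensed, hypothetical paraphrase of that flow (names
taken from the hunk above):

    import sys

    def rerun_phase(results_formatter, tests_for_rerun, configuration):
        rerun_file_count = len(tests_for_rerun)
        print("\n{} test files marked for rerun\n".format(
            rerun_file_count))

        # Clear stale file-charged issues up front; if a rerun fails
        # again, it simply records a fresh file-level issue.
        results_formatter.clear_file_level_issues(tests_for_rerun,
                                                  sys.stdout)

        # Skip the rerun step when too many files failed.
        if rerun_file_count > configuration.rerun_max_file_threshold:
            print("skipping rerun: file count exceeds threshold")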
Added: lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFileLevelTimeout.py.park
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFileLevelTimeout.py.park?rev=282990&view=auto
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFileLevelTimeout.py.park (added)
+++ lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFileLevelTimeout.py.park Fri Sep 30 19:17:08 2016
@@ -0,0 +1,33 @@
+"""Tests that a timeout is detected by the testbot."""
+from __future__ import print_function
+
+import atexit
+import time
+
+from lldbsuite.test import decorators
+import rerun_base
+
+
+class RerunTimeoutTestCase(rerun_base.RerunBaseTestCase):
+ def maybe_do_timeout(self):
+ # Do the timeout here if we're going to time out.
+ if self.should_generate_issue():
+ # We time out this time.
+ while True:
+ try:
+ time.sleep(1)
+ except:
+ print("ignoring exception during sleep")
+
+ # call parent
+ super(RerunTimeoutTestCase, self).tearDown()
+
+ @decorators.no_debug_info_test
+ def test_timeout_file_level_timeout_rerun_succeeds(self):
+ """Tests that file-level timeout is cleared on rerun."""
+
+ # This test just needs to pass. It is the exit hook (outside
+ # the test method) that will time out.
+
+ # Add the exit handler that will time out the first time around.
+ atexit.register(RerunTimeoutTestCase.maybe_do_timeout, self)
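The atexit handler is what makes this a file-level timeout: it runs
only after every test method has finished and reported its result, so
the hang cannot be charged to any method. A standalone illustration
of that ordering (hypothetical, not part of the patch):

    import atexit

    def after_all_tests():
        # atexit handlers fire after every test method has run and
        # reported its result; a hang here (as in the parked test
        # above) therefore times out the file, not any one method.
        print("running outside any test method")

    atexit.register(after_all_tests)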
Modified: lldb/trunk/packages/Python/lldbsuite/test_event/formatter/results_formatter.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test_event/formatter/results_formatter.py?rev=282990&r1=282989&r2=282990&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test_event/formatter/results_formatter.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test_event/formatter/results_formatter.py Fri Sep 30 19:17:08 2016
@@ -14,6 +14,7 @@ from __future__ import absolute_import
# System modules
import argparse
import os
+import re
import sys
import threading
@@ -27,6 +28,9 @@ from ..event_builder import EventBuilder
import lldbsuite
+FILE_LEVEL_KEY_RE = re.compile(r"^(.+\.py)[^.:]*$")
+
+
class ResultsFormatter(object):
"""Provides interface to formatting test results out to a file-like object.
@@ -207,6 +211,26 @@ class ResultsFormatter(object):
component_count += 1
return key
+ @classmethod
+ def _is_file_level_issue(cls, key, event):
+ """Returns whether a given key represents a file-level event.
+
+ @param cls this class. Unused, but following PEP8 for
+ preferring @classmethod over @staticmethod.
+
+ @param key the key for the issue being tested.
+
+ @param event the event for the issue being tested.
+
+ @return True when the given key (as made by _make_key())
+ represents an event that is at the test file level (i.e.
+ it isn't scoped to a test class or method).
+ """
+ if key is None:
+ return False
+ else:
+ return FILE_LEVEL_KEY_RE.match(key) is not None
+
def _mark_test_as_expected_failure(self, test_result_event):
key = self._make_key(test_result_event)
if key is not None:
@@ -321,8 +345,8 @@ class ResultsFormatter(object):
# after this check below since this check may rewrite
# the event type
if event_type == EventBuilder.TYPE_JOB_RESULT:
- # Possibly convert the job status (timeout, exceptional exit)
- # to an appropriate test_result event.
# Possibly convert the job status (timeout,
# exceptional exit) to an appropriate test_result event.
self._maybe_remap_job_result_event(test_event)
event_type = test_event.get("event", "")
@@ -335,10 +359,10 @@ class ResultsFormatter(object):
if event_type == "terminate":
self.terminate_called = True
elif event_type in EventBuilder.RESULT_TYPES:
- # Keep track of event counts per test/job result status type.
- # The only job (i.e. inferior process) results that make it
- # here are ones that cannot be remapped to the most recently
- # started test for the given worker index.
+ # Keep track of event counts per test/job result status
+ # type. The only job (i.e. inferior process) results that
+ # make it here are ones that cannot be remapped to the most
+ # recently started test for the given worker index.
status = test_event["status"]
self.result_status_counts[status] += 1
# Clear the most recently started test for the related
@@ -349,8 +373,8 @@ class ResultsFormatter(object):
if status in EventBuilder.TESTRUN_ERROR_STATUS_VALUES:
# A test/job status value in any of those status values
- # causes a testrun failure. If such a test fails, check
- # whether it can be rerun. If it can be rerun, add it
+ # causes a testrun failure. If such a test fails, check
+ # whether it can be rerun. If it can be rerun, add it
# to the rerun job.
self._maybe_add_test_to_rerun_list(test_event)
@@ -361,14 +385,13 @@ class ResultsFormatter(object):
"failed to find test filename for "
"test event {}".format(test_event))
- # Save the most recent test event for the test key.
- # This allows a second test phase to overwrite the most
- # recent result for the test key (unique per method).
- # We do final reporting at the end, so we'll report based
- # on final results.
- # We do this so that a re-run caused by, perhaps, the need
- # to run a low-load, single-worker test run can have the final
- # run's results to always be used.
+ # Save the most recent test event for the test key. This
+ # allows a second test phase to overwrite the most recent
+ # result for the test key (unique per method). We do final
+ # reporting at the end, so we'll report based on final
+ # results. We do this so that a re-run caused by, perhaps,
+ # the need to run a low-load, single-worker test run can
+ # have the final run's results to always be used.
if test_key in self.result_events:
# We are replacing the result of something that was
# already counted by the base class. Remove the double
@@ -394,7 +417,8 @@ class ResultsFormatter(object):
elif event_type == EventBuilder.TYPE_MARK_TEST_RERUN_ELIGIBLE:
self._mark_test_for_rerun_eligibility(test_event)
- elif event_type == EventBuilder.TYPE_MARK_TEST_EXPECTED_FAILURE:
+ elif (event_type ==
+ EventBuilder.TYPE_MARK_TEST_EXPECTED_FAILURE):
self._mark_test_as_expected_failure(test_event)
def set_expected_timeouts_by_basename(self, basenames):
@@ -716,3 +740,50 @@ class ResultsFormatter(object):
for key, event in events_by_key:
out_file.write("key: {}\n".format(key))
out_file.write("event: {}\n".format(event))
+
+ def clear_file_level_issues(self, tests_for_rerun, out_file):
+ """Clear file-charged issues in any of the test rerun files.
+
+ @param tests_for_rerun the list of test-dir-relative paths that have
+ functions that require rerunning. This is the test list
+ returned by the results_formatter at the end of the previous run.
+
+ @return the number of file-level issues that were cleared.
+ """
+ if tests_for_rerun is None:
+ return 0
+
+ cleared_file_level_issues = 0
+ # Find the unique set of files that are covered by the given tests
+ # that are to be rerun. We derive the files that are eligible for
+ # having their markers cleared, because we support running in a mode
+ # where only flaky tests are eligible for rerun. If the file-level
+ # issue occurred in a file that was not marked as flaky, then we
+ # shouldn't be clearing the event here.
+ basename_set = set()
+ for test_file_relpath in tests_for_rerun:
+ basename_set.add(os.path.basename(test_file_relpath))
+
+ # Find all the keys for file-level events that are considered
+ # test issues.
+ file_level_issues = [(key, event)
+ for key, event in self.result_events.items()
+ if ResultsFormatter._is_file_level_issue(
+ key, event)
+ and event.get("status", "") in
+ EventBuilder.TESTRUN_ERROR_STATUS_VALUES]
+
+ # Now remove any file-level error for the given test base name.
+ for key, event in file_level_issues:
+ # If the given file base name is in the rerun set, then we
+ # clear that entry from the result set.
+ if os.path.basename(key) in basename_set:
+ self.result_events.pop(key, None)
+ cleared_file_level_issues += 1
+ if out_file is not None:
+ out_file.write(
+ "clearing file-level issue for file {} "
+ "(issue type: {})"
+ .format(key, event.get("status", "<unset-status>")))
+
+ return cleared_file_level_issues
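For reference, FILE_LEVEL_KEY_RE separates the two key shapes like
this (a quick demonstration; the method-scoped key is an illustrative
example, not taken from the patch):

    import re

    FILE_LEVEL_KEY_RE = re.compile(r"^(.+\.py)[^.:]*$")

    # A bare file path is a file-level key...
    assert FILE_LEVEL_KEY_RE.match("foo/TestFoo.py")
    # ...while a method-scoped key is not: the ":Class.method"
    # suffix contains characters excluded by [^.:]*.
    assert not FILE_LEVEL_KEY_RE.match(
        "foo/TestFoo.py:FooTestCase.test_bar")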