[Lldb-commits] [lldb] r255543 - test infra: enable single-worker rerun phase for flakey tests.

Todd Fiala via lldb-commits lldb-commits at lists.llvm.org
Mon Dec 14 13:28:47 PST 2015


Author: tfiala
Date: Mon Dec 14 15:28:46 2015
New Revision: 255543

URL: http://llvm.org/viewvc/llvm-project?rev=255543&view=rev
Log:
test infra: enable single-worker rerun phase for flakey tests.

Use of --rerun-all-issues enables any failing test method, not just
test methods marked with the flakey decorator, to be rerun.

Currently this does not change the flakey logic's immediate rerun
attempt.  I want to make sure this doesn't cause any significant issues
before changing that part.

The rerun reporting is only known to work properly with the
default (new) BasicResultsFormatter reporting.  Once we work out
any issues, I'll go back and make sure the curses output handles
it properly as well.
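
As a usage sketch, a run exercising both of the new options might look
like the following (path and values hypothetical; both flags are added
by this commit in dotest_args.py):

    python dotest.py --rerun-all-issues --rerun-max-file-threshold 50 \
        packages/Python/lldbsuite/test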

Added:
    lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park
    lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park
    lldb/trunk/packages/Python/lldbsuite/test/issue_verification/rerun_base.py
Modified:
    lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py
    lldb/trunk/packages/Python/lldbsuite/test/configuration.py
    lldb/trunk/packages/Python/lldbsuite/test/dosep.py
    lldb/trunk/packages/Python/lldbsuite/test/dotest.py
    lldb/trunk/packages/Python/lldbsuite/test/dotest_args.py
    lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py

Modified: lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py?rev=255543&r1=255542&r2=255543&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py Mon Dec 14 15:28:46 2015
@@ -31,6 +31,11 @@ class BasicResultsFormatter(result_forma
             action="store_true",
             help=('cause unknown test events to generate '
                   'a python assert.  Default is to ignore.'))
+        parser.add_argument(
+            "--dump-results",
+            action="store_true",
+            help=('dump the raw results data after printing '
+                  'the summary output.'))
         return parser
 
     def __init__(self, out_file, options):
@@ -56,21 +61,21 @@ class BasicResultsFormatter(result_forma
         if event_type is None:
             return
 
-        if event_type == "terminate":
+        if event_type == EventBuilder.TYPE_SESSION_TERMINATE:
             self._finish_output()
-        elif event_type == "test_start":
+        elif event_type == EventBuilder.TYPE_TEST_START:
             self.track_start_time(
                 test_event["test_class"],
                 test_event["test_name"],
                 test_event["event_time"])
-        elif event_type == "test_result":
+        elif event_type == EventBuilder.TYPE_TEST_RESULT:
             # Build the test key.
             test_key = test_event.get("test_filename", None)
             if test_key is None:
                 raise Exception(
                     "failed to find test filename for test event {}".format(
                         test_event))
-            test_key += ".{}.{}".format(
+            test_key += ":{}.{}".format(
                 test_event.get("test_class", ""),
                 test_event.get("test_name", ""))
 
@@ -91,14 +96,8 @@ class BasicResultsFormatter(result_forma
                 self.result_status_counts[old_status] -= 1
 
                 self.test_method_rerun_count += 1
-                if self.options.warn_on_multiple_results:
-                    print(
-                        "WARNING: test key {} already has a result: "
-                        "old:{} new:{}",
-                        self.result_events[test_key],
-                        test_event)
             self.result_events[test_key] = test_event
-        elif event_type == "job_result":
+        elif event_type == EventBuilder.TYPE_JOB_RESULT:
             # Build the job key.
             test_key = test_event.get("test_filename", None)
             if test_key is None:
@@ -336,6 +335,15 @@ class BasicResultsFormatter(result_forma
         self._print_summary_counts(
             categories, result_events_by_status, extra_results)
 
+        if self.options.dump_results:
+            # Debug dump of the key/result info for all categories.
+            self._print_banner("Results Dump")
+            for status, events_by_key in result_events_by_status.items():
+                print("\nSTATUS: {}".format(status))
+                for key, event in events_by_key:
+                    print("key:   {}".format(key))
+                    print("event: {}".format(event))
+
     def _finish_output(self):
         """Prepare and write the results report as all incoming events have
         arrived.
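
The test_result bookkeeping above boils down to the following sketch
(a simplification: the containing class is elided, and the "status"
key name on the prior event is an assumption):

    # A rerun result replaces the earlier event recorded under the same
    # key, and the old status is un-counted so the summary totals
    # reflect only the final outcome of each test method.
    test_key = "{}:{}.{}".format(test_filename, test_class, test_name)
    if test_key in self.result_events:
        old_status = self.result_events[test_key]["status"]
        self.result_status_counts[old_status] -= 1
        self.test_method_rerun_count += 1
    self.result_events[test_key] = test_event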

Modified: lldb/trunk/packages/Python/lldbsuite/test/configuration.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/configuration.py?rev=255543&r1=255542&r2=255543&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/configuration.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/configuration.py Mon Dec 14 15:28:46 2015
@@ -141,6 +141,7 @@ test_result = None
 
 # Test rerun configuration vars
 rerun_all_issues = False
+rerun_max_file_threshold = 0
 
 # The names of all tests. Used to assert we don't have two tests with the same base name.
 all_tests = set()

Modified: lldb/trunk/packages/Python/lldbsuite/test/dosep.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dosep.py?rev=255543&r1=255542&r2=255543&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/dosep.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/dosep.py Mon Dec 14 15:28:46 2015
@@ -49,8 +49,10 @@ import threading
 from six.moves import queue
 
 # Our packages and modules
+import lldbsuite
 import lldbsuite.support.seven as seven
 
+from . import configuration
 from . import dotest_channels
 from . import dotest_args
 from . import result_formatter
@@ -416,21 +418,20 @@ def call_with_timeout(
 def process_dir(root, files, dotest_argv, inferior_pid_events):
     """Examine a directory for tests, and invoke any found within it."""
     results = []
-    for name in files:
+    for (base_name, full_test_path) in files:
         import __main__ as main
         script_file = main.__file__
         command = ([sys.executable, script_file] +
                    dotest_argv +
-                   ["--inferior", "-p", name, root])
+                   ["--inferior", "-p", base_name, root])
 
-        timeout_name = os.path.basename(os.path.splitext(name)[0]).upper()
+        timeout_name = os.path.basename(os.path.splitext(base_name)[0]).upper()
 
         timeout = (os.getenv("LLDB_%s_TIMEOUT" % timeout_name) or
                    getDefaultTimeout(dotest_options.lldb_platform_name))
 
-        test_filename = os.path.join(root, name)
         results.append(call_with_timeout(
-            command, timeout, name, inferior_pid_events, test_filename))
+            command, timeout, base_name, inferior_pid_events, full_test_path))
 
     # result = (name, status, passes, failures, unexpected_successes)
     timed_out = [name for name, status, _, _, _ in results
@@ -615,8 +616,10 @@ def find_test_files_in_dir_tree(dir_root
             return (base_filename.startswith("Test") and
                     base_filename.endswith(".py"))
 
-        tests = [filename for filename in files
-                 if is_test_filename(root, filename)]
+        tests = [
+            (filename, os.path.join(root, filename))
+            for filename in files
+            if is_test_filename(root, filename)]
         if tests:
             found_func(root, tests)
 
@@ -1097,8 +1100,16 @@ def walk_and_invoke(test_files, dotest_a
             dotest_channels.UnpicklingForwardingListenerChannel(
                 RUNNER_PROCESS_ASYNC_MAP, "localhost", 0,
                 2 * num_workers, forwarding_func))
-        dotest_argv.append("--results-port")
-        dotest_argv.append(str(RESULTS_LISTENER_CHANNEL.address[1]))
+        # Set the results port command line arg.  It might have been
+        # inserted previously, so first try to replace it.
+        listener_port = str(RESULTS_LISTENER_CHANNEL.address[1])
+        try:
+            port_value_index = dotest_argv.index("--results-port") + 1
+            dotest_argv[port_value_index] = listener_port
+        except ValueError:
+            # --results-port doesn't exist (yet), add it
+            dotest_argv.append("--results-port")
+            dotest_argv.append(listener_port)
 
     # Build the test work items out of the (dir, file_list) entries passed in.
     test_work_items = []
@@ -1424,6 +1435,58 @@ def default_test_runner_name(num_threads
     return test_runner_name
 
 
+def rerun_tests(test_subdir, tests_for_rerun, dotest_argv):
+    # Build the list of test files to rerun.  At some future point
+    # we'll enable re-running by test method, so we can constrain the
+    # rerun set to just the method(s) that had issues within a file.
+
+    # Sort rerun files into subdirectories.
+    print("\nRerunning the following files:")
+    rerun_files_by_subdir = {}
+    for test_filename in tests_for_rerun.keys():
+        # Print the file we'll be rerunning
+        test_relative_path = os.path.relpath(
+            test_filename, lldbsuite.lldb_test_root)
+        print("  {}".format(test_relative_path))
+
+        # Store test filenames by subdir.
+        test_dir = os.path.dirname(test_filename)
+        test_basename = os.path.basename(test_filename)
+        if test_dir in rerun_files_by_subdir:
+            rerun_files_by_subdir[test_dir].append(
+                (test_basename, test_filename))
+        else:
+            rerun_files_by_subdir[test_dir] = [(test_basename, test_filename)]
+
+    # Break rerun work up by subdirectory.  We do this because
+    # only one test file may run at a time within any given
+    # subdirectory (an invariant tied to the lifecycle of built
+    # inferior test programs).
+    rerun_work = []
+    for files_by_subdir in rerun_files_by_subdir.values():
+        rerun_work.append((test_subdir, files_by_subdir))
+
+    # Run the work with the serial runner.
+    # Do not update legacy counts; I am getting rid of them, so
+    # there is no point adding complicated merge logic here.
+    rerun_thread_count = 1
+    rerun_runner_name = default_test_runner_name(rerun_thread_count)
+    runner_strategies_by_name = get_test_runner_strategies(rerun_thread_count)
+    rerun_runner_func = runner_strategies_by_name[
+        rerun_runner_name]
+    if rerun_runner_func is None:
+        raise Exception(
+            "failed to find rerun test runner "
+            "function named '{}'".format(rerun_runner_name))
+
+    walk_and_invoke(
+        rerun_work,
+        dotest_argv,
+        rerun_thread_count,
+        rerun_runner_func)
+    print("\nTest rerun complete\n")
+
+
 def main(num_threads, test_subdir, test_runner_name, results_formatter):
     """Run dotest.py in inferior mode in parallel.
 
@@ -1501,11 +1564,13 @@ def main(num_threads, test_subdir, test_
                 list(runner_strategies_by_name.keys())))
     test_runner_func = runner_strategies_by_name[test_runner_name]
 
+    # Collect the files on which we'll run the first test run phase.
     test_files = []
     find_test_files_in_dir_tree(
         test_subdir, lambda tdir, tfiles: test_files.append(
             (test_subdir, tfiles)))
 
+    # Do the first test run phase.
     summary_results = walk_and_invoke(
         test_files,
         dotest_argv,
@@ -1515,15 +1580,24 @@ def main(num_threads, test_subdir, test_
     (timed_out, passed, failed, unexpected_successes, pass_count,
      fail_count) = summary_results
 
-    # Check if we have any tests to rerun.
+    # Check if we have any tests to rerun as phase 2.
     if results_formatter is not None:
         tests_for_rerun = results_formatter.tests_for_rerun
-        results_formatter.tests_for_rerun = None
+        results_formatter.tests_for_rerun = {}
 
         if tests_for_rerun is not None and len(tests_for_rerun) > 0:
-            # Here's where we trigger the re-run in a future change.
-            # Make sure the rest of the changes don't break anything.
-            pass
+            rerun_file_count = len(tests_for_rerun)
+            print("\n{} test files marked for rerun\n".format(
+                rerun_file_count))
+
+            # Check if the number of files exceeds the max cutoff.  If so,
+            # we skip the rerun step.
+            if rerun_file_count > configuration.rerun_max_file_threshold:
+                print("Skipping rerun: max rerun file threshold ({}) "
+                      "exceeded".format(
+                          configuration.rerun_max_file_threshold))
+            else:
+                rerun_tests(test_subdir, tests_for_rerun, dotest_argv)
 
     # The results formatter - if present - is done now.  Tell it to
     # terminate.
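
The --results-port handling in walk_and_invoke() is written as it is
because rerun_tests() reinvokes walk_and_invoke() with the same
dotest_argv: a port appended during the first phase must be replaced,
not duplicated, for the rerun phase.  Distilled into a standalone
sketch (helper name hypothetical):

    def set_flag_value(argv, flag, value):
        """Replace the value following flag, or append flag and value."""
        try:
            argv[argv.index(flag) + 1] = value
        except ValueError:
            # flag is not present yet, so add it
            argv.extend([flag, value])

    set_flag_value(dotest_argv, "--results-port", listener_port)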

Modified: lldb/trunk/packages/Python/lldbsuite/test/dotest.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dotest.py?rev=255543&r1=255542&r2=255543&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/dotest.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/dotest.py Mon Dec 14 15:28:46 2015
@@ -387,6 +387,7 @@ def parseOptionsAndInitTestdirs():
 
     # rerun-related arguments
     configuration.rerun_all_issues = args.rerun_all_issues
+    configuration.rerun_max_file_threshold = args.rerun_max_file_threshold
 
     if args.lldb_platform_name:
         configuration.lldb_platform_name = args.lldb_platform_name

Modified: lldb/trunk/packages/Python/lldbsuite/test/dotest_args.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dotest_args.py?rev=255543&r1=255542&r2=255543&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/dotest_args.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/dotest_args.py Mon Dec 14 15:28:46 2015
@@ -160,6 +160,7 @@ def create_parser():
               'be specified as VAL:TYPE, where TYPE may be int to convert '
               'the value to an int'))
 
+    # Re-run related arguments
     group = parser.add_argument_group('Test Re-run Options')
     group.add_argument(
         '--rerun-all-issues',
@@ -168,6 +169,16 @@ def create_parser():
               'irrespective of the test method\'s marking as flakey. '
               'Default behavior is to apply re-runs only to flakey '
               'tests that generate issues.'))
+    group.add_argument(
+        '--rerun-max-file-threshold',
+        action='store',
+        type=int,
+        default=50,
+        help=('Maximum number of files requiring a rerun beyond '
+              'which the rerun will not occur.  This is meant to '
+              'stop a catastrophically failing test suite from forcing '
+              'all tests to be rerun in the single-worker phase.'))
+
     # Remove the reference to our helper function
     del X
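
A quick sanity check of how the new option parses (hypothetical
snippet, assuming create_parser() needs no other arguments):

    parser = create_parser()
    args = parser.parse_args(["--rerun-max-file-threshold", "10"])
    assert args.rerun_max_file_threshold == 10  # type=int conversion
    assert parser.parse_args([]).rerun_max_file_threshold == 50  # default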
 

Added: lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park?rev=255543&view=auto
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park (added)
+++ lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunFail.py.park Mon Dec 14 15:28:46 2015
@@ -0,0 +1,23 @@
+"""Tests that a flakey fail is rerun, and will pass on the rerun.
+Run this test with --rerun-all-issues specified to test that
+the tests fail on the first run, then pass on the second.
+Do not mark them as flakey as, at this time, flakey tests will
+run twice, thus causing the second run to succeed."""
+
+from __future__ import print_function
+
+import rerun_base
+
+import lldbsuite.test.lldbtest as lldbtest
+
+
+class RerunFailTestCase(rerun_base.RerunBaseTestCase):
+    """Forces test failure on first run, success on rerun."""
+    @lldbtest.no_debug_info_test
+    def test_buildbot_catches_failure(self):
+        """Issues a failing test assertion."""
+        if self.should_generate_issue():
+            self.assertTrue(
+                False,
+                "This will fail on the first call, succeed on rerun, and "
+                "alternate thereafter.")

Added: lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park?rev=255543&view=auto
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park (added)
+++ lldb/trunk/packages/Python/lldbsuite/test/issue_verification/TestRerunTimeout.py.park Mon Dec 14 15:28:46 2015
@@ -0,0 +1,22 @@
+"""Tests that a timeout is detected by the testbot."""
+from __future__ import print_function
+
+import time
+
+import lldbsuite.test.lldbtest as lldbtest
+import rerun_base
+
+
+class RerunTimeoutTestCase(rerun_base.RerunBaseTestCase):
+    @lldbtest.no_debug_info_test
+    def test_timeout_rerun_succeeds(self):
+        """Tests that timeout logic kicks in and is picked up."""
+        if not self.should_generate_issue():
+            # We pass this time.
+            return
+        # We time out this time.
+        while True:
+            try:
+                time.sleep(1)
+            except:
+                print("ignoring exception during sleep")

Added: lldb/trunk/packages/Python/lldbsuite/test/issue_verification/rerun_base.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/issue_verification/rerun_base.py?rev=255543&view=auto
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/issue_verification/rerun_base.py (added)
+++ lldb/trunk/packages/Python/lldbsuite/test/issue_verification/rerun_base.py Mon Dec 14 15:28:46 2015
@@ -0,0 +1,28 @@
+from __future__ import print_function
+
+import os
+
+import lldbsuite.test.lldbtest as lldbtest
+
+
+# pylint: disable=too-few-public-methods
+class RerunBaseTestCase(lldbtest.TestBase):
+    """Forces test failure."""
+    mydir = lldbtest.TestBase.compute_mydir(__file__)
+
+    def should_generate_issue(self):
+        """Returns whether a test issue should be generated.
+
+        @returns True on the first call and on every alternate call
+        thereafter for a given test method.
+        """
+        should_pass_filename = "{}.{}.succeed-marker".format(
+            __file__, self.id())
+        fail = not os.path.exists(should_pass_filename)
+        if fail:
+            # Create the marker so that next call to this passes.
+            open(should_pass_filename, 'w').close()
+        else:
+            # Delete the marker so next time we fail.
+            os.remove(should_pass_filename)
+        return fail
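
The marker-file toggle makes results alternate across whole test
processes, which is exactly what the rerun phase needs: fail in the
parallel run, pass in the serial rerun.  The same toggle as a
standalone, runnable sketch (marker name hypothetical):

    import os

    def should_generate_issue(marker="demo.succeed-marker"):
        fail = not os.path.exists(marker)
        if fail:
            open(marker, 'w').close()  # next call will pass
        else:
            os.remove(marker)          # next call will fail again
        return fail

    assert should_generate_issue() is True   # first call: issue
    assert should_generate_issue() is False  # second call: clean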

Modified: lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py?rev=255543&r1=255542&r2=255543&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py Mon Dec 14 15:28:46 2015
@@ -163,6 +163,7 @@ class EventBuilder(object):
     TYPE_TEST_RESULT = "test_result"
     TYPE_TEST_START = "test_start"
     TYPE_MARK_TEST_RERUN_ELIGIBLE = "test_eligible_for_rerun"
+    TYPE_SESSION_TERMINATE = "terminate"
 
     RESULT_TYPES = set([
         TYPE_JOB_RESULT,
@@ -687,7 +688,7 @@ class ResultsFormatter(object):
             component_count += 1
         if "test_class" in test_result_event:
             if component_count > 0:
-                key += "."
+                key += ":"
             key += test_result_event["test_class"]
             component_count += 1
         if "test_name" in test_result_event:



