[Lldb-commits] [lldb] r255676 - test-infra: refactored new summary results into base ResultsFormatter class
Todd Fiala via lldb-commits
lldb-commits at lists.llvm.org
Tue Dec 15 13:33:38 PST 2015
Author: tfiala
Date: Tue Dec 15 15:33:38 2015
New Revision: 255676
URL: http://llvm.org/viewvc/llvm-project?rev=255676&view=rev
Log:
test-infra: refactored new summary results into base ResultsFormatter class
This allows more specialized formatters to still reuse the results
summarization display from the base class.
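As a minimal sketch of what this enables (hypothetical subclass and CSV
format, not part of this commit): a specialized formatter can emit its own
per-event output while inheriting the end-of-run summary, since
print_results() now lives on the ResultsFormatter base class.

    from lldbsuite.test.result_formatter import EventBuilder, ResultsFormatter

    class CsvResultsFormatter(ResultsFormatter):
        """Writes one CSV row per test result; inherits the summary."""
        def handle_event(self, test_event):
            # Let the base class do the bookkeeping (status counts, rerun
            # tracking, self.result_events) used later by print_results().
            super(CsvResultsFormatter, self).handle_event(test_event)
            if (test_event is not None and
                    test_event.get("event") == EventBuilder.TYPE_TEST_RESULT):
                self.out_file.write("{},{},{}\n".format(
                    test_event.get("test_class", ""),
                    test_event.get("test_name", ""),
                    test_event.get("status", "")))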
Removed:
lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py
Modified:
lldb/trunk/packages/Python/lldbsuite/test/dosep.py
lldb/trunk/packages/Python/lldbsuite/test/dotest.py
lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py
Removed: lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py?rev=255675&view=auto
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py (removed)
@@ -1,355 +0,0 @@
-"""
- The LLVM Compiler Infrastructure
-
-This file is distributed under the University of Illinois Open Source
-License. See LICENSE.TXT for details.
-
-Provides basic test result output. This is intended to be suitable for
-normal LLDB test run output when no other option is specified.
-"""
-from __future__ import print_function
-
-# Python system includes
-import os
-
-# Our imports
-from . import configuration
-from . import result_formatter
-from .result_formatter import EventBuilder
-import lldbsuite
-
-
-class BasicResultsFormatter(result_formatter.ResultsFormatter):
- """Provides basic test result output."""
- @classmethod
- def arg_parser(cls):
- """@return arg parser used to parse formatter-specific options."""
- parser = super(BasicResultsFormatter, cls).arg_parser()
-
- parser.add_argument(
- "--assert-on-unknown-events",
- action="store_true",
- help=('cause unknown test events to generate '
- 'a python assert. Default is to ignore.'))
- parser.add_argument(
- "--dump-results",
- action="store_true",
- help=('dump the raw results data after printing '
- 'the summary output.'))
- return parser
-
- def __init__(self, out_file, options):
- """Initializes the BasicResultsFormatter instance.
- @param out_file file-like object where formatted output is written.
- @param options specifies the options for the
- formatter.
- """
- # Initialize the parent
- super(BasicResultsFormatter, self).__init__(out_file, options)
-
- # self.result_events stores the most recent result event,
- # keyed by test method
- self.result_events = {}
- self.test_method_rerun_count = 0
-
- def handle_event(self, test_event):
- super(BasicResultsFormatter, self).handle_event(test_event)
- if test_event is None:
- return
-
- event_type = test_event["event"]
- if event_type is None:
- return
-
- if event_type == EventBuilder.TYPE_SESSION_TERMINATE:
- self._finish_output()
- elif event_type == EventBuilder.TYPE_TEST_START:
- self.track_start_time(
- test_event["test_class"],
- test_event["test_name"],
- test_event["event_time"])
- elif event_type == EventBuilder.TYPE_TEST_RESULT:
- # Build the test key.
- test_key = test_event.get("test_filename", None)
- if test_key is None:
- raise Exception(
- "failed to find test filename for test event {}".format(
- test_event))
- test_key += ":{}.{}".format(
- test_event.get("test_class", ""),
- test_event.get("test_name", ""))
-
- # Save the most recent test event for the test key.
- # This allows a second test phase to overwrite the most
- # recent result for the test key (unique per method).
- # We do final reporting at the end, so we'll report based
- # on final results.
- # We do this so that a re-run (e.g. one triggered by the
- # need for a low-load, single-worker pass) always has its
- # final run's results used.
- if test_key in self.result_events:
- # We are replacing the result of something that was
- # already counted by the base class. Remove the double
- # counting by reducing by one the count for the test
- # result status.
- old_status = self.result_events[test_key]["status"]
- self.result_status_counts[old_status] -= 1
-
- self.test_method_rerun_count += 1
- self.result_events[test_key] = test_event
- elif event_type == EventBuilder.TYPE_JOB_RESULT:
- # Build the job key.
- test_key = test_event.get("test_filename", None)
- if test_key is None:
- raise Exception(
- "failed to find test filename for job event {}".format(
- test_event))
- self.result_events[test_key] = test_event
- else:
- # This is an unknown event.
- if self.options.assert_on_unknown_events:
- raise Exception("unknown event type {} from {}\n".format(
- event_type, test_event))
-
- @classmethod
- def _event_sort_key(cls, event):
- """Returns the sort key to be used for a test event.
-
- This method papers over the differences in a test method result vs. a
- job (i.e. inferior process) result.
-
- @param event a test result or job result event.
- @return a key useful for sorting events by name (test name preferably,
- then by test filename).
- """
- if "test_name" in event:
- return event["test_name"]
- else:
- return event.get("test_filename", None)
-
- def _partition_results_by_status(self, categories):
- """Partitions the captured test results by event status.
-
- This permits processing test results by the category ids.
-
- @param categories the list of categories on which to partition.
- Follows the format described in _report_category_details().
-
- @return a dictionary where each key is the test result status,
- and each entry is a list containing all the test result events
- that matched that test result status. Result status IDs with
- no matching entries will have a zero-length list.
- """
- partitioned_events = {}
- for category in categories:
- result_status_id = category[0]
- matching_events = [
- [key, event] for (key, event) in self.result_events.items()
- if event.get("status", "") == result_status_id]
- partitioned_events[result_status_id] = sorted(
- matching_events,
- key=lambda x: self._event_sort_key(x[1]))
- return partitioned_events
-
- def _print_banner(self, banner_text):
- """Prints an ASCII banner around given text.
-
- Output goes to the out file for the results formatter.
-
- @param banner_text the text to display, with a banner
- of '=' around the line above and line below.
- """
- banner_separator = "".ljust(len(banner_text), "=")
-
- self.out_file.write("\n{}\n{}\n{}\n".format(
- banner_separator,
- banner_text,
- banner_separator))
-
- def _print_summary_counts(
- self, categories, result_events_by_status, extra_rows):
- """Prints summary counts for all categories.
-
- @param categories the list of categories on which to partition.
- Follows the format described in _report_category_details().
-
- @param result_events_by_status the partitioned list of test
- result events in a dictionary, with the key set to the test
- result status id and the value set to the list of test method
- results that match the status id.
- """
-
- # Get max length for category printed name
- category_with_max_printed_name = max(
- categories, key=lambda x: len(x[1]))
- max_category_name_length = len(category_with_max_printed_name[1])
-
- # If we are provided with extra rows, consider these row name lengths.
- if extra_rows is not None:
- for row in extra_rows:
- name_length = len(row[0])
- if name_length > max_category_name_length:
- max_category_name_length = name_length
-
- self._print_banner("Test Result Summary")
-
- # Prepend extra rows
- if extra_rows is not None:
- for row in extra_rows:
- extra_label = "{}:".format(row[0]).ljust(
- max_category_name_length + 1)
- self.out_file.write("{} {:4}\n".format(extra_label, row[1]))
-
- for category in categories:
- result_status_id = category[0]
- result_label = "{}:".format(category[1]).ljust(
- max_category_name_length + 1)
- count = len(result_events_by_status[result_status_id])
- self.out_file.write("{} {:4}\n".format(
- result_label,
- count))
-
- @classmethod
- def _has_printable_details(cls, categories, result_events_by_status):
- """Returns whether there are any test result details that need to be printed.
-
- This will spin through the results and see if any result in a category
- that is printable has any results to print.
-
- @param categories the list of categories on which to partition.
- Follows the format described in _report_category_details().
-
- @param result_events_by_status the partitioned list of test
- result events in a dictionary, with the key set to the test
- result status id and the value set to the list of test method
- results that match the status id.
-
- @return True if there are any details (i.e. test results
- for failures, errors, unexpected successes); False otherwise.
- """
- for category in categories:
- result_status_id = category[0]
- print_matching_tests = category[2]
- if print_matching_tests:
- if len(result_events_by_status[result_status_id]) > 0:
- # We found a printable details test result status
- # that has details to print.
- return True
- # We didn't find any test result category with printable
- # details.
- return False
-
- def _report_category_details(self, category, result_events_by_status):
- """Reports all test results matching the given category spec.
-
- @param category a category spec of the format [result_status_id,
- printed_category_name, print_matching_entries?, detail_label]
-
- @param result_events_by_status the partitioned list of test
- result events in a dictionary, with the key set to the test
- result status id and the value set to the list of test method
- results that match the status id.
- """
- result_status_id = category[0]
- print_matching_tests = category[2]
- detail_label = category[3]
-
- if print_matching_tests:
- # Sort by test name
- for (_, event) in result_events_by_status[result_status_id]:
- # Convert full test path into test-root-relative.
- test_relative_path = os.path.relpath(
- os.path.realpath(event["test_filename"]),
- lldbsuite.lldb_test_root)
-
- # Create extra info component (used for exceptional exit info)
- if result_status_id == EventBuilder.STATUS_EXCEPTIONAL_EXIT:
- extra_info = "[EXCEPTIONAL EXIT {} ({})] ".format(
- event["exception_code"],
- event["exception_description"])
- else:
- extra_info = ""
-
- # Figure out the identity we will use for this test.
- if configuration.verbose and ("test_class" in event):
- test_id = "{}.{}".format(
- event["test_class"], event["test_name"])
- elif "test_name" in event:
- test_id = event["test_name"]
- else:
- test_id = "<no_running_test_method>"
-
- # Display the info.
- self.out_file.write("{}: {}{} ({})\n".format(
- detail_label,
- extra_info,
- test_id,
- test_relative_path))
-
- def _finish_output_no_lock(self):
- """Writes the test result report to the output file."""
- extra_results = [
- # Total test methods processed, excluding reruns.
- ["Test Methods", len(self.result_events)],
- ["Reruns", self.test_method_rerun_count]]
-
- # Output each of the test result entries.
- categories = [
- # result id, printed name, print matching tests?, detail label
- [result_formatter.EventBuilder.STATUS_SUCCESS,
- "Success", False, None],
- [result_formatter.EventBuilder.STATUS_EXPECTED_FAILURE,
- "Expected Failure", False, None],
- [result_formatter.EventBuilder.STATUS_FAILURE,
- "Failure", True, "FAIL"],
- [result_formatter.EventBuilder.STATUS_ERROR,
- "Error", True, "ERROR"],
- [result_formatter.EventBuilder.STATUS_EXCEPTIONAL_EXIT,
- "Exceptional Exit", True, "ERROR"],
- [result_formatter.EventBuilder.STATUS_UNEXPECTED_SUCCESS,
- "Unexpected Success", True, "UNEXPECTED SUCCESS"],
- [result_formatter.EventBuilder.STATUS_SKIP, "Skip", False, None],
- [result_formatter.EventBuilder.STATUS_TIMEOUT,
- "Timeout", True, "TIMEOUT"],
- [result_formatter.EventBuilder.STATUS_EXPECTED_TIMEOUT,
- # Intentionally using the unusual hyphenation in TIME-OUT to
- # prevent buildbots from thinking it is an issue when scanning
- # for TIMEOUT.
- "Expected Timeout", True, "EXPECTED TIME-OUT"]
- ]
-
- # Partition all the events by test result status
- result_events_by_status = self._partition_results_by_status(
- categories)
-
- # Print the details
- have_details = self._has_printable_details(
- categories, result_events_by_status)
- if have_details:
- self._print_banner("Issue Details")
- for category in categories:
- self._report_category_details(
- category, result_events_by_status)
-
- # Print the summary
- self._print_summary_counts(
- categories, result_events_by_status, extra_results)
-
- if self.options.dump_results:
- # Debug dump of the key/result info for all categories.
- self._print_banner("Results Dump")
- for status, events_by_key in result_events_by_status.items():
- print("\nSTATUS: {}".format(status))
- for key, event in events_by_key:
- print("key: {}".format(key))
- print("event: {}".format(event))
-
- def _finish_output(self):
- """Prepare and write the results report as all incoming events have
- arrived.
- """
- with self.lock:
- self._finish_output_no_lock()
-
- def replaces_summary(self):
- return True
Modified: lldb/trunk/packages/Python/lldbsuite/test/dosep.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dosep.py?rev=255676&r1=255675&r2=255676&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/dosep.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/dosep.py Tue Dec 15 15:33:38 2015
@@ -1632,23 +1632,18 @@ def main(num_threads, test_subdir, test_
# Only run the old summary logic if we don't have a results formatter
# that already prints the summary.
- if results_formatter is None or not results_formatter.replaces_summary():
- print_legacy_summary = True
- else:
- print_legacy_summary = False
-
+ print_legacy_summary = results_formatter is None
if not print_legacy_summary:
+ # Print summary results. Summarized results at the end always
+ # get printed to stdout, even if --results-file specifies a different
+ # file for, say, xUnit output.
+ results_formatter.print_results(sys.stdout)
+
# Figure out exit code by count of test result types.
- issue_count = (
- results_formatter.counts_by_test_result_status(
- EventBuilder.STATUS_ERROR) +
- results_formatter.counts_by_test_result_status(
- EventBuilder.STATUS_FAILURE) +
- results_formatter.counts_by_test_result_status(
- EventBuilder.STATUS_TIMEOUT) +
- results_formatter.counts_by_test_result_status(
- EventBuilder.STATUS_EXCEPTIONAL_EXIT)
- )
+ issue_count = 0
+ for issue_status in EventBuilder.TESTRUN_ERROR_STATUS_VALUES:
+ issue_count += results_formatter.counts_by_test_result_status(
+ issue_status)
# Return with appropriate result code
if issue_count > 0:
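For reference, the new loop should be equivalent to the explicit sum it
replaces, assuming EventBuilder.TESTRUN_ERROR_STATUS_VALUES contains the
same four statuses the old code enumerated:

    issue_count = sum(
        results_formatter.counts_by_test_result_status(status)
        for status in (EventBuilder.STATUS_ERROR,
                       EventBuilder.STATUS_FAILURE,
                       EventBuilder.STATUS_TIMEOUT,
                       EventBuilder.STATUS_EXCEPTIONAL_EXIT))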
Modified: lldb/trunk/packages/Python/lldbsuite/test/dotest.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dotest.py?rev=255676&r1=255675&r2=255676&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/dotest.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/dotest.py Tue Dec 15 15:33:38 2015
@@ -383,7 +383,7 @@ def parseOptionsAndInitTestdirs():
# and we're not a test inferior.
if not args.inferior and configuration.results_formatter_name is None:
configuration.results_formatter_name = (
- "lldbsuite.test.basic_results_formatter.BasicResultsFormatter")
+ "lldbsuite.test.result_formatter.ResultsFormatter")
# rerun-related arguments
configuration.rerun_all_issues = args.rerun_all_issues
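The dotted name stored in configuration.results_formatter_name is later
resolved to a class before the run starts. A minimal sketch of that kind
of resolution (a hypothetical helper, not the suite's actual loader):

    import importlib

    def resolve_formatter_class(dotted_name):
        # "pkg.module.ClassName" -> (module path, class name)
        module_name, class_name = dotted_name.rsplit(".", 1)
        return getattr(importlib.import_module(module_name), class_name)

    formatter_class = resolve_formatter_class(
        "lldbsuite.test.result_formatter.ResultsFormatter")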
Modified: lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py?rev=255676&r1=255675&r2=255676&view=diff
==============================================================================
--- lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py (original)
+++ lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py Tue Dec 15 15:33:38 2015
@@ -30,6 +30,8 @@ from six.moves import cPickle
# LLDB modules
from . import configuration
+import lldbsuite
+
# Ignore method count on DTOs.
# pylint: disable=too-few-public-methods
@@ -624,6 +626,11 @@ class ResultsFormatter(object):
description='{} options'.format(cls.__name__),
usage=('dotest.py --results-formatter-options='
'"--option1 value1 [--option2 value2 [...]]"'))
+ parser.add_argument(
+ "--dump-results",
+ action="store_true",
+ help=('dump the raw results data after printing '
+ 'the summary output.'))
return parser
def __init__(self, out_file, options):
@@ -655,11 +662,17 @@ class ResultsFormatter(object):
# worker index.
self.started_tests_by_worker = {}
+ # Store the most recent test_method/job status.
+ self.result_events = {}
+
+ # Track the number of test method reruns.
+ self.test_method_rerun_count = 0
+
# Lock that we use while mutating inner state, like the
# total test count and the elements. We minimize how
# long we hold the lock just to keep inner state safe, not
# entirely consistent from the outside.
- self.lock = threading.Lock()
+ self.lock = threading.RLock()
# Keeps track of the test base filenames for tests that
# are expected to timeout. If a timeout occurs in any test
@@ -679,27 +692,37 @@ class ResultsFormatter(object):
self.tests_for_rerun = {}
@classmethod
- def _make_rerun_eligibility_key(cls, test_result_event):
- if test_result_event is None:
+ def _make_key(cls, result_event):
+ """Creates a key from a test or job result event.
+
+ This key attempts to be as unique as possible. For
+ test result events, it will be unique per test method.
+ For job events (ones not promoted to a test result event),
+ it will be unique per test case file.
+
+ @return a string-based key of the form
+ {test_filename}:{test_class}.{test_name}
+ """
+ if result_event is None:
return None
component_count = 0
- if "test_filename" in test_result_event:
- key = test_result_event["test_filename"]
+ if "test_filename" in result_event:
+ key = result_event["test_filename"]
component_count += 1
- if "test_class" in test_result_event:
+ if "test_class" in result_event:
if component_count > 0:
key += ":"
- key += test_result_event["test_class"]
+ key += result_event["test_class"]
component_count += 1
- if "test_name" in test_result_event:
+ if "test_name" in result_event:
if component_count > 0:
key += "."
- key += test_result_event["test_name"]
+ key += result_event["test_name"]
component_count += 1
return key
def _mark_test_for_rerun_eligibility(self, test_result_event):
- key = self._make_rerun_eligibility_key(test_result_event)
+ key = self._make_key(test_result_event)
if key is not None:
self.rerun_eligible_tests.add(key)
else:
@@ -708,13 +731,14 @@ class ResultsFormatter(object):
"failed to create key.\n")
def _maybe_add_test_to_rerun_list(self, result_event):
- key = self._make_rerun_eligibility_key(result_event)
+ key = self._make_key(result_event)
if key is not None:
- if key in self.rerun_eligible_tests or configuration.rerun_all_issues:
+ if (key in self.rerun_eligible_tests or
+ configuration.rerun_all_issues):
test_filename = result_event.get("test_filename", None)
if test_filename is not None:
test_name = result_event.get("test_name", None)
- if not test_filename in self.tests_for_rerun:
+ if test_filename not in self.tests_for_rerun:
self.tests_for_rerun[test_filename] = []
if test_name is not None:
self.tests_for_rerun[test_filename].append(test_name)
@@ -817,12 +841,45 @@ class ResultsFormatter(object):
# whether it can be rerun. If it can be rerun, add it
# to the rerun job.
self._maybe_add_test_to_rerun_list(test_event)
+
+ # Build the test key.
+ test_key = self._make_key(test_event)
+ if test_key is None:
+ raise Exception(
+ "failed to find test filename for "
+ "test event {}".format(test_event))
+
+ # Save the most recent test event for the test key.
+ # This allows a second test phase to overwrite the most
+ # recent result for the test key (unique per method).
+ # We do final reporting at the end, so we'll report based
+ # on final results.
+ # We do this so that a re-run (e.g. one triggered by the
+ # need for a low-load, single-worker pass) always has its
+ # final run's results used.
+ if test_key in self.result_events:
+ # We are replacing the result of something that was
+ # already counted by the base class. Remove the double
+ # counting by reducing by one the count for the test
+ # result status.
+ old_status = self.result_events[test_key]["status"]
+ self.result_status_counts[old_status] -= 1
+ self.test_method_rerun_count += 1
+ self.result_events[test_key] = test_event
elif event_type == EventBuilder.TYPE_TEST_START:
- # Keep track of the most recent test start event
- # for the related worker.
+ # Track the start time for the test method.
+ self.track_start_time(
+ test_event["test_class"],
+ test_event["test_name"],
+ test_event["event_time"])
+ # Keep track of the most recent test method start event
+ # for the related worker. This allows us to figure
+ # out whether a process timeout or exceptional exit
+ # can be charged (i.e. assigned) to a test method.
worker_index = test_event.get("worker_index", None)
if worker_index is not None:
self.started_tests_by_worker[worker_index] = test_event
+
elif event_type == EventBuilder.TYPE_MARK_TEST_RERUN_ELIGIBLE:
self._mark_test_for_rerun_eligibility(test_event)
@@ -902,6 +959,249 @@ class ResultsFormatter(object):
"""
return self.result_status_counts[status]
+ @classmethod
+ def _event_sort_key(cls, event):
+ """Returns the sort key to be used for a test event.
+
+ This method papers over the differences in a test method result vs. a
+ job (i.e. inferior process) result.
+
+ @param event a test result or job result event.
+ @return a key useful for sorting events by name (test name preferably,
+ then by test filename).
+ """
+ if "test_name" in event:
+ return event["test_name"]
+ else:
+ return event.get("test_filename", None)
+
+ def _partition_results_by_status(self, categories):
+ """Partitions the captured test results by event status.
+
+ This permits processing test results by the category ids.
+
+ @param categories the list of categories on which to partition.
+ Follows the format described in _report_category_details().
+
+ @return a dictionary where each key is the test result status,
+ and each entry is a list containing all the test result events
+ that matched that test result status. Result status IDs with
+ no matching entries will have a zero-length list.
+ """
+ partitioned_events = {}
+ for category in categories:
+ result_status_id = category[0]
+ matching_events = [
+ [key, event] for (key, event) in self.result_events.items()
+ if event.get("status", "") == result_status_id]
+ partitioned_events[result_status_id] = sorted(
+ matching_events,
+ key=lambda x: self._event_sort_key(x[1]))
+ return partitioned_events
+
+ def _print_banner(self, out_file, banner_text):
+ """Prints an ASCII banner around given text.
+
+ Output goes to the given out_file.
+
+ @param out_file a file-like object where output will be written.
+ @param banner_text the text to display, with a banner
+ of '=' around the line above and line below.
+ """
+ banner_separator = "".ljust(len(banner_text), "=")
+
+ out_file.write("\n{}\n{}\n{}\n".format(
+ banner_separator,
+ banner_text,
+ banner_separator))
+
+ def _print_summary_counts(
+ self, out_file, categories, result_events_by_status, extra_rows):
+ """Prints summary counts for all categories.
+
+ @param out_file a file-like object used to print output.
+
+ @param categories the list of categories on which to partition.
+ Follows the format described in _report_category_details().
+
+ @param result_events_by_status the partitioned list of test
+ result events in a dictionary, with the key set to the test
+ result status id and the value set to the list of test method
+ results that match the status id.
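+
+ @param extra_rows an optional list of [row_label, value] rows to
+ print ahead of the per-category count rows.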
+ """
+
+ # Get max length for category printed name
+ category_with_max_printed_name = max(
+ categories, key=lambda x: len(x[1]))
+ max_category_name_length = len(category_with_max_printed_name[1])
+
+ # If we are provided with extra rows, consider these row name lengths.
+ if extra_rows is not None:
+ for row in extra_rows:
+ name_length = len(row[0])
+ if name_length > max_category_name_length:
+ max_category_name_length = name_length
+
+ self._print_banner(out_file, "Test Result Summary")
+
+ # Prepend extra rows
+ if extra_rows is not None:
+ for row in extra_rows:
+ extra_label = "{}:".format(row[0]).ljust(
+ max_category_name_length + 1)
+ out_file.write("{} {:4}\n".format(extra_label, row[1]))
+
+ for category in categories:
+ result_status_id = category[0]
+ result_label = "{}:".format(category[1]).ljust(
+ max_category_name_length + 1)
+ count = len(result_events_by_status[result_status_id])
+ out_file.write("{} {:4}\n".format(
+ result_label,
+ count))
+
+ @classmethod
+ def _has_printable_details(cls, categories, result_events_by_status):
+ """Returns whether there are any test result details that need to be printed.
+
+ This will spin through the results and see if any result in a category
+ that is printable has any results to print.
+
+ @param categories the list of categories on which to partition.
+ Follows the format described in _report_category_details().
+
+ @param result_events_by_status the partitioned list of test
+ result events in a dictionary, with the key set to the test
+ result status id and the value set to the list of test method
+ results that match the status id.
+
+ @return True if there are any details (i.e. test results
+ for failures, errors, unexpected successes); False otherwise.
+ """
+ for category in categories:
+ result_status_id = category[0]
+ print_matching_tests = category[2]
+ if print_matching_tests:
+ if len(result_events_by_status[result_status_id]) > 0:
+ # We found a printable details test result status
+ # that has details to print.
+ return True
+ # We didn't find any test result category with printable
+ # details.
+ return False
+
+ def _report_category_details(self, out_file, category, result_events_by_status):
+ """Reports all test results matching the given category spec.
+
+ @param out_file a file-like object used to print output.
+
+ @param category a category spec of the format [result_status_id,
+ printed_category_name, print_matching_entries?, detail_label]
+
+ @param result_events_by_status the partitioned list of test
+ result events in a dictionary, with the key set to the test
+ result status id and the value set to the list of test method
+ results that match the status id.
+ """
+ result_status_id = category[0]
+ print_matching_tests = category[2]
+ detail_label = category[3]
+
+ if print_matching_tests:
+ # Sort by test name
+ for (_, event) in result_events_by_status[result_status_id]:
+ # Convert full test path into test-root-relative.
+ test_relative_path = os.path.relpath(
+ os.path.realpath(event["test_filename"]),
+ lldbsuite.lldb_test_root)
+
+ # Create extra info component (used for exceptional exit info)
+ if result_status_id == EventBuilder.STATUS_EXCEPTIONAL_EXIT:
+ extra_info = "[EXCEPTIONAL EXIT {} ({})] ".format(
+ event["exception_code"],
+ event["exception_description"])
+ else:
+ extra_info = ""
+
+ # Figure out the identity we will use for this test.
+ if configuration.verbose and ("test_class" in event):
+ test_id = "{}.{}".format(
+ event["test_class"], event["test_name"])
+ elif "test_name" in event:
+ test_id = event["test_name"]
+ else:
+ test_id = "<no_running_test_method>"
+
+ # Display the info.
+ out_file.write("{}: {}{} ({})\n".format(
+ detail_label,
+ extra_info,
+ test_id,
+ test_relative_path))
+
+ def print_results(self, out_file):
+ """Writes the test result report to the output file.
+
+ @param out_file a file-like object used for printing summary
+ results. This is different from self.out_file, which might
+ be something else for non-summary data.
+ """
+ extra_results = [
+ # Total test methods processed, excluding reruns.
+ ["Test Methods", len(self.result_events)],
+ ["Reruns", self.test_method_rerun_count]]
+
+ # Output each of the test result entries.
+ categories = [
+ # result id, printed name, print matching tests?, detail label
+ [EventBuilder.STATUS_SUCCESS,
+ "Success", False, None],
+ [EventBuilder.STATUS_EXPECTED_FAILURE,
+ "Expected Failure", False, None],
+ [EventBuilder.STATUS_FAILURE,
+ "Failure", True, "FAIL"],
+ [EventBuilder.STATUS_ERROR,
+ "Error", True, "ERROR"],
+ [EventBuilder.STATUS_EXCEPTIONAL_EXIT,
+ "Exceptional Exit", True, "ERROR"],
+ [EventBuilder.STATUS_UNEXPECTED_SUCCESS,
+ "Unexpected Success", True, "UNEXPECTED SUCCESS"],
+ [EventBuilder.STATUS_SKIP, "Skip", False, None],
+ [EventBuilder.STATUS_TIMEOUT,
+ "Timeout", True, "TIMEOUT"],
+ [EventBuilder.STATUS_EXPECTED_TIMEOUT,
+ # Intentionally using the unusual hyphenation in TIME-OUT to
+ # prevent buildbots from thinking it is an issue when scanning
+ # for TIMEOUT.
+ "Expected Timeout", True, "EXPECTED TIME-OUT"]
+ ]
+
+ # Partition all the events by test result status
+ result_events_by_status = self._partition_results_by_status(
+ categories)
+
+ # Print the details
+ have_details = self._has_printable_details(
+ categories, result_events_by_status)
+ if have_details:
+ self._print_banner(out_file, "Issue Details")
+ for category in categories:
+ self._report_category_details(
+ out_file, category, result_events_by_status)
+
+ # Print the summary
+ self._print_summary_counts(
+ out_file, categories, result_events_by_status, extra_results)
+
+ if self.options.dump_results:
+ # Debug dump of the key/result info for all categories.
+ self._print_banner(out_file, "Results Dump")
+ for status, events_by_key in result_events_by_status.items():
+ out_file.write("\nSTATUS: {}\n".format(status))
+ for key, event in events_by_key:
+ out_file.write("key: {}\n".format(key))
+ out_file.write("event: {}\n".format(event))
+
class RawPickledFormatter(ResultsFormatter):
"""Formats events as a pickled stream.