<div dir="ltr">This is okay, but I was planning on breaking that out into multiple files. Right now the test_results module also has the EventBuilder in it, which is not a results formatter. But we can break that out separately.</div><div class="gmail_extra"><br><div class="gmail_quote">On Mon, Dec 7, 2015 at 1:23 PM, Zachary Turner via lldb-commits <span dir="ltr"><<a href="mailto:lldb-commits@lists.llvm.org" target="_blank">lldb-commits@lists.llvm.org</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Author: zturner<br>
Date: Mon Dec 7 15:23:41 2015<br>
New Revision: 254946<br>
<br>
URL: <a href="http://llvm.org/viewvc/llvm-project?rev=254946&view=rev" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project?rev=254946&view=rev</a><br>
Log:<br>
Rename test_results.py to result_formatter.py.<br>
<br>
There is already a class called LLDBTestResults which I would like<br>
to move into a separate file, but the most appropriate filename<br>
was taken.<br>
<br>
Added:<br>
lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py<br>
- copied, changed from r254944, lldb/trunk/packages/Python/lldbsuite/test/test_results.py<br>
Removed:<br>
lldb/trunk/packages/Python/lldbsuite/test/test_results.py<br>
Modified:<br>
lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py<br>
lldb/trunk/packages/Python/lldbsuite/test/curses_results.py<br>
lldb/trunk/packages/Python/lldbsuite/test/dosep.py<br>
lldb/trunk/packages/Python/lldbsuite/test/dotest.py<br>
<br>
Modified: lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py?rev=254946&r1=254945&r2=254946&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py?rev=254946&r1=254945&r2=254946&view=diff</a><br>
==============================================================================<br>
--- lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py (original)<br>
+++ lldb/trunk/packages/Python/lldbsuite/test/basic_results_formatter.py Mon Dec 7 15:23:41 2015<br>
@@ -13,10 +13,11 @@ from __future__ import print_function<br>
import os<br>
<br>
# Our imports<br>
-from . import test_results<br>
+from . import result_formatter<br>
import lldbsuite<br>
<br>
-class BasicResultsFormatter(test_results.ResultsFormatter):<br>
+<br>
+class BasicResultsFormatter(result_formatter.ResultsFormatter):<br>
"""Provides basic test result output."""<br>
@classmethod<br>
def arg_parser(cls):<br>
@@ -240,16 +241,16 @@ class BasicResultsFormatter(test_results<br>
# Output each of the test result entries.<br>
categories = [<br>
# result id, printed name, print matching tests?, detail label<br>
- [test_results.EventBuilder.STATUS_SUCCESS,<br>
+ [result_formatter.EventBuilder.STATUS_SUCCESS,<br>
"Success", False, None],<br>
- [test_results.EventBuilder.STATUS_EXPECTED_FAILURE,<br>
+ [result_formatter.EventBuilder.STATUS_EXPECTED_FAILURE,<br>
"Expected Failure", False, None],<br>
- [test_results.EventBuilder.STATUS_FAILURE,<br>
+ [result_formatter.EventBuilder.STATUS_FAILURE,<br>
"Failure", True, "FAIL"],<br>
- [test_results.EventBuilder.STATUS_ERROR, "Error", True, "ERROR"],<br>
- [test_results.EventBuilder.STATUS_UNEXPECTED_SUCCESS,<br>
+ [result_formatter.EventBuilder.STATUS_ERROR, "Error", True, "ERROR"],<br>
+ [result_formatter.EventBuilder.STATUS_UNEXPECTED_SUCCESS,<br>
"Unexpected Success", True, "UNEXPECTED SUCCESS"],<br>
- [test_results.EventBuilder.STATUS_SKIP, "Skip", False, None]]<br>
+ [result_formatter.EventBuilder.STATUS_SKIP, "Skip", False, None]]<br>
<br>
# Partition all the events by test result status<br>
result_events_by_status = self._partition_results_by_status(<br>
<br>
Modified: lldb/trunk/packages/Python/lldbsuite/test/curses_results.py<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/curses_results.py?rev=254946&r1=254945&r2=254946&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/curses_results.py?rev=254946&r1=254945&r2=254946&view=diff</a><br>
==============================================================================<br>
--- lldb/trunk/packages/Python/lldbsuite/test/curses_results.py (original)<br>
+++ lldb/trunk/packages/Python/lldbsuite/test/curses_results.py Mon Dec 7 15:23:41 2015<br>
@@ -23,11 +23,11 @@ import time<br>
<br>
# LLDB modules<br>
from . import lldbcurses<br>
-from . import test_results<br>
-from .test_results import EventBuilder<br>
+from . import result_formatter<br>
+from .result_formatter import EventBuilder<br>
<br>
<br>
-class Curses(test_results.ResultsFormatter):<br>
+class Curses(result_formatter.ResultsFormatter):<br>
"""Receives live results from tests that are running and reports them to the terminal in a curses GUI"""<br>
<br>
def __init__(self, out_file, options):<br>
<br>
Modified: lldb/trunk/packages/Python/lldbsuite/test/dosep.py<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dosep.py?rev=254946&r1=254945&r2=254946&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dosep.py?rev=254946&r1=254945&r2=254946&view=diff</a><br>
==============================================================================<br>
--- lldb/trunk/packages/Python/lldbsuite/test/dosep.py (original)<br>
+++ lldb/trunk/packages/Python/lldbsuite/test/dosep.py Mon Dec 7 15:23:41 2015<br>
@@ -53,7 +53,7 @@ import lldbsuite.support.seven as seven<br>
<br>
from . import dotest_channels<br>
from . import dotest_args<br>
-from . import test_results<br>
+from . import result_formatter<br>
<br>
# Todo: Convert this folder layout to be relative-import friendly and don't hack up<br>
# sys.path like this<br>
@@ -1429,9 +1429,9 @@ def main(print_details_on_success, num_t<br>
# Figure out exit code by count of test result types.<br>
issue_count = (<br>
results_formatter.counts_by_test_result_status(<br>
- test_results.EventBuilder.STATUS_ERROR) +<br>
+ result_formatter.EventBuilder.STATUS_ERROR) +<br>
results_formatter.counts_by_test_result_status(<br>
- test_results.EventBuilder.STATUS_FAILURE) +<br>
+ result_formatter.EventBuilder.STATUS_FAILURE) +<br>
timeout_count)<br>
# Return with appropriate result code<br>
if issue_count > 0:<br>
<br>
Modified: lldb/trunk/packages/Python/lldbsuite/test/dotest.py<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dotest.py?rev=254946&r1=254945&r2=254946&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/dotest.py?rev=254946&r1=254945&r2=254946&view=diff</a><br>
==============================================================================<br>
--- lldb/trunk/packages/Python/lldbsuite/test/dotest.py (original)<br>
+++ lldb/trunk/packages/Python/lldbsuite/test/dotest.py Mon Dec 7 15:23:41 2015<br>
@@ -43,8 +43,8 @@ import lldbsuite<br>
from . import dotest_args<br>
from . import lldbtest_config<br>
from . import test_categories<br>
-from . import test_results<br>
-from .test_results import EventBuilder<br>
+from . import result_formatter<br>
+from .result_formatter import EventBuilder<br>
from ..support import seven<br>
<br>
def is_exe(fpath):<br>
@@ -795,7 +795,7 @@ def parseOptionsAndInitTestdirs():<br>
# Tell the event builder to create all events with these<br>
# key/val pairs in them.<br>
if len(entries) > 0:<br>
- test_results.EventBuilder.add_entries_to_all_events(entries)<br>
+ result_formatter.EventBuilder.add_entries_to_all_events(entries)<br>
<br>
# Gather all the dirs passed on the command line.<br>
if len(args.args) > 0:<br>
@@ -930,13 +930,13 @@ def setupTestResults():<br>
else:<br>
results_file_object = open(results_filename, "w")<br>
cleanup_func = results_file_object.close<br>
- default_formatter_name = "lldbsuite.test.test_results.XunitFormatter"<br>
+ default_formatter_name = "lldbsuite.test.result_formatter.XunitFormatter"<br>
elif results_port:<br>
# Connect to the specified localhost port.<br>
results_file_object, cleanup_func = createSocketToLocalPort(<br>
results_port)<br>
default_formatter_name = (<br>
- "lldbsuite.test.test_results.RawPickledFormatter")<br>
+ "lldbsuite.test.result_formatter.RawPickledFormatter")<br>
<br>
# If we have a results formatter name specified and we didn't specify<br>
# a results file, we should use stdout.<br>
<br>
Copied: lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py (from r254944, lldb/trunk/packages/Python/lldbsuite/test/test_results.py)<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py?p2=lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py&p1=lldb/trunk/packages/Python/lldbsuite/test/test_results.py&r1=254944&r2=254946&rev=254946&view=diff" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py?p2=lldb/trunk/packages/Python/lldbsuite/test/result_formatter.py&p1=lldb/trunk/packages/Python/lldbsuite/test/test_results.py&r1=254944&r2=254946&rev=254946&view=diff</a><br>
==============================================================================<br>
(empty)<br>
<br>
Removed: lldb/trunk/packages/Python/lldbsuite/test/test_results.py<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/test_results.py?rev=254945&view=auto" rel="noreferrer" target="_blank">http://llvm.org/viewvc/llvm-project/lldb/trunk/packages/Python/lldbsuite/test/test_results.py?rev=254945&view=auto</a><br>
==============================================================================<br>
--- lldb/trunk/packages/Python/lldbsuite/test/test_results.py (original)<br>
+++ lldb/trunk/packages/Python/lldbsuite/test/test_results.py (removed)<br>
@@ -1,1042 +0,0 @@<br>
-"""<br>
- The LLVM Compiler Infrastructure<br>
-<br>
-This file is distributed under the University of Illinois Open Source<br>
-License. See LICENSE.TXT for details.<br>
-<br>
-Provides classes used by the test results reporting infrastructure<br>
-within the LLDB test suite.<br>
-"""<br>
-<br>
-from __future__ import print_function<br>
-from __future__ import absolute_import<br>
-<br>
-# System modules<br>
-import argparse<br>
-import inspect<br>
-import os<br>
-import pprint<br>
-import re<br>
-import sys<br>
-import threading<br>
-import time<br>
-import traceback<br>
-import xml.sax.saxutils<br>
-<br>
-# Third-party modules<br>
-import six<br>
-from six.moves import cPickle<br>
-<br>
-# LLDB modules<br>
-<br>
-<br>
-class EventBuilder(object):<br>
- """Helper class to build test result event dictionaries."""<br>
-<br>
- BASE_DICTIONARY = None<br>
-<br>
- # Test Status Tags<br>
- STATUS_SUCCESS = "success"<br>
- STATUS_FAILURE = "failure"<br>
- STATUS_EXPECTED_FAILURE = "expected_failure"<br>
- STATUS_UNEXPECTED_SUCCESS = "unexpected_success"<br>
- STATUS_SKIP = "skip"<br>
- STATUS_ERROR = "error"<br>
-<br>
- @staticmethod<br>
- def _get_test_name_info(test):<br>
- """Returns (test-class-name, test-method-name) from a test case instance.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @return tuple containing (test class name, test method name)<br>
- """<br>
- test_class_components = test.id().split(".")<br>
- test_class_name = ".".join(test_class_components[:-1])<br>
- test_name = test_class_components[-1]<br>
- return (test_class_name, test_name)<br>
-<br>
- @staticmethod<br>
- def bare_event(event_type):<br>
- """Creates an event with default additions, event type and timestamp.<br>
-<br>
- @param event_type the value set for the "event" key, used<br>
- to distinguish events.<br>
-<br>
- @returns an event dictionary with all default additions, the "event"<br>
- key set to the passed in event_type, and the event_time value set to<br>
- time.time().<br>
- """<br>
- if EventBuilder.BASE_DICTIONARY is not None:<br>
- # Start with a copy of the "always include" entries.<br>
- event = dict(EventBuilder.BASE_DICTIONARY)<br>
- else:<br>
- event = {}<br>
-<br>
- event.update({<br>
- "event": event_type,<br>
- "event_time": time.time()<br>
- })<br>
- return event<br>
-<br>
- @staticmethod<br>
- def _event_dictionary_common(test, event_type):<br>
- """Returns an event dictionary setup with values for the given event type.<br>
-<br>
- @param test the unittest.TestCase instance<br>
-<br>
- @param event_type the name of the event type (string).<br>
-<br>
- @return event dictionary with common event fields set.<br>
- """<br>
- test_class_name, test_name = EventBuilder._get_test_name_info(test)<br>
-<br>
- event = EventBuilder.bare_event(event_type)<br>
- event.update({<br>
- "test_class": test_class_name,<br>
- "test_name": test_name,<br>
- "test_filename": inspect.getfile(test.__class__)<br>
- })<br>
- return event<br>
-<br>
- @staticmethod<br>
- def _error_tuple_class(error_tuple):<br>
- """Returns the unittest error tuple's error class as a string.<br>
-<br>
- @param error_tuple the error tuple provided by the test framework.<br>
-<br>
- @return the error type (typically an exception) raised by the<br>
- test framework.<br>
- """<br>
- type_var = error_tuple[0]<br>
- module = inspect.getmodule(type_var)<br>
- if module:<br>
- return "{}.{}".format(module.__name__, type_var.__name__)<br>
- else:<br>
- return type_var.__name__<br>
-<br>
- @staticmethod<br>
- def _error_tuple_message(error_tuple):<br>
- """Returns the unittest error tuple's error message.<br>
-<br>
- @param error_tuple the error tuple provided by the test framework.<br>
-<br>
- @return the error message provided by the test framework.<br>
- """<br>
- return str(error_tuple[1])<br>
-<br>
- @staticmethod<br>
- def _error_tuple_traceback(error_tuple):<br>
- """Returns the unittest error tuple's error message.<br>
-<br>
- @param error_tuple the error tuple provided by the test framework.<br>
-<br>
- @return the error message provided by the test framework.<br>
- """<br>
- return error_tuple[2]<br>
-<br>
- @staticmethod<br>
- def _event_dictionary_test_result(test, status):<br>
- """Returns an event dictionary with common test result fields set.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @param status the status/result of the test<br>
- (e.g. "success", "failure", etc.)<br>
-<br>
- @return the event dictionary<br>
- """<br>
- event = EventBuilder._event_dictionary_common(test, "test_result")<br>
- event["status"] = status<br>
- return event<br>
-<br>
- @staticmethod<br>
- def _event_dictionary_issue(test, status, error_tuple):<br>
- """Returns an event dictionary with common issue-containing test result<br>
- fields set.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @param status the status/result of the test<br>
- (e.g. "success", "failure", etc.)<br>
-<br>
- @param error_tuple the error tuple as reported by the test runner.<br>
- This is of the form (type<error>, error).<br>
-<br>
- @return the event dictionary<br>
- """<br>
- event = EventBuilder._event_dictionary_test_result(test, status)<br>
- event["issue_class"] = EventBuilder._error_tuple_class(error_tuple)<br>
- event["issue_message"] = EventBuilder._error_tuple_message(error_tuple)<br>
- backtrace = EventBuilder._error_tuple_traceback(error_tuple)<br>
- if backtrace is not None:<br>
- event["issue_backtrace"] = traceback.format_tb(backtrace)<br>
- return event<br>
-<br>
- @staticmethod<br>
- def event_for_start(test):<br>
- """Returns an event dictionary for the test start event.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @return the event dictionary<br>
- """<br>
- return EventBuilder._event_dictionary_common(test, "test_start")<br>
-<br>
- @staticmethod<br>
- def event_for_success(test):<br>
- """Returns an event dictionary for a successful test.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @return the event dictionary<br>
- """<br>
- return EventBuilder._event_dictionary_test_result(<br>
- test, EventBuilder.STATUS_SUCCESS)<br>
-<br>
- @staticmethod<br>
- def event_for_unexpected_success(test, bugnumber):<br>
- """Returns an event dictionary for a test that succeeded but was<br>
- expected to fail.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @param bugnumber the issue identifier for the bug tracking the<br>
- fix request for the test expected to fail (but is in fact<br>
- passing here).<br>
-<br>
- @return the event dictionary<br>
-<br>
- """<br>
- event = EventBuilder._event_dictionary_test_result(<br>
- test, EventBuilder.STATUS_UNEXPECTED_SUCCESS)<br>
- if bugnumber:<br>
- event["bugnumber"] = str(bugnumber)<br>
- return event<br>
-<br>
- @staticmethod<br>
- def event_for_failure(test, error_tuple):<br>
- """Returns an event dictionary for a test that failed.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @param error_tuple the error tuple as reported by the test runner.<br>
- This is of the form (type<error>, error).<br>
-<br>
- @return the event dictionary<br>
- """<br>
- return EventBuilder._event_dictionary_issue(<br>
- test, EventBuilder.STATUS_FAILURE, error_tuple)<br>
-<br>
- @staticmethod<br>
- def event_for_expected_failure(test, error_tuple, bugnumber):<br>
- """Returns an event dictionary for a test that failed as expected.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @param error_tuple the error tuple as reported by the test runner.<br>
- This is of the form (type<error>, error).<br>
-<br>
- @param bugnumber the issue identifier for the bug tracking the<br>
- fix request for the test expected to fail.<br>
-<br>
- @return the event dictionary<br>
-<br>
- """<br>
- event = EventBuilder._event_dictionary_issue(<br>
- test, EventBuilder.STATUS_EXPECTED_FAILURE, error_tuple)<br>
- if bugnumber:<br>
- event["bugnumber"] = str(bugnumber)<br>
- return event<br>
-<br>
- @staticmethod<br>
- def event_for_skip(test, reason):<br>
- """Returns an event dictionary for a test that was skipped.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @param reason the reason why the test is being skipped.<br>
-<br>
- @return the event dictionary<br>
- """<br>
- event = EventBuilder._event_dictionary_test_result(<br>
- test, EventBuilder.STATUS_SKIP)<br>
- event["skip_reason"] = reason<br>
- return event<br>
-<br>
- @staticmethod<br>
- def event_for_error(test, error_tuple):<br>
- """Returns an event dictionary for a test that hit a test execution error.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @param error_tuple the error tuple as reported by the test runner.<br>
- This is of the form (type<error>, error).<br>
-<br>
- @return the event dictionary<br>
- """<br>
- return EventBuilder._event_dictionary_issue(<br>
- test, EventBuilder.STATUS_ERROR, error_tuple)<br>
-<br>
- @staticmethod<br>
- def event_for_cleanup_error(test, error_tuple):<br>
- """Returns an event dictionary for a test that hit a test execution error<br>
- during the test cleanup phase.<br>
-<br>
- @param test a unittest.TestCase instance.<br>
-<br>
- @param error_tuple the error tuple as reported by the test runner.<br>
- This is of the form (type<error>, error).<br>
-<br>
- @return the event dictionary<br>
- """<br>
- event = EventBuilder._event_dictionary_issue(<br>
- test, EventBuilder.STATUS_ERROR, error_tuple)<br>
- event["issue_phase"] = "cleanup"<br>
- return event<br>
-<br>
- @staticmethod<br>
- def add_entries_to_all_events(entries_dict):<br>
- """Specifies a dictionary of entries to add to all test events.<br>
-<br>
- This provides a mechanism for, say, a parallel test runner to<br>
- indicate to each inferior dotest.py that it should add a<br>
- worker index to each.<br>
-<br>
- Calling this method replaces all previous entries added<br>
- by a prior call to this.<br>
-<br>
- Event build methods will overwrite any entries that collide.<br>
- Thus, the passed in dictionary is the base, which gets merged<br>
- over by event building when keys collide.<br>
-<br>
- @param entries_dict a dictionary containing key and value<br>
- pairs that should be merged into all events created by the<br>
- event generator. May be None to clear out any extra entries.<br>
- """<br>
- EventBuilder.BASE_DICTIONARY = dict(entries_dict)<br>
-<br>
-<br>
-class ResultsFormatter(object):<br>
-<br>
- """Provides interface to formatting test results out to a file-like object.<br>
-<br>
- This class allows the LLDB test framework's raw test-related<br>
- events to be processed and formatted in any manner desired.<br>
- Test events are represented by python dictionaries, formatted<br>
- as in the EventBuilder class above.<br>
-<br>
- ResultsFormatter instances are given a file-like object in which<br>
- to write their results.<br>
-<br>
- ResultsFormatter lifetime looks like the following:<br>
-<br>
- # The result formatter is created.<br>
- # The argparse options dictionary is generated from calling<br>
- # the SomeResultFormatter.arg_parser() with the options data<br>
- # passed to dotest.py via the "--results-formatter-options"<br>
- # argument. See the help on that for syntactic requirements<br>
- # on getting that parsed correctly.<br>
- formatter = SomeResultFormatter(file_like_object, argpared_options_dict)<br>
-<br>
- # Single call to session start, before parsing any events.<br>
- formatter.begin_session()<br>
-<br>
- formatter.handle_event({"event":"initialize",...})<br>
-<br>
- # Zero or more calls specified for events recorded during the test session.<br>
- # The parallel test runner manages getting results from all the inferior<br>
- # dotest processes, so from a new format perspective, don't worry about<br>
- # that. The formatter will be presented with a single stream of events<br>
- # sandwiched between a single begin_session()/end_session() pair in the<br>
- # parallel test runner process/thread.<br>
- for event in zero_or_more_test_events():<br>
- formatter.handle_event(event)<br>
-<br>
- # Single call to terminate/wrap-up. Formatters that need all the<br>
- # data before they can print a correct result (e.g. xUnit/JUnit),<br>
- # this is where the final report can be generated.<br>
- formatter.handle_event({"event":"terminate",...})<br>
-<br>
- It is not the formatter's responsibility to close the file_like_object.<br>
- (i.e. do not close it).<br>
-<br>
- The lldb test framework passes these test events in real time, so they<br>
- arrive as they come in.<br>
-<br>
- In the case of the parallel test runner, the dotest inferiors<br>
- add a 'pid' field to the dictionary that indicates which inferior<br>
- pid generated the event.<br>
-<br>
- Note more events may be added in the future to support richer test<br>
- reporting functionality. One example: creating a true flaky test<br>
- result category so that unexpected successes really mean the test<br>
- is marked incorrectly (either should be marked flaky, or is indeed<br>
- passing consistently now and should have the xfail marker<br>
- removed). In this case, a flaky_success and flaky_fail event<br>
- likely will be added to capture these and support reporting things<br>
- like percentages of flaky test passing so we can see if we're<br>
- making some things worse/better with regards to failure rates.<br>
-<br>
- Another example: announcing all the test methods that are planned<br>
- to be run, so we can better support redo operations of various kinds<br>
- (redo all non-run tests, redo non-run tests except the one that<br>
- was running [perhaps crashed], etc.)<br>
-<br>
- Implementers are expected to override all the public methods<br>
- provided in this class. See each method's docstring to see<br>
- expectations about when the call should be chained.<br>
-<br>
- """<br>
- @classmethod<br>
- def arg_parser(cls):<br>
- """@return arg parser used to parse formatter-specific options."""<br>
- parser = argparse.ArgumentParser(<br>
- description='{} options'.format(cls.__name__),<br>
- usage=('dotest.py --results-formatter-options='<br>
- '"--option1 value1 [--option2 value2 [...]]"'))<br>
- return parser<br>
-<br>
- def __init__(self, out_file, options):<br>
- super(ResultsFormatter, self).__init__()<br>
- self.out_file = out_file<br>
- self.options = options<br>
- self.using_terminal = False<br>
- if not self.out_file:<br>
- raise Exception("ResultsFormatter created with no file object")<br>
- self.start_time_by_test = {}<br>
- self.terminate_called = False<br>
-<br>
- # Store counts of test_result events by status.<br>
- self.result_status_counts = {<br>
- EventBuilder.STATUS_SUCCESS: 0,<br>
- EventBuilder.STATUS_EXPECTED_FAILURE: 0,<br>
- EventBuilder.STATUS_SKIP: 0,<br>
- EventBuilder.STATUS_UNEXPECTED_SUCCESS: 0,<br>
- EventBuilder.STATUS_FAILURE: 0,<br>
- EventBuilder.STATUS_ERROR: 0<br>
- }<br>
-<br>
- # Lock that we use while mutating inner state, like the<br>
- # total test count and the elements. We minimize how<br>
- # long we hold the lock just to keep inner state safe, not<br>
- # entirely consistent from the outside.<br>
- self.lock = threading.Lock()<br>
-<br>
- def handle_event(self, test_event):<br>
- """Handles the test event for collection into the formatter output.<br>
-<br>
- Derived classes may override this but should call down to this<br>
- implementation first.<br>
-<br>
- @param test_event the test event as formatted by one of the<br>
- event_for_* calls.<br>
- """<br>
- # Keep track of whether terminate was received. We do this so<br>
- # that a process can call the 'terminate' event on its own, to<br>
- # close down a formatter at the appropriate time. Then the<br>
- # atexit() cleanup can call the "terminate if it hasn't been<br>
- # called yet".<br>
- if test_event is not None:<br>
- event_type = test_event.get("event", "")<br>
- if event_type == "terminate":<br>
- self.terminate_called = True<br>
- elif event_type == "test_result":<br>
- # Keep track of event counts per test result status type<br>
- status = test_event["status"]<br>
- self.result_status_counts[status] += 1<br>
-<br>
- def track_start_time(self, test_class, test_name, start_time):<br>
- """tracks the start time of a test so elapsed time can be computed.<br>
-<br>
- this alleviates the need for test results to be processed serially<br>
- by test. it will save the start time for the test so that<br>
- elapsed_time_for_test() can compute the elapsed time properly.<br>
- """<br>
- if test_class is None or test_name is None:<br>
- return<br>
-<br>
- test_key = "{}.{}".format(test_class, test_name)<br>
- with self.lock:<br>
- self.start_time_by_test[test_key] = start_time<br>
-<br>
- def elapsed_time_for_test(self, test_class, test_name, end_time):<br>
- """returns the elapsed time for a test.<br>
-<br>
- this function can only be called once per test and requires that<br>
- the track_start_time() method be called sometime prior to calling<br>
- this method.<br>
- """<br>
- if test_class is None or test_name is None:<br>
- return -2.0<br>
-<br>
- test_key = "{}.{}".format(test_class, test_name)<br>
- with self.lock:<br>
- if test_key not in self.start_time_by_test:<br>
- return -1.0<br>
- else:<br>
- start_time = self.start_time_by_test[test_key]<br>
- del self.start_time_by_test[test_key]<br>
- return end_time - start_time<br>
-<br>
- def is_using_terminal(self):<br>
- """returns true if this results formatter is using the terminal and<br>
- output should be avoided."""<br>
- return self.using_terminal<br>
-<br>
- def send_terminate_as_needed(self):<br>
- """sends the terminate event if it hasn't been received yet."""<br>
- if not self.terminate_called:<br>
- terminate_event = EventBuilder.bare_event("terminate")<br>
- self.handle_event(terminate_event)<br>
-<br>
- # Derived classes may require self access<br>
- # pylint: disable=no-self-use<br>
- def replaces_summary(self):<br>
- """Returns whether the results formatter includes a summary<br>
- suitable to replace the old lldb test run results.<br>
-<br>
- @return True if the lldb test runner can skip its summary<br>
- generation when using this results formatter; False otherwise.<br>
- """<br>
- return False<br>
-<br>
- def counts_by_test_result_status(self, status):<br>
- """Returns number of test method results for the given status.<br>
-<br>
- @status_result a test result status (e.g. success, fail, skip)<br>
- as defined by the EventBuilder.STATUS_* class members.<br>
-<br>
- @return an integer returning the number of test methods matching<br>
- the given test result status.<br>
- """<br>
- return self.result_status_counts[status]<br>
-<br>
-<br>
-class XunitFormatter(ResultsFormatter):<br>
- """Provides xUnit-style formatted output.<br>
- """<br>
-<br>
- # Result mapping arguments<br>
- RM_IGNORE = 'ignore'<br>
- RM_SUCCESS = 'success'<br>
- RM_FAILURE = 'failure'<br>
- RM_PASSTHRU = 'passthru'<br>
-<br>
- @staticmethod<br>
- def _build_illegal_xml_regex():<br>
- """Constructs a regex to match all illegal xml characters.<br>
-<br>
- Expects to be used against a unicode string."""<br>
- # Construct the range pairs of invalid unicode characters.<br>
- illegal_chars_u = [<br>
- (0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F), (0x7F, 0x84),<br>
- (0x86, 0x9F), (0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF)]<br>
-<br>
- # For wide builds, we have more.<br>
- if sys.maxunicode >= 0x10000:<br>
- illegal_chars_u.extend(<br>
- [(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF), (0x3FFFE, 0x3FFFF),<br>
- (0x4FFFE, 0x4FFFF), (0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),<br>
- (0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF), (0x9FFFE, 0x9FFFF),<br>
- (0xAFFFE, 0xAFFFF), (0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),<br>
- (0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF), (0xFFFFE, 0xFFFFF),<br>
- (0x10FFFE, 0x10FFFF)])<br>
-<br>
- # Build up an array of range expressions.<br>
- illegal_ranges = [<br>
- "%s-%s" % (six.unichr(low), six.unichr(high))<br>
- for (low, high) in illegal_chars_u]<br>
-<br>
- # Compile the regex<br>
- return re.compile(six.u('[%s]') % six.u('').join(illegal_ranges))<br>
-<br>
- @staticmethod<br>
- def _quote_attribute(text):<br>
- """Returns the given text in a manner safe for usage in an XML attribute.<br>
-<br>
- @param text the text that should appear within an XML attribute.<br>
- @return the attribute-escaped version of the input text.<br>
- """<br>
- return xml.sax.saxutils.quoteattr(text)<br>
-<br>
- def _replace_invalid_xml(self, str_or_unicode):<br>
- """Replaces invalid XML characters with a '?'.<br>
-<br>
- @param str_or_unicode a string to replace invalid XML<br>
- characters within. Can be unicode or not. If not unicode,<br>
- assumes it is a byte string in utf-8 encoding.<br>
-<br>
- @returns a utf-8-encoded byte string with invalid<br>
- XML replaced with '?'.<br>
- """<br>
- # Get the content into unicode<br>
- if isinstance(str_or_unicode, str):<br>
- unicode_content = str_or_unicode.decode('utf-8')<br>
- else:<br>
- unicode_content = str_or_unicode<br>
- return self.invalid_xml_re.sub(<br>
- six.u('?'), unicode_content).encode('utf-8')<br>
-<br>
@classmethod
def arg_parser(cls):
    """@return arg parser used to parse formatter-specific options."""
    parser = super(XunitFormatter, cls).arg_parser()

    # Legal values for the --xpass/--xfail result-mapping options.
    mapping_choices = [
        XunitFormatter.RM_IGNORE,
        XunitFormatter.RM_SUCCESS,
        XunitFormatter.RM_FAILURE,
        XunitFormatter.RM_PASSTHRU]

    parser.add_argument(
        "--assert-on-unknown-events",
        action="store_true",
        help=('cause unknown test events to generate '
              'a python assert. Default is to ignore.'))
    parser.add_argument(
        "--ignore-skip-name",
        "-n",
        metavar='PATTERN',
        action="append",
        dest='ignore_skip_name_patterns',
        help=('a python regex pattern, where '
              'any skipped test with a test method name where regex '
              'matches (via search) will be ignored for xUnit test '
              'result purposes. Can be specified multiple times.'))
    parser.add_argument(
        "--ignore-skip-reason",
        "-r",
        metavar='PATTERN',
        action="append",
        dest='ignore_skip_reason_patterns',
        help=('a python regex pattern, where '
              'any skipped test with a skip reason where the regex '
              'matches (via search) will be ignored for xUnit test '
              'result purposes. Can be specified multiple times.'))
    parser.add_argument(
        "--xpass", action="store", choices=mapping_choices,
        default=XunitFormatter.RM_FAILURE,
        help=('specify mapping from unexpected success to jUnit/xUnit '
              'result type'))
    parser.add_argument(
        "--xfail", action="store", choices=mapping_choices,
        default=XunitFormatter.RM_IGNORE,
        help=('specify mapping from expected failure to jUnit/xUnit '
              'result type'))
    return parser
-<br>
- @staticmethod<br>
- def _build_regex_list_from_patterns(patterns):<br>
- """Builds a list of compiled regexes from option value.<br>
-<br>
- @param option string containing a comma-separated list of regex<br>
- patterns. Zero-length or None will produce an empty regex list.<br>
-<br>
- @return list of compiled regular expressions, empty if no<br>
- patterns provided.<br>
- """<br>
- regex_list = []<br>
- if patterns is not None:<br>
- for pattern in patterns:<br>
- regex_list.append(re.compile(pattern))<br>
- return regex_list<br>
-<br>
def __init__(self, out_file, options):
    """Initializes the XunitFormatter instance.

    @param out_file file-like object where formatted output is written.
    @param options the parsed formatter options (an argparse-style
    namespace, judging by the attribute accesses below -- see
    arg_parser()).
    """
    # Initialize the parent
    super(XunitFormatter, self).__init__(out_file, options)
    # Encoding advertised in the XML declaration of the report.
    self.text_encoding = "UTF-8"
    # Regex matching XML-illegal characters; used to scrub messages
    # and backtraces before they are embedded in the report.
    self.invalid_xml_re = XunitFormatter._build_illegal_xml_regex()
    # Running count of every testcase recorded in the report.
    self.total_test_count = 0
    # Skipped tests whose method name matches any of these regexes
    # are omitted from the report (--ignore-skip-name).
    self.ignore_skip_name_regexes = (
        XunitFormatter._build_regex_list_from_patterns(
            options.ignore_skip_name_patterns))
    # Skipped tests whose skip reason matches any of these regexes
    # are omitted from the report (--ignore-skip-reason).
    self.ignore_skip_reason_regexes = (
        XunitFormatter._build_regex_list_from_patterns(
            options.ignore_skip_reason_patterns))

    # Formatted <testcase> strings bucketed by outcome; "all" keeps
    # every entry in arrival order for the final report body.
    self.elements = {
        "successes": [],
        "errors": [],
        "failures": [],
        "skips": [],
        "unexpected_successes": [],
        "expected_failures": [],
        "all": []
    }

    # Dispatch table mapping a test-result status value to the
    # method that records it (see _process_test_result()).
    self.status_handlers = {
        EventBuilder.STATUS_SUCCESS: self._handle_success,
        EventBuilder.STATUS_FAILURE: self._handle_failure,
        EventBuilder.STATUS_ERROR: self._handle_error,
        EventBuilder.STATUS_SKIP: self._handle_skip,
        EventBuilder.STATUS_EXPECTED_FAILURE:
            self._handle_expected_failure,
        EventBuilder.STATUS_UNEXPECTED_SUCCESS:
            self._handle_unexpected_success
    }
-<br>
def handle_event(self, test_event):
    """Dispatches one incoming test event to the appropriate handler.

    @param test_event the test event dictionary to handle.
    """
    super(XunitFormatter, self).handle_event(test_event)

    # Use .get() so an event dictionary that lacks an "event" key is
    # tolerated the same way as one whose value is None, instead of
    # raising KeyError -- the None check below shows the intent was
    # to skip such events.
    event_type = test_event.get("event")
    if event_type is None:
        return

    if event_type == "terminate":
        # No more events will arrive: emit the complete report.
        self._finish_output()
    elif event_type == "test_start":
        # Remember when the test began so the result handler can
        # compute elapsed time.
        self.track_start_time(
            test_event["test_class"],
            test_event["test_name"],
            test_event["event_time"])
    elif event_type == "test_result":
        self._process_test_result(test_event)
    else:
        # This is an unknown event.
        if self.options.assert_on_unknown_events:
            raise Exception("unknown event type {} from {}\n".format(
                event_type, test_event))
-<br>
def _handle_success(self, test_event):
    """Records a passing test in the report.

    @param test_event the test event to handle.
    """
    entry = self._common_add_testcase_entry(test_event)
    with self.lock:
        self.elements["successes"].append(entry)
-<br>
def _handle_failure(self, test_event):
    """Records a failing test in the report.

    @param test_event the test event to handle.
    """
    # Scrub the message and backtrace of XML-illegal characters
    # before embedding them.
    message = self._replace_invalid_xml(test_event["issue_message"])
    backtrace = self._replace_invalid_xml(
        "".join(test_event.get("issue_backtrace", [])))

    inner = (
        '<failure type={} message={}><![CDATA[{}]]></failure>'.format(
            XunitFormatter._quote_attribute(test_event["issue_class"]),
            XunitFormatter._quote_attribute(message),
            backtrace))
    entry = self._common_add_testcase_entry(
        test_event, inner_content=inner)
    with self.lock:
        self.elements["failures"].append(entry)
-<br>
def _handle_error(self, test_event):
    """Records an errored test in the report.

    @param test_event the test event to handle.
    """
    # Scrub the message and backtrace of XML-illegal characters
    # before embedding them.
    message = self._replace_invalid_xml(test_event["issue_message"])
    backtrace = self._replace_invalid_xml(
        "".join(test_event.get("issue_backtrace", [])))

    inner = (
        '<error type={} message={}><![CDATA[{}]]></error>'.format(
            XunitFormatter._quote_attribute(test_event["issue_class"]),
            XunitFormatter._quote_attribute(message),
            backtrace))
    entry = self._common_add_testcase_entry(
        test_event, inner_content=inner)
    with self.lock:
        self.elements["errors"].append(entry)
-<br>
- @staticmethod<br>
- def _ignore_based_on_regex_list(test_event, test_key, regex_list):<br>
- """Returns whether to ignore a test event based on patterns.<br>
-<br>
- @param test_event the test event dictionary to check.<br>
- @param test_key the key within the dictionary to check.<br>
- @param regex_list a list of zero or more regexes. May contain<br>
- zero or more compiled regexes.<br>
-<br>
- @return True if any o the regex list match based on the<br>
- re.search() method; false otherwise.<br>
- """<br>
- for regex in regex_list:<br>
- match = regex.search(test_event.get(test_key, ''))<br>
- if match:<br>
- return True<br>
- return False<br>
-<br>
def _handle_skip(self, test_event):
    """Records a skipped test, unless an ignore pattern suppresses it.

    @param test_event the test event to handle.
    """
    ignore = XunitFormatter._ignore_based_on_regex_list

    # Suppress the skip if the test name matches an ignore pattern.
    if ignore(test_event, 'test_name', self.ignore_skip_name_regexes):
        return

    # Suppress the skip if the skip reason matches an ignore pattern.
    if ignore(test_event, 'skip_reason',
              self.ignore_skip_reason_regexes):
        return

    # Not suppressed: record the skip with its XML-scrubbed reason.
    reason = self._replace_invalid_xml(test_event.get("skip_reason", ""))
    entry = self._common_add_testcase_entry(
        test_event,
        inner_content='<skipped message={} />'.format(
            XunitFormatter._quote_attribute(reason)))
    with self.lock:
        self.elements["skips"].append(entry)
-<br>
def _handle_expected_failure(self, test_event):
    """Handles a test that failed as expected.

    The xUnit outcome recorded is selected by the --xfail formatter
    option (see arg_parser()).

    @param test_event the test event to handle.
    """
    if self.options.xfail == XunitFormatter.RM_PASSTHRU:
        # This is not a natively-supported junit/xunit
        # testcase mode, so it might fail a validating
        # test results viewer.
        if "bugnumber" in test_event:
            bug_id_attribute = 'bug-id={} '.format(
                XunitFormatter._quote_attribute(test_event["bugnumber"]))
        else:
            bug_id_attribute = ''

        # NOTE(review): unlike _handle_failure/_handle_error, the
        # issue message here is quoted but not first run through
        # _replace_invalid_xml -- confirm whether that is intentional.
        result = self._common_add_testcase_entry(
            test_event,
            inner_content=(
                '<expected-failure {}type={} message={} />'.format(
                    bug_id_attribute,
                    XunitFormatter._quote_attribute(
                        test_event["issue_class"]),
                    XunitFormatter._quote_attribute(
                        test_event["issue_message"]))
            ))
        with self.lock:
            self.elements["expected_failures"].append(result)
    elif self.options.xfail == XunitFormatter.RM_SUCCESS:
        # Record the expected failure as a plain success.
        result = self._common_add_testcase_entry(test_event)
        with self.lock:
            self.elements["successes"].append(result)
    elif self.options.xfail == XunitFormatter.RM_FAILURE:
        # Record the expected failure as a real failure.
        result = self._common_add_testcase_entry(
            test_event,
            inner_content='<failure type={} message={} />'.format(
                XunitFormatter._quote_attribute(test_event["issue_class"]),
                XunitFormatter._quote_attribute(
                    test_event["issue_message"])))
        with self.lock:
            self.elements["failures"].append(result)
    elif self.options.xfail == XunitFormatter.RM_IGNORE:
        # Leave expected failures out of the report entirely.
        pass
    else:
        raise Exception(
            "unknown xfail option: {}".format(self.options.xfail))
-<br>
def _handle_unexpected_success(self, test_event):
    """Handles a test that passed but was expected to fail.

    The xUnit outcome recorded is selected by the --xpass formatter
    option (see arg_parser()).

    @param test_event the test event to handle.
    """
    xpass_mode = self.options.xpass
    if xpass_mode == XunitFormatter.RM_PASSTHRU:
        # This is not a natively-supported junit/xunit testcase
        # element, so a validating results viewer may reject it.
        entry = self._common_add_testcase_entry(
            test_event,
            inner_content="<unexpected-success />")
        with self.lock:
            self.elements["unexpected_successes"].append(entry)
    elif xpass_mode == XunitFormatter.RM_SUCCESS:
        # Report the xpass as a plain success.
        entry = self._common_add_testcase_entry(test_event)
        with self.lock:
            self.elements["successes"].append(entry)
    elif xpass_mode == XunitFormatter.RM_FAILURE:
        # Report the xpass as a failure, noting the bug id if known.
        if "bugnumber" in test_event:
            message = "unexpected success (bug_id:{})".format(
                test_event["bugnumber"])
        else:
            message = "unexpected success (bug_id:none)"
        entry = self._common_add_testcase_entry(
            test_event,
            inner_content='<failure type={} message={} />'.format(
                XunitFormatter._quote_attribute("unexpected_success"),
                XunitFormatter._quote_attribute(message)))
        with self.lock:
            self.elements["failures"].append(entry)
    elif xpass_mode == XunitFormatter.RM_IGNORE:
        # Ignore the xpass result as far as xUnit reporting goes.
        pass
    else:
        raise Exception("unknown xpass option: {}".format(
            self.options.xpass))
-<br>
def _process_test_result(self, test_event):
    """Routes a test_result event to the handler for its status.

    This categorizes the event appropriately and stores the data
    needed to generate the final xUnit report.  Events whose status
    has no registered handler are rejected.

    @param test_event a test event known to be a test result.
    """
    if "status" not in test_event:
        raise Exception("test event dictionary missing 'status' key")

    status = test_event["status"]
    handler = self.status_handlers.get(status)
    if handler is None:
        raise Exception("test event status '{}' unsupported".format(
            status))

    # Call the status handler for the test result.
    handler(test_event)
-<br>
def _common_add_testcase_entry(self, test_event, inner_content=None):
    """Builds, records and returns one <testcase> XML fragment.

    The caller is expected to manage failure/skip/success counts in
    some kind of appropriate way.  This call simply constructs the
    XML text, appends it to the self.elements["all"] list, and bumps
    the total test count.

    @param test_event the test event dictionary.

    @param inner_content if specified, gets included in the <testcase>
    inner section, at the point before stdout and stderr would be
    included.  This is where a <failure/>, <skipped/>, <error/>, etc.
    could go.

    @return the text of the xml testcase element.
    """
    # Compute the elapsed time from the tracked start time.
    elapsed = self.elapsed_time_for_test(
        test_event["test_class"],
        test_event["test_name"],
        test_event["event_time"])

    # Plumb in stdout/stderr once we shift over to only test results.
    captured_stdout = ''
    captured_stderr = ''

    # Formulate the output xml.
    entry = (
        '<testcase classname="{}" name="{}" time="{:.3f}">'
        '{}{}{}</testcase>'.format(
            test_event["test_class"],
            test_event["test_name"],
            elapsed,
            inner_content or "",
            captured_stdout,
            captured_stderr))

    # Save the result, update total test count.
    with self.lock:
        self.total_test_count += 1
        self.elements["all"].append(entry)

    return entry
-<br>
def _finish_output_no_lock(self):
    """Writes the complete xUnit report; caller must hold self.lock.

    xUnit output is XML, so the report cannot be completed until we
    know no more test events are coming.  This runs when the test
    run terminates.
    """
    # Expected-failure / unexpected-success counts are not standard
    # testsuite attributes, so only emit them when non-zero, at the
    # risk of being rejected by a validating results viewer.
    expected_failure_count = len(self.elements["expected_failures"])
    unexpected_success_count = len(self.elements["unexpected_successes"])
    if expected_failure_count > 0 or unexpected_success_count > 0:
        extra_testsuite_attributes = (
            ' expected-failures="{}"'
            ' unexpected-successes="{}"'.format(
                expected_failure_count, unexpected_success_count))
    else:
        extra_testsuite_attributes = ""

    # Output the header.
    self.out_file.write(
        '<?xml version="1.0" encoding="{}"?>\n'
        '<testsuites>'
        '<testsuite name="{}" tests="{}" errors="{}" failures="{}" '
        'skip="{}"{}>\n'.format(
            self.text_encoding,
            "LLDB test suite",
            self.total_test_count,
            len(self.elements["errors"]),
            len(self.elements["failures"]),
            len(self.elements["skips"]),
            extra_testsuite_attributes))

    # Output each of the test result entries.
    for entry in self.elements["all"]:
        self.out_file.write(entry + '\n')

    # Close off the test suite.
    self.out_file.write('</testsuite></testsuites>\n')
-<br>
def _finish_output(self):
    """Writes the final report once all incoming events have arrived."""
    # Serialize against concurrently-running handlers before
    # flushing the report.
    with self.lock:
        self._finish_output_no_lock()
-<br>
-<br>
class RawPickledFormatter(ResultsFormatter):
    """Formats events as a pickled stream.

    The parallel test runner has inferiors pickle their results and
    send them over a socket back to the parallel test runner, which
    aggregates them into the final results formatter (e.g. xUnit).
    """

    # Inferior lifecycle events are renamed to the job-level names the
    # aggregating side expects.
    _EVENT_REMAP = {
        "initialize": "job_begin",
        "terminate": "job_end",
    }

    @classmethod
    def arg_parser(cls):
        """@return arg parser used to parse formatter-specific options."""
        return super(RawPickledFormatter, cls).arg_parser()

    def __init__(self, out_file, options):
        super(RawPickledFormatter, self).__init__(out_file, options)
        # Tag every event with this worker's pid so the aggregator
        # can tell inferiors apart.
        self.pid = os.getpid()

    def handle_event(self, test_event):
        super(RawPickledFormatter, self).handle_event(test_event)

        event_type = test_event["event"]
        if event_type is None:
            return

        # Convert initialize/terminate events into job_begin/job_end.
        if event_type in self._EVENT_REMAP:
            test_event["event"] = self._EVENT_REMAP[event_type]

        # Tack on the pid.
        test_event["pid"] = self.pid

        # Frame the payload as {length}{pickled bytes} so the receiver
        # can carve complete packets out of the stream.
        import struct
        msg = cPickle.dumps(test_event)
        packet = struct.pack("!I%ds" % len(msg), len(msg), msg)
        self.out_file.send(packet)
-<br>
-<br>
class DumpFormatter(ResultsFormatter):
    """Writes each event to the file as a pretty-printed python dict."""

    def handle_event(self, test_event):
        super(DumpFormatter, self).handle_event(test_event)
        # Blank lines around each dump keep successive events readable.
        self.out_file.write("\n" + pprint.pformat(test_event) + "\n")
<br>
<br>
_______________________________________________<br>
lldb-commits mailing list<br>
<a href="mailto:lldb-commits@lists.llvm.org">lldb-commits@lists.llvm.org</a><br>
<a href="http://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits" rel="noreferrer" target="_blank">http://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits</a><br>
</blockquote></div><br><br clear="all"><div><br></div>-- <br><div class="gmail_signature"><div dir="ltr">-Todd</div></div>
</div>