[Lldb-commits] [lldb] 9f18f3c - [lldb] Improve test failure reporting for expect()

David Spickett via lldb-commits lldb-commits at lists.llvm.org
Thu Sep 3 05:35:15 PDT 2020


Author: David Spickett
Date: 2020-09-03T13:35:05+01:00
New Revision: 9f18f3c858d5ff8154701a50c2be6f7b19667a1d

URL: https://github.com/llvm/llvm-project/commit/9f18f3c858d5ff8154701a50c2be6f7b19667a1d
DIFF: https://github.com/llvm/llvm-project/commit/9f18f3c858d5ff8154701a50c2be6f7b19667a1d.diff

LOG: [lldb] Improve test failure reporting for expect()

This updates the errors reported by expect()
to something like:

```
Ran command:
"help"

Got output:
Debugger commands:
<...>

Expecting start string: "Debugger commands:" (was found)
Expecting end string: "foo" (was not found)
```
(see added tests for more examples)

This shows the user exactly what was run,
which checks passed and which failed, and
whether each check was expected to pass
(including what the regex patterns matched).
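
As a rough sketch, the kind of call that would produce the
report above looks like this (an illustration only, not code
from this commit):

```
# Inside an API test (an lldbtest.TestBase subclass); "foo" is
# deliberately not at the end of the "help" output, so the
# endstr check fails and the report above is printed.
self.expect("help", startstr="Debugger commands:", endstr="foo")
```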

These lines are also output to the test
trace file, whether the test passes or not.

Note that expect() will still fail at the first failed
check, in line with previous behaviour.
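
For example (a sketch based on the added tests; checks run in
the order startstr, endstr, substrs, patterns):

```
# "ijk" never appears in the string, so the remaining substring
# "xyz" and the regex patterns are not evaluated or reported.
self.expect("abcdefg", exe=False,
            substrs=["abc", "ijk", "xyz"],
            patterns=["foo"])
```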

I have also flipped the wording of the assert
message functions (.*_MSG) to describe failures
rather than successes. This makes more sense, as
these messages are only shown on assert failures.
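
For instance, CMD_MSG now produces a failure description
(the assertTrue call below is only an illustrative use of the
message, not part of this diff):

```
# Before: CMD_MSG("help") -> "Command 'help' returns successfully"
# After:  CMD_MSG("help") -> "Command 'help' did not return successfully"
# The message is only shown when the assert fails, so wording it
# as a failure reads correctly:
self.assertTrue(self.res.Succeeded(), msg=CMD_MSG("help"))
```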

Reviewed By: labath

Differential Revision: https://reviews.llvm.org/D86792

Added: 
    lldb/test/API/assert_messages_test/TestAssertMessages.py

Modified: 
    lldb/packages/Python/lldbsuite/test/lldbtest.py

Removed: 
    


################################################################################
diff  --git a/lldb/packages/Python/lldbsuite/test/lldbtest.py b/lldb/packages/Python/lldbsuite/test/lldbtest.py
index dacd5ed734b5..e1966692b03c 100644
--- a/lldb/packages/Python/lldbsuite/test/lldbtest.py
+++ b/lldb/packages/Python/lldbsuite/test/lldbtest.py
@@ -179,12 +179,12 @@
 
 
 def CMD_MSG(str):
-    '''A generic "Command '%s' returns successfully" message generator.'''
-    return "Command '%s' returns successfully" % str
+    '''A generic "Command '%s' did not return successfully" message generator.'''
+    return "Command '%s' did not return successfully" % str
 
 
 def COMPLETION_MSG(str_before, str_after, completions):
-    '''A generic message generator for the completion mechanism.'''
+    '''A generic assertion failed message generator for the completion mechanism.'''
     return ("'%s' successfully completes to '%s', but completions were:\n%s"
            % (str_before, str_after, "\n".join(completions)))
 
@@ -198,8 +198,8 @@ def EXP_MSG(str, actual, exe):
 
 
 def SETTING_MSG(setting):
-    '''A generic "Value of setting '%s' is correct" message generator.'''
-    return "Value of setting '%s' is correct" % setting
+    '''A generic "Value of setting '%s' is not correct" message generator.'''
+    return "Value of setting '%s' is not correct" % setting
 
 
 def line_number(filename, string_to_match):
@@ -2433,58 +2433,76 @@ def expect(
             with recording(self, trace) as sbuf:
                 print("looking at:", output, file=sbuf)
 
-        # The heading says either "Expecting" or "Not expecting".
-        heading = "Expecting" if matching else "Not expecting"
+        expecting_str = "Expecting" if matching else "Not expecting"
+        def found_str(matched):
+            return "was found" if matched else "was not found"
+
+        # To be used as assert fail message and/or trace content
+        log_lines = [
+                "{}:".format("Ran command" if exe else "Checking string"),
+                "\"{}\"".format(str),
+                # Space out command and output
+                "",
+        ]
+        if exe:
+            # Newline before output to make large strings more readable
+            log_lines.append("Got output:\n{}".format(output))
 
-        # Start from the startstr, if specified.
-        # If there's no startstr, set the initial state appropriately.
-        matched = output.startswith(startstr) if startstr else (
-            True if matching else False)
+        # Assume that we start matched if we want a match
+        # Meaning if you have no conditions, matching or
+        # not matching will always pass
+        matched = matching
 
+        # We will stop checking on first failure
         if startstr:
-            with recording(self, trace) as sbuf:
-                print("%s start string: %s" % (heading, startstr), file=sbuf)
-                print("Matched" if matched else "Not matched", file=sbuf)
+            matched = output.startswith(startstr)
+            log_lines.append("{} start string: \"{}\" ({})".format(
+                    expecting_str, startstr, found_str(matched)))
 
-        # Look for endstr, if specified.
-        keepgoing = matched if matching else not matched
-        if endstr:
+        if endstr and matched == matching:
             matched = output.endswith(endstr)
-            with recording(self, trace) as sbuf:
-                print("%s end string: %s" % (heading, endstr), file=sbuf)
-                print("Matched" if matched else "Not matched", file=sbuf)
+            log_lines.append("{} end string: \"{}\" ({})".format(
+                    expecting_str, endstr, found_str(matched)))
 
-        # Look for sub strings, if specified.
-        keepgoing = matched if matching else not matched
-        if substrs and keepgoing:
+        if substrs and matched == matching:
             start = 0
             for substr in substrs:
                 index = output[start:].find(substr)
                 start = start + index if ordered and matching else 0
                 matched = index != -1
-                with recording(self, trace) as sbuf:
-                    print("%s sub string: %s" % (heading, substr), file=sbuf)
-                    print("Matched" if matched else "Not matched", file=sbuf)
-                keepgoing = matched if matching else not matched
-                if not keepgoing:
+                log_lines.append("{} sub string: \"{}\" ({})".format(
+                        expecting_str, substr, found_str(matched)))
+
+                if matched != matching:
                     break
 
-        # Search for regular expression patterns, if specified.
-        keepgoing = matched if matching else not matched
-        if patterns and keepgoing:
+        if patterns and matched == matching:
             for pattern in patterns:
-                # Match Objects always have a boolean value of True.
-                matched = bool(re.search(pattern, output))
-                with recording(self, trace) as sbuf:
-                    print("%s pattern: %s" % (heading, pattern), file=sbuf)
-                    print("Matched" if matched else "Not matched", file=sbuf)
-                keepgoing = matched if matching else not matched
-                if not keepgoing:
+                matched = re.search(pattern, output)
+
+                pattern_line = "{} regex pattern: \"{}\" ({}".format(
+                        expecting_str, pattern, found_str(matched))
+                if matched:
+                    pattern_line += ", matched \"{}\"".format(
+                            matched.group(0))
+                pattern_line += ")"
+                log_lines.append(pattern_line)
+
+                # Convert to bool because match objects
+                # are True-ish but != True itself
+                matched = bool(matched)
+                if matched != matching:
                     break
 
-        self.assertTrue(matched if matching else not matched,
-                        msg + "\nCommand output:\n" + EXP_MSG(str, output, exe)
-                        if msg else EXP_MSG(str, output, exe))
+        # If a check failed, add any extra assert message
+        if msg is not None and matched != matching:
+            log_lines.append(msg)
+
+        log_msg = "\n".join(log_lines)
+        with recording(self, trace) as sbuf:
+            print(log_msg, file=sbuf)
+        if matched != matching:
+            self.fail(log_msg)
 
     def expect_expr(
             self,

diff  --git a/lldb/test/API/assert_messages_test/TestAssertMessages.py b/lldb/test/API/assert_messages_test/TestAssertMessages.py
new file mode 100644
index 000000000000..6619a65ad69e
--- /dev/null
+++ b/lldb/test/API/assert_messages_test/TestAssertMessages.py
@@ -0,0 +1,115 @@
+"""
+Test the format of API test suite assert failure messages
+"""
+
+
+import lldb
+import lldbsuite.test.lldbutil as lldbutil
+from lldbsuite.test.lldbtest import *
+from textwrap import dedent
+
+
+class AssertMessagesTestCase(TestBase):
+
+    mydir = TestBase.compute_mydir(__file__)
+    NO_DEBUG_INFO_TESTCASE = True
+
+    def assert_expect_fails_with(self, cmd, expect_args, expected_msg):
+        try:
+            # This expect should fail
+            self.expect(cmd, **expect_args)
+        except AssertionError as e:
+            # Then check message from previous expect
+            self.expect(str(e), exe=False, substrs=[dedent(expected_msg)])
+        else:
+            self.fail("Initial expect should have raised AssertionError!")
+
+    def test_expect(self):
+        """Test format of messages produced by expect(...)"""
+
+        # When an expect passes the messages are sent to the trace
+        # file which we can't access here. So really, these only
+        # check what failures look like, but it *should* be the same
+        # content for the trace log too.
+
+        # Will stop at startstr fail
+        self.assert_expect_fails_with("settings list prompt",
+            dict(startstr="dog", endstr="cat"),
+            """\
+               Ran command:
+               "settings list prompt"
+
+               Got output:
+                 prompt -- The debugger command line prompt displayed for the user.
+
+               Expecting start string: "dog" (was not found)""")
+
+        # startstr passes, endstr fails
+        # We see both reported
+        self.assert_expect_fails_with("settings list prompt",
+            dict(startstr="  prompt -- ", endstr="foo"),
+            """\
+               Ran command:
+               "settings list prompt"
+
+               Got output:
+                 prompt -- The debugger command line prompt displayed for the user.
+
+               Expecting start string: "  prompt -- " (was found)
+               Expecting end string: "foo" (was not found)""")
+
+        # Same thing for substrs, regex patterns ignored because of substr failure
+        # Any substr after the first missing is also ignored
+        self.assert_expect_fails_with("abcdefg",
+            dict(substrs=["abc", "ijk", "xyz"],
+            patterns=["foo", "bar"], exe=False),
+            """\
+               Checking string:
+               "abcdefg"
+
+               Expecting sub string: "abc" (was found)
+               Expecting sub string: "ijk" (was not found)""")
+
+        # Regex patterns also stop at first failure, subsequent patterns ignored
+        # They are last in the chain so no other check gets skipped
+        # Including the rest of the conditions here to prove they are run and shown
+        self.assert_expect_fails_with("0123456789",
+            dict(startstr="012", endstr="789", substrs=["345", "678"],
+            patterns=["[0-9]+", "[a-f]+", "a|b|c"], exe=False),
+            """\
+               Checking string:
+               "0123456789"
+
+               Expecting start string: "012" (was found)
+               Expecting end string: "789" (was found)
+               Expecting sub string: "345" (was found)
+               Expecting sub string: "678" (was found)
+               Expecting regex pattern: "[0-9]+" (was found, matched "0123456789")
+               Expecting regex pattern: "[a-f]+" (was not found)""")
+
+        # This time we don't want matches but we do get them
+        self.assert_expect_fails_with("the quick brown fox",
+            # Note that the second pattern *will* match
+            dict(patterns=["[0-9]+", "fox"], exe=False, matching=False,
+            startstr="cat", endstr="rabbit", substrs=["abc", "def"]),
+            """\
+               Checking string:
+               "the quick brown fox"
+
+               Not expecting start string: "cat" (was not found)
+               Not expecting end string: "rabbit" (was not found)
+               Not expecting sub string: "abc" (was not found)
+               Not expecting sub string: "def" (was not found)
+               Not expecting regex pattern: "[0-9]+" (was not found)
+               Not expecting regex pattern: "fox" (was found, matched "fox")""")
+
+        # Extra assert messages are only printed when we get a failure
+        # So I can't test that from here, just how it looks when it's printed
+        self.assert_expect_fails_with("mouse",
+            dict(startstr="cat", exe=False, msg="Reason for check goes here!"),
+            """\
+               Checking string:
+               "mouse"
+
+               Expecting start string: "cat" (was not found)
+               Reason for check goes here!""")


        

