[llvm] 948f3de - Reland "[lit] Use sharding for GoogleTest format"

Yuanfang Chen via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 3 22:37:18 PDT 2022


Author: Yuanfang Chen
Date: 2022-04-03T22:35:45-07:00
New Revision: 948f3deca91a66caf4a618f826ff6de8277bed9c

URL: https://github.com/llvm/llvm-project/commit/948f3deca91a66caf4a618f826ff6de8277bed9c
DIFF: https://github.com/llvm/llvm-project/commit/948f3deca91a66caf4a618f826ff6de8277bed9c.diff

LOG: Reland "[lit] Use sharding for GoogleTest format"

This relands commit a87ba5c86d5d72defdbcdb278baad6515ec99463.

Adjust llvm/utils/lit/tests/googletest-timeout.py for new test output.

Added: 
    llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
    llvm/utils/lit/tests/googletest-crash.py

Modified: 
    llvm/unittests/Support/CrashRecoveryTest.cpp
    llvm/unittests/Support/ProgramTest.cpp
    llvm/utils/lit/lit/Test.py
    llvm/utils/lit/lit/TestingConfig.py
    llvm/utils/lit/lit/formats/googletest.py
    llvm/utils/lit/lit/main.py
    llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
    llvm/utils/lit/tests/googletest-format.py
    llvm/utils/lit/tests/googletest-timeout.py

Removed: 
    llvm/utils/lit/tests/Inputs/googletest-upstream-format/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-upstream-format/lit.cfg
    llvm/utils/lit/tests/googletest-upstream-format.py


################################################################################
diff --git a/llvm/unittests/Support/CrashRecoveryTest.cpp b/llvm/unittests/Support/CrashRecoveryTest.cpp
index e95513eb28416..c7350452034a3 100644
--- a/llvm/unittests/Support/CrashRecoveryTest.cpp
+++ b/llvm/unittests/Support/CrashRecoveryTest.cpp
@@ -178,6 +178,11 @@ TEST(CrashRecoveryTest, UnixCRCReturnCode) {
   int Res = setenv("LLVM_CRC_UNIXCRCRETURNCODE", "1", 0);
   ASSERT_EQ(Res, 0);
 
+  Res = unsetenv("GTEST_SHARD_INDEX");
+  ASSERT_EQ(Res, 0);
+  Res = unsetenv("GTEST_TOTAL_SHARDS");
+  ASSERT_EQ(Res, 0);
+
   std::string Error;
   bool ExecutionFailed;
   int RetCode = ExecuteAndWait(Executable, argv, {}, {}, 0, 0, &Error,

diff --git a/llvm/unittests/Support/ProgramTest.cpp b/llvm/unittests/Support/ProgramTest.cpp
index fdd0478f70515..f93a77ab67057 100644
--- a/llvm/unittests/Support/ProgramTest.cpp
+++ b/llvm/unittests/Support/ProgramTest.cpp
@@ -95,7 +95,9 @@ class ProgramEnvTest : public testing::Test {
     };
 
     while (*EnvP != nullptr) {
-      EnvTable.emplace_back(prepareEnvVar(*EnvP));
+      auto S = prepareEnvVar(*EnvP);
+      if (!StringRef(S).startswith("GTEST_"))
+        EnvTable.emplace_back(S);
       ++EnvP;
     }
   }

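The two unittest changes above are needed because the relanded format drives GoogleTest's built-in sharding through the environment: lit now exports GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS (plus GTEST_OUTPUT) when it runs a unit-test binary. CrashRecoveryTest re-executes the current executable and ProgramTest snapshots the environment for child processes, so both must drop the GTEST_* variables or the child gtest run would itself be sharded and execute a different (possibly empty) subset of tests. A minimal Python sketch of the same scrubbing idea (the helper name is hypothetical, not part of this commit):

    import os
    import subprocess

    def run_unsharded(cmd):
        # Copy the parent environment but drop anything GoogleTest
        # interprets, so the re-executed binary runs its full test list.
        env = {k: v for k, v in os.environ.items()
               if not k.startswith('GTEST_')}
        return subprocess.run(cmd, env=env)
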
diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
index 77b9c235e40d4..dc1c66e896c54 100644
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -219,11 +219,12 @@ def getExecPath(self, components):
 class Test:
     """Test - Information on a single test instance."""
 
-    def __init__(self, suite, path_in_suite, config, file_path = None):
+    def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_file = None):
         self.suite = suite
         self.path_in_suite = path_in_suite
         self.config = config
         self.file_path = file_path
+        self.gtest_json_file = gtest_json_file
 
         # A list of conditions under which this test is expected to fail.
         # Each condition is a boolean expression of features and target
@@ -258,7 +259,7 @@ def __init__(self, suite, path_in_suite, config, file_path = None):
         # The previous test elapsed time, if applicable.
         self.previous_elapsed = 0.0
 
-        if '/'.join(path_in_suite) in suite.test_times:
+        if suite.test_times and '/'.join(path_in_suite) in suite.test_times:
             time = suite.test_times['/'.join(path_in_suite)]
             self.previous_elapsed = abs(time)
             self.previous_failure = time < 0

diff --git a/llvm/utils/lit/lit/TestingConfig.py b/llvm/utils/lit/lit/TestingConfig.py
index e80369377857b..a9660b39c9cdb 100644
--- a/llvm/utils/lit/lit/TestingConfig.py
+++ b/llvm/utils/lit/lit/TestingConfig.py
@@ -28,7 +28,7 @@ def fromdefaults(litConfig):
                      'TMPDIR', 'TMP', 'TEMP', 'TEMPDIR', 'AVRLIT_BOARD',
                      'AVRLIT_PORT', 'FILECHECK_OPTS', 'VCINSTALLDIR',
                      'VCToolsinstallDir', 'VSINSTALLDIR', 'WindowsSdkDir',
-                     'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH']
+                     'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH','GTEST_FILTER']
 
         if sys.platform == 'win32':
             pass_vars.append('COMSPEC')

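Adding GTEST_FILTER to pass_vars lets a filter present in the invoking environment (or set by a lit.cfg, as in the googletest-timeout input below) reach the sharded gtest processes. A minimal sketch of the pass-through idea, not the TestingConfig implementation itself:

    import os

    # pass_vars names variables copied from the invoking environment into
    # the per-test environment that lit hands to each gtest shard.
    pass_vars = ['GTEST_FILTER']
    environment = {v: os.environ[v] for v in pass_vars if v in os.environ}
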
diff --git a/llvm/utils/lit/lit/formats/googletest.py b/llvm/utils/lit/lit/formats/googletest.py
index 5329f5773e54c..4ce14b78fe10d 100644
--- a/llvm/utils/lit/lit/formats/googletest.py
+++ b/llvm/utils/lit/lit/formats/googletest.py
@@ -1,8 +1,8 @@
 from __future__ import absolute_import
+import json
+import math
 import os
-import re
 import shlex
-import subprocess
 import sys
 
 import lit.Test
@@ -25,74 +25,19 @@ def __init__(self, test_sub_dirs, test_suffix, run_under = []):
         self.test_suffixes = {exe_suffix, test_suffix + '.py'}
         self.run_under = run_under
 
-    def getGTestTests(self, path, litConfig, localConfig):
-        """getGTestTests(path) - [name]
-
-        Return the tests available in gtest executable.
-
-        Args:
-          path: String path to a gtest executable
-          litConfig: LitConfig instance
-          localConfig: TestingConfig instance"""
-
-        list_test_cmd = self.prepareCmd([path, '--gtest_list_tests'])
-
-        try:
-            output = subprocess.check_output(list_test_cmd,
-                                             env=localConfig.environment)
-        except subprocess.CalledProcessError as exc:
-            litConfig.warning(
-                "unable to discover google-tests in %r: %s. Process output: %s"
-                % (path, sys.exc_info()[1], exc.output))
-            # This doesn't look like a valid gtest file.  This can
-            # have a number of causes, none of them good.  For
-            # instance, we could have created a broken executable.
-            # Alternatively, someone has cruft in their test
-            # directory.  If we don't return a test here, then no
-            # failures will get reported, so return a dummy test name
-            # so that the failure is reported later.
-            yield 'failed_to_discover_tests_from_gtest'
-            return
-
-        upstream_prefix = re.compile('Running main\(\) from .*gtest_main\.cc')
-        nested_tests = []
-        for ln in output.splitlines(False):  # Don't keep newlines.
-            ln = lit.util.to_string(ln)
-
-            if upstream_prefix.fullmatch(ln):
-                # Upstream googletest prints this to stdout prior to running
-                # tests. LLVM removed that print statement in r61540, but we
-                # handle it here in case upstream googletest is being used.
-                continue
-
-            # The test name list includes trailing comments beginning with
-            # a '#' on some lines, so skip those. We don't support test names
-            # that use escaping to embed '#' into their name as the names come
-            # from C++ class and method names where such things are hard and
-            # uninteresting to support.
-            ln = ln.split('#', 1)[0].rstrip()
-            if not ln.lstrip():
-                continue
-
-            index = 0
-            while ln[index*2:index*2+2] == '  ':
-                index += 1
-            while len(nested_tests) > index:
-                nested_tests.pop()
-
-            ln = ln[index*2:]
-            if ln.endswith('.'):
-                nested_tests.append(ln)
-            elif any([name.startswith('DISABLED_')
-                      for name in nested_tests + [ln]]):
-                # Gtest will internally skip these tests. No need to launch a
-                # child process for it.
-                continue
-            else:
-                yield ''.join(nested_tests) + ln
+    def get_num_tests(self, path, localConfig):
+        cmd = [path, '--gtest_list_tests', '--gtest_filter=-*DISABLED_*']
+        if cmd[0].endswith('.py'):
+            cmd = [sys.executable] + cmd
+        out, _, exitCode = lit.util.executeCommand(cmd, env=localConfig.environment)
+        if exitCode == 0:
+            return sum(map(lambda line: line.startswith('  '), out.splitlines()))
+        return None
 
     def getTestsInDirectory(self, testSuite, path_in_suite,
                             litConfig, localConfig):
+        init_shard_size = 512 # number of tests in a shard
+        core_count = lit.util.usable_core_count()
         source_path = testSuite.getSourcePath(path_in_suite)
         for subdir in self.test_sub_dirs:
             dir_path = os.path.join(source_path, subdir)
@@ -102,13 +47,40 @@ def getTestsInDirectory(self, testSuite, path_in_suite,
                                              suffixes=self.test_suffixes):
                 # Discover the tests in this executable.
                 execpath = os.path.join(source_path, subdir, fn)
-                testnames = self.getGTestTests(execpath, litConfig, localConfig)
-                for testname in testnames:
-                    testPath = path_in_suite + (subdir, fn, testname)
-                    yield lit.Test.Test(testSuite, testPath, localConfig,
-                                        file_path=execpath)
+                num_tests = self.get_num_tests(execpath, localConfig)
+                if num_tests is not None:
+                    # Compute the number of shards.
+                    shard_size = init_shard_size
+                    nshard = int(math.ceil(num_tests/shard_size))
+                    while nshard < core_count and shard_size > 1:
+                        shard_size = shard_size//2
+                        nshard = int(math.ceil(num_tests/shard_size))
+
+                    # Create one lit test for each shard.
+                    for idx in range(nshard):
+                        testPath = path_in_suite + (subdir, fn,
+                                                        str(idx), str(nshard))
+                        json_file = '-'.join([execpath, testSuite.config.name,
+                                              str(os.getpid()), str(idx),
+                                              str(nshard)]) + '.json'
+                        yield lit.Test.Test(testSuite, testPath, localConfig,
+                                            file_path=execpath,
+                                            gtest_json_file=json_file)
+                else:
+                    # This doesn't look like a valid gtest file.  This can
+                    # have a number of causes, none of them good.  For
+                    # instance, we could have created a broken executable.
+                    # Alternatively, someone has cruft in their test
+                    # directory.  If we don't return a test here, then no
+                    # failures will get reported, so return a dummy test name
+                    # so that the failure is reported later.
+                    testPath = path_in_suite + (subdir, fn, 'failed_to_discover_tests_from_gtest')
+                    yield lit.Test.Test(testSuite, testPath, localConfig, file_path=execpath)
 
     def execute(self, test, litConfig):
+        if test.gtest_json_file is None:
+            return lit.Test.FAIL, ''
+
         testPath,testName = os.path.split(test.getSourcePath())
         while not os.path.exists(testPath):
             # Handle GTest parametrized and typed tests, whose name includes
@@ -116,7 +88,12 @@ def execute(self, test, litConfig):
             testPath, namePrefix = os.path.split(testPath)
             testName = namePrefix + '/' + testName
 
-        cmd = [testPath, '--gtest_filter=' + testName]
+        testName,total_shards = os.path.split(testName)
+        testName,shard_idx = os.path.split(testName)
+        shard_env = {'GTEST_COLOR':'no','GTEST_TOTAL_SHARDS':total_shards, 'GTEST_SHARD_INDEX':shard_idx, 'GTEST_OUTPUT':'json:'+test.gtest_json_file}
+        test.config.environment.update(shard_env)
+
+        cmd = [testPath]
         cmd = self.prepareCmd(cmd)
         if litConfig.useValgrind:
             cmd = litConfig.valgrindArgs + cmd
@@ -124,30 +101,43 @@ def execute(self, test, litConfig):
         if litConfig.noExecute:
             return lit.Test.PASS, ''
 
-        header = f"Script:\n--\n{' '.join(cmd)}\n--\n"
+        shard_envs= '\n'.join([k + '=' + v for k, v in shard_env.items()])
+        shard_header = f"Script(shard):\n--\n{shard_envs}\n{' '.join(cmd)}\n--\n"
 
         try:
-            out, err, exitCode = lit.util.executeCommand(
+            _, _, exitCode = lit.util.executeCommand(
                 cmd, env=test.config.environment,
                 timeout=litConfig.maxIndividualTestTime)
         except lit.util.ExecuteCommandTimeoutException:
             return (lit.Test.TIMEOUT,
-                    f'{header}Reached timeout of '
+                    f'{shard_header}Reached timeout of '
                     f'{litConfig.maxIndividualTestTime} seconds')
 
-        if exitCode:
-            return lit.Test.FAIL, header + out + err
-
-        if '[  SKIPPED ] 1 test,' in out:
-            return lit.Test.SKIPPED, ''
-
-        passing_test_line = '[  PASSED  ] 1 test.'
-        if passing_test_line not in out:
-            return (lit.Test.UNRESOLVED,
-                    f'{header}Unable to find {passing_test_line} '
-                    f'in gtest output:\n\n{out}{err}')
+        if not os.path.exists(test.gtest_json_file):
+            errmsg = f"shard JSON output does not exist: %s" % (test.gtest_json_file)
+            return lit.Test.FAIL, shard_header + errmsg
 
-        return lit.Test.PASS,''
+        if exitCode:
+            output = shard_header + '\n'
+            with open(test.gtest_json_file, encoding='utf-8') as f:
+                testsuites = json.load(f)['testsuites']
+                for testcase in testsuites:
+                    for testinfo in testcase['testsuite']:
+                        if testinfo['result'] == 'SUPPRESSED' or testinfo['result'] == 'SKIPPED':
+                            continue
+                        testname = testcase['name'] + '.' + testinfo['name']
+                        header = f"Script:\n--\n{' '.join(cmd)} --gtest_filter={testname}\n--\n"
+                        if 'failures' in testinfo:
+                            output += header
+                            for fail in testinfo['failures']:
+                                output += fail['failure'] + '\n'
+                            output += '\n'
+                        elif testinfo['result'] != 'COMPLETED':
+                            output += header
+                            output += 'unresolved test result\n'
+            return lit.Test.FAIL, output
+        else:
+            return lit.Test.PASS, ''
 
     def prepareCmd(self, cmd):
         """Insert interpreter if needed.
@@ -166,3 +156,61 @@ def prepareCmd(self, cmd):
             else:
                 cmd = shlex.split(self.run_under) + cmd
         return cmd
+
+    @staticmethod
+    def post_process_shard_results(selected_tests, discovered_tests):
+        def remove_gtest(tests):
+            idxs = []
+            for idx, t in enumerate(tests):
+                if t.gtest_json_file:
+                    idxs.append(idx)
+            for i in range(len(idxs)):
+                del tests[idxs[i]-i]
+
+        remove_gtest(discovered_tests)
+        gtests = [t for t in selected_tests if t.gtest_json_file]
+        remove_gtest(selected_tests)
+        for test in gtests:
+            # In case gtest has bugs such that no JSON file was emitted.
+            if not os.path.exists(test.gtest_json_file):
+                selected_tests.append(test)
+                discovered_tests.append(test)
+                continue
+
+            # Load json file to retrieve results.
+            with open(test.gtest_json_file, encoding='utf-8') as f:
+                testsuites = json.load(f)['testsuites']
+                for testcase in testsuites:
+                    for testinfo in testcase['testsuite']:
+                        # Ignore disabled tests.
+                        if testinfo['result'] == 'SUPPRESSED':
+                            continue
+
+                        testPath = test.path_in_suite[:-2] + (testcase['name'], testinfo['name'])
+                        subtest = lit.Test.Test(test.suite, testPath,
+                                                test.config, test.file_path)
+
+                        testname = testcase['name'] + '.' + testinfo['name']
+                        header = f"Script:\n--\n{test.file_path} --gtest_filter={testname}\n--\n"
+
+                        output = ''
+                        if testinfo['result'] == 'SKIPPED':
+                            returnCode = lit.Test.SKIPPED
+                        elif 'failures' in testinfo:
+                            returnCode = lit.Test.FAIL
+                            output = header
+                            for fail in testinfo['failures']:
+                                output += fail['failure'] + '\n'
+                        elif testinfo['result'] == 'COMPLETED':
+                            returnCode = lit.Test.PASS
+                        else:
+                            returnCode = lit.Test.UNRESOLVED
+                            output = header + 'unresolved test result\n'
+
+                        subtest.setResult(lit.Test.Result(returnCode, output, float(testinfo['time'][:-1])))
+
+                        selected_tests.append(subtest)
+                        discovered_tests.append(subtest)
+            os.remove(test.gtest_json_file)
+
+        return selected_tests, discovered_tests

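Discovery in the new format counts the tests in each executable and then picks a shard count: starting from shards of 512 tests, the shard size is halved until the number of shards reaches the number of usable cores (or the size hits 1), so even mid-sized suites fan out across the machine. A worked sketch of that loop with assumed inputs (1000 discovered tests, 8 usable cores):

    import math

    num_tests, core_count = 1000, 8    # assumed inputs
    shard_size = 512                   # init_shard_size
    nshard = math.ceil(num_tests / shard_size)       # 2 shards to start
    while nshard < core_count and shard_size > 1:
        shard_size //= 2
        nshard = math.ceil(num_tests / shard_size)
    # Ends with shard_size == 128 and nshard == 8, so each of the 8 cores
    # gets one shard; each shard then runs with GTEST_SHARD_INDEX=<idx>
    # and GTEST_TOTAL_SHARDS=8 in its environment.
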
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
index 41f124a27ad7f..aacb579c8b03c 100755
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -18,6 +18,7 @@
 import lit.run
 import lit.Test
 import lit.util
+from lit.formats.googletest import GoogleTest
 from lit.TestTimes import record_test_times
 
 
@@ -108,6 +109,9 @@ def main(builtin_params={}):
 
     record_test_times(selected_tests, lit_config)
 
+    selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
+        selected_tests, discovered_tests)
+
     if opts.time_tests:
         print_histogram(discovered_tests)
 

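main.py now calls GoogleTest.post_process_shard_results after the run: each shard-level Test is replaced by one lit result per individual gtest test, reconstructed from the shard's JSON report (testsuites -> testsuite -> name/result/time/failures). A minimal sketch of reading that report shape (not the lit implementation):

    import json

    def summarize(json_path):
        # Flatten a gtest JSON report into (test name, verdict) pairs,
        # using the same fields post_process_shard_results reads.
        with open(json_path, encoding='utf-8') as f:
            report = json.load(f)
        for suite in report['testsuites']:
            for case in suite['testsuite']:
                name = suite['name'] + '.' + case['name']
                if case['result'] == 'SUPPRESSED':   # disabled test
                    continue
                if case['result'] == 'SKIPPED':
                    yield name, 'SKIPPED'
                elif 'failures' in case:
                    yield name, 'FAIL'
                elif case['result'] == 'COMPLETED':
                    yield name, 'PASS'
                else:
                    yield name, 'UNRESOLVED'
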
diff --git a/llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py
new file mode 100644
index 0000000000000..bebedb6f0e12f
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+import os
+import sys
+
+if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
+    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+        raise ValueError("unexpected argument: %s" % (sys.argv[2]))
+    print("""\
+FirstTest.
+  subTestA
+  subTestB
+  subTestC
+  subTestD
+ParameterizedTest/0.
+  subTest
+ParameterizedTest/1.
+  subTest""")
+    sys.exit(0)
+elif len(sys.argv) != 1:
+    # sharding and json output are specified using environment variables
+    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
+
+for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+    if e not in os.environ:
+        raise ValueError("missing environment variables: " + e)
+
+if not os.environ['GTEST_OUTPUT'].startswith('json:'):
+    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+
+dummy_output = """\
+{
+"testsuites": [
+]
+}"""
+
+if os.environ['GTEST_SHARD_INDEX'] == '0':
+    exit_code = 1
+else:
+    json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
+    with open(json_filename, 'w') as f:
+        f.write(dummy_output)
+    exit_code = 0
+
+sys.exit(exit_code)

diff --git a/llvm/utils/lit/tests/Inputs/googletest-upstream-format/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
similarity index 66%
rename from llvm/utils/lit/tests/Inputs/googletest-upstream-format/lit.cfg
rename to llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
index 9fb5d2b0247be..8048411a70882 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-upstream-format/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
@@ -1,3 +1,3 @@
 import lit.formats
-config.name = 'googletest-upstream-format'
+config.name = 'googletest-crash'
 config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')

diff --git a/llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
index 7bff6b6252b25..c66696f77a519 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
+++ b/llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python
 
+import os
 import sys
 
-if len(sys.argv) != 2:
-    raise ValueError("unexpected number of args")
-
-if sys.argv[1] == "--gtest_list_tests":
+if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
+    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+        raise ValueError("unexpected argument: %s" % (sys.argv[2]))
     print("""\
 FirstTest.
   subTestA
@@ -17,31 +17,87 @@
 ParameterizedTest/1.
   subTest""")
     sys.exit(0)
-elif not sys.argv[1].startswith("--gtest_filter="):
-    raise ValueError("unexpected argument: %r" % (sys.argv[1]))
+elif len(sys.argv) != 1:
+    # sharding and json output are specified using environment variables
+    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
 
-test_name = sys.argv[1].split('=',1)[1]
-if test_name == 'FirstTest.subTestA':
-    print('I am subTest A, I PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-elif test_name == 'FirstTest.subTestB':
-    print('I am subTest B, I FAIL')
-    print('And I have two lines of output')
-    sys.exit(1)
-elif test_name == 'FirstTest.subTestC':
-    print('I am subTest C, I am SKIPPED')
-    print('[  PASSED  ] 0 tests.')
-    print('[  SKIPPED ] 1 test, listed below:')
-    print('[  SKIPPED ] FirstTest.subTestC')
-    sys.exit(0)
-elif test_name == 'FirstTest.subTestD':
-    print('I am subTest D, I am UNRESOLVED')
-    sys.exit(0)
-elif test_name in ('ParameterizedTest/0.subTest',
-                   'ParameterizedTest/1.subTest'):
-    print('I am a parameterized test, I also PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-else:
-    raise SystemExit("error: invalid test name: %r" % (test_name,))
+for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+    if e not in os.environ:
+        raise ValueError("missing environment variables: " + e)
+
+if not os.environ['GTEST_OUTPUT'].startswith('json:'):
+    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+
+output = """\
+{
+"testsuites": [
+    {
+        "name": "FirstTest",
+        "testsuite": [
+            {
+                "name": "subTestA",
+                "result": "COMPLETED",
+                "time": "0.001s"
+            },
+            {
+                "name": "subTestB",
+                "result": "COMPLETED",
+                "time": "0.001s",
+                "failures": [
+                    {
+                        "failure": "I am subTest B, I FAIL\\nAnd I have two lines of output",
+                        "type": ""
+                    }
+                ]
+            },
+            {
+                "name": "subTestC",
+                "result": "SKIPPED",
+                "time": "0.001s"
+            },
+            {
+                "name": "subTestD",
+                "result": "UNRESOLVED",
+                "time": "0.001s"
+            }
+        ]
+    },
+    {
+        "name": "ParameterizedTest/0",
+        "testsuite": [
+            {
+                "name": "subTest",
+                "result": "COMPLETED",
+                "time": "0.001s"
+            }
+        ]
+    },
+    {
+        "name": "ParameterizedTest/1",
+        "testsuite": [
+            {
+                "name": "subTest",
+                "result": "COMPLETED",
+                "time": "0.001s"
+            }
+        ]
+    }
+]
+}"""
+
+dummy_output = """\
+{
+"testsuites": [
+]
+}"""
+
+json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
+with open(json_filename, 'w') as f:
+    if os.environ['GTEST_SHARD_INDEX'] == '0':
+        f.write(output)
+        exit_code = 1
+    else:
+        f.write(dummy_output)
+        exit_code = 0
+
+sys.exit(exit_code)

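The rewritten mock exercises both ways the format drives a gtest executable: a discovery invocation (--gtest_list_tests --gtest_filter=-*DISABLED_*) that prints the test tree, and a per-shard run that receives its controls purely through the environment and writes a JSON report to the path in GTEST_OUTPUT. A minimal sketch of that per-shard launch from the lit side, with assumed paths and shard numbers for illustration only:

    import os
    import subprocess
    import sys

    env = dict(os.environ,
               GTEST_COLOR='no',
               GTEST_TOTAL_SHARDS='6',
               GTEST_SHARD_INDEX='0',
               GTEST_OUTPUT='json:/tmp/OneTest-shard0.json')
    # .py test inputs are run through the Python interpreter, as
    # prepareCmd does for the real format.
    subprocess.run([sys.executable, 'DummySubDir/OneTest.py'], env=env)
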
diff --git a/llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
index acf13a466fa71..3747232bf9e35 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
+++ b/llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
@@ -1,29 +1,66 @@
 #!/usr/bin/env python
 
+import os
 import sys
-import time
 
-if len(sys.argv) != 2:
-    raise ValueError("unexpected number of args")
-
-if sys.argv[1] == "--gtest_list_tests":
+if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
+    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+        raise ValueError("unexpected argument: %s" % (sys.argv[2]))
     print("""\
 T.
   QuickSubTest
   InfiniteLoopSubTest
 """)
     sys.exit(0)
-elif not sys.argv[1].startswith("--gtest_filter="):
-    raise ValueError("unexpected argument: %r" % (sys.argv[1]))
+elif len(sys.argv) != 1:
+    # sharding and json output are specified using environment variables
+    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
 
-test_name = sys.argv[1].split('=',1)[1]
-if test_name == 'T.QuickSubTest':
-    print('I am QuickSubTest, I PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-elif test_name == 'T.InfiniteLoopSubTest':
-    print('I am InfiniteLoopSubTest, I will hang')
-    while True:
-        pass
+for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT', 'GTEST_FILTER']:
+    if e not in os.environ:
+        raise ValueError("missing environment variables: " + e)
+
+if not os.environ['GTEST_OUTPUT'].startswith('json:'):
+    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+
+output = """\
+{
+"testsuites": [
+    {
+        "name": "T",
+        "testsuite": [
+            {
+                "name": "QuickSubTest",
+                "result": "COMPLETED",
+                "time": "2s"
+            }
+        ]
+    }
+]
+}"""
+
+dummy_output = """\
+{
+"testsuites": [
+]
+}"""
+
+json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
+
+if os.environ['GTEST_SHARD_INDEX'] == '0':
+    test_name = os.environ['GTEST_FILTER']
+    if test_name == 'QuickSubTest':
+        with open(json_filename, 'w') as f:
+            f.write(output)
+        exit_code = 0
+    elif test_name == 'InfiniteLoopSubTest':
+        while True:
+            pass
+    else:
+        raise SystemExit("error: invalid test name: %r" % (test_name,))
 else:
-    raise SystemExit("error: invalid test name: %r" % (test_name,))
+    with open(json_filename, 'w') as f:
+        f.write(dummy_output)
+    exit_code = 0
+
+sys.exit(exit_code)

diff --git a/llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
index bf8a4db2bf90e..af6a4846af56a 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
@@ -3,6 +3,7 @@ config.name = 'googletest-timeout'
 config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
 
 configSetTimeout = lit_config.params.get('set_timeout', '0')
+config.environment['GTEST_FILTER'] = lit_config.params.get('gtest_filter')
 
 if configSetTimeout == '1':
     # Try setting the max individual test time in the configuration

diff --git a/llvm/utils/lit/tests/Inputs/googletest-upstream-format/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-upstream-format/DummySubDir/OneTest.py
deleted file mode 100644
index aa79a22c0b106..0000000000000
--- a/llvm/utils/lit/tests/Inputs/googletest-upstream-format/DummySubDir/OneTest.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-
-if len(sys.argv) != 2:
-    raise ValueError("unexpected number of args")
-
-if sys.argv[1] == "--gtest_list_tests":
-    print(f"""\
-Running main() from {os.getcwd()}/gtest_main.cc
-FirstTest.
-  subTestA
-  subTestB
-  subTestC
-  subTestD
-ParameterizedTest/0.
-  subTest
-ParameterizedTest/1.
-  subTest""")
-    sys.exit(0)
-elif not sys.argv[1].startswith("--gtest_filter="):
-    raise ValueError("unexpected argument: %r" % (sys.argv[1]))
-
-test_name = sys.argv[1].split('=',1)[1]
-print('Running main() from gtest_main.cc')
-if test_name == 'FirstTest.subTestA':
-    print('I am subTest A, I PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-elif test_name == 'FirstTest.subTestB':
-    print('I am subTest B, I FAIL')
-    print('And I have two lines of output')
-    sys.exit(1)
-elif test_name == 'FirstTest.subTestC':
-    print('I am subTest C, I am SKIPPED')
-    print('[  PASSED  ] 0 tests.')
-    print('[  SKIPPED ] 1 test, listed below:')
-    print('[  SKIPPED ] FirstTest.subTestC')
-    sys.exit(0)
-elif test_name == 'FirstTest.subTestD':
-    print('I am subTest D, I am UNRESOLVED')
-    sys.exit(0)
-elif test_name in ('ParameterizedTest/0.subTest',
-                   'ParameterizedTest/1.subTest'):
-    print('I am a parameterized test, I also PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-else:
-    raise SystemExit("error: invalid test name: %r" % (test_name,))

diff --git a/llvm/utils/lit/tests/googletest-crash.py b/llvm/utils/lit/tests/googletest-crash.py
new file mode 100644
index 0000000000000..1cbe05b4150ea
--- /dev/null
+++ b/llvm/utils/lit/tests/googletest-crash.py
@@ -0,0 +1,20 @@
+# Check GoogleTest shard test crashes are handled.
+
+# RUN: not %{lit} -v %{inputs}/googletest-crash | FileCheck %s
+
+# CHECK: -- Testing:
+# CHECK: FAIL: googletest-crash :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0
+# CHECK: *** TEST 'googletest-crash :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
+# CHECK-NEXT: Script(shard):
+# CHECK-NEXT: --
+# CHECK-NEXT: GTEST_COLOR=no
+# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
+# CHECK-NEXT: GTEST_SHARD_INDEX=0
+# CHECK-NEXT: GTEST_OUTPUT=json:[[JSON:.*\.json]]
+# CHECK-NEXT: [[FILE]]
+# CHECK-NEXT: --
+# CHECK-NEXT: shard JSON output does not exist: [[JSON]]
+# CHECK-NEXT: ***
+# CHECK: Failed Tests (1):
+# CHECK-NEXT:   googletest-crash :: [[PATH]][[FILE]]/0/6
+# CHECK: Failed{{ *}}: 1
\ No newline at end of file

diff --git a/llvm/utils/lit/tests/googletest-format.py b/llvm/utils/lit/tests/googletest-format.py
index b960d0cdc64e8..5d5f1f9e96f04 100644
--- a/llvm/utils/lit/tests/googletest-format.py
+++ b/llvm/utils/lit/tests/googletest-format.py
@@ -9,28 +9,35 @@
 # END.
 
 # CHECK: -- Testing:
-# CHECK: PASS: googletest-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/FirstTest.subTestA
-# CHECK: FAIL: googletest-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestB]]
-# CHECK-NEXT: *** TEST 'googletest-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
+# CHECK: FAIL: googletest-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0
+# CHECK: *** TEST 'googletest-format :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
+# CHECK-NEXT: Script(shard):
+# CHECK-NEXT: --
+# CHECK-NEXT: GTEST_COLOR=no
+# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
+# CHECK-NEXT: GTEST_SHARD_INDEX=0
+# CHECK-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
+# CHECK-NEXT: [[FILE]]
+# CHECK-NEXT: --
+# CHECK-EMPTY:
 # CHECK-NEXT: Script:
 # CHECK-NEXT: --
-# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
+# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestB
 # CHECK-NEXT: --
 # CHECK-NEXT: I am subTest B, I FAIL
 # CHECK-NEXT: And I have two lines of output
-# CHECK: ***
-# CHECK: SKIPPED: googletest-format :: [[PATH]][[FILE]]/FirstTest.subTestC
-# CHECK: UNRESOLVED: googletest-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestD]]
-# CHECK-NEXT: *** TEST 'googletest-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
-# CHECK-NEXT: Script:
+# CHECK-EMPTY:
+# CHECK: Script:
 # CHECK-NEXT: --
-# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
+# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestD
 # CHECK-NEXT: --
-# CHECK-NEXT: Unable to find [ PASSED ] 1 test. in gtest output
-# CHECK: I am subTest D, I am UNRESOLVED
-# CHECK: PASS: googletest-format :: [[PATH]][[FILE]]/ParameterizedTest/0.subTest
-# CHECK: PASS: googletest-format :: [[PATH]][[FILE]]/ParameterizedTest/1.subTest
-# CHECK: Failed Tests (1)
+# CHECK-NEXT: unresolved test result
+# CHECK: ***
+# CHECK: Unresolved Tests (1):
+# CHECK-NEXT:   googletest-format :: [[PATH]][[FILE]]/FirstTest/subTestD
+# CHECK: ***
+# CHECK-NEXT: Failed Tests (1):
+# CHECK-NEXT:   googletest-format :: [[PATH]][[FILE]]/FirstTest/subTestB
 # CHECK: Skipped{{ *}}: 1
 # CHECK: Passed{{ *}}: 3
 # CHECK: Unresolved{{ *}}: 1

diff --git a/llvm/utils/lit/tests/googletest-timeout.py b/llvm/utils/lit/tests/googletest-timeout.py
index b7b9b64d4ed56..dc80636340e6b 100644
--- a/llvm/utils/lit/tests/googletest-timeout.py
+++ b/llvm/utils/lit/tests/googletest-timeout.py
@@ -7,24 +7,29 @@
 # Check that the per test timeout is enforced when running GTest tests.
 #
 # RUN: not %{lit} -v %{inputs}/googletest-timeout \
-# RUN:   --filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
+# RUN:   --param gtest_filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
 # RUN: FileCheck --check-prefix=CHECK-INF < %t.cmd.out %s
 
 # Check that the per test timeout is enforced when running GTest tests via
 # the configuration file
 #
 # RUN: not %{lit} -v %{inputs}/googletest-timeout \
-# RUN:  --filter=InfiniteLoopSubTest  --param set_timeout=1 \
+# RUN:  --param gtest_filter=InfiniteLoopSubTest  --param set_timeout=1 \
 # RUN:  > %t.cfgset.out
 # RUN: FileCheck --check-prefix=CHECK-INF < %t.cfgset.out %s
 
 # CHECK-INF: -- Testing:
-# CHECK-INF: TIMEOUT: googletest-timeout :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/[[TEST:T\.InfiniteLoopSubTest]]
-# CHECK-INF-NEXT: ******************** TEST 'googletest-timeout :: [[PATH]][[FILE]]/[[TEST]]' FAILED ********************
-# CHECK-INF-NEXT: Script:
+# CHECK-INF: TIMEOUT: googletest-timeout :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0/2
+# CHECK-INF-NEXT: ******************** TEST 'googletest-timeout :: [[PATH]][[FILE]]/0/2' FAILED ********************
+# CHECK-INF-NEXT: Script(shard):
 # CHECK-INF-NEXT: --
-# CHECK-INF-NEXT: [[FILE]] --gtest_filter=[[TEST]]
+# CHECK-INF-NEXT: GTEST_COLOR=no
+# CHECK-INF-NEXT: GTEST_TOTAL_SHARDS=2
+# CHECK-INF-NEXT: GTEST_SHARD_INDEX=0
+# CHECK-INF-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
+# CHECK-INF-NEXT: [[FILE]]
 # CHECK-INF-NEXT: --
+# CHECK-INF-NEXT: Reached timeout of 1 seconds
 # CHECK-INF: Timed Out: 1
 
 ###############################################################################
@@ -35,15 +40,15 @@
 ###############################################################################
 
 # RUN: %{lit} -v %{inputs}/googletest-timeout \
-# RUN:   --filter=QuickSubTest --timeout=3600 > %t.cmd.out
+# RUN:   --param gtest_filter=QuickSubTest --timeout=3600 > %t.cmd.out
 # RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmd.out %s
 
-# CHECK-QUICK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/T.QuickSubTest
-# CHECK-QUICK: Passed : 1
+# CHECK-QUICK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/0/2 {{.*}}
+# CHECK-QUICK: Passed: 1
 
 # Test per test timeout via a config file and on the command line.
 # The value set on the command line should override the config file.
-# RUN: %{lit} -v %{inputs}/googletest-timeout --filter=QuickSubTest \
+# RUN: %{lit} -v %{inputs}/googletest-timeout --param gtest_filter=QuickSubTest \
 # RUN:   --param set_timeout=1 --timeout=3600 \
 # RUN:   > %t.cmdover.out 2> %t.cmdover.err
 # RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmdover.out %s

diff --git a/llvm/utils/lit/tests/googletest-upstream-format.py b/llvm/utils/lit/tests/googletest-upstream-format.py
deleted file mode 100644
index cf0f73e2d31ed..0000000000000
--- a/llvm/utils/lit/tests/googletest-upstream-format.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Check the various features of the GoogleTest format.
-
-# RUN: not %{lit} -v %{inputs}/googletest-upstream-format > %t.out
-# RUN: FileCheck < %t.out %s
-#
-# END.
-
-# CHECK: -- Testing:
-# CHECK: PASS: googletest-upstream-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/FirstTest.subTestA
-# CHECK: FAIL: googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestB]]
-# CHECK-NEXT: *** TEST 'googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
-# CHECK-NEXT: Script:
-# CHECK-NEXT: --
-# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
-# CHECK-NEXT: --
-# CHECK-NEXT: Running main() from gtest_main.cc
-# CHECK-NEXT: I am subTest B, I FAIL
-# CHECK-NEXT: And I have two lines of output
-# CHECK: SKIPPED: googletest-upstream-format :: [[PATH]][[FILE]]/FirstTest.subTestC
-# CHECK: UNRESOLVED: googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestD]]
-# CHECK-NEXT: *** TEST 'googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
-# CHECK-NEXT: Script:
-# CHECK-NEXT: --
-# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
-# CHECK-NEXT: --
-# CHECK-NEXT: Unable to find [ PASSED ] 1 test. in gtest output
-# CHECK: I am subTest D, I am UNRESOLVED
-# CHECK: ***
-# CHECK: PASS: googletest-upstream-format :: [[PATH]][[FILE]]/ParameterizedTest/0.subTest
-# CHECK: PASS: googletest-upstream-format :: [[PATH]][[FILE]]/ParameterizedTest/1.subTest
-# CHECK: Failed Tests (1)
-# CHECK: Skipped{{ *}}: 1
-# CHECK: Passed{{ *}}: 3
-# CHECK: Unresolved{{ *}}: 1
-# CHECK: Failed{{ *}}: 1

