[llvm] b71edfa - [NFC][Py Reformat] Reformat python files in llvm

Tobias Hieta via llvm-commits llvm-commits at lists.llvm.org
Wed May 17 01:49:04 PDT 2023


Author: Tobias Hieta
Date: 2023-05-17T10:48:52+02:00
New Revision: b71edfaa4ec3c998aadb35255ce2f60bba2940b0

URL: https://github.com/llvm/llvm-project/commit/b71edfaa4ec3c998aadb35255ce2f60bba2940b0
DIFF: https://github.com/llvm/llvm-project/commit/b71edfaa4ec3c998aadb35255ce2f60bba2940b0.diff

LOG: [NFC][Py Reformat] Reformat python files in llvm

This is the first commit in a series that will reformat
all the Python files in the LLVM repository.

Reformatting is done with `black`.

See more information here:

https://discourse.llvm.org/t/rfc-document-and-standardize-python-code-style

Reviewed By: jhenderson, JDevlieghere, MatzeB

Differential Revision: https://reviews.llvm.org/D150545
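
As a quick illustration of what the black pass changes, here is a minimal
sketch using black's Python API (black.format_str and black.Mode). The
sample input and the default Mode() are assumptions for illustration only;
the black version standardized for the tree is the one agreed in the RFC
linked above, and the usual command-line route is simply `black <paths>`.

    # Minimal sketch: reformat a snippet the way this commit does.
    # Requires `pip install black`; Mode() defaults are an assumption here.
    import black

    # Pre-commit style, as seen in bit_reader.py before this change.
    src = "__all__ = ['parse_bitcode']\n"

    # format_str() returns the source normalized to black's style,
    # e.g. single quotes become double quotes.
    print(black.format_str(src, mode=black.Mode()))
    # -> __all__ = ["parse_bitcode"]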

Added: 
    

Modified: 
    llvm/bindings/python/llvm/bit_reader.py
    llvm/bindings/python/llvm/common.py
    llvm/bindings/python/llvm/core.py
    llvm/bindings/python/llvm/disassembler.py
    llvm/bindings/python/llvm/enumerations.py
    llvm/bindings/python/llvm/object.py
    llvm/bindings/python/llvm/tests/base.py
    llvm/bindings/python/llvm/tests/test_bitreader.py
    llvm/bindings/python/llvm/tests/test_core.py
    llvm/bindings/python/llvm/tests/test_disassembler.py
    llvm/bindings/python/llvm/tests/test_object.py
    llvm/docs/conf.py
    llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
    llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py
    llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
    llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py
    llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
    llvm/lib/Analysis/models/gen-inline-oz-test-model.py
    llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py
    llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
    llvm/lib/Analysis/models/interactive_host.py
    llvm/lib/Analysis/models/log_reader.py
    llvm/lib/Analysis/models/saved-model-to-tflite.py
    llvm/test/BugPoint/compile-custom.ll.py
    llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
    llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py
    llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py
    llvm/test/CodeGen/NVPTX/surf-tex.py
    llvm/test/CodeGen/NVPTX/wmma.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-01.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-02.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-03.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-04.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-05.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-06.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-07.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-08.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-09.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-10.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-11.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-12.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-13.py
    llvm/test/CodeGen/SystemZ/Large/spill-01.py
    llvm/test/CodeGen/SystemZ/Large/spill-02.py
    llvm/test/CodeGen/WebAssembly/multivalue-stackify.py
    llvm/test/MC/COFF/bigobj.py
    llvm/test/Other/opt-bisect-helper.py
    llvm/test/TableGen/JSON-check.py
    llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py
    llvm/test/Unit/lit.cfg.py
    llvm/test/lit.cfg.py
    llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py
    llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py
    llvm/test/tools/llvm-objcopy/Inputs/ungzip.py
    llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py
    llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py
    llvm/test/tools/llvm-reduce/Inputs/remove-args.py
    llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py
    llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py
    llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py
    llvm/test/tools/llvm-reduce/remove-bbs-sequence.py
    llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py
    llvm/tools/llvm-shlib/gen-msvc-exports.py
    llvm/tools/opt-viewer/extract-reproducers.py
    llvm/tools/opt-viewer/opt-diff.py
    llvm/tools/opt-viewer/opt-stats.py
    llvm/tools/opt-viewer/opt-viewer.py
    llvm/tools/opt-viewer/optpmap.py
    llvm/tools/opt-viewer/optrecord.py
    llvm/tools/sancov/coverage-report-server.py
    llvm/utils/DSAclean.py
    llvm/utils/DSAextract.py
    llvm/utils/Reviewing/find_interesting_reviews.py
    llvm/utils/Target/ARM/analyze-match-table.py
    llvm/utils/UpdateTestChecks/asm.py
    llvm/utils/UpdateTestChecks/common.py
    llvm/utils/UpdateTestChecks/isel.py
    llvm/utils/abtest.py
    llvm/utils/add_argument_names.py
    llvm/utils/bugpoint_gisel_reducer.py
    llvm/utils/check_ninja_deps.py
    llvm/utils/chunk-print-before-all.py
    llvm/utils/collect_and_build_with_pgo.py
    llvm/utils/convert-constraint-log-to-z3.py
    llvm/utils/create_ladder_graph.py
    llvm/utils/demangle_tree.py
    llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
    llvm/utils/docker/scripts/llvm_checksum/project_tree.py
    llvm/utils/extract-section.py
    llvm/utils/extract_symbols.py
    llvm/utils/extract_vplan.py
    llvm/utils/filecheck_lint/filecheck_lint.py
    llvm/utils/filecheck_lint/filecheck_lint_test.py
    llvm/utils/gdb-scripts/prettyprinters.py
    llvm/utils/git/github-automation.py
    llvm/utils/git/pre-push.py
    llvm/utils/gn/build/run_built_binary.py
    llvm/utils/gn/build/symbol_exports.py
    llvm/utils/gn/build/symlink_or_copy.py
    llvm/utils/gn/build/sync_source_lists_from_cmake.py
    llvm/utils/gn/build/write_cmake_config.py
    llvm/utils/gn/build/write_file.py
    llvm/utils/gn/build/write_library_dependencies.py
    llvm/utils/gn/build/write_vcsrevision.py
    llvm/utils/gn/get.py
    llvm/utils/gn/gn.py
    llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py
    llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py
    llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py
    llvm/utils/indirect_calls.py
    llvm/utils/lint/common_lint.py
    llvm/utils/lint/cpp_lint.py
    llvm/utils/lint/generic_lint.py
    llvm/utils/lit/examples/many-tests/lit.cfg
    llvm/utils/lit/lit.py
    llvm/utils/lit/lit/BooleanExpression.py
    llvm/utils/lit/lit/LitConfig.py
    llvm/utils/lit/lit/LitTestCase.py
    llvm/utils/lit/lit/ProgressBar.py
    llvm/utils/lit/lit/ShCommands.py
    llvm/utils/lit/lit/ShUtil.py
    llvm/utils/lit/lit/Test.py
    llvm/utils/lit/lit/TestRunner.py
    llvm/utils/lit/lit/TestTimes.py
    llvm/utils/lit/lit/TestingConfig.py
    llvm/utils/lit/lit/__init__.py
    llvm/utils/lit/lit/builtin_commands/cat.py
    llvm/utils/lit/lit/builtin_commands/diff.py
    llvm/utils/lit/lit/cl_arguments.py
    llvm/utils/lit/lit/discovery.py
    llvm/utils/lit/lit/display.py
    llvm/utils/lit/lit/formats/__init__.py
    llvm/utils/lit/lit/formats/base.py
    llvm/utils/lit/lit/formats/googletest.py
    llvm/utils/lit/lit/formats/shtest.py
    llvm/utils/lit/lit/llvm/config.py
    llvm/utils/lit/lit/llvm/subst.py
    llvm/utils/lit/lit/main.py
    llvm/utils/lit/lit/reports.py
    llvm/utils/lit/lit/run.py
    llvm/utils/lit/lit/util.py
    llvm/utils/lit/lit/worker.py
    llvm/utils/lit/setup.py
    llvm/utils/lit/tests/Inputs/allow-retries/lit.cfg
    llvm/utils/lit/tests/Inputs/allow-retries/succeeds-within-limit.py
    llvm/utils/lit/tests/Inputs/config-map-discovery/driver.py
    llvm/utils/lit/tests/Inputs/config-map-discovery/main-config/lit.cfg
    llvm/utils/lit/tests/Inputs/custom-result-category/format.py
    llvm/utils/lit/tests/Inputs/custom-result-category/lit.cfg
    llvm/utils/lit/tests/Inputs/discovery/lit.cfg
    llvm/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
    llvm/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg
    llvm/utils/lit/tests/Inputs/fake-externals/fake_external.py
    llvm/utils/lit/tests/Inputs/googletest-cmd-wrapper/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-discovery-failed/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-format/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
    llvm/utils/lit/tests/Inputs/ignore-fail/lit.cfg
    llvm/utils/lit/tests/Inputs/lit-opts/lit.cfg
    llvm/utils/lit/tests/Inputs/lld-features/lit.cfg
    llvm/utils/lit/tests/Inputs/max-failures/lit.cfg
    llvm/utils/lit/tests/Inputs/max-time/lit.cfg
    llvm/utils/lit/tests/Inputs/parallelism-groups/lit.cfg
    llvm/utils/lit/tests/Inputs/progress-bar/lit.cfg
    llvm/utils/lit/tests/Inputs/py-config-discovery/lit.site.cfg.py
    llvm/utils/lit/tests/Inputs/reorder/lit.cfg
    llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg
    llvm/utils/lit/tests/Inputs/show-used-features/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-define/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-env/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-env/print_environment.py
    llvm/utils/lit/tests/Inputs/shtest-format-argv0/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-format/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-if-else/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-inject/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-keyword-parse-errors/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-not/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-not/print_environment.py
    llvm/utils/lit/tests/Inputs/shtest-output-printing/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-pushd-popd/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-no-limit/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-within-limit/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/escaping/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/negative-integer/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/not-an-integer/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/set-to-none/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/substitutes-within-limit/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-run-at-line/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-shell/check_args.py
    llvm/utils/lit/tests/Inputs/shtest-shell/check_path.py
    llvm/utils/lit/tests/Inputs/shtest-shell/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-timeout/lit.cfg
    llvm/utils/lit/tests/Inputs/standalone-tests-with-excludes/lit.cfg
    llvm/utils/lit/tests/Inputs/standalone-tests-with-suffixes/lit.cfg
    llvm/utils/lit/tests/Inputs/standalone-tests/lit.cfg
    llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
    llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg
    llvm/utils/lit/tests/Inputs/test-data/dummy_format.py
    llvm/utils/lit/tests/Inputs/test-data/lit.cfg
    llvm/utils/lit/tests/Inputs/test_retry_attempts/lit.cfg
    llvm/utils/lit/tests/Inputs/test_retry_attempts/test.py
    llvm/utils/lit/tests/Inputs/testrunner-custom-parsers/lit.cfg
    llvm/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
    llvm/utils/lit/tests/Inputs/use-llvm-tool-required/lit.cfg
    llvm/utils/lit/tests/Inputs/use-llvm-tool/lit.cfg
    llvm/utils/lit/tests/Inputs/xfail-cl/a/lit.cfg
    llvm/utils/lit/tests/Inputs/xfail-cl/b/lit.cfg
    llvm/utils/lit/tests/Inputs/xfail-cl/lit.cfg
    llvm/utils/lit/tests/Inputs/xunit-output/dummy_format.py
    llvm/utils/lit/tests/Inputs/xunit-output/lit.cfg
    llvm/utils/lit/tests/lit.cfg
    llvm/utils/lit/tests/unit/ShUtil.py
    llvm/utils/lit/tests/unit/TestRunner.py
    llvm/utils/lit/tests/unparsed-requirements.py
    llvm/utils/lldbDataFormatters.py
    llvm/utils/llvm-gisel-cov.py
    llvm/utils/llvm-locstats/llvm-locstats.py
    llvm/utils/llvm-original-di-preservation.py
    llvm/utils/merge-stats.py
    llvm/utils/pipeline.py
    llvm/utils/prepare-code-coverage-artifact.py
    llvm/utils/reduce_pipeline.py
    llvm/utils/reduce_pipeline_test/fake_opt.py
    llvm/utils/reduce_pipeline_test/test.py
    llvm/utils/relative_lines.py
    llvm/utils/release/bump-version.py
    llvm/utils/release/findRegressions-nightly.py
    llvm/utils/release/findRegressions-simple.py
    llvm/utils/release/github-upload-release.py
    llvm/utils/remote-exec.py
    llvm/utils/revert_checker.py
    llvm/utils/revert_checker_test.py
    llvm/utils/rsp_bisect.py
    llvm/utils/rsp_bisect_test/test.py
    llvm/utils/rsp_bisect_test/test_script.py
    llvm/utils/rsp_bisect_test/test_script_inv.py
    llvm/utils/schedcover.py
    llvm/utils/shuffle_fuzz.py
    llvm/utils/shuffle_select_fuzz_tester.py
    llvm/utils/sort_includes.py
    llvm/utils/sysroot.py
    llvm/utils/testgen/mc-bundling-x86-gen.py
    llvm/utils/unicode-case-fold.py
    llvm/utils/update_analyze_test_checks.py
    llvm/utils/update_any_test_checks.py
    llvm/utils/update_cc_test_checks.py
    llvm/utils/update_llc_test_checks.py
    llvm/utils/update_mca_test_checks.py
    llvm/utils/update_mir_test_checks.py
    llvm/utils/update_test_checks.py
    llvm/utils/update_test_prefix.py
    llvm/utils/wciia.py

Removed: 
    


################################################################################
diff --git a/llvm/bindings/python/llvm/bit_reader.py b/llvm/bindings/python/llvm/bit_reader.py
index 33b8211076b80..8d1c6d5451932 100644
--- a/llvm/bindings/python/llvm/bit_reader.py
+++ b/llvm/bindings/python/llvm/bit_reader.py
@@ -1,4 +1,3 @@
-
 from .common import LLVMObject
 from .common import c_object_p
 from .common import get_library
@@ -10,21 +9,25 @@
 from ctypes import byref
 from ctypes import c_char_p
 from ctypes import cast
-__all__ = ['parse_bitcode']
+
+__all__ = ["parse_bitcode"]
 lib = get_library()
 
+
 def parse_bitcode(mem_buffer):
     """Input is .core.MemoryBuffer"""
     module = c_object_p()
     result = lib.LLVMParseBitcode2(mem_buffer, byref(module))
     if result:
-        raise RuntimeError('LLVM Error')
+        raise RuntimeError("LLVM Error")
     m = Module(module)
     m.take_ownership(mem_buffer)
     return m
 
+
 def register_library(library):
     library.LLVMParseBitcode2.argtypes = [MemoryBuffer, POINTER(c_object_p)]
     library.LLVMParseBitcode2.restype = bool
 
+
 register_library(lib)

diff --git a/llvm/bindings/python/llvm/common.py b/llvm/bindings/python/llvm/common.py
index 9c6c6d433458c..4f8912aec3fec 100644
--- a/llvm/bindings/python/llvm/common.py
+++ b/llvm/bindings/python/llvm/common.py
@@ -1,10 +1,10 @@
-#===- common.py - Python LLVM Bindings -----------------------*- python -*--===#
+# ===- common.py - Python LLVM Bindings -----------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 from ctypes import POINTER
 from ctypes import c_void_p
@@ -15,20 +15,22 @@
 
 # LLVM_VERSION: sync with PACKAGE_VERSION in CMakeLists.txt
 #               but leave out the 'svn' suffix.
-LLVM_VERSION = '10.0.0'
+LLVM_VERSION = "10.0.0"
 
 __all__ = [
-    'c_object_p',
-    'get_library',
+    "c_object_p",
+    "get_library",
 ]
 
 c_object_p = POINTER(c_void_p)
 
+
 class LLVMObject(object):
     """Base class for objects that are backed by an LLVM data structure.
 
     This class should never be instantiated outside of this package.
     """
+
     def __init__(self, ptr, ownable=True, disposer=None):
         assert isinstance(ptr, c_object_p)
 
@@ -61,12 +63,13 @@ def from_param(self):
         return self._as_parameter_
 
     def __del__(self):
-        if not hasattr(self, '_self_owned') or not hasattr(self, '_disposer'):
+        if not hasattr(self, "_self_owned") or not hasattr(self, "_disposer"):
             return
 
         if self._self_owned and self._disposer:
             self._disposer(self)
 
+
 class CachedProperty(object):
     """Decorator that caches the result of a property lookup.
 
@@ -74,11 +77,12 @@ class CachedProperty(object):
     decorator on properties that invoke C API calls for which the result of the
     call will be idempotent.
     """
+
     def __init__(self, wrapped):
         self.wrapped = wrapped
         try:
             self.__doc__ = wrapped.__doc__
-        except: # pragma: no cover
+        except:  # pragma: no cover
             pass
 
     def __get__(self, instance, instance_type=None):
@@ -90,6 +94,7 @@ def __get__(self, instance, instance_type=None):
 
         return value
 
+
 def get_library():
     """Obtain a reference to the llvm library."""
 
@@ -101,14 +106,14 @@ def get_library():
     # library into a default linker search path.  Always Try ctypes.cdll.LoadLibrary()
     # with all possible library names first, then try ctypes.util.find_library().
 
-    names = ['LLVM-' + LLVM_VERSION, 'LLVM-' + LLVM_VERSION + 'svn']
+    names = ["LLVM-" + LLVM_VERSION, "LLVM-" + LLVM_VERSION + "svn"]
     t = platform.system()
-    if t == 'Darwin':
-        pfx, ext = 'lib', '.dylib'
-    elif t == 'Windows':
-        pfx, ext = '', '.dll'
+    if t == "Darwin":
+        pfx, ext = "lib", ".dylib"
+    elif t == "Windows":
+        pfx, ext = "", ".dll"
     else:
-        pfx, ext = 'lib', '.so'
+        pfx, ext = "lib", ".so"
 
     for i in names:
         try:
@@ -122,4 +127,4 @@ def get_library():
         t = ctypes.util.find_library(i)
         if t:
             return cdll.LoadLibrary(t)
-    raise Exception('LLVM shared library not found!')
+    raise Exception("LLVM shared library not found!")

diff --git a/llvm/bindings/python/llvm/core.py b/llvm/bindings/python/llvm/core.py
index 812d5d0e94129..a2de827ed3bf6 100644
--- a/llvm/bindings/python/llvm/core.py
+++ b/llvm/bindings/python/llvm/core.py
@@ -1,10 +1,10 @@
-#===- core.py - Python LLVM Bindings -------------------------*- python -*--===#
+# ===- core.py - Python LLVM Bindings -------------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 from __future__ import print_function
 
 from .common import LLVMObject
@@ -36,6 +36,7 @@
 lib = get_library()
 Enums = []
 
+
 class LLVMEnumeration(object):
     """Represents an individual LLVM enumeration."""
 
@@ -44,8 +45,7 @@ def __init__(self, name, value):
         self.value = value
 
     def __repr__(self):
-        return '%s.%s' % (self.__class__.__name__,
-                          self.name)
+        return "%s.%s" % (self.__class__.__name__, self.name)
 
     @classmethod
     def from_value(cls, value):
@@ -53,8 +53,7 @@ def from_value(cls, value):
         result = cls._value_map.get(value, None)
 
         if result is None:
-            raise ValueError('Unknown %s: %d' % (cls.__name__,
-                                                 value))
+            raise ValueError("Unknown %s: %d" % (cls.__name__, value))
 
         return result
 
@@ -66,12 +65,12 @@ def register(cls, name, value):
         enumerations. You should not need to call this outside this module.
         """
         if value in cls._value_map:
-            raise ValueError('%s value already registered: %d' % (cls.__name__,
-                                                                  value))
+            raise ValueError("%s value already registered: %d" % (cls.__name__, value))
         enum = cls(name, value)
         cls._value_map[value] = enum
         setattr(cls, name, enum)
 
+
 class Attribute(LLVMEnumeration):
     """Represents an individual Attribute enumeration."""
 
@@ -80,6 +79,7 @@ class Attribute(LLVMEnumeration):
     def __init__(self, name, value):
         super(Attribute, self).__init__(name, value)
 
+
 class OpCode(LLVMEnumeration):
     """Represents an individual OpCode enumeration."""
 
@@ -88,6 +88,7 @@ class OpCode(LLVMEnumeration):
     def __init__(self, name, value):
         super(OpCode, self).__init__(name, value)
 
+
 class TypeKind(LLVMEnumeration):
     """Represents an individual TypeKind enumeration."""
 
@@ -96,6 +97,7 @@ class TypeKind(LLVMEnumeration):
     def __init__(self, name, value):
         super(TypeKind, self).__init__(name, value)
 
+
 class Linkage(LLVMEnumeration):
     """Represents an individual Linkage enumeration."""
 
@@ -104,6 +106,7 @@ class Linkage(LLVMEnumeration):
     def __init__(self, name, value):
         super(Linkage, self).__init__(name, value)
 
+
 class Visibility(LLVMEnumeration):
     """Represents an individual visibility enumeration."""
 
@@ -112,6 +115,7 @@ class Visibility(LLVMEnumeration):
     def __init__(self, name, value):
         super(Visibility, self).__init__(name, value)
 
+
 class CallConv(LLVMEnumeration):
     """Represents an individual calling convention enumeration."""
 
@@ -120,6 +124,7 @@ class CallConv(LLVMEnumeration):
     def __init__(self, name, value):
         super(CallConv, self).__init__(name, value)
 
+
 class IntPredicate(LLVMEnumeration):
     """Represents an individual IntPredicate enumeration."""
 
@@ -128,6 +133,7 @@ class IntPredicate(LLVMEnumeration):
     def __init__(self, name, value):
         super(IntPredicate, self).__init__(name, value)
 
+
 class RealPredicate(LLVMEnumeration):
     """Represents an individual RealPredicate enumeration."""
 
@@ -136,6 +142,7 @@ class RealPredicate(LLVMEnumeration):
     def __init__(self, name, value):
         super(RealPredicate, self).__init__(name, value)
 
+
 class LandingPadClauseTy(LLVMEnumeration):
     """Represents an individual LandingPadClauseTy enumeration."""
 
@@ -144,6 +151,7 @@ class LandingPadClauseTy(LLVMEnumeration):
     def __init__(self, name, value):
         super(LandingPadClauseTy, self).__init__(name, value)
 
+
 class MemoryBuffer(LLVMObject):
     """Represents an opaque memory buffer."""
 
@@ -159,8 +167,9 @@ def __init__(self, filename=None):
         memory = c_object_p()
         out = c_char_p(None)
 
-        result = lib.LLVMCreateMemoryBufferWithContentsOfFile(filename,
-                byref(memory), byref(out))
+        result = lib.LLVMCreateMemoryBufferWithContentsOfFile(
+            filename, byref(memory), byref(out)
+        )
 
         if result:
             raise Exception("Could not create memory buffer: %s" % out.value)
@@ -170,8 +179,8 @@ def __init__(self, filename=None):
     def __len__(self):
         return lib.LLVMGetBufferSize(self)
 
+
 class Value(LLVMObject):
-    
     def __init__(self, value):
         LLVMObject.__init__(self, value)
 
@@ -181,16 +190,17 @@ def name(self):
 
     def dump(self):
         lib.LLVMDumpValue(self)
-    
+
     def get_operand(self, i):
         return Value(lib.LLVMGetOperand(self, i))
-    
+
     def set_operand(self, i, v):
         return lib.LLVMSetOperand(self, i, v)
-    
+
     def __len__(self):
         return lib.LLVMGetNumOperands(self)
 
+
 class Module(LLVMObject):
     """Represents the top-level structure of an llvm program in an opaque object."""
 
@@ -232,10 +242,10 @@ def __init__(self, module, reverse=False):
                 self.function = self.module.last
             else:
                 self.function = self.module.first
-        
+
         def __iter__(self):
             return self
-        
+
         def __next__(self):
             if not isinstance(self.function, Function):
                 raise StopIteration("")
@@ -266,25 +276,25 @@ def last(self):
     def print_module_to_file(self, filename):
         out = c_char_p(None)
         # Result is inverted so 0 means everything was ok.
-        result = lib.LLVMPrintModuleToFile(self, filename, byref(out))        
+        result = lib.LLVMPrintModuleToFile(self, filename, byref(out))
         if result:
             raise RuntimeError("LLVM Error: %s" % out.value)
 
-class Function(Value):
 
+class Function(Value):
     def __init__(self, value):
         Value.__init__(self, value)
-    
+
     @property
     def next(self):
         f = lib.LLVMGetNextFunction(self)
         return f and Function(f)
-    
+
     @property
     def prev(self):
         f = lib.LLVMGetPreviousFunction(self)
         return f and Function(f)
-    
+
     @property
     def first(self):
         b = lib.LLVMGetFirstBasicBlock(self)
@@ -303,10 +313,10 @@ def __init__(self, function, reverse=False):
                 self.bb = function.last
             else:
                 self.bb = function.first
-        
+
         def __iter__(self):
             return self
-        
+
         def __next__(self):
             if not isinstance(self.bb, BasicBlock):
                 raise StopIteration("")
@@ -319,18 +329,18 @@ def __next__(self):
 
         if sys.version_info.major == 2:
             next = __next__
-    
+
     def __iter__(self):
         return Function.__bb_iterator(self)
 
     def __reversed__(self):
         return Function.__bb_iterator(self, reverse=True)
-    
+
     def __len__(self):
         return lib.LLVMCountBasicBlocks(self)
 
+
 class BasicBlock(LLVMObject):
-    
     def __init__(self, value):
         LLVMObject.__init__(self, value)
 
@@ -343,7 +353,7 @@ def next(self):
     def prev(self):
         b = lib.LLVMGetPreviousBasicBlock(self)
         return b and BasicBlock(b)
-    
+
     @property
     def first(self):
         i = lib.LLVMGetFirstInstruction(self)
@@ -356,7 +366,7 @@ def last(self):
 
     def __as_value(self):
         return Value(lib.LLVMBasicBlockAsValue(self))
-    
+
     @property
     def name(self):
         return lib.LLVMGetValueName(self.__as_value())
@@ -365,28 +375,26 @@ def dump(self):
         lib.LLVMDumpValue(self.__as_value())
 
     def get_operand(self, i):
-        return Value(lib.LLVMGetOperand(self.__as_value(),
-                                        i))
-    
+        return Value(lib.LLVMGetOperand(self.__as_value(), i))
+
     def set_operand(self, i, v):
-        return lib.LLVMSetOperand(self.__as_value(),
-                                  i, v)
-    
+        return lib.LLVMSetOperand(self.__as_value(), i, v)
+
     def __len__(self):
         return lib.LLVMGetNumOperands(self.__as_value())
 
     class __inst_iterator(object):
-        def __init__(self, bb, reverse=False):            
+        def __init__(self, bb, reverse=False):
             self.bb = bb
             self.reverse = reverse
             if self.reverse:
                 self.inst = self.bb.last
             else:
                 self.inst = self.bb.first
-        
+
         def __iter__(self):
             return self
-        
+
         def __next__(self):
             if not isinstance(self.inst, Instruction):
                 raise StopIteration("")
@@ -408,7 +416,6 @@ def __reversed__(self):
 
 
 class Instruction(Value):
-
     def __init__(self, value):
         Value.__init__(self, value)
 
@@ -426,8 +433,8 @@ def prev(self):
     def opcode(self):
         return OpCode.from_value(lib.LLVMGetInstructionOpcode(self))
 
-class Context(LLVMObject):
 
+class Context(LLVMObject):
     def __init__(self, context=None):
         if context is None:
             context = lib.LLVMContextCreate()
@@ -439,6 +446,7 @@ def __init__(self, context=None):
     def GetGlobalContext(cls):
         return Context(lib.LLVMGetGlobalContext())
 
+
 def register_library(library):
     # Initialization/Shutdown declarations.
     library.LLVMShutdown.argtypes = []
@@ -455,8 +463,11 @@ def register_library(library):
     library.LLVMGetGlobalContext.restype = c_object_p
 
     # Memory buffer declarations
-    library.LLVMCreateMemoryBufferWithContentsOfFile.argtypes = [c_char_p,
-            POINTER(c_object_p), POINTER(c_char_p)]
+    library.LLVMCreateMemoryBufferWithContentsOfFile.argtypes = [
+        c_char_p,
+        POINTER(c_object_p),
+        POINTER(c_char_p),
+    ]
     library.LLVMCreateMemoryBufferWithContentsOfFile.restype = bool
 
     library.LLVMGetBufferSize.argtypes = [MemoryBuffer]
@@ -485,8 +496,7 @@ def register_library(library):
     library.LLVMDumpModule.argtypes = [Module]
     library.LLVMDumpModule.restype = None
 
-    library.LLVMPrintModuleToFile.argtypes = [Module, c_char_p,
-                                              POINTER(c_char_p)]
+    library.LLVMPrintModuleToFile.argtypes = [Module, c_char_p, POINTER(c_char_p)]
     library.LLVMPrintModuleToFile.restype = bool
 
     library.LLVMGetFirstFunction.argtypes = [Module]
@@ -552,6 +562,7 @@ def register_library(library):
     library.LLVMGetInstructionOpcode.argtypes = [Instruction]
     library.LLVMGetInstructionOpcode.restype = c_uint
 
+
 def register_enumerations():
     if Enums:
         return None
@@ -572,9 +583,11 @@ def register_enumerations():
             enum_class.register(name, value)
     return enums
 
+
 def initialize_llvm():
     Context.GetGlobalContext()
 
+
 register_library(lib)
 Enums = register_enumerations()
 initialize_llvm()

diff --git a/llvm/bindings/python/llvm/disassembler.py b/llvm/bindings/python/llvm/disassembler.py
index 75625588911ca..a57b8b838832d 100644
--- a/llvm/bindings/python/llvm/disassembler.py
+++ b/llvm/bindings/python/llvm/disassembler.py
@@ -1,10 +1,10 @@
-#===- disassembler.py - Python LLVM Bindings -----------------*- python -*--===#
+# ===- disassembler.py - Python LLVM Bindings -----------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 from ctypes import CFUNCTYPE
 from ctypes import POINTER
@@ -23,7 +23,7 @@
 from .common import get_library
 
 __all__ = [
-    'Disassembler',
+    "Disassembler",
 ]
 
 lib = get_library()
@@ -33,9 +33,23 @@
 Option_UseMarkup = 1
 
 
-
 _initialized = False
-_targets = ['AArch64', 'ARM', 'Hexagon', 'MSP430', 'Mips', 'NVPTX', 'PowerPC', 'R600', 'Sparc', 'SystemZ', 'X86', 'XCore']
+_targets = [
+    "AArch64",
+    "ARM",
+    "Hexagon",
+    "MSP430",
+    "Mips",
+    "NVPTX",
+    "PowerPC",
+    "R600",
+    "Sparc",
+    "SystemZ",
+    "X86",
+    "XCore",
+]
+
+
 def _ensure_initialized():
     global _initialized
     if not _initialized:
@@ -63,6 +77,7 @@ class Disassembler(LLVMObject):
 
     Disassembler instances can disassemble instructions from multiple sources.
     """
+
     def __init__(self, triple):
         """Create a new disassembler instance.
 
@@ -72,11 +87,15 @@ def __init__(self, triple):
 
         _ensure_initialized()
 
-        ptr = lib.LLVMCreateDisasm(c_char_p(triple), c_void_p(None), c_int(0),
-                callbacks['op_info'](0), callbacks['symbol_lookup'](0))
+        ptr = lib.LLVMCreateDisasm(
+            c_char_p(triple),
+            c_void_p(None),
+            c_int(0),
+            callbacks["op_info"](0),
+            callbacks["symbol_lookup"](0),
+        )
         if not ptr:
-            raise Exception('Could not obtain disassembler for triple: %s' %
-                            triple)
+            raise Exception("Could not obtain disassembler for triple: %s" % triple)
 
         LLVMObject.__init__(self, ptr, disposer=lib.LLVMDisasmDispose)
 
@@ -100,8 +119,9 @@ def get_instruction(self, source, pc=0):
         buf = cast(c_char_p(source), POINTER(c_ubyte))
         out_str = cast((c_byte * 255)(), c_char_p)
 
-        result = lib.LLVMDisasmInstruction(self, buf, c_uint64(len(source)),
-                                           c_uint64(pc), out_str, 255)
+        result = lib.LLVMDisasmInstruction(
+            self, buf, c_uint64(len(source)), c_uint64(pc), out_str, 255
+        )
 
         return (result, out_str.value)
 
@@ -128,9 +148,9 @@ def get_instructions(self, source, pc=0):
         end_address = pc + len(source)
         while address < end_address:
             b = cast(addressof(buf) + offset, POINTER(c_ubyte))
-            result = lib.LLVMDisasmInstruction(self, b,
-                    c_uint64(len(source) - offset), c_uint64(address),
-                    out_str, 255)
+            result = lib.LLVMDisasmInstruction(
+                self, b, c_uint64(len(source) - offset), c_uint64(address), out_str, 255
+            )
 
             if result == 0:
                 break
@@ -142,28 +162,40 @@ def get_instructions(self, source, pc=0):
 
     def set_options(self, options):
         if not lib.LLVMSetDisasmOptions(self, options):
-            raise Exception('Unable to set all disassembler options in %i' % options)
+            raise Exception("Unable to set all disassembler options in %i" % options)
 
 
 def register_library(library):
-    library.LLVMCreateDisasm.argtypes = [c_char_p, c_void_p, c_int,
-        callbacks['op_info'], callbacks['symbol_lookup']]
+    library.LLVMCreateDisasm.argtypes = [
+        c_char_p,
+        c_void_p,
+        c_int,
+        callbacks["op_info"],
+        callbacks["symbol_lookup"],
+    ]
     library.LLVMCreateDisasm.restype = c_object_p
 
     library.LLVMDisasmDispose.argtypes = [Disassembler]
 
-    library.LLVMDisasmInstruction.argtypes = [Disassembler, POINTER(c_ubyte),
-            c_uint64, c_uint64, c_char_p, c_size_t]
+    library.LLVMDisasmInstruction.argtypes = [
+        Disassembler,
+        POINTER(c_ubyte),
+        c_uint64,
+        c_uint64,
+        c_char_p,
+        c_size_t,
+    ]
     library.LLVMDisasmInstruction.restype = c_size_t
 
     library.LLVMSetDisasmOptions.argtypes = [Disassembler, c_uint64]
     library.LLVMSetDisasmOptions.restype = c_int
 
 
-callbacks['op_info'] = CFUNCTYPE(c_int, c_void_p, c_uint64, c_uint64, c_uint64,
-                                 c_int, c_void_p)
-callbacks['symbol_lookup'] = CFUNCTYPE(c_char_p, c_void_p, c_uint64,
-                                       POINTER(c_uint64), c_uint64,
-                                       POINTER(c_char_p))
+callbacks["op_info"] = CFUNCTYPE(
+    c_int, c_void_p, c_uint64, c_uint64, c_uint64, c_int, c_void_p
+)
+callbacks["symbol_lookup"] = CFUNCTYPE(
+    c_char_p, c_void_p, c_uint64, POINTER(c_uint64), c_uint64, POINTER(c_char_p)
+)
 
 register_library(lib)

diff --git a/llvm/bindings/python/llvm/enumerations.py b/llvm/bindings/python/llvm/enumerations.py
index ebb39a4ded831..34e297603b3f2 100644
--- a/llvm/bindings/python/llvm/enumerations.py
+++ b/llvm/bindings/python/llvm/enumerations.py
@@ -1,10 +1,10 @@
-#===- enumerations.py - Python LLVM Enumerations -------------*- python -*--===#
+# ===- enumerations.py - Python LLVM Enumerations -------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 r"""
 LLVM Enumerations
@@ -18,193 +18,193 @@
 """
 
 __all__ = [
-    'Attributes',
-    'OpCodes',
-    'TypeKinds',
-    'Linkages',
-    'Visibility',
-    'CallConv',
-    'IntPredicate',
-    'RealPredicate',
-    'LandingPadClauseTy',
+    "Attributes",
+    "OpCodes",
+    "TypeKinds",
+    "Linkages",
+    "Visibility",
+    "CallConv",
+    "IntPredicate",
+    "RealPredicate",
+    "LandingPadClauseTy",
 ]
 
 Attributes = [
-    ('ZExt', 1 << 0),
-    ('MSExt', 1 << 1),
-    ('NoReturn', 1 << 2),
-    ('InReg', 1 << 3),
-    ('StructRet', 1 << 4),
-    ('NoUnwind', 1 << 5),
-    ('NoAlias', 1 << 6),
-    ('ByVal', 1 << 7),
-    ('Nest', 1 << 8),
-    ('ReadNone', 1 << 9),
-    ('ReadOnly', 1 << 10),
-    ('NoInline', 1 << 11),
-    ('AlwaysInline', 1 << 12),
-    ('OptimizeForSize', 1 << 13),
-    ('StackProtect', 1 << 14),
-    ('StackProtectReq', 1 << 15),
-    ('Alignment', 31 << 16),
-    ('NoCapture', 1 << 21),
-    ('NoRedZone', 1 << 22),
-    ('ImplicitFloat', 1 << 23),
-    ('Naked', 1 << 24),
-    ('InlineHint', 1 << 25),
-    ('StackAlignment', 7 << 26),
-    ('ReturnsTwice', 1 << 29),
-    ('UWTable', 1 << 30),
-    ('NonLazyBind', 1 << 31),
+    ("ZExt", 1 << 0),
+    ("MSExt", 1 << 1),
+    ("NoReturn", 1 << 2),
+    ("InReg", 1 << 3),
+    ("StructRet", 1 << 4),
+    ("NoUnwind", 1 << 5),
+    ("NoAlias", 1 << 6),
+    ("ByVal", 1 << 7),
+    ("Nest", 1 << 8),
+    ("ReadNone", 1 << 9),
+    ("ReadOnly", 1 << 10),
+    ("NoInline", 1 << 11),
+    ("AlwaysInline", 1 << 12),
+    ("OptimizeForSize", 1 << 13),
+    ("StackProtect", 1 << 14),
+    ("StackProtectReq", 1 << 15),
+    ("Alignment", 31 << 16),
+    ("NoCapture", 1 << 21),
+    ("NoRedZone", 1 << 22),
+    ("ImplicitFloat", 1 << 23),
+    ("Naked", 1 << 24),
+    ("InlineHint", 1 << 25),
+    ("StackAlignment", 7 << 26),
+    ("ReturnsTwice", 1 << 29),
+    ("UWTable", 1 << 30),
+    ("NonLazyBind", 1 << 31),
 ]
 
 OpCodes = [
-    ('Ret', 1),
-    ('Br', 2),
-    ('Switch', 3),
-    ('IndirectBr', 4),
-    ('Invoke', 5),
-    ('Unreachable', 7),
-    ('Add', 8),
-    ('FAdd', 9),
-    ('Sub', 10),
-    ('FSub', 11),
-    ('Mul', 12),
-    ('FMul', 13),
-    ('UDiv', 14),
-    ('SDiv', 15),
-    ('FDiv', 16),
-    ('URem', 17),
-    ('SRem', 18),
-    ('FRem', 19),
-    ('Shl', 20),
-    ('LShr', 21),
-    ('AShr', 22),
-    ('And', 23),
-    ('Or', 24),
-    ('Xor', 25),
-    ('Alloca', 26),
-    ('Load', 27),
-    ('Store', 28),
-    ('GetElementPtr', 29),
-    ('Trunc', 30),
-    ('ZExt', 31),
-    ('SExt', 32),
-    ('FPToUI', 33),
-    ('FPToSI', 34),
-    ('UIToFP', 35),
-    ('SIToFP', 36),
-    ('FPTrunc', 37),
-    ('FPExt', 38),
-    ('PtrToInt', 39),
-    ('IntToPtr', 40),
-    ('BitCast', 41),
-    ('ICmp', 42),
-    ('FCmpl', 43),
-    ('PHI', 44),
-    ('Call', 45),
-    ('Select', 46),
-    ('UserOp1', 47),
-    ('UserOp2', 48),
-    ('AArg', 49),
-    ('ExtractElement', 50),
-    ('InsertElement', 51),
-    ('ShuffleVector', 52),
-    ('ExtractValue', 53),
-    ('InsertValue', 54),
-    ('Fence', 55),
-    ('AtomicCmpXchg', 56),
-    ('AtomicRMW', 57),
-    ('Resume', 58),
-    ('LandingPad', 59),
+    ("Ret", 1),
+    ("Br", 2),
+    ("Switch", 3),
+    ("IndirectBr", 4),
+    ("Invoke", 5),
+    ("Unreachable", 7),
+    ("Add", 8),
+    ("FAdd", 9),
+    ("Sub", 10),
+    ("FSub", 11),
+    ("Mul", 12),
+    ("FMul", 13),
+    ("UDiv", 14),
+    ("SDiv", 15),
+    ("FDiv", 16),
+    ("URem", 17),
+    ("SRem", 18),
+    ("FRem", 19),
+    ("Shl", 20),
+    ("LShr", 21),
+    ("AShr", 22),
+    ("And", 23),
+    ("Or", 24),
+    ("Xor", 25),
+    ("Alloca", 26),
+    ("Load", 27),
+    ("Store", 28),
+    ("GetElementPtr", 29),
+    ("Trunc", 30),
+    ("ZExt", 31),
+    ("SExt", 32),
+    ("FPToUI", 33),
+    ("FPToSI", 34),
+    ("UIToFP", 35),
+    ("SIToFP", 36),
+    ("FPTrunc", 37),
+    ("FPExt", 38),
+    ("PtrToInt", 39),
+    ("IntToPtr", 40),
+    ("BitCast", 41),
+    ("ICmp", 42),
+    ("FCmpl", 43),
+    ("PHI", 44),
+    ("Call", 45),
+    ("Select", 46),
+    ("UserOp1", 47),
+    ("UserOp2", 48),
+    ("AArg", 49),
+    ("ExtractElement", 50),
+    ("InsertElement", 51),
+    ("ShuffleVector", 52),
+    ("ExtractValue", 53),
+    ("InsertValue", 54),
+    ("Fence", 55),
+    ("AtomicCmpXchg", 56),
+    ("AtomicRMW", 57),
+    ("Resume", 58),
+    ("LandingPad", 59),
 ]
 
 TypeKinds = [
-    ('Void', 0),
-    ('Half', 1),
-    ('Float', 2),
-    ('Double', 3),
-    ('X86_FP80', 4),
-    ('FP128', 5),
-    ('PPC_FP128', 6),
-    ('Label', 7),
-    ('Integer', 8),
-    ('Function', 9),
-    ('Struct', 10),
-    ('Array', 11),
-    ('Pointer', 12),
-    ('Vector', 13),
-    ('Metadata', 14),
-    ('X86_MMX', 15),
+    ("Void", 0),
+    ("Half", 1),
+    ("Float", 2),
+    ("Double", 3),
+    ("X86_FP80", 4),
+    ("FP128", 5),
+    ("PPC_FP128", 6),
+    ("Label", 7),
+    ("Integer", 8),
+    ("Function", 9),
+    ("Struct", 10),
+    ("Array", 11),
+    ("Pointer", 12),
+    ("Vector", 13),
+    ("Metadata", 14),
+    ("X86_MMX", 15),
 ]
 
 Linkages = [
-    ('External', 0),
-    ('AvailableExternally', 1),
-    ('LinkOnceAny', 2),
-    ('LinkOnceODR', 3),
-    ('WeakAny', 4),
-    ('WeakODR', 5),
-    ('Appending', 6),
-    ('Internal', 7),
-    ('Private', 8),
-    ('DLLImport', 9),
-    ('DLLExport', 10),
-    ('ExternalWeak', 11),
-    ('Ghost', 12),
-    ('Common', 13),
-    ('LinkerPrivate', 14),
-    ('LinkerPrivateWeak', 15),
-    ('LinkerPrivateWeakDefAuto', 16),
+    ("External", 0),
+    ("AvailableExternally", 1),
+    ("LinkOnceAny", 2),
+    ("LinkOnceODR", 3),
+    ("WeakAny", 4),
+    ("WeakODR", 5),
+    ("Appending", 6),
+    ("Internal", 7),
+    ("Private", 8),
+    ("DLLImport", 9),
+    ("DLLExport", 10),
+    ("ExternalWeak", 11),
+    ("Ghost", 12),
+    ("Common", 13),
+    ("LinkerPrivate", 14),
+    ("LinkerPrivateWeak", 15),
+    ("LinkerPrivateWeakDefAuto", 16),
 ]
 
 Visibility = [
-    ('Default', 0),
-    ('Hidden', 1),
-    ('Protected', 2),
+    ("Default", 0),
+    ("Hidden", 1),
+    ("Protected", 2),
 ]
 
 CallConv = [
-    ('CCall', 0),
-    ('FastCall', 8),
-    ('ColdCall', 9),
-    ('X86StdcallCall', 64),
-    ('X86FastcallCall', 65),
+    ("CCall", 0),
+    ("FastCall", 8),
+    ("ColdCall", 9),
+    ("X86StdcallCall", 64),
+    ("X86FastcallCall", 65),
 ]
 
 IntPredicate = [
-    ('EQ', 32),
-    ('NE', 33),
-    ('UGT', 34),
-    ('UGE', 35),
-    ('ULT', 36),
-    ('ULE', 37),
-    ('SGT', 38),
-    ('SGE', 39),
-    ('SLT', 40),
-    ('SLE', 41),
+    ("EQ", 32),
+    ("NE", 33),
+    ("UGT", 34),
+    ("UGE", 35),
+    ("ULT", 36),
+    ("ULE", 37),
+    ("SGT", 38),
+    ("SGE", 39),
+    ("SLT", 40),
+    ("SLE", 41),
 ]
 
 RealPredicate = [
-    ('PredicateFalse', 0),
-    ('OEQ', 1),
-    ('OGT', 2),
-    ('OGE', 3),
-    ('OLT', 4),
-    ('OLE', 5),
-    ('ONE', 6),
-    ('ORD', 7),
-    ('UNO', 8),
-    ('UEQ', 9),
-    ('UGT', 10),
-    ('UGE', 11),
-    ('ULT', 12),
-    ('ULE', 13),
-    ('UNE', 14),
-    ('PredicateTrue', 15),
+    ("PredicateFalse", 0),
+    ("OEQ", 1),
+    ("OGT", 2),
+    ("OGE", 3),
+    ("OLT", 4),
+    ("OLE", 5),
+    ("ONE", 6),
+    ("ORD", 7),
+    ("UNO", 8),
+    ("UEQ", 9),
+    ("UGT", 10),
+    ("UGE", 11),
+    ("ULT", 12),
+    ("ULE", 13),
+    ("UNE", 14),
+    ("PredicateTrue", 15),
 ]
 
 LandingPadClauseTy = [
-    ('Catch', 0),
-    ('Filter', 1),
+    ("Catch", 0),
+    ("Filter", 1),
 ]

diff --git a/llvm/bindings/python/llvm/object.py b/llvm/bindings/python/llvm/object.py
index e8841b6045f62..b63b9ce46c41d 100644
--- a/llvm/bindings/python/llvm/object.py
+++ b/llvm/bindings/python/llvm/object.py
@@ -1,10 +1,10 @@
-#===- object.py - Python Object Bindings --------------------*- python -*--===#
+# ===- object.py - Python Object Bindings --------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 r"""
 Object File Interface
@@ -96,6 +96,7 @@
     "Symbol",
 ]
 
+
 class ObjectFile(LLVMObject):
     """Represents an object/binary file."""
 
@@ -113,7 +114,7 @@ def __init__(self, filename=None, contents=None):
             contents = MemoryBuffer(filename=filename)
 
         if contents is None:
-            raise Exception('No input found.')
+            raise Exception("No input found.")
 
         ptr = lib.LLVMCreateObjectFile(contents)
         LLVMObject.__init__(self, ptr, disposer=lib.LLVMDisposeObjectFile)
@@ -175,6 +176,7 @@ def get_symbols(self, cache=False):
 
         lib.LLVMDisposeSymbolIterator(symbols)
 
+
 class Section(LLVMObject):
     """Represents a section in an object file."""
 
@@ -196,7 +198,7 @@ def name(self):
         This is typically something like '.dynsym' or '.rodata'.
         """
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         return lib.LLVMGetSectionName(self)
 
@@ -204,14 +206,14 @@ def name(self):
     def size(self):
         """The size of the section, in long bytes."""
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         return lib.LLVMGetSectionSize(self)
 
     @CachedProperty
     def contents(self):
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         siz = self.size
 
@@ -224,14 +226,14 @@ def contents(self):
     def address(self):
         """The address of this section, in long bytes."""
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         return lib.LLVMGetSectionAddress(self)
 
     def has_symbol(self, symbol):
         """Returns whether a Symbol instance is present in this Section."""
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         assert isinstance(symbol, Symbol)
         return lib.LLVMGetSectionContainsSymbol(self, symbol)
@@ -245,7 +247,7 @@ def get_relocations(self, cache=False):
         on iterators for more.
         """
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         relocations = lib.LLVMGetRelocations(self)
         last = None
@@ -274,10 +276,10 @@ def cache(self):
         limitation. When called, the properties of the Section are fetched so
         they are still available after the Section has been marked inactive.
         """
-        getattr(self, 'name')
-        getattr(self, 'size')
-        getattr(self, 'contents')
-        getattr(self, 'address')
+        getattr(self, "name")
+        getattr(self, "size")
+        getattr(self, "contents")
+        getattr(self, "address")
 
     def expire(self):
         """Expire the section.
@@ -286,8 +288,10 @@ def expire(self):
         """
         self.expired = True
 
+
 class Symbol(LLVMObject):
     """Represents a symbol in an object file."""
+
     def __init__(self, ptr, object_file):
         assert isinstance(ptr, c_object_p)
         assert isinstance(object_file, ObjectFile)
@@ -305,7 +309,7 @@ def name(self):
         mangling could be in effect.
         """
         if self.expired:
-            raise Exception('Symbol instance has expired.')
+            raise Exception("Symbol instance has expired.")
 
         return lib.LLVMGetSymbolName(self)
 
@@ -313,7 +317,7 @@ def name(self):
     def address(self):
         """The address of this symbol, in long bytes."""
         if self.expired:
-            raise Exception('Symbol instance has expired.')
+            raise Exception("Symbol instance has expired.")
 
         return lib.LLVMGetSymbolAddress(self)
 
@@ -321,7 +325,7 @@ def address(self):
     def size(self):
         """The size of the symbol, in long bytes."""
         if self.expired:
-            raise Exception('Symbol instance has expired.')
+            raise Exception("Symbol instance has expired.")
 
         return lib.LLVMGetSymbolSize(self)
 
@@ -342,9 +346,9 @@ def section(self):
 
     def cache(self):
         """Cache all cacheable properties."""
-        getattr(self, 'name')
-        getattr(self, 'address')
-        getattr(self, 'size')
+        getattr(self, "name")
+        getattr(self, "address")
+        getattr(self, "size")
 
     def expire(self):
         """Mark the object as expired to prevent future API accesses.
@@ -354,8 +358,10 @@ def expire(self):
         """
         self.expired = True
 
+
 class Relocation(LLVMObject):
     """Represents a relocation definition."""
+
     def __init__(self, ptr):
         """Create a new relocation instance.
 
@@ -374,7 +380,7 @@ def __init__(self, ptr):
     def offset(self):
         """The offset of this relocation, in long bytes."""
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         return lib.LLVMGetRelocationOffset(self)
 
@@ -382,7 +388,7 @@ def offset(self):
     def symbol(self):
         """The Symbol corresponding to this Relocation."""
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         ptr = lib.LLVMGetRelocationSymbol(self)
         return Symbol(ptr)
@@ -391,7 +397,7 @@ def symbol(self):
     def type_number(self):
         """The relocation type, as a long."""
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         return lib.LLVMGetRelocationType(self)
 
@@ -399,14 +405,14 @@ def type_number(self):
     def type_name(self):
         """The relocation type's name, as a str."""
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         return lib.LLVMGetRelocationTypeName(self)
 
     @CachedProperty
     def value_string(self):
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         return lib.LLVMGetRelocationValueString(self)
 
@@ -416,12 +422,13 @@ def expire(self):
 
     def cache(self):
         """Cache all cacheable properties on this instance."""
-        getattr(self, 'address')
-        getattr(self, 'offset')
-        getattr(self, 'symbol')
-        getattr(self, 'type')
-        getattr(self, 'type_name')
-        getattr(self, 'value_string')
+        getattr(self, "address")
+        getattr(self, "offset")
+        getattr(self, "symbol")
+        getattr(self, "type")
+        getattr(self, "type_name")
+        getattr(self, "value_string")
+
 
 def register_library(library):
     """Register function prototypes with LLVM library instance."""
@@ -504,5 +511,6 @@ def register_library(library):
     library.LLVMGetRelocationValueString.argtypes = [c_object_p]
     library.LLVMGetRelocationValueString.restype = c_char_p
 
+
 lib = get_library()
 register_library(lib)

diff --git a/llvm/bindings/python/llvm/tests/base.py b/llvm/bindings/python/llvm/tests/base.py
index aa435bc1f35f5..7350cb419c1eb 100644
--- a/llvm/bindings/python/llvm/tests/base.py
+++ b/llvm/bindings/python/llvm/tests/base.py
@@ -4,18 +4,19 @@
 
 
 POSSIBLE_TEST_BINARIES = [
-    'libreadline.so.5',
-    'libreadline.so.6',
+    "libreadline.so.5",
+    "libreadline.so.6",
 ]
 
 POSSIBLE_TEST_BINARY_PATHS = [
-    '/usr/lib/debug',
-    '/lib',
-    '/usr/lib',
-    '/usr/local/lib',
-    '/lib/i386-linux-gnu',
+    "/usr/lib/debug",
+    "/lib",
+    "/usr/lib",
+    "/usr/local/lib",
+    "/lib/i386-linux-gnu",
 ]
 
+
 class TestBase(unittest.TestCase):
     if sys.version_info.major == 2:
         assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
@@ -33,7 +34,8 @@ def get_test_binary(self):
                 if os.path.exists(path):
                     return path
 
-        raise Exception('No suitable test binaries available!')
+        raise Exception("No suitable test binaries available!")
+
     get_test_binary.__test__ = False
 
     def get_test_file(self):

diff --git a/llvm/bindings/python/llvm/tests/test_bitreader.py b/llvm/bindings/python/llvm/tests/test_bitreader.py
index 460005a2b87ab..08e55e1297714 100644
--- a/llvm/bindings/python/llvm/tests/test_bitreader.py
+++ b/llvm/bindings/python/llvm/tests/test_bitreader.py
@@ -8,8 +8,8 @@
 from ..core import Module
 from ..bit_reader import parse_bitcode
 
-class TestBitReader(TestBase):
 
+class TestBitReader(TestBase):
     def test_parse_bitcode(self):
         source = self.get_test_bc()
         m = parse_bitcode(MemoryBuffer(filename=source))

diff --git a/llvm/bindings/python/llvm/tests/test_core.py b/llvm/bindings/python/llvm/tests/test_core.py
index 68572b50b3d66..76a2eaf9db900 100644
--- a/llvm/bindings/python/llvm/tests/test_core.py
+++ b/llvm/bindings/python/llvm/tests/test_core.py
@@ -9,6 +9,7 @@
 from ..core import OpCode
 from ..bit_reader import parse_bitcode
 
+
 class TestCore(TestBase):
     def test_enumerations(self):
         for enum_cls, enum_spec in Enums:
@@ -77,8 +78,7 @@ def test_module_print_module_to_file(self):
     def test_module_function_iteration(self):
         m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
         i = 0
-        functions = ["f", "f2", "f3", "f4", "f5", "f6", "g1", "g2", "h1", "h2",
-                     "h3"]
+        functions = ["f", "f2", "f3", "f4", "f5", "f6", "g1", "g2", "h1", "h2", "h3"]
         # Forward
         for f in m:
             self.assertEqual(f.name, functions[i])
@@ -94,7 +94,7 @@ def test_function_basicblock_iteration(self):
         m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
         i = 0
 
-        bb_list = ['b1', 'b2', 'end']
+        bb_list = ["b1", "b2", "end"]
 
         f = m.first
         while f.name != "f6":
@@ -116,10 +116,12 @@ def test_basicblock_instruction_iteration(self):
         m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
         i = 0
 
-        inst_list = [('arg1', OpCode.ExtractValue),
-                     ('arg2', OpCode.ExtractValue),
-                     ('', OpCode.Call),
-                     ('', OpCode.Ret)]
+        inst_list = [
+            ("arg1", OpCode.ExtractValue),
+            ("arg2", OpCode.ExtractValue),
+            ("", OpCode.Call),
+            ("", OpCode.Ret),
+        ]
 
         bb = m.first.first
 

diff --git a/llvm/bindings/python/llvm/tests/test_disassembler.py b/llvm/bindings/python/llvm/tests/test_disassembler.py
index 29f2f7060bac5..d4620f69da733 100644
--- a/llvm/bindings/python/llvm/tests/test_disassembler.py
+++ b/llvm/bindings/python/llvm/tests/test_disassembler.py
@@ -4,42 +4,45 @@
 
 from ..disassembler import Disassembler, Option_UseMarkup
 
+
 class TestDisassembler(TestBase):
     def test_instantiate(self):
-         Disassembler('i686-apple-darwin9')
+        Disassembler("i686-apple-darwin9")
 
     def test_basic(self):
-        sequence = '\x67\xe3\x81' # jcxz -127
-        triple = 'i686-apple-darwin9'
+        sequence = "\x67\xe3\x81"  # jcxz -127
+        triple = "i686-apple-darwin9"
 
         disassembler = Disassembler(triple)
 
         count, s = disassembler.get_instruction(sequence)
         self.assertEqual(count, 3)
-        self.assertEqual(s, '\tjcxz\t-127')
+        self.assertEqual(s, "\tjcxz\t-127")
 
     def test_nonexistent_triple(self):
-        with self.assertRaisesRegex(Exception, "Could not obtain disassembler for triple"):
+        with self.assertRaisesRegex(
+            Exception, "Could not obtain disassembler for triple"
+        ):
             Disassembler("nonexistent-triple-raises")
 
     def test_get_instructions(self):
-        sequence = '\x67\xe3\x81\x01\xc7' # jcxz -127; addl %eax, %edi
+        sequence = "\x67\xe3\x81\x01\xc7"  # jcxz -127; addl %eax, %edi
 
-        disassembler = Disassembler('i686-apple-darwin9')
+        disassembler = Disassembler("i686-apple-darwin9")
 
         instructions = list(disassembler.get_instructions(sequence))
         self.assertEqual(len(instructions), 2)
 
-        self.assertEqual(instructions[0], (0, 3, '\tjcxz\t-127'))
-        self.assertEqual(instructions[1], (3, 2, '\taddl\t%eax, %edi'))
+        self.assertEqual(instructions[0], (0, 3, "\tjcxz\t-127"))
+        self.assertEqual(instructions[1], (3, 2, "\taddl\t%eax, %edi"))
 
     def test_set_options(self):
-        sequence = '\x10\x40\x2d\xe9'
-        triple = 'arm-linux-android'
+        sequence = "\x10\x40\x2d\xe9"
+        triple = "arm-linux-android"
 
         disassembler = Disassembler(triple)
         disassembler.set_options(Option_UseMarkup)
         count, s = disassembler.get_instruction(sequence)
         print(s)
         self.assertEqual(count, 4)
-        self.assertEqual(s, '\tpush\t{<reg:r4>, <reg:lr>}')
+        self.assertEqual(s, "\tpush\t{<reg:r4>, <reg:lr>}")

diff --git a/llvm/bindings/python/llvm/tests/test_object.py b/llvm/bindings/python/llvm/tests/test_object.py
index a45b7beec3353..b9d5868dbfadc 100644
--- a/llvm/bindings/python/llvm/tests/test_object.py
+++ b/llvm/bindings/python/llvm/tests/test_object.py
@@ -6,6 +6,7 @@
 from ..object import Section
 from ..object import Symbol
 
+
 class TestObjectFile(TestBase):
     def get_object_file(self):
         source = self.get_test_binary()

diff --git a/llvm/docs/conf.py b/llvm/docs/conf.py
index 617ce564bbeef..206f72285a830 100644
--- a/llvm/docs/conf.py
+++ b/llvm/docs/conf.py
@@ -17,211 +17,209 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
+extensions = ["sphinx.ext.intersphinx", "sphinx.ext.todo"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
 source_suffix = {
-    '.rst': 'restructuredtext',
+    ".rst": "restructuredtext",
 }
 
 try:
-  import recommonmark
+    import recommonmark
 except ImportError:
-  # manpages do not use any .md sources
-  if not tags.has('builder-man'):
-    raise
+    # manpages do not use any .md sources
+    if not tags.has("builder-man"):
+        raise
 else:
-  import sphinx
-  if sphinx.version_info >= (3, 0):
-    # This requires 0.5 or later.
-    extensions.append('recommonmark')
-  else:
-    source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}
-  source_suffix['.md'] = 'markdown'
+    import sphinx
+
+    if sphinx.version_info >= (3, 0):
+        # This requires 0.5 or later.
+        extensions.append("recommonmark")
+    else:
+        source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+    source_suffix[".md"] = "markdown"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'LLVM'
-copyright = u'2003-%d, LLVM Project' % date.today().year
+project = "LLVM"
+copyright = "2003-%d, LLVM Project" % date.today().year
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-today_fmt = '%Y-%m-%d'
+today_fmt = "%Y-%m-%d"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
 show_authors = True
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'llvm-theme'
+html_theme = "llvm-theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-html_theme_options = { "nosidebar": False }
+html_theme_options = {"nosidebar": False}
 
 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = ["_themes"]
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d'
+html_last_updated_fmt = "%Y-%m-%d"
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
 
 html_sidebars = {
-    '**': [
-        'indexsidebar.html',
-        'sourcelink.html',
-        'searchbox.html',
+    "**": [
+        "indexsidebar.html",
+        "sourcelink.html",
+        "searchbox.html",
     ]
 }
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'LLVMdoc'
+htmlhelp_basename = "LLVMdoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'LLVM.tex', u'LLVM Documentation',
-   u'LLVM project', 'manual'),
+    ("index", "LLVM.tex", "LLVM Documentation", "LLVM project", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
@@ -234,59 +232,73 @@
 # guide subdirectory.
 basedir = os.path.dirname(__file__)
 man_page_authors = "Maintained by the LLVM Team (https://llvm.org/)."
-command_guide_subpath = 'CommandGuide'
+command_guide_subpath = "CommandGuide"
 command_guide_path = os.path.join(basedir, command_guide_subpath)
-manpages_url = '{page}.html'
+manpages_url = "{page}.html"
 
 
 def process_md(name):
     file_subpath = os.path.join(command_guide_subpath, name)
     with open(os.path.join(command_guide_path, name)) as f:
-        title = f.readline().rstrip('\n')
+        title = f.readline().rstrip("\n")
 
-        m = re.match(r'^# (\S+) - (.+)$', title)
+        m = re.match(r"^# (\S+) - (.+)$", title)
         if m is None:
-            print("error: invalid title in %r "
-                  "(expected '# <name> - <description>')" % file_subpath,
-                  file=sys.stderr)
+            print(
+                "error: invalid title in %r "
+                "(expected '# <name> - <description>')" % file_subpath,
+                file=sys.stderr,
+            )
         else:
-            man_pages.append((file_subpath.replace('.md',''), m.group(1),
-                              m.group(2), man_page_authors, 1))
+            man_pages.append(
+                (
+                    file_subpath.replace(".md", ""),
+                    m.group(1),
+                    m.group(2),
+                    man_page_authors,
+                    1,
+                )
+            )
 
 
 def process_rst(name):
     file_subpath = os.path.join(command_guide_subpath, name)
     with open(os.path.join(command_guide_path, name)) as f:
-        title = f.readline().rstrip('\n')
-        header = f.readline().rstrip('\n')
+        title = f.readline().rstrip("\n")
+        header = f.readline().rstrip("\n")
 
         if len(header) != len(title):
-            print('error: invalid header in %r (does not match title)' %
-                  file_subpath, file=sys.stderr)
-        if ' - ' not in title:
-            print("error: invalid title in %r "
-                  "(expected '<name> - <description>')" % file_subpath,
-                  file=sys.stderr)
+            print(
+                "error: invalid header in %r (does not match title)" % file_subpath,
+                file=sys.stderr,
+            )
+        if " - " not in title:
+            print(
+                "error: invalid title in %r "
+                "(expected '<name> - <description>')" % file_subpath,
+                file=sys.stderr,
+            )
         # Split the name out of the title.
-        name,description = title.split(' - ', 1)
-        man_pages.append((file_subpath.replace('.rst',''), name,
-                          description, man_page_authors, 1))
+        name, description = title.split(" - ", 1)
+        man_pages.append(
+            (file_subpath.replace(".rst", ""), name, description, man_page_authors, 1)
+        )
 
 
 for name in os.listdir(command_guide_path):
     # Process Markdown files
-    if name.endswith('.md'):
+    if name.endswith(".md"):
         process_md(name)
     # Process ReST files apart from the index page.
-    elif name.endswith('.rst') and name != 'index.rst':
+    elif name.endswith(".rst") and name != "index.rst":
         process_rst(name)
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 # FIXME: Define intersphinx configuration.
 intersphinx_mapping = {}
 
 # Pygment lexer are sometimes out of date (when parsing LLVM for example) or
 # wrong. Suppress the warning so the build doesn't abort.
-suppress_warnings = [ 'misc.highlighting_failure' ]
+suppress_warnings = ["misc.highlighting_failure"]

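For reference, the title check in process_md above only accepts a first line of the form `# <name> - <description>`. A quick illustration with a made-up page title:

    import re

    # Hypothetical first line of a CommandGuide Markdown page.
    title = "# llvm-foo - do something useful"
    m = re.match(r"^# (\S+) - (.+)$", title)
    assert m is not None
    assert m.group(1) == "llvm-foo"
    assert m.group(2) == "do something useful"
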
diff --git a/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py b/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
index 87bbfbf32bda3..2e39f103cd9f3 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
@@ -5,36 +5,53 @@
 import sys
 import random
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
         self.timeFile = outputname
-        self.shfile = open(scriptname, 'w')
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile = open(scriptname, "w")
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
         """Echo some comments and invoke both versions of toy"""
         rootname = filename
-        if '.' in filename:
-            rootname = filename[:filename.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in filename:
+            rootname = filename[: filename.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %d of %d functions, %d total" >> %s\n'
+            % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy-jit < %s > %s-jit.out 2> %s-jit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class KScriptGenerator:
     """Used to generate random Kaleidoscope code"""
+
     def __init__(self, filename):
-        self.kfile = open(filename, 'w')
+        self.kfile = open(filename, "w")
         self.nextFuncNum = 1
         self.lastFuncNum = None
         self.callWeighting = 0.1
@@ -80,20 +97,22 @@ def updateCalledFunctionList(self, callee):
                 self.updateCalledFunctionList(subCallee)
 
     def setCallWeighting(self, weight):
-        """ Sets the probably of generating a function call"""
+        """Sets the probably of generating a function call"""
         self.callWeighting = weight
 
     def writeln(self, line):
-        self.kfile.write(line + '\n')
+        self.kfile.write(line + "\n")
 
     def writeComment(self, comment):
-        self.writeln('# ' + comment)
+        self.writeln("# " + comment)
 
     def writeEmptyLine(self):
         self.writeln("")
 
     def writePredefinedFunctions(self):
-        self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
+        self.writeComment(
+            "Define ':' for sequencing: as a low-precedence operator that ignores operands"
+        )
         self.writeComment("and just returns the RHS.")
         self.writeln("def binary : 1 (x y) y;")
         self.writeEmptyLine()
@@ -105,16 +124,18 @@ def writePredefinedFunctions(self):
         self.writeComment("Print the result of a function call")
         self.writeln("def printresult(N Result)")
         self.writeln("  # 'result('")
-        self.writeln("  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
-        self.writeln("  printd(N) :");
+        self.writeln(
+            "  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :"
+        )
+        self.writeln("  printd(N) :")
         self.writeln("  # ') = '")
         self.writeln("  putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
-        self.writeln("  printd(Result) :");
+        self.writeln("  printd(Result) :")
         self.writeln("  printlf();")
         self.writeEmptyLine()
 
     def writeRandomOperation(self, LValue, LHS, RHS):
-        shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
+        shouldCallFunc = self.lastFuncNum > 2 and random.random() < self.callWeighting
         if shouldCallFunc:
             funcToCall = random.randrange(1, self.lastFuncNum - 1)
             self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
@@ -130,7 +151,10 @@ def writeRandomOperation(self, LValue, LHS, RHS):
                 self.writeln("  else if %s < %s then" % (RHS, LHS))
                 self.writeln("    %s = %s %s %s" % (LValue, LHS, operation, RHS))
                 self.writeln("  else")
-                self.writeln("    %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
+                self.writeln(
+                    "    %s = %s %s %f :"
+                    % (LValue, LHS, operation, random.uniform(1, 100))
+                )
             else:
                 self.writeln("  %s = %s %s %s :" % (LValue, LHS, operation, RHS))
 
@@ -166,27 +190,43 @@ def writeFunctionCall(self):
         self.writeComment("Call the last function")
         arg1 = random.uniform(1, 100)
         arg2 = random.uniform(1, 100)
-        self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
+        self.writeln(
+            "printresult(%d, func%d(%f, %f) )"
+            % (self.lastFuncNum, self.lastFuncNum, arg1, arg2)
+        )
         self.writeEmptyLine()
         self.updateCalledFunctionList(self.lastFuncNum)
 
     def writeFinalFunctionCounts(self):
-        self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
+        self.writeComment(
+            "Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum)
+        )
+
 
-def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
-    """ Generate a random Kaleidoscope script based on the given parameters """
+def generateKScript(
+    filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript
+):
+    """Generate a random Kaleidoscope script based on the given parameters"""
     print("Generating " + filename)
-    print("  %d functions, %d elements per function, %d functions between execution" %
-          (numFuncs, elementsPerFunc, funcsBetweenExec))
+    print(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     print("  Call weighting = %f" % callWeighting)
     script = KScriptGenerator(filename)
     script.setCallWeighting(callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeComment("Auto-generated script")
-    script.writeComment("  %d functions, %d elements per function, %d functions between execution"
-                         % (numFuncs, elementsPerFunc, funcsBetweenExec))
+    script.writeComment(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     script.writeComment("  call weighting = %f" % callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeEmptyLine()
     script.writePredefinedFunctions()
     funcsSinceLastExec = 0
@@ -202,20 +242,49 @@ def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callW
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
-    timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
+    print(
+        "  Called %d of %d functions, %d total"
+        % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    )
+    timingScript.writeTimingCall(
+        filename, numFuncs, funcsCalled, script.totalCallsExecuted
+    )
+
 
 # Execution begins here
 random.seed()
 
 timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
 
-dataSets = [(5000, 3,  50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
-            (1000, 3,  10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
-            ( 200, 3,   2, 0.50), ( 200, 10,  40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
+dataSets = [
+    (5000, 3, 50, 0.50),
+    (5000, 10, 100, 0.10),
+    (5000, 10, 5, 0.10),
+    (5000, 10, 1, 0.0),
+    (1000, 3, 10, 0.50),
+    (1000, 10, 100, 0.10),
+    (1000, 10, 5, 0.10),
+    (1000, 10, 1, 0.0),
+    (200, 3, 2, 0.50),
+    (200, 10, 40, 0.10),
+    (200, 10, 2, 0.10),
+    (200, 10, 1, 0.0),
+]
 
 # Generate the code
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
-    filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
-    generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
+    filename = "test-%d-%d-%d-%d.k" % (
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        int(callWeighting * 100),
+    )
+    generateKScript(
+        filename,
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        callWeighting,
+        timingScript,
+    )
 print("All done!")

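The dominant change in this file is how over-long %-formatted writes get wrapped: the argument turns into a parenthesized expression with the format string on its own line and the `%` operator leading the continuation. The same pattern in isolation, with stand-in values rather than the script's real ones:

    # Stand-in values; the real script derives these while generating code.
    filename, funcs_called, num_funcs, total_calls = "test.k", 3, 5, 12
    time_file = "timing-data.txt"
    line = (
        'echo "%s: Calls %d of %d functions, %d total" >> %s\n'
        % (filename, funcs_called, num_funcs, total_calls, time_file)
    )
    print(line, end="")
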
diff --git a/llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py b/llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py
index 1aa80ee83ac4c..7a289e868a9ce 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py
@@ -2,71 +2,105 @@
 
 from __future__ import print_function
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
-        self.shfile = open(scriptname, 'w')
+        self.shfile = open(scriptname, "w")
         self.timeFile = outputname
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, irname, callname):
         """Echo some comments and invoke both versions of toy"""
         rootname = irname
-        if '.' in irname:
-            rootname = irname[:irname.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %s\" >> %s\n" % (callname, irname, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in irname:
+            rootname = irname[: irname.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %s" >> %s\n' % (callname, irname, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT again\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT again" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-jit -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy-jit -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class LibScriptGenerator:
     """Used to generate a bash script which will convert Kaleidoscope files to IR"""
+
     def __init__(self, filename):
-        self.shfile = open(filename, 'w')
+        self.shfile = open(filename, "w")
 
     def writeLibGenCall(self, libname, irname):
         self.shfile.write("./toy-ir-gen < %s 2> %s\n" % (libname, irname))
 
+
 def splitScript(inputname, libGenScript, timingScript):
-  rootname = inputname[:-2]
-  libname = rootname + "-lib.k"
-  irname = rootname + "-lib.ir"
-  callname = rootname + "-call.k"
-  infile = open(inputname, "r")
-  libfile = open(libname, "w")
-  callfile = open(callname, "w")
-  print("Splitting %s into %s and %s" % (inputname, callname, libname))
-  for line in infile:
-    if not line.startswith("#"):
-      if line.startswith("print"):
-        callfile.write(line)
-      else:
-        libfile.write(line)
-  libGenScript.writeLibGenCall(libname, irname)
-  timingScript.writeTimingCall(irname, callname)
+    rootname = inputname[:-2]
+    libname = rootname + "-lib.k"
+    irname = rootname + "-lib.ir"
+    callname = rootname + "-call.k"
+    infile = open(inputname, "r")
+    libfile = open(libname, "w")
+    callfile = open(callname, "w")
+    print("Splitting %s into %s and %s" % (inputname, callname, libname))
+    for line in infile:
+        if not line.startswith("#"):
+            if line.startswith("print"):
+                callfile.write(line)
+            else:
+                libfile.write(line)
+    libGenScript.writeLibGenCall(libname, irname)
+    timingScript.writeTimingCall(irname, callname)
+
 
 # Execution begins here
 libGenScript = LibScriptGenerator("make-libs.sh")
 timingScript = TimingScriptGenerator("time-lib.sh", "lib-timing.txt")
 
-script_list = ["test-5000-3-50-50.k", "test-5000-10-100-10.k", "test-5000-10-5-10.k", "test-5000-10-1-0.k", 
-               "test-1000-3-10-50.k", "test-1000-10-100-10.k", "test-1000-10-5-10.k", "test-1000-10-1-0.k",
-               "test-200-3-2-50.k", "test-200-10-40-10.k", "test-200-10-2-10.k", "test-200-10-1-0.k"]
+script_list = [
+    "test-5000-3-50-50.k",
+    "test-5000-10-100-10.k",
+    "test-5000-10-5-10.k",
+    "test-5000-10-1-0.k",
+    "test-1000-3-10-50.k",
+    "test-1000-10-100-10.k",
+    "test-1000-10-5-10.k",
+    "test-1000-10-1-0.k",
+    "test-200-3-2-50.k",
+    "test-200-10-40-10.k",
+    "test-200-10-2-10.k",
+    "test-200-10-1-0.k",
+]
 
 for script in script_list:
-  splitScript(script, libGenScript, timingScript)
+    splitScript(script, libGenScript, timingScript)
 print("All done!")

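Beyond re-indenting splitScript from two-space to four-space bodies, nothing changes behaviorally. One detail worth spelling out: `inputname[:-2]` strips the extension only because every name in script_list ends in ".k":

    # Sketch of the suffix stripping used by splitScript above.
    inputname = "test-5000-3-50-50.k"
    rootname = inputname[:-2]
    assert rootname == "test-5000-3-50-50"
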
diff --git a/llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py b/llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
index c3b4d23c002a1..9045541cbe4e8 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
@@ -5,41 +5,63 @@
 import sys
 import random
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
         self.timeFile = outputname
-        self.shfile = open(scriptname, 'w')
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile = open(scriptname, "w")
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
         """Echo some comments and invoke both versions of toy"""
         rootname = filename
-        if '.' in filename:
-            rootname = filename[:filename.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT (original)\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in filename:
+            rootname = filename[: filename.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %d of %d functions, %d total" >> %s\n'
+            % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT (original)" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=false < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT (lazy)\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=false < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT (lazy)" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true < %s > %s-mcjit-lazy.out 2> %s-mcjit-lazy.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true < %s > %s-mcjit-lazy.out 2> %s-mcjit-lazy.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=false < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=false < %s > %s-jit.out 2> %s-jit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class KScriptGenerator:
     """Used to generate random Kaleidoscope code"""
+
     def __init__(self, filename):
-        self.kfile = open(filename, 'w')
+        self.kfile = open(filename, "w")
         self.nextFuncNum = 1
         self.lastFuncNum = None
         self.callWeighting = 0.1
@@ -85,20 +107,22 @@ def updateCalledFunctionList(self, callee):
                 self.updateCalledFunctionList(subCallee)
 
     def setCallWeighting(self, weight):
-        """ Sets the probably of generating a function call"""
+        """Sets the probably of generating a function call"""
         self.callWeighting = weight
 
     def writeln(self, line):
-        self.kfile.write(line + '\n')
+        self.kfile.write(line + "\n")
 
     def writeComment(self, comment):
-        self.writeln('# ' + comment)
+        self.writeln("# " + comment)
 
     def writeEmptyLine(self):
         self.writeln("")
 
     def writePredefinedFunctions(self):
-        self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
+        self.writeComment(
+            "Define ':' for sequencing: as a low-precedence operator that ignores operands"
+        )
         self.writeComment("and just returns the RHS.")
         self.writeln("def binary : 1 (x y) y;")
         self.writeEmptyLine()
@@ -110,16 +134,18 @@ def writePredefinedFunctions(self):
         self.writeComment("Print the result of a function call")
         self.writeln("def printresult(N Result)")
         self.writeln("  # 'result('")
-        self.writeln("  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
-        self.writeln("  printd(N) :");
+        self.writeln(
+            "  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :"
+        )
+        self.writeln("  printd(N) :")
         self.writeln("  # ') = '")
         self.writeln("  putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
-        self.writeln("  printd(Result) :");
+        self.writeln("  printd(Result) :")
         self.writeln("  printlf();")
         self.writeEmptyLine()
 
     def writeRandomOperation(self, LValue, LHS, RHS):
-        shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
+        shouldCallFunc = self.lastFuncNum > 2 and random.random() < self.callWeighting
         if shouldCallFunc:
             funcToCall = random.randrange(1, self.lastFuncNum - 1)
             self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
@@ -135,7 +161,10 @@ def writeRandomOperation(self, LValue, LHS, RHS):
                 self.writeln("  else if %s < %s then" % (RHS, LHS))
                 self.writeln("    %s = %s %s %s" % (LValue, LHS, operation, RHS))
                 self.writeln("  else")
-                self.writeln("    %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
+                self.writeln(
+                    "    %s = %s %s %f :"
+                    % (LValue, LHS, operation, random.uniform(1, 100))
+                )
             else:
                 self.writeln("  %s = %s %s %s :" % (LValue, LHS, operation, RHS))
 
@@ -171,27 +200,43 @@ def writeFunctionCall(self):
         self.writeComment("Call the last function")
         arg1 = random.uniform(1, 100)
         arg2 = random.uniform(1, 100)
-        self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
+        self.writeln(
+            "printresult(%d, func%d(%f, %f) )"
+            % (self.lastFuncNum, self.lastFuncNum, arg1, arg2)
+        )
         self.writeEmptyLine()
         self.updateCalledFunctionList(self.lastFuncNum)
 
     def writeFinalFunctionCounts(self):
-        self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
+        self.writeComment(
+            "Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum)
+        )
+
 
-def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
-    """ Generate a random Kaleidoscope script based on the given parameters """
+def generateKScript(
+    filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript
+):
+    """Generate a random Kaleidoscope script based on the given parameters"""
     print("Generating " + filename)
-    print("  %d functions, %d elements per function, %d functions between execution" %
-          (numFuncs, elementsPerFunc, funcsBetweenExec))
+    print(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     print("  Call weighting = %f" % callWeighting)
     script = KScriptGenerator(filename)
     script.setCallWeighting(callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeComment("Auto-generated script")
-    script.writeComment("  %d functions, %d elements per function, %d functions between execution"
-                         % (numFuncs, elementsPerFunc, funcsBetweenExec))
+    script.writeComment(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     script.writeComment("  call weighting = %f" % callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeEmptyLine()
     script.writePredefinedFunctions()
     funcsSinceLastExec = 0
@@ -207,20 +252,49 @@ def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callW
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
-    timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
+    print(
+        "  Called %d of %d functions, %d total"
+        % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    )
+    timingScript.writeTimingCall(
+        filename, numFuncs, funcsCalled, script.totalCallsExecuted
+    )
+
 
 # Execution begins here
 random.seed()
 
 timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
 
-dataSets = [(5000, 3,  50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
-            (1000, 3,  10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
-            ( 200, 3,   2, 0.50), ( 200, 10,  40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
+dataSets = [
+    (5000, 3, 50, 0.50),
+    (5000, 10, 100, 0.10),
+    (5000, 10, 5, 0.10),
+    (5000, 10, 1, 0.0),
+    (1000, 3, 10, 0.50),
+    (1000, 10, 100, 0.10),
+    (1000, 10, 5, 0.10),
+    (1000, 10, 1, 0.0),
+    (200, 3, 2, 0.50),
+    (200, 10, 40, 0.10),
+    (200, 10, 2, 0.10),
+    (200, 10, 1, 0.0),
+]
 
 # Generate the code
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
-    filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
-    generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
+    filename = "test-%d-%d-%d-%d.k" % (
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        int(callWeighting * 100),
+    )
+    generateKScript(
+        filename,
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        callWeighting,
+        timingScript,
+    )
 print("All done!")

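A slice-formatting rule shows up throughout this file: when a slice bound is a non-trivial expression, black (following PEP 8) spaces the colon like a binary operator, which is why filename[:filename.rfind('.')] becomes filename[: filename.rfind(".")]. The two forms are equivalent:

    # Equivalent before/after; only the formatting differs.
    filename = "test-200-10-1-0.k"
    rootname = filename[: filename.rfind(".")]
    assert rootname == "test-200-10-1-0"
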
diff --git a/llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py b/llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py
index 61c9a5b169fcd..c920e594e675a 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py
@@ -2,71 +2,108 @@
 
 from __future__ import print_function
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
-        self.shfile = open(scriptname, 'w')
+        self.shfile = open(scriptname, "w")
         self.timeFile = outputname
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, irname, callname):
         """Echo some comments and invoke both versions of toy"""
         rootname = irname
-        if '.' in irname:
-            rootname = irname[:irname.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %s\" >> %s\n" % (callname, irname, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in irname:
+            rootname = irname[: irname.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %s" >> %s\n' % (callname, irname, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT again\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT again" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=false -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=false -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class LibScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, filename):
-        self.shfile = open(filename, 'w')
+        self.shfile = open(filename, "w")
 
     def writeLibGenCall(self, libname, irname):
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=false -dump-modules < %s 2> %s\n" % (libname, irname))
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=false -dump-modules < %s 2> %s\n"
+            % (libname, irname)
+        )
+
 
 def splitScript(inputname, libGenScript, timingScript):
-  rootname = inputname[:-2]
-  libname = rootname + "-lib.k"
-  irname = rootname + "-lib.ir"
-  callname = rootname + "-call.k"
-  infile = open(inputname, "r")
-  libfile = open(libname, "w")
-  callfile = open(callname, "w")
-  print("Splitting %s into %s and %s" % (inputname, callname, libname))
-  for line in infile:
-    if not line.startswith("#"):
-      if line.startswith("print"):
-        callfile.write(line)
-      else:
-        libfile.write(line)
-  libGenScript.writeLibGenCall(libname, irname)
-  timingScript.writeTimingCall(irname, callname)
+    rootname = inputname[:-2]
+    libname = rootname + "-lib.k"
+    irname = rootname + "-lib.ir"
+    callname = rootname + "-call.k"
+    infile = open(inputname, "r")
+    libfile = open(libname, "w")
+    callfile = open(callname, "w")
+    print("Splitting %s into %s and %s" % (inputname, callname, libname))
+    for line in infile:
+        if not line.startswith("#"):
+            if line.startswith("print"):
+                callfile.write(line)
+            else:
+                libfile.write(line)
+    libGenScript.writeLibGenCall(libname, irname)
+    timingScript.writeTimingCall(irname, callname)
+
 
 # Execution begins here
 libGenScript = LibScriptGenerator("make-libs.sh")
 timingScript = TimingScriptGenerator("time-lib.sh", "lib-timing.txt")
 
-script_list = ["test-5000-3-50-50.k", "test-5000-10-100-10.k", "test-5000-10-5-10.k", "test-5000-10-1-0.k", 
-               "test-1000-3-10-50.k", "test-1000-10-100-10.k", "test-1000-10-5-10.k", "test-1000-10-1-0.k",
-               "test-200-3-2-50.k", "test-200-10-40-10.k", "test-200-10-2-10.k", "test-200-10-1-0.k"]
+script_list = [
+    "test-5000-3-50-50.k",
+    "test-5000-10-100-10.k",
+    "test-5000-10-5-10.k",
+    "test-5000-10-1-0.k",
+    "test-1000-3-10-50.k",
+    "test-1000-10-100-10.k",
+    "test-1000-10-5-10.k",
+    "test-1000-10-1-0.k",
+    "test-200-3-2-50.k",
+    "test-200-10-40-10.k",
+    "test-200-10-2-10.k",
+    "test-200-10-1-0.k",
+]
 
 for script in script_list:
-  splitScript(script, libGenScript, timingScript)
+    splitScript(script, libGenScript, timingScript)
 print("All done!")

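The quote flips here go the other way: black prefers double quotes but switches to single quotes when that removes escapes, which is why "echo \"\" > %s\n" became 'echo "" > %s\n'. A sketch, assuming `black` is importable:

    import black

    # black switches this literal to single quotes because that removes the
    # escapes on the embedded double quotes.
    src = 'msg = "echo \\"\\" > %s\\n"\n'
    print(black.format_str(src, mode=black.Mode()), end="")
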
diff --git a/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py b/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
index 87bbfbf32bda3..2e39f103cd9f3 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
@@ -5,36 +5,53 @@
 import sys
 import random
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
         self.timeFile = outputname
-        self.shfile = open(scriptname, 'w')
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile = open(scriptname, "w")
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
         """Echo some comments and invoke both versions of toy"""
         rootname = filename
-        if '.' in filename:
-            rootname = filename[:filename.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in filename:
+            rootname = filename[: filename.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %d of %d functions, %d total" >> %s\n'
+            % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy-jit < %s > %s-jit.out 2> %s-jit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class KScriptGenerator:
     """Used to generate random Kaleidoscope code"""
+
     def __init__(self, filename):
-        self.kfile = open(filename, 'w')
+        self.kfile = open(filename, "w")
         self.nextFuncNum = 1
         self.lastFuncNum = None
         self.callWeighting = 0.1
@@ -80,20 +97,22 @@ def updateCalledFunctionList(self, callee):
                 self.updateCalledFunctionList(subCallee)
 
     def setCallWeighting(self, weight):
-        """ Sets the probably of generating a function call"""
+        """Sets the probably of generating a function call"""
         self.callWeighting = weight
 
     def writeln(self, line):
-        self.kfile.write(line + '\n')
+        self.kfile.write(line + "\n")
 
     def writeComment(self, comment):
-        self.writeln('# ' + comment)
+        self.writeln("# " + comment)
 
     def writeEmptyLine(self):
         self.writeln("")
 
     def writePredefinedFunctions(self):
-        self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
+        self.writeComment(
+            "Define ':' for sequencing: as a low-precedence operator that ignores operands"
+        )
         self.writeComment("and just returns the RHS.")
         self.writeln("def binary : 1 (x y) y;")
         self.writeEmptyLine()
@@ -105,16 +124,18 @@ def writePredefinedFunctions(self):
         self.writeComment("Print the result of a function call")
         self.writeln("def printresult(N Result)")
         self.writeln("  # 'result('")
-        self.writeln("  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
-        self.writeln("  printd(N) :");
+        self.writeln(
+            "  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :"
+        )
+        self.writeln("  printd(N) :")
         self.writeln("  # ') = '")
         self.writeln("  putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
-        self.writeln("  printd(Result) :");
+        self.writeln("  printd(Result) :")
         self.writeln("  printlf();")
         self.writeEmptyLine()
 
     def writeRandomOperation(self, LValue, LHS, RHS):
-        shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
+        shouldCallFunc = self.lastFuncNum > 2 and random.random() < self.callWeighting
         if shouldCallFunc:
             funcToCall = random.randrange(1, self.lastFuncNum - 1)
             self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
@@ -130,7 +151,10 @@ def writeRandomOperation(self, LValue, LHS, RHS):
                 self.writeln("  else if %s < %s then" % (RHS, LHS))
                 self.writeln("    %s = %s %s %s" % (LValue, LHS, operation, RHS))
                 self.writeln("  else")
-                self.writeln("    %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
+                self.writeln(
+                    "    %s = %s %s %f :"
+                    % (LValue, LHS, operation, random.uniform(1, 100))
+                )
             else:
                 self.writeln("  %s = %s %s %s :" % (LValue, LHS, operation, RHS))
 
@@ -166,27 +190,43 @@ def writeFunctionCall(self):
         self.writeComment("Call the last function")
         arg1 = random.uniform(1, 100)
         arg2 = random.uniform(1, 100)
-        self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
+        self.writeln(
+            "printresult(%d, func%d(%f, %f) )"
+            % (self.lastFuncNum, self.lastFuncNum, arg1, arg2)
+        )
         self.writeEmptyLine()
         self.updateCalledFunctionList(self.lastFuncNum)
 
     def writeFinalFunctionCounts(self):
-        self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
+        self.writeComment(
+            "Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum)
+        )
+
 
-def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
-    """ Generate a random Kaleidoscope script based on the given parameters """
+def generateKScript(
+    filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript
+):
+    """Generate a random Kaleidoscope script based on the given parameters"""
     print("Generating " + filename)
-    print("  %d functions, %d elements per function, %d functions between execution" %
-          (numFuncs, elementsPerFunc, funcsBetweenExec))
+    print(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     print("  Call weighting = %f" % callWeighting)
     script = KScriptGenerator(filename)
     script.setCallWeighting(callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeComment("Auto-generated script")
-    script.writeComment("  %d functions, %d elements per function, %d functions between execution"
-                         % (numFuncs, elementsPerFunc, funcsBetweenExec))
+    script.writeComment(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     script.writeComment("  call weighting = %f" % callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeEmptyLine()
     script.writePredefinedFunctions()
     funcsSinceLastExec = 0
@@ -202,20 +242,49 @@ def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callW
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
-    timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
+    print(
+        "  Called %d of %d functions, %d total"
+        % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    )
+    timingScript.writeTimingCall(
+        filename, numFuncs, funcsCalled, script.totalCallsExecuted
+    )
+
 
 # Execution begins here
 random.seed()
 
 timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
 
-dataSets = [(5000, 3,  50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
-            (1000, 3,  10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
-            ( 200, 3,   2, 0.50), ( 200, 10,  40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
+dataSets = [
+    (5000, 3, 50, 0.50),
+    (5000, 10, 100, 0.10),
+    (5000, 10, 5, 0.10),
+    (5000, 10, 1, 0.0),
+    (1000, 3, 10, 0.50),
+    (1000, 10, 100, 0.10),
+    (1000, 10, 5, 0.10),
+    (1000, 10, 1, 0.0),
+    (200, 3, 2, 0.50),
+    (200, 10, 40, 0.10),
+    (200, 10, 2, 0.10),
+    (200, 10, 1, 0.0),
+]
 
 # Generate the code
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
-    filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
-    generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
+    filename = "test-%d-%d-%d-%d.k" % (
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        int(callWeighting * 100),
+    )
+    generateKScript(
+        filename,
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        callWeighting,
+        timingScript,
+    )
 print("All done!")

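A side note on the dataSets hunk above: once a collection no longer fits on one line, black explodes it to one element per line and adds a trailing comma after the last element (the "magic trailing comma"), which keeps the exploded layout stable on later runs. A minimal sketch of the resulting shape, on a shortened, hypothetical list:

    # Result of running black on a list that exceeds the line limit:
    dataSets = [
        (5000, 3, 50, 0.50),
        (200, 10, 1, 0.0),  # black keeps/adds this trailing comma
    ]
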
diff --git a/llvm/lib/Analysis/models/gen-inline-oz-test-model.py b/llvm/lib/Analysis/models/gen-inline-oz-test-model.py
index d8737f26e0d88..4898509ea544f 100644
--- a/llvm/lib/Analysis/models/gen-inline-oz-test-model.py
+++ b/llvm/lib/Analysis/models/gen-inline-oz-test-model.py
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-POLICY_DECISION_LABEL = 'inlining_decision'
+POLICY_DECISION_LABEL = "inlining_decision"
 POLICY_OUTPUT_SPEC = """
 [
     {
@@ -31,106 +31,110 @@
 
 # pylint: disable=g-complex-comprehension
 def get_input_signature():
-  """Returns the list of features for LLVM inlining."""
-  # int64 features
-  inputs = [
-      tf.TensorSpec(dtype=tf.int64, shape=(), name=key) for key in [
-          'caller_basic_block_count',
-          'caller_conditionally_executed_blocks',
-          'caller_users',
-          'callee_basic_block_count',
-          'callee_conditionally_executed_blocks',
-          'callee_users',
-          'nr_ctant_params',
-          'node_count',
-          'edge_count',
-          'callsite_height',
-          'cost_estimate',
-          'inlining_default',
-          'sroa_savings',
-          'sroa_losses',
-          'load_elimination',
-          'call_penalty',
-          'call_argument_setup',
-          'load_relative_intrinsic',
-          'lowered_call_arg_setup',
-          'indirect_call_penalty',
-          'jump_table_penalty',
-          'case_cluster_penalty',
-          'switch_penalty',
-          'unsimplified_common_instructions',
-          'num_loops',
-          'dead_blocks',
-          'simplified_instructions',
-          'constant_args',
-          'constant_offset_ptr_args',
-          'callsite_cost',
-          'cold_cc_penalty',
-          'last_call_to_static_bonus',
-          'is_multiple_blocks',
-          'nested_inlines',
-          'nested_inline_cost_estimate',
-          'threshold',
-      ]
-  ]
-
-  # float32 features
-  inputs.extend([
-      tf.TensorSpec(dtype=tf.float32, shape=(), name=key)
-      for key in ['discount', 'reward']
-  ])
-
-  # int32 features
-  inputs.extend([
-      tf.TensorSpec(dtype=tf.int32, shape=(), name=key)
-      for key in ['step_type']
-  ])
-  return inputs
+    """Returns the list of features for LLVM inlining."""
+    # int64 features
+    inputs = [
+        tf.TensorSpec(dtype=tf.int64, shape=(), name=key)
+        for key in [
+            "caller_basic_block_count",
+            "caller_conditionally_executed_blocks",
+            "caller_users",
+            "callee_basic_block_count",
+            "callee_conditionally_executed_blocks",
+            "callee_users",
+            "nr_ctant_params",
+            "node_count",
+            "edge_count",
+            "callsite_height",
+            "cost_estimate",
+            "inlining_default",
+            "sroa_savings",
+            "sroa_losses",
+            "load_elimination",
+            "call_penalty",
+            "call_argument_setup",
+            "load_relative_intrinsic",
+            "lowered_call_arg_setup",
+            "indirect_call_penalty",
+            "jump_table_penalty",
+            "case_cluster_penalty",
+            "switch_penalty",
+            "unsimplified_common_instructions",
+            "num_loops",
+            "dead_blocks",
+            "simplified_instructions",
+            "constant_args",
+            "constant_offset_ptr_args",
+            "callsite_cost",
+            "cold_cc_penalty",
+            "last_call_to_static_bonus",
+            "is_multiple_blocks",
+            "nested_inlines",
+            "nested_inline_cost_estimate",
+            "threshold",
+        ]
+    ]
+
+    # float32 features
+    inputs.extend(
+        [
+            tf.TensorSpec(dtype=tf.float32, shape=(), name=key)
+            for key in ["discount", "reward"]
+        ]
+    )
+
+    # int32 features
+    inputs.extend(
+        [tf.TensorSpec(dtype=tf.int32, shape=(), name=key) for key in ["step_type"]]
+    )
+    return inputs
 
 
 def get_output_signature():
-  return POLICY_DECISION_LABEL
+    return POLICY_DECISION_LABEL
 
 
 def get_output_spec():
-  return POLICY_OUTPUT_SPEC
+    return POLICY_OUTPUT_SPEC
+
 
 def get_output_spec_path(path):
-  return os.path.join(path, 'output_spec.json')
+    return os.path.join(path, "output_spec.json")
 
 
 def build_mock_model(path, signature):
-  """Build and save the mock model with the given signature"""
-  module = tf.Module()
-  def action(*inputs):
-    return {signature['output']: tf.constant(value=1, dtype=tf.int64)}
+    """Build and save the mock model with the given signature"""
+    module = tf.Module()
+
+    def action(*inputs):
+        return {signature["output"]: tf.constant(value=1, dtype=tf.int64)}
 
-  module.action = tf.function()(action)
-  action = {'action': module.action.get_concrete_function(signature['inputs'])}
-  tf.saved_model.save(module, path, signatures=action)
+    module.action = tf.function()(action)
+    action = {"action": module.action.get_concrete_function(signature["inputs"])}
+    tf.saved_model.save(module, path, signatures=action)
 
-  output_spec_path = get_output_spec_path(path)
-  with open(output_spec_path, 'w') as f:
-    print(f'Writing output spec to {output_spec_path}.')
-    f.write(signature['output_spec'])
+    output_spec_path = get_output_spec_path(path)
+    with open(output_spec_path, "w") as f:
+        print(f"Writing output spec to {output_spec_path}.")
+        f.write(signature["output_spec"])
 
 
 def get_signature():
-  return {
-      'inputs': get_input_signature(),
-      'output': get_output_signature(),
-      'output_spec': get_output_spec()
-  }
+    return {
+        "inputs": get_input_signature(),
+        "output": get_output_signature(),
+        "output_spec": get_output_spec(),
+    }
 
 
 def main(argv):
-  assert len(argv) == 2
-  model_path = argv[1]
+    assert len(argv) == 2
+    model_path = argv[1]
 
-  print(f'Output model to: [{argv[1]}]')
-  signature = get_signature()
-  build_mock_model(model_path, signature)
+    print(f"Output model to: [{argv[1]}]")
+    signature = get_signature()
+    build_mock_model(model_path, signature)
 
 
-if __name__ == '__main__':
-  main(sys.argv)
+if __name__ == "__main__":
+    main(sys.argv)

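For reference, the script takes exactly one argument: the directory where the mock SavedModel and output_spec.json are written. A hedged sketch of invoking it from a top-level llvm-project checkout (the /tmp path is illustrative, and tensorflow must be installed):

    import subprocess
    import sys

    # Writes the mock saved model plus output_spec.json under the given path.
    subprocess.run(
        [
            sys.executable,
            "llvm/lib/Analysis/models/gen-inline-oz-test-model.py",
            "/tmp/inline-model",  # illustrative output directory
        ],
        check=True,
    )
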
diff --git a/llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py b/llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py
index e41e71a09d828..5af2fb2878b5b 100644
--- a/llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py
+++ b/llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py
@@ -6,7 +6,8 @@
 import os
 import sys
 import tensorflow as tf
-POLICY_DECISION_LABEL = 'index_to_evict'
+
+POLICY_DECISION_LABEL = "index_to_evict"
 POLICY_OUTPUT_SPEC = """
 [
     {
@@ -22,49 +23,50 @@
     }
 ]
 """
-PER_REGISTER_FEATURE_LIST = ['mask']
+PER_REGISTER_FEATURE_LIST = ["mask"]
 NUM_REGISTERS = 33
 
 
 def get_input_signature():
-  """Returns (time_step_spec, action_spec) for LLVM register allocation."""
-  inputs = dict(
-      (key, tf.TensorSpec(dtype=tf.int64, shape=(NUM_REGISTERS), name=key))
-      for key in PER_REGISTER_FEATURE_LIST)
-  return inputs
+    """Returns (time_step_spec, action_spec) for LLVM register allocation."""
+    inputs = dict(
+        (key, tf.TensorSpec(dtype=tf.int64, shape=(NUM_REGISTERS), name=key))
+        for key in PER_REGISTER_FEATURE_LIST
+    )
+    return inputs
 
 
 def get_output_spec_path(path):
-  return os.path.join(path, 'output_spec.json')
+    return os.path.join(path, "output_spec.json")
 
 
 def build_mock_model(path):
-  """Build and save the mock model with the given signature."""
-  module = tf.Module()
-  # We have to set this useless variable in order for the TF C API to correctly
-  # intake it
-  module.var = tf.Variable(0, dtype=tf.int64)
+    """Build and save the mock model with the given signature."""
+    module = tf.Module()
+    # We have to set this useless variable in order for the TF C API to correctly
+    # ingest it
+    module.var = tf.Variable(0, dtype=tf.int64)
+
+    def action(*inputs):
+        result = (
+            tf.math.argmax(tf.cast(inputs[0]["mask"], tf.int32), axis=-1) + module.var
+        )
+        return {POLICY_DECISION_LABEL: result}
 
-  def action(*inputs):
-    result = tf.math.argmax(
-        tf.cast(inputs[0]['mask'], tf.int32), axis=-1) + module.var
-    return {POLICY_DECISION_LABEL: result}
-  module.action = tf.function()(action)
-  action = {
-      'action': module.action.get_concrete_function(get_input_signature())
-  }
-  tf.saved_model.save(module, path, signatures=action)
-  output_spec_path = get_output_spec_path(path)
-  with open(output_spec_path, 'w') as f:
-    print(f'Writing output spec to {output_spec_path}.')
-    f.write(POLICY_OUTPUT_SPEC)
+    module.action = tf.function()(action)
+    action = {"action": module.action.get_concrete_function(get_input_signature())}
+    tf.saved_model.save(module, path, signatures=action)
+    output_spec_path = get_output_spec_path(path)
+    with open(output_spec_path, "w") as f:
+        print(f"Writing output spec to {output_spec_path}.")
+        f.write(POLICY_OUTPUT_SPEC)
 
 
 def main(argv):
-  assert len(argv) == 2
-  model_path = argv[1]
-  build_mock_model(model_path)
+    assert len(argv) == 2
+    model_path = argv[1]
+    build_mock_model(model_path)
 
 
-if __name__ == '__main__':
-  main(sys.argv)
+if __name__ == "__main__":
+    main(sys.argv)

diff --git a/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py b/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
index 81de2c70565a8..889ddae48b1ff 100644
--- a/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
+++ b/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
@@ -7,7 +7,8 @@
 import os
 import sys
 import tensorflow as tf
-POLICY_DECISION_LABEL = 'priority'
+
+POLICY_DECISION_LABEL = "priority"
 POLICY_OUTPUT_SPEC = """
 [
     {
@@ -23,73 +24,83 @@
     }
 ]
 """
-PER_LIVEINTERVAL_INT64_FEATURE_LIST = [
-    'li_size', 'stage'
-]
-PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST = ['weight'
-]
-PER_LIVEINTERVAL_FEATURE_LIST = PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST + \
-    PER_LIVEINTERVAL_INT64_FEATURE_LIST
-CONTEXT_FEATURE_LIST =  ('discount', 'reward', 'step_type')
+PER_LIVEINTERVAL_INT64_FEATURE_LIST = ["li_size", "stage"]
+PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST = ["weight"]
+PER_LIVEINTERVAL_FEATURE_LIST = (
+    PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST + PER_LIVEINTERVAL_INT64_FEATURE_LIST
+)
+CONTEXT_FEATURE_LIST = ("discount", "reward", "step_type")
 
 
 def get_input_signature():
-   """Returns (time_step_spec, action_spec) for LLVM register allocation."""
-   inputs = dict(
-       (key, tf.TensorSpec(dtype=tf.int64, shape=(), name=key))
-       for key in PER_LIVEINTERVAL_INT64_FEATURE_LIST)
-   inputs.update(
-       dict((key,
-             tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
-            for key in PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST))
-   inputs.update(
-       dict((key, tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
-            for key in ['discount', 'reward']))
-   inputs.update(
-       dict((key, tf.TensorSpec(dtype=tf.int32, shape=(), name=key))
-            for key in ['step_type']))
-   return inputs
+    """Returns (time_step_spec, action_spec) for LLVM register allocation."""
+    inputs = dict(
+        (key, tf.TensorSpec(dtype=tf.int64, shape=(), name=key))
+        for key in PER_LIVEINTERVAL_INT64_FEATURE_LIST
+    )
+    inputs.update(
+        dict(
+            (key, tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
+            for key in PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST
+        )
+    )
+    inputs.update(
+        dict(
+            (key, tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
+            for key in ["discount", "reward"]
+        )
+    )
+    inputs.update(
+        dict(
+            (key, tf.TensorSpec(dtype=tf.int32, shape=(), name=key))
+            for key in ["step_type"]
+        )
+    )
+    return inputs
 
 
 def get_output_spec_path(path):
-   return os.path.join(path, 'output_spec.json')
+    return os.path.join(path, "output_spec.json")
 
 
 def build_mock_model(path):
-   """Build and save the mock model with the given signature."""
-   module = tf.Module()
-   # We have to set this useless variable in order for the TF C API to correctly
-   # ingest it
-   module.var = tf.Variable(0, dtype=tf.float32)
+    """Build and save the mock model with the given signature."""
+    module = tf.Module()
+    # We have to set this useless variable in order for the TF C API to correctly
+    # ingest it
+    module.var = tf.Variable(0, dtype=tf.float32)
+
+    def action(*inputs):
+        s1 = tf.reduce_sum(
+            [
+                tf.cast(inputs[0][key], tf.float32)
+                for key in PER_LIVEINTERVAL_FEATURE_LIST
+            ],
+            axis=0,
+        )
+        s2 = tf.reduce_sum(
+            [tf.cast(inputs[0][key], tf.float32) for key in CONTEXT_FEATURE_LIST]
+        )
+        # Add a large number so s won't be 0.
+        s = s1 + s2
+        result = s + module.var
+        return {POLICY_DECISION_LABEL: result}
 
-   def action(*inputs):
-     s1 = tf.reduce_sum([
-         tf.cast(inputs[0][key], tf.float32) for key in PER_LIVEINTERVAL_FEATURE_LIST
-     ],
-         axis=0)
-     s2 = tf.reduce_sum(
-         [tf.cast(inputs[0][key], tf.float32) for key in CONTEXT_FEATURE_LIST])
-     # Add a large number so s won't be 0.
-     s = s1 + s2
-     result = s + module.var
-     return {POLICY_DECISION_LABEL: result}
-   module.action = tf.function()(action)
-   action = {
-       'action': module.action.get_concrete_function(get_input_signature())
-   }
+    module.action = tf.function()(action)
+    action = {"action": module.action.get_concrete_function(get_input_signature())}
 
-   tf.saved_model.save(module, path, signatures=action)
-   output_spec_path = get_output_spec_path(path)
-   with open(output_spec_path, 'w') as f:
-     print(f'Writing output spec to {output_spec_path}.')
-     f.write(POLICY_OUTPUT_SPEC)
+    tf.saved_model.save(module, path, signatures=action)
+    output_spec_path = get_output_spec_path(path)
+    with open(output_spec_path, "w") as f:
+        print(f"Writing output spec to {output_spec_path}.")
+        f.write(POLICY_OUTPUT_SPEC)
 
 
 def main(argv):
-   assert len(argv) == 2
-   model_path = argv[1]
-   build_mock_model(model_path)
+    assert len(argv) == 2
+    model_path = argv[1]
+    build_mock_model(model_path)
 
 
-if __name__ == '__main__':
-   main(sys.argv)
+if __name__ == "__main__":
+    main(sys.argv)

diff --git a/llvm/lib/Analysis/models/interactive_host.py b/llvm/lib/Analysis/models/interactive_host.py
index 79c74ac3cd881..759c791614a1d 100644
--- a/llvm/lib/Analysis/models/interactive_host.py
+++ b/llvm/lib/Analysis/models/interactive_host.py
@@ -20,68 +20,76 @@
 from typing import Callable, List, Union
 
 
-def send(f: io.BufferedWriter, value: Union[int, float],
-         spec: log_reader.TensorSpec):
-  """Send the `value` - currently just a scalar - formatted as per `spec`."""
+def send(f: io.BufferedWriter, value: Union[int, float], spec: log_reader.TensorSpec):
+    """Send the `value` - currently just a scalar - formatted as per `spec`."""
 
-  # just int64 for now
-  assert (spec.element_type == ctypes.c_int64)
-  to_send = ctypes.c_int64(int(value))
-  assert f.write(bytes(to_send)) == ctypes.sizeof(
-      spec.element_type) * math.prod(spec.shape)
-  f.flush()
+    # just int64 for now
+    assert spec.element_type == ctypes.c_int64
+    to_send = ctypes.c_int64(int(value))
+    assert f.write(bytes(to_send)) == ctypes.sizeof(spec.element_type) * math.prod(
+        spec.shape
+    )
+    f.flush()
 
 
-def run_interactive(temp_rootname: str,
-                    make_response: Callable[[List[log_reader.TensorValue]],
-                                            Union[int, float]],
-                    process_and_args: List[str]):
-  """Host the compiler.
-  Args:
-    temp_rootname: the base file name from which to construct the 2 pipes for
-    communicating with the compiler.
-    make_response: a function that, given the current tensor values, provides a
-    response.
-    process_and_args: the full command line for the compiler. It is assumed it
-    contains a flag pointing to `temp_rootname` so that the InteractiveModeRunner
-    would attempt communication on the same pair as this function opens.
+def run_interactive(
+    temp_rootname: str,
+    make_response: Callable[[List[log_reader.TensorValue]], Union[int, float]],
+    process_and_args: List[str],
+):
+    """Host the compiler.
+    Args:
+      temp_rootname: the base file name from which to construct the 2 pipes for
+      communicating with the compiler.
+      make_response: a function that, given the current tensor values, provides a
+      response.
+      process_and_args: the full command line for the compiler. It is assumed it
+      contains a flag pointing to `temp_rootname` so that the InteractiveModeRunner
+      would attempt communication on the same pair as this function opens.
 
-  This function sets up the communication with the compiler - via 2 files named
-  `temp_rootname`.in and `temp_rootname`.out - prints out the received features,
-  and sends the advice it gets from `make_response` back to the compiler.
-  It's used for testing, and also to showcase how to set up communication in an
-  interactive ML ("gym") environment.
-  """
-  to_compiler = temp_rootname + ".in"
-  from_compiler = temp_rootname + ".out"
-  try:
-    os.mkfifo(to_compiler, 0o666)
-    os.mkfifo(from_compiler, 0o666)
-    compiler_proc = subprocess.Popen(
-        process_and_args, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL)
-    with io.BufferedWriter(io.FileIO(to_compiler, 'wb')) as tc:
-      with io.BufferedReader(io.FileIO(from_compiler, 'rb')) as fc:
-        tensor_specs, _, advice_spec = log_reader.read_header(fc)
-        context = None
-        while compiler_proc.poll() is None:
-          next_event = fc.readline()
-          if not next_event:
-            break
-          last_context, observation_id, features, _ = log_reader.read_one_observation(
-              context, next_event, fc, tensor_specs, None)
-          if last_context != context:
-            print(f'context: {last_context}')
-          context = last_context
-          print(f'observation: {observation_id}')
-          tensor_values = []
-          for fv in features:
-            log_reader.pretty_print_tensor_value(fv)
-            tensor_values.append(fv)
-          send(tc, make_response(tensor_values), advice_spec)
-    _, err = compiler_proc.communicate()
-    print(err.decode('utf-8'))
-    compiler_proc.wait()
+    This function sets up the communication with the compiler - via 2 files named
+    `temp_rootname`.in and `temp_rootname`.out - prints out the received features,
+    and sends the advice it gets from `make_response` back to the compiler.
+    It's used for testing, and also to showcase how to set up communication in an
+    interactive ML ("gym") environment.
+    """
+    to_compiler = temp_rootname + ".in"
+    from_compiler = temp_rootname + ".out"
+    try:
+        os.mkfifo(to_compiler, 0o666)
+        os.mkfifo(from_compiler, 0o666)
+        compiler_proc = subprocess.Popen(
+            process_and_args, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL
+        )
+        with io.BufferedWriter(io.FileIO(to_compiler, "wb")) as tc:
+            with io.BufferedReader(io.FileIO(from_compiler, "rb")) as fc:
+                tensor_specs, _, advice_spec = log_reader.read_header(fc)
+                context = None
+                while compiler_proc.poll() is None:
+                    next_event = fc.readline()
+                    if not next_event:
+                        break
+                    (
+                        last_context,
+                        observation_id,
+                        features,
+                        _,
+                    ) = log_reader.read_one_observation(
+                        context, next_event, fc, tensor_specs, None
+                    )
+                    if last_context != context:
+                        print(f"context: {last_context}")
+                    context = last_context
+                    print(f"observation: {observation_id}")
+                    tensor_values = []
+                    for fv in features:
+                        log_reader.pretty_print_tensor_value(fv)
+                        tensor_values.append(fv)
+                    send(tc, make_response(tensor_values), advice_spec)
+        _, err = compiler_proc.communicate()
+        print(err.decode("utf-8"))
+        compiler_proc.wait()
 
-  finally:
-    os.unlink(to_compiler)
-    os.unlink(from_compiler)
+    finally:
+        os.unlink(to_compiler)
+        os.unlink(from_compiler)

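As a usage sketch (mirroring the interactive_main.py tests further down): a caller passes the pipe base name, a response callback, and the compiler command line. The respond policy below is hypothetical, and interactive_host is assumed to be importable:

    import sys

    import interactive_host

    def respond(tensor_values):
        # Hypothetical policy: ignore the features and always advise 0.
        return 0

    if __name__ == "__main__":
        # argv[1] is the pipe base name; the remaining arguments form the
        # compiler command line, which must reference the same base name.
        interactive_host.run_interactive(sys.argv[1], respond, sys.argv[2:])
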
diff --git a/llvm/lib/Analysis/models/log_reader.py b/llvm/lib/Analysis/models/log_reader.py
index be0f218d4ae77..7080276a0d85d 100644
--- a/llvm/lib/Analysis/models/log_reader.py
+++ b/llvm/lib/Analysis/models/log_reader.py
@@ -11,128 +11,130 @@
 from typing import List, Optional
 
 _element_types = {
-    'float': ctypes.c_float,
-    'double': ctypes.c_double,
-    'int8_t': ctypes.c_int8,
-    'uint8_t': ctypes.c_uint8,
-    'int16_t': ctypes.c_int16,
-    'uint16_t': ctypes.c_uint16,
-    'int32_t': ctypes.c_int32,
-    'uint32_t': ctypes.c_uint32,
-    'int64_t': ctypes.c_int64,
-    'uint64_t': ctypes.c_uint64
+    "float": ctypes.c_float,
+    "double": ctypes.c_double,
+    "int8_t": ctypes.c_int8,
+    "uint8_t": ctypes.c_uint8,
+    "int16_t": ctypes.c_int16,
+    "uint16_t": ctypes.c_uint16,
+    "int32_t": ctypes.c_int32,
+    "uint32_t": ctypes.c_uint32,
+    "int64_t": ctypes.c_int64,
+    "uint64_t": ctypes.c_uint64,
 }
 
 
 @dataclasses.dataclass(frozen=True)
 class TensorSpec:
-  name: str
-  port: int
-  shape: List[int]
-  element_type: type
-
-  @staticmethod
-  def from_dict(d: dict):
-    name = d['name']
-    port = d['port']
-    shape = [int(e) for e in d['shape']]
-    element_type_str = d['type']
-    if element_type_str not in _element_types:
-      raise ValueError(f'unknown type: {element_type_str}')
-    return TensorSpec(
-        name=name,
-        port=port,
-        shape=shape,
-        element_type=_element_types[element_type_str])
+    name: str
+    port: int
+    shape: List[int]
+    element_type: type
+
+    @staticmethod
+    def from_dict(d: dict):
+        name = d["name"]
+        port = d["port"]
+        shape = [int(e) for e in d["shape"]]
+        element_type_str = d["type"]
+        if element_type_str not in _element_types:
+            raise ValueError(f"uknown type: {element_type_str}")
+        return TensorSpec(
+            name=name,
+            port=port,
+            shape=shape,
+            element_type=_element_types[element_type_str],
+        )
 
 
 class TensorValue:
+    def __init__(self, spec: TensorSpec, buffer: bytes):
+        self._spec = spec
+        self._buffer = buffer
+        self._view = ctypes.cast(self._buffer, ctypes.POINTER(self._spec.element_type))
+        self._len = math.prod(self._spec.shape)
 
-  def __init__(self, spec: TensorSpec, buffer: bytes):
-    self._spec = spec
-    self._buffer = buffer
-    self._view = ctypes.cast(self._buffer,
-                             ctypes.POINTER(self._spec.element_type))
-    self._len = math.prod(self._spec.shape)
+    def spec(self) -> TensorSpec:
+        return self._spec
 
-  def spec(self) -> TensorSpec:
-    return self._spec
+    def __len__(self) -> int:
+        return self._len
 
-  def __len__(self) -> int:
-    return self._len
-
-  def __getitem__(self, index):
-    if index < 0 or index >= self._len:
-      raise IndexError(f'Index {index} out of range [0..{self._len})')
-    return self._view[index]
+    def __getitem__(self, index):
+        if index < 0 or index >= self._len:
+            raise IndexError(f"Index {index} out of range [0..{self._len})")
+        return self._view[index]
 
 
 def read_tensor(fs: io.BufferedReader, ts: TensorSpec) -> TensorValue:
-  size = math.prod(ts.shape) * ctypes.sizeof(ts.element_type)
-  data = fs.read(size)
-  return TensorValue(ts, data)
+    size = math.prod(ts.shape) * ctypes.sizeof(ts.element_type)
+    data = fs.read(size)
+    return TensorValue(ts, data)
 
 
 def pretty_print_tensor_value(tv: TensorValue):
-  print(f'{tv.spec().name}: {",".join([str(v) for v in tv])}')
+    print(f'{tv.spec().name}: {",".join([str(v) for v in tv])}')
 
 
 def read_header(f: io.BufferedReader):
-  header = json.loads(f.readline())
-  tensor_specs = [TensorSpec.from_dict(ts) for ts in header['features']]
-  score_spec = TensorSpec.from_dict(
-      header['score']) if 'score' in header else None
-  advice_spec = TensorSpec.from_dict(
-      header['advice']) if 'advice' in header else None
-  return tensor_specs, score_spec, advice_spec
-
-
-def read_one_observation(context: Optional[str], event_str: str,
-                         f: io.BufferedReader, tensor_specs: List[TensorSpec],
-                         score_spec: Optional[TensorSpec]):
-  event = json.loads(event_str)
-  if 'context' in event:
-    context = event['context']
-    event = json.loads(f.readline())
-  observation_id = int(event['observation'])
-  features = []
-  for ts in tensor_specs:
-    features.append(read_tensor(f, ts))
-  f.readline()
-  score = None
-  if score_spec is not None:
-    score_header = json.loads(f.readline())
-    assert int(score_header['outcome']) == observation_id
-    score = read_tensor(f, score_spec)
+    header = json.loads(f.readline())
+    tensor_specs = [TensorSpec.from_dict(ts) for ts in header["features"]]
+    score_spec = TensorSpec.from_dict(header["score"]) if "score" in header else None
+    advice_spec = TensorSpec.from_dict(header["advice"]) if "advice" in header else None
+    return tensor_specs, score_spec, advice_spec
+
+
+def read_one_observation(
+    context: Optional[str],
+    event_str: str,
+    f: io.BufferedReader,
+    tensor_specs: List[TensorSpec],
+    score_spec: Optional[TensorSpec],
+):
+    event = json.loads(event_str)
+    if "context" in event:
+        context = event["context"]
+        event = json.loads(f.readline())
+    observation_id = int(event["observation"])
+    features = []
+    for ts in tensor_specs:
+        features.append(read_tensor(f, ts))
     f.readline()
-  return context, observation_id, features, score
+    score = None
+    if score_spec is not None:
+        score_header = json.loads(f.readline())
+        assert int(score_header["outcome"]) == observation_id
+        score = read_tensor(f, score_spec)
+        f.readline()
+    return context, observation_id, features, score
 
 
 def read_stream(fname: str):
-  with io.BufferedReader(io.FileIO(fname, 'rb')) as f:
-    tensor_specs, score_spec, _ = read_header(f)
-    context = None
-    while True:
-      event_str = f.readline()
-      if not event_str:
-        break
-      context, observation_id, features, score = read_one_observation(
-          context, event_str, f, tensor_specs, score_spec)
-      yield context, observation_id, features, score
+    with io.BufferedReader(io.FileIO(fname, "rb")) as f:
+        tensor_specs, score_spec, _ = read_header(f)
+        context = None
+        while True:
+            event_str = f.readline()
+            if not event_str:
+                break
+            context, observation_id, features, score = read_one_observation(
+                context, event_str, f, tensor_specs, score_spec
+            )
+            yield context, observation_id, features, score
 
 
 def main(args):
-  last_context = None
-  for ctx, obs_id, features, score in read_stream(args[1]):
-    if last_context != ctx:
-      print(f'context: {ctx}')
-      last_context = ctx
-    print(f'observation: {obs_id}')
-    for fv in features:
-      pretty_print_tensor_value(fv)
-    if score:
-      pretty_print_tensor_value(score)
-
-
-if __name__ == '__main__':
-  main(sys.argv)
+    last_context = None
+    for ctx, obs_id, features, score in read_stream(args[1]):
+        if last_context != ctx:
+            print(f"context: {ctx}")
+            last_context = ctx
+        print(f"observation: {obs_id}")
+        for fv in features:
+            pretty_print_tensor_value(fv)
+        if score:
+            pretty_print_tensor_value(score)
+
+
+if __name__ == "__main__":
+    main(sys.argv)

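read_stream yields one observation at a time as a (context, observation_id, features, score) tuple. A minimal consumer, assuming log_reader is importable and a log file named inline.log exists:

    import log_reader

    for context, obs_id, features, score in log_reader.read_stream("inline.log"):
        print(f"{context} / {obs_id}: {len(features)} feature tensor(s)")
        for fv in features:
            log_reader.pretty_print_tensor_value(fv)
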
diff --git a/llvm/lib/Analysis/models/saved-model-to-tflite.py b/llvm/lib/Analysis/models/saved-model-to-tflite.py
index e9d45fdf983b7..9c83718732945 100644
--- a/llvm/lib/Analysis/models/saved-model-to-tflite.py
+++ b/llvm/lib/Analysis/models/saved-model-to-tflite.py
@@ -14,24 +14,24 @@
 
 
 def main(argv):
-  assert len(argv) == 3
-  sm_dir = argv[1]
-  tfl_dir = argv[2]
-  tf.io.gfile.makedirs(tfl_dir)
-  tfl_path = os.path.join(tfl_dir, 'model.tflite')
-  converter = tf.lite.TFLiteConverter.from_saved_model(sm_dir)
-  converter.target_spec.supported_ops = [
-    tf.lite.OpsSet.TFLITE_BUILTINS,
-  ]
-  tfl_model = converter.convert()
-  with tf.io.gfile.GFile(tfl_path, 'wb') as f:
-    f.write(tfl_model)
-  
-  json_file = 'output_spec.json'
-  src_json = os.path.join(sm_dir, json_file)
-  if tf.io.gfile.exists(src_json):
-    tf.io.gfile.copy(src_json,
-                     os.path.join(tfl_dir, json_file))
-
-if __name__ == '__main__':
-  main(sys.argv)
+    assert len(argv) == 3
+    sm_dir = argv[1]
+    tfl_dir = argv[2]
+    tf.io.gfile.makedirs(tfl_dir)
+    tfl_path = os.path.join(tfl_dir, "model.tflite")
+    converter = tf.lite.TFLiteConverter.from_saved_model(sm_dir)
+    converter.target_spec.supported_ops = [
+        tf.lite.OpsSet.TFLITE_BUILTINS,
+    ]
+    tfl_model = converter.convert()
+    with tf.io.gfile.GFile(tfl_path, "wb") as f:
+        f.write(tfl_model)
+
+    json_file = "output_spec.json"
+    src_json = os.path.join(sm_dir, json_file)
+    if tf.io.gfile.exists(src_json):
+        tf.io.gfile.copy(src_json, os.path.join(tfl_dir, json_file))
+
+
+if __name__ == "__main__":
+    main(sys.argv)

diff --git a/llvm/test/BugPoint/compile-custom.ll.py b/llvm/test/BugPoint/compile-custom.ll.py
index b0062ac0b74f3..8f3e3c41d7799 100755
--- a/llvm/test/BugPoint/compile-custom.ll.py
+++ b/llvm/test/BugPoint/compile-custom.ll.py
@@ -7,6 +7,6 @@
 # Currently any print-out from the custom tool is interpreted as a crash
 # (i.e. test is still interesting)
 
-print("Error: " + ' '.join(sys.argv[1:]))
+print("Error: " + " ".join(sys.argv[1:]))
 
 sys.exit(1)

diff --git a/llvm/test/CodeGen/AArch64/Atomics/generate-tests.py b/llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
index 8bc10be446ff9..ecda5fd69ca5d 100755
--- a/llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
+++ b/llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
@@ -2,13 +2,14 @@
 import textwrap
 import enum
 import os
+
 """
 Generate the tests in llvm/test/CodeGen/AArch64/Atomics. Run from top level llvm-project.
 """
 
 TRIPLES = [
-    'aarch64',
-    'aarch64_be',
+    "aarch64",
+    "aarch64_be",
 ]
 
 
@@ -117,28 +118,28 @@ class Feature(enum.Flag):
     @property
     def mattr(self):
         if self == Feature.outline_atomics:
-            return '+outline-atomics'
+            return "+outline-atomics"
         if self == Feature.v8_1a:
-            return '+v8.1a'
+            return "+v8.1a"
         if self == Feature.rcpc3:
-            return '+lse2,+rcpc3'
+            return "+lse2,+rcpc3"
         if self == Feature.lse2_lse128:
-            return '+lse2,+lse128'
-        return '+' + self.name
+            return "+lse2,+lse128"
+        return "+" + self.name
 
 
 ATOMICRMW_OPS = [
-    'xchg',
-    'add',
-    'sub',
-    'and',
-    'nand',
-    'or',
-    'xor',
-    'max',
-    'min',
-    'umax',
-    'umin',
+    "xchg",
+    "add",
+    "sub",
+    "and",
+    "nand",
+    "or",
+    "xor",
+    "max",
+    "min",
+    "umax",
+    "umin",
 ]
 
 
@@ -147,15 +148,18 @@ def all_atomicrmw(f):
         for aligned in Aligned:
             for ty in Type:
                 for ordering in ATOMICRMW_ORDERS:
-                    name = f'atomicrmw_{op}_{ty}_{aligned}_{ordering}'
-                    instr = 'atomicrmw'
+                    name = f"atomicrmw_{op}_{ty}_{aligned}_{ordering}"
+                    instr = "atomicrmw"
                     f.write(
-                        textwrap.dedent(f'''
+                        textwrap.dedent(
+                            f"""
                         define dso_local {ty} @{name}(ptr %ptr, {ty} %value) {{
                             %r = {instr} {op} ptr %ptr, {ty} %value {ordering}, align {ty.align(aligned)}
                             ret {ty} %r
                         }}
-                    '''))
+                    """
+                        )
+                    )
 
 
 def all_load(f):
@@ -163,33 +167,39 @@ def all_load(f):
         for ty in Type:
             for ordering in ATOMIC_LOAD_ORDERS:
                 for const in [False, True]:
-                    name = f'load_atomic_{ty}_{aligned}_{ordering}'
-                    instr = 'load atomic'
+                    name = f"load_atomic_{ty}_{aligned}_{ordering}"
+                    instr = "load atomic"
                     if const:
-                        name += '_const'
-                    arg = 'ptr readonly %ptr' if const else 'ptr %ptr'
+                        name += "_const"
+                    arg = "ptr readonly %ptr" if const else "ptr %ptr"
                     f.write(
-                        textwrap.dedent(f'''
+                        textwrap.dedent(
+                            f"""
                         define dso_local {ty} @{name}({arg}) {{
                             %r = {instr} {ty}, ptr %ptr {ordering}, align {ty.align(aligned)}
                             ret {ty} %r
                         }}
-                    '''))
+                    """
+                        )
+                    )
 
 
 def all_store(f):
     for aligned in Aligned:
         for ty in Type:
             for ordering in ATOMIC_STORE_ORDERS:  # FIXME stores
-                name = f'store_atomic_{ty}_{aligned}_{ordering}'
-                instr = 'store atomic'
+                name = f"store_atomic_{ty}_{aligned}_{ordering}"
+                instr = "store atomic"
                 f.write(
-                    textwrap.dedent(f'''
+                    textwrap.dedent(
+                        f"""
                     define dso_local void @{name}({ty} %value, ptr %ptr) {{
                         {instr} {ty} %value, ptr %ptr {ordering}, align {ty.align(aligned)}
                         ret void
                     }}
-                '''))
+                """
+                    )
+                )
 
 
 def all_cmpxchg(f):
@@ -198,85 +208,113 @@ def all_cmpxchg(f):
             for success_ordering in CMPXCHG_SUCCESS_ORDERS:
                 for failure_ordering in CMPXCHG_FAILURE_ORDERS:
                     for weak in [False, True]:
-                        name = f'cmpxchg_{ty}_{aligned}_{success_ordering}_{failure_ordering}'
-                        instr = 'cmpxchg'
+                        name = f"cmpxchg_{ty}_{aligned}_{success_ordering}_{failure_ordering}"
+                        instr = "cmpxchg"
                         if weak:
-                            name += '_weak'
-                            instr += ' weak'
+                            name += "_weak"
+                            instr += " weak"
                         f.write(
-                            textwrap.dedent(f'''
+                            textwrap.dedent(
+                                f"""
                             define dso_local {ty} @{name}({ty} %expected, {ty} %new, ptr %ptr) {{
                                 %pair = {instr} ptr %ptr, {ty} %expected, {ty} %new {success_ordering} {failure_ordering}, align {ty.align(aligned)}
                                 %r = extractvalue {{ {ty}, i1 }} %pair, 0
                                 ret {ty} %r
                             }}
-                        '''))
+                        """
+                            )
+                        )
 
 
 def all_fence(f):
     for ordering in FENCE_ORDERS:
-        name = f'fence_{ordering}'
+        name = f"fence_{ordering}"
         f.write(
-            textwrap.dedent(f'''
+            textwrap.dedent(
+                f"""
             define dso_local void @{name}() {{
                 fence {ordering}
                 ret void
             }}
-        '''))
+        """
+            )
+        )
 
 
 def header(f, triple, features, filter_args: str):
-    f.write('; NOTE: Assertions have been autogenerated by '
-            'utils/update_llc_test_checks.py UTC_ARGS: ')
+    f.write(
+        "; NOTE: Assertions have been autogenerated by "
+        "utils/update_llc_test_checks.py UTC_ARGS: "
+    )
     f.write(filter_args)
-    f.write('\n')
-    f.write(f'; The base test file was generated by {__file__}\n')
+    f.write("\n")
+    f.write(f"; The base test file was generated by {__file__}\n")
     for feat in features:
-        for OptFlag in ['-O0', '-O1']:
-            f.write(' '.join([
-                ';', 'RUN:', 'llc', '%s', '-o', '-', '-verify-machineinstrs',
-                f'-mtriple={triple}', f'-mattr={feat.mattr}', OptFlag, '|',
-                'FileCheck', '%s', f'--check-prefixes=CHECK,{OptFlag}\n'
-            ]))
+        for OptFlag in ["-O0", "-O1"]:
+            f.write(
+                " ".join(
+                    [
+                        ";",
+                        "RUN:",
+                        "llc",
+                        "%s",
+                        "-o",
+                        "-",
+                        "-verify-machineinstrs",
+                        f"-mtriple={triple}",
+                        f"-mattr={feat.mattr}",
+                        OptFlag,
+                        "|",
+                        "FileCheck",
+                        "%s",
+                        f"--check-prefixes=CHECK,{OptFlag}\n",
+                    ]
+                )
+            )
 
 
 def write_lit_tests():
-    os.chdir('llvm/test/CodeGen/AArch64/Atomics/')
+    os.chdir("llvm/test/CodeGen/AArch64/Atomics/")
     for triple in TRIPLES:
         # Feature has no effect on fence, so keep it to one file.
-        with open(f'{triple}-fence.ll', 'w') as f:
+        with open(f"{triple}-fence.ll", "w") as f:
             filter_args = r'--filter "^\s*(dmb)"'
             header(f, triple, Feature, filter_args)
             all_fence(f)
 
         for feat in Feature:
-            with open(f'{triple}-atomicrmw-{feat.name}.ll', 'w') as f:
+            with open(f"{triple}-atomicrmw-{feat.name}.ll", "w") as f:
                 filter_args = r'--filter-out "\b(sp)\b" --filter "^\s*(ld[^r]|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"'
                 header(f, triple, [feat], filter_args)
                 all_atomicrmw(f)
 
-            with open(f'{triple}-cmpxchg-{feat.name}.ll', 'w') as f:
+            with open(f"{triple}-cmpxchg-{feat.name}.ll", "w") as f:
                 filter_args = r'--filter-out "\b(sp)\b" --filter "^\s*(ld[^r]|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"'
                 header(f, triple, [feat], filter_args)
                 all_cmpxchg(f)
 
-            with open(f'{triple}-atomic-load-{feat.name}.ll', 'w') as f:
+            with open(f"{triple}-atomic-load-{feat.name}.ll", "w") as f:
                 filter_args = r'--filter-out "\b(sp)\b" --filter "^\s*(ld|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"'
                 header(f, triple, [feat], filter_args)
                 all_load(f)
 
-            with open(f'{triple}-atomic-store-{feat.name}.ll', 'w') as f:
+            with open(f"{triple}-atomic-store-{feat.name}.ll", "w") as f:
                 filter_args = r'--filter-out "\b(sp)\b" --filter "^\s*(ld[^r]|st|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"'
                 header(f, triple, [feat], filter_args)
                 all_store(f)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     write_lit_tests()
 
-    print(textwrap.dedent('''
+    print(
+        textwrap.dedent(
+            """
         Testcases written. To update checks run:
             $ ./llvm/utils/update_llc_test_checks.py -u llvm/test/CodeGen/AArch64/Atomics/*.ll
 
         Or in parallel:
             $ parallel ./llvm/utils/update_llc_test_checks.py -u ::: llvm/test/CodeGen/AArch64/Atomics/*.ll
-    '''))
+    """
+        )
+    )

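The generators above all use the same pattern black is re-wrapping here: an f-string template passed through textwrap.dedent. A standalone sketch of that pattern for the fence case (seq_cst chosen for illustration):

    import textwrap

    ordering = "seq_cst"  # illustrative; the script iterates over FENCE_ORDERS
    print(
        textwrap.dedent(
            f"""
            define dso_local void @fence_{ordering}() {{
                fence {ordering}
                ret void
            }}
            """
        )
    )
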
diff --git a/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py b/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py
index dc96804c0ba91..53809b0a04008 100644
--- a/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py
+++ b/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py
@@ -4,25 +4,24 @@
 
 
 def main(args):
-  # this advisor just picks the first legal register to evict, which is
-  # identifiable by the "mask" feature
-  class Advisor:
-    to_return = False
+    # this advisor just picks the first legal register to evict, which is
+    # identifiable by the "mask" feature
+    class Advisor:
+        to_return = False
 
-    def advice(self, tensor_values: list[log_reader.TensorValue]):
-      for tv in tensor_values:
-        if tv.spec().name != 'mask':
-          continue
-        for i, v in enumerate(tv):
-          if v == 1:
-            return i
-      # i.e. invalid:
-      return -1
+        def advice(self, tensor_values: list[log_reader.TensorValue]):
+            for tv in tensor_values:
+                if tv.spec().name != "mask":
+                    continue
+                for i, v in enumerate(tv):
+                    if v == 1:
+                        return i
+            # i.e. invalid:
+            return -1
 
+    a = Advisor()
+    interactive_host.run_interactive(args[0], a.advice, args[1:])
 
-  a = Advisor()
-  interactive_host.run_interactive(args[0], a.advice, args[1:])
 
-
-if __name__ == '__main__':
-  main(sys.argv[1:])
+if __name__ == "__main__":
+    main(sys.argv[1:])

diff --git a/llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py b/llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py
index 8ed4406fe9b3d..67fabbac1d4e9 100644
--- a/llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py
+++ b/llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py
@@ -20,7 +20,7 @@
     "half": "b16",
     "<2 x half>": "b32",
     "float": "f32",
-    "double": "f64"
+    "double": "f64",
 }
 
 llvm_type_to_ptx_reg = {
@@ -31,7 +31,7 @@
     "half": "h",
     "<2 x half>": "hh",
     "float": "f",
-    "double": "fd"
+    "double": "fd",
 }
 
 addrspace_id = {
@@ -40,12 +40,12 @@
     ".shared": 3,
     ".const": 4,
     ".local": 5,
-    ".param": 101
+    ".param": 101,
 }
 
 
 def gen_load_tests():
-  load_template = """
+    load_template = """
 define ${type} @${testname}(${type} addrspace(${asid})* %ptr) {
 ; CHECK: ${testname}
 ; CHECK_P32: ld${_volatile}${_volatile_as}.${ptx_type} %${ptx_reg}{{[0-9]+}}, [%r{{[0-9]+}}]
@@ -56,51 +56,52 @@ def gen_load_tests():
   ret ${type} %a
 }
 """
-  for op_type, volatile, space in product(
-      ["i8", "i16", "i32", "i64", "half", "float", "double", "<2 x half>"],
-      [True, False],  # volatile
-      ["", ".shared", ".global", ".const", ".local", ".param"]):
-
-    # Volatile is only supported for global, shared and generic.
-    if volatile and not space in ["", ".global", ".shared"]:
-      continue
-
-    # Volatile is only supported for global, shared and generic.
-    # All other volatile accesses are done in generic AS.
-    if volatile and not space in ["", ".global", ".shared"]:
-      volatile_as = ""
-    else:
-      volatile_as = space
-
-    params = {
-        "type": op_type,
-        "volatile": "volatile" if volatile else "",
-        "_volatile": ".volatile" if volatile else "",
-        "_volatile_as": volatile_as,
-        "_space": space,
-        "ptx_reg": llvm_type_to_ptx_reg[op_type],
-        "ptx_type": llvm_type_to_ptx_type[op_type],
-        "asid": addrspace_id[space],
-    }
-
-    testname = \
-      Template("ld_${_volatile}${_space}.${ptx_type}").substitute(params)
-    params["testname"] = testname.replace(".", "_")
-
-    # LLVM does not accept "addrspacecast Type* addrspace(0) to Type*", so we
-    # need to avoid it for generic pointer tests.
-    if space:
-      generic_ptr_template = ("addrspacecast ${type} addrspace(${asid})* %ptr "
-                              "to ${type}*")
-    else:
-      generic_ptr_template = "select i1 true, ${type}* %ptr, ${type}* %ptr"
-    params["generic_ptr"] = Template(generic_ptr_template).substitute(params)
-
-    print(Template(load_template).substitute(params))
+    for op_type, volatile, space in product(
+        ["i8", "i16", "i32", "i64", "half", "float", "double", "<2 x half>"],
+        [True, False],  # volatile
+        ["", ".shared", ".global", ".const", ".local", ".param"],
+    ):
+
+        # Volatile is only supported for global, shared and generic.
+        if volatile and not space in ["", ".global", ".shared"]:
+            continue
+
+        # Volatile is only supported for global, shared and generic.
+        # All other volatile accesses are done in generic AS.
+        if volatile and not space in ["", ".global", ".shared"]:
+            volatile_as = ""
+        else:
+            volatile_as = space
+
+        params = {
+            "type": op_type,
+            "volatile": "volatile" if volatile else "",
+            "_volatile": ".volatile" if volatile else "",
+            "_volatile_as": volatile_as,
+            "_space": space,
+            "ptx_reg": llvm_type_to_ptx_reg[op_type],
+            "ptx_type": llvm_type_to_ptx_type[op_type],
+            "asid": addrspace_id[space],
+        }
+
+        testname = Template("ld_${_volatile}${_space}.${ptx_type}").substitute(params)
+        params["testname"] = testname.replace(".", "_")
+
+        # LLVM does not accept "addrspacecast Type* addrspace(0) to Type*", so we
+        # need to avoid it for generic pointer tests.
+        if space:
+            generic_ptr_template = (
+                "addrspacecast ${type} addrspace(${asid})* %ptr " "to ${type}*"
+            )
+        else:
+            generic_ptr_template = "select i1 true, ${type}* %ptr, ${type}* %ptr"
+        params["generic_ptr"] = Template(generic_ptr_template).substitute(params)
+
+        print(Template(load_template).substitute(params))
 
 
 def main():
-  gen_load_tests()
+    gen_load_tests()
 
 
 main()

diff --git a/llvm/test/CodeGen/NVPTX/surf-tex.py b/llvm/test/CodeGen/NVPTX/surf-tex.py
index 4e239ae7b4f48..d63cfc521117d 100644
--- a/llvm/test/CodeGen/NVPTX/surf-tex.py
+++ b/llvm/test/CodeGen/NVPTX/surf-tex.py
@@ -28,213 +28,227 @@
 import textwrap
 from itertools import product
 
+
 def get_llvm_geom(geom_ptx):
-  geom = {
-    "1d"    : "1d",
-    "2d"    : "2d",
-    "3d"    : "3d",
-    "a1d"   : "1d.array",
-    "a2d"   : "2d.array",
-    "cube"  : "cube",
-    "acube" : "cube.array"
-  }
-  return geom[geom_ptx]
+    geom = {
+        "1d": "1d",
+        "2d": "2d",
+        "3d": "3d",
+        "a1d": "1d.array",
+        "a2d": "2d.array",
+        "cube": "cube",
+        "acube": "cube.array",
+    }
+    return geom[geom_ptx]
+
 
 def get_ptx_reg(ty):
-  reg = {
-    "b8"  : "%rs{{[0-9]+}}",
-    "b16" : "%rs{{[0-9]+}}",
-    "b32" : "%r{{[0-9]+}}",
-    "b64" : "%rd{{[0-9]+}}",
-    "f32" : "%f{{[0-9]+}}",
-    "u32" : "%r{{[0-9]+}}",
-    "s32" : "%r{{[0-9]+}}"
-  }
-  return reg[ty]
+    reg = {
+        "b8": "%rs{{[0-9]+}}",
+        "b16": "%rs{{[0-9]+}}",
+        "b32": "%r{{[0-9]+}}",
+        "b64": "%rd{{[0-9]+}}",
+        "f32": "%f{{[0-9]+}}",
+        "u32": "%r{{[0-9]+}}",
+        "s32": "%r{{[0-9]+}}",
+    }
+    return reg[ty]
+
 
 def get_ptx_vec_reg(vec, ty):
-  vec_reg = {
-    ""   : "{{{reg}}}",
-    "v2" : "{{{reg}, {reg}}}",
-    "v4" : "{{{reg}, {reg}, {reg}, {reg}}}"
-  }
-  return vec_reg[vec].format(reg=get_ptx_reg(ty))
+    vec_reg = {
+        "": "{{{reg}}}",
+        "v2": "{{{reg}, {reg}}}",
+        "v4": "{{{reg}, {reg}, {reg}, {reg}}}",
+    }
+    return vec_reg[vec].format(reg=get_ptx_reg(ty))
+
 
 def get_llvm_type(ty):
-  if ty[0] in ("b", "s", "u"):
-    return "i" + ty[1:]
-  if ty == "f16":
-    return "half"
-  if ty == "f32":
-    return "float"
-  raise RuntimeError("invalid type: " + ty)
+    if ty[0] in ("b", "s", "u"):
+        return "i" + ty[1:]
+    if ty == "f16":
+        return "half"
+    if ty == "f32":
+        return "float"
+    raise RuntimeError("invalid type: " + ty)
+
 
 def get_llvm_vec_type(vec, ty_ptx):
-  ty = get_llvm_type(ty_ptx)
+    ty = get_llvm_type(ty_ptx)
 
-  # i8 is passed as i16, same as in PTX
-  if ty == "i8":
-    ty = "i16"
+    # i8 is passed as i16, same as in PTX
+    if ty == "i8":
+        ty = "i16"
+
+    vec_ty = {
+        "": "{ty}",
+        "v2": "{{ {ty}, {ty} }}",
+        "v4": "{{ {ty}, {ty}, {ty}, {ty} }}",
+    }
+    return vec_ty[vec].format(ty=ty)
 
-  vec_ty = {
-    ""   : "{ty}",
-    "v2" : "{{ {ty}, {ty} }}",
-    "v4" : "{{ {ty}, {ty}, {ty}, {ty} }}"
-  }
-  return vec_ty[vec].format(ty=ty)
 
 def get_llvm_value(vec, ty_ptx):
-  ty = get_llvm_type(ty_ptx)
+    ty = get_llvm_type(ty_ptx)
 
-  # i8 is passed as i16, same as in PTX
-  if ty == "i8":
-    ty = "i16"
+    # i8 is passed as i16, same as in PTX
+    if ty == "i8":
+        ty = "i16"
+
+    value = {
+        "": "{ty} %v1",
+        "v2": "{ty} %v1, {ty} %v2",
+        "v4": "{ty} %v1, {ty} %v2, {ty} %v3, {ty} %v4",
+    }
+    return value[vec].format(ty=ty)
 
-  value = {
-    ""   : "{ty} %v1",
-    "v2" : "{ty} %v1, {ty} %v2",
-    "v4" : "{ty} %v1, {ty} %v2, {ty} %v3, {ty} %v4"
-  }
-  return value[vec].format(ty=ty)
 
 def get_llvm_value_type(vec, ty_ptx):
-  ty = get_llvm_type(ty_ptx)
+    ty = get_llvm_type(ty_ptx)
 
-  # i8 is passed as i16, same as in PTX
-  if ty == "i8":
-    ty = "i16"
+    # i8 is passed as i16, same as in PTX
+    if ty == "i8":
+        ty = "i16"
+
+    value = {"": "{ty}", "v2": "{ty}, {ty}", "v4": "{ty}, {ty}, {ty}, {ty}"}
+    return value[vec].format(ty=ty)
 
-  value = {
-    ""   : "{ty}",
-    "v2" : "{ty}, {ty}",
-    "v4" : "{ty}, {ty}, {ty}, {ty}"
-  }
-  return value[vec].format(ty=ty)
 
 def gen_triple(target):
-  if target == "cuda":
-    print("target triple = \"nvptx64-unknown-cuda\"\n")
-  elif target == "nvcl":
-    print("target triple = \"nvptx64-unknown-nvcl\"\n")
-  else:
-    raise RuntimeError("invalid target: " + target)
+    if target == "cuda":
+        print('target triple = "nvptx64-unknown-cuda"\n')
+    elif target == "nvcl":
+        print('target triple = "nvptx64-unknown-nvcl"\n')
+    else:
+        raise RuntimeError("invalid target: " + target)
+
 
 def gen_globals(target, surf_name, tex_name, sampler_name):
-  print("declare i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)*)")
-  print("; CHECK: .global .surfref {}".format(surf_name))
-  print("; CHECK: .global .texref {}".format(tex_name))
-  print("@{} = internal addrspace(1) global i64 0, align 8".format(surf_name))
-  print("@{} = internal addrspace(1) global i64 1, align 8".format(tex_name))
-  generated_metadata = [
-    "!{{i64 addrspace(1)* @{}, !\"surface\", i32 1}}".format(surf_name),
-    "!{{i64 addrspace(1)* @{}, !\"texture\", i32 1}}".format(tex_name),
-  ]
-
-  if not is_unified(target):
-    print("; CHECK: .global .samplerref {}".format(sampler_name))
-    print("@{} = internal addrspace(1) global i64 1, align 8".format(
-      sampler_name))
-    generated_metadata.append(
-      "!{{i64 addrspace(1)* @{}, !\"sampler\", i32 1}}".format(sampler_name))
-
-  return generated_metadata
+    print("declare i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)*)")
+    print("; CHECK: .global .surfref {}".format(surf_name))
+    print("; CHECK: .global .texref {}".format(tex_name))
+    print("@{} = internal addrspace(1) global i64 0, align 8".format(surf_name))
+    print("@{} = internal addrspace(1) global i64 1, align 8".format(tex_name))
+    generated_metadata = [
+        '!{{i64 addrspace(1)* @{}, !"surface", i32 1}}'.format(surf_name),
+        '!{{i64 addrspace(1)* @{}, !"texture", i32 1}}'.format(tex_name),
+    ]
+
+    if not is_unified(target):
+        print("; CHECK: .global .samplerref {}".format(sampler_name))
+        print("@{} = internal addrspace(1) global i64 1, align 8".format(sampler_name))
+        generated_metadata.append(
+            '!{{i64 addrspace(1)* @{}, !"sampler", i32 1}}'.format(sampler_name)
+        )
+
+    return generated_metadata
+
 
 def gen_metadata(metadata):
-  md_values = ["!{}".format(i) for i in range(len(metadata))]
-  print("!nvvm.annotations = !{{{values}}}".format(values=(", ".join(md_values))))
-  for i, md in enumerate(metadata):
-    print("!{} = {}".format(i, md))
+    md_values = ["!{}".format(i) for i in range(len(metadata))]
+    print("!nvvm.annotations = !{{{values}}}".format(values=(", ".join(md_values))))
+    for i, md in enumerate(metadata):
+        print("!{} = {}".format(i, md))
+
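[Editor's note: for reference, gen_metadata numbers its inputs and emits a single !nvvm.annotations block. With two illustrative entries (the global names match the ones gen_tests uses below), the output would look like:

    gen_metadata(
        [
            '!{i64 addrspace(1)* @gsurf, !"surface", i32 1}',
            '!{i64 addrspace(1)* @gtex, !"texture", i32 1}',
        ]
    )
    # Prints:
    #   !nvvm.annotations = !{!0, !1}
    #   !0 = !{i64 addrspace(1)* @gsurf, !"surface", i32 1}
    #   !1 = !{i64 addrspace(1)* @gtex, !"texture", i32 1}
]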
 
 def get_llvm_surface_access(geom_ptx):
-  access = {
-    "1d"  : "i32 %x",
-    "2d"  : "i32 %x, i32 %y",
-    "3d"  : "i32 %x, i32 %y, i32 %z",
-    "a1d" : "i32 %l, i32 %x",
-    "a2d" : "i32 %l, i32 %x, i32 %y",
-  }
-  return access[geom_ptx]
+    access = {
+        "1d": "i32 %x",
+        "2d": "i32 %x, i32 %y",
+        "3d": "i32 %x, i32 %y, i32 %z",
+        "a1d": "i32 %l, i32 %x",
+        "a2d": "i32 %l, i32 %x, i32 %y",
+    }
+    return access[geom_ptx]
+
 
 def get_llvm_surface_access_type(geom_ptx):
-  access_ty = {
-    "1d"  : "i32",
-    "2d"  : "i32, i32",
-    "3d"  : "i32, i32, i32",
-    "a1d" : "i32, i32",
-    "a2d" : "i32, i32, i32",
-  }
-  return access_ty[geom_ptx]
+    access_ty = {
+        "1d": "i32",
+        "2d": "i32, i32",
+        "3d": "i32, i32, i32",
+        "a1d": "i32, i32",
+        "a2d": "i32, i32, i32",
+    }
+    return access_ty[geom_ptx]
+
 
 def get_ptx_surface_access(geom_ptx):
-  """
-  Operand b is a scalar or singleton tuple for 1d surfaces; is a
-  two-element vector for 2d surfaces; and is a four-element vector
-  for 3d surfaces, where the fourth element is ignored. Coordinate
-  elements are of type .s32.
-
-  For 1d surface arrays, operand b has type .v2.b32. The first
-  element is interpreted as an unsigned integer index (.u32) into
-  the surface array, and the second element is interpreted as a 1d
-  surface coordinate of type .s32.
-
-  For 2d surface arrays, operand b has type .v4.b32. The first
-  element is interpreted as an unsigned integer index (.u32) into
-  the surface array, and the next two elements are interpreted as 2d
-  surface coordinates of type .s32. The fourth element is ignored.
-  """
-  access_reg = {
-    "1d"  : "{%r{{[0-9]}}}",
-    "2d"  : "{%r{{[0-9]}}, %r{{[0-9]}}}",
-    "3d"  : "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
-    "a1d" : "{%r{{[0-9]}}, %r{{[0-9]}}}",
-    "a2d" : "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
-  }
-  return access_reg[geom_ptx]
+    """
+    Operand b is a scalar or singleton tuple for 1d surfaces; is a
+    two-element vector for 2d surfaces; and is a four-element vector
+    for 3d surfaces, where the fourth element is ignored. Coordinate
+    elements are of type .s32.
+
+    For 1d surface arrays, operand b has type .v2.b32. The first
+    element is interpreted as an unsigned integer index (.u32) into
+    the surface array, and the second element is interpreted as a 1d
+    surface coordinate of type .s32.
+
+    For 2d surface arrays, operand b has type .v4.b32. The first
+    element is interpreted as an unsigned integer index (.u32) into
+    the surface array, and the next two elements are interpreted as 2d
+    surface coordinates of type .s32. The fourth element is ignored.
+    """
+    access_reg = {
+        "1d": "{%r{{[0-9]}}}",
+        "2d": "{%r{{[0-9]}}, %r{{[0-9]}}}",
+        "3d": "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
+        "a1d": "{%r{{[0-9]}}, %r{{[0-9]}}}",
+        "a2d": "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
+    }
+    return access_reg[geom_ptx]
+
 
 def get_ptx_surface(target):
-  # With 'cuda' environment surface is copied with ld.param, so the
-  # instruction uses a register. For 'nvcl' the instruction uses the
-  # parameter directly.
-  if target == "cuda":
-    return "%rd{{[0-9]+}}"
-  elif target == "nvcl":
-    return "test_{{.*}}_param_0"
-  raise RuntimeError("invalid target: " + target)
+    # With 'cuda' environment surface is copied with ld.param, so the
+    # instruction uses a register. For 'nvcl' the instruction uses the
+    # parameter directly.
+    if target == "cuda":
+        return "%rd{{[0-9]+}}"
+    elif target == "nvcl":
+        return "test_{{.*}}_param_0"
+    raise RuntimeError("invalid target: " + target)
+
 
 def get_surface_metadata(target, fun_ty, fun_name, has_surface_param):
-  metadata = []
+    metadata = []
+
+    md_kernel = '!{{{fun_ty} @{fun_name}, !"kernel", i32 1}}'.format(
+        fun_ty=fun_ty, fun_name=fun_name
+    )
+    metadata.append(md_kernel)
 
-  md_kernel = "!{{{fun_ty} @{fun_name}, !\"kernel\", i32 1}}".format(
-    fun_ty=fun_ty, fun_name=fun_name)
-  metadata.append(md_kernel)
+    if target == "cuda":
+        # When a parameter is lowered as a .surfref, it still has the
+        # corresponding ld.param.u64, which is illegal. Do not emit the
+        # metadata to keep the parameter as .b64 instead.
+        has_surface_param = False
 
-  if target == "cuda":
-    # When a parameter is lowered as a .surfref, it still has the
-    # corresponding ld.param.u64, which is illegal. Do not emit the
-    # metadata to keep the parameter as .b64 instead.
-    has_surface_param = False
+    if has_surface_param:
+        md_surface = '!{{{fun_ty} @{fun_name}, !"rdwrimage", i32 0}}'.format(
+            fun_ty=fun_ty, fun_name=fun_name
+        )
+        metadata.append(md_surface)
 
-  if has_surface_param:
-    md_surface = "!{{{fun_ty} @{fun_name}, !\"rdwrimage\", i32 0}}".format(
-      fun_ty=fun_ty, fun_name=fun_name)
-    metadata.append(md_surface)
+    return metadata
 
-  return metadata
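[Editor's note: the net effect of the 'cuda' special case above is that the "rdwrimage" annotation only ever survives for 'nvcl'. A quick sanity check, using a placeholder function type and name:

    fun_ty = "void (i64, i32)*"  # placeholder signature for illustration
    cuda_md = get_surface_metadata("cuda", fun_ty, "f", has_surface_param=True)
    nvcl_md = get_surface_metadata("nvcl", fun_ty, "f", has_surface_param=True)
    # 'cuda' keeps only the "kernel" entry; 'nvcl' adds "rdwrimage".
    assert len(cuda_md) == 1 and len(nvcl_md) == 2
]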
 
 def gen_suld_tests(target, global_surf):
-  """
-  PTX spec s9.7.10.1. Surface Instructions:
+    """
+    PTX spec s9.7.10.1. Surface Instructions:
 
-  suld.b.geom{.cop}.vec.dtype.clamp  d, [a, b];  // unformatted
+    suld.b.geom{.cop}.vec.dtype.clamp  d, [a, b];  // unformatted
 
-  .geom  = { .1d, .2d, .3d, .a1d, .a2d };
-  .cop   = { .ca, .cg, .cs, .cv };               // cache operation
-  .vec   = { none, .v2, .v4 };
-  .dtype = { .b8 , .b16, .b32, .b64 };
-  .clamp = { .trap, .clamp, .zero };
-  """
+    .geom  = { .1d, .2d, .3d, .a1d, .a2d };
+    .cop   = { .ca, .cg, .cs, .cv };               // cache operation
+    .vec   = { none, .v2, .v4 };
+    .dtype = { .b8 , .b16, .b32, .b64 };
+    .clamp = { .trap, .clamp, .zero };
+    """
 
-  template = """
+    template = """
   declare ${retty} @${intrinsic}(i64 %s, ${access});
 
   ; CHECK-LABEL: .entry ${test_name}_param
@@ -256,75 +270,79 @@ def gen_suld_tests(target, global_surf):
   }
   """
 
-  generated_items = []
-  generated_metadata = []
-  # FIXME: "cop" is missing
-  for geom, vec, dtype, clamp in product(
-      ["1d", "2d", "3d", "a1d", "a2d"],
-      ["", "v2", "v4"],
-      ["b8" , "b16", "b32", "b64"],
-      ["trap", "clamp", "zero"]):
-
-    if vec == "v4" and dtype == "b64":
-      continue
-
-    test_name = "test_suld_" + geom + vec + dtype + clamp
-
-    params = {
-      "test_name"   : test_name,
-
-      "intrinsic"   : "llvm.nvvm.suld.{geom}.{dtype}.{clamp}".format(
-        geom=get_llvm_geom(geom),
-        dtype=(vec + get_llvm_type(dtype)),
-        clamp=clamp),
-      "retty"       : get_llvm_vec_type(vec, dtype),
-      "access"      : get_llvm_surface_access(geom),
-      "global_surf" : global_surf,
-
-      "instruction" : "suld.b.{geom}{vec}.{dtype}.{clamp}".format(
-        geom=geom,
-        vec=("" if vec == "" else "." + vec),
-        dtype=dtype,
-        clamp=clamp),
-      "reg_ret"     : get_ptx_vec_reg(vec, dtype),
-      "reg_surf"    : get_ptx_surface(target),
-      "reg_access"  : get_ptx_surface_access(geom),
-    }
-    gen_test(template, params)
-    generated_items.append((params["intrinsic"], params["instruction"]))
-
-    fun_name = test_name + "_param";
-    fun_ty = "void (i64, {retty}*, {access_ty})*".format(
-      retty=params["retty"],
-      access_ty=get_llvm_surface_access_type(geom))
-    generated_metadata += get_surface_metadata(
-      target, fun_ty, fun_name, has_surface_param=True)
-
-    fun_name = test_name + "_global";
-    fun_ty = "void ({retty}*, {access_ty})*".format(
-      retty=params["retty"],
-      access_ty=get_llvm_surface_access_type(geom))
-    generated_metadata += get_surface_metadata(
-      target, fun_ty, fun_name, has_surface_param=False)
+    generated_items = []
+    generated_metadata = []
+    # FIXME: "cop" is missing
+    for geom, vec, dtype, clamp in product(
+        ["1d", "2d", "3d", "a1d", "a2d"],
+        ["", "v2", "v4"],
+        ["b8", "b16", "b32", "b64"],
+        ["trap", "clamp", "zero"],
+    ):
+
+        if vec == "v4" and dtype == "b64":
+            continue
+
+        test_name = "test_suld_" + geom + vec + dtype + clamp
+
+        params = {
+            "test_name": test_name,
+            "intrinsic": "llvm.nvvm.suld.{geom}.{dtype}.{clamp}".format(
+                geom=get_llvm_geom(geom),
+                dtype=(vec + get_llvm_type(dtype)),
+                clamp=clamp,
+            ),
+            "retty": get_llvm_vec_type(vec, dtype),
+            "access": get_llvm_surface_access(geom),
+            "global_surf": global_surf,
+            "instruction": "suld.b.{geom}{vec}.{dtype}.{clamp}".format(
+                geom=geom,
+                vec=("" if vec == "" else "." + vec),
+                dtype=dtype,
+                clamp=clamp,
+            ),
+            "reg_ret": get_ptx_vec_reg(vec, dtype),
+            "reg_surf": get_ptx_surface(target),
+            "reg_access": get_ptx_surface_access(geom),
+        }
+        gen_test(template, params)
+        generated_items.append((params["intrinsic"], params["instruction"]))
+
+        fun_name = test_name + "_param"
+        fun_ty = "void (i64, {retty}*, {access_ty})*".format(
+            retty=params["retty"], access_ty=get_llvm_surface_access_type(geom)
+        )
+        generated_metadata += get_surface_metadata(
+            target, fun_ty, fun_name, has_surface_param=True
+        )
+
+        fun_name = test_name + "_global"
+        fun_ty = "void ({retty}*, {access_ty})*".format(
+            retty=params["retty"], access_ty=get_llvm_surface_access_type(geom)
+        )
+        generated_metadata += get_surface_metadata(
+            target, fun_ty, fun_name, has_surface_param=False
+        )
+
+    return generated_items, generated_metadata
 
-  return generated_items, generated_metadata
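[Editor's note: to make the name mangling concrete, here is how a single (geom, vec, dtype, clamp) tuple from the loop above expands, using the helpers defined earlier:

    geom, vec, dtype, clamp = "a2d", "v2", "b32", "trap"

    intrinsic = "llvm.nvvm.suld.{}.{}.{}".format(
        get_llvm_geom(geom), vec + get_llvm_type(dtype), clamp
    )
    instruction = "suld.b.{}{}.{}.{}".format(
        geom, "" if vec == "" else "." + vec, dtype, clamp
    )
    print(intrinsic)    # llvm.nvvm.suld.2d.array.v2i32.trap
    print(instruction)  # suld.b.a2d.v2.b32.trap
]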
 
 def gen_sust_tests(target, global_surf):
-  """
-  PTX spec s9.7.10.2. Surface Instructions
+    """
+    PTX spec s9.7.10.2. Surface Instructions
 
-  sust.b.{1d,2d,3d}{.cop}.vec.ctype.clamp  [a, b], c;  // unformatted
-  sust.p.{1d,2d,3d}.vec.b32.clamp          [a, b], c;  // formatted
+    sust.b.{1d,2d,3d}{.cop}.vec.ctype.clamp  [a, b], c;  // unformatted
+    sust.p.{1d,2d,3d}.vec.b32.clamp          [a, b], c;  // formatted
 
-  sust.b.{a1d,a2d}{.cop}.vec.ctype.clamp   [a, b], c;  // unformatted
+    sust.b.{a1d,a2d}{.cop}.vec.ctype.clamp   [a, b], c;  // unformatted
 
-  .cop   = { .wb, .cg, .cs, .wt };                     // cache operation
-  .vec   = { none, .v2, .v4 };
-  .ctype = { .b8 , .b16, .b32, .b64 };
-  .clamp = { .trap, .clamp, .zero };
-  """
+    .cop   = { .wb, .cg, .cs, .wt };                     // cache operation
+    .vec   = { none, .v2, .v4 };
+    .ctype = { .b8 , .b16, .b32, .b64 };
+    .clamp = { .trap, .clamp, .zero };
+    """
 
-  template = """
+    template = """
   declare void @${intrinsic}(i64 %s, ${access}, ${value});
 
   ; CHECK-LABEL: .entry ${test_name}_param
@@ -344,226 +362,248 @@ def gen_sust_tests(target, global_surf):
   }
   """
 
-  generated_items = []
-  generated_metadata = []
-  # FIXME: "cop" is missing
-  for fmt, geom, vec, ctype, clamp in product(
-      ["b", "p"],
-      ["1d", "2d", "3d", "a1d", "a2d"],
-      ["", "v2", "v4"],
-      ["b8" , "b16", "b32", "b64"],
-      ["trap", "clamp", "zero"]):
-
-    if fmt == "p" and geom[0] == "a":
-      continue
-    if fmt == "p" and ctype != "b32":
-      continue
-    if vec == "v4" and ctype == "b64":
-      continue
-
-    # FIXME: these intrinsics are missing, but at least one of them is
-    # listed in the PTX spec: sust.p.{1d,2d,3d}.vec.b32.clamp
-    if fmt == "p" and clamp != "trap":
-      continue
-
-    test_name = "test_sust_" + fmt + geom + vec + ctype + clamp
-
-    params = {
-      "test_name"   : test_name,
-
-      "intrinsic" : "llvm.nvvm.sust.{fmt}.{geom}.{ctype}.{clamp}".format(
-        fmt=fmt,
-        geom=get_llvm_geom(geom),
-        ctype=(vec + get_llvm_type(ctype)),
-        clamp=clamp),
-      "access"      : get_llvm_surface_access(geom),
-      "value"       : get_llvm_value(vec, ctype),
-      "global_surf" : global_surf,
-
-      "instruction" : "sust.{fmt}.{geom}{vec}.{ctype}.{clamp}".format(
-        fmt=fmt,
-        geom=geom,
-        vec=("" if vec == "" else "." + vec),
-        ctype=ctype,
-        clamp=clamp),
-      "reg_value"   : get_ptx_vec_reg(vec, ctype),
-      "reg_surf"    : get_ptx_surface(target),
-      "reg_access"  : get_ptx_surface_access(geom)
-    }
-    gen_test(template, params)
-    generated_items.append((params["intrinsic"], params["instruction"]))
-
-    fun_name = test_name + "_param";
-    fun_ty = "void (i64, {value_ty}, {access_ty})*".format(
-      value_ty=get_llvm_value_type(vec, ctype),
-      access_ty=get_llvm_surface_access_type(geom))
-    generated_metadata += get_surface_metadata(
-      target, fun_ty, fun_name, has_surface_param=True)
+    generated_items = []
+    generated_metadata = []
+    # FIXME: "cop" is missing
+    for fmt, geom, vec, ctype, clamp in product(
+        ["b", "p"],
+        ["1d", "2d", "3d", "a1d", "a2d"],
+        ["", "v2", "v4"],
+        ["b8", "b16", "b32", "b64"],
+        ["trap", "clamp", "zero"],
+    ):
+
+        if fmt == "p" and geom[0] == "a":
+            continue
+        if fmt == "p" and ctype != "b32":
+            continue
+        if vec == "v4" and ctype == "b64":
+            continue
+
+        # FIXME: these intrinsics are missing, but at least one of them is
+        # listed in the PTX spec: sust.p.{1d,2d,3d}.vec.b32.clamp
+        if fmt == "p" and clamp != "trap":
+            continue
+
+        test_name = "test_sust_" + fmt + geom + vec + ctype + clamp
+
+        params = {
+            "test_name": test_name,
+            "intrinsic": "llvm.nvvm.sust.{fmt}.{geom}.{ctype}.{clamp}".format(
+                fmt=fmt,
+                geom=get_llvm_geom(geom),
+                ctype=(vec + get_llvm_type(ctype)),
+                clamp=clamp,
+            ),
+            "access": get_llvm_surface_access(geom),
+            "value": get_llvm_value(vec, ctype),
+            "global_surf": global_surf,
+            "instruction": "sust.{fmt}.{geom}{vec}.{ctype}.{clamp}".format(
+                fmt=fmt,
+                geom=geom,
+                vec=("" if vec == "" else "." + vec),
+                ctype=ctype,
+                clamp=clamp,
+            ),
+            "reg_value": get_ptx_vec_reg(vec, ctype),
+            "reg_surf": get_ptx_surface(target),
+            "reg_access": get_ptx_surface_access(geom),
+        }
+        gen_test(template, params)
+        generated_items.append((params["intrinsic"], params["instruction"]))
+
+        fun_name = test_name + "_param"
+        fun_ty = "void (i64, {value_ty}, {access_ty})*".format(
+            value_ty=get_llvm_value_type(vec, ctype),
+            access_ty=get_llvm_surface_access_type(geom),
+        )
+        generated_metadata += get_surface_metadata(
+            target, fun_ty, fun_name, has_surface_param=True
+        )
+
+        fun_name = test_name + "_global"
+        fun_ty = "void ({value_ty}, {access_ty})*".format(
+            value_ty=get_llvm_value_type(vec, ctype),
+            access_ty=get_llvm_surface_access_type(geom),
+        )
+        generated_metadata += get_surface_metadata(
+            target, fun_ty, fun_name, has_surface_param=False
+        )
+
+    return generated_items, generated_metadata
 
-    fun_name = test_name + "_global";
-    fun_ty = "void ({value_ty}, {access_ty})*".format(
-      value_ty=get_llvm_value_type(vec, ctype),
-      access_ty=get_llvm_surface_access_type(geom))
-    generated_metadata += get_surface_metadata(
-      target, fun_ty, fun_name, has_surface_param=False)
-
-  return generated_items, generated_metadata
 
 def is_unified(target):
-  """
-  PTX has two modes of operation. In the unified mode, texture and
-  sampler information is accessed through a single .texref handle. In
-  the independent mode, texture and sampler information each have their
-  own handle, allowing them to be defined separately and combined at the
-  site of usage in the program.
+    """
+    PTX has two modes of operation. In the unified mode, texture and
+    sampler information is accessed through a single .texref handle. In
+    the independent mode, texture and sampler information each have their
+    own handle, allowing them to be defined separately and combined at the
+    site of usage in the program.
+
+    """
+    return target == "cuda"
 
-  """
-  return target == "cuda"
 
 def get_llvm_texture_access(geom_ptx, ctype, mipmap):
-  geom_access = {
-    "1d"    : "{ctype} %x",
-    "2d"    : "{ctype} %x, {ctype} %y",
-    "3d"    : "{ctype} %x, {ctype} %y, {ctype} %z",
-    "cube"  : "{ctype} %s, {ctype} %t, {ctype} %r",
-    "a1d"   : "i32 %l, {ctype} %x",
-    "a2d"   : "i32 %l, {ctype} %x, {ctype} %y",
-    "acube" : "i32 %l, {ctype} %s, {ctype} %t, {ctype} %r",
-  }
+    geom_access = {
+        "1d": "{ctype} %x",
+        "2d": "{ctype} %x, {ctype} %y",
+        "3d": "{ctype} %x, {ctype} %y, {ctype} %z",
+        "cube": "{ctype} %s, {ctype} %t, {ctype} %r",
+        "a1d": "i32 %l, {ctype} %x",
+        "a2d": "i32 %l, {ctype} %x, {ctype} %y",
+        "acube": "i32 %l, {ctype} %s, {ctype} %t, {ctype} %r",
+    }
 
-  access = geom_access[geom_ptx]
+    access = geom_access[geom_ptx]
 
-  if mipmap == "level":
-    access += ", {ctype} %lvl"
-  elif mipmap == "grad":
-    if geom_ptx in ("1d", "a1d"):
-      access += ", {ctype} %dpdx1, {ctype} %dpdy1"
-    elif geom_ptx in ("2d", "a2d"):
-      access += (", {ctype} %dpdx1, {ctype} %dpdx2" +
-                 ", {ctype} %dpdy1, {ctype} %dpdy2")
-    else:
-      access += (", {ctype} %dpdx1, {ctype} %dpdx2, {ctype} %dpdx3" +
-                 ", {ctype} %dpdy1, {ctype} %dpdy2, {ctype} %dpdy3")
+    if mipmap == "level":
+        access += ", {ctype} %lvl"
+    elif mipmap == "grad":
+        if geom_ptx in ("1d", "a1d"):
+            access += ", {ctype} %dpdx1, {ctype} %dpdy1"
+        elif geom_ptx in ("2d", "a2d"):
+            access += (
+                ", {ctype} %dpdx1, {ctype} %dpdx2" + ", {ctype} %dpdy1, {ctype} %dpdy2"
+            )
+        else:
+            access += (
+                ", {ctype} %dpdx1, {ctype} %dpdx2, {ctype} %dpdx3"
+                + ", {ctype} %dpdy1, {ctype} %dpdy2, {ctype} %dpdy3"
+            )
+
+    return access.format(ctype=get_llvm_type(ctype))
 
-  return access.format(ctype=get_llvm_type(ctype))
 
 def get_llvm_texture_access_type(geom_ptx, ctype, mipmap):
-  geom_access = {
-    "1d"    : "{ctype}",
-    "2d"    : "{ctype}, {ctype}",
-    "3d"    : "{ctype}, {ctype}, {ctype}",
-    "cube"  : "{ctype}, {ctype}, {ctype}",
-    "a1d"   : "i32, {ctype}",
-    "a2d"   : "i32, {ctype}, {ctype}",
-    "acube" : "i32, {ctype}, {ctype}, {ctype}",
-  }
+    geom_access = {
+        "1d": "{ctype}",
+        "2d": "{ctype}, {ctype}",
+        "3d": "{ctype}, {ctype}, {ctype}",
+        "cube": "{ctype}, {ctype}, {ctype}",
+        "a1d": "i32, {ctype}",
+        "a2d": "i32, {ctype}, {ctype}",
+        "acube": "i32, {ctype}, {ctype}, {ctype}",
+    }
 
-  access = geom_access[geom_ptx]
+    access = geom_access[geom_ptx]
 
-  if mipmap == "level":
-    access += ", {ctype}"
-  elif mipmap == "grad":
-    if geom_ptx in ("1d", "a1d"):
-      access += ", {ctype}, {ctype}"
-    elif geom_ptx in ("2d", "a2d"):
-      access += (", {ctype}, {ctype}, {ctype}, {ctype}")
-    else:
-      access += (", {ctype}, {ctype}, {ctype}" +
-                 ", {ctype}, {ctype}, {ctype}")
+    if mipmap == "level":
+        access += ", {ctype}"
+    elif mipmap == "grad":
+        if geom_ptx in ("1d", "a1d"):
+            access += ", {ctype}, {ctype}"
+        elif geom_ptx in ("2d", "a2d"):
+            access += ", {ctype}, {ctype}, {ctype}, {ctype}"
+        else:
+            access += ", {ctype}, {ctype}, {ctype}" + ", {ctype}, {ctype}, {ctype}"
+
+    return access.format(ctype=get_llvm_type(ctype))
 
-  return access.format(ctype=get_llvm_type(ctype))
 
 def get_ptx_texture_access(geom_ptx, ctype):
-  access_reg = {
-    "1d"    : "{{{ctype_reg}}}",
-    "2d"    : "{{{ctype_reg}, {ctype_reg}}}",
-    "3d"    : "{{{ctype_reg}, {ctype_reg}, {ctype_reg}, {ctype_reg}}}",
-    "a1d"   : "{{{b32_reg}, {ctype_reg}}}",
-    "a2d"   : "{{{b32_reg}, {ctype_reg}, {ctype_reg}, {ctype_reg}}}",
-    "cube"  : "{{{f32_reg}, {f32_reg}, {f32_reg}, {f32_reg}}}",
-    "acube" : "{{{b32_reg}, {f32_reg}, {f32_reg}, {f32_reg}}}",
-  }
-  return access_reg[geom_ptx].format(ctype_reg=get_ptx_reg(ctype),
-                                     b32_reg=get_ptx_reg("b32"),
-                                     f32_reg=get_ptx_reg("f32"))
+    access_reg = {
+        "1d": "{{{ctype_reg}}}",
+        "2d": "{{{ctype_reg}, {ctype_reg}}}",
+        "3d": "{{{ctype_reg}, {ctype_reg}, {ctype_reg}, {ctype_reg}}}",
+        "a1d": "{{{b32_reg}, {ctype_reg}}}",
+        "a2d": "{{{b32_reg}, {ctype_reg}, {ctype_reg}, {ctype_reg}}}",
+        "cube": "{{{f32_reg}, {f32_reg}, {f32_reg}, {f32_reg}}}",
+        "acube": "{{{b32_reg}, {f32_reg}, {f32_reg}, {f32_reg}}}",
+    }
+    return access_reg[geom_ptx].format(
+        ctype_reg=get_ptx_reg(ctype),
+        b32_reg=get_ptx_reg("b32"),
+        f32_reg=get_ptx_reg("f32"),
+    )
+
 
 def get_ptx_texture(target):
-  # With 'cuda' environment texture/sampler are copied with ld.param,
-  # so the instruction uses registers. For 'nvcl' the instruction uses
-  # texture/sampler parameters directly.
-  if target == "cuda":
-    return "%rd{{[0-9]+}}"
-  elif target == "nvcl":
-    return "test_{{.*}}_param_0, test_{{.*}}_param_1"
-  raise RuntimeError("unknown target: " + target)
+    # In the 'cuda' environment, texture/sampler handles are copied with
+    # ld.param, so the instruction uses registers. For 'nvcl', the
+    # instruction uses the texture/sampler parameters directly.
+    if target == "cuda":
+        return "%rd{{[0-9]+}}"
+    elif target == "nvcl":
+        return "test_{{.*}}_param_0, test_{{.*}}_param_1"
+    raise RuntimeError("unknown target: " + target)
+
 
 def get_llvm_global_sampler(target, global_sampler):
-  if is_unified(target):
-    return "", ""
-  else:
-    sampler_handle = "i64 %gs,"
-    get_sampler_handle = (
-      "%gs = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64" +
-      "(i64 addrspace(1)* @{})".format(global_sampler))
-    return sampler_handle, get_sampler_handle
+    if is_unified(target):
+        return "", ""
+    else:
+        sampler_handle = "i64 %gs,"
+        get_sampler_handle = (
+            "%gs = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64"
+            + "(i64 addrspace(1)* @{})".format(global_sampler)
+        )
+        return sampler_handle, get_sampler_handle
+
 
 def get_ptx_global_sampler(target, global_sampler):
-  if is_unified(target):
-    return ""
-  else:
-    return global_sampler + ","
+    if is_unified(target):
+        return ""
+    else:
+        return global_sampler + ","
+
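[Editor's note: taken together with is_unified, these two helpers mean a 'cuda' (unified) test carries no sampler operand at all, while an 'nvcl' test threads an explicit handle through both the IR and the PTX CHECK line. Roughly:

    # Unified mode ('cuda'): no sampler operand is generated.
    assert get_llvm_global_sampler("cuda", "gsam") == ("", "")
    assert get_ptx_global_sampler("cuda", "gsam") == ""

    # Independent mode ('nvcl'): an explicit i64 handle is threaded through.
    sampler_arg, handle_ir = get_llvm_global_sampler("nvcl", "gsam")
    assert sampler_arg == "i64 %gs,"
    assert get_ptx_global_sampler("nvcl", "gsam") == "gsam,"
]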
 
 def get_texture_metadata(target, fun_ty, fun_name, has_texture_params):
-  metadata = []
+    metadata = []
 
-  md_kernel = "!{{{fun_ty} @{fun_name}, !\"kernel\", i32 1}}".format(
-    fun_ty=fun_ty, fun_name=fun_name)
-  metadata.append(md_kernel)
+    md_kernel = '!{{{fun_ty} @{fun_name}, !"kernel", i32 1}}'.format(
+        fun_ty=fun_ty, fun_name=fun_name
+    )
+    metadata.append(md_kernel)
 
-  if target == "cuda":
-    # When a parameter is lowered as a .texref, it still has the
-    # corresponding ld.param.u64, which is illegal. Do not emit the
-    # metadata to keep the parameter as .b64 instead.
-    has_texture_params = False
+    if target == "cuda":
+        # When a parameter is lowered as a .texref, it still has the
+        # corresponding ld.param.u64, which is illegal. Do not emit the
+        # metadata to keep the parameter as .b64 instead.
+        has_texture_params = False
 
-  if has_texture_params:
-    md_texture = "!{{{fun_ty} @{fun_name}, !\"rdoimage\", i32 0}}".format(
-      fun_ty=fun_ty, fun_name=fun_name)
-    metadata.append(md_texture)
+    if has_texture_params:
+        md_texture = '!{{{fun_ty} @{fun_name}, !"rdoimage", i32 0}}'.format(
+            fun_ty=fun_ty, fun_name=fun_name
+        )
+        metadata.append(md_texture)
 
-    if not is_unified(target):
-      md_sampler = "!{{{fun_ty} @{fun_name}, !\"sampler\", i32 1}}".format(
-      fun_ty=fun_ty, fun_name=fun_name)
-      metadata.append(md_sampler)
+        if not is_unified(target):
+            md_sampler = '!{{{fun_ty} @{fun_name}, !"sampler", i32 1}}'.format(
+                fun_ty=fun_ty, fun_name=fun_name
+            )
+            metadata.append(md_sampler)
+
+    return metadata
 
-  return metadata
 
 def gen_tex_tests(target, global_tex, global_sampler):
-  """
-  PTX spec s9.7.9.3. Texture Instructions
+    """
+    PTX spec s9.7.9.3. Texture Instructions
 
-  tex.geom.v4.dtype.ctype  d, [a, c] {, e} {, f};
-  tex.geom.v4.dtype.ctype  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
+    tex.geom.v4.dtype.ctype  d, [a, c] {, e} {, f};
+    tex.geom.v4.dtype.ctype  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
 
-  tex.geom.v2.f16x2.ctype  d[|p], [a, c] {, e} {, f};
-  tex.geom.v2.f16x2.ctype  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
+    tex.geom.v2.f16x2.ctype  d[|p], [a, c] {, e} {, f};
+    tex.geom.v2.f16x2.ctype  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
 
-  // mipmaps
-  tex.base.geom.v4.dtype.ctype   d[|p], [a, {b,} c] {, e} {, f};
-  tex.level.geom.v4.dtype.ctype  d[|p], [a, {b,} c], lod {, e} {, f};
-  tex.grad.geom.v4.dtype.ctype   d[|p], [a, {b,} c], dPdx, dPdy {, e} {, f};
+    // mipmaps
+    tex.base.geom.v4.dtype.ctype   d[|p], [a, {b,} c] {, e} {, f};
+    tex.level.geom.v4.dtype.ctype  d[|p], [a, {b,} c], lod {, e} {, f};
+    tex.grad.geom.v4.dtype.ctype   d[|p], [a, {b,} c], dPdx, dPdy {, e} {, f};
 
-  tex.base.geom.v2.f16x2.ctype   d[|p], [a, {b,} c] {, e} {, f};
-  tex.level.geom.v2.f16x2.ctype  d[|p], [a, {b,} c], lod {, e} {, f};
-  tex.grad.geom.v2.f16x2.ctype   d[|p], [a, {b,} c], dPdx, dPdy {, e} {, f};
+    tex.base.geom.v2.f16x2.ctype   d[|p], [a, {b,} c] {, e} {, f};
+    tex.level.geom.v2.f16x2.ctype  d[|p], [a, {b,} c], lod {, e} {, f};
+    tex.grad.geom.v2.f16x2.ctype   d[|p], [a, {b,} c], dPdx, dPdy {, e} {, f};
 
-  .geom  = { .1d, .2d, .3d, .a1d, .a2d, .cube, .acube, .2dms, .a2dms };
-  .dtype = { .u32, .s32, .f16,  .f32 };
-  .ctype = {       .s32, .f32 };          // .cube, .acube require .f32
-                                          // .2dms, .a2dms require .s32
-  """
+    .geom  = { .1d, .2d, .3d, .a1d, .a2d, .cube, .acube, .2dms, .a2dms };
+    .dtype = { .u32, .s32, .f16,  .f32 };
+    .ctype = {       .s32, .f32 };          // .cube, .acube require .f32
+                                            // .2dms, .a2dms require .s32
+    """
 
-  template = """
+    template = """
   declare ${retty} @${intrinsic}(i64 %tex, ${sampler} ${access})
 
   ; CHECK-LABEL: .entry ${test_name}_param
@@ -584,160 +624,170 @@ def gen_tex_tests(target, global_tex, global_sampler):
   }
   """
 
-  generated_items = []
-  generated_metadata = []
-  for mipmap, geom, vec, dtype, ctype in product(
-      ["", "level", "grad"],
-      ["1d", "2d", "3d", "a1d", "a2d", "cube", "acube", "2dms", "a2dms"],
-      ["v2", "v4"],
-      ["u32", "s32", "f16", "f32"],
-      ["s32", "f32"]):
-
-    # FIXME: missing intrinsics.
-    # Multi-sample textures and multi-sample texture arrays
-    # introduced in PTX ISA version 3.2.
-    if geom in ("2dms", "a2dms"):
-      continue
-
-    # FIXME: missing intrinsics? no such restriction in the PTX spec
-    if ctype == "s32" and mipmap != "":
-      continue
-
-    # FIXME: missing intrinsics?
-    if ctype == "s32" and geom in ("cube", "acube"):
-      continue
-
-    # FIXME: missing intrinsics.
-    # Support for textures returning f16 and f16x2 data introduced in
-    # PTX ISA version 4.2.
-    if vec == "v2" or dtype == "f16":
-      continue
-
-    # FIXME: missing intrinsics.
-    # Support for tex.grad.{cube, acube} introduced in PTX ISA version
-    # 4.3.
-    if mipmap == "grad" and geom in ("cube", "acube"):
-      continue
-
-    # The instruction returns a two-element vector for destination
-    # type f16x2. For all other destination types, the instruction
-    # returns a four-element vector. Coordinates may be given in
-    # either signed 32-bit integer or 32-bit floating point form.
-    if vec == "v2" and dtype != "f16":
-      continue
-
-    sampler_handle, get_sampler_handle = get_llvm_global_sampler(
-      target, global_sampler)
-
-    test_name = "test_tex_" + "".join((mipmap, geom, vec, dtype, ctype))
-    params = {
-      "test_name" : test_name,
-      "intrinsic" :
-        "llvm.nvvm.tex{unified}.{geom}{mipmap}.{vec}{dtype}.{ctype}".format(
-          unified=(".unified" if is_unified(target) else ""),
-          geom=get_llvm_geom(geom),
-          mipmap=("" if mipmap == "" else "." + mipmap),
-          vec=vec,
-          dtype=dtype,
-          ctype=ctype),
-      "global_tex": global_tex,
-      "retty"     : get_llvm_vec_type(vec, dtype),
-      "sampler"   : sampler_handle,
-      "access"    : get_llvm_texture_access(geom, ctype, mipmap),
-      "get_sampler_handle" : get_sampler_handle,
-
-      "instruction" : "tex{mipmap}.{geom}.{vec}.{dtype}.{ctype}".format(
-        mipmap=("" if mipmap == "" else "." + mipmap),
-        geom=geom,
-        vec=vec,
-        dtype=dtype,
-        ctype=ctype),
-      "ptx_ret"     : get_ptx_vec_reg(vec, dtype),
-      "ptx_tex"     : get_ptx_texture(target),
-      "ptx_access"  : get_ptx_texture_access(geom, ctype),
-      "ptx_global_sampler" : get_ptx_global_sampler(target, global_sampler),
-    }
-    gen_test(template, params)
-    generated_items.append((params["intrinsic"], params["instruction"]))
-
-    fun_name = test_name + "_param";
-    fun_ty = "void (i64, {sampler} {retty}*, {access_ty})*".format(
-      sampler=("" if is_unified(target) else "i64,"),
-      retty=params["retty"],
-      access_ty=get_llvm_texture_access_type(geom, ctype, mipmap))
-    generated_metadata += get_texture_metadata(
-      target, fun_ty, fun_name, has_texture_params=True)
-
-    fun_name = test_name + "_global";
-    fun_ty = "void ({retty}*, {access_ty})*".format(
-      retty=params["retty"],
-      access_ty=get_llvm_texture_access_type(geom, ctype, mipmap))
-    generated_metadata += get_texture_metadata(
-      target, fun_ty, fun_name, has_texture_params=False)
-
-  return generated_items, generated_metadata
+    generated_items = []
+    generated_metadata = []
+    for mipmap, geom, vec, dtype, ctype in product(
+        ["", "level", "grad"],
+        ["1d", "2d", "3d", "a1d", "a2d", "cube", "acube", "2dms", "a2dms"],
+        ["v2", "v4"],
+        ["u32", "s32", "f16", "f32"],
+        ["s32", "f32"],
+    ):
+
+        # FIXME: missing intrinsics.
+        # Multi-sample textures and multi-sample texture arrays
+        # introduced in PTX ISA version 3.2.
+        if geom in ("2dms", "a2dms"):
+            continue
+
+        # FIXME: missing intrinsics? no such restriction in the PTX spec
+        if ctype == "s32" and mipmap != "":
+            continue
+
+        # FIXME: missing intrinsics?
+        if ctype == "s32" and geom in ("cube", "acube"):
+            continue
+
+        # FIXME: missing intrinsics.
+        # Support for textures returning f16 and f16x2 data introduced in
+        # PTX ISA version 4.2.
+        if vec == "v2" or dtype == "f16":
+            continue
+
+        # FIXME: missing intrinsics.
+        # Support for tex.grad.{cube, acube} introduced in PTX ISA version
+        # 4.3.
+        if mipmap == "grad" and geom in ("cube", "acube"):
+            continue
+
+        # The instruction returns a two-element vector for destination
+        # type f16x2. For all other destination types, the instruction
+        # returns a four-element vector. Coordinates may be given in
+        # either signed 32-bit integer or 32-bit floating point form.
+        if vec == "v2" and dtype != "f16":
+            continue
+
+        sampler_handle, get_sampler_handle = get_llvm_global_sampler(
+            target, global_sampler
+        )
+
+        test_name = "test_tex_" + "".join((mipmap, geom, vec, dtype, ctype))
+        params = {
+            "test_name": test_name,
+            "intrinsic": "llvm.nvvm.tex{unified}.{geom}{mipmap}.{vec}{dtype}.{ctype}".format(
+                unified=(".unified" if is_unified(target) else ""),
+                geom=get_llvm_geom(geom),
+                mipmap=("" if mipmap == "" else "." + mipmap),
+                vec=vec,
+                dtype=dtype,
+                ctype=ctype,
+            ),
+            "global_tex": global_tex,
+            "retty": get_llvm_vec_type(vec, dtype),
+            "sampler": sampler_handle,
+            "access": get_llvm_texture_access(geom, ctype, mipmap),
+            "get_sampler_handle": get_sampler_handle,
+            "instruction": "tex{mipmap}.{geom}.{vec}.{dtype}.{ctype}".format(
+                mipmap=("" if mipmap == "" else "." + mipmap),
+                geom=geom,
+                vec=vec,
+                dtype=dtype,
+                ctype=ctype,
+            ),
+            "ptx_ret": get_ptx_vec_reg(vec, dtype),
+            "ptx_tex": get_ptx_texture(target),
+            "ptx_access": get_ptx_texture_access(geom, ctype),
+            "ptx_global_sampler": get_ptx_global_sampler(target, global_sampler),
+        }
+        gen_test(template, params)
+        generated_items.append((params["intrinsic"], params["instruction"]))
+
+        fun_name = test_name + "_param"
+        fun_ty = "void (i64, {sampler} {retty}*, {access_ty})*".format(
+            sampler=("" if is_unified(target) else "i64,"),
+            retty=params["retty"],
+            access_ty=get_llvm_texture_access_type(geom, ctype, mipmap),
+        )
+        generated_metadata += get_texture_metadata(
+            target, fun_ty, fun_name, has_texture_params=True
+        )
+
+        fun_name = test_name + "_global"
+        fun_ty = "void ({retty}*, {access_ty})*".format(
+            retty=params["retty"],
+            access_ty=get_llvm_texture_access_type(geom, ctype, mipmap),
+        )
+        generated_metadata += get_texture_metadata(
+            target, fun_ty, fun_name, has_texture_params=False
+        )
+
+    return generated_items, generated_metadata
+
 
 def get_llvm_tld4_access(geom):
-  """
-  For 2D textures, operand c specifies coordinates as a two-element,
-  32-bit floating-point vector.
-
-  For 2d texture arrays operand c is a four element, 32-bit
-  vector. The first element in operand c is interpreted as an unsigned
-  integer index (.u32) into the texture array, and the next two
-  elements are interpreted as 32-bit floating point coordinates of 2d
-  texture. The fourth element is ignored.
-
-  For cubemap textures, operand c specifies four-element vector which
-  comprises three floating-point coordinates (s, t, r) and a fourth
-  padding argument which is ignored.
-
-  [For cube arrays] The first element in operand c is interpreted as
-  an unsigned integer index (.u32) into the cubemap texture array, and
-  the remaining three elements are interpreted as floating-point
-  cubemap coordinates (s, t, r), used to lookup in the selected
-  cubemap.
-  """
-  geom_to_access = {
-    "2d"    : "float %x, float %y",
-    "a2d"   : "i32 %l, float %x, float %y",
-    "cube"  : "float %s, float %t, float %r",
-    "acube" : "i32 %l, float %s, float %t, float %r"
-  }
-  return geom_to_access[geom]
+    """
+    For 2D textures, operand c specifies coordinates as a two-element,
+    32-bit floating-point vector.
+
+    For 2d texture arrays, operand c is a four-element, 32-bit
+    vector. The first element in operand c is interpreted as an unsigned
+    integer index (.u32) into the texture array, and the next two
+    elements are interpreted as 32-bit floating-point coordinates of the
+    2d texture. The fourth element is ignored.
+
+    For cubemap textures, operand c specifies a four-element vector
+    which comprises three floating-point coordinates (s, t, r) and a
+    fourth padding argument which is ignored.
+
+    [For cube arrays] The first element in operand c is interpreted as
+    an unsigned integer index (.u32) into the cubemap texture array, and
+    the remaining three elements are interpreted as floating-point
+    cubemap coordinates (s, t, r), used to look up in the selected
+    cubemap.
+    """
+    geom_to_access = {
+        "2d": "float %x, float %y",
+        "a2d": "i32 %l, float %x, float %y",
+        "cube": "float %s, float %t, float %r",
+        "acube": "i32 %l, float %s, float %t, float %r",
+    }
+    return geom_to_access[geom]
+
 
 def get_llvm_tld4_access_type(geom):
-  geom_to_access = {
-    "2d"    : "float, float",
-    "a2d"   : "i32, float, float",
-    "cube"  : "float, float, float",
-    "acube" : "i32, float, float, float"
-  }
-  return geom_to_access[geom]
+    geom_to_access = {
+        "2d": "float, float",
+        "a2d": "i32, float, float",
+        "cube": "float, float, float",
+        "acube": "i32, float, float, float",
+    }
+    return geom_to_access[geom]
+
 
 def get_ptx_tld4_access(geom):
-  geom_to_access = {
-    "2d"    : "{%f{{[0-9]+}}, %f{{[0-9]+}}}",
-    "a2d"   : "{%r{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
-    "cube"  : "{%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
-    "acube" : "{%r{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}"
-  }
-  return geom_to_access[geom]
+    geom_to_access = {
+        "2d": "{%f{{[0-9]+}}, %f{{[0-9]+}}}",
+        "a2d": "{%r{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
+        "cube": "{%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
+        "acube": "{%r{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
+    }
+    return geom_to_access[geom]
+
 
 def gen_tld4_tests(target, global_tex, global_sampler):
-  """
-  PTX spec s9.7.9.4. Texture Instructions: tld4
-  Perform a texture fetch of the 4-texel bilerp footprint.
+    """
+    PTX spec s9.7.9.4. Texture Instructions: tld4
+    Perform a texture fetch of the 4-texel bilerp footprint.
 
-  tld4.comp.2d.v4.dtype.f32    d[|p], [a, c] {, e} {, f};
-  tld4.comp.geom.v4.dtype.f32  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
+    tld4.comp.2d.v4.dtype.f32    d[|p], [a, c] {, e} {, f};
+    tld4.comp.geom.v4.dtype.f32  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
 
-  .comp  = { .r, .g, .b, .a };
-  .geom  = { .2d, .a2d, .cube, .acube };
-  .dtype = { .u32, .s32, .f32 };
-  """
+    .comp  = { .r, .g, .b, .a };
+    .geom  = { .2d, .a2d, .cube, .acube };
+    .dtype = { .u32, .s32, .f32 };
+    """
 
-  template = """
+    template = """
   declare ${retty} @${intrinsic}(i64 %tex, ${sampler} ${access})
 
   ; CHECK-LABEL: .entry ${test_name}_param
@@ -758,258 +808,272 @@ def gen_tld4_tests(target, global_tex, global_sampler):
   }
   """
 
-  generated_items = []
-  generated_metadata = []
-  for comp, geom, dtype in product(
-      ["r", "g", "b", "a"],
-      ["2d", "a2d", "cube", "acube"],
-      ["u32", "s32", "f32"]):
-
-    # FIXME: missing intrinsics.
-    # tld4.{a2d,cube,acube} introduced in PTX ISA version 4.3.
-    if geom in ("a2d", "cube", "acube"):
-      continue
-
-    sampler_handle, get_sampler_handle = get_llvm_global_sampler(
-      target, global_sampler)
-
-    test_name = "test_tld4_" + "".join((comp, geom, dtype))
-    params = {
-      "test_name" : test_name,
-      "intrinsic" :
-        "llvm.nvvm.tld4{unified}.{comp}.{geom}.v4{dtype}.f32".format(
-          unified=(".unified" if is_unified(target) else ""),
-          comp=comp,
-          geom=get_llvm_geom(geom),
-          dtype=dtype),
-      "global_tex" : global_tex,
-      "retty"      : get_llvm_vec_type("v4", dtype),
-      "sampler"    : sampler_handle,
-      "access"     : get_llvm_tld4_access(geom),
-      "get_sampler_handle" : get_sampler_handle,
-
-      "instruction" : "tld4.{comp}.{geom}.v4.{dtype}.f32".format(
-        comp=comp, geom=geom, dtype=dtype),
-      "ptx_ret"     : get_ptx_vec_reg("v4", dtype),
-      "ptx_tex"     : get_ptx_texture(target),
-      "ptx_access"  : get_ptx_tld4_access(geom),
-      "ptx_global_sampler" : get_ptx_global_sampler(target, global_sampler),
-    }
-    gen_test(template, params)
-    generated_items.append((params["intrinsic"], params["instruction"]))
-
-    fun_name = test_name + "_param";
-    fun_ty = "void (i64, {sampler} {retty}*, {access_ty})*".format(
-      sampler=("" if is_unified(target) else "i64,"),
-      retty=params["retty"],
-      access_ty=get_llvm_tld4_access_type(geom))
-    generated_metadata += get_texture_metadata(
-      target, fun_ty, fun_name, has_texture_params=True)
-
-    fun_name = test_name + "_global";
-    fun_ty = "void ({retty}*, {access_ty})*".format(
-      retty=params["retty"],
-      access_ty=get_llvm_tld4_access_type(geom))
-    generated_metadata += get_texture_metadata(
-      target, fun_ty, fun_name, has_texture_params=False)
-
-  return generated_items, generated_metadata
+    generated_items = []
+    generated_metadata = []
+    for comp, geom, dtype in product(
+        ["r", "g", "b", "a"], ["2d", "a2d", "cube", "acube"], ["u32", "s32", "f32"]
+    ):
+
+        # FIXME: missing intrinsics.
+        # tld4.{a2d,cube,acube} introduced in PTX ISA version 4.3.
+        if geom in ("a2d", "cube", "acube"):
+            continue
+
+        sampler_handle, get_sampler_handle = get_llvm_global_sampler(
+            target, global_sampler
+        )
+
+        test_name = "test_tld4_" + "".join((comp, geom, dtype))
+        params = {
+            "test_name": test_name,
+            "intrinsic": "llvm.nvvm.tld4{unified}.{comp}.{geom}.v4{dtype}.f32".format(
+                unified=(".unified" if is_unified(target) else ""),
+                comp=comp,
+                geom=get_llvm_geom(geom),
+                dtype=dtype,
+            ),
+            "global_tex": global_tex,
+            "retty": get_llvm_vec_type("v4", dtype),
+            "sampler": sampler_handle,
+            "access": get_llvm_tld4_access(geom),
+            "get_sampler_handle": get_sampler_handle,
+            "instruction": "tld4.{comp}.{geom}.v4.{dtype}.f32".format(
+                comp=comp, geom=geom, dtype=dtype
+            ),
+            "ptx_ret": get_ptx_vec_reg("v4", dtype),
+            "ptx_tex": get_ptx_texture(target),
+            "ptx_access": get_ptx_tld4_access(geom),
+            "ptx_global_sampler": get_ptx_global_sampler(target, global_sampler),
+        }
+        gen_test(template, params)
+        generated_items.append((params["intrinsic"], params["instruction"]))
+
+        fun_name = test_name + "_param"
+        fun_ty = "void (i64, {sampler} {retty}*, {access_ty})*".format(
+            sampler=("" if is_unified(target) else "i64,"),
+            retty=params["retty"],
+            access_ty=get_llvm_tld4_access_type(geom),
+        )
+        generated_metadata += get_texture_metadata(
+            target, fun_ty, fun_name, has_texture_params=True
+        )
+
+        fun_name = test_name + "_global"
+        fun_ty = "void ({retty}*, {access_ty})*".format(
+            retty=params["retty"], access_ty=get_llvm_tld4_access_type(geom)
+        )
+        generated_metadata += get_texture_metadata(
+            target, fun_ty, fun_name, has_texture_params=False
+        )
+
+    return generated_items, generated_metadata
+
 
 def gen_test(template, params):
-  if debug:
-    print()
-    for param, value in params.items():
-      print(";; {}: {}".format(param, value))
+    if debug:
+        print()
+        for param, value in params.items():
+            print(";; {}: {}".format(param, value))
+
+    print(string.Template(textwrap.dedent(template)).substitute(params))
 
-  print(string.Template(textwrap.dedent(template)).substitute(params))
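[Editor's note: gen_test leans on string.Template rather than str.format, since ${name} placeholders coexist with FileCheck's literal {{...}} braces, and on textwrap.dedent to strip the templates' common indentation. A minimal standalone sketch:

    import string
    import textwrap

    # ${...} is substituted; the {{...}} FileCheck braces pass through as-is.
    template = """
        ; CHECK-LABEL: .entry ${test_name}
        ; CHECK: ${instruction} %r{{[0-9]+}}
    """
    params = {"test_name": "demo", "instruction": "suld.b.1d.b32.trap"}
    print(string.Template(textwrap.dedent(template)).substitute(params))
]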
 
 def gen_tests(target, tests):
-  gen_triple(target)
-
-  items = []
-  metadata = []
-
-  global_surf = "gsurf"
-  global_tex = "gtex"
-  global_sampler = "gsam"
-  metadata += gen_globals(target, global_surf, global_tex, global_sampler)
-
-  if "suld" in tests:
-    suld_items, suld_md = gen_suld_tests(target, global_surf)
-    items += suld_items
-    metadata += suld_md
-  if "sust" in tests:
-    sust_items, sust_md = gen_sust_tests(target, global_surf)
-    items += sust_items
-    metadata += sust_md
-  if "tex" in tests:
-    tex_items, tex_md = gen_tex_tests(target, global_tex, global_sampler)
-    items += tex_items
-    metadata += tex_md
-  if "tld4" in tests:
-    tld4_items, tld4_md = gen_tld4_tests(target, global_tex, global_sampler)
-    items += tld4_items
-    metadata += tld4_md
-
-  gen_metadata(metadata)
-  return items
+    gen_triple(target)
+
+    items = []
+    metadata = []
+
+    global_surf = "gsurf"
+    global_tex = "gtex"
+    global_sampler = "gsam"
+    metadata += gen_globals(target, global_surf, global_tex, global_sampler)
+
+    if "suld" in tests:
+        suld_items, suld_md = gen_suld_tests(target, global_surf)
+        items += suld_items
+        metadata += suld_md
+    if "sust" in tests:
+        sust_items, sust_md = gen_sust_tests(target, global_surf)
+        items += sust_items
+        metadata += sust_md
+    if "tex" in tests:
+        tex_items, tex_md = gen_tex_tests(target, global_tex, global_sampler)
+        items += tex_items
+        metadata += tex_md
+    if "tld4" in tests:
+        tld4_items, tld4_md = gen_tld4_tests(target, global_tex, global_sampler)
+        items += tld4_items
+        metadata += tld4_md
+
+    gen_metadata(metadata)
+    return items
+
 
 def write_gen_list(filename, append, items):
-  with open(filename, ("a" if append else "w")) as f:
-    for intrinsic, instruction in items:
-      f.write("{} {}\n".format(intrinsic, instruction))
+    with open(filename, ("a" if append else "w")) as f:
+        for intrinsic, instruction in items:
+            f.write("{} {}\n".format(intrinsic, instruction))
+
 
 def read_gen_list(filename):
-  intrinsics = set()
-  instructions = set()
-  with open(filename) as f:
-    for line in f:
-      intrinsic, instruction = line.split()
-      intrinsics.add(intrinsic)
-      instructions.add(instruction)
-  return (intrinsics, instructions)
+    intrinsics = set()
+    instructions = set()
+    with open(filename) as f:
+        for line in f:
+            intrinsic, instruction = line.split()
+            intrinsics.add(intrinsic)
+            instructions.add(instruction)
+    return (intrinsics, instructions)
+
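[Editor's note: the --gen-list file these two helpers exchange is just two whitespace-separated columns, one intrinsic/instruction pair per line. The file name here is illustrative:

    items = [("llvm.nvvm.suld.1d.i32.trap", "suld.b.1d.b32.trap")]
    write_gen_list("generated.txt", append=False, items=items)
    # generated.txt now contains:
    #   llvm.nvvm.suld.1d.i32.trap suld.b.1d.b32.trap
    intrinsics, instructions = read_gen_list("generated.txt")
    assert "suld.b.1d.b32.trap" in instructions
]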
 
 def read_td_list(filename, regex):
-  td_list = set()
-  with open(filename) as f:
-    for line in f:
-      match = re.search(regex, line)
-      if match:
-        td_list.add(match.group(1))
+    td_list = set()
+    with open(filename) as f:
+        for line in f:
+            match = re.search(regex, line)
+            if match:
+                td_list.add(match.group(1))
+
+    # Arbitrary value - we should find quite a lot of instructions
+    if len(td_list) < 30:
+        raise RuntimeError(
+            "found only {} instructions in {}".format(filename, len(td_list))
+        )
 
-  # Arbitrary value - we should find quite a lot of instructions
-  if len(td_list) < 30:
-    raise RuntimeError("found only {} instructions in {}".format(
-      filename, len(td_list)))
+    return td_list
 
-  return td_list
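[Editor's note: read_td_list is a plain regex scrape over the tablegen source; the verify functions below feed it patterns like this one, shown here against a single hypothetical .td line:

    import re

    line = '  "suld.b.1d.b32.trap",'  # hypothetical NVPTXIntrinsics.td line
    match = re.search('"((suld|sust|tex|tld4)\\..*)"', line)
    assert match and match.group(1) == "suld.b.1d.b32.trap"
]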
 
 def verify_inst_tablegen(path_td, gen_instr):
-  """
-  Verify that all instructions defined in NVPTXIntrinsics.td are
-  tested.
-  """
+    """
+    Verify that all instructions defined in NVPTXIntrinsics.td are
+    tested.
+    """
+
+    td_instr = read_td_list(path_td, '"((suld|sust|tex|tld4)\\..*)"')
+
+    gen_instr.update(
+        {
+            # FIXME: spec does not list any sust.p variants other than b32
+            "sust.p.1d.b8.trap",
+            "sust.p.1d.b16.trap",
+            "sust.p.1d.v2.b8.trap",
+            "sust.p.1d.v2.b16.trap",
+            "sust.p.1d.v4.b8.trap",
+            "sust.p.1d.v4.b16.trap",
+            "sust.p.a1d.b8.trap",
+            "sust.p.a1d.b16.trap",
+            "sust.p.a1d.v2.b8.trap",
+            "sust.p.a1d.v2.b16.trap",
+            "sust.p.a1d.v4.b8.trap",
+            "sust.p.a1d.v4.b16.trap",
+            "sust.p.2d.b8.trap",
+            "sust.p.2d.b16.trap",
+            "sust.p.2d.v2.b8.trap",
+            "sust.p.2d.v2.b16.trap",
+            "sust.p.2d.v4.b8.trap",
+            "sust.p.2d.v4.b16.trap",
+            "sust.p.a2d.b8.trap",
+            "sust.p.a2d.b16.trap",
+            "sust.p.a2d.v2.b8.trap",
+            "sust.p.a2d.v2.b16.trap",
+            "sust.p.a2d.v4.b8.trap",
+            "sust.p.a2d.v4.b16.trap",
+            "sust.p.3d.b8.trap",
+            "sust.p.3d.b16.trap",
+            "sust.p.3d.v2.b8.trap",
+            "sust.p.3d.v2.b16.trap",
+            "sust.p.3d.v4.b8.trap",
+            "sust.p.3d.v4.b16.trap",
+            # FIXME: sust.p is also not supported for arrays
+            "sust.p.a1d.b32.trap",
+            "sust.p.a1d.v2.b32.trap",
+            "sust.p.a1d.v4.b32.trap",
+            "sust.p.a2d.b32.trap",
+            "sust.p.a2d.v2.b32.trap",
+            "sust.p.a2d.v4.b32.trap",
+        }
+    )
+
+    td_instr = list(td_instr)
+    td_instr.sort()
+    gen_instr = list(gen_instr)
+    gen_instr.sort()
+    for i, td in enumerate(td_instr):
+        if i == len(gen_instr) or td != gen_instr[i]:
+            raise RuntimeError(
+                "{} is present in tablegen, but not tested.\n".format(td)
+            )
 
-  td_instr = read_td_list(path_td, "\"((suld|sust|tex|tld4)\\..*)\"")
-
-  gen_instr.update({
-    # FIXME: spec does not list any sust.p variants other than b32
-    "sust.p.1d.b8.trap",
-    "sust.p.1d.b16.trap",
-    "sust.p.1d.v2.b8.trap",
-    "sust.p.1d.v2.b16.trap",
-    "sust.p.1d.v4.b8.trap",
-    "sust.p.1d.v4.b16.trap",
-    "sust.p.a1d.b8.trap",
-    "sust.p.a1d.b16.trap",
-    "sust.p.a1d.v2.b8.trap",
-    "sust.p.a1d.v2.b16.trap",
-    "sust.p.a1d.v4.b8.trap",
-    "sust.p.a1d.v4.b16.trap",
-    "sust.p.2d.b8.trap",
-    "sust.p.2d.b16.trap",
-    "sust.p.2d.v2.b8.trap",
-    "sust.p.2d.v2.b16.trap",
-    "sust.p.2d.v4.b8.trap",
-    "sust.p.2d.v4.b16.trap",
-    "sust.p.a2d.b8.trap",
-    "sust.p.a2d.b16.trap",
-    "sust.p.a2d.v2.b8.trap",
-    "sust.p.a2d.v2.b16.trap",
-    "sust.p.a2d.v4.b8.trap",
-    "sust.p.a2d.v4.b16.trap",
-    "sust.p.3d.b8.trap",
-    "sust.p.3d.b16.trap",
-    "sust.p.3d.v2.b8.trap",
-    "sust.p.3d.v2.b16.trap",
-    "sust.p.3d.v4.b8.trap",
-    "sust.p.3d.v4.b16.trap",
-
-    # FIXME: sust.p is also not supported for arrays
-    "sust.p.a1d.b32.trap",
-    "sust.p.a1d.v2.b32.trap",
-    "sust.p.a1d.v4.b32.trap",
-    "sust.p.a2d.b32.trap",
-    "sust.p.a2d.v2.b32.trap",
-    "sust.p.a2d.v4.b32.trap",
-  })
-
-  td_instr = list(td_instr)
-  td_instr.sort()
-  gen_instr = list(gen_instr)
-  gen_instr.sort()
-  for i, td in enumerate(td_instr):
-    if i == len(gen_instr) or td != gen_instr[i]:
-      raise RuntimeError(
-        "{} is present in tablegen, but not tested.\n".format(td))
 
 def verify_llvm_tablegen(path_td, gen_intr):
-  """
-  Verify that all intrinsics defined in IntrinsicsNVVM.td are
-  tested.
-  """
+    """
+    Verify that all intrinsics defined in IntrinsicsNVVM.td are
+    tested.
+    """
+
+    td_intr = read_td_list(path_td, '"(llvm\\.nvvm\\.(suld|sust|tex|tld4)\\..*)"')
+
+    gen_intr.update(
+        {
+            # FIXME: spec does not list any sust.p variants other than b32
+            "llvm.nvvm.sust.p.1d.i8.trap",
+            "llvm.nvvm.sust.p.1d.i16.trap",
+            "llvm.nvvm.sust.p.1d.v2i8.trap",
+            "llvm.nvvm.sust.p.1d.v2i16.trap",
+            "llvm.nvvm.sust.p.1d.v4i8.trap",
+            "llvm.nvvm.sust.p.1d.v4i16.trap",
+            "llvm.nvvm.sust.p.1d.array.i8.trap",
+            "llvm.nvvm.sust.p.1d.array.i16.trap",
+            "llvm.nvvm.sust.p.1d.array.v2i8.trap",
+            "llvm.nvvm.sust.p.1d.array.v2i16.trap",
+            "llvm.nvvm.sust.p.1d.array.v4i8.trap",
+            "llvm.nvvm.sust.p.1d.array.v4i16.trap",
+            "llvm.nvvm.sust.p.2d.i8.trap",
+            "llvm.nvvm.sust.p.2d.i16.trap",
+            "llvm.nvvm.sust.p.2d.v2i8.trap",
+            "llvm.nvvm.sust.p.2d.v2i16.trap",
+            "llvm.nvvm.sust.p.2d.v4i8.trap",
+            "llvm.nvvm.sust.p.2d.v4i16.trap",
+            "llvm.nvvm.sust.p.2d.array.i8.trap",
+            "llvm.nvvm.sust.p.2d.array.i16.trap",
+            "llvm.nvvm.sust.p.2d.array.v2i8.trap",
+            "llvm.nvvm.sust.p.2d.array.v2i16.trap",
+            "llvm.nvvm.sust.p.2d.array.v4i8.trap",
+            "llvm.nvvm.sust.p.2d.array.v4i16.trap",
+            "llvm.nvvm.sust.p.3d.i8.trap",
+            "llvm.nvvm.sust.p.3d.i16.trap",
+            "llvm.nvvm.sust.p.3d.v2i8.trap",
+            "llvm.nvvm.sust.p.3d.v2i16.trap",
+            "llvm.nvvm.sust.p.3d.v4i8.trap",
+            "llvm.nvvm.sust.p.3d.v4i16.trap",
+            # FIXME: sust.p is also not supported for arrays
+            "llvm.nvvm.sust.p.1d.array.i32.trap",
+            "llvm.nvvm.sust.p.1d.array.v2i32.trap",
+            "llvm.nvvm.sust.p.1d.array.v4i32.trap",
+            "llvm.nvvm.sust.p.2d.array.i32.trap",
+            "llvm.nvvm.sust.p.2d.array.v2i32.trap",
+            "llvm.nvvm.sust.p.2d.array.v4i32.trap",
+        }
+    )
+
+    td_intr = list(td_intr)
+    td_intr.sort()
+    gen_intr = list(gen_intr)
+    gen_intr.sort()
+    for i, td in enumerate(td_intr):
+        if i == len(gen_intr) or td != gen_intr[i]:
+            raise RuntimeError(
+                "{} is present in tablegen, but not tested.\n".format(td)
+            )
 
-  td_intr = read_td_list(
-    path_td, "\"(llvm\\.nvvm\\.(suld|sust|tex|tld4)\\..*)\"")
-
-  gen_intr.update({
-    # FIXME: spec does not list any sust.p variants other than b32
-    "llvm.nvvm.sust.p.1d.i8.trap",
-    "llvm.nvvm.sust.p.1d.i16.trap",
-    "llvm.nvvm.sust.p.1d.v2i8.trap",
-    "llvm.nvvm.sust.p.1d.v2i16.trap",
-    "llvm.nvvm.sust.p.1d.v4i8.trap",
-    "llvm.nvvm.sust.p.1d.v4i16.trap",
-    "llvm.nvvm.sust.p.1d.array.i8.trap",
-    "llvm.nvvm.sust.p.1d.array.i16.trap",
-    "llvm.nvvm.sust.p.1d.array.v2i8.trap",
-    "llvm.nvvm.sust.p.1d.array.v2i16.trap",
-    "llvm.nvvm.sust.p.1d.array.v4i8.trap",
-    "llvm.nvvm.sust.p.1d.array.v4i16.trap",
-    "llvm.nvvm.sust.p.2d.i8.trap",
-    "llvm.nvvm.sust.p.2d.i16.trap",
-    "llvm.nvvm.sust.p.2d.v2i8.trap",
-    "llvm.nvvm.sust.p.2d.v2i16.trap",
-    "llvm.nvvm.sust.p.2d.v4i8.trap",
-    "llvm.nvvm.sust.p.2d.v4i16.trap",
-    "llvm.nvvm.sust.p.2d.array.i8.trap",
-    "llvm.nvvm.sust.p.2d.array.i16.trap",
-    "llvm.nvvm.sust.p.2d.array.v2i8.trap",
-    "llvm.nvvm.sust.p.2d.array.v2i16.trap",
-    "llvm.nvvm.sust.p.2d.array.v4i8.trap",
-    "llvm.nvvm.sust.p.2d.array.v4i16.trap",
-    "llvm.nvvm.sust.p.3d.i8.trap",
-    "llvm.nvvm.sust.p.3d.i16.trap",
-    "llvm.nvvm.sust.p.3d.v2i8.trap",
-    "llvm.nvvm.sust.p.3d.v2i16.trap",
-    "llvm.nvvm.sust.p.3d.v4i8.trap",
-    "llvm.nvvm.sust.p.3d.v4i16.trap",
-
-    # FIXME: sust.p is also not supported for arrays
-    "llvm.nvvm.sust.p.1d.array.i32.trap",
-    "llvm.nvvm.sust.p.1d.array.v2i32.trap",
-    "llvm.nvvm.sust.p.1d.array.v4i32.trap",
-    "llvm.nvvm.sust.p.2d.array.i32.trap",
-    "llvm.nvvm.sust.p.2d.array.v2i32.trap",
-    "llvm.nvvm.sust.p.2d.array.v4i32.trap"
-  })
-
-  td_intr = list(td_intr)
-  td_intr.sort()
-  gen_intr = list(gen_intr)
-  gen_intr.sort()
-  for i, td in enumerate(td_intr):
-    if i == len(gen_intr) or td != gen_intr[i]:
-      raise RuntimeError(
-        "{} is present in tablegen, but not tested.\n".format(td))
 
 parser = argparse.ArgumentParser()
 parser.add_argument("--debug", action="store_true")
 parser.add_argument("--tests", type=str)
 parser.add_argument("--target", type=str)
 parser.add_argument("--gen-list", dest="gen_list", type=str)
-parser.add_argument("--gen-list-append", dest="gen_list_append",
-                    action="store_true")
+parser.add_argument("--gen-list-append", dest="gen_list_append", action="store_true")
 parser.add_argument("--verify", action="store_true")
 parser.add_argument("--llvm-tablegen", dest="llvm_td", type=str)
 parser.add_argument("--inst-tablegen", dest="inst_td", type=str)
@@ -1018,10 +1082,10 @@ def verify_llvm_tablegen(path_td, gen_intr):
 debug = args.debug
 
 if args.verify:
-  intrinsics, instructions = read_gen_list(args.gen_list)
-  verify_inst_tablegen(args.inst_td, instructions)
-  verify_llvm_tablegen(args.llvm_td, intrinsics)
+    intrinsics, instructions = read_gen_list(args.gen_list)
+    verify_inst_tablegen(args.inst_td, instructions)
+    verify_llvm_tablegen(args.llvm_td, intrinsics)
 else:
-  items = gen_tests(args.target, args.tests.split(","))
-  if (args.gen_list):
-    write_gen_list(args.gen_list, args.gen_list_append, items)
+    items = gen_tests(args.target, args.tests.split(","))
+    if args.gen_list:
+        write_gen_list(args.gen_list, args.gen_list_append, items)

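The two verify_* functions above share one idea: scrape the instruction or intrinsic names out of the .td file, merge in the known-untestable FIXME variants, then walk the sorted lists in lockstep and raise on the first tablegen entry without a matching test. A minimal standalone sketch of that check, simplified to a set difference (the helper name and inputs are illustrative, not part of the commit):

# Sketch of the coverage check behind verify_inst_tablegen and
# verify_llvm_tablegen: every name defined in tablegen must also be
# covered by a generated test. Illustrative only.
def check_coverage(td_names, tested_names):
    missing = sorted(set(td_names) - set(tested_names))
    if missing:
        raise RuntimeError(
            "{} is present in tablegen, but not tested.\n".format(missing[0])
        )

check_coverage(
    {"sust.p.1d.b32.trap", "sust.p.2d.b32.trap"},
    {"sust.p.1d.b32.trap", "sust.p.2d.b32.trap"},
)  # no exception: everything defined is tested
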
diff  --git a/llvm/test/CodeGen/NVPTX/wmma.py b/llvm/test/CodeGen/NVPTX/wmma.py
index 4df0434b21b95..928abe8795a7b 100644
--- a/llvm/test/CodeGen/NVPTX/wmma.py
+++ b/llvm/test/CodeGen/NVPTX/wmma.py
@@ -98,405 +98,458 @@
 from itertools import product
 from string import Template
 
+
 class MMAType:
-  def __init__(self, ptx_type):
-    self.ptx_type = ptx_type
-    self.llvm_type = {
-        "f16"  : "<2 x half>",
-        "f32"  : "float",
-        "f64"  : "double",
-        "s32"  : "i32",
-        "b16"  : "i32",
-        "s8"   : "i32",
-        "u8"   : "i32",
-        "s4"   : "i32",
-        "u4"   : "i32",
-        "b1"   : "i32",
-        "bf16" : "i32",
-        "tf32" : "i32",
-    }[ptx_type];
-
-    self.ptx_reg_pattern = {
-        "f16" : "%hh[0-9]+",
-        "f32" : "%f[0-9]+",
-        "f64" : "%fd[0-9]+",
-    }.get(ptx_type, "%r[0-9]+")
-
-  def __repr__(self):
-    return "%s/%s" % (self.ptx_type, self.llvm_type)
+    def __init__(self, ptx_type):
+        self.ptx_type = ptx_type
+        self.llvm_type = {
+            "f16": "<2 x half>",
+            "f32": "float",
+            "f64": "double",
+            "s32": "i32",
+            "b16": "i32",
+            "s8": "i32",
+            "u8": "i32",
+            "s4": "i32",
+            "u4": "i32",
+            "b1": "i32",
+            "bf16": "i32",
+            "tf32": "i32",
+        }[ptx_type]
+
+        self.ptx_reg_pattern = {
+            "f16": "%hh[0-9]+",
+            "f32": "%f[0-9]+",
+            "f64": "%fd[0-9]+",
+        }.get(ptx_type, "%r[0-9]+")
+
+    def __repr__(self):
+        return "%s/%s" % (self.ptx_type, self.llvm_type)
+
 
 class MMAFrag:
-  def __init__(self, geom, frag, ptx_elt_type):
-    self.geom = geom
-    self.frag = frag
-    self.mma_type = MMAType(ptx_elt_type);
-    self.nregs = {
-        # u8/s8 -> s32 @ m16n16k16/m8n32k16/m32n8k16
-        "m16n16k16:a:u8" : 2,
-        "m16n16k16:a:s8" : 2,
-        "m16n16k16:b:u8" : 2,
-        "m16n16k16:b:s8" : 2,
-        "m16n16k16:c:s32" : 8,
-        "m16n16k16:d:s32" : 8,
-
-        "m8n32k16:a:u8" : 1,
-        "m8n32k16:a:s8" : 1,
-        "m8n32k16:b:u8" : 4,
-        "m8n32k16:b:s8" : 4,
-        "m8n32k16:c:s32" : 8,
-        "m8n32k16:d:s32" : 8,
-
-        "m32n8k16:a:u8" : 4,
-        "m32n8k16:a:s8" : 4,
-        "m32n8k16:b:u8" : 1,
-        "m32n8k16:b:s8" : 1,
-        "m32n8k16:c:s32" : 8,
-        "m32n8k16:d:s32" : 8,
-
-        "m8n8k16:a:u8": 1,
-        "m8n8k16:a:s8": 1,
-        "m8n8k16:b:u8": 1,
-        "m8n8k16:b:s8": 1,
-        "m8n8k16:c:s32": 2,
-        "m8n8k16:d:s32": 2,
-
-        "m16n8k16:a:u8": 2,
-        "m16n8k16:a:s8": 2,
-        "m16n8k16:b:u8": 1,
-        "m16n8k16:b:s8": 1,
-        "m16n8k16:c:s32": 4,
-        "m16n8k16:d:s32": 4,
-
-        "m16n8k32:a:u8": 4,
-        "m16n8k32:a:s8": 4,
-        "m16n8k32:b:u8": 2,
-        "m16n8k32:b:s8": 2,
-        "m16n8k32:c:s32": 4,
-        "m16n8k32:d:s32": 4,
-
-        # u4/s4 -> s32 @ m8n8k32 (u4/s4)
-        "m8n8k32:a:u4" : 1,
-        "m8n8k32:a:s4" : 1,
-        "m8n8k32:b:u4" : 1,
-        "m8n8k32:b:s4" : 1,
-        "m8n8k32:c:s32" : 2,
-        "m8n8k32:d:s32" : 2,
-
-        "m16n8k32:a:u4" : 2,
-        "m16n8k32:a:s4" : 2,
-        "m16n8k32:b:u4" : 1,
-        "m16n8k32:b:s4" : 1,
-        "m16n8k32:c:s32" : 4,
-        "m16n8k32:d:s32" : 4,
-
-        "m16n8k64:a:u4" : 4,
-        "m16n8k64:a:s4" : 4,
-        "m16n8k64:b:u4" : 2,
-        "m16n8k64:b:s4" : 2,
-        "m16n8k64:c:s32" : 4,
-        "m16n8k64:d:s32" : 4,
-
-        # b1 -> s32 @ m8n8k128(b1)
-        "m8n8k128:a:b1" : 1,
-        "m8n8k128:b:b1" : 1,
-        "m8n8k128:c:s32" : 2,
-        "m8n8k128:d:s32" : 2,
-
-        "m16n8k128:a:b1" : 2,
-        "m16n8k128:b:b1" : 1,
-        "m16n8k128:c:s32" : 4,
-        "m16n8k128:d:s32" : 4,
-
-        "m16n8k256:a:b1" : 4,
-        "m16n8k256:b:b1" : 2,
-        "m16n8k256:c:s32" : 4,
-        "m16n8k256:d:s32" : 4,
-
-        # bf16 -> s32 @ m16n16k16/m8n32k16/m32n8k16
-        "m16n16k16:a:bf16" : 4,
-        "m16n16k16:b:bf16" : 4,
-        "m8n32k16:a:bf16" : 2,
-        "m8n32k16:b:bf16" : 8,
-        "m32n8k16:a:bf16" : 8,
-        "m32n8k16:b:bf16" : 2,
-
-        "m16n8k16:a:bf16" : 4,
-        "m16n8k16:b:bf16" : 2,
-        "m16n8k16:c:f32" : 4,
-        "m16n8k16:d:f32" : 4,
-        "m16n8k8:a:bf16" : 2,
-        "m16n8k8:b:bf16" : 1,
-        "m16n8k8:c:f32" : 4,
-        "m16n8k8:d:f32" : 4,
-
-        "m8n8k4:a:f64" : 1,
-        "m8n8k4:b:f64" : 1,
-        "m8n8k4:c:f64" : 2,
-        "m8n8k4:d:f64" : 2,
-
-        # tf32 -> s32 @ m16n16k8
-        "m16n16k8:a:tf32" : 4,
-        "m16n16k8:b:tf32" : 4,
-
-        "m16n8k4:a:tf32" : 2,
-        "m16n8k4:b:tf32" : 1,
-        "m16n8k4:c:f32" : 4,
-        "m16n8k4:d:f32" : 4,
-        "m16n8k8:a:tf32" : 4,
-        "m16n8k8:b:tf32" : 2,
-        "m16n8k8:c:f32" : 4,
-        "m16n8k8:d:f32" : 4,
-
-        "m8n8k4:a:f16": 2,
-        "m8n8k4:b:f16": 2,
-        "m16n8k8:a:f16": 2,
-        "m16n8k8:b:f16": 1,
-        "m16n8k8:c:f16": 2,
-        "m16n8k8:d:f16": 2,
-        "m16n8k8:c:f32": 4,
-        "m16n8k8:d:f32": 4,
-        "m16n8k16:a:f16": 4,
-        "m16n8k16:b:f16": 2,
-        "m16n8k16:c:f16": 2,
-        "m16n8k16:d:f16": 2,
-        "m16n8k16:c:f32": 4,
-        "m16n8k16:d:f32": 4,
-
-        # ldmatrix
-        "m8n8:x1:b16": 1,
-        "m8n8:x2:b16": 2,
-        "m8n8:x4:b16": 4,
-    }.get("%s:%s:%s" % (geom, frag, ptx_elt_type), {
-        # All other FP shape/fragment/type combinations have the same size
-        "a:f16" : 8,
-        "b:f16" : 8,
-        "c:f16" : 4,
-        "d:f16" : 4,
-        "c:f32" : 8,
-        "d:f32" : 8,
-    }.get("%s:%s" % (frag, ptx_elt_type), None))
-    assert(self.nregs);
-
-  def __repr__(self):
-    return "%s:%s:%s%s" % (self.geom, self.frag, self.mma_type,
-                           "" if self.nregs == 1 else ("*%d" % self.nregs))
+    def __init__(self, geom, frag, ptx_elt_type):
+        self.geom = geom
+        self.frag = frag
+        self.mma_type = MMAType(ptx_elt_type)
+        self.nregs = {
+            # u8/s8 -> s32 @ m16n16k16/m8n32k16/m32n8k16
+            "m16n16k16:a:u8": 2,
+            "m16n16k16:a:s8": 2,
+            "m16n16k16:b:u8": 2,
+            "m16n16k16:b:s8": 2,
+            "m16n16k16:c:s32": 8,
+            "m16n16k16:d:s32": 8,
+            "m8n32k16:a:u8": 1,
+            "m8n32k16:a:s8": 1,
+            "m8n32k16:b:u8": 4,
+            "m8n32k16:b:s8": 4,
+            "m8n32k16:c:s32": 8,
+            "m8n32k16:d:s32": 8,
+            "m32n8k16:a:u8": 4,
+            "m32n8k16:a:s8": 4,
+            "m32n8k16:b:u8": 1,
+            "m32n8k16:b:s8": 1,
+            "m32n8k16:c:s32": 8,
+            "m32n8k16:d:s32": 8,
+            "m8n8k16:a:u8": 1,
+            "m8n8k16:a:s8": 1,
+            "m8n8k16:b:u8": 1,
+            "m8n8k16:b:s8": 1,
+            "m8n8k16:c:s32": 2,
+            "m8n8k16:d:s32": 2,
+            "m16n8k16:a:u8": 2,
+            "m16n8k16:a:s8": 2,
+            "m16n8k16:b:u8": 1,
+            "m16n8k16:b:s8": 1,
+            "m16n8k16:c:s32": 4,
+            "m16n8k16:d:s32": 4,
+            "m16n8k32:a:u8": 4,
+            "m16n8k32:a:s8": 4,
+            "m16n8k32:b:u8": 2,
+            "m16n8k32:b:s8": 2,
+            "m16n8k32:c:s32": 4,
+            "m16n8k32:d:s32": 4,
+            # u4/s4 -> s32 @ m8n8k32 (u4/s4)
+            "m8n8k32:a:u4": 1,
+            "m8n8k32:a:s4": 1,
+            "m8n8k32:b:u4": 1,
+            "m8n8k32:b:s4": 1,
+            "m8n8k32:c:s32": 2,
+            "m8n8k32:d:s32": 2,
+            "m16n8k32:a:u4": 2,
+            "m16n8k32:a:s4": 2,
+            "m16n8k32:b:u4": 1,
+            "m16n8k32:b:s4": 1,
+            "m16n8k32:c:s32": 4,
+            "m16n8k32:d:s32": 4,
+            "m16n8k64:a:u4": 4,
+            "m16n8k64:a:s4": 4,
+            "m16n8k64:b:u4": 2,
+            "m16n8k64:b:s4": 2,
+            "m16n8k64:c:s32": 4,
+            "m16n8k64:d:s32": 4,
+            # b1 -> s32 @ m8n8k128(b1)
+            "m8n8k128:a:b1": 1,
+            "m8n8k128:b:b1": 1,
+            "m8n8k128:c:s32": 2,
+            "m8n8k128:d:s32": 2,
+            "m16n8k128:a:b1": 2,
+            "m16n8k128:b:b1": 1,
+            "m16n8k128:c:s32": 4,
+            "m16n8k128:d:s32": 4,
+            "m16n8k256:a:b1": 4,
+            "m16n8k256:b:b1": 2,
+            "m16n8k256:c:s32": 4,
+            "m16n8k256:d:s32": 4,
+            # bf16 -> s32 @ m16n16k16/m8n32k16/m32n8k16
+            "m16n16k16:a:bf16": 4,
+            "m16n16k16:b:bf16": 4,
+            "m8n32k16:a:bf16": 2,
+            "m8n32k16:b:bf16": 8,
+            "m32n8k16:a:bf16": 8,
+            "m32n8k16:b:bf16": 2,
+            "m16n8k16:a:bf16": 4,
+            "m16n8k16:b:bf16": 2,
+            "m16n8k16:c:f32": 4,
+            "m16n8k16:d:f32": 4,
+            "m16n8k8:a:bf16": 2,
+            "m16n8k8:b:bf16": 1,
+            "m16n8k8:c:f32": 4,
+            "m16n8k8:d:f32": 4,
+            "m8n8k4:a:f64": 1,
+            "m8n8k4:b:f64": 1,
+            "m8n8k4:c:f64": 2,
+            "m8n8k4:d:f64": 2,
+            # tf32 -> s32 @ m16n16k8
+            "m16n16k8:a:tf32": 4,
+            "m16n16k8:b:tf32": 4,
+            "m16n8k4:a:tf32": 2,
+            "m16n8k4:b:tf32": 1,
+            "m16n8k4:c:f32": 4,
+            "m16n8k4:d:f32": 4,
+            "m16n8k8:a:tf32": 4,
+            "m16n8k8:b:tf32": 2,
+            "m16n8k8:c:f32": 4,
+            "m16n8k8:d:f32": 4,
+            "m8n8k4:a:f16": 2,
+            "m8n8k4:b:f16": 2,
+            "m16n8k8:a:f16": 2,
+            "m16n8k8:b:f16": 1,
+            "m16n8k8:c:f16": 2,
+            "m16n8k8:d:f16": 2,
+            "m16n8k8:c:f32": 4,
+            "m16n8k8:d:f32": 4,
+            "m16n8k16:a:f16": 4,
+            "m16n8k16:b:f16": 2,
+            "m16n8k16:c:f16": 2,
+            "m16n8k16:d:f16": 2,
+            "m16n8k16:c:f32": 4,
+            "m16n8k16:d:f32": 4,
+            # ldmatrix
+            "m8n8:x1:b16": 1,
+            "m8n8:x2:b16": 2,
+            "m8n8:x4:b16": 4,
+        }.get(
+            "%s:%s:%s" % (geom, frag, ptx_elt_type),
+            {
+                # All other FP shape/fragment/type combinations have the same size
+                "a:f16": 8,
+                "b:f16": 8,
+                "c:f16": 4,
+                "d:f16": 4,
+                "c:f32": 8,
+                "d:f32": 8,
+            }.get("%s:%s" % (frag, ptx_elt_type), None),
+        )
+        assert self.nregs
+
+    def __repr__(self):
+        return "%s:%s:%s%s" % (
+            self.geom,
+            self.frag,
+            self.mma_type,
+            "" if self.nregs == 1 else ("*%d" % self.nregs),
+        )
+
 
 class MMAOp:
-  def __init__(self, a, b, c, d):
-    self.a = a
-    self.b = b
-    self.c = c
-    self.d = d
+    def __init__(self, a, b, c, d):
+        self.a = a
+        self.b = b
+        self.c = c
+        self.d = d
+
+    def __repr__(self):
+        return "{A:%s, B:%s, C:%s, D:%s}" % (self.a, self.b, self.c, self.d)
 
-  def __repr__(self):
-    return ("{A:%s, B:%s, C:%s, D:%s}" % (self.a, self.b, self.c, self.d ))
 
 def make_mma_ops(geoms, types_a, types_b, types_c, types_d):
-  ops = []
-  for geom, type_a, type_c in product( geoms,  types_a, types_c):
-    for type_b, type_d in product(types_b if types_b else [type_a],
-                                  types_d if types_d else [type_c]):
-      ops.append(MMAOp(MMAFrag(geom, "a", type_a),
-                       MMAFrag(geom, "b", type_b),
-                       MMAFrag(geom, "c", type_c),
-                       MMAFrag(geom, "d", type_d)))
-  return ops
+    ops = []
+    for geom, type_a, type_c in product(geoms, types_a, types_c):
+        for type_b, type_d in product(
+            types_b if types_b else [type_a], types_d if types_d else [type_c]
+        ):
+            ops.append(
+                MMAOp(
+                    MMAFrag(geom, "a", type_a),
+                    MMAFrag(geom, "b", type_b),
+                    MMAFrag(geom, "c", type_c),
+                    MMAFrag(geom, "d", type_d),
+                )
+            )
+    return ops
+
 
 def make_ldst_ops(geoms, frags, types):
-  return [MMAFrag(geom, frag, ptx_type) for (geom, frag, ptx_type)
-          in product(geoms, frags, types)]
+    return [
+        MMAFrag(geom, frag, ptx_type)
+        for (geom, frag, ptx_type) in product(geoms, frags, types)
+    ]
+
 
 def make_ldmatrix_ops(geoms, frags, types):
-  return [MMAFrag(geom, frag, ptx_type) for (geom, frag, ptx_type)
-          in product(geoms, frags, types)]
+    return [
+        MMAFrag(geom, frag, ptx_type)
+        for (geom, frag, ptx_type) in product(geoms, frags, types)
+    ]
+
 
 def get_wmma_ops():
-  return (make_mma_ops(["m16n16k8"],
-                       ["tf32"], [], ["f32"], []) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["bf16"], [], ["f32"], []) +
-          make_mma_ops(["m8n8k4"],
-                       ["f64"], [], ["f64"], []) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["f16"], [], ["f16", "f32"], ["f16", "f32"]) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["s8", "u8"], [], ["s32"], []) +
-          make_mma_ops(["m8n8k32"],
-                       ["s4", "u4"], [], ["s32"], []) +
-          make_mma_ops(["m8n8k128"],
-                       ["b1"], [], ["s32"], []))
+    return (
+        make_mma_ops(["m16n16k8"], ["tf32"], [], ["f32"], [])
+        + make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"], ["bf16"], [], ["f32"], [])
+        + make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], [])
+        + make_mma_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"],
+            ["f16"],
+            [],
+            ["f16", "f32"],
+            ["f16", "f32"],
+        )
+        + make_mma_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"], ["s8", "u8"], [], ["s32"], []
+        )
+        + make_mma_ops(["m8n8k32"], ["s4", "u4"], [], ["s32"], [])
+        + make_mma_ops(["m8n8k128"], ["b1"], [], ["s32"], [])
+    )
+
 
 def get_mma_ops():
-  return (make_mma_ops(["m8n8k4"],
-                       ["f64"], [], ["f64"], []) +
-          make_mma_ops(["m16n8k4", "m16n8k8"],
-                       ["tf32"], [], ["f32"], []) +
-          make_mma_ops(["m16n8k16", "m16n8k8"],
-                       ["bf16"], [], ["f32"], []) +
-          make_mma_ops(["m8n8k4", "m16n8k8", "m16n8k16"],
-                       ["f16"], [], ["f16", "f32"], ["f16", "f32"]) +
-          make_mma_ops(["m8n8k16", "m16n8k16", "m16n8k32"],
-                       ["s8", "u8"], ["s8", "u8"], ["s32"], []) +
-          make_mma_ops(["m8n8k32", "m16n8k32", "m16n8k64"],
-                       ["s4", "u4"], ["s4", "u4"], ["s32"], []) +
-          make_mma_ops(["m8n8k128", "m16n8k128", "m16n8k256"],
-                       ["b1"], [], ["s32"], []))
+    return (
+        make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], [])
+        + make_mma_ops(["m16n8k4", "m16n8k8"], ["tf32"], [], ["f32"], [])
+        + make_mma_ops(["m16n8k16", "m16n8k8"], ["bf16"], [], ["f32"], [])
+        + make_mma_ops(
+            ["m8n8k4", "m16n8k8", "m16n8k16"],
+            ["f16"],
+            [],
+            ["f16", "f32"],
+            ["f16", "f32"],
+        )
+        + make_mma_ops(
+            ["m8n8k16", "m16n8k16", "m16n8k32"], ["s8", "u8"], ["s8", "u8"], ["s32"], []
+        )
+        + make_mma_ops(
+            ["m8n8k32", "m16n8k32", "m16n8k64"], ["s4", "u4"], ["s4", "u4"], ["s32"], []
+        )
+        + make_mma_ops(["m8n8k128", "m16n8k128", "m16n8k256"], ["b1"], [], ["s32"], [])
+    )
+
 
 def get_ldst_ops(kind):
-  ldst_ops = (make_ldst_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                            ["a", "b"], ["f16", "u8", "s8", "bf16"]) +
-              make_ldst_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                            ["c", "d"], ["f16", "f32", "s32"]) +
-              make_ldst_ops(["m8n8k32"], ["a", "b"], ["s4","u4"]) +
-              make_ldst_ops(["m8n8k128"], ["a", "b"], ["b1"]) +
-              make_ldst_ops(["m8n8k32", "m8n8k128"],  ["c", "d"], ["s32"]) +
-              make_ldst_ops(["m8n8k4"], ["a", "b", "c", "d"], ["f64"]) +
-              make_ldst_ops(["m16n16k8"], ["a", "b"], ["tf32"]) +
-              make_ldst_ops(["m16n16k8"], ["c", "d"], ["f32"]))
-  return [ x for x in ldst_ops if (x.frag == "d") == (kind == "store")]
+    ldst_ops = (
+        make_ldst_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"],
+            ["a", "b"],
+            ["f16", "u8", "s8", "bf16"],
+        )
+        + make_ldst_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"], ["c", "d"], ["f16", "f32", "s32"]
+        )
+        + make_ldst_ops(["m8n8k32"], ["a", "b"], ["s4", "u4"])
+        + make_ldst_ops(["m8n8k128"], ["a", "b"], ["b1"])
+        + make_ldst_ops(["m8n8k32", "m8n8k128"], ["c", "d"], ["s32"])
+        + make_ldst_ops(["m8n8k4"], ["a", "b", "c", "d"], ["f64"])
+        + make_ldst_ops(["m16n16k8"], ["a", "b"], ["tf32"])
+        + make_ldst_ops(["m16n16k8"], ["c", "d"], ["f32"])
+    )
+    return [x for x in ldst_ops if (x.frag == "d") == (kind == "store")]
+
 
 def get_ldmatrix_ops():
-  return make_ldmatrix_ops(["m8n8"], ["x1", "x2", "x4"], ["b16"])
+    return make_ldmatrix_ops(["m8n8"], ["x1", "x2", "x4"], ["b16"])
+
 
 def is_wmma_geom_supported(geom):
-  # geometries for FP and ints.
-  if geom in ["m8n32k16", "m32n8k16"]:
-    return ptx_version >= 61
-  # geometries for sub-ints.
-  if geom in ["m8n8k32", "m8n8k128"]:
-    return ptx_version >= 63 and gpu_arch >= 75
-  if geom == "m16n16k16":
-    return ptx_version >= 60
-  if geom == "m16n8k8":
-    return ptx_version >= 65
-  if geom in ["m16n16k8", "m8n8k4"]:
-    return ptx_version >= 70
-  assert(False) # Unexpected geometry.
+    # geometries for FP and ints.
+    if geom in ["m8n32k16", "m32n8k16"]:
+        return ptx_version >= 61
+    # geometries for sub-ints.
+    if geom in ["m8n8k32", "m8n8k128"]:
+        return ptx_version >= 63 and gpu_arch >= 75
+    if geom == "m16n16k16":
+        return ptx_version >= 60
+    if geom == "m16n8k8":
+        return ptx_version >= 65
+    if geom in ["m16n16k8", "m8n8k4"]:
+        return ptx_version >= 70
+    assert False  # Unexpected geometry.
+
 
 def is_mma_geom_supported(geom):
-  # geometries for FP and ints.
-  if geom == "m8n8k4":
-    return ptx_version >= 64
-  if geom in ["m16n8k8", "m8n8k16", "m8n8k32"]:
-    return ptx_version >= 65
-  if geom in ["m16n8k16", "m16n8k4", "m16n8k32", "m16n8k64", "m8n8k128",
-              "m16n8k128", "m16n8k256"]:
-    return ptx_version >= 70
-  assert(False) # Unexpected geometry.
+    # geometries for FP and ints.
+    if geom == "m8n8k4":
+        return ptx_version >= 64
+    if geom in ["m16n8k8", "m8n8k16", "m8n8k32"]:
+        return ptx_version >= 65
+    if geom in [
+        "m16n8k16",
+        "m16n8k4",
+        "m16n8k32",
+        "m16n8k64",
+        "m8n8k128",
+        "m16n8k128",
+        "m16n8k256",
+    ]:
+        return ptx_version >= 70
+    assert False  # Unexpected geometry.
+
 
 def is_ldmatrix_geom_supported(geom):
-  if geom in ["m8n8"]:
-    return ptx_version >= 65 and gpu_arch >= 75
-  assert(False) # Unexpected geometry.
+    if geom in ["m8n8"]:
+        return ptx_version >= 65 and gpu_arch >= 75
+    assert False  # Unexpected geometry.
+
 
 def is_type_supported(ptx_type):
-  if ptx_type in ["s8", "u8", "s32"]:
-    return ptx_version >= 63 and gpu_arch >= 72
-  if ptx_type in ["s4", "u4", "b1"]:
-    return ptx_version >= 63 and gpu_arch >= 75
-  if ptx_type == "b16":
-    return ptx_version >= 65 and gpu_arch >= 75
-  if ptx_type in ["bf16", "tf32", "f64"]:
-    return ptx_version >= 70
-  return ptx_version >= 60 and gpu_arch >= 70
+    if ptx_type in ["s8", "u8", "s32"]:
+        return ptx_version >= 63 and gpu_arch >= 72
+    if ptx_type in ["s4", "u4", "b1"]:
+        return ptx_version >= 63 and gpu_arch >= 75
+    if ptx_type == "b16":
+        return ptx_version >= 65 and gpu_arch >= 75
+    if ptx_type in ["bf16", "tf32", "f64"]:
+        return ptx_version >= 70
+    return ptx_version >= 60 and gpu_arch >= 70
+
 
 def is_wmma_variant_supported(op, layout_a, layout_b, rnd, satf):
-  if not (is_type_supported(op.a.mma_type.ptx_type)
-          and is_wmma_geom_supported(op.a.geom)):
-    return False
-
-  # rnd is only supported for FP64 WMMA
-  if rnd and op.a.mma_type.ptx_type != "f64":
-    return False
-
-  if satf:
-    # satfinite for floating points was removed in PTX 6.5
-    if op.a.mma_type.ptx_type == "f16" and ptx_version >= 65:
-      return False
-    if not op.a.mma_type.ptx_type in ["f16", "s8", "u8", "s4", "u4"]:
-      return False
-
-  # sub-integer require row/col layout.
-  if op.a.mma_type.ptx_type in ["s4", "u4", "b1"]:
-    return layout_a == "row" and layout_b == "col"
-  return True
+    if not (
+        is_type_supported(op.a.mma_type.ptx_type) and is_wmma_geom_supported(op.a.geom)
+    ):
+        return False
+
+    # rnd is only supported for FP64 WMMA
+    if rnd and op.a.mma_type.ptx_type != "f64":
+        return False
+
+    if satf:
+        # satfinite for floating points was removed in PTX 6.5
+        if op.a.mma_type.ptx_type == "f16" and ptx_version >= 65:
+            return False
+        if not op.a.mma_type.ptx_type in ["f16", "s8", "u8", "s4", "u4"]:
+            return False
+
+    # sub-integer require row/col layout.
+    if op.a.mma_type.ptx_type in ["s4", "u4", "b1"]:
+        return layout_a == "row" and layout_b == "col"
+    return True
+
 
 def is_mma_variant_supported(op, layout_a, layout_b, satf):
-  if not (is_type_supported(op.a.mma_type.ptx_type)
-          and is_mma_geom_supported(op.a.geom)):
-    return False
-
-  if satf and not op.a.mma_type.ptx_type in ["s8", "u8", "s4", "u4"]:
-    return False
-
-  # If the type of C is f32 then so must the type of D
-  if (op.a.geom == "m8n8k4" and op.c.mma_type.ptx_type == "f32"
-      and op.d.mma_type.ptx_type != "f32"):
-    return False
-
-  # A and B type must be the same. C and D type must be the same
-  if (op.a.geom == "m16n8k8"
-        and (op.a.mma_type.ptx_type != op.b.mma_type.ptx_type
-             or op.c.mma_type.ptx_type != op.d.mma_type.ptx_type)):
-      return False
-
-  # C and D type must be the same
-  if (op.a.geom == "m16n8k16"
-      and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type):
-      return False
-
-  # Require row/col layout for all MMA except m8n8k4 on FP16
-  if not (op.a.geom == "m8n8k4" and op.a.mma_type.ptx_type == "f16"):
-    return layout_a == "row" and layout_b == "col"
-  return True
+    if not (
+        is_type_supported(op.a.mma_type.ptx_type) and is_mma_geom_supported(op.a.geom)
+    ):
+        return False
+
+    if satf and not op.a.mma_type.ptx_type in ["s8", "u8", "s4", "u4"]:
+        return False
+
+    # If the type of C is f32 then so must the type of D
+    if (
+        op.a.geom == "m8n8k4"
+        and op.c.mma_type.ptx_type == "f32"
+        and op.d.mma_type.ptx_type != "f32"
+    ):
+        return False
+
+    # A and B type must be the same. C and D type must be the same
+    if op.a.geom == "m16n8k8" and (
+        op.a.mma_type.ptx_type != op.b.mma_type.ptx_type
+        or op.c.mma_type.ptx_type != op.d.mma_type.ptx_type
+    ):
+        return False
+
+    # C and D type must be the same
+    if op.a.geom == "m16n8k16" and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type:
+        return False
+
+    # Require row/col layout for all MMA except m8n8k4 on FP16
+    if not (op.a.geom == "m8n8k4" and op.a.mma_type.ptx_type == "f16"):
+        return layout_a == "row" and layout_b == "col"
+    return True
+
 
 def is_ldst_variant_supported(frag, layout):
-  if not (is_type_supported(frag.mma_type.ptx_type)
-          and is_wmma_geom_supported(frag.geom)):
-    return False
-  if frag.mma_type.ptx_type in ["s4", "u4", "b1"]:
-    # sub-integer require sm_75 and ptx63, row/col layout for a/b.
-    return ((frag.frag == "a" and layout == "row")
+    if not (
+        is_type_supported(frag.mma_type.ptx_type) and is_wmma_geom_supported(frag.geom)
+    ):
+        return False
+    if frag.mma_type.ptx_type in ["s4", "u4", "b1"]:
+        # sub-integer require sm_75 and ptx63, row/col layout for a/b.
+        return (
+            (frag.frag == "a" and layout == "row")
             or (frag.frag == "b" and layout == "col")
-            or frag.frag in ["c", "d"])
-  return True
+            or frag.frag in ["c", "d"]
+        )
+    return True
+
 
 def is_ldmatrix_variant_supported(frag):
-  if not (is_type_supported(frag.mma_type.ptx_type)
-          and is_ldmatrix_geom_supported(frag.geom)):
-    return False
-  return frag.frag in ["x1", "x2", "x4"]
+    if not (
+        is_type_supported(frag.mma_type.ptx_type)
+        and is_ldmatrix_geom_supported(frag.geom)
+    ):
+        return False
+    return frag.frag in ["x1", "x2", "x4"]
+
 
 def make_wmma_slice_ty(frag):
-  return [frag.mma_type.llvm_type] * frag.nregs
+    return [frag.mma_type.llvm_type] * frag.nregs
+
 
 def make_wmma_ld_ret_ty(frag):
-  results = make_wmma_slice_ty(frag)
-  if len(results) == 1:
-    return "%s" % results[0]
-  return "{%s}" % ", ".join(results)
+    results = make_wmma_slice_ty(frag)
+    if len(results) == 1:
+        return "%s" % results[0]
+    return "{%s}" % ", ".join(results)
+
 
 # returns address space
 def get_aspace(space):
-  space_map = {
-      ".global" : 1,
-      ".shared" : 3,
-      ".const"  : 4,
-      ".local"  : 5,
-      ".param"  : 101,
-      ""        : 0,
-      ".generic": 0
-  }
-  return space_map[space];
+    space_map = {
+        ".global": 1,
+        ".shared": 3,
+        ".const": 4,
+        ".local": 5,
+        ".param": 101,
+        "": 0,
+        ".generic": 0,
+    }
+    return space_map[space]
+
 
 def get_pspace(space):
-  return "p%di8" % get_aspace(space);
+    return "p%di8" % get_aspace(space)
+
 
 def check_pattern(frag):
-   return "{{%s}}" % ", *".join([frag.mma_type.ptx_reg_pattern] * frag.nregs)
+    return "{{%s}}" % ", *".join([frag.mma_type.ptx_reg_pattern] * frag.nregs)
+
 
 def gen_wmma_load_tests():
-  load_template = """
+    load_template = """
 declare ${ret_ty} @${intrinsic}(i8 ${as}* %src ${extra_args});
 
 ; CHECK-LABEL: .func {{.*}}test_${function}(
@@ -518,59 +571,68 @@ def gen_wmma_load_tests():
   ret ${ret_ty} %v0;
 }
 """
-  intrinsic_template = "llvm.nvvm.wmma.${geom}.load.${abc}.${layout}${stride}.${itype}.${pspace}"
-  instruction_template = "wmma.load.${abc}.sync${aligned}.${layout}.${geom}${space}.${itype}"
-
-  generated_items = []
-
-  for frag, layout, space, stride in product(
-      get_ldst_ops("load"),
-      ["row","col"],
-      ["",".shared",".global"],
-      ["", ".stride"],
-      ):
-    if not is_ldst_variant_supported(frag, layout):
-      continue
-
-    params = {
-        "abc" : frag.frag,
-        "aligned" : ".aligned" if ptx_version >= 63 else "",
-        "layout" : layout,
-        "space" : space,
-        "stride" : stride,
-        "itype" : frag.mma_type.ptx_type,
-        "pspace" : get_pspace(space),
-        "as"     : "addrspace(%d)" % get_aspace(space),
-        "geom"   : frag.geom,
-    }
-
-    test_params = params
-    test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
-    test_params["function"] = test_params["intrinsic"].replace(".","_")
-    test_params["instruction"] = Template(instruction_template).substitute(params)
-    test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
-    test_params["check_result"] = check_pattern(frag)
-
-    if stride:
-      test_params["extra_args"] = ", i32 %stride";
-      test_params["stride_pattern"] = ", %r{{[0-9]+}}"
-    else:
-      test_params["extra_args"] = ""
-      test_params["stride_pattern"] = ""
-
-    print(Template(load_template).substitute(test_params))
+    intrinsic_template = (
+        "llvm.nvvm.wmma.${geom}.load.${abc}.${layout}${stride}.${itype}.${pspace}"
+    )
+    instruction_template = (
+        "wmma.load.${abc}.sync${aligned}.${layout}.${geom}${space}.${itype}"
+    )
+
+    generated_items = []
+
+    for frag, layout, space, stride in product(
+        get_ldst_ops("load"),
+        ["row", "col"],
+        ["", ".shared", ".global"],
+        ["", ".stride"],
+    ):
+        if not is_ldst_variant_supported(frag, layout):
+            continue
+
+        params = {
+            "abc": frag.frag,
+            "aligned": ".aligned" if ptx_version >= 63 else "",
+            "layout": layout,
+            "space": space,
+            "stride": stride,
+            "itype": frag.mma_type.ptx_type,
+            "pspace": get_pspace(space),
+            "as": "addrspace(%d)" % get_aspace(space),
+            "geom": frag.geom,
+        }
+
+        test_params = params
+        test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+        test_params["function"] = test_params["intrinsic"].replace(".", "_")
+        test_params["instruction"] = Template(instruction_template).substitute(params)
+        test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
+        test_params["check_result"] = check_pattern(frag)
+
+        if stride:
+            test_params["extra_args"] = ", i32 %stride"
+            test_params["stride_pattern"] = ", %r{{[0-9]+}}"
+        else:
+            test_params["extra_args"] = ""
+            test_params["stride_pattern"] = ""
+
+        print(Template(load_template).substitute(test_params))
+
+        generated_items.append((test_params["intrinsic"], test_params["instruction"]))
+
+    return generated_items
 
-    generated_items.append((test_params["intrinsic"],
-                            test_params["instruction"]))
-
-  return generated_items
 
 def make_wmma_slice_args(frag):
-  return ", ".join(["%s %%%s%d" % (t, frag.frag, i) for i,t
-                  in enumerate(make_wmma_slice_ty(frag))])
+    return ", ".join(
+        [
+            "%s %%%s%d" % (t, frag.frag, i)
+            for i, t in enumerate(make_wmma_slice_ty(frag))
+        ]
+    )
+
 
 def gen_wmma_store_tests():
-  store_template = """
+    store_template = """
 declare void @${intrinsic}(i8 ${as}* %src, ${args}${extra_args});
 
 ; CHECK-LABEL: .func {{.*}}test_${function}(
@@ -592,54 +654,59 @@ def gen_wmma_store_tests():
   ret void
 }
 """
-  intrinsic_template = "llvm.nvvm.wmma.${geom}.store.${abc}.${layout}${stride}.${itype}.${pspace}"
-  instruction_template = "wmma.store.${abc}.sync${aligned}.${layout}.${geom}${space}.${itype}"
-
-  generated_items = []
-
-  for frag, layout, space, stride in product(
-      get_ldst_ops("store"),
-      ["row","col"],
-      ["",".shared",".global"],
-      ["", ".stride"]):
-
-    if not is_ldst_variant_supported(frag, layout):
-      continue
-
-    params = {
-        "abc" : frag.frag,
-        "aligned" : ".aligned" if ptx_version >= 63 else "",
-        "layout" : layout,
-        "space" : space,
-        "stride" : stride,
-        "itype" : frag.mma_type.ptx_type,
-        "pspace" : get_pspace(space),
-        "as"     : "addrspace(%d)" % get_aspace(space),
-        "geom"   : frag.geom,
-    }
-
-    test_params = params
-    test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
-    test_params["function"] = test_params["intrinsic"].replace(".","_")
-    test_params["instruction"] = Template(instruction_template).substitute(params)
-    test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
-    test_params["check_args"] = check_pattern(frag)
-    if stride:
-      test_params["extra_args"] = ", i32 %stride";
-      test_params["stride_pattern"] = ", %r{{[0-9]+}};"
-    else:
-      test_params["extra_args"] = ""
-      test_params["stride_pattern"] = ";"
-    test_params["args"] = make_wmma_slice_args(frag);
-
-    print(Template(store_template).substitute(test_params))
-    generated_items.append((test_params["intrinsic"],
-                            test_params["instruction"]))
+    intrinsic_template = (
+        "llvm.nvvm.wmma.${geom}.store.${abc}.${layout}${stride}.${itype}.${pspace}"
+    )
+    instruction_template = (
+        "wmma.store.${abc}.sync${aligned}.${layout}.${geom}${space}.${itype}"
+    )
+
+    generated_items = []
+
+    for frag, layout, space, stride in product(
+        get_ldst_ops("store"),
+        ["row", "col"],
+        ["", ".shared", ".global"],
+        ["", ".stride"],
+    ):
+
+        if not is_ldst_variant_supported(frag, layout):
+            continue
+
+        params = {
+            "abc": frag.frag,
+            "aligned": ".aligned" if ptx_version >= 63 else "",
+            "layout": layout,
+            "space": space,
+            "stride": stride,
+            "itype": frag.mma_type.ptx_type,
+            "pspace": get_pspace(space),
+            "as": "addrspace(%d)" % get_aspace(space),
+            "geom": frag.geom,
+        }
+
+        test_params = params
+        test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+        test_params["function"] = test_params["intrinsic"].replace(".", "_")
+        test_params["instruction"] = Template(instruction_template).substitute(params)
+        test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
+        test_params["check_args"] = check_pattern(frag)
+        if stride:
+            test_params["extra_args"] = ", i32 %stride"
+            test_params["stride_pattern"] = ", %r{{[0-9]+}};"
+        else:
+            test_params["extra_args"] = ""
+            test_params["stride_pattern"] = ";"
+        test_params["args"] = make_wmma_slice_args(frag)
+
+        print(Template(store_template).substitute(test_params))
+        generated_items.append((test_params["intrinsic"], test_params["instruction"]))
+
+    return generated_items
 
-  return generated_items
 
 def gen_ldmatrix_tests():
-  ldmatrix_template = """
+    ldmatrix_template = """
 declare ${ret_ty} @${intrinsic}(i8 ${as}* %src);
 
 ; CHECK-LABEL: .func {{.*}}test_${function}(
@@ -661,76 +728,84 @@ def gen_ldmatrix_tests():
   ret ${ret_ty} %v0;
 }
 """
-  intrinsic_template = "llvm.nvvm.ldmatrix.sync.aligned.${geom}.${frag}${trans}.${itype}.${pspace}"
-  instruction_template = "ldmatrix.sync.aligned.${geom}.${frag}${trans}${space}.${itype}"
-
-  generated_items = []
-
-  for frag, space, trans in product(
-      get_ldmatrix_ops(),
-      ["",".shared"],
-      ["",".trans"],
-      ):
-    if not is_ldmatrix_variant_supported(frag):
-      continue
-
-    params = {
-        "frag" : frag.frag,
-        "space" : space,
-        "trans" : trans,
-        "itype" : frag.mma_type.ptx_type,
-        "pspace" : get_pspace(space),
-        "as"     : "addrspace(%d)" % get_aspace(space),
-        "geom"   : frag.geom,
-    }
-
-    test_params = params
-    test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
-    test_params["function"] = test_params["intrinsic"].replace(".","_")
-    test_params["instruction"] = Template(instruction_template).substitute(params)
-    test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
-    test_params["check_result"] = check_pattern(frag)
+    intrinsic_template = (
+        "llvm.nvvm.ldmatrix.sync.aligned.${geom}.${frag}${trans}.${itype}.${pspace}"
+    )
+    instruction_template = (
+        "ldmatrix.sync.aligned.${geom}.${frag}${trans}${space}.${itype}"
+    )
+
+    generated_items = []
+
+    for frag, space, trans in product(
+        get_ldmatrix_ops(),
+        ["", ".shared"],
+        ["", ".trans"],
+    ):
+        if not is_ldmatrix_variant_supported(frag):
+            continue
+
+        params = {
+            "frag": frag.frag,
+            "space": space,
+            "trans": trans,
+            "itype": frag.mma_type.ptx_type,
+            "pspace": get_pspace(space),
+            "as": "addrspace(%d)" % get_aspace(space),
+            "geom": frag.geom,
+        }
+
+        test_params = params
+        test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+        test_params["function"] = test_params["intrinsic"].replace(".", "_")
+        test_params["instruction"] = Template(instruction_template).substitute(params)
+        test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
+        test_params["check_result"] = check_pattern(frag)
+
+        print(Template(ldmatrix_template).substitute(test_params))
+
+        generated_items.append((test_params["intrinsic"], test_params["instruction"]))
+
+    return generated_items
 
-    print(Template(ldmatrix_template).substitute(test_params))
-
-    generated_items.append((test_params["intrinsic"],
-                            test_params["instruction"]))
-
-  return generated_items
 
 def mma_signature(op):
-  if op.a.mma_type.ptx_type == "f16":
-    # FP16 ops identified by accumulator & result type.
-    return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
-  elif op.a.mma_type.ptx_type != op.b.mma_type.ptx_type:
-    # other ops are identified by input types.
-    return "%s.%s" % (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type)
-  else:
-    # if input types are the same, it only appears once.
-    return op.a.mma_type.ptx_type
+    if op.a.mma_type.ptx_type == "f16":
+        # FP16 ops identified by accumulator & result type.
+        return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
+    elif op.a.mma_type.ptx_type != op.b.mma_type.ptx_type:
+        # other ops are identified by input types.
+        return "%s.%s" % (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type)
+    else:
+        # if input types are the same, it only appears once.
+        return op.a.mma_type.ptx_type
+
 
 def mma_ptx_signature(op):
-  # Encode all four types as D.A.B.C
-  return ".".join(x.mma_type.ptx_type for x in (op.d, op.a, op.b, op.c))
+    # Encode all four types as D.A.B.C
+    return ".".join(x.mma_type.ptx_type for x in (op.d, op.a, op.b, op.c))
+
 
 def wmma_signature(op):
-  if op.a.mma_type.ptx_type == "f16":
-    # FP16 ops identified by accumulator & result type.
-    return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
-  else:
-    # other ops are identified by input type.
-    return op.a.mma_type.ptx_type
+    if op.a.mma_type.ptx_type == "f16":
+        # FP16 ops identified by accumulator & result type.
+        return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
+    else:
+        # other ops are identified by input type.
+        return op.a.mma_type.ptx_type
+
 
 def wmma_ptx_signature(op):
-  if op.a.mma_type.ptx_type == "f16":
-    # FP16 instructions use D.C
-    return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
-  else:
-    # other instructions encode all four types as D.A.B.C
-    return ".".join(x.mma_type.ptx_type for x in (op.d, op.a, op.b, op.c))
+    if op.a.mma_type.ptx_type == "f16":
+        # FP16 instructions use D.C
+        return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
+    else:
+        # other instructions encode all four types as D.A.B.C
+        return ".".join(x.mma_type.ptx_type for x in (op.d, op.a, op.b, op.c))
+
 
 def common_mma_test_gen(params, op, intrinsic_template, instruction_template):
-  mma_template = """
+    mma_template = """
 declare ${ret_ty} @${intrinsic}(
         ${args});
 
@@ -748,109 +823,120 @@ def common_mma_test_gen(params, op, intrinsic_template, instruction_template):
 }
 """
 
-  test_params = params
-  test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
-  test_params["function"] = test_params["intrinsic"].replace(".", "_")
-  test_params["instruction"] = Template(instruction_template).substitute(params)
-  test_params["ret_ty"] = make_wmma_ld_ret_ty(op.d)
-  test_params["check_a"] = check_pattern(op.a)
-  test_params["check_b"] = check_pattern(op.b)
-  test_params["check_c"] = check_pattern(op.c)
-  test_params["check_d"] = check_pattern(op.d)
-  args = ",\n        ".join(make_wmma_slice_args(frag)
-                            for frag in (op.a, op.b, op.c))
-  test_params["args"] = args
-  print(Template(mma_template).substitute(test_params))
-  return (test_params["intrinsic"], test_params["instruction"])
+    test_params = params
+    test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+    test_params["function"] = test_params["intrinsic"].replace(".", "_")
+    test_params["instruction"] = Template(instruction_template).substitute(params)
+    test_params["ret_ty"] = make_wmma_ld_ret_ty(op.d)
+    test_params["check_a"] = check_pattern(op.a)
+    test_params["check_b"] = check_pattern(op.b)
+    test_params["check_c"] = check_pattern(op.c)
+    test_params["check_d"] = check_pattern(op.d)
+    args = ",\n        ".join(make_wmma_slice_args(frag) for frag in (op.a, op.b, op.c))
+    test_params["args"] = args
+    print(Template(mma_template).substitute(test_params))
+    return (test_params["intrinsic"], test_params["instruction"])
+
 
 def get_b1_ops(ptx_type):
-  if ptx_type != "b1":
-    return [""]
-  if ptx_version >= 71:
-    return [".xor.popc", ".and.popc"]
-  return [".xor.popc"]
+    if ptx_type != "b1":
+        return [""]
+    if ptx_version >= 71:
+        return [".xor.popc", ".and.popc"]
+    return [".xor.popc"]
+
 
 def gen_wmma_mma_tests():
-  wmma_intrinsic_template = "llvm.nvvm.wmma.${geom}.mma${b1op}.${alayout}.${blayout}${rnd}.${intrinsic_signature}${satf}"
-  wmma_instruction_template = "wmma.mma${b1op}.sync${aligned}.${alayout}.${blayout}.${geom}${rnd}.${ptx_signature}${satf}"
-
-  generated_items=[]
-
-  for op, alayout, blayout, rnd, satf in product(
-      get_wmma_ops(),
-      ["row","col"],
-      ["row","col"],
-      [".rn", ".rz", ".rm", ".rp", ""],
-      [".satfinite", ""]):
-
-    if not is_wmma_variant_supported(op, alayout, blayout, rnd, satf):
-      continue
-
-    for b1op in get_b1_ops(op.a.mma_type.ptx_type):
-      params = {
-          "aligned" : ".aligned" if ptx_version >= 63 else "",
-          "alayout" : alayout,
-          "blayout" : blayout,
-          "intrinsic_signature" : wmma_signature(op),
-          "ptx_signature" : wmma_ptx_signature(op),
-          "satf"  : satf,
-          "rnd"   : rnd,
-          "geom"  : op.a.geom,
-          "b1op"  : b1op
-      }
-
-      intrinsic_template = wmma_intrinsic_template
-      instruction_template = wmma_instruction_template
-
-      generated_items.append(common_mma_test_gen(params, op,
-                                                 intrinsic_template, instruction_template))
-
-  return generated_items
+    wmma_intrinsic_template = "llvm.nvvm.wmma.${geom}.mma${b1op}.${alayout}.${blayout}${rnd}.${intrinsic_signature}${satf}"
+    wmma_instruction_template = "wmma.mma${b1op}.sync${aligned}.${alayout}.${blayout}.${geom}${rnd}.${ptx_signature}${satf}"
+
+    generated_items = []
+
+    for op, alayout, blayout, rnd, satf in product(
+        get_wmma_ops(),
+        ["row", "col"],
+        ["row", "col"],
+        [".rn", ".rz", ".rm", ".rp", ""],
+        [".satfinite", ""],
+    ):
+
+        if not is_wmma_variant_supported(op, alayout, blayout, rnd, satf):
+            continue
+
+        for b1op in get_b1_ops(op.a.mma_type.ptx_type):
+            params = {
+                "aligned": ".aligned" if ptx_version >= 63 else "",
+                "alayout": alayout,
+                "blayout": blayout,
+                "intrinsic_signature": wmma_signature(op),
+                "ptx_signature": wmma_ptx_signature(op),
+                "satf": satf,
+                "rnd": rnd,
+                "geom": op.a.geom,
+                "b1op": b1op,
+            }
+
+            intrinsic_template = wmma_intrinsic_template
+            instruction_template = wmma_instruction_template
+
+            generated_items.append(
+                common_mma_test_gen(
+                    params, op, intrinsic_template, instruction_template
+                )
+            )
+
+    return generated_items
+
 
 def gen_mma_tests():
-  mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${satf}.${intrinsic_signature}"
-  mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${satf}.${ptx_signature}${b1op}"
+    mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${satf}.${intrinsic_signature}"
+    mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${satf}.${ptx_signature}${b1op}"
 
-  generated_items=[]
+    generated_items = []
 
-  for op, alayout, blayout, satf in product(
-      get_mma_ops(),
-      ["row","col"],
-      ["row","col"],
-      [".satfinite", ""]):
+    for op, alayout, blayout, satf in product(
+        get_mma_ops(), ["row", "col"], ["row", "col"], [".satfinite", ""]
+    ):
 
-    if not is_mma_variant_supported(op, alayout, blayout, satf):
-      continue
+        if not is_mma_variant_supported(op, alayout, blayout, satf):
+            continue
 
-    for b1op in get_b1_ops(op.a.mma_type.ptx_type):
-      params = {
-          "aligned" : ".aligned" if ptx_version >= 63 else "",
-          "alayout" : alayout,
-          "blayout" : blayout,
-          "intrinsic_signature" : mma_signature(op),
-          "ptx_signature" : mma_ptx_signature(op),
-          "satf"  : satf,
-          "geom"  : op.a.geom,
-          "b1op"  : b1op
-      }
+        for b1op in get_b1_ops(op.a.mma_type.ptx_type):
+            params = {
+                "aligned": ".aligned" if ptx_version >= 63 else "",
+                "alayout": alayout,
+                "blayout": blayout,
+                "intrinsic_signature": mma_signature(op),
+                "ptx_signature": mma_ptx_signature(op),
+                "satf": satf,
+                "geom": op.a.geom,
+                "b1op": b1op,
+            }
 
-      intrinsic_template = mma_intrinsic_template
-      instruction_template = mma_instruction_template
+            intrinsic_template = mma_intrinsic_template
+            instruction_template = mma_instruction_template
 
-      generated_items.append(common_mma_test_gen(params, op,
-        intrinsic_template, instruction_template))
+            generated_items.append(
+                common_mma_test_gen(
+                    params, op, intrinsic_template, instruction_template
+                )
+            )
+
+    return generated_items
 
-  return generated_items
 
 # Append complete list of intrinsics and instructions we've generated tests for.
# Generate a set of checks to verify that we generated a sensible set of
# tests for the given combination of PTX and SM variants.
 #
 def gen_check_unsupported_ops(items):
-  print("; Complete list of intrinsics supported by PTX%d on sm_%d"
-        % (ptx_version, gpu_arch))
-  print("; INTRINSICS: {{^; INTRINSICS_LIST_BEGIN}}")
-  print("""
+    print(
+        "; Complete list of intrinsics supported by PTX%d on sm_%d"
+        % (ptx_version, gpu_arch)
+    )
+    print("; INTRINSICS: {{^; INTRINSICS_LIST_BEGIN}}")
+    print(
+        """
 
 ; NOEXTGEOM-NOT: {{m8n32|m32n8}}
 ; NOINT-NOT: .{{s32|s8}}
@@ -978,21 +1064,24 @@ def gen_check_unsupported_ops(items):
 ; PTX71MMA-DAG: mma.xor.popc.m16n8k256.row.col.b1
 ;
 
-""")
+"""
+    )
+
+    print("; INTRINSICS_LIST_BEGIN")
+    for intrinsic, instruction in sorted(items):
+        print("; ", intrinsic, " -> ", instruction, "")
+    print("; INTRINSICS_LIST_END")
+    print("; INTRINSICS: ; INTRINSICS_LIST_END")
 
-  print("; INTRINSICS_LIST_BEGIN")
-  for intrinsic, instruction in sorted(items):
-    print("; ", intrinsic, " -> ", instruction,"")
-  print("; INTRINSICS_LIST_END")
-  print("; INTRINSICS: ; INTRINSICS_LIST_END")
 
 def gen_tests():
-  items = gen_wmma_load_tests()
-  items += gen_wmma_store_tests()
-  items += gen_ldmatrix_tests()
-  items += gen_wmma_mma_tests()
-  items += gen_mma_tests()
-  gen_check_unsupported_ops(items)
+    items = gen_wmma_load_tests()
+    items += gen_wmma_store_tests()
+    items += gen_ldmatrix_tests()
+    items += gen_wmma_mma_tests()
+    items += gen_mma_tests()
+    gen_check_unsupported_ops(items)
+
 
 parser = argparse.ArgumentParser()
 parser.add_argument("--ptx", type=int, default=60)

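Every intrinsic and instruction name in the wmma.py hunks above is produced by string.Template substitution over a params dict. A small self-contained illustration of one expansion (the chosen values are one arbitrary supported combination, not exhaustive):

# Illustrative expansion of the load-intrinsic template from wmma.py.
from string import Template

intrinsic_template = (
    "llvm.nvvm.wmma.${geom}.load.${abc}.${layout}${stride}.${itype}.${pspace}"
)
params = {
    "geom": "m16n16k16",
    "abc": "a",
    "layout": "row",
    "stride": ".stride",
    "itype": "f16",
    "pspace": "p0i8",  # get_pspace("") for the generic address space
}
print(Template(intrinsic_template).substitute(params))
# -> llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16.p0i8
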
diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py
index 162cabdc3bb4b..7efa436fabb69 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py
@@ -70,41 +70,41 @@
 from __future__ import print_function
 
 branch_blocks = 10
-main_size = 0xffd8
+main_size = 0xFFD8
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i32 *%stop, i32 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i32 *%stop, i32 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i32 , i32 *%%bstop%d' % (i, i))
-    print('  %%btest%d = icmp eq i32 %%limit, %%bcur%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i32, i32 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i32 , i32 *%%bstop%d" % (i, i))
+    print("  %%btest%d = icmp eq i32 %%limit, %%bcur%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i32 , i32 *%%astop%d' % (i, i))
-    print('  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i32 , i32 *%%astop%d" % (i, i))
+    print("  %%atest%d = icmp eq i32 %%limit, %%acur%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

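The main block of branch-range-01.py pads the function body with volatile stores at pseudo-random offsets: a Fibonacci walk folded into a 500000-byte window above the first 4096 bytes, with the stored value cycling mod 256. A quick illustration of the first few (offset, value) pairs it emits:

# Illustrative only: the first (offset, value) pairs produced by the
# Fibonacci walk in branch-range-01.py's main block.
a, b = 1, 1
for _ in range(4):
    a, b = b, a + b
    print(4096 + b % 500000, a % 256)
# 4098 1
# 4099 2
# 4101 3
# 4104 5
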
diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-02.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-02.py
index a636309963b27..b4d17a26d5e53 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-02.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-02.py
@@ -60,25 +60,25 @@
 
 blocks = 256 + 4
 
-print('define void @f1(i8 *%base, i32 *%stop, i32 %limit) {')
-print('entry:')
-print('  br label %b0')
-print('')
+print("define void @f1(i8 *%base, i32 *%stop, i32 %limit) {")
+print("entry:")
+print("  br label %b0")
+print("")
 
 a, b = 1, 1
 for i in range(blocks):
     a, b = b, a + b
     value = a % 256
-    next = 'b%d' % (i + 1) if i + 1 < blocks else 'end'
-    other = 'end' if 2 * i < blocks else 'b0'
-    print('b%d:' % i)
-    print('  store volatile i8 %d, i8 *%%base' % value)
-    print('  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i))
-    print('  %%acur%d = load i32 , i32 *%%astop%d' % (i, i))
-    print('  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i))
-    print('  br i1 %%atest%d, label %%%s, label %%%s' % (i, other, next))
+    next = "b%d" % (i + 1) if i + 1 < blocks else "end"
+    other = "end" if 2 * i < blocks else "b0"
+    print("b%d:" % i)
+    print("  store volatile i8 %d, i8 *%%base" % value)
+    print("  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d" % (i, i))
+    print("  %%acur%d = load i32 , i32 *%%astop%d" % (i, i))
+    print("  %%atest%d = icmp eq i32 %%limit, %%acur%d" % (i, i))
+    print("  br i1 %%atest%d, label %%%s, label %%%s" % (i, other, next))
 
-print('')
-print('%s:' % next)
-print('  ret void')
-print('}')
+print("")
+print("%s:" % next)
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-03.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-03.py
index 025cbdfeff737..44b7d51b19d10 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-03.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-03.py
@@ -70,43 +70,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop, i32 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop, i32 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
-    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
-    print('  %%btest%d = icmp eq i32 %%limit, %%bext%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i8 , i8 *%%bstop%d" % (i, i))
+    print("  %%bext%d = sext i8 %%bcur%d to i32" % (i, i))
+    print("  %%btest%d = icmp eq i32 %%limit, %%bext%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
-    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
-    print('  %%atest%d = icmp eq i32 %%limit, %%aext%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i8 , i8 *%%astop%d" % (i, i))
+    print("  %%aext%d = sext i8 %%acur%d to i32" % (i, i))
+    print("  %%atest%d = icmp eq i32 %%limit, %%aext%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-04.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-04.py
index 9292dc9110bdf..5298de996c498 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-04.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-04.py
@@ -74,43 +74,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop, i64 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop, i64 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
-    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
-    print('  %%btest%d = icmp eq i64 %%limit, %%bext%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i8 , i8 *%%bstop%d" % (i, i))
+    print("  %%bext%d = sext i8 %%bcur%d to i64" % (i, i))
+    print("  %%btest%d = icmp eq i64 %%limit, %%bext%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
-    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
-    print('  %%atest%d = icmp eq i64 %%limit, %%aext%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i8 , i8 *%%astop%d" % (i, i))
+    print("  %%aext%d = sext i8 %%acur%d to i64" % (i, i))
+    print("  %%atest%d = icmp eq i64 %%limit, %%aext%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-05.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-05.py
index 4c01f90ec2bc6..66fa4ca418cc9 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-05.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-05.py
@@ -74,41 +74,41 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bcur%d = load i8 , i8 *%%stop' % i)
-    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
-    print('  %%btest%d = icmp slt i32 %%bext%d, %d' % (i, i, i + 50))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bcur%d = load i8 , i8 *%%stop" % i)
+    print("  %%bext%d = sext i8 %%bcur%d to i32" % (i, i))
+    print("  %%btest%d = icmp slt i32 %%bext%d, %d" % (i, i, i + 50))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%acur%d = load i8 , i8 *%%stop' % i)
-    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
-    print('  %%atest%d = icmp slt i32 %%aext%d, %d' % (i, i, i + 100))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%acur%d = load i8 , i8 *%%stop" % i)
+    print("  %%aext%d = sext i8 %%acur%d to i32" % (i, i))
+    print("  %%atest%d = icmp slt i32 %%aext%d, %d" % (i, i, i + 100))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-06.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-06.py
index 45538870f66a3..bdccdeb48a3a4 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-06.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-06.py
@@ -74,41 +74,41 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bcur%d = load i8 , i8 *%%stop' % i)
-    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
-    print('  %%btest%d = icmp slt i64 %%bext%d, %d' % (i, i, i + 50))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bcur%d = load i8 , i8 *%%stop" % i)
+    print("  %%bext%d = sext i8 %%bcur%d to i64" % (i, i))
+    print("  %%btest%d = icmp slt i64 %%bext%d, %d" % (i, i, i + 50))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%acur%d = load i8 , i8 *%%stop' % i)
-    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
-    print('  %%atest%d = icmp slt i64 %%aext%d, %d' % (i, i, i + 100))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%acur%d = load i8 , i8 *%%stop" % i)
+    print("  %%aext%d = sext i8 %%acur%d to i64" % (i, i))
+    print("  %%atest%d = icmp slt i64 %%aext%d, %d" % (i, i, i + 100))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-07.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-07.py
index 4fd72d68d7d37..a00318f8ac119 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-07.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-07.py
@@ -35,36 +35,40 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffd8
+main_size = 0xFFD8
 
-print('define void @f1(i8 *%base, i32 *%counts) {')
-print('entry:')
+print("define void @f1(i8 *%base, i32 *%counts) {")
+print("entry:")
 
 for i in range(branch_blocks - 1, -1, -1):
-    print('  %%countptr%d = getelementptr i32, i32 *%%counts, i64 %d' % (i, i))
-    print('  %%initcount%d = load i32 , i32 *%%countptr%d' % (i, i))
-    print('  br label %%loop%d' % i)
-    
-    print('loop%d:' % i)
-    block1 = 'entry' if i == branch_blocks - 1 else 'loop%d' % (i + 1)
-    block2 = 'loop0' if i == 0 else 'after%d' % (i - 1)
-    print(('  %%count%d = phi i32 [ %%initcount%d, %%%s ],'
-           ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2)))
+    print("  %%countptr%d = getelementptr i32, i32 *%%counts, i64 %d" % (i, i))
+    print("  %%initcount%d = load i32 , i32 *%%countptr%d" % (i, i))
+    print("  br label %%loop%d" % i)
+
+    print("loop%d:" % i)
+    block1 = "entry" if i == branch_blocks - 1 else "loop%d" % (i + 1)
+    block2 = "loop0" if i == 0 else "after%d" % (i - 1)
+    print(
+        (
+            "  %%count%d = phi i32 [ %%initcount%d, %%%s ],"
+            " [ %%nextcount%d, %%%s ]" % (i, i, block1, i, block2)
+        )
+    )
 
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%nextcount%d = add i32 %%count%d, -1' % (i, i))
-    print('  %%test%d = icmp ne i32 %%nextcount%d, 0' % (i, i))
-    print('  br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%nextcount%d = add i32 %%count%d, -1" % (i, i))
+    print("  %%test%d = icmp ne i32 %%nextcount%d, 0" % (i, i))
+    print("  br i1 %%test%d, label %%loop%d, label %%after%d" % (i, i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  ret void')
-print('}')
+print("  ret void")
+print("}")

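The branch-range-07 hunk (and the nearly identical branch-range-08 one below)
also shows black's line-length handling: a call that does not fit within the
default 88-column limit is exploded one bracket level per line, turning the
hand-wrapped

    print(('  %%count%d = phi i32 [ %%initcount%d, %%%s ],'
           ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2)))

into the fully indented form seen above. Note that the redundant inner
parentheses around the argument are preserved: black re-indents the
expression but leaves its tokens alone here.
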
diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-08.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-08.py
index 7e6e2bf9d039f..dbde991fd65cd 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-08.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-08.py
@@ -36,36 +36,40 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffd8
+main_size = 0xFFD8
 
-print('define void @f1(i8 *%base, i64 *%counts) {')
-print('entry:')
+print("define void @f1(i8 *%base, i64 *%counts) {")
+print("entry:")
 
 for i in range(branch_blocks - 1, -1, -1):
-    print('  %%countptr%d = getelementptr i64, i64 *%%counts, i64 %d' % (i, i))
-    print('  %%initcount%d = load i64 , i64 *%%countptr%d' % (i, i))
-    print('  br label %%loop%d' % i)
-    
-    print('loop%d:' % i)
-    block1 = 'entry' if i == branch_blocks - 1 else 'loop%d' % (i + 1)
-    block2 = 'loop0' if i == 0 else 'after%d' % (i - 1)
-    print(('  %%count%d = phi i64 [ %%initcount%d, %%%s ],'
-           ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2)))
+    print("  %%countptr%d = getelementptr i64, i64 *%%counts, i64 %d" % (i, i))
+    print("  %%initcount%d = load i64 , i64 *%%countptr%d" % (i, i))
+    print("  br label %%loop%d" % i)
+
+    print("loop%d:" % i)
+    block1 = "entry" if i == branch_blocks - 1 else "loop%d" % (i + 1)
+    block2 = "loop0" if i == 0 else "after%d" % (i - 1)
+    print(
+        (
+            "  %%count%d = phi i64 [ %%initcount%d, %%%s ],"
+            " [ %%nextcount%d, %%%s ]" % (i, i, block1, i, block2)
+        )
+    )
 
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%nextcount%d = add i64 %%count%d, -1' % (i, i))
-    print('  %%test%d = icmp ne i64 %%nextcount%d, 0' % (i, i))
-    print('  br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%nextcount%d = add i64 %%count%d, -1" % (i, i))
+    print("  %%test%d = icmp ne i64 %%nextcount%d, 0" % (i, i))
+    print("  br i1 %%test%d, label %%loop%d, label %%after%d" % (i, i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  ret void')
-print('}')
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-09.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-09.py
index cfdf31d43b636..a2e63686f1ec0 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-09.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-09.py
@@ -70,43 +70,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop, i32 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop, i32 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
-    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
-    print('  %%btest%d = icmp ult i32 %%limit, %%bext%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i8 , i8 *%%bstop%d" % (i, i))
+    print("  %%bext%d = sext i8 %%bcur%d to i32" % (i, i))
+    print("  %%btest%d = icmp ult i32 %%limit, %%bext%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
-    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
-    print('  %%atest%d = icmp ult i32 %%limit, %%aext%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i8 , i8 *%%astop%d" % (i, i))
+    print("  %%aext%d = sext i8 %%acur%d to i32" % (i, i))
+    print("  %%atest%d = icmp ult i32 %%limit, %%aext%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-10.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-10.py
index 4b7ba24388b7f..3710e8ebb80b8 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-10.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-10.py
@@ -74,43 +74,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop, i64 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop, i64 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
-    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
-    print('  %%btest%d = icmp ult i64 %%limit, %%bext%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i8 , i8 *%%bstop%d" % (i, i))
+    print("  %%bext%d = sext i8 %%bcur%d to i64" % (i, i))
+    print("  %%btest%d = icmp ult i64 %%limit, %%bext%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
-    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
-    print('  %%atest%d = icmp ult i64 %%limit, %%aext%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i8 , i8 *%%astop%d" % (i, i))
+    print("  %%aext%d = sext i8 %%acur%d to i64" % (i, i))
+    print("  %%atest%d = icmp ult i64 %%limit, %%aext%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-11.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-11.py
index 1d330a3c412ce..edfac100a739c 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-11.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-11.py
@@ -90,43 +90,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffc6
+main_size = 0xFFC6
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i32 *%stopa, i32 *%stopb) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i32 *%stopa, i32 *%stopb) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bcur%da = load i32 , i32 *%%stopa' % i)
-    print('  %%bcur%db = load i32 , i32 *%%stopb' % i)
-    print('  %%bsub%d = sub i32 %%bcur%da, %%bcur%db' % (i, i, i))
-    print('  %%btest%d = icmp ult i32 %%bsub%d, %d' % (i, i, i + 50))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bcur%da = load i32 , i32 *%%stopa" % i)
+    print("  %%bcur%db = load i32 , i32 *%%stopb" % i)
+    print("  %%bsub%d = sub i32 %%bcur%da, %%bcur%db" % (i, i, i))
+    print("  %%btest%d = icmp ult i32 %%bsub%d, %d" % (i, i, i + 50))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%acur%da = load i32 , i32 *%%stopa' % i)
-    print('  %%acur%db = load i32 , i32 *%%stopb' % i)
-    print('  %%asub%d = sub i32 %%acur%da, %%acur%db' % (i, i, i))
-    print('  %%atest%d = icmp ult i32 %%asub%d, %d' % (i, i, i + 100))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%acur%da = load i32 , i32 *%%stopa" % i)
+    print("  %%acur%db = load i32 , i32 *%%stopb" % i)
+    print("  %%asub%d = sub i32 %%acur%da, %%acur%db" % (i, i, i))
+    print("  %%atest%d = icmp ult i32 %%asub%d, %d" % (i, i, i + 100))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-12.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-12.py
index 56155077e1f4c..2d0d875095da8 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-12.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-12.py
@@ -90,43 +90,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffb4
+main_size = 0xFFB4
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i64 *%stopa, i64 *%stopb) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i64 *%stopa, i64 *%stopb) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bcur%da = load i64 , i64 *%%stopa' % i)
-    print('  %%bcur%db = load i64 , i64 *%%stopb' % i)
-    print('  %%bsub%d = sub i64 %%bcur%da, %%bcur%db' % (i, i, i))
-    print('  %%btest%d = icmp ult i64 %%bsub%d, %d' % (i, i, i + 50))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bcur%da = load i64 , i64 *%%stopa" % i)
+    print("  %%bcur%db = load i64 , i64 *%%stopb" % i)
+    print("  %%bsub%d = sub i64 %%bcur%da, %%bcur%db" % (i, i, i))
+    print("  %%btest%d = icmp ult i64 %%bsub%d, %d" % (i, i, i + 50))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%acur%da = load i64 , i64 *%%stopa' % i)
-    print('  %%acur%db = load i64 , i64 *%%stopb' % i)
-    print('  %%asub%d = sub i64 %%acur%da, %%acur%db' % (i, i, i))
-    print('  %%atest%d = icmp ult i64 %%asub%d, %d' % (i, i, i + 100))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%acur%da = load i64 , i64 *%%stopa" % i)
+    print("  %%acur%db = load i64 , i64 *%%stopb" % i)
+    print("  %%asub%d = sub i64 %%acur%da, %%acur%db" % (i, i, i))
+    print("  %%atest%d = icmp ult i64 %%asub%d, %d" % (i, i, i + 100))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-13.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-13.py
index 84410dbfba083..5fdc63bff2a71 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-13.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-13.py
@@ -24,18 +24,20 @@
 
 num = 11000
 
-print('define void @f1() {')
-print('entry:')
-print('  br label %block')
-print('')
-print('block:')
+print("define void @f1() {")
+print("entry:")
+print("  br label %block")
+print("")
+print("block:")
 
 for i in range(num):
-    print('  tail call i64 asm "lang\\09$0,$2,$1\\0A", "=d,=*Q,d,*Q"(i32* elementtype(i32) undef, i32 undef, i32* elementtype(i32) undef)')
+    print(
+        '  tail call i64 asm "lang\\09$0,$2,$1\\0A", "=d,=*Q,d,*Q"(i32* elementtype(i32) undef, i32 undef, i32* elementtype(i32) undef)'
+    )
 
-print('  br label %block')
+print("  br label %block")
 
-print('')
-print('exit:')
-print('  ret void')
-print('}')
+print("")
+print("exit:")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/spill-01.py b/llvm/test/CodeGen/SystemZ/Large/spill-01.py
index c1b094e36e5f7..42e8d9f5a05c6 100644
--- a/llvm/test/CodeGen/SystemZ/Large/spill-01.py
+++ b/llvm/test/CodeGen/SystemZ/Large/spill-01.py
@@ -23,21 +23,21 @@
 
 count = 500
 
-print('declare void @foo()')
-print('')
-print('define void @f1(i64 *%base0, i64 *%base1) {')
+print("declare void @foo()")
+print("")
+print("define void @f1(i64 *%base0, i64 *%base1) {")
 
 for i in range(count):
-    print('  %%ptr%d = getelementptr i64, i64 *%%base%d, i64 %d' % (i, i % 2, i / 2))
-    print('  %%val%d = load i64 , i64 *%%ptr%d' % (i, i))
-    print('')
+    print("  %%ptr%d = getelementptr i64, i64 *%%base%d, i64 %d" % (i, i % 2, i / 2))
+    print("  %%val%d = load i64 , i64 *%%ptr%d" % (i, i))
+    print("")
 
-print('  call void @foo()')
-print('')
+print("  call void @foo()")
+print("")
 
 for i in range(count):
-    print('  store i64 %%val%d, i64 *%%ptr%d' % (i, i))
+    print("  store i64 %%val%d, i64 *%%ptr%d" % (i, i))
 
-print('')
-print('  ret void')
-print('}')
+print("")
+print("  ret void")
+print("}")

diff --git a/llvm/test/CodeGen/SystemZ/Large/spill-02.py b/llvm/test/CodeGen/SystemZ/Large/spill-02.py
index aa2900b3d0489..d2ce2cfdbb3b1 100644
--- a/llvm/test/CodeGen/SystemZ/Large/spill-02.py
+++ b/llvm/test/CodeGen/SystemZ/Large/spill-02.py
@@ -24,53 +24,53 @@
 
 args = int((8168 - 160) / 8 + (5 - 1))
 
-print('declare i64 *@foo(i64 *%s)' % (', i64' * args))
-print('declare void @bar(i64 *)')
-print('')
-print('define i64 @f1(i64 %foo) {')
-print('entry:')
+print("declare i64 *@foo(i64 *%s)" % (", i64" * args))
+print("declare void @bar(i64 *)")
+print("")
+print("define i64 @f1(i64 %foo) {")
+print("entry:")
 
 # Make the allocation big, so that it goes at the top of the frame.
-print('  %array = alloca [1000 x i64]')
-print('  %area = getelementptr [1000 x i64], [1000 x i64] *%array, i64 0, i64 0')
-print('  %%base = call i64 *@foo(i64 *%%area%s)' % (', i64 0' * args))
-print('')
+print("  %array = alloca [1000 x i64]")
+print("  %area = getelementptr [1000 x i64], [1000 x i64] *%array, i64 0, i64 0")
+print("  %%base = call i64 *@foo(i64 *%%area%s)" % (", i64 0" * args))
+print("")
 
 # Make sure all GPRs are used.  One is needed for the stack pointer and
 # another for %base, so we need 14 live values.
 count = 14
 for i in range(count):
-    print('  %%ptr%d = getelementptr i64, i64 *%%base, i64 %d' % (i, i / 2))
-    print('  %%val%d = load volatile i64 , i64 *%%ptr%d' % (i, i))
-    print('')
+    print("  %%ptr%d = getelementptr i64, i64 *%%base, i64 %d" % (i, i / 2))
+    print("  %%val%d = load volatile i64 , i64 *%%ptr%d" % (i, i))
+    print("")
 
 # Encourage the register allocator to give preference to these %vals
 # by using them several times.
 for j in range(4):
     for i in range(count):
-        print('  store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i))
-    print('')
+        print("  store volatile i64 %%val%d, i64 *%%ptr%d" % (i, i))
+    print("")
 
 # Copy the incoming argument, which we expect to be spilled, to the frame
 # index for the alloca area.  Also throw in a volatile store, so that this
 # block cannot be reordered with the surrounding code.
-print('  %cond = icmp eq i64 %val0, %val1')
-print('  br i1 %cond, label %skip, label %fallthru')
-print('')
-print('fallthru:')
-print('  store i64 %foo, i64 *%area')
-print('  store volatile i64 %val0, i64 *%ptr0')
-print('  br label %skip')
-print('')
-print('skip:')
+print("  %cond = icmp eq i64 %val0, %val1")
+print("  br i1 %cond, label %skip, label %fallthru")
+print("")
+print("fallthru:")
+print("  store i64 %foo, i64 *%area")
+print("  store volatile i64 %val0, i64 *%ptr0")
+print("  br label %skip")
+print("")
+print("skip:")
 
 # Use each %val a few more times to emphasise the point, and to make sure
 # that they are live across the store of %foo.
 for j in range(4):
     for i in range(count):
-        print('  store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i))
-    print('')
+        print("  store volatile i64 %%val%d, i64 *%%ptr%d" % (i, i))
+    print("")
 
-print('  call void @bar(i64 *%area)')
-print('  ret i64 0')
-print('}')
+print("  call void @bar(i64 *%area)")
+print("  ret i64 0")
+print("}")

diff --git a/llvm/test/CodeGen/WebAssembly/multivalue-stackify.py b/llvm/test/CodeGen/WebAssembly/multivalue-stackify.py
index 50aac5aba8758..7200facdfcf2a 100755
--- a/llvm/test/CodeGen/WebAssembly/multivalue-stackify.py
+++ b/llvm/test/CodeGen/WebAssembly/multivalue-stackify.py
@@ -36,183 +36,182 @@
 
 
 def get_num_defs(program):
-  num_defs = 0
-  for _, defs in program:
-    num_defs += len(defs)
-  return num_defs
+    num_defs = 0
+    for _, defs in program:
+        num_defs += len(defs)
+    return num_defs
 
 
 def possible_ops(program):
-  program_defs = get_num_defs(program)
-  for num_defs in range(MAX_PROGRAM_DEFS - program_defs + 1):
-    for num_uses in range(MAX_OP_USES + 1):
-      if num_defs == 0 and num_uses == 0:
-        continue
-      for uses in product(range(program_defs), repeat=num_uses):
-        yield uses, tuple(program_defs + i for i in range(num_defs))
+    program_defs = get_num_defs(program)
+    for num_defs in range(MAX_PROGRAM_DEFS - program_defs + 1):
+        for num_uses in range(MAX_OP_USES + 1):
+            if num_defs == 0 and num_uses == 0:
+                continue
+            for uses in product(range(program_defs), repeat=num_uses):
+                yield uses, tuple(program_defs + i for i in range(num_defs))
 
 
 def generate_programs():
-  queue = deque()
-  queue.append([])
-  program_id = 0
-  while True:
-    program = queue.popleft()
-    if len(program) == MAX_PROGRAM_OPS:
-      break
-    for op in possible_ops(program):
-      program_id += 1
-      new_program = program + [op]
-      queue.append(new_program)
-      yield program_id, new_program
+    queue = deque()
+    queue.append([])
+    program_id = 0
+    while True:
+        program = queue.popleft()
+        if len(program) == MAX_PROGRAM_OPS:
+            break
+        for op in possible_ops(program):
+            program_id += 1
+            new_program = program + [op]
+            queue.append(new_program)
+            yield program_id, new_program
 
 
 def get_num_terminal_ops(program):
-  num_terminal_ops = 0
-  for _, defs in program:
-    if len(defs) == 0:
-      num_terminal_ops += 1
-  return num_terminal_ops
+    num_terminal_ops = 0
+    for _, defs in program:
+        if len(defs) == 0:
+            num_terminal_ops += 1
+    return num_terminal_ops
 
 
 def get_max_uses(program):
-  num_uses = [0] * MAX_PROGRAM_DEFS
-  for uses, _ in program:
-    for u in uses:
-      num_uses[u] += 1
-  return max(num_uses)
+    num_uses = [0] * MAX_PROGRAM_DEFS
+    for uses, _ in program:
+        for u in uses:
+            num_uses[u] += 1
+    return max(num_uses)
 
 
 def has_unused_op(program):
-  used = [False] * MAX_PROGRAM_DEFS
-  for uses, defs in program[::-1]:
-    if defs and all(not used[d] for d in defs):
-      return True
-    for u in uses:
-      used[u] = True
-  return False
+    used = [False] * MAX_PROGRAM_DEFS
+    for uses, defs in program[::-1]:
+        if defs and all(not used[d] for d in defs):
+            return True
+        for u in uses:
+            used[u] = True
+    return False
 
 
 def has_multivalue_use(program):
-  is_multi = [False] * MAX_PROGRAM_DEFS
-  for uses, defs in program:
-    if any(is_multi[u] for u in uses):
-      return True
-    if len(defs) >= 2:
-      for d in defs:
-        is_multi[d] = True
-  return False
+    is_multi = [False] * MAX_PROGRAM_DEFS
+    for uses, defs in program:
+        if any(is_multi[u] for u in uses):
+            return True
+        if len(defs) >= 2:
+            for d in defs:
+                is_multi[d] = True
+    return False
 
 
 def has_mvp_use(program):
-  is_mvp = [False] * MAX_PROGRAM_DEFS
-  for uses, defs in program:
-    if uses and all(is_mvp[u] for u in uses):
-      return True
-    if len(defs) <= 1:
-      if any(is_mvp[u] for u in uses):
-        return True
-      for d in defs:
-        is_mvp[d] = True
-  return False
+    is_mvp = [False] * MAX_PROGRAM_DEFS
+    for uses, defs in program:
+        if uses and all(is_mvp[u] for u in uses):
+            return True
+        if len(defs) <= 1:
+            if any(is_mvp[u] for u in uses):
+                return True
+            for d in defs:
+                is_mvp[d] = True
+    return False
 
 
 def is_interesting(program):
-  # Allow only multivalue single-op programs
-  if len(program) == 1:
-    return len(program[0][1]) > 1
+    # Allow only multivalue single-op programs
+    if len(program) == 1:
+        return len(program[0][1]) > 1
 
-  # Reject programs where the last two instructions are identical
-  if len(program) >= 2 and program[-1][0] == program[-2][0]:
-    return False
+    # Reject programs where the last two instructions are identical
+    if len(program) >= 2 and program[-1][0] == program[-2][0]:
+        return False
 
-  # Reject programs with too many ops that don't produce values
-  if get_num_terminal_ops(program) > 2:
-    return False
+    # Reject programs with too many ops that don't produce values
+    if get_num_terminal_ops(program) > 2:
+        return False
 
-  # The third use of a value is no more interesting than the second
-  if get_max_uses(program) >= 3:
-    return False
+    # The third use of a value is no more interesting than the second
+    if get_max_uses(program) >= 3:
+        return False
 
-  # Reject nontrivial programs that have unused instructions
-  if has_unused_op(program):
-    return False
+    # Reject nontrivial programs that have unused instructions
+    if has_unused_op(program):
+        return False
 
-  # Reject programs that have boring MVP uses of MVP defs
-  if has_mvp_use(program):
-    return False
+    # Reject programs that have boring MVP uses of MVP defs
+    if has_mvp_use(program):
+        return False
 
-  # Otherwise if it has multivalue usage it is interesting
-  return has_multivalue_use(program)
+    # Otherwise if it has multivalue usage it is interesting
+    return has_multivalue_use(program)
 
 
 def make_llvm_type(num_defs):
-  if num_defs == 0:
-    return 'void'
-  else:
-    return '{' + ', '.join(['i32'] * num_defs) + '}'
+    if num_defs == 0:
+        return "void"
+    else:
+        return "{" + ", ".join(["i32"] * num_defs) + "}"
 
 
 def make_llvm_op_name(num_uses, num_defs):
-  return f'op_{num_uses}_to_{num_defs}'
+    return f"op_{num_uses}_to_{num_defs}"
 
 
 def make_llvm_args(first_use, num_uses):
-  return ', '.join([f'i32 %t{first_use + i}' for i in range(num_uses)])
+    return ", ".join([f"i32 %t{first_use + i}" for i in range(num_uses)])
 
 
 def print_llvm_program(program, name):
-  tmp = 0
-  def_data = []
-  print(f'define void @{name}() {{')
-  for uses, defs in program:
-    first_arg = tmp
-    # Extract operands
-    for use in uses:
-      ret_type, var, idx = def_data[use]
-      print(f'  %t{tmp} = extractvalue {ret_type} %t{var}, {idx}')
-      tmp += 1
-    # Print instruction
-    assignment = ''
-    if len(defs) > 0:
-      assignment = f'%t{tmp} = '
-      result_var = tmp
-      tmp += 1
-    ret_type = make_llvm_type(len(defs))
-    op_name = make_llvm_op_name(len(uses), len(defs))
-    args = make_llvm_args(first_arg, len(uses))
-    print(f'  {assignment}call {ret_type} @{op_name}({args})')
-    # Update def_data
-    for i in range(len(defs)):
-      def_data.append((ret_type, result_var, i))
-  print('  ret void')
-  print('}')
+    tmp = 0
+    def_data = []
+    print(f"define void @{name}() {{")
+    for uses, defs in program:
+        first_arg = tmp
+        # Extract operands
+        for use in uses:
+            ret_type, var, idx = def_data[use]
+            print(f"  %t{tmp} = extractvalue {ret_type} %t{var}, {idx}")
+            tmp += 1
+        # Print instruction
+        assignment = ""
+        if len(defs) > 0:
+            assignment = f"%t{tmp} = "
+            result_var = tmp
+            tmp += 1
+        ret_type = make_llvm_type(len(defs))
+        op_name = make_llvm_op_name(len(uses), len(defs))
+        args = make_llvm_args(first_arg, len(uses))
+        print(f"  {assignment}call {ret_type} @{op_name}({args})")
+        # Update def_data
+        for i in range(len(defs)):
+            def_data.append((ret_type, result_var, i))
+    print("  ret void")
+    print("}")
 
 
 def print_header():
-  print('; NOTE: Test functions have been generated by multivalue-stackify.py.')
-  print()
-  print('; RUN: llc < %s -verify-machineinstrs -mattr=+multivalue',
-        '| FileCheck %s')
-  print()
-  print('; Test that the multivalue stackification works')
-  print()
-  print('target triple = "wasm32-unknown-unknown"')
-  print()
-  for num_uses in range(MAX_OP_USES + 1):
-    for num_defs in range(MAX_PROGRAM_DEFS + 1):
-      if num_uses == 0 and num_defs == 0:
-        continue
-      ret_type = make_llvm_type(num_defs)
-      op_name = make_llvm_op_name(num_uses, num_defs)
-      args = make_llvm_args(0, num_uses)
-      print(f'declare {ret_type} @{op_name}({args})')
-  print()
-
-
-if __name__ == '__main__':
-  print_header()
-  for i, program in generate_programs():
-    if is_interesting(program):
-      print_llvm_program(program, 'f' + str(i))
-      print()
+    print("; NOTE: Test functions have been generated by multivalue-stackify.py.")
+    print()
+    print("; RUN: llc < %s -verify-machineinstrs -mattr=+multivalue", "| FileCheck %s")
+    print()
+    print("; Test that the multivalue stackification works")
+    print()
+    print('target triple = "wasm32-unknown-unknown"')
+    print()
+    for num_uses in range(MAX_OP_USES + 1):
+        for num_defs in range(MAX_PROGRAM_DEFS + 1):
+            if num_uses == 0 and num_defs == 0:
+                continue
+            ret_type = make_llvm_type(num_defs)
+            op_name = make_llvm_op_name(num_uses, num_defs)
+            args = make_llvm_args(0, num_uses)
+            print(f"declare {ret_type} @{op_name}({args})")
+    print()
+
+
+if __name__ == "__main__":
+    print_header()
+    for i, program in generate_programs():
+        if is_interesting(program):
+            print_llvm_program(program, "f" + str(i))
+            print()

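multivalue-stackify.py is the first file in this batch that used 2-space
indentation; most of its hunk is black re-indenting every block to the fixed
4 spaces. The change is whitespace only, as in the first function above:

    # before: 2-space indentation
    def get_num_defs(program):
      num_defs = 0
      for _, defs in program:
        num_defs += len(defs)
      return num_defs

    # after black: 4-space indentation, identical behavior
    def get_num_defs(program):
        num_defs = 0
        for _, defs in program:
            num_defs += len(defs)
        return num_defs
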
diff --git a/llvm/test/MC/COFF/bigobj.py b/llvm/test/MC/COFF/bigobj.py
index f7c000d20d345..ca6eb98a33af1 100644
--- a/llvm/test/MC/COFF/bigobj.py
+++ b/llvm/test/MC/COFF/bigobj.py
@@ -22,8 +22,11 @@
 # CHECK-NEXT: }
 
 for i in range(0, num_sections):
-	print("""	.section	.bss,"bw",discard,_b%d
+    print(
+        """	.section	.bss,"bw",discard,_b%d
 	.globl	_b%d                     # @b%d
 _b%d:
 	.byte	0                       # 0x0
-""" % (i, i, i, i))
+"""
+        % (i, i, i, i)
+    )

diff --git a/llvm/test/Other/opt-bisect-helper.py b/llvm/test/Other/opt-bisect-helper.py
index d2ab4ce096f93..86c0851272e2c 100755
--- a/llvm/test/Other/opt-bisect-helper.py
+++ b/llvm/test/Other/opt-bisect-helper.py
@@ -9,12 +9,12 @@
 
 parser = argparse.ArgumentParser()
 
-parser.add_argument('--start', type=int, default=0)
-parser.add_argument('--end', type=int, default=(1 << 32))
-parser.add_argument('--optcmd', default=("opt"))
-parser.add_argument('--filecheckcmd', default=("FileCheck"))
-parser.add_argument('--prefix', default=("CHECK-BISECT"))
-parser.add_argument('--test', default=(""))
+parser.add_argument("--start", type=int, default=0)
+parser.add_argument("--end", type=int, default=(1 << 32))
+parser.add_argument("--optcmd", default=("opt"))
+parser.add_argument("--filecheckcmd", default=("FileCheck"))
+parser.add_argument("--prefix", default=("CHECK-BISECT"))
+parser.add_argument("--test", default=(""))
 
 args = parser.parse_args()
 
@@ -24,9 +24,9 @@
 opt_command = [args.optcmd, "-O2", "-opt-bisect-limit=%(count)s", "-S", args.test]
 check_command = [args.filecheckcmd, args.test, "--check-prefix=%s" % args.prefix]
 last = None
-while start != end and start != end-1:
-    count = int(round(start + (end - start)/2))
-    cmd = [x % {'count':count} for x in opt_command]
+while start != end and start != end - 1:
+    count = int(round(start + (end - start) / 2))
+    cmd = [x % {"count": count} for x in opt_command]
     print("opt: " + str(cmd))
     opt_result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     filecheck_result = subprocess.Popen(check_command, stdin=opt_result.stdout)

diff --git a/llvm/test/TableGen/JSON-check.py b/llvm/test/TableGen/JSON-check.py
index b6bc4ee6c909b..296a5dcfc4fdc 100644
--- a/llvm/test/TableGen/JSON-check.py
+++ b/llvm/test/TableGen/JSON-check.py
@@ -21,11 +21,11 @@
             prefix_pos = line.index(prefix)
         except ValueError:
             continue
-        check_expr = line[prefix_pos + len(prefix):]
+        check_expr = line[prefix_pos + len(prefix) :]
 
         try:
             exception = None
-            result = eval(check_expr, {"data":data})
+            result = eval(check_expr, {"data": data})
         except Exception:
             result = False
             exception = traceback.format_exc().splitlines()[-1]
@@ -34,13 +34,16 @@
             sys.stderr.write(
                 "{file}:{line:d}: check threw exception: {expr}\n"
                 "{file}:{line:d}: exception was: {exception}\n".format(
-                    file=testfile, line=lineno,
-                    expr=check_expr, exception=exception))
+                    file=testfile, line=lineno, expr=check_expr, exception=exception
+                )
+            )
             fails += 1
         elif not result:
             sys.stderr.write(
                 "{file}:{line:d}: check returned False: {expr}\n".format(
-                    file=testfile, line=lineno, expr=check_expr))
+                    file=testfile, line=lineno, expr=check_expr
+                )
+            )
             fails += 1
         else:
             passes += 1

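The JSON-check.py hunk illustrates one of black's less obvious rules: PEP 8
treats the colon in a slice like a binary operator when a bound is a complex
expression, so black writes a space before it in
line[prefix_pos + len(prefix) :] while leaving simple slices such as
splitlines()[-1] untouched. A small sketch, with illustrative names that are
not taken from the test:

    text = "some CHECK: expression here"
    marker = "CHECK:"
    head = text[:4]                                  # simple bound: no space
    expr = text[text.index(marker) + len(marker) :]  # complex bound: spaced colon
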
diff --git a/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py b/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py
index 2f8084523e672..36f237a31d6b8 100644
--- a/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py
+++ b/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py
@@ -3,19 +3,18 @@
 
 
 def main(args):
+    class Advisor:
+        to_return = False
 
-  class Advisor:
-    to_return = False
+        def advice(self, _):
+            # The advice will be a sequence of yes/no/yes/no/...
+            # see ../interactive-mode.ll
+            self.to_return = not self.to_return
+            return int(self.to_return)
 
-    def advice(self, _):
-      # The advice will be a sequence of yes/no/yes/no/...
-      # see ../interactive-mode.ll
-      self.to_return = not self.to_return
-      return int(self.to_return)
+    a = Advisor()
+    interactive_host.run_interactive(args[0], a.advice, args[1:])
 
-  a = Advisor()
-  interactive_host.run_interactive(args[0], a.advice, args[1:])
 
-
-if __name__ == '__main__':
-  main(sys.argv[1:])
+if __name__ == "__main__":
+    main(sys.argv[1:])

diff --git a/llvm/test/Unit/lit.cfg.py b/llvm/test/Unit/lit.cfg.py
index 61f60de73889b..f15c30dbcdb0a 100644
--- a/llvm/test/Unit/lit.cfg.py
+++ b/llvm/test/Unit/lit.cfg.py
@@ -8,52 +8,53 @@
 import lit.formats
 
 # name: The name of this test suite.
-config.name = 'LLVM-Unit'
+config.name = "LLVM-Unit"
 
 # suffixes: A list of file extensions to treat as test files.
 config.suffixes = []
 
 # test_source_root: The root path where tests are located.
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.llvm_obj_root, 'unittests')
+config.test_exec_root = os.path.join(config.llvm_obj_root, "unittests")
 config.test_source_root = config.test_exec_root
 
 # testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, 'Tests')
+config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, "Tests")
 
 # Propagate the temp directory. Windows requires this because it uses \Windows\
 # if none of these are present.
-if 'TMP' in os.environ:
-    config.environment['TMP'] = os.environ['TMP']
-if 'TEMP' in os.environ:
-    config.environment['TEMP'] = os.environ['TEMP']
+if "TMP" in os.environ:
+    config.environment["TMP"] = os.environ["TMP"]
+if "TEMP" in os.environ:
+    config.environment["TEMP"] = os.environ["TEMP"]
 
 # Propagate HOME as it can be used to override incorrect homedir in passwd
 # that causes the tests to fail.
-if 'HOME' in os.environ:
-    config.environment['HOME'] = os.environ['HOME']
+if "HOME" in os.environ:
+    config.environment["HOME"] = os.environ["HOME"]
 
 # Propagate sanitizer options.
 for var in [
-    'ASAN_SYMBOLIZER_PATH',
-    'HWASAN_SYMBOLIZER_PATH',
-    'MSAN_SYMBOLIZER_PATH',
-    'TSAN_SYMBOLIZER_PATH',
-    'UBSAN_SYMBOLIZER_PATH',
-    'ASAN_OPTIONS',
-    'HWASAN_OPTIONS',
-    'MSAN_OPTIONS',
-    'TSAN_OPTIONS',
-    'UBSAN_OPTIONS',
+    "ASAN_SYMBOLIZER_PATH",
+    "HWASAN_SYMBOLIZER_PATH",
+    "MSAN_SYMBOLIZER_PATH",
+    "TSAN_SYMBOLIZER_PATH",
+    "UBSAN_SYMBOLIZER_PATH",
+    "ASAN_OPTIONS",
+    "HWASAN_OPTIONS",
+    "MSAN_OPTIONS",
+    "TSAN_OPTIONS",
+    "UBSAN_OPTIONS",
 ]:
     if var in os.environ:
         config.environment[var] = os.environ[var]
 
 # Win32 seeks DLLs along %PATH%.
-if sys.platform in ['win32', 'cygwin'] and os.path.isdir(config.shlibdir):
-    config.environment['PATH'] = os.path.pathsep.join((
-            config.shlibdir, config.environment['PATH']))
+if sys.platform in ["win32", "cygwin"] and os.path.isdir(config.shlibdir):
+    config.environment["PATH"] = os.path.pathsep.join(
+        (config.shlibdir, config.environment["PATH"])
+    )
 
 # Win32 may use %SYSTEMDRIVE% during file system shell operations, so propagate.
-if sys.platform == 'win32' and 'SYSTEMDRIVE' in os.environ:
-    config.environment['SYSTEMDRIVE'] = os.environ['SYSTEMDRIVE']
+if sys.platform == "win32" and "SYSTEMDRIVE" in os.environ:
+    config.environment["SYSTEMDRIVE"] = os.environ["SYSTEMDRIVE"]

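The one-per-line layout of the sanitizer variable list above is also what
black's "magic trailing comma" enforces in general: a collection literal that
ends in a trailing comma is kept exploded, one element per line, even when it
would fit within the line limit, while the same literal without the trailing
comma is collapsed when it fits. A sketch with placeholder values:

    # trailing comma pins the list open
    opts = [
        "ASAN_OPTIONS",
        "TSAN_OPTIONS",
    ]

    # no trailing comma: black joins it onto one line when it fits
    opts = ["ASAN_OPTIONS", "TSAN_OPTIONS"]
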
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index 59bfd8ecfd13a..fc682bae5093c 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -15,81 +15,82 @@
 from lit.llvm.subst import ToolSubst
 
 # name: The name of this test suite.
-config.name = 'LLVM'
+config.name = "LLVM"
 
 # testFormat: The test format to use to interpret tests.
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # suffixes: A list of file extensions to treat as test files. This is overridden
 # by individual lit.local.cfg files in the test subdirectories.
-config.suffixes = ['.ll', '.c', '.test', '.txt', '.s', '.mir', '.yaml']
+config.suffixes = [".ll", ".c", ".test", ".txt", ".s", ".mir", ".yaml"]
 
 # excludes: A list of directories to exclude from the testsuite. The 'Inputs'
 # subdirectories contain auxiliary inputs for various tests in their parent
 # directories.
-config.excludes = ['Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt']
+config.excludes = ["Inputs", "CMakeLists.txt", "README.txt", "LICENSE.txt"]
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = os.path.dirname(__file__)
 
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.llvm_obj_root, 'test')
+config.test_exec_root = os.path.join(config.llvm_obj_root, "test")
 
 # Tweak the PATH to include the tools dir.
-llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
+llvm_config.with_environment("PATH", config.llvm_tools_dir, append_path=True)
 
 # Propagate some variables from the host environment.
-llvm_config.with_system_environment(
-    ['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP'])
+llvm_config.with_system_environment(["HOME", "INCLUDE", "LIB", "TMP", "TEMP"])
 
 
 # Set up OCAMLPATH to include newly built OCaml libraries.
-top_ocaml_lib = os.path.join(config.llvm_lib_dir, 'ocaml')
-llvm_ocaml_lib = os.path.join(top_ocaml_lib, 'llvm')
+top_ocaml_lib = os.path.join(config.llvm_lib_dir, "ocaml")
+llvm_ocaml_lib = os.path.join(top_ocaml_lib, "llvm")
 
-llvm_config.with_system_environment('OCAMLPATH')
-llvm_config.with_environment('OCAMLPATH', top_ocaml_lib, append_path=True)
-llvm_config.with_environment('OCAMLPATH', llvm_ocaml_lib, append_path=True)
+llvm_config.with_system_environment("OCAMLPATH")
+llvm_config.with_environment("OCAMLPATH", top_ocaml_lib, append_path=True)
+llvm_config.with_environment("OCAMLPATH", llvm_ocaml_lib, append_path=True)
 
-llvm_config.with_system_environment('CAML_LD_LIBRARY_PATH')
-llvm_config.with_environment(
-    'CAML_LD_LIBRARY_PATH', llvm_ocaml_lib, append_path=True)
+llvm_config.with_system_environment("CAML_LD_LIBRARY_PATH")
+llvm_config.with_environment("CAML_LD_LIBRARY_PATH", llvm_ocaml_lib, append_path=True)
 
 # Set up OCAMLRUNPARAM to enable backtraces in OCaml tests.
-llvm_config.with_environment('OCAMLRUNPARAM', 'b')
+llvm_config.with_environment("OCAMLRUNPARAM", "b")
 
 # Provide the path to asan runtime lib 'libclang_rt.asan_osx_dynamic.dylib' if
 # available. This is darwin specific since it's currently only needed on darwin.
 
 
 def get_asan_rtlib():
-    if not 'Address' in config.llvm_use_sanitizer or \
-       not 'Darwin' in config.host_os or \
-       not 'x86' in config.host_triple:
-        return ''
+    if (
+        not "Address" in config.llvm_use_sanitizer
+        or not "Darwin" in config.host_os
+        or not "x86" in config.host_triple
+    ):
+        return ""
     try:
         import glob
     except:
-        print('glob module not found, skipping get_asan_rtlib() lookup')
-        return ''
+        print("glob module not found, skipping get_asan_rtlib() lookup")
+        return ""
     # The libclang_rt.asan_osx_dynamic.dylib path is obtained using the relative
     # path from the host cc.
-    host_lib_dir = os.path.join(os.path.dirname(config.host_cc), '../lib')
-    asan_dylib_dir_pattern = host_lib_dir + \
-        '/clang/*/lib/darwin/libclang_rt.asan_osx_dynamic.dylib'
+    host_lib_dir = os.path.join(os.path.dirname(config.host_cc), "../lib")
+    asan_dylib_dir_pattern = (
+        host_lib_dir + "/clang/*/lib/darwin/libclang_rt.asan_osx_dynamic.dylib"
+    )
     found_dylibs = glob.glob(asan_dylib_dir_pattern)
     if len(found_dylibs) != 1:
-        return ''
+        return ""
     return found_dylibs[0]
 
 
 llvm_config.use_default_substitutions()
 
 # Add site-specific substitutions.
-config.substitutions.append(('%llvmshlibdir', config.llvm_shlib_dir))
-config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
-config.substitutions.append(('%pluginext', config.llvm_plugin_ext))
-config.substitutions.append(('%exeext', config.llvm_exe_ext))
+config.substitutions.append(("%llvmshlibdir", config.llvm_shlib_dir))
+config.substitutions.append(("%shlibext", config.llvm_shlib_ext))
+config.substitutions.append(("%pluginext", config.llvm_plugin_ext))
+config.substitutions.append(("%exeext", config.llvm_exe_ext))
 
 
 lli_args = []
@@ -98,15 +99,14 @@ def get_asan_rtlib():
 # we don't support COFF in MCJIT well enough for the tests, force ELF format on
 # Windows.  FIXME: the process target triple should be used here, but this is
 # difficult to obtain on Windows.
-if re.search(r'cygwin|windows-gnu|windows-msvc', config.host_triple):
-    lli_args = ['-mtriple=' + config.host_triple + '-elf']
+if re.search(r"cygwin|windows-gnu|windows-msvc", config.host_triple):
+    lli_args = ["-mtriple=" + config.host_triple + "-elf"]
 
 llc_args = []
 
 # Similarly, have a macro to use llc with DWARF even when the host is Windows
-if re.search(r'windows-msvc', config.target_triple):
-    llc_args = [' -mtriple=' +
-                config.target_triple.replace('-msvc', '-gnu')]
+if re.search(r"windows-msvc", config.target_triple):
+    llc_args = [" -mtriple=" + config.target_triple.replace("-msvc", "-gnu")]
 
 # Provide the path to asan runtime lib if available. On darwin, this lib needs
 # to be loaded via DYLD_INSERT_LIBRARIES before libLTO.dylib in case the files
@@ -114,95 +114,171 @@ def get_asan_rtlib():
 ld64_cmd = config.ld64_executable
 asan_rtlib = get_asan_rtlib()
 if asan_rtlib:
-    ld64_cmd = 'DYLD_INSERT_LIBRARIES={} {}'.format(asan_rtlib, ld64_cmd)
+    ld64_cmd = "DYLD_INSERT_LIBRARIES={} {}".format(asan_rtlib, ld64_cmd)
 if config.osx_sysroot:
-    ld64_cmd = '{} -syslibroot {}'.format(ld64_cmd, config.osx_sysroot)
-
-ocamlc_command = '%s ocamlc -cclib -L%s %s' % (
-    config.ocamlfind_executable, config.llvm_lib_dir, config.ocaml_flags)
-ocamlopt_command = 'true'
+    ld64_cmd = "{} -syslibroot {}".format(ld64_cmd, config.osx_sysroot)
+
+ocamlc_command = "%s ocamlc -cclib -L%s %s" % (
+    config.ocamlfind_executable,
+    config.llvm_lib_dir,
+    config.ocaml_flags,
+)
+ocamlopt_command = "true"
 if config.have_ocamlopt:
-    ocamlopt_command = '%s ocamlopt -cclib -L%s -cclib -Wl,-rpath,%s %s' % (
-        config.ocamlfind_executable, config.llvm_lib_dir, config.llvm_lib_dir, config.ocaml_flags)
-
-opt_viewer_cmd = '%s %s/tools/opt-viewer/opt-viewer.py' % (sys.executable, config.llvm_src_root)
+    ocamlopt_command = "%s ocamlopt -cclib -L%s -cclib -Wl,-rpath,%s %s" % (
+        config.ocamlfind_executable,
+        config.llvm_lib_dir,
+        config.llvm_lib_dir,
+        config.ocaml_flags,
+    )
+
+opt_viewer_cmd = "%s %s/tools/opt-viewer/opt-viewer.py" % (
+    sys.executable,
+    config.llvm_src_root,
+)
 
 llvm_original_di_preservation_cmd = os.path.join(
-    config.llvm_src_root,'utils', 'llvm-original-di-preservation.py')
+    config.llvm_src_root, "utils", "llvm-original-di-preservation.py"
+)
 config.substitutions.append(
-    ('%llvm-original-di-preservation', "'%s' %s" % (
-        config.python_executable, llvm_original_di_preservation_cmd)))
+    (
+        "%llvm-original-di-preservation",
+        "'%s' %s" % (config.python_executable, llvm_original_di_preservation_cmd),
+    )
+)
 
-llvm_locstats_tool = os.path.join(config.llvm_tools_dir, 'llvm-locstats')
+llvm_locstats_tool = os.path.join(config.llvm_tools_dir, "llvm-locstats")
 config.substitutions.append(
-    ('%llvm-locstats', "'%s' %s" % (config.python_executable, llvm_locstats_tool)))
+    ("%llvm-locstats", "'%s' %s" % (config.python_executable, llvm_locstats_tool))
+)
 config.llvm_locstats_used = os.path.exists(llvm_locstats_tool)
 
 tools = [
-    ToolSubst('%llvm', FindTool('llvm'), unresolved='ignore'),
-    ToolSubst('%lli', FindTool('lli'), post='.', extra_args=lli_args),
-    ToolSubst('%llc_dwarf', FindTool('llc'), extra_args=llc_args),
-    ToolSubst('%gold', config.gold_executable, unresolved='ignore'),
-    ToolSubst('%ld64', ld64_cmd, unresolved='ignore'),
-    ToolSubst('%ocamlc', ocamlc_command, unresolved='ignore'),
-    ToolSubst('%ocamlopt', ocamlopt_command, unresolved='ignore'),
-    ToolSubst('%opt-viewer', opt_viewer_cmd),
-    ToolSubst('%llvm-objcopy', FindTool('llvm-objcopy')),
-    ToolSubst('%llvm-strip', FindTool('llvm-strip')),
-    ToolSubst('%llvm-install-name-tool', FindTool('llvm-install-name-tool')),
-    ToolSubst('%llvm-bitcode-strip', FindTool('llvm-bitcode-strip')),
-    ToolSubst('%split-file', FindTool('split-file')),
+    ToolSubst("%llvm", FindTool("llvm"), unresolved="ignore"),
+    ToolSubst("%lli", FindTool("lli"), post=".", extra_args=lli_args),
+    ToolSubst("%llc_dwarf", FindTool("llc"), extra_args=llc_args),
+    ToolSubst("%gold", config.gold_executable, unresolved="ignore"),
+    ToolSubst("%ld64", ld64_cmd, unresolved="ignore"),
+    ToolSubst("%ocamlc", ocamlc_command, unresolved="ignore"),
+    ToolSubst("%ocamlopt", ocamlopt_command, unresolved="ignore"),
+    ToolSubst("%opt-viewer", opt_viewer_cmd),
+    ToolSubst("%llvm-objcopy", FindTool("llvm-objcopy")),
+    ToolSubst("%llvm-strip", FindTool("llvm-strip")),
+    ToolSubst("%llvm-install-name-tool", FindTool("llvm-install-name-tool")),
+    ToolSubst("%llvm-bitcode-strip", FindTool("llvm-bitcode-strip")),
+    ToolSubst("%split-file", FindTool("split-file")),
 ]
 
 # FIXME: Why do we have both `lli` and `%lli` that do slightly different things?
-tools.extend([
-    'dsymutil', 'lli', 'lli-child-target', 'llvm-ar', 'llvm-as',
-    'llvm-addr2line', 'llvm-bcanalyzer', 'llvm-bitcode-strip', 'llvm-config',
-    'llvm-cov', 'llvm-cxxdump', 'llvm-cvtres', 'llvm-debuginfod-find',
-    'llvm-debuginfo-analyzer',
-    'llvm-diff', 'llvm-dis', 'llvm-dwarfdump', 'llvm-dwarfutil', 'llvm-dlltool',
-    'llvm-exegesis', 'llvm-extract', 'llvm-isel-fuzzer', 'llvm-ifs',
-    'llvm-install-name-tool', 'llvm-jitlink', 'llvm-opt-fuzzer', 'llvm-lib',
-    'llvm-link', 'llvm-lto', 'llvm-lto2', 'llvm-mc', 'llvm-mca',
-    'llvm-modextract', 'llvm-nm', 'llvm-objcopy', 'llvm-objdump', 'llvm-otool',
-    'llvm-pdbutil', 'llvm-profdata', 'llvm-profgen', 'llvm-ranlib', 'llvm-rc', 'llvm-readelf',
-    'llvm-readobj', 'llvm-remark-size-diff', 'llvm-rtdyld', 'llvm-sim',
-    'llvm-size', 'llvm-split', 'llvm-stress', 'llvm-strings', 'llvm-strip',
-    'llvm-tblgen', 'llvm-tapi-diff', 'llvm-undname', 'llvm-windres',
-    'llvm-c-test', 'llvm-cxxfilt', 'llvm-xray', 'yaml2obj', 'obj2yaml',
-    'yaml-bench', 'verify-uselistorder', 'bugpoint', 'llc', 'llvm-symbolizer',
-    'opt', 'sancov', 'sanstats', 'llvm-remarkutil'])
+tools.extend(
+    [
+        "dsymutil",
+        "lli",
+        "lli-child-target",
+        "llvm-ar",
+        "llvm-as",
+        "llvm-addr2line",
+        "llvm-bcanalyzer",
+        "llvm-bitcode-strip",
+        "llvm-config",
+        "llvm-cov",
+        "llvm-cxxdump",
+        "llvm-cvtres",
+        "llvm-debuginfod-find",
+        "llvm-debuginfo-analyzer",
+        "llvm-diff",
+        "llvm-dis",
+        "llvm-dwarfdump",
+        "llvm-dwarfutil",
+        "llvm-dlltool",
+        "llvm-exegesis",
+        "llvm-extract",
+        "llvm-isel-fuzzer",
+        "llvm-ifs",
+        "llvm-install-name-tool",
+        "llvm-jitlink",
+        "llvm-opt-fuzzer",
+        "llvm-lib",
+        "llvm-link",
+        "llvm-lto",
+        "llvm-lto2",
+        "llvm-mc",
+        "llvm-mca",
+        "llvm-modextract",
+        "llvm-nm",
+        "llvm-objcopy",
+        "llvm-objdump",
+        "llvm-otool",
+        "llvm-pdbutil",
+        "llvm-profdata",
+        "llvm-profgen",
+        "llvm-ranlib",
+        "llvm-rc",
+        "llvm-readelf",
+        "llvm-readobj",
+        "llvm-remark-size-diff",
+        "llvm-rtdyld",
+        "llvm-sim",
+        "llvm-size",
+        "llvm-split",
+        "llvm-stress",
+        "llvm-strings",
+        "llvm-strip",
+        "llvm-tblgen",
+        "llvm-tapi-diff",
+        "llvm-undname",
+        "llvm-windres",
+        "llvm-c-test",
+        "llvm-cxxfilt",
+        "llvm-xray",
+        "yaml2obj",
+        "obj2yaml",
+        "yaml-bench",
+        "verify-uselistorder",
+        "bugpoint",
+        "llc",
+        "llvm-symbolizer",
+        "opt",
+        "sancov",
+        "sanstats",
+        "llvm-remarkutil",
+    ]
+)
 
 # The following tools are optional
-tools.extend([
-    ToolSubst('llvm-mt', unresolved='ignore'),
-    ToolSubst('llvm-debuginfod', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch3', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch4', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch5', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch6', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch7', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch8', unresolved='ignore'),
-    ToolSubst('LLJITWithThinLTOSummaries', unresolved='ignore'),
-    ToolSubst('LLJITWithRemoteDebugging', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsBasicUsage', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsAddObjectFile', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsRemovableCode', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsLazy', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsVeryLazy', unresolved='ignore'),
-    ToolSubst('dxil-dis', unresolved='ignore')])
+tools.extend(
+    [
+        ToolSubst("llvm-mt", unresolved="ignore"),
+        ToolSubst("llvm-debuginfod", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch3", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch4", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch5", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch6", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch7", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch8", unresolved="ignore"),
+        ToolSubst("LLJITWithThinLTOSummaries", unresolved="ignore"),
+        ToolSubst("LLJITWithRemoteDebugging", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsBasicUsage", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsAddObjectFile", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsRemovableCode", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsLazy", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsVeryLazy", unresolved="ignore"),
+        ToolSubst("dxil-dis", unresolved="ignore"),
+    ]
+)
 
 # Find (major, minor) version of ptxas
 def ptxas_version(ptxas):
-    ptxas_cmd = subprocess.Popen([ptxas, '--version'], stdout=subprocess.PIPE)
-    ptxas_out = ptxas_cmd.stdout.read().decode('ascii')
+    ptxas_cmd = subprocess.Popen([ptxas, "--version"], stdout=subprocess.PIPE)
+    ptxas_out = ptxas_cmd.stdout.read().decode("ascii")
     ptxas_cmd.wait()
-    match = re.search('release (\d+)\.(\d+)', ptxas_out)
+    match = re.search("release (\d+)\.(\d+)", ptxas_out)
     if match:
         return (int(match.group(1)), int(match.group(2)))
-    print('couldn\'t determine ptxas version')
+    print("couldn't determine ptxas version")
     return None
 
+
 # Enable %ptxas and %ptxas-verify tools.
 # %ptxas-verify defaults to sm_60 architecture. It can be overriden
 # by specifying required one, for instance: %ptxas-verify -arch=sm_80.
@@ -213,10 +289,22 @@ def enable_ptxas(ptxas_executable):
         # versions, so add a feature for every known version prior to
         # the current one.
         ptxas_known_versions = [
-            (9, 0), (9, 1), (9, 2),
-            (10, 0), (10, 1), (10, 2),
-            (11, 0), (11, 1), (11, 2), (11, 3), (11, 4), (11, 5), (11, 6),
-            (11, 7), (11, 8), (12, 0),
+            (9, 0),
+            (9, 1),
+            (9, 2),
+            (10, 0),
+            (10, 1),
+            (10, 2),
+            (11, 0),
+            (11, 1),
+            (11, 2),
+            (11, 3),
+            (11, 4),
+            (11, 5),
+            (11, 6),
+            (11, 7),
+            (11, 8),
+            (12, 0),
         ]
 
         def version_int(ver):
@@ -227,23 +315,29 @@ def version_int(ver):
         min_version = ptxas_known_versions[0]
         if version_int(version) < version_int(min_version):
             print(
-                'Warning: ptxas version {}.{} is not supported'.format(
-                    version[0], version[1]))
+                "Warning: ptxas version {}.{} is not supported".format(
+                    version[0], version[1]
+                )
+            )
             return
 
         for known_version in ptxas_known_versions:
             if version_int(known_version) <= version_int(version):
                 major, minor = known_version
-                config.available_features.add(
-                    'ptxas-{}.{}'.format(major, minor))
+                config.available_features.add("ptxas-{}.{}".format(major, minor))
 
-    config.available_features.add('ptxas')
-    tools.extend([ToolSubst('%ptxas', ptxas_executable),
-                  ToolSubst('%ptxas-verify', '{} -arch=sm_60 -c -'.format(
-                      ptxas_executable))])
+    config.available_features.add("ptxas")
+    tools.extend(
+        [
+            ToolSubst("%ptxas", ptxas_executable),
+            ToolSubst("%ptxas-verify", "{} -arch=sm_60 -c -".format(ptxas_executable)),
+        ]
+    )
 
-ptxas_executable = \
-    os.environ.get('LLVM_PTXAS_EXECUTABLE', None) or config.ptxas_executable
+
+ptxas_executable = (
+    os.environ.get("LLVM_PTXAS_EXECUTABLE", None) or config.ptxas_executable
+)
 if ptxas_executable:
     enable_ptxas(ptxas_executable)
 
@@ -254,63 +348,78 @@ def version_int(ver):
 config.targets = frozenset(config.targets_to_build.split())
 
 for arch in config.targets_to_build.split():
-    config.available_features.add(arch.lower() + '-registered-target')
+    config.available_features.add(arch.lower() + "-registered-target")
 
 # Features
 known_arches = ["x86_64", "mips64", "ppc64", "aarch64"]
-if (config.host_ldflags.find("-m32") < 0
-    and any(config.llvm_host_triple.startswith(x) for x in known_arches)):
-  config.available_features.add("llvm-64-bits")
+if config.host_ldflags.find("-m32") < 0 and any(
+    config.llvm_host_triple.startswith(x) for x in known_arches
+):
+    config.available_features.add("llvm-64-bits")
 
 config.available_features.add("host-byteorder-" + sys.byteorder + "-endian")
 
-if sys.platform in ['win32']:
+if sys.platform in ["win32"]:
     # ExecutionEngine, no weak symbols in COFF.
-    config.available_features.add('uses_COFF')
+    config.available_features.add("uses_COFF")
 else:
     # Others/can-execute.txt
-    config.available_features.add('can-execute')
+    config.available_features.add("can-execute")
 
 # Loadable module
 if config.has_plugins:
-    config.available_features.add('plugins')
+    config.available_features.add("plugins")
 
 if config.build_examples:
-    config.available_features.add('examples')
+    config.available_features.add("examples")
 
 if config.linked_bye_extension:
-    config.substitutions.append(('%llvmcheckext', 'CHECK-EXT'))
-    config.substitutions.append(('%loadbye', ''))
-    config.substitutions.append(('%loadnewpmbye', ''))
+    config.substitutions.append(("%llvmcheckext", "CHECK-EXT"))
+    config.substitutions.append(("%loadbye", ""))
+    config.substitutions.append(("%loadnewpmbye", ""))
 else:
-    config.substitutions.append(('%llvmcheckext', 'CHECK-NOEXT'))
-    config.substitutions.append(('%loadbye',
-                                 '-load={}/Bye{}'.format(config.llvm_shlib_dir,
-                                                         config.llvm_shlib_ext)))
-    config.substitutions.append(('%loadnewpmbye',
-                                 '-load-pass-plugin={}/Bye{}'
-                                 .format(config.llvm_shlib_dir,
-                                         config.llvm_shlib_ext)))
+    config.substitutions.append(("%llvmcheckext", "CHECK-NOEXT"))
+    config.substitutions.append(
+        (
+            "%loadbye",
+            "-load={}/Bye{}".format(config.llvm_shlib_dir, config.llvm_shlib_ext),
+        )
+    )
+    config.substitutions.append(
+        (
+            "%loadnewpmbye",
+            "-load-pass-plugin={}/Bye{}".format(
+                config.llvm_shlib_dir, config.llvm_shlib_ext
+            ),
+        )
+    )
 
 if config.linked_exampleirtransforms_extension:
-    config.substitutions.append(('%loadexampleirtransforms',''))
+    config.substitutions.append(("%loadexampleirtransforms", ""))
 else:
-    config.substitutions.append(('%loadexampleirtransforms',
-                                 '-load-pass-plugin={}/ExampleIRTransforms{}'
-                                 .format(config.llvm_shlib_dir,
-                                 config.llvm_shlib_ext)))
+    config.substitutions.append(
+        (
+            "%loadexampleirtransforms",
+            "-load-pass-plugin={}/ExampleIRTransforms{}".format(
+                config.llvm_shlib_dir, config.llvm_shlib_ext
+            ),
+        )
+    )
 
 # Static libraries are not built if BUILD_SHARED_LIBS is ON.
 if not config.build_shared_libs and not config.link_llvm_dylib:
-    config.available_features.add('static-libs')
+    config.available_features.add("static-libs")
 
 if config.link_llvm_dylib:
-    config.available_features.add('llvm-dylib')
+    config.available_features.add("llvm-dylib")
     config.substitutions.append(
-        ('%llvmdylib',
-         '{}/libLLVM-{}{}'.format(config.llvm_shlib_dir,
-                                  config.llvm_dylib_version,
-                                  config.llvm_shlib_ext)))
+        (
+            "%llvmdylib",
+            "{}/libLLVM-{}{}".format(
+                config.llvm_shlib_dir, config.llvm_dylib_version, config.llvm_shlib_ext
+            ),
+        )
+    )
 
 if config.have_tf_aot:
     config.available_features.add("have_tf_aot")
@@ -324,86 +433,93 @@ def version_int(ver):
 if config.llvm_raevict_model_autogenerated:
     config.available_features.add("llvm_raevict_model_autogenerated")
 
+
 def have_cxx_shared_library():
-    readobj_exe = lit.util.which('llvm-readobj', config.llvm_tools_dir)
+    readobj_exe = lit.util.which("llvm-readobj", config.llvm_tools_dir)
     if not readobj_exe:
-        print('llvm-readobj not found')
+        print("llvm-readobj not found")
         return False
 
     try:
         readobj_cmd = subprocess.Popen(
-            [readobj_exe, '--needed-libs', readobj_exe], stdout=subprocess.PIPE)
+            [readobj_exe, "--needed-libs", readobj_exe], stdout=subprocess.PIPE
+        )
     except OSError:
-        print('could not exec llvm-readobj')
+        print("could not exec llvm-readobj")
         return False
 
-    readobj_out = readobj_cmd.stdout.read().decode('ascii')
+    readobj_out = readobj_cmd.stdout.read().decode("ascii")
     readobj_cmd.wait()
 
-    regex = re.compile(r'(libc\+\+|libstdc\+\+|msvcp).*\.(so|dylib|dll)')
+    regex = re.compile(r"(libc\+\+|libstdc\+\+|msvcp).*\.(so|dylib|dll)")
     needed_libs = False
     for line in readobj_out.splitlines():
-        if 'NeededLibraries [' in line:
+        if "NeededLibraries [" in line:
             needed_libs = True
-        if ']' in line:
+        if "]" in line:
             needed_libs = False
         if needed_libs and regex.search(line.lower()):
             return True
     return False
 
+
 if have_cxx_shared_library():
-    config.available_features.add('cxx-shared-library')
+    config.available_features.add("cxx-shared-library")
 
 if config.libcxx_used:
-    config.available_features.add('libcxx-used')
+    config.available_features.add("libcxx-used")
 
 # LLVM can be configured with an empty default triple
 # Some tests are "generic" and require a valid default triple
 if config.target_triple:
-    config.available_features.add('default_triple')
+    config.available_features.add("default_triple")
     # Direct object generation
     if not config.target_triple.startswith(("nvptx", "xcore")):
-        config.available_features.add('object-emission')
+        config.available_features.add("object-emission")
 
 # Allow checking for specific details in the host triple
 if config.host_triple:
     config.available_features.add('host=%s' % config.host_triple)
 
 if config.have_llvm_driver:
-  config.available_features.add('llvm-driver')
+    config.available_features.add("llvm-driver")
 
 import subprocess
 
 
 def have_ld_plugin_support():
-    if not os.path.exists(os.path.join(config.llvm_shlib_dir, 'LLVMgold' + config.llvm_shlib_ext)):
+    if not os.path.exists(
+        os.path.join(config.llvm_shlib_dir, "LLVMgold" + config.llvm_shlib_ext)
+    ):
         return False
 
     ld_cmd = subprocess.Popen(
-        [config.gold_executable, '--help'], stdout=subprocess.PIPE, env={'LANG': 'C'})
+        [config.gold_executable, "--help"], stdout=subprocess.PIPE, env={"LANG": "C"}
+    )
     ld_out = ld_cmd.stdout.read().decode()
     ld_cmd.wait()
 
-    if not '-plugin' in ld_out:
+    if not "-plugin" in ld_out:
         return False
 
     # check that the used emulations are supported.
-    emu_line = [l for l in ld_out.split('\n') if 'supported emulations' in l]
+    emu_line = [l for l in ld_out.split("\n") if "supported emulations" in l]
     if len(emu_line) != 1:
         return False
     emu_line = emu_line[0]
-    fields = emu_line.split(':')
+    fields = emu_line.split(":")
     if len(fields) != 3:
         return False
     emulations = fields[2].split()
-    if 'elf_x86_64' not in emulations:
+    if "elf_x86_64" not in emulations:
         return False
-    if 'elf32ppc' in emulations:
-        config.available_features.add('ld_emu_elf32ppc')
+    if "elf32ppc" in emulations:
+        config.available_features.add("ld_emu_elf32ppc")
 
     ld_version = subprocess.Popen(
-        [config.gold_executable, '--version'], stdout=subprocess.PIPE, env={'LANG': 'C'})
-    if not 'GNU gold' in ld_version.stdout.read().decode():
+        [config.gold_executable, "--version"], stdout=subprocess.PIPE, env={"LANG": "C"}
+    )
+    if not "GNU gold" in ld_version.stdout.read().decode():
         return False
     ld_version.wait()
 
@@ -411,94 +527,113 @@ def have_ld_plugin_support():
 
 
 if have_ld_plugin_support():
-    config.available_features.add('ld_plugin')
+    config.available_features.add("ld_plugin")
 
 
 def have_ld64_plugin_support():
-    if not os.path.exists(os.path.join(config.llvm_shlib_dir, 'libLTO' + config.llvm_shlib_ext)):
+    if not os.path.exists(
+        os.path.join(config.llvm_shlib_dir, "libLTO" + config.llvm_shlib_ext)
+    ):
         return False
 
-    if config.ld64_executable == '':
+    if config.ld64_executable == "":
         return False
 
-    ld_cmd = subprocess.Popen(
-        [config.ld64_executable, '-v'], stderr=subprocess.PIPE)
+    ld_cmd = subprocess.Popen([config.ld64_executable, "-v"], stderr=subprocess.PIPE)
     ld_out = ld_cmd.stderr.read().decode()
     ld_cmd.wait()
 
-    if 'ld64' not in ld_out or 'LTO' not in ld_out:
+    if "ld64" not in ld_out or "LTO" not in ld_out:
         return False
 
     return True
 
 
 if have_ld64_plugin_support():
-    config.available_features.add('ld64_plugin')
+    config.available_features.add("ld64_plugin")
 
 # Ask llvm-config about asserts
 llvm_config.feature_config(
-    [('--assertion-mode', {'ON': 'asserts'}),
-     ('--build-mode', {'[Dd][Ee][Bb][Uu][Gg]': 'debug'})])
-
-if 'darwin' == sys.platform:
-    cmd = ['sysctl', 'hw.optional.fma']
+    [
+        ("--assertion-mode", {"ON": "asserts"}),
+        ("--build-mode", {"[Dd][Ee][Bb][Uu][Gg]": "debug"}),
+    ]
+)
+
+if "darwin" == sys.platform:
+    cmd = ["sysctl", "hw.optional.fma"]
     sysctl_cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE)
 
     # Non zero return, probably a permission issue
     if sysctl_cmd.wait():
         print(
-          "Warning: sysctl exists but calling \"{}\" failed, defaulting to no fma3.".format(
-          " ".join(cmd)))
+            'Warning: sysctl exists but calling "{}" failed, defaulting to no fma3.'.format(
+                " ".join(cmd)
+            )
+        )
     else:
-        result = sysctl_cmd.stdout.read().decode('ascii')
-        if 'hw.optional.fma: 1' in result:
-            config.available_features.add('fma3')
+        result = sysctl_cmd.stdout.read().decode("ascii")
+        if "hw.optional.fma: 1" in result:
+            config.available_features.add("fma3")
 
 # .debug_frame is not emitted for targeting Windows x64, aarch64/arm64, AIX, or Apple Silicon Mac.
-if not re.match(r'^(x86_64|aarch64|arm64|powerpc|powerpc64).*-(windows-gnu|windows-msvc|aix)', config.target_triple) \
-    and not re.match(r'^arm64(e)?-apple-(macos|darwin)', config.target_triple):
-    config.available_features.add('debug_frame')
+if not re.match(
+    r"^(x86_64|aarch64|arm64|powerpc|powerpc64).*-(windows-gnu|windows-msvc|aix)",
+    config.target_triple,
+) and not re.match(r"^arm64(e)?-apple-(macos|darwin)", config.target_triple):
+    config.available_features.add("debug_frame")
 
 if config.have_libxar:
-    config.available_features.add('xar')
+    config.available_features.add("xar")
 
 if config.enable_threads:
-    config.available_features.add('thread_support')
+    config.available_features.add("thread_support")
 
 if config.have_libxml2:
-    config.available_features.add('libxml2')
+    config.available_features.add("libxml2")
 
 if config.have_curl:
-    config.available_features.add('curl')
+    config.available_features.add("curl")
 
 if config.have_httplib:
-    config.available_features.add('httplib')
+    config.available_features.add("httplib")
 
 if config.have_opt_viewer_modules:
-    config.available_features.add('have_opt_viewer_modules')
+    config.available_features.add("have_opt_viewer_modules")
 
 if config.expensive_checks:
-    config.available_features.add('expensive_checks')
+    config.available_features.add("expensive_checks")
 
 if "MemoryWithOrigins" in config.llvm_use_sanitizer:
-    config.available_features.add('use_msan_with_origins')
+    config.available_features.add("use_msan_with_origins")
+
 
 def exclude_unsupported_files_for_aix(dirname):
-   for filename in os.listdir(dirname):
-       source_path = os.path.join( dirname, filename)
-       if os.path.isdir(source_path):
-           continue
-       f = open(source_path, 'r')
-       try:
-          data = f.read()
-          # 64-bit object files are not supported on AIX, so exclude the tests.
-          if ('-emit-obj' in data or '-filetype=obj' in data) and '64' in config.target_triple:
-            config.excludes += [ filename ]
-       finally:
-          f.close()
-
-if 'aix' in config.target_triple:
-    for directory in ('/CodeGen/X86', '/DebugInfo', '/DebugInfo/X86', '/DebugInfo/Generic', '/LTO/X86', '/Linker'):
+    for filename in os.listdir(dirname):
+        source_path = os.path.join(dirname, filename)
+        if os.path.isdir(source_path):
+            continue
+        f = open(source_path, "r")
+        try:
+            data = f.read()
+            # 64-bit object files are not supported on AIX, so exclude the tests.
+            if (
+                "-emit-obj" in data or "-filetype=obj" in data
+            ) and "64" in config.target_triple:
+                config.excludes += [filename]
+        finally:
+            f.close()
+
+
+if "aix" in config.target_triple:
+    for directory in (
+        "/CodeGen/X86",
+        "/DebugInfo",
+        "/DebugInfo/X86",
+        "/DebugInfo/Generic",
+        "/LTO/X86",
+        "/Linker",
+    ):
         exclude_unsupported_files_for_aix(config.test_source_root + directory)
 
 # Some tools support an environment variable "OBJECT_MODE" on AIX OS, which
@@ -507,5 +642,5 @@ def exclude_unsupported_files_for_aix(dirname):
 # objects only. In order to not affect most test cases, which expect to support
 # 32-bit and 64-bit objects by default, set the environment variable
 # "OBJECT_MODE" to 'any' by default on AIX OS.
-if 'system-aix' in config.available_features:
-    config.environment['OBJECT_MODE'] = 'any'
+if "system-aix" in config.available_features:
+    config.environment["OBJECT_MODE"] = "any"
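
Among the lit.cfg.py hunks above, the ptxas block adds one lit feature per known ptxas release at or below the detected version, so a test can require any minimum version. A self-contained sketch of that arithmetic (the version_int body here is an assumption consistent with how it is used above; the detected version is illustrative):

    def version_int(ver):
        return ver[0] * 100 + ver[1]

    known_versions = [(9, 0), (9, 1), (9, 2), (10, 0), (11, 0), (11, 1), (11, 2)]
    detected = (11, 1)
    features = [
        "ptxas-{}.{}".format(major, minor)
        for (major, minor) in known_versions
        if version_int((major, minor)) <= version_int(detected)
    ]
    # -> ['ptxas-9.0', 'ptxas-9.1', 'ptxas-9.2', 'ptxas-10.0', 'ptxas-11.0', 'ptxas-11.1']
    print(features)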

diff --git a/llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py b/llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py
index 56fa2d08a0897..fa823e61f9397 100644
--- a/llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py
+++ b/llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py
@@ -4,20 +4,22 @@
 import sys
 import threading
 
+
 class TrivialHandler(http.server.BaseHTTPRequestHandler):
-  def do_GET(self):
-    self.send_response(501)
+    def do_GET(self):
+        self.send_response(501)
+
+    def log_request(self, *args, **kwargs):
+        print(self.requestline)
+        print(self.headers)
 
-  def log_request(self, *args, **kwargs):
-    print(self.requestline)
-    print(self.headers)
 
-httpd = http.server.HTTPServer(('', 0),  TrivialHandler)
+httpd = http.server.HTTPServer(("", 0), TrivialHandler)
 port = httpd.socket.getsockname()[1]
 
 try:
-  t = threading.Thread(target=httpd.serve_forever).start()
-  os.environ['DEBUGINFOD_URLS'] =f'http://localhost:{port}'
-  subprocess.run(sys.argv[1:], capture_output = True)
+    t = threading.Thread(target=httpd.serve_forever).start()
+    os.environ["DEBUGINFOD_URLS"] = f"http://localhost:{port}"
+    subprocess.run(sys.argv[1:], capture_output=True)
 finally:
-  httpd.shutdown()
+    httpd.shutdown()

diff --git a/llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py b/llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py
index ddf9873614785..4c9ffb6ef8e28 100755
--- a/llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py
+++ b/llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py
@@ -7,17 +7,17 @@
 
 f = open(sys.argv[1], "rb")
 byte = f.read(1)
-while byte != b'':
-    if byte == b'\x00':
+while byte != b"":
+    if byte == b"\x00":
         sys.stdout.write("version: ")
-    elif byte == b'\x10':
+    elif byte == b"\x10":
         sys.stdout.write("input-file: ")
-    elif byte == b'\x11':
+    elif byte == b"\x11":
         sys.stdout.write("not-found: ")
-    elif byte == b'\x40':
+    elif byte == b"\x40":
         sys.stdout.write("output-file: ")
     byte = f.read(1)
-    while byte != b'\x00':
+    while byte != b"\x00":
         sys.stdout.write(byte.decode("ascii"))
         byte = f.read(1)
     sys.stdout.write("\n")
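
DependencyDump.py above decodes a simple tagged byte stream: one tag byte (0x00 version, 0x10 input-file, 0x11 not-found, 0x40 output-file) followed by a NUL-terminated ASCII string. A sketch that fabricates a matching input for trying the dumper by hand (file name and record values are made up):

    # make_deps.py - emit records in the tag + string + NUL layout read above.
    records = [(b"\x00", b"1.0"), (b"\x10", b"foo.o"), (b"\x40", b"libfoo.a")]
    with open("deps.bin", "wb") as out:
        for tag, payload in records:
            out.write(tag + payload + b"\x00")

Running the dumper on deps.bin should then print version:, input-file: and output-file: lines.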

diff --git a/llvm/test/tools/llvm-objcopy/Inputs/ungzip.py b/llvm/test/tools/llvm-objcopy/Inputs/ungzip.py
index c7b1de96b25c4..7824e9ababb3c 100644
--- a/llvm/test/tools/llvm-objcopy/Inputs/ungzip.py
+++ b/llvm/test/tools/llvm-objcopy/Inputs/ungzip.py
@@ -1,13 +1,14 @@
 import gzip
 import sys
 
-with gzip.open(sys.argv[1], 'rb') as f:
-  writer = getattr(sys.stdout, 'buffer', None)
-  if writer is None:
-    writer = sys.stdout
-    if sys.platform == "win32":
-      import os, msvcrt
-      msvcrt.setmode(sys.stdout.fileno(),os.O_BINARY)
+with gzip.open(sys.argv[1], "rb") as f:
+    writer = getattr(sys.stdout, "buffer", None)
+    if writer is None:
+        writer = sys.stdout
+        if sys.platform == "win32":
+            import os, msvcrt
 
-  writer.write(f.read())
-  sys.stdout.flush()
+            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+
+    writer.write(f.read())
+    sys.stdout.flush()

diff --git a/llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py b/llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py
index 75c01a2174c7e..2efcf80bdd41d 100644
--- a/llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py
+++ b/llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py
@@ -24,15 +24,25 @@
 import sys
 import typing
 
+
 class CodeDirectoryVersion:
     SUPPORTSSCATTER = 0x20100
     SUPPORTSTEAMID = 0x20200
     SUPPORTSCODELIMIT64 = 0x20300
     SUPPORTSEXECSEG = 0x20400
 
+
 class CodeDirectory:
     @staticmethod
-    def make(buf: memoryview) -> typing.Union['CodeDirectoryBase', 'CodeDirectoryV20100', 'CodeDirectoryV20200', 'CodeDirectoryV20300', 'CodeDirectoryV20400']:
+    def make(
+        buf: memoryview,
+    ) -> typing.Union[
+        "CodeDirectoryBase",
+        "CodeDirectoryV20100",
+        "CodeDirectoryV20200",
+        "CodeDirectoryV20300",
+        "CodeDirectoryV20400",
+    ]:
         _magic, _length, version = struct.unpack_from(">III", buf, 0)
         subtype = {
             CodeDirectoryVersion.SUPPORTSSCATTER: CodeDirectoryV20100,
@@ -43,6 +53,7 @@ def make(buf: memoryview) -> typing.Union['CodeDirectoryBase', 'CodeDirectoryV20
 
         return subtype._make(struct.unpack_from(subtype._format(), buf, 0))
 
+
 class CodeDirectoryBase(typing.NamedTuple):
     magic: int
     length: int
@@ -63,6 +74,7 @@ class CodeDirectoryBase(typing.NamedTuple):
     def _format() -> str:
         return ">IIIIIIIIIBBBBI"
 
+
 class CodeDirectoryV20100(typing.NamedTuple):
     magic: int
     length: int
@@ -85,6 +97,7 @@ class CodeDirectoryV20100(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryBase._format() + "I"
 
+
 class CodeDirectoryV20200(typing.NamedTuple):
     magic: int
     length: int
@@ -109,6 +122,7 @@ class CodeDirectoryV20200(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20100._format() + "I"
 
+
 class CodeDirectoryV20300(typing.NamedTuple):
     magic: int
     length: int
@@ -136,6 +150,7 @@ class CodeDirectoryV20300(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20200._format() + "IQ"
 
+
 class CodeDirectoryV20400(typing.NamedTuple):
     magic: int
     length: int
@@ -167,13 +182,16 @@ class CodeDirectoryV20400(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20300._format() + "QQQ"
 
+
 class CodeDirectoryBlobIndex(typing.NamedTuple):
     type_: int
     offset: int
 
     @staticmethod
-    def make(buf: memoryview) -> 'CodeDirectoryBlobIndex':
-        return CodeDirectoryBlobIndex._make(struct.unpack_from(CodeDirectoryBlobIndex.__format(), buf, 0))
+    def make(buf: memoryview) -> "CodeDirectoryBlobIndex":
+        return CodeDirectoryBlobIndex._make(
+            struct.unpack_from(CodeDirectoryBlobIndex.__format(), buf, 0)
+        )
 
     @staticmethod
     def bytesize() -> int:
@@ -183,6 +201,7 @@ def bytesize() -> int:
     def __format() -> str:
         return ">II"
 
+
 class CodeDirectorySuperBlob(typing.NamedTuple):
     magic: int
     length: int
@@ -190,7 +209,7 @@ class CodeDirectorySuperBlob(typing.NamedTuple):
     blob_indices: typing.List[CodeDirectoryBlobIndex]
 
     @staticmethod
-    def make(buf: memoryview) -> 'CodeDirectorySuperBlob':
+    def make(buf: memoryview) -> "CodeDirectorySuperBlob":
         super_blob_layout = ">III"
         super_blob = struct.unpack_from(super_blob_layout, buf, 0)
 
@@ -202,17 +221,25 @@ def make(buf: memoryview) -> 'CodeDirectorySuperBlob':
 
         return CodeDirectorySuperBlob(*super_blob, blob_indices)
 
+
 def unpack_null_terminated_string(buf: memoryview) -> str:
     b = bytes(itertools.takewhile(lambda b: b != 0, buf))
     return b.decode()
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('binary', type=argparse.FileType('rb'), help='The file to analyze')
-    parser.add_argument('offset', type=int, help='Offset to start of Code Directory data')
-    parser.add_argument('size', type=int, help='Size of Code Directory data')
-    parser.add_argument('code_offset', type=int, help='Offset to start of code pages to hash')
-    parser.add_argument('code_size', type=int, help='Size of the code pages to hash')
+    parser.add_argument(
+        "binary", type=argparse.FileType("rb"), help="The file to analyze"
+    )
+    parser.add_argument(
+        "offset", type=int, help="Offset to start of Code Directory data"
+    )
+    parser.add_argument("size", type=int, help="Size of Code Directory data")
+    parser.add_argument(
+        "code_offset", type=int, help="Offset to start of code pages to hash"
+    )
+    parser.add_argument("code_size", type=int, help="Size of the code pages to hash")
 
     args = parser.parse_args()
 
@@ -229,7 +256,10 @@ def main():
         print(code_directory)
 
         ident_offset = code_directory_offset + code_directory.identOffset
-        print("Code Directory ID: " + unpack_null_terminated_string(super_blob_mem[ident_offset:]))
+        print(
+            "Code Directory ID: "
+            + unpack_null_terminated_string(super_blob_mem[ident_offset:])
+        )
 
         code_offset = args.code_offset
         code_end = code_offset + args.code_size
@@ -238,7 +268,9 @@ def main():
 
         hashes_offset = code_directory_offset + code_directory.hashOffset
         for idx in range(code_directory.nCodeSlots):
-            hash_bytes = bytes(super_blob_mem[hashes_offset:hashes_offset+code_directory.hashSize])
+            hash_bytes = bytes(
+                super_blob_mem[hashes_offset : hashes_offset + code_directory.hashSize]
+            )
             hashes_offset += code_directory.hashSize
 
             hasher = hashlib.sha256()
@@ -253,5 +285,5 @@ def main():
                 sys.exit(-1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
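
code-signature-check.py compares a SHA-256 digest of each code page against the hash slots stored in the code directory. The core hashing loop, reduced to a standalone sketch (the 4096-byte page size and the fake code bytes are illustrative; a real check would use the page size recorded in the code directory):

    import hashlib

    PAGE_SIZE = 4096
    code = b"\x90" * 10000  # stand-in for the code pages; the last page is short

    page_hashes = []
    for offset in range(0, len(code), PAGE_SIZE):
        page = code[offset : offset + PAGE_SIZE]
        page_hashes.append(hashlib.sha256(page).digest())
    print(len(page_hashes), "page hashes")  # 3 pages for 10000 bytes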

diff --git a/llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py b/llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py
index 672a932515b50..eb0c8f27dc3ed 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py
@@ -18,7 +18,9 @@
 
 llvm_dis = sys.argv[1]
 filecheck = sys.argv[2]
-filecheck_args = [filecheck, ]
+filecheck_args = [
+    filecheck,
+]
 filecheck_args.extend(sys.argv[3:-1])
 bitcode_file = sys.argv[-1]
 ir_file = bitcode_file + ".ll"
@@ -36,7 +38,7 @@
     print(disassemble.stdout)
     sys.exit(1)
 
-check=None
+check = None
 with open(ir_file, "r") as ir:
     check = subprocess.Popen(filecheck_args, stdin=ir, stdout=sys.stdout)
 check.communicate()

diff --git a/llvm/test/tools/llvm-reduce/Inputs/remove-args.py b/llvm/test/tools/llvm-reduce/Inputs/remove-args.py
index fea62c3174e06..e003c6a6acecd 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/remove-args.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/remove-args.py
@@ -5,12 +5,12 @@
 
 input = open(sys.argv[1], "r")
 for line in input:
-  if "%interesting" in line:
-    InterestingArgumentPresent = True
-  if "call void @interesting" in line:
-    FunctionCallPresent = True
+    if "%interesting" in line:
+        InterestingArgumentPresent = True
+    if "call void @interesting" in line:
+        FunctionCallPresent = True
 
 if InterestingArgumentPresent and FunctionCallPresent:
-  sys.exit(0) # Interesting!
+    sys.exit(0)  # Interesting!
 
 sys.exit(1)
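
remove-args.py follows the llvm-reduce interestingness-test contract: the script receives the candidate IR file as its argument and exits 0 if the candidate is still interesting, nonzero otherwise. A minimal generic test in the same shape (the marker string is just an example):

    import sys

    with open(sys.argv[1], "r") as ir:
        interesting = "call void @interesting" in ir.read()

    # Exit 0 tells llvm-reduce this candidate is still interesting.
    sys.exit(0 if interesting else 1)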

diff --git a/llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py b/llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py
index 71f099daaba06..4e5775a9dda97 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py
@@ -3,13 +3,13 @@
 InterestingBBs = 0
 input = open(sys.argv[1], "r")
 for line in input:
-  i = line.find(';')
-  if i >= 0:
-    line = line[:i]
-  if line.startswith("interesting") or "%interesting" in line:
-    InterestingBBs += 1
+    i = line.find(";")
+    if i >= 0:
+        line = line[:i]
+    if line.startswith("interesting") or "%interesting" in line:
+        InterestingBBs += 1
 
 if InterestingBBs == 6:
-  sys.exit(0) # interesting!
+    sys.exit(0)  # interesting!
 
-sys.exit(1) # IR isn't interesting
+sys.exit(1)  # IR isn't interesting

diff --git a/llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py b/llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py
index 9717c73b01aa7..c9396457b4295 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py
@@ -4,14 +4,14 @@
 
 input = open(sys.argv[1], "r")
 for line in input:
-  i = line.find(';')
-  if i >= 0:
-    line = line[:i]
-  if "%interesting" in line:
-    InterestingInstructions += 1
-  print(InterestingInstructions)
+    i = line.find(";")
+    if i >= 0:
+        line = line[:i]
+    if "%interesting" in line:
+        InterestingInstructions += 1
+    print(InterestingInstructions)
 
 if InterestingInstructions == 5:
-  sys.exit(0) # interesting!
+    sys.exit(0)  # interesting!
 
 sys.exit(1)

diff --git a/llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py b/llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py
index 8df093e667b2a..c5a6cf7b52157 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py
@@ -10,8 +10,8 @@
 try:
     input = open(file_input, "r")
 except Exception as err:
-   print(err, file=sys.stderr)
-   sys.exit(1)
+    print(err, file=sys.stderr)
+    sys.exit(1)
 
 InterestingStores = 0
 for line in input:
@@ -23,6 +23,6 @@
 
 
 if InterestingStores > num_stores:
-  sys.exit(0) # interesting!
+    sys.exit(0)  # interesting!
 
-sys.exit(1) # IR isn't interesting
+sys.exit(1)  # IR isn't interesting

diff --git a/llvm/test/tools/llvm-reduce/remove-bbs-sequence.py b/llvm/test/tools/llvm-reduce/remove-bbs-sequence.py
index f38d501d13700..e211a8c6c7697 100755
--- a/llvm/test/tools/llvm-reduce/remove-bbs-sequence.py
+++ b/llvm/test/tools/llvm-reduce/remove-bbs-sequence.py
@@ -1,15 +1,19 @@
 import subprocess
 import sys
 
-opt = subprocess.run( [ 'opt', '-passes=print<loops>','-disable-output', sys.argv[1]], stdout=subprocess.PIPE, stderr=subprocess.PIPE )
+opt = subprocess.run(
+    ["opt", "-passes=print<loops>", "-disable-output", sys.argv[1]],
+    stdout=subprocess.PIPE,
+    stderr=subprocess.PIPE,
+)
 
 stdout = opt.stdout.decode()
 
-pattern = 'Loop at depth 1 containing'
+pattern = "Loop at depth 1 containing"
 
-if (pattern in opt.stderr.decode()):
-  print('This is interesting!')
-  sys.exit(0)
+if pattern in opt.stderr.decode():
+    print("This is interesting!")
+    sys.exit(0)
 else:
-  print('This is NOT interesting!')
-  sys.exit(1)
+    print("This is NOT interesting!")
+    sys.exit(1)

diff --git a/llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py b/llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py
index 120d49226fa9a..c8e0b959c7da7 100644
--- a/llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py
+++ b/llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py
@@ -4,21 +4,24 @@
 import sys
 import threading
 
+
 def kill_subprocess(process):
     process.kill()
     os._exit(1)
 
+
 # Pass -f=none and --output-style=GNU to get only one line of output per input.
-cmd = subprocess.Popen([sys.argv[1],
-                        '--obj=' + sys.argv[2],
-                        '-f=none',
-                        '--output-style=GNU'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+cmd = subprocess.Popen(
+    [sys.argv[1], "--obj=" + sys.argv[2], "-f=none", "--output-style=GNU"],
+    stdout=subprocess.PIPE,
+    stdin=subprocess.PIPE,
+)
 watchdog = threading.Timer(20, kill_subprocess, args=[cmd])
 watchdog.start()
-cmd.stdin.write(b'0\n')
+cmd.stdin.write(b"0\n")
 cmd.stdin.flush()
 print(cmd.stdout.readline())
-cmd.stdin.write(b'bad\n')
+cmd.stdin.write(b"bad\n")
 cmd.stdin.flush()
 print(cmd.stdout.readline())
 watchdog.cancel()
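
flush-output.py arms a threading.Timer as a watchdog so a wedged child process fails the test after 20 seconds instead of hanging it. The pattern in isolation (the child command here is a placeholder; the 20-second timeout matches the test above):

    import os
    import subprocess
    import threading

    proc = subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def kill_subprocess(process):
        process.kill()
        os._exit(1)  # take the whole script down, not just the timer thread

    watchdog = threading.Timer(20, kill_subprocess, args=[proc])
    watchdog.start()
    proc.stdin.write(b"hello\n")
    proc.stdin.flush()
    print(proc.stdout.readline())
    watchdog.cancel()  # the child responded in time; disarm the watchdog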

diff --git a/llvm/tools/llvm-shlib/gen-msvc-exports.py b/llvm/tools/llvm-shlib/gen-msvc-exports.py
index 671faf1152812..5ecb8c2da3476 100644
--- a/llvm/tools/llvm-shlib/gen-msvc-exports.py
+++ b/llvm/tools/llvm-shlib/gen-msvc-exports.py
@@ -1,10 +1,10 @@
-#===- gen-msvc-exports.py - Generate C API export file -------*- python -*--===#
+# ===- gen-msvc-exports.py - Generate C API export file -------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 #
 # Generate an export file from a list of given LIB files. This only exports symbols
 # that start with LLVM, so it only exports the LLVM C API.
@@ -22,7 +22,7 @@
 #
 # You can use the --output flag to set the name of the export file.
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 from tempfile import mkstemp
 from contextlib import contextmanager
 from subprocess import check_call
@@ -33,7 +33,7 @@
 
 _UNDERSCORE_REGEX = {
     False: re.compile(r"^\w+\s+T\s+(LLVM.*)$"),
-    True:  re.compile(r"^\w+\s+T\s+_(LLVM.*)$")
+    True: re.compile(r"^\w+\s+T\s+_(LLVM.*)$"),
 }
 
 
@@ -58,46 +58,54 @@ def gen_llvm_c_export(output, underscore, libs, nm):
     to `output`. If `underscore` is true, symbols will
     be assumed to be prefixed with an underscore.
     """
-    with removing(touch_tempfile(prefix='dumpout', suffix='.txt')) as dumpout:
+    with removing(touch_tempfile(prefix="dumpout", suffix=".txt")) as dumpout:
 
         # Get the right regex.
         p = _UNDERSCORE_REGEX[underscore]
 
-        with open(output, 'w+t') as output_f:
+        with open(output, "w+t") as output_f:
 
             # For each lib get the LLVM* functions it exports.
             for lib in libs:
                 # Call dumpbin.
-                with open(dumpout, 'w+t') as dumpout_f:
-                    check_call([nm, '-g', lib], stdout=dumpout_f)
+                with open(dumpout, "w+t") as dumpout_f:
+                    check_call([nm, "-g", lib], stdout=dumpout_f)
 
                 # Get the matching lines.
                 with open(dumpout) as dumpbin:
                     for line in dumpbin:
                         m = p.match(line)
                         if m is not None:
-                            output_f.write(m.group(1) + '\n')
+                            output_f.write(m.group(1) + "\n")
 
 
 def main():
-    parser = argparse.ArgumentParser('gen-msvc-exports')
+    parser = argparse.ArgumentParser("gen-msvc-exports")
 
     parser.add_argument(
-        '-i', '--libsfile', help='file with list of libs, new line separated',
-        action='store', default=None
+        "-i",
+        "--libsfile",
+        help="file with list of libs, new line separated",
+        action="store",
+        default=None,
     )
     parser.add_argument(
-        '-o', '--output', help='output filename', default='LLVM-C.exports'
+        "-o", "--output", help="output filename", default="LLVM-C.exports"
     )
-    parser.add_argument('-u', '--underscore',
-        help='labels are prefixed with an underscore (use for 32 bit DLLs)',
-        action='store_true'
+    parser.add_argument(
+        "-u",
+        "--underscore",
+        help="labels are prefixed with an underscore (use for 32 bit DLLs)",
+        action="store_true",
     )
     parser.add_argument(
-        '--nm', help='path to the llvm-nm executable', default='llvm-nm'
+        "--nm", help="path to the llvm-nm executable", default="llvm-nm"
     )
     parser.add_argument(
-        'libs', metavar='LIBS', nargs='*', help='list of libraries to generate export from'
+        "libs",
+        metavar="LIBS",
+        nargs="*",
+        help="list of libraries to generate export from",
     )
 
     ns = parser.parse_args()
@@ -112,5 +120,5 @@ def main():
     gen_llvm_c_export(ns.output, ns.underscore, libs, ns.nm)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
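
The _UNDERSCORE_REGEX table above selects exported text symbols from llvm-nm output, keeping only the LLVM C API. A quick demonstration of the non-underscore variant (the sample nm lines are fabricated):

    import re

    regex = re.compile(r"^\w+\s+T\s+(LLVM.*)$")
    sample = [
        "0000000000000000 T LLVMContextCreate",
        "0000000000000010 t internalHelper",
        "0000000000000020 T clang_main",
    ]
    for line in sample:
        m = regex.match(line)
        if m:
            print(m.group(1))  # only LLVMContextCreate matches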

diff --git a/llvm/tools/opt-viewer/extract-reproducers.py b/llvm/tools/opt-viewer/extract-reproducers.py
index 1fa3fb9360e15..610439d6f4c94 100644
--- a/llvm/tools/opt-viewer/extract-reproducers.py
+++ b/llvm/tools/opt-viewer/extract-reproducers.py
@@ -1,21 +1,22 @@
 #!/usr/bin/env python
 
-desc = '''
+desc = """
 A script to extract ConstraintElimination's reproducer remarks. The extracted
 modules are written as textual LLVM IR to files named reproducerXXXX.ll in the
 current directory.
-'''
+"""
 
 import optrecord
 import argparse
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=desc)
     parser.add_argument(
-        'yaml_dirs_or_files',
-        nargs='+',
-        help='List of optimization record files or directories searched '
-             'for optimization record files.')
+        "yaml_dirs_or_files",
+        nargs="+",
+        help="List of optimization record files or directories searched "
+        "for optimization record files.",
+    )
 
     args = parser.parse_args()
 
@@ -27,13 +28,12 @@
         parser.error("No *.opt.yaml files found")
         sys.exit(1)
 
-    all_remarks, file_remarks, _ = optrecord.gather_results(
-        files, jobs, True)
+    all_remarks, file_remarks, _ = optrecord.gather_results(files, jobs, True)
 
     i = 0
     for r in all_remarks:
-        if r[1] != 'constraint-elimination' or r[2] != 'Reproducer':
+        if r[1] != "constraint-elimination" or r[2] != "Reproducer":
             continue
-        with open('reproducer{}.ll'.format(i), 'wt') as f:
+        with open("reproducer{}.ll".format(i), "wt") as f:
             f.write(r[7][1][0][1])
         i += 1

diff --git a/llvm/tools/opt-viewer/opt-diff.py b/llvm/tools/opt-viewer/opt-diff.py
index 36e81a5d569a7..2763ca4010c8a 100755
--- a/llvm/tools/opt-viewer/opt-diff.py
+++ b/llvm/tools/opt-viewer/opt-diff.py
@@ -2,14 +2,15 @@
 
 from __future__ import print_function
 
-desc = '''Generate the difference of two YAML files into a new YAML file (works on
+desc = """Generate the difference of two YAML files into a new YAML file (works on
 pair of directories too).  A new attribute 'Added' is set to True or False
 depending whether the entry is added or removed from the first input to the
 next.
 
-The tools requires PyYAML.'''
+The tools requires PyYAML."""
 
 import yaml
+
 # Try to use the C parser.
 try:
     from yaml import CLoader as Loader
@@ -20,35 +21,40 @@
 import argparse
 from collections import defaultdict
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=desc)
     parser.add_argument(
-        'yaml_dir_or_file_1',
-        help='An optimization record file or a directory searched for optimization '
-             'record files that are used as the old version for the comparison')
+        "yaml_dir_or_file_1",
+        help="An optimization record file or a directory searched for optimization "
+        "record files that are used as the old version for the comparison",
+    )
     parser.add_argument(
-        'yaml_dir_or_file_2',
-        help='An optimization record file or a directory searched for optimization '
-             'record files that are used as the new version for the comparison')
+        "yaml_dir_or_file_2",
+        help="An optimization record file or a directory searched for optimization "
+        "record files that are used as the new version for the comparison",
+    )
     parser.add_argument(
-        '--jobs',
-        '-j',
+        "--jobs",
+        "-j",
         default=None,
         type=int,
-        help='Max job count (defaults to %(default)s, the current CPU count)')
+        help="Max job count (defaults to %(default)s, the current CPU count)",
+    )
     parser.add_argument(
-        '--max-size',
-        '-m',
+        "--max-size",
+        "-m",
         default=100000,
         type=int,
-        help='Maximum number of remarks stored in an output file')
+        help="Maximum number of remarks stored in an output file",
+    )
     parser.add_argument(
-        '--no-progress-indicator',
-        '-n',
-        action='store_true',
+        "--no-progress-indicator",
+        "-n",
+        action="store_true",
         default=False,
-        help='Do not display any indicator of how many YAML files were read.')
-    parser.add_argument('--output', '-o', default='diff{}.opt.yaml')
+        help="Do not display any indicator of how many YAML files were read.",
+    )
+    parser.add_argument("--output", "-o", default="diff{}.opt.yaml")
     args = parser.parse_args()
 
     files1 = optrecord.find_opt_files(args.yaml_dir_or_file_1)
@@ -71,5 +77,5 @@
         r.recover_yaml_structure()
 
     for i in range(0, len(result), args.max_size):
-        with open(args.output.format(i / args.max_size), 'w') as stream:
-            yaml.dump_all(result[i:i + args.max_size], stream)
+        with open(args.output.format(i / args.max_size), "w") as stream:
+            yaml.dump_all(result[i : i + args.max_size], stream)
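
The output loop above shards the merged remarks into files of at most max-size entries each. One detail worth noting: i / args.max_size is true division under Python 3, so the default pattern appears to yield names like diff0.0.opt.yaml; the sketch below uses // for whole-number chunk indices (data values are illustrative):

    result = list(range(10))
    max_size = 4
    for i in range(0, len(result), max_size):
        print("chunk", i // max_size, result[i : i + max_size])
    # chunk 0 [0, 1, 2, 3] / chunk 1 [4, 5, 6, 7] / chunk 2 [8, 9]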

diff --git a/llvm/tools/opt-viewer/opt-stats.py b/llvm/tools/opt-viewer/opt-stats.py
index f4ee3a7d44e63..716143b31a890 100755
--- a/llvm/tools/opt-viewer/opt-stats.py
+++ b/llvm/tools/opt-viewer/opt-stats.py
@@ -2,10 +2,10 @@
 
 from __future__ import print_function
 
-desc = '''Generate statistics about optimization records from the YAML files
+desc = """Generate statistics about optimization records from the YAML files
 generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
 
-The tools requires PyYAML and Pygments Python packages.'''
+The tools requires PyYAML and Pygments Python packages."""
 
 import optrecord
 import argparse
@@ -15,30 +15,34 @@
 
 try:
     from guppy import hpy
+
     hp = hpy()
 except ImportError:
     print("Memory consumption not shown because guppy is not installed")
     hp = None
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=desc)
     parser.add_argument(
-        'yaml_dirs_or_files',
-        nargs='+',
-        help='List of optimization record files or directories searched '
-             'for optimization record files.')
+        "yaml_dirs_or_files",
+        nargs="+",
+        help="List of optimization record files or directories searched "
+        "for optimization record files.",
+    )
     parser.add_argument(
-        '--jobs',
-        '-j',
+        "--jobs",
+        "-j",
         default=None,
         type=int,
-        help='Max job count (defaults to %(default)s, the current CPU count)')
+        help="Max job count (defaults to %(default)s, the current CPU count)",
+    )
     parser.add_argument(
-        '--no-progress-indicator',
-        '-n',
-        action='store_true',
+        "--no-progress-indicator",
+        "-n",
+        action="store_true",
         default=False,
-        help='Do not display any indicator of how many YAML files were read.')
+        help="Do not display any indicator of how many YAML files were read.",
+    )
     args = parser.parse_args()
 
     print_progress = not args.no_progress_indicator
@@ -49,9 +53,10 @@
         sys.exit(1)
 
     all_remarks, file_remarks, _ = optrecord.gather_results(
-        files, args.jobs, print_progress)
+        files, args.jobs, print_progress
+    )
     if print_progress:
-        print('\n')
+        print("\n")
 
     bypass = defaultdict(int)
     byname = defaultdict(int)
@@ -63,16 +68,17 @@
     print("{:24s} {:10d}".format("Total number of remarks", total))
     if hp:
         h = hp.heap()
-        print("{:24s} {:10d}".format("Memory per remark",
-                                     h.size / len(all_remarks)))
-    print('\n')
+        print("{:24s} {:10d}".format("Memory per remark", h.size / len(all_remarks)))
+    print("\n")
 
     print("Top 10 remarks by pass:")
-    for (passname, count) in sorted(bypass.items(), key=operator.itemgetter(1),
-                                    reverse=True)[:10]:
-        print("  {:30s} {:2.0f}%". format(passname, count * 100. / total))
+    for (passname, count) in sorted(
+        bypass.items(), key=operator.itemgetter(1), reverse=True
+    )[:10]:
+        print("  {:30s} {:2.0f}%".format(passname, count * 100.0 / total))
 
     print("\nTop 10 remarks:")
-    for (name, count) in sorted(byname.items(), key=operator.itemgetter(1),
-                                reverse=True)[:10]:
-        print("  {:30s} {:2.0f}%". format(name, count * 100. / total))
+    for (name, count) in sorted(
+        byname.items(), key=operator.itemgetter(1), reverse=True
+    )[:10]:
+        print("  {:30s} {:2.0f}%".format(name, count * 100.0 / total))
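
The reporting code above tallies remarks per pass and per remark name, then prints the ten most frequent as percentages of the total. The same aggregation on toy data (pass names invented):

    import operator
    from collections import defaultdict

    bypass = defaultdict(int)
    for passname in ["inline", "licm", "inline", "gvn", "inline"]:
        bypass[passname] += 1

    total = sum(bypass.values())
    for passname, count in sorted(
        bypass.items(), key=operator.itemgetter(1), reverse=True
    )[:10]:
        print("  {:30s} {:2.0f}%".format(passname, count * 100.0 / total))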

diff --git a/llvm/tools/opt-viewer/opt-viewer.py b/llvm/tools/opt-viewer/opt-viewer.py
index 714fccc8df967..c9c7db726f765 100755
--- a/llvm/tools/opt-viewer/opt-viewer.py
+++ b/llvm/tools/opt-viewer/opt-viewer.py
@@ -21,27 +21,32 @@
 import optrecord
 
 
-desc = '''Generate HTML output to visualize optimization records from the YAML files
+desc = """Generate HTML output to visualize optimization records from the YAML files
 generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
 
-The tools requires PyYAML and Pygments Python packages.'''
+The tools requires PyYAML and Pygments Python packages."""
 
 
 # This allows passing the global context to the child processes.
 class Context:
-    def __init__(self, caller_loc = dict()):
-       # Map function names to their source location for function where inlining happened
-       self.caller_loc = caller_loc
+    def __init__(self, caller_loc=dict()):
+        # Map function names to their source location for function where inlining happened
+        self.caller_loc = caller_loc
+
 
 context = Context()
 
+
 def suppress(remark):
-    if remark.Name == 'sil.Specialized':
-        return remark.getArgDict()['Function'][0].startswith('\"Swift.')
-    elif remark.Name == 'sil.Inlined':
-        return remark.getArgDict()['Callee'][0].startswith(('\"Swift.', '\"specialized Swift.'))
+    if remark.Name == "sil.Specialized":
+        return remark.getArgDict()["Function"][0].startswith('"Swift.')
+    elif remark.Name == "sil.Inlined":
+        return remark.getArgDict()["Callee"][0].startswith(
+            ('"Swift.', '"specialized Swift.')
+        )
     return False
 
+
 class SourceFileRenderer:
     def __init__(self, source_dir, output_dir, filename, no_highlight):
         self.filename = filename
@@ -54,18 +59,27 @@ def __init__(self, source_dir, output_dir, filename, no_highlight):
                 existing_filename = fn
 
         self.no_highlight = no_highlight
-        self.stream = io.open(os.path.join(output_dir, optrecord.html_file_name(filename)), 'w', encoding='utf-8')
+        self.stream = io.open(
+            os.path.join(output_dir, optrecord.html_file_name(filename)),
+            "w",
+            encoding="utf-8",
+        )
         if existing_filename:
-            self.source_stream = io.open(existing_filename, encoding='utf-8')
+            self.source_stream = io.open(existing_filename, encoding="utf-8")
         else:
             self.source_stream = None
-            print(u'''
+            print(
+                """
 <html>
 <h1>Unable to locate file {}</h1>
 </html>
-            '''.format(filename), file=self.stream)
+            """.format(
+                    filename
+                ),
+                file=self.stream,
+            )
 
-        self.html_formatter = HtmlFormatter(encoding='utf-8')
+        self.html_formatter = HtmlFormatter(encoding="utf-8")
         self.cpp_lexer = CppLexer(stripnl=False)
 
     def render_source_lines(self, stream, line_remarks):
@@ -74,31 +88,35 @@ def render_source_lines(self, stream, line_remarks):
         if self.no_highlight:
             html_highlighted = file_text
         else:
-            html_highlighted = highlight(
-            file_text,
-                self.cpp_lexer,
-                self.html_formatter)
+            html_highlighted = highlight(file_text, self.cpp_lexer, self.html_formatter)
 
             # Note that the API is different between Python 2 and 3.  On
             # Python 3, pygments.highlight() returns a bytes object, so we
             # have to decode.  On Python 2, the output is str but since we
             # support unicode characters and the output streams is unicode we
             # decode too.
-            html_highlighted = html_highlighted.decode('utf-8')
+            html_highlighted = html_highlighted.decode("utf-8")
 
             # Take off the header and footer, these must be
             #   reapplied line-wise, within the page structure
-            html_highlighted = html_highlighted.replace('<div class="highlight"><pre>', '')
-            html_highlighted = html_highlighted.replace('</pre></div>', '')
-
-        for (linenum, html_line) in enumerate(html_highlighted.split('\n'), start=1):
-            print(u'''
+            html_highlighted = html_highlighted.replace(
+                '<div class="highlight"><pre>', ""
+            )
+            html_highlighted = html_highlighted.replace("</pre></div>", "")
+
+        for (linenum, html_line) in enumerate(html_highlighted.split("\n"), start=1):
+            print(
+                """
 <tr>
 <td><a name=\"L{linenum}\">{linenum}</a></td>
 <td></td>
 <td></td>
 <td><div class="highlight"><pre>{html_line}</pre></div></td>
-</tr>'''.format(**locals()), file=self.stream)
+</tr>""".format(
+                    **locals()
+                ),
+                file=self.stream,
+            )
 
             for remark in line_remarks.get(linenum, []):
                 if not suppress(remark):
@@ -109,42 +127,52 @@ def render_inline_remarks(self, r, line):
         dl = context.caller_loc.get(r.Function)
         if dl:
             dl_dict = dict(list(dl))
-            link = optrecord.make_link(dl_dict['File'], dl_dict['Line'] - 2)
-            inlining_context = "<a href={link}>{r.DemangledFunctionName}</a>".format(**locals())
+            link = optrecord.make_link(dl_dict["File"], dl_dict["Line"] - 2)
+            inlining_context = "<a href={link}>{r.DemangledFunctionName}</a>".format(
+                **locals()
+            )
 
         # Column is the number of characters *including* tabs, keep those and
         # replace everything else with spaces.
-        indent = line[:max(r.Column, 1) - 1]
-        indent = re.sub('\S', ' ', indent)
+        indent = line[: max(r.Column, 1) - 1]
+        indent = re.sub("\S", " ", indent)
 
         # Create expanded message and link if we have a multiline message.
-        lines = r.message.split('\n')
+        lines = r.message.split("\n")
         if len(lines) > 1:
             expand_link = '<a style="text-decoration: none;" href="" onclick="toggleExpandedMessage(this); return false;">+</a>'
             message = lines[0]
-            expand_message = u'''
+            expand_message = """
 <div class="full-info" style="display:none;">
   <div class="col-left"><pre style="display:inline">{}</pre></div>
   <div class="expanded col-left"><pre>{}</pre></div>
-</div>'''.format(indent, '\n'.join(lines[1:]))
+</div>""".format(
+                indent, "\n".join(lines[1:])
+            )
         else:
-            expand_link = ''
-            expand_message = ''
+            expand_link = ""
+            expand_message = ""
             message = r.message
-        print(u'''
+        print(
+            """
 <tr>
 <td></td>
 <td>{r.RelativeHotness}</td>
 <td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
 <td><pre style="display:inline">{indent}</pre><span class=\"column-entry-yellow\">{expand_link} {message} </span>{expand_message}</td>
 <td class=\"column-entry-yellow\">{inlining_context}</td>
-</tr>'''.format(**locals()), file=self.stream)
+</tr>""".format(
+                **locals()
+            ),
+            file=self.stream,
+        )
 
     def render(self, line_remarks):
         if not self.source_stream:
             return
 
-        print(u'''
+        print(
+            """
 <html>
 <title>{}</title>
 <meta charset="utf-8" />
@@ -180,34 +208,51 @@ def render(self, line_remarks):
 <th style="width: 15%">Inline Context</td>
 </tr>
 </thead>
-<tbody>'''.format(os.path.basename(self.filename)), file=self.stream)
+<tbody>""".format(
+                os.path.basename(self.filename)
+            ),
+            file=self.stream,
+        )
         self.render_source_lines(self.source_stream, line_remarks)
 
-        print(u'''
+        print(
+            """
 </tbody>
 </table>
 </body>
-</html>''', file=self.stream)
+</html>""",
+            file=self.stream,
+        )
 
 
 class IndexRenderer:
-    def __init__(self, output_dir, should_display_hotness, max_hottest_remarks_on_index):
-        self.stream = io.open(os.path.join(output_dir, 'index.html'), 'w', encoding='utf-8')
+    def __init__(
+        self, output_dir, should_display_hotness, max_hottest_remarks_on_index
+    ):
+        self.stream = io.open(
+            os.path.join(output_dir, "index.html"), "w", encoding="utf-8"
+        )
         self.should_display_hotness = should_display_hotness
         self.max_hottest_remarks_on_index = max_hottest_remarks_on_index
 
     def render_entry(self, r, odd):
         escaped_name = html.escape(r.DemangledFunctionName)
-        print(u'''
+        print(
+            """
 <tr>
 <td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>
 <td class=\"column-entry-{odd}\">{r.RelativeHotness}</td>
 <td class=\"column-entry-{odd}\">{escaped_name}</td>
 <td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
-</tr>'''.format(**locals()), file=self.stream)
+</tr>""".format(
+                **locals()
+            ),
+            file=self.stream,
+        )
 
     def render(self, all_remarks):
-        print(u'''
+        print(
+            """
 <html>
 <meta charset="utf-8" />
 <head>
@@ -221,7 +266,9 @@ def render(self, all_remarks):
 <td>Hotness</td>
 <td>Function</td>
 <td>Pass</td>
-</tr>''', file=self.stream)
+</tr>""",
+            file=self.stream,
+        )
 
         max_entries = None
         if self.should_display_hotness:
@@ -230,10 +277,13 @@ def render(self, all_remarks):
         for i, remark in enumerate(all_remarks[:max_entries]):
             if not suppress(remark):
                 self.render_entry(remark, i % 2)
-        print(u'''
+        print(
+            """
 </table>
 </body>
-</html>''', file=self.stream)
+</html>""",
+            file=self.stream,
+        )
 
 
 def _render_file(source_dir, output_dir, ctx, no_highlight, entry, filter_):
@@ -247,26 +297,32 @@ def map_remarks(all_remarks):
     # Set up a map between function names and their source location for
     # function where inlining happened
     for remark in optrecord.itervalues(all_remarks):
-        if isinstance(remark, optrecord.Passed) and remark.Pass == "inline" and remark.Name == "Inlined":
+        if (
+            isinstance(remark, optrecord.Passed)
+            and remark.Pass == "inline"
+            and remark.Name == "Inlined"
+        ):
             for arg in remark.Args:
                 arg_dict = dict(list(arg))
-                caller = arg_dict.get('Caller')
+                caller = arg_dict.get("Caller")
                 if caller:
                     try:
-                        context.caller_loc[caller] = arg_dict['DebugLoc']
+                        context.caller_loc[caller] = arg_dict["DebugLoc"]
                     except KeyError:
                         pass
 
 
-def generate_report(all_remarks,
-                    file_remarks,
-                    source_dir,
-                    output_dir,
-                    no_highlight,
-                    should_display_hotness,
-                    max_hottest_remarks_on_index,
-                    num_jobs,
-                    should_print_progress):
+def generate_report(
+    all_remarks,
+    file_remarks,
+    source_dir,
+    output_dir,
+    no_highlight,
+    should_display_hotness,
+    max_hottest_remarks_on_index,
+    num_jobs,
+    should_print_progress,
+):
     try:
         os.makedirs(output_dir)
     except OSError as e:
@@ -276,75 +332,107 @@ def generate_report(all_remarks,
             raise
 
     if should_print_progress:
-        print('Rendering index page...')
+        print("Rendering index page...")
     if should_display_hotness:
-        sorted_remarks = sorted(optrecord.itervalues(all_remarks), key=lambda r: (r.Hotness, r.File, r.Line, r.Column, r.PassWithDiffPrefix, r.yaml_tag, r.Function), reverse=True)
+        sorted_remarks = sorted(
+            optrecord.itervalues(all_remarks),
+            key=lambda r: (
+                r.Hotness,
+                r.File,
+                r.Line,
+                r.Column,
+                r.PassWithDiffPrefix,
+                r.yaml_tag,
+                r.Function,
+            ),
+            reverse=True,
+        )
     else:
-        sorted_remarks = sorted(optrecord.itervalues(all_remarks), key=lambda r: (r.File, r.Line, r.Column, r.PassWithDiffPrefix, r.yaml_tag, r.Function))
-    IndexRenderer(output_dir, should_display_hotness, max_hottest_remarks_on_index).render(sorted_remarks)
-
-    shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),
-            "style.css"), output_dir)
-
-    _render_file_bound = functools.partial(_render_file, source_dir, output_dir, context, no_highlight)
+        sorted_remarks = sorted(
+            optrecord.itervalues(all_remarks),
+            key=lambda r: (
+                r.File,
+                r.Line,
+                r.Column,
+                r.PassWithDiffPrefix,
+                r.yaml_tag,
+                r.Function,
+            ),
+        )
+    IndexRenderer(
+        output_dir, should_display_hotness, max_hottest_remarks_on_index
+    ).render(sorted_remarks)
+
+    shutil.copy(
+        os.path.join(os.path.dirname(os.path.realpath(__file__)), "style.css"),
+        output_dir,
+    )
+
+    _render_file_bound = functools.partial(
+        _render_file, source_dir, output_dir, context, no_highlight
+    )
     if should_print_progress:
-        print('Rendering HTML files...')
-    optpmap.pmap(_render_file_bound,
-                 file_remarks.items(),
-                 num_jobs,
-                 should_print_progress)
+        print("Rendering HTML files...")
+    optpmap.pmap(
+        _render_file_bound, file_remarks.items(), num_jobs, should_print_progress
+    )
 
 
 def main():
     parser = argparse.ArgumentParser(description=desc)
     parser.add_argument(
-        'yaml_dirs_or_files',
-        nargs='+',
-        help='List of optimization record files or directories searched '
-             'for optimization record files.')
+        "yaml_dirs_or_files",
+        nargs="+",
+        help="List of optimization record files or directories searched "
+        "for optimization record files.",
+    )
     parser.add_argument(
-        '--output-dir',
-        '-o',
-        default='html',
-        help='Path to a directory where generated HTML files will be output. '
-             'If the directory does not already exist, it will be created. '
-             '"%(default)s" by default.')
+        "--output-dir",
+        "-o",
+        default="html",
+        help="Path to a directory where generated HTML files will be output. "
+        "If the directory does not already exist, it will be created. "
+        '"%(default)s" by default.',
+    )
     parser.add_argument(
-        '--jobs',
-        '-j',
+        "--jobs",
+        "-j",
         default=None,
         type=int,
-        help='Max job count (defaults to %(default)s, the current CPU count)')
-    parser.add_argument(
-        '--source-dir',
-        '-s',
-        default='',
-        help='set source directory')
+        help="Max job count (defaults to %(default)s, the current CPU count)",
+    )
+    parser.add_argument("--source-dir", "-s", default="", help="set source directory")
     parser.add_argument(
-        '--no-progress-indicator',
-        '-n',
-        action='store_true',
+        "--no-progress-indicator",
+        "-n",
+        action="store_true",
         default=False,
-        help='Do not display any indicator of how many YAML files were read '
-             'or rendered into HTML.')
+        help="Do not display any indicator of how many YAML files were read "
+        "or rendered into HTML.",
+    )
     parser.add_argument(
-        '--max-hottest-remarks-on-index',
+        "--max-hottest-remarks-on-index",
         default=1000,
         type=int,
-        help='Maximum number of the hottest remarks to appear on the index page')
+        help="Maximum number of the hottest remarks to appear on the index page",
+    )
     parser.add_argument(
-        '--no-highlight',
-        action='store_true',
+        "--no-highlight",
+        action="store_true",
         default=False,
-        help='Do not use a syntax highlighter when rendering the source code')
+        help="Do not use a syntax highlighter when rendering the source code",
+    )
     parser.add_argument(
-        '--demangler',
-        help='Set the demangler to be used (defaults to %s)' % optrecord.Remark.default_demangler)
+        "--demangler",
+        help="Set the demangler to be used (defaults to %s)"
+        % optrecord.Remark.default_demangler,
+    )
 
     parser.add_argument(
-        '--filter',
-        default='',
-        help='Only display remarks from passes matching filter expression')
+        "--filter",
+        default="",
+        help="Only display remarks from passes matching filter expression",
+    )
 
     # Do not make this a global variable.  Values needed to be propagated through
     # to individual classes and functions to be portable with multiprocessing across
@@ -360,20 +448,24 @@ def main():
         parser.error("No *.opt.yaml files found")
         sys.exit(1)
 
-    all_remarks, file_remarks, should_display_hotness = \
-        optrecord.gather_results(files, args.jobs, print_progress, args.filter)
+    all_remarks, file_remarks, should_display_hotness = optrecord.gather_results(
+        files, args.jobs, print_progress, args.filter
+    )
 
     map_remarks(all_remarks)
 
-    generate_report(all_remarks,
-                    file_remarks,
-                    args.source_dir,
-                    args.output_dir,
-                    args.no_highlight,
-                    should_display_hotness,
-                    args.max_hottest_remarks_on_index,
-                    args.jobs,
-                    print_progress)
-
-if __name__ == '__main__':
+    generate_report(
+        all_remarks,
+        file_remarks,
+        args.source_dir,
+        args.output_dir,
+        args.no_highlight,
+        should_display_hotness,
+        args.max_hottest_remarks_on_index,
+        args.jobs,
+        print_progress,
+    )
+
+
+if __name__ == "__main__":
     main()
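
For orientation, a minimal sketch of driving the renderer reformatted above
by hand; the paths are hypothetical, and in practice _render_file() builds
one renderer per source file:

    # Hypothetical standalone use; "html" must already exist and the
    # opt-viewer module must be importable.
    renderer = SourceFileRenderer(
        source_dir="src", output_dir="html", filename="foo.cpp", no_highlight=True
    )
    renderer.render(line_remarks={})  # no remarks: emits a bare page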

diff --git a/llvm/tools/opt-viewer/optpmap.py b/llvm/tools/opt-viewer/optpmap.py
index 8124c8c9036eb..28c608dbfaeee 100644
--- a/llvm/tools/opt-viewer/optpmap.py
+++ b/llvm/tools/opt-viewer/optpmap.py
@@ -19,13 +19,15 @@ def _wrapped_func(func_and_args):
     if should_print_progress:
         with _current.get_lock():
             _current.value += 1
-        sys.stdout.write('\r\t{} of {}'.format(_current.value, _total.value))
+        sys.stdout.write("\r\t{} of {}".format(_current.value, _total.value))
         sys.stdout.flush()
 
     return func(argument, filter_)
 
 
-def pmap(func, iterable, processes, should_print_progress, filter_=None, *args, **kwargs):
+def pmap(
+    func, iterable, processes, should_print_progress, filter_=None, *args, **kwargs
+):
     """
     A parallel map function that reports on its progress.
 
@@ -37,20 +39,25 @@ def pmap(func, iterable, processes, should_print_progress, filter_=None, *args,
     """
     global _current
     global _total
-    _current = multiprocessing.Value('i', 0)
-    _total = multiprocessing.Value('i', len(iterable))
+    _current = multiprocessing.Value("i", 0)
+    _total = multiprocessing.Value("i", len(iterable))
 
     func_and_args = [(func, arg, should_print_progress, filter_) for arg in iterable]
     if processes == 1:
         result = list(map(_wrapped_func, func_and_args, *args, **kwargs))
     else:
-        pool = multiprocessing.Pool(initializer=_init,
-                                    initargs=(_current, _total,),
-                                    processes=processes)
+        pool = multiprocessing.Pool(
+            initializer=_init,
+            initargs=(
+                _current,
+                _total,
+            ),
+            processes=processes,
+        )
         result = pool.map(_wrapped_func, func_and_args, *args, **kwargs)
         pool.close()
         pool.join()
 
     if should_print_progress:
-        sys.stdout.write('\r')
+        sys.stdout.write("\r")
     return result
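
To make the reformatted pmap() signature above concrete, a minimal sketch;
the worker function and file list are hypothetical:

    import optpmap

    # pmap() hands each item to the worker together with the filter_ argument.
    def count_lines(path, filter_):
        with open(path) as f:
            return sum(1 for _ in f)

    # processes=1 takes the serial map() path, so a locally defined worker
    # is fine here.
    results = optpmap.pmap(
        count_lines, ["a.opt.yaml", "b.opt.yaml"], 1, should_print_progress=False
    )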

diff --git a/llvm/tools/opt-viewer/optrecord.py b/llvm/tools/opt-viewer/optrecord.py
index 6a53e13f4c2b8..9e2fc7cb553b5 100644
--- a/llvm/tools/opt-viewer/optrecord.py
+++ b/llvm/tools/opt-viewer/optrecord.py
@@ -4,6 +4,7 @@
 
 import io
 import yaml
+
 # Try to use the C parser.
 try:
     from yaml import CLoader as Loader
@@ -18,6 +19,7 @@
 from multiprocessing import Lock
 import os, os.path
 import subprocess
+
 try:
     # The previously builtin function `intern()` was moved
     # to the `sys` module in Python 3.
@@ -35,42 +37,47 @@
     # Python 3
     def itervalues(d):
         return iter(d.values())
+
     def iteritems(d):
         return iter(d.items())
+
 else:
     # Python 2
     def itervalues(d):
         return d.itervalues()
+
     def iteritems(d):
         return d.iteritems()
 
 
 def html_file_name(filename):
-    return filename.replace('/', '_').replace('#', '_') + ".html"
+    return filename.replace("/", "_").replace("#", "_") + ".html"
 
 
 def make_link(File, Line):
-    return "\"{}#L{}\"".format(html_file_name(File), Line)
+    return '"{}#L{}"'.format(html_file_name(File), Line)
 
 
 class Remark(yaml.YAMLObject):
     # Work-around for http://pyyaml.org/ticket/154.
     yaml_loader = Loader
 
-    default_demangler = 'c++filt -n'
+    default_demangler = "c++filt -n"
     demangler_proc = None
 
     @classmethod
     def set_demangler(cls, demangler):
-        cls.demangler_proc = subprocess.Popen(demangler.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+        cls.demangler_proc = subprocess.Popen(
+            demangler.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE
+        )
         cls.demangler_lock = Lock()
 
     @classmethod
     def demangle(cls, name):
         with cls.demangler_lock:
-            cls.demangler_proc.stdin.write((name + '\n').encode('utf-8'))
+            cls.demangler_proc.stdin.write((name + "\n").encode("utf-8"))
             cls.demangler_proc.stdin.flush()
-            return cls.demangler_proc.stdout.readline().rstrip().decode('utf-8')
+            return cls.demangler_proc.stdout.readline().rstrip().decode("utf-8")
 
     # Intern all strings since we have lot of duplication across filenames,
     # remark text.
@@ -119,23 +126,23 @@ def tuple_to_dict(t):
         self.Args = [tuple_to_dict(arg_tuple) for arg_tuple in self.Args]
 
     def canonicalize(self):
-        if not hasattr(self, 'Hotness'):
+        if not hasattr(self, "Hotness"):
             self.Hotness = 0
-        if not hasattr(self, 'Args'):
+        if not hasattr(self, "Args"):
             self.Args = []
         self._reduce_memory()
 
     @property
     def File(self):
-        return self.DebugLoc['File']
+        return self.DebugLoc["File"]
 
     @property
     def Line(self):
-        return int(self.DebugLoc['Line'])
+        return int(self.DebugLoc["Line"])
 
     @property
     def Column(self):
-        return self.DebugLoc['Column']
+        return self.DebugLoc["Column"]
 
     @property
     def DebugLocString(self):
@@ -151,20 +158,21 @@ def Link(self):
 
     def getArgString(self, mapping):
         mapping = dict(list(mapping))
-        dl = mapping.get('DebugLoc')
+        dl = mapping.get("DebugLoc")
         if dl:
-            del mapping['DebugLoc']
+            del mapping["DebugLoc"]
 
-        assert(len(mapping) == 1)
+        assert len(mapping) == 1
         (key, value) = list(mapping.items())[0]
 
-        if key == 'Caller' or key == 'Callee' or key == 'DirectCallee':
+        if key == "Caller" or key == "Callee" or key == "DirectCallee":
             value = html.escape(self.demangle(value))
 
-        if dl and key != 'Caller':
+        if dl and key != "Caller":
             dl_dict = dict(list(dl))
-            return u"<a href={}>{}</a>".format(
-                make_link(dl_dict['File'], dl_dict['Line']), value)
+            return "<a href={}>{}</a>".format(
+                make_link(dl_dict["File"], dl_dict["Line"]), value
+            )
         else:
             return value
 
@@ -173,15 +181,15 @@ def getArgString(self, mapping):
     # list containing the value (e.g. for 'Callee' the function) and
     # optionally a DebugLoc.
     def getArgDict(self):
-        if hasattr(self, 'ArgDict'):
+        if hasattr(self, "ArgDict"):
             return self.ArgDict
         self.ArgDict = {}
         for arg in self.Args:
             if len(arg) == 2:
-                if arg[0][0] == 'DebugLoc':
+                if arg[0][0] == "DebugLoc":
                     dbgidx = 0
                 else:
-                    assert(arg[1][0] == 'DebugLoc')
+                    assert arg[1][0] == "DebugLoc"
                     dbgidx = 1
 
                 key = arg[1 - dbgidx][0]
@@ -189,18 +197,18 @@ def getArgDict(self):
             else:
                 arg = arg[0]
                 key = arg[0]
-                entry = (arg[1], )
+                entry = (arg[1],)
 
             self.ArgDict[key] = entry
         return self.ArgDict
 
     def getDiffPrefix(self):
-        if hasattr(self, 'Added'):
+        if hasattr(self, "Added"):
             if self.Added:
-                return '+'
+                return "+"
             else:
-                return '-'
-        return ''
+                return "-"
+        return ""
 
     @property
     def PassWithDiffPrefix(self):
@@ -215,14 +223,22 @@ def message(self):
     @property
     def RelativeHotness(self):
         if self.max_hotness:
-            return "{0:.2f}%".format(self.Hotness * 100. / self.max_hotness)
+            return "{0:.2f}%".format(self.Hotness * 100.0 / self.max_hotness)
         else:
-            return ''
+            return ""
 
     @property
     def key(self):
-        return (self.__class__, self.PassWithDiffPrefix, self.Name, self.File,
-                self.Line, self.Column, self.Function, self.Args)
+        return (
+            self.__class__,
+            self.PassWithDiffPrefix,
+            self.Name,
+            self.File,
+            self.Line,
+            self.Column,
+            self.Function,
+            self.Args,
+        )
 
     def __hash__(self):
         return hash(self.key)
@@ -235,7 +251,7 @@ def __repr__(self):
 
 
 class Analysis(Remark):
-    yaml_tag = '!Analysis'
+    yaml_tag = "!Analysis"
 
     @property
     def color(self):
@@ -243,15 +259,15 @@ def color(self):
 
 
 class AnalysisFPCommute(Analysis):
-    yaml_tag = '!AnalysisFPCommute'
+    yaml_tag = "!AnalysisFPCommute"
 
 
 class AnalysisAliasing(Analysis):
-    yaml_tag = '!AnalysisAliasing'
+    yaml_tag = "!AnalysisAliasing"
 
 
 class Passed(Remark):
-    yaml_tag = '!Passed'
+    yaml_tag = "!Passed"
 
     @property
     def color(self):
@@ -259,21 +275,23 @@ def color(self):
 
 
 class Missed(Remark):
-    yaml_tag = '!Missed'
+    yaml_tag = "!Missed"
 
     @property
     def color(self):
         return "red"
 
+
 class Failure(Missed):
-    yaml_tag = '!Failure'
+    yaml_tag = "!Failure"
+
 
 def get_remarks(input_file, filter_=None):
     max_hotness = 0
     all_remarks = dict()
     file_remarks = defaultdict(functools.partial(defaultdict, list))
 
-    with io.open(input_file, encoding = 'utf-8') as f:
+    with io.open(input_file, encoding="utf-8") as f:
         docs = yaml.load_all(f, Loader=Loader)
 
         filter_e = None
@@ -282,7 +300,7 @@ def get_remarks(input_file, filter_=None):
         for remark in docs:
             remark.canonicalize()
             # Avoid remarks withoug debug location or if they are duplicated
-            if not hasattr(remark, 'DebugLoc') or remark.key in all_remarks:
+            if not hasattr(remark, "DebugLoc") or remark.key in all_remarks:
                 continue
 
             if filter_e and not filter_e.search(remark.Pass):
@@ -295,7 +313,7 @@ def get_remarks(input_file, filter_=None):
             # If we're reading a back a diff yaml file, max_hotness is already
             # captured which may actually be less than the max hotness found
             # in the file.
-            if hasattr(remark, 'max_hotness'):
+            if hasattr(remark, "max_hotness"):
                 max_hotness = remark.max_hotness
             max_hotness = max(max_hotness, remark.Hotness)
 
@@ -304,11 +322,12 @@ def get_remarks(input_file, filter_=None):
 
 def gather_results(filenames, num_jobs, should_print_progress, filter_=None):
     if should_print_progress:
-        print('Reading YAML files...')
+        print("Reading YAML files...")
     if not Remark.demangler_proc:
         Remark.set_demangler(Remark.default_demangler)
     remarks = optpmap.pmap(
-        get_remarks, filenames, num_jobs, should_print_progress, filter_)
+        get_remarks, filenames, num_jobs, should_print_progress, filter_
+    )
     max_hotness = max(entry[0] for entry in remarks)
 
     def merge_file_remarks(file_remarks_job, all_remarks, merged):
@@ -338,8 +357,9 @@ def find_opt_files(*dirs_or_files):
         else:
             for dir, subdirs, files in os.walk(dir_or_file):
                 # Exclude mounted directories and symlinks (os.walk default).
-                subdirs[:] = [d for d in subdirs
-                              if not os.path.ismount(os.path.join(dir, d))]
+                subdirs[:] = [
+                    d for d in subdirs if not os.path.ismount(os.path.join(dir, d))
+                ]
                 for file in files:
                     if fnmatch.fnmatch(file, "*.opt.yaml*"):
                         all.append(os.path.join(dir, file))
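
Putting the reformatted entry points above together, a minimal sketch of
loading remarks programmatically; the directory name is hypothetical and
must actually contain *.opt.yaml files:

    import optrecord

    files = optrecord.find_opt_files("build")
    # gather_results() also spawns the default demangler (c++filt -n).
    all_remarks, file_remarks, has_hotness = optrecord.gather_results(
        files, 1, should_print_progress=False
    )
    print(len(all_remarks), "unique remarks")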

diff --git a/llvm/tools/sancov/coverage-report-server.py b/llvm/tools/sancov/coverage-report-server.py
index 4c666dbc111d8..7b0b494218cc1 100755
--- a/llvm/tools/sancov/coverage-report-server.py
+++ b/llvm/tools/sancov/coverage-report-server.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
-#===- symcov-report-server.py - Coverage Reports HTTP Serve --*- python -*--===#
+# ===- symcov-report-server.py - Coverage Reports HTTP Serve --*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
-'''(EXPERIMENTAL) HTTP server to browse coverage reports from .symcov files.
+# ===------------------------------------------------------------------------===#
+"""(EXPERIMENTAL) HTTP server to browse coverage reports from .symcov files.
 
 Coverage reports for big binaries are too huge, generating them statically
 makes no sense. Start the server and go to localhost:8001 instead.
@@ -19,7 +19,7 @@
 Other options:
     --port port_number - specifies the port to use (8001)
     --host host_name - host name to bind server to (127.0.0.1)
-'''
+"""
 
 from __future__ import print_function
 
@@ -73,10 +73,11 @@
 
 FILE_URI_PREFIX = "/file/"
 
+
 class SymcovData:
     def __init__(self, symcov_json):
-        self.covered_points = frozenset(symcov_json['covered-points'])
-        self.point_symbol_info = symcov_json['point-symbol-info']
+        self.covered_points = frozenset(symcov_json["covered-points"])
+        self.point_symbol_info = symcov_json["point-symbol-info"]
         self.file_coverage = self.compute_filecoverage()
 
     def filenames(self):
@@ -114,25 +115,29 @@ def compute_filecoverage(self):
             for fn, points in fns.items():
                 file_points.extend(points.keys())
             covered_points = self.covered_points & set(file_points)
-            result[filename] = int(math.ceil(
-                len(covered_points) * 100 / len(file_points)))
+            result[filename] = int(
+                math.ceil(len(covered_points) * 100 / len(file_points))
+            )
         return result
 
 
 def format_pct(pct):
     pct_str = str(max(0, min(100, pct)))
-    zeroes = '0' * (3 - len(pct_str))
+    zeroes = "0" * (3 - len(pct_str))
     if zeroes:
         zeroes = '<span class="lz">{0}</span>'.format(zeroes)
     return zeroes + pct_str
 
+
 class ServerHandler(http.server.BaseHTTPRequestHandler):
     symcov_data = None
     src_path = None
 
     def do_GET(self):
-        norm_path = os.path.normpath(urllib.parse.unquote(self.path[len(FILE_URI_PREFIX):]))
-        if self.path == '/':
+        norm_path = os.path.normpath(
+            urllib.parse.unquote(self.path[len(FILE_URI_PREFIX) :])
+        )
+        if self.path == "/":
             self.send_response(200)
             self.send_header("Content-type", "text/html; charset=utf-8")
             self.end_headers()
@@ -143,18 +148,21 @@ def do_GET(self):
                 if not file_coverage:
                     continue
                 filelist.append(
-                        "<tr><td><a href=\"{prefix}{name}\">{name}</a></td>"
-                        "<td>{coverage}%</td></tr>".format(
-                            prefix=FILE_URI_PREFIX,
-                            name=html.escape(filename, quote=True), 
-                            coverage=format_pct(file_coverage)))
+                    '<tr><td><a href="{prefix}{name}">{name}</a></td>'
+                    "<td>{coverage}%</td></tr>".format(
+                        prefix=FILE_URI_PREFIX,
+                        name=html.escape(filename, quote=True),
+                        coverage=format_pct(file_coverage),
+                    )
+                )
 
             response = string.Template(INDEX_PAGE_TMPL).safe_substitute(
-                filenames='\n'.join(filelist))
-            self.wfile.write(response.encode('UTF-8', 'replace'))
+                filenames="\n".join(filelist)
+            )
+            self.wfile.write(response.encode("UTF-8", "replace"))
         elif self.symcov_data.has_file(norm_path):
             filename = norm_path
-            filepath = os.path.join(self.src_path, filename) 
+            filepath = os.path.join(self.src_path, filename)
             if not os.path.exists(filepath):
                 self.send_response(404)
                 self.end_headers()
@@ -166,18 +174,22 @@ def do_GET(self):
 
             linemap = self.symcov_data.compute_linemap(filename)
 
-            with open(filepath, 'r', encoding='utf8') as f:
+            with open(filepath, "r", encoding="utf8") as f:
                 content = "\n".join(
-                        ["<span class='{cls}'>{line} </span>".format(
-                            line=html.escape(line.rstrip()), 
-                            cls=linemap.get(line_no, ""))
-                            for line_no, line in enumerate(f, start=1)])
+                    [
+                        "<span class='{cls}'>{line} </span>".format(
+                            line=html.escape(line.rstrip()),
+                            cls=linemap.get(line_no, ""),
+                        )
+                        for line_no, line in enumerate(f, start=1)
+                    ]
+                )
 
             response = string.Template(CONTENT_PAGE_TMPL).safe_substitute(
-                path=self.path[1:],
-                content=content)
+                path=self.path[1:], content=content
+            )
 
-            self.wfile.write(response.encode('UTF-8', 'replace'))
+            self.wfile.write(response.encode("UTF-8", "replace"))
         else:
             self.send_response(404)
             self.end_headers()
@@ -185,10 +197,10 @@ def do_GET(self):
 
 def main():
     parser = argparse.ArgumentParser(description="symcov report http server.")
-    parser.add_argument('--host', default='127.0.0.1')
-    parser.add_argument('--port', default=8001)
-    parser.add_argument('--symcov', required=True, type=argparse.FileType('r'))
-    parser.add_argument('--srcpath', required=True)
+    parser.add_argument("--host", default="127.0.0.1")
+    parser.add_argument("--port", default=8001)
+    parser.add_argument("--symcov", required=True, type=argparse.FileType("r"))
+    parser.add_argument("--srcpath", required=True)
     args = parser.parse_args()
 
     print("Loading coverage...")
@@ -205,5 +217,6 @@ def main():
         pass
     httpd.server_close()
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()
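
As a worked example of the zero-padding helper reformatted above:

    # format_pct() pads a percentage to three digits and wraps the leading
    # zeroes in a styled span so the index columns line up.
    print(format_pct(7))    # -> <span class="lz">00</span>7
    print(format_pct(100))  # -> 100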

diff --git a/llvm/utils/DSAclean.py b/llvm/utils/DSAclean.py
index c5fb56b037ebf..1b833ff893248 100755
--- a/llvm/utils/DSAclean.py
+++ b/llvm/utils/DSAclean.py
@@ -1,35 +1,36 @@
 #!/usr/bin/env python
 
-#changelog: 
-#10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _, this will then remove
-#nodes such as %tmp.1.i and %tmp._i.3
-#10/13/2005: exntended to remove variables of the form %tmp(.#)* rather than just 
+# changelog:
+# 10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _, this will then remove
+# nodes such as %tmp.1.i and %tmp._i.3
+# 10/13/2005: exntended to remove variables of the form %tmp(.#)* rather than just
 #%tmp.#, i.e. it now will remove %tmp.12.3.15 etc, additionally fixed a spelling error in
-#the comments
-#10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
-#than removing all lines for which the lable CONTAINS %tmp.#
+# the comments
+# 10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
+# than removing all lines for which the lable CONTAINS %tmp.#
 
 from __future__ import print_function
 
 import re
 import sys
-if( len(sys.argv) < 3 ):
-	print('usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>')
-	sys.exit(1)
-#get a file object
-input = open(sys.argv[1], 'r')
-output = open(sys.argv[2], 'w')
-#we'll get this one line at a time...while we could just put the whole thing in a string
-#it would kill old computers
+
+if len(sys.argv) < 3:
+    print("usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>")
+    sys.exit(1)
+# get a file object
+input = open(sys.argv[1], "r")
+output = open(sys.argv[2], "w")
+# we'll get this one line at a time...while we could just put the whole thing in a string
+# it would kill old computers
 buffer = input.readline()
-while buffer != '':
-	if re.compile("label(\s*)=(\s*)\"\s%tmp(.\w*)*(\s*)\"").search(buffer):
-		#skip next line, write neither this line nor the next
-		buffer = input.readline()
-	else:
-		#this isn't a tmp Node, we can write it
-		output.write(buffer)
-	#prepare for the next iteration
-	buffer = input.readline()
+while buffer != "":
+    if re.compile('label(\s*)=(\s*)"\s%tmp(.\w*)*(\s*)"').search(buffer):
+        # skip next line, write neither this line nor the next
+        buffer = input.readline()
+    else:
+        # this isn't a tmp Node, we can write it
+        output.write(buffer)
+    # prepare for the next iteration
+    buffer = input.readline()
 input.close()
 output.close()
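
A quick sanity check of the label regex above against a hypothetical dot
line (a raw string is used here to avoid invalid-escape warnings):

    import re

    # A node labelled %tmp.1.i; DSAclean drops this line and the next one.
    line = 'Node0x1 [label =" %tmp.1.i "];'
    assert re.compile(r'label(\s*)=(\s*)"\s%tmp(.\w*)*(\s*)"').search(line)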

diff --git a/llvm/utils/DSAextract.py b/llvm/utils/DSAextract.py
index 1d93f1e30c55f..96f818bd2a831 100755
--- a/llvm/utils/DSAextract.py
+++ b/llvm/utils/DSAextract.py
@@ -1,29 +1,29 @@
 #!/usr/bin/env python
 
-#this is a script to extract given named nodes from a dot file, with
-#the associated edges.  An edge is kept iff for edge x -> y
+# this is a script to extract given named nodes from a dot file, with
+# the associated edges.  An edge is kept iff for edge x -> y
 # x and y are both nodes specified to be kept.
 
-#known issues: if a line contains '->' and is not an edge line
-#problems will occur.  If node labels do not begin with
-#Node this also will not work.  Since this is designed to work
-#on DSA dot output and not general dot files this is ok.
-#If you want to use this on other files rename the node labels
-#to Node[.*] with a script or something.  This also relies on
-#the length of a node name being 13 characters (as it is in all
-#DSA dot output files)
+# known issues: if a line contains '->' and is not an edge line
+# problems will occur.  If node labels do not begin with
+# Node this also will not work.  Since this is designed to work
+# on DSA dot output and not general dot files this is ok.
+# If you want to use this on other files rename the node labels
+# to Node[.*] with a script or something.  This also relies on
+# the length of a node name being 13 characters (as it is in all
+# DSA dot output files)
 
-#Note that the name of the node can be any substring of the actual
-#name in the dot file.  Thus if you say specify COLLAPSED
-#as a parameter this script will pull out all COLLAPSED
-#nodes in the file
+# Note that the name of the node can be any substring of the actual
+# name in the dot file.  Thus if you say specify COLLAPSED
+# as a parameter this script will pull out all COLLAPSED
+# nodes in the file
 
-#Specifying escape characters in the name like \n also will not work, 
-#as Python
-#will make it \\n, I'm not really sure how to fix this
+# Specifying escape characters in the name like \n also will not work,
+# as Python
+# will make it \\n, I'm not really sure how to fix this
 
-#currently the script prints the names it is searching for
-#to STDOUT, so you can check to see if they are what you intend
+# currently the script prints the names it is searching for
+# to STDOUT, so you can check to see if they are what you intend
 
 from __future__ import print_function
 
@@ -33,81 +33,81 @@
 
 
 if len(sys.argv) < 3:
-	print('usage is ./DSAextract <dot_file_to_modify> \
-			<output_file> [list of nodes to extract]')
+    print(
+        "usage is ./DSAextract <dot_file_to_modify> \
+			<output_file> [list of nodes to extract]"
+    )
 
-#open the input file
-input = open(sys.argv[1], 'r')
+# open the input file
+input = open(sys.argv[1], "r")
 
-#construct a set of node names
+# construct a set of node names
 node_name_set = set()
 for name in sys.argv[3:]:
-	node_name_set |= set([name])
+    node_name_set |= set([name])
 
-#construct a list of compiled regular expressions from the 
-#node_name_set
+# construct a list of compiled regular expressions from the
+# node_name_set
 regexp_list = []
 for name in node_name_set:
-	regexp_list.append(re.compile(name))
+    regexp_list.append(re.compile(name))
 
-#used to see what kind of line we are on
-nodeexp = re.compile('Node')
-#used to check to see if the current line is an edge line
-arrowexp = re.compile('->')
+# used to see what kind of line we are on
+nodeexp = re.compile("Node")
+# used to check to see if the current line is an edge line
+arrowexp = re.compile("->")
 
 node_set = set()
 
-#read the file one line at a time
+# read the file one line at a time
 buffer = input.readline()
-while buffer != '':
-	#filter out the unnecessary checks on all the edge lines
-	if not arrowexp.search(buffer):
-		#check to see if this is a node we are looking for
-		for regexp in regexp_list:
-			#if this name is for the current node, add the dot variable name
-			#for the node (it will be Node(hex number)) to our set of nodes
-			if regexp.search(buffer):
-				node_set |= set([re.split('\s+',buffer,2)[1]])
-				break
-	buffer = input.readline()
-
-
-#test code
-#print '\n'
+while buffer != "":
+    # filter out the unnecessary checks on all the edge lines
+    if not arrowexp.search(buffer):
+        # check to see if this is a node we are looking for
+        for regexp in regexp_list:
+            # if this name is for the current node, add the dot variable name
+            # for the node (it will be Node(hex number)) to our set of nodes
+            if regexp.search(buffer):
+                node_set |= set([re.split("\s+", buffer, 2)[1]])
+                break
+    buffer = input.readline()
+
+
+# test code
+# print '\n'
 
 print(node_name_set)
 
-#print node_set
-	
+# print node_set
 
-#open the output file
-output = open(sys.argv[2], 'w')
-#start the second pass over the file
-input = open(sys.argv[1], 'r')
 
-buffer = input.readline()
-while buffer != '':
-	#there are three types of lines we are looking for
-	#1) node lines, 2) edge lines 3) support lines (like page size, etc)
-	
-	#is this an edge line?
-	#note that this is no completely robust, if a none edge line
-	#for some reason contains -> it will be missidentified
-	#hand edit the file if this happens
-	if arrowexp.search(buffer):
-		#check to make sure that both nodes are in the node list
-		#if they are print this to output
-		nodes = arrowexp.split(buffer)
-		nodes[0] = string.strip(nodes[0])
-		nodes[1] = string.strip(nodes[1])
-		if nodes[0][:13] in node_set and \
-				nodes[1][:13] in node_set:
-					output.write(buffer)
-	elif nodeexp.search(buffer): #this is a node line
-		node = re.split('\s+', buffer,2)[1]
-		if node in node_set:
-			output.write(buffer)
-	else: #this is a support line
-		output.write(buffer)
-	buffer = input.readline()
+# open the output file
+output = open(sys.argv[2], "w")
+# start the second pass over the file
+input = open(sys.argv[1], "r")
 
+buffer = input.readline()
+while buffer != "":
+    # there are three types of lines we are looking for
+    # 1) node lines, 2) edge lines 3) support lines (like page size, etc)
+
+    # is this an edge line?
+    # note that this is no completely robust, if a none edge line
+    # for some reason contains -> it will be missidentified
+    # hand edit the file if this happens
+    if arrowexp.search(buffer):
+        # check to make sure that both nodes are in the node list
+        # if they are print this to output
+        nodes = arrowexp.split(buffer)
+        nodes[0] = string.strip(nodes[0])
+        nodes[1] = string.strip(nodes[1])
+        if nodes[0][:13] in node_set and nodes[1][:13] in node_set:
+            output.write(buffer)
+    elif nodeexp.search(buffer):  # this is a node line
+        node = re.split("\s+", buffer, 2)[1]
+        if node in node_set:
+            output.write(buffer)
+    else:  # this is a support line
+        output.write(buffer)
+    buffer = input.readline()
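
And a small sketch of the 13-character node-name convention the script
above depends on; the line contents are hypothetical, and str.strip() stands
in for the Python 2 string.strip():

    # Both endpoint names in a DSA dot edge line are 13 characters long.
    line = "\tNode0x8a2f4c0:g -> Node0x8a2f510;\n"
    left, right = line.split("->")
    print(left.strip()[:13], right.strip()[:13])
    # -> Node0x8a2f4c0 Node0x8a2f510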

diff --git a/llvm/utils/Reviewing/find_interesting_reviews.py b/llvm/utils/Reviewing/find_interesting_reviews.py
index 5e72631b20383..c006691079b64 100644
--- a/llvm/utils/Reviewing/find_interesting_reviews.py
+++ b/llvm/utils/Reviewing/find_interesting_reviews.py
@@ -21,8 +21,7 @@
 # $ . ./venv/bin/activate
 # $ pip install Phabricator
 
-GIT_REPO_METADATA = (("llvm-monorepo", "https://github.com/llvm/llvm-project"),
-                     )
+GIT_REPO_METADATA = (("llvm-monorepo", "https://github.com/llvm/llvm-project"),)
 
 # The below PhabXXX classes represent objects as modelled by Phabricator.
 # The classes can be serialized to disk, to try and make sure that we don't
@@ -72,25 +71,30 @@ def populate_cache_from_disk(self, directory=DEFAULT_DIRECTORY):
         try:
             f = open(self._get_pickle_name(directory), "rb")
         except IOError as err:
-            print("Could not find cache. Error message: {0}. Continuing..."
-                  .format(err))
+            print("Could not find cache. Error message: {0}. Continuing...".format(err))
         else:
             with f:
                 try:
                     d = pickle.load(f)
                     self.__dict__.update(d)
                 except EOFError as err:
-                    print("Cache seems to be corrupt. " +
-                          "Not using cache. Error message: {0}".format(err))
+                    print(
+                        "Cache seems to be corrupt. "
+                        + "Not using cache. Error message: {0}".format(err)
+                    )
 
     def write_cache_to_disk(self, directory=DEFAULT_DIRECTORY):
         if not os.path.exists(directory):
             os.makedirs(directory)
         with open(self._get_pickle_name(directory), "wb") as f:
             pickle.dump(self.__dict__, f)
-        print("wrote cache to disk, most_recent_info= {0}".format(
-            datetime.fromtimestamp(self.most_recent_info)
-            if self.most_recent_info is not None else None))
+        print(
+            "wrote cache to disk, most_recent_info= {0}".format(
+                datetime.fromtimestamp(self.most_recent_info)
+                if self.most_recent_info is not None
+                else None
+            )
+        )
 
 
 class PhabReview(PhabObject):
@@ -162,8 +166,9 @@ def __init__(self, rest_api_hunk):
         # Merge the adjacent and overlapping ranges in there:
         t = []
         lastRange = None
-        for start, end in self.actual_lines_changed_offset + \
-                [(sys.maxsize, sys.maxsize)]:
+        for start, end in self.actual_lines_changed_offset + [
+            (sys.maxsize, sys.maxsize)
+        ]:
             if lastRange is None:
                 lastRange = (start, end)
             else:
@@ -214,48 +219,64 @@ def init_phab_connection():
     return phab
 
 
-def update_cached_info(phab, cache, phab_query, order, record_results,
-                       max_nr_entries_per_fetch, max_nr_days_to_cache):
+def update_cached_info(
+    phab,
+    cache,
+    phab_query,
+    order,
+    record_results,
+    max_nr_entries_per_fetch,
+    max_nr_days_to_cache,
+):
     q = phab
     LIMIT = max_nr_entries_per_fetch
     for query_step in phab_query:
         q = getattr(q, query_step)
     results = q(order=order, limit=LIMIT)
     most_recent_info, oldest_info = record_results(cache, results, phab)
-    oldest_info_to_fetch = datetime.fromtimestamp(most_recent_info) - \
-        timedelta(days=max_nr_days_to_cache)
+    oldest_info_to_fetch = datetime.fromtimestamp(most_recent_info) - timedelta(
+        days=max_nr_days_to_cache
+    )
     most_recent_info_overall = most_recent_info
     cache.write_cache_to_disk()
     after = results["cursor"]["after"]
     print("after: {0!r}".format(after))
-    print("most_recent_info: {0}".format(
-        datetime.fromtimestamp(most_recent_info)))
-    while (after is not None
-           and datetime.fromtimestamp(oldest_info) > oldest_info_to_fetch):
-        need_more_older_data = \
-            (cache.oldest_info is None or
-             datetime.fromtimestamp(cache.oldest_info) > oldest_info_to_fetch)
-        print(("need_more_older_data={0} cache.oldest_info={1} " +
-               "oldest_info_to_fetch={2}").format(
-                   need_more_older_data,
-                   datetime.fromtimestamp(cache.oldest_info)
-                   if cache.oldest_info is not None else None,
-                   oldest_info_to_fetch))
-        need_more_newer_data = \
-            (cache.most_recent_info is None or
-             cache.most_recent_info < most_recent_info)
-        print(("need_more_newer_data={0} cache.most_recent_info={1} " +
-               "most_recent_info={2}")
-              .format(need_more_newer_data, cache.most_recent_info,
-                      most_recent_info))
+    print("most_recent_info: {0}".format(datetime.fromtimestamp(most_recent_info)))
+    while (
+        after is not None and datetime.fromtimestamp(oldest_info) > oldest_info_to_fetch
+    ):
+        need_more_older_data = (
+            cache.oldest_info is None
+            or datetime.fromtimestamp(cache.oldest_info) > oldest_info_to_fetch
+        )
+        print(
+            (
+                "need_more_older_data={0} cache.oldest_info={1} "
+                + "oldest_info_to_fetch={2}"
+            ).format(
+                need_more_older_data,
+                datetime.fromtimestamp(cache.oldest_info)
+                if cache.oldest_info is not None
+                else None,
+                oldest_info_to_fetch,
+            )
+        )
+        need_more_newer_data = (
+            cache.most_recent_info is None or cache.most_recent_info < most_recent_info
+        )
+        print(
+            (
+                "need_more_newer_data={0} cache.most_recent_info={1} "
+                + "most_recent_info={2}"
+            ).format(need_more_newer_data, cache.most_recent_info, most_recent_info)
+        )
         if not need_more_older_data and not need_more_newer_data:
             break
         results = q(order=order, after=after, limit=LIMIT)
         most_recent_info, oldest_info = record_results(cache, results, phab)
         after = results["cursor"]["after"]
         print("after: {0!r}".format(after))
-        print("most_recent_info: {0}".format(
-            datetime.fromtimestamp(most_recent_info)))
+        print("most_recent_info: {0}".format(datetime.fromtimestamp(most_recent_info)))
         cache.write_cache_to_disk()
     cache.most_recent_info = most_recent_info_overall
     if after is None:
@@ -279,8 +300,10 @@ def record_reviews(cache, reviews, phab):
         title = reviewInfo["fields"]["title"]
         author = reviewInfo["fields"]["authorPHID"]
         phabReview = cache.get(id)
-        if "dateModified" not in phabReview.__dict__ or \
-           dateModified > phabReview.dateModified:
+        if (
+            "dateModified" not in phabReview.__dict__
+            or dateModified > phabReview.dateModified
+        ):
             diff_results = phab.differential.querydiffs(revisionIDs=[id])
             diff_ids = sorted(diff_results.keys())
             phabDiffs = []
@@ -291,8 +314,11 @@ def record_reviews(cache, reviews, phab):
                 phabDiffs.append(d)
             phabReview.update(title, dateCreated, dateModified, author)
             phabReview.setPhabDiffs(phabDiffs)
-            print("Updated D{0} modified on {1} ({2} 
diff s)".format(
-                id, datetime.fromtimestamp(dateModified), len(phabDiffs)))
+            print(
+                "Updated D{0} modified on {1} ({2} 
diff s)".format(
+                    id, datetime.fromtimestamp(dateModified), len(phabDiffs)
+                )
+            )
 
         if most_recent_info is None:
             most_recent_info = dateModified
@@ -330,41 +356,66 @@ def record_users(cache, users, phab):
     return most_recent_info, oldest_info
 
 
-PHABCACHESINFO = ((reviews_cache, ("
diff erential", "revision", "search"),
-                   "updated", record_reviews, 5, 7),
-                  (users_cache, ("user", "search"), "newest", record_users,
-                   100, 1000))
+PHABCACHESINFO = (
+    (
+        reviews_cache,
+        ("
diff erential", "revision", "search"),
+        "updated",
+        record_reviews,
+        5,
+        7,
+    ),
+    (users_cache, ("user", "search"), "newest", record_users, 100, 1000),
+)
 
 
 def load_cache():
     for cache, phab_query, order, record_results, _, _ in PHABCACHESINFO:
         cache.populate_cache_from_disk()
-        print("Loaded {0} nr entries: {1}".format(
-            cache.get_name(), len(cache.get_ids_in_cache())))
-        print("Loaded {0} has most recent info: {1}".format(
-            cache.get_name(),
-            datetime.fromtimestamp(cache.most_recent_info)
-            if cache.most_recent_info is not None else None))
+        print(
+            "Loaded {0} nr entries: {1}".format(
+                cache.get_name(), len(cache.get_ids_in_cache())
+            )
+        )
+        print(
+            "Loaded {0} has most recent info: {1}".format(
+                cache.get_name(),
+                datetime.fromtimestamp(cache.most_recent_info)
+                if cache.most_recent_info is not None
+                else None,
+            )
+        )
 
 
 def update_cache(phab):
     load_cache()
-    for cache, phab_query, order, record_results, max_nr_entries_per_fetch, \
-            max_nr_days_to_cache in PHABCACHESINFO:
-        update_cached_info(phab, cache, phab_query, order, record_results,
-                           max_nr_entries_per_fetch, max_nr_days_to_cache)
+    for (
+        cache,
+        phab_query,
+        order,
+        record_results,
+        max_nr_entries_per_fetch,
+        max_nr_days_to_cache,
+    ) in PHABCACHESINFO:
+        update_cached_info(
+            phab,
+            cache,
+            phab_query,
+            order,
+            record_results,
+            max_nr_entries_per_fetch,
+            max_nr_days_to_cache,
+        )
         ids_in_cache = cache.get_ids_in_cache()
         print("{0} objects in {1}".format(len(ids_in_cache), cache.get_name()))
         cache.write_cache_to_disk()
 
 
 def get_most_recent_reviews(days):
-    newest_reviews = sorted(
-        reviews_cache.get_objects(), key=lambda r: -r.dateModified)
+    newest_reviews = sorted(reviews_cache.get_objects(), key=lambda r: -r.dateModified)
     if len(newest_reviews) == 0:
         return newest_reviews
-    most_recent_review_time = \
-        datetime.fromtimestamp(newest_reviews[0].dateModified)
+    most_recent_review_time = datetime.fromtimestamp(newest_reviews[0].dateModified)
     cut_off_date = most_recent_review_time - timedelta(days=days)
     result = []
     for review in newest_reviews:
@@ -395,36 +446,51 @@ def print_most_recent_reviews(phab, days, filter_reviewers):
 
     def add_msg(msg):
         msgs.append(msg)
-        print(msg.encode('utf-8'))
+        print(msg.encode("utf-8"))
 
     newest_reviews = get_most_recent_reviews(days)
-    add_msg(u"These are the reviews that look interesting to be reviewed. " +
-            u"The report below has 2 sections. The first " +
-            u"section is organized per review; the second section is organized "
-            + u"per potential reviewer.\n")
+    add_msg(
+        "These are the reviews that look interesting to be reviewed. "
+        + "The report below has 2 sections. The first "
+        + "section is organized per review; the second section is organized "
+        + "per potential reviewer.\n"
+    )
     oldest_review = newest_reviews[-1] if len(newest_reviews) > 0 else None
-    oldest_datetime = \
-        datetime.fromtimestamp(oldest_review.dateModified) \
-        if oldest_review else None
-    add_msg((u"The report below is based on analyzing the reviews that got " +
-             u"touched in the past {0} days (since {1}). " +
-             u"The script found {2} such reviews.\n").format(
-                 days, oldest_datetime, len(newest_reviews)))
+    oldest_datetime = (
+        datetime.fromtimestamp(oldest_review.dateModified) if oldest_review else None
+    )
+    add_msg(
+        (
+            "The report below is based on analyzing the reviews that got "
+            + "touched in the past {0} days (since {1}). "
+            + "The script found {2} such reviews.\n"
+        ).format(days, oldest_datetime, len(newest_reviews))
+    )
     reviewer2reviews_and_scores = {}
     for i, review in enumerate(newest_reviews):
         matched_reviewers = find_reviewers_for_review(review)
         matched_reviewers = filter_reviewers(matched_reviewers)
         if len(matched_reviewers) == 0:
             continue
-        add_msg((u"{0:>3}. https://reviews.llvm.org/D{1} by {2}\n     {3}\n" +
-                 u"     Last updated on {4}").format(
-                     i, review.id,
-                     get_real_name_from_author(review.author), review.title,
-                     datetime.fromtimestamp(review.dateModified)))
+        add_msg(
+            (
+                "{0:>3}. https://reviews.llvm.org/D{1} by {2}\n     {3}\n"
+                + "     Last updated on {4}"
+            ).format(
+                i,
+                review.id,
+                get_real_name_from_author(review.author),
+                review.title,
+                datetime.fromtimestamp(review.dateModified),
+            )
+        )
         for reviewer, scores in matched_reviewers:
-            add_msg(u"    potential reviewer {0}, score {1}".format(
-                reviewer,
-                "(" + "/".join(["{0:.1f}%".format(s) for s in scores]) + ")"))
+            add_msg(
+                "    potential reviewer {0}, score {1}".format(
+                    reviewer,
+                    "(" + "/".join(["{0:.1f}%".format(s) for s in scores]) + ")",
+                )
+            )
             if reviewer not in reviewer2reviews_and_scores:
                 reviewer2reviews_and_scores[reviewer] = []
             reviewer2reviews_and_scores[reviewer].append((review, scores))
@@ -433,12 +499,20 @@ def add_msg(msg):
     for reviewer in sorted(reviewer2reviews_and_scores.keys()):
         reviews_and_scores = reviewer2reviews_and_scores[reviewer]
         reviews_and_scores.sort(key=lambda rs: rs[1], reverse=True)
-        add_msg(u"\n\nSUMMARY FOR {0} (found {1} reviews):".format(
-            reviewer, len(reviews_and_scores)))
+        add_msg(
+            "\n\nSUMMARY FOR {0} (found {1} reviews):".format(
+                reviewer, len(reviews_and_scores)
+            )
+        )
         for review, scores in reviews_and_scores:
-            add_msg(u"[{0}] https://reviews.llvm.org/D{1} '{2}' by {3}".format(
-                "/".join(["{0:.1f}%".format(s) for s in scores]), review.id,
-                review.title, get_real_name_from_author(review.author)))
+            add_msg(
+                "[{0}] https://reviews.llvm.org/D{1} '{2}' by {3}".format(
+                    "/".join(["{0:.1f}%".format(s) for s in scores]),
+                    review.id,
+                    review.title,
+                    get_real_name_from_author(review.author),
+                )
+            )
     return "\n".join(msgs)
 
 
@@ -446,13 +520,12 @@ def get_git_cmd_output(cmd):
     output = None
     try:
         logging.debug(cmd)
-        output = subprocess.check_output(
-            cmd, shell=True, stderr=subprocess.STDOUT)
+        output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
         logging.debug(str(e))
     if output is None:
         return None
-    return output.decode("utf-8", errors='ignore')
+    return output.decode("utf-8", errors="ignore")
 
 
 reAuthorMail = re.compile("^author-mail <([^>]*)>.*$")
@@ -480,12 +553,14 @@ def __init__(self):
     def _populate_cache_for(self, cache_key):
         assert cache_key not in self.cache
         git_repo, base_revision, path = cache_key
-        cmd = ("git -C {0} blame --encoding=utf-8 --date iso -f -e -w " +
-               "--line-porcelain {1} -- {2}").format(git_repo, base_revision,
-                                                     path)
+        cmd = (
+            "git -C {0} blame --encoding=utf-8 --date iso -f -e -w "
+            + "--line-porcelain {1} -- {2}"
+        ).format(git_repo, base_revision, path)
         blame_output = get_git_cmd_output(cmd)
-        self.cache[cache_key] = \
-            blame_output.split('\n') if blame_output is not None else None
+        self.cache[cache_key] = (
+            blame_output.split("\n") if blame_output is not None else None
+        )
         # FIXME: the blame cache could probably be made more effective still if
         # instead of storing the requested base_revision in the cache, the last
         # revision before the base revision this file/path got changed in gets
@@ -493,8 +568,9 @@ def _populate_cache_for(self, cache_key):
         # file/patch hasn't changed would get cache hits (instead of misses in
         # the current implementation).
 
-    def get_blame_output_for(self, git_repo, base_revision, path, start_line=-1,
-                             end_line=-1):
+    def get_blame_output_for(
+        self, git_repo, base_revision, path, start_line=-1, end_line=-1
+    ):
         cache_key = (git_repo, base_revision, path)
         if cache_key not in self.cache:
             self._populate_cache_for(cache_key)
@@ -511,11 +587,14 @@ def get_blame_output_for(self, git_repo, base_revision, path, start_line=-1,
         assert start_line <= end_line
         return all_blame_lines[start_line:end_line]
 
-    def get_parsed_git_blame_for(self, git_repo, base_revision, path,
-                                 start_line=-1, end_line=-1):
+    def get_parsed_git_blame_for(
+        self, git_repo, base_revision, path, start_line=-1, end_line=-1
+    ):
         return parse_blame_output_line_porcelain(
-            self.get_blame_output_for(git_repo, base_revision, path, start_line,
-                                      end_line))
+            self.get_blame_output_for(
+                git_repo, base_revision, path, start_line, end_line
+            )
+        )
 
 
 blameOutputCache = BlameOutputCache()
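
The FIXME above suggests keying the blame cache on the last revision that
actually changed the file rather than on the requested base revision. A
minimal sketch of that idea, reusing get_git_cmd_output from this script
(resolve_blame_cache_key is a hypothetical helper, not part of this patch):

    def resolve_blame_cache_key(git_repo, base_revision, path):
        # Last commit at or before base_revision that touched path; requests
        # with different base revisions then share one cache entry whenever
        # the file is unchanged in between.
        cmd = "git -C {0} rev-list -n 1 {1} -- {2}".format(
            git_repo, base_revision, path
        )
        last_change = get_git_cmd_output(cmd)
        if last_change is None:
            return (git_repo, base_revision, path)
        return (git_repo, last_change.strip(), path)
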
@@ -534,8 +613,8 @@ def find_reviewers_for_diff_heuristic(diff):
     git_repo = os.path.join("git_repos", GIT_REPO_METADATA[0][0])
     cmd = 'git -C {0} rev-list -n 1 --before="{1}" main'.format(
         git_repo,
-        datetime.fromtimestamp(
-            diff.dateModified).strftime("%Y-%m-%d %H:%M:%s"))
+        datetime.fromtimestamp(diff.dateModified).strftime("%Y-%m-%d %H:%M:%s"),
+    )
     base_revision = get_git_cmd_output(cmd).strip()
     logging.debug("Base revision={0}".format(base_revision))
     for change in diff.changes:
@@ -544,18 +623,20 @@ def find_reviewers_for_diff_heuristic(diff):
         for hunk in change.hunks:
             for start_line, end_line in hunk.actual_lines_changed_offset:
                 # Collect git blame results for authors in those ranges.
-                for reviewer, nr_occurences in \
-                        blameOutputCache.get_parsed_git_blame_for(
-                            git_repo, base_revision, path, start_line, end_line
-                        ).items():
+                for (
+                    reviewer,
+                    nr_occurences,
+                ) in blameOutputCache.get_parsed_git_blame_for(
+                    git_repo, base_revision, path, start_line, end_line
+                ).items():
                     if reviewer not in reviewers2nr_lines_touched:
                         reviewers2nr_lines_touched[reviewer] = 0
                     reviewers2nr_lines_touched[reviewer] += nr_occurences
         # Compute heuristic 2: don't look at context, just at files touched.
         # Collect git blame results for authors in those ranges.
-        for reviewer, nr_occurences in \
-                blameOutputCache.get_parsed_git_blame_for(
-                    git_repo, base_revision, path).items():
+        for reviewer, nr_occurences in blameOutputCache.get_parsed_git_blame_for(
+            git_repo, base_revision, path
+        ).items():
             if reviewer not in reviewers2nr_files_touched:
                 reviewers2nr_files_touched[reviewer] = 0
             reviewers2nr_files_touched[reviewer] += 1
@@ -563,30 +644,35 @@ def find_reviewers_for_diff_heuristic(diff):
     # Compute "match scores"
     total_nr_lines = sum(reviewers2nr_lines_touched.values())
     total_nr_files = len(diff.changes)
-    reviewers_matchscores = \
-        [(reviewer,
-          (reviewers2nr_lines_touched.get(reviewer, 0)*100.0/total_nr_lines
-           if total_nr_lines != 0 else 0,
-           reviewers2nr_files_touched[reviewer]*100.0/total_nr_files
-           if total_nr_files != 0 else 0))
-         for reviewer, nr_lines
-         in reviewers2nr_files_touched.items()]
+    reviewers_matchscores = [
+        (
+            reviewer,
+            (
+                reviewers2nr_lines_touched.get(reviewer, 0) * 100.0 / total_nr_lines
+                if total_nr_lines != 0
+                else 0,
+                reviewers2nr_files_touched[reviewer] * 100.0 / total_nr_files
+                if total_nr_files != 0
+                else 0,
+            ),
+        )
+        for reviewer, nr_lines in reviewers2nr_files_touched.items()
+    ]
     reviewers_matchscores.sort(key=lambda i: i[1], reverse=True)
     return reviewers_matchscores
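
A worked example of the scoring above, with illustrative numbers: for a diff
touching 4 files and 200 blamed lines in total, a reviewer who authored 50 of
those lines across 2 of those files scores:

    total_nr_lines, total_nr_files = 200, 4  # illustrative totals
    nr_lines, nr_files = 50, 2               # illustrative per-reviewer counts
    score = (
        nr_lines * 100.0 / total_nr_lines,   # 25.0 (% of blamed lines)
        nr_files * 100.0 / total_nr_files,   # 50.0 (% of files touched)
    )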
 
 
 def find_reviewers_for_review(review):
     # Process the newest diff first.
-    diffs = sorted(
-        review.phabDiffs, key=lambda d: d.dateModified, reverse=True)
+    diffs = sorted(review.phabDiffs, key=lambda d: d.dateModified, reverse=True)
     if len(diffs) == 0:
         return
     diff = diffs[0]
     matched_reviewers = find_reviewers_for_diff_heuristic(diff)
     # Show progress, as this is a slow operation:
-    sys.stdout.write('.')
+    sys.stdout.write(".")
     sys.stdout.flush()
-    logging.debug(u"matched_reviewers: {0}".format(matched_reviewers))
+    logging.debug("matched_reviewers: {0}".format(matched_reviewers))
     return matched_reviewers
 
 
@@ -606,58 +692,66 @@ def send_emails(email_addresses, sender, msg):
     s.connect()
     for email_address in email_addresses:
         email_msg = email.mime.multipart.MIMEMultipart()
-        email_msg['From'] = sender
-        email_msg['To'] = email_address
-        email_msg['Subject'] = 'LLVM patches you may be able to review.'
-        email_msg.attach(email.mime.text.MIMEText(msg.encode('utf-8'), 'plain'))
+        email_msg["From"] = sender
+        email_msg["To"] = email_address
+        email_msg["Subject"] = "LLVM patches you may be able to review."
+        email_msg.attach(email.mime.text.MIMEText(msg.encode("utf-8"), "plain"))
         # python 3.x: s.send_message(email_msg)
-        s.sendmail(email_msg['From'], email_msg['To'], email_msg.as_string())
+        s.sendmail(email_msg["From"], email_msg["To"], email_msg.as_string())
     s.quit()
 
 
 def filter_reviewers_to_report_for(people_to_look_for):
     # The below is just an example filter, to only report potential reviews
     # to do for the people that will receive the report email.
-    return lambda potential_reviewers: [r for r in potential_reviewers
-                                        if r[0] in people_to_look_for]
+    return lambda potential_reviewers: [
+        r for r in potential_reviewers if r[0] in people_to_look_for
+    ]
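
A usage sketch for the filter above (addresses are hypothetical):

    flt = filter_reviewers_to_report_for(["alice@example.com"])
    flt([("alice@example.com", (25.0, 50.0)),
         ("bob@example.com", (10.0, 25.0))])
    # -> [("alice@example.com", (25.0, 50.0))]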
 
 
 def main():
     parser = argparse.ArgumentParser(
-        description='Match open reviews to potential reviewers.')
+        description="Match open reviews to potential reviewers."
+    )
     parser.add_argument(
-        '--no-update-cache',
-        dest='update_cache',
-        action='store_false',
+        "--no-update-cache",
+        dest="update_cache",
+        action="store_false",
         default=True,
-        help='Do not update cached Phabricator objects')
+        help="Do not update cached Phabricator objects",
+    )
     parser.add_argument(
-        '--email-report',
-        dest='email_report',
-        nargs='*',
+        "--email-report",
+        dest="email_report",
+        nargs="*",
         default="",
-        help="A email addresses to send the report to.")
+        help="A email addresses to send the report to.",
+    )
     parser.add_argument(
-        '--sender',
-        dest='sender',
+        "--sender",
+        dest="sender",
         default="",
-        help="The email address to use in 'From' on messages emailed out.")
+        help="The email address to use in 'From' on messages emailed out.",
+    )
     parser.add_argument(
-        '--email-addresses',
-        dest='email_addresses',
-        nargs='*',
-        help="The email addresses (as known by LLVM git) of " +
-        "the people to look for reviews for.")
-    parser.add_argument('--verbose', '-v', action='count')
+        "--email-addresses",
+        dest="email_addresses",
+        nargs="*",
+        help="The email addresses (as known by LLVM git) of "
+        + "the people to look for reviews for.",
+    )
+    parser.add_argument("--verbose", "-v", action="count")
 
     args = parser.parse_args()
 
     if args.verbose >= 1:
         logging.basicConfig(level=logging.DEBUG)
 
-    people_to_look_for = [e.decode('utf-8') for e in args.email_addresses]
-    logging.debug("Will look for reviews that following contributors could " +
-                  "review: {}".format(people_to_look_for))
+    people_to_look_for = [e.decode("utf-8") for e in args.email_addresses]
+    logging.debug(
+        "Will look for reviews that following contributors could "
+        + "review: {}".format(people_to_look_for)
+    )
     logging.debug("Will email a report to: {}".format(args.email_report))
 
     phab = init_phab_connection()
@@ -670,7 +764,8 @@ def main():
     msg = print_most_recent_reviews(
         phab,
         days=1,
-        filter_reviewers=filter_reviewers_to_report_for(people_to_look_for))
+        filter_reviewers=filter_reviewers_to_report_for(people_to_look_for),
+    )
 
     if args.email_report != []:
         send_emails(args.email_report, args.sender, msg)

diff --git a/llvm/utils/Target/ARM/analyze-match-table.py b/llvm/utils/Target/ARM/analyze-match-table.py
index d4e158d9e1d80..c14b1a19fe39e 100644
--- a/llvm/utils/Target/ARM/analyze-match-table.py
+++ b/llvm/utils/Target/ARM/analyze-match-table.py
@@ -2,6 +2,7 @@
 
 from __future__ import print_function
 
+
 def analyze_match_table(path):
     # Extract the instruction table.
     data = open(path).read()
@@ -14,15 +15,14 @@ def analyze_match_table(path):
     for ln in lines:
         ln = ln.split("{", 1)[1]
         ln = ln.rsplit("}", 1)[0]
-        a,bc = ln.split("{", 1)
-        b,c = bc.split("}", 1)
-        code, string, converter, _ = [s.strip()
-                                      for s in a.split(",")]
+        a, bc = ln.split("{", 1)
+        b, c = bc.split("}", 1)
+        code, string, converter, _ = [s.strip() for s in a.split(",")]
         items = [s.strip() for s in b.split(",")]
-        _,features = [s.strip() for s in c.split(",")]
+        _, features = [s.strip() for s in c.split(",")]
         assert string[0] == string[-1] == '"'
         string = string[1:-1]
-        insns.append((code,string,converter,items,features))
+        insns.append((code, string, converter, items, features))
 
     # For every mnemonic, compute whether or not it can have a carry setting
     # operand and whether or not it can have a predication code.
@@ -34,24 +34,24 @@ def analyze_match_table(path):
         flags.update(items)
 
     mnemonics = set(mnemonic_flags)
-    ccout_mnemonics = set(m for m in mnemonics
-                          if 'MCK_CCOut' in mnemonic_flags[m])
-    condcode_mnemonics = set(m for m in mnemonics
-                             if 'MCK_CondCode' in mnemonic_flags[m])
+    ccout_mnemonics = set(m for m in mnemonics if "MCK_CCOut" in mnemonic_flags[m])
+    condcode_mnemonics = set(
+        m for m in mnemonics if "MCK_CondCode" in mnemonic_flags[m]
+    )
     noncondcode_mnemonics = mnemonics - condcode_mnemonics
-    print(' || '.join('Mnemonic == "%s"' % m
-                      for m in ccout_mnemonics))
-    print(' || '.join('Mnemonic == "%s"' % m
-                      for m in noncondcode_mnemonics))
+    print(" || ".join('Mnemonic == "%s"' % m for m in ccout_mnemonics))
+    print(" || ".join('Mnemonic == "%s"' % m for m in noncondcode_mnemonics))
+
 
 def main():
     import sys
+
     if len(sys.argv) == 1:
         import os
         from lit.Util import capture
+
         llvm_obj_root = capture(["llvm-config", "--obj-root"])
-        file = os.path.join(llvm_obj_root,
-                            "lib/Target/ARM/ARMGenAsmMatcher.inc")
+        file = os.path.join(llvm_obj_root, "lib/Target/ARM/ARMGenAsmMatcher.inc")
     elif len(sys.argv) == 2:
         file = sys.argv[1]
     else:
@@ -59,5 +59,6 @@ def main():
 
     analyze_match_table(file)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()
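
The parser above splits each table row on its braces and commas; a synthetic
line in the shape it expects (inferred from the parsing code, not copied from
a real ARMGenAsmMatcher.inc):

    line = '  { 42, "add", ConvertAdd, 0 { MCK_CCOut, MCK_CondCode }, Feature },'
    # -> code="42", string="add", converter="ConvertAdd",
    #    items=["MCK_CCOut", "MCK_CondCode"], features="Feature"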

diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index 54719228e1264..72ff67b03d81e 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -5,10 +5,12 @@
 from . import common
 
 if sys.version_info[0] > 2:
-  class string:
-    expandtabs = str.expandtabs
+
+    class string:
+        expandtabs = str.expandtabs
+
 else:
-  import string
+    import string
 
 # RegEx: this is where the magic happens.
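
Each ASM_FUNCTION_*_RE below captures the named groups "func" and "body"; the
update script applies them to raw llc output roughly like this (llc_output is
a placeholder):

    m = ASM_FUNCTION_ARM_RE.search(llc_output)
    if m:
        func_name = m.group("func")  # the function's label
        asm_body = m.group("body")   # the text handed to the scrubbers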
 
@@ -16,525 +18,580 @@ class string:
 
 ASM_FUNCTION_X86_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*(@"?(?P=func)"?| -- Begin function (?P=func))\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?'
-    r'(?:\.L(?P=func)\$local:\n)?'      # drop .L<func>$local:
-    r'(?:\s*\.type\s+\.L(?P=func)\$local,@function\n)?'  # drop .type .L<func>$local
-    r'(?:[ \t]*(?:\.cfi_startproc|\.cfi_personality|\.cfi_lsda|\.seh_proc|\.seh_handler)\b[^\n]*\n)*'  # drop optional cfi
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section|#+ -- End function)',
-    flags=(re.M | re.S))
+    r"(?:\.L(?P=func)\$local:\n)?"  # drop .L<func>$local:
+    r"(?:\s*\.type\s+\.L(?P=func)\$local, at function\n)?"  # drop .type .L<func>$local
+    r"(?:[ \t]*(?:\.cfi_startproc|\.cfi_personality|\.cfi_lsda|\.seh_proc|\.seh_handler)\b[^\n]*\n)*"  # drop optional cfi
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r"^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section|#+ -- End function)",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_ARM_RE = re.compile(
-    r'^(?P<func>[0-9a-zA-Z_$]+):\n' # f: (name of function)
-    r'(?:\.L(?P=func)\$local:\n)?'  # drop .L<func>$local:
-    r'(?:\s*\.type\s+\.L(?P=func)\$local,@function\n)?'  # drop .type .L<func>$local
-    r'\s+\.fnstart\n' # .fnstart
-    r'(?P<body>.*?)' # (body of the function)
-    r'^.Lfunc_end[0-9]+:', # .Lfunc_end0: or # -- End function
-    flags=(re.M | re.S))
+    r"^(?P<func>[0-9a-zA-Z_$]+):\n"  # f: (name of function)
+    r"(?:\.L(?P=func)\$local:\n)?"  # drop .L<func>$local:
+    r"(?:\s*\.type\s+\.L(?P=func)\$local, at function\n)?"  # drop .type .L<func>$local
+    r"\s+\.fnstart\n"  # .fnstart
+    r"(?P<body>.*?)"  # (body of the function)
+    r"^.Lfunc_end[0-9]+:",  # .Lfunc_end0: or # -- End function
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_AARCH64_RE = re.compile(
-     r'^_?(?P<func>[^:]+):[ \t]*\/\/[ \t]*@"?(?P=func)"?( (Function|Tail Call))?\n'
-     r'(?:[ \t]+.cfi_startproc\n)?'  # drop optional cfi noise
-     r'(?P<body>.*?)\n'
-     # This list is incomplete
-     r'^\s*(\.Lfunc_end[0-9]+|// -- End function)',
-     flags=(re.M | re.S))
+    r'^_?(?P<func>[^:]+):[ \t]*\/\/[ \t]*@"?(?P=func)"?( (Function|Tail Call))?\n'
+    r"(?:[ \t]+.cfi_startproc\n)?"  # drop optional cfi noise
+    r"(?P<body>.*?)\n"
+    # This list is incomplete
+    r"^\s*(\.Lfunc_end[0-9]+|// -- End function)",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_AMDGPU_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
-    r'(?P<body>.*?)\n' # (body of the function)
+    r"(?P<body>.*?)\n"  # (body of the function)
     # This list is incomplete
-    r'^\s*(\.Lfunc_end[0-9]+:\n|\.section)',
-    flags=(re.M | re.S))
+    r"^\s*(\.Lfunc_end[0-9]+:\n|\.section)",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_BPF_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?:[ \t]+.cfi_startproc\n|.seh_proc[^\n]+\n)?'  # drop optional cfi
-    r'(?P<body>.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:[ \t]+.cfi_startproc\n|.seh_proc[^\n]+\n)?"  # drop optional cfi
+    r"(?P<body>.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_HEXAGON_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*//[ \t]*@"?(?P=func)"?\n[^:]*?'
-    r'(?P<body>.*?)\n' # (body of the function)
+    r"(?P<body>.*?)\n"  # (body of the function)
     # This list is incomplete
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_M68K_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*;[ \t]*@"?(?P=func)"?\n'
-    r'(?P<body>.*?)\s*' # (body of the function)
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\s*"  # (body of the function)
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_MIPS_RE = re.compile(
-    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n[^:]*?' # f: (name of func)
-    r'(?:\s*\.?Ltmp[^:\n]*:\n)?[^:]*?'        # optional .Ltmp<N> for EH
-    r'(?:^[ \t]+\.(frame|f?mask|set).*?\n)+'  # Mips+LLVM standard asm prologue
-    r'(?P<body>.*?)\n'                        # (body of the function)
+    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n[^:]*?'  # f: (name of func)
+    r"(?:\s*\.?Ltmp[^:\n]*:\n)?[^:]*?"  # optional .Ltmp<N> for EH
+    r"(?:^[ \t]+\.(frame|f?mask|set).*?\n)+"  # Mips+LLVM standard asm prologue
+    r"(?P<body>.*?)\n"  # (body of the function)
     # Mips+LLVM standard asm epilogue
-    r'(?:(^[ \t]+\.set[^\n]*?\n)*^[ \t]+\.end.*?\n)'
-    r'(\$|\.L)func_end[0-9]+:\n',             # $func_end0: (mips32 - O32) or
-                                              # .Lfunc_end0: (mips64 - NewABI)
-    flags=(re.M | re.S))
+    r"(?:(^[ \t]+\.set[^\n]*?\n)*^[ \t]+\.end.*?\n)"
+    r"(\$|\.L)func_end[0-9]+:\n",  # $func_end0: (mips32 - O32) or
+    # .Lfunc_end0: (mips64 - NewABI)
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_MSP430_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
-    r'(?P<body>.*?)\n'
-    r'(\$|\.L)func_end[0-9]+:\n',             # $func_end0:
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\n"
+    r"(\$|\.L)func_end[0-9]+:\n",  # $func_end0:
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_AVR_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
-    r'(?P<body>.*?)\n'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\n"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_PPC_RE = re.compile(
-    r'#[ \-\t]*Begin function (?P<func>[^.:]+)\n'
-    r'.*?'
+    r"#[ \-\t]*Begin function (?P<func>[^.:]+)\n"
+    r".*?"
     r'^[_.]?(?P=func):(?:[ \t]*#+[ \t]*@"?(?P=func)"?)?\n'
-    r'(?:^[^#]*\n)*'
-    r'(?P<body>.*?)\n'
+    r"(?:^[^#]*\n)*"
+    r"(?P<body>.*?)\n"
     # This list is incomplete
-    r'(?:^[ \t]*(?:\.(?:long|quad|v?byte)[ \t]+[^\n]+)\n)*'
-    r'(?:\.Lfunc_end|L\.\.(?P=func))[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:^[ \t]*(?:\.(?:long|quad|v?byte)[ \t]+[^\n]+)\n)*"
+    r"(?:\.Lfunc_end|L\.\.(?P=func))[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_RISCV_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?:\s*\.?L(?P=func)\$local:\n)?'  # optional .L<func>$local: due to -fno-semantic-interposition
-    r'(?:\s*\.type\s+\.?L(?P=func)\$local,@function\n)?'  # optional .type .L<func>$local
-    r'(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:\s*\.?L(?P=func)\$local:\n)?"  # optional .L<func>$local: due to -fno-semantic-interposition
+    r"(?:\s*\.type\s+\.?L(?P=func)\$local, at function\n)?"  # optional .type .L<func>$local
+    r"(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?"
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_LANAI_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*!+[ \t]*@"?(?P=func)"?\n'
-    r'(?:[ \t]+.cfi_startproc\n)?'  # drop optional cfi noise
-    r'(?P<body>.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:[ \t]+.cfi_startproc\n)?"  # drop optional cfi noise
+    r"(?P<body>.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_SPARC_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*!+[ \t]*@"?(?P=func)"?\n'
-    r'(?P<body>.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_SYSTEMZ_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?:[ \t]+.cfi_startproc\n)?'
-    r'(?P<body>.*?)\n'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:[ \t]+.cfi_startproc\n)?"
+    r"(?P<body>.*?)\n"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_AARCH64_DARWIN_RE = re.compile(
     r'^_(?P<func>[^:]+):[ \t]*;[ \t]@"?(?P=func)"?\n'
-    r'([ \t]*.cfi_startproc\n[\s]*)?'
-    r'(?P<body>.*?)'
-    r'([ \t]*.cfi_endproc\n[\s]*)?'
-    r'^[ \t]*;[ \t]--[ \t]End[ \t]function',
-    flags=(re.M | re.S))
+    r"([ \t]*.cfi_startproc\n[\s]*)?"
+    r"(?P<body>.*?)"
+    r"([ \t]*.cfi_endproc\n[\s]*)?"
+    r"^[ \t]*;[ \t]--[ \t]End[ \t]function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_ARM_DARWIN_RE = re.compile(
-    r'@[ \t]--[ \t]Begin[ \t]function[ \t](?P<func>[^ \t]+?)\n'
-    r'^[ \t]*\.globl[ \t]*_(?P=func)[ \t]*'
-    r'(?P<directives>.*?)'
-    r'^_(?P=func):\n[ \t]*'
-    r'(?P<body>.*?)'
-    r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
-    flags=(re.M | re.S ))
+    r"@[ \t]--[ \t]Begin[ \t]function[ \t](?P<func>[^ \t]+?)\n"
+    r"^[ \t]*\.globl[ \t]*_(?P=func)[ \t]*"
+    r"(?P<directives>.*?)"
+    r"^_(?P=func):\n[ \t]*"
+    r"(?P<body>.*?)"
+    r"^[ \t]*@[ \t]--[ \t]End[ \t]function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_ARM_MACHO_RE = re.compile(
-    r'^_(?P<func>[^:]+):[ \t]*\n'
-    r'([ \t]*.cfi_startproc\n[ \t]*)?'
-    r'(?P<body>.*?)\n'
-    r'[ \t]*\.cfi_endproc\n',
-    flags=(re.M | re.S))
+    r"^_(?P<func>[^:]+):[ \t]*\n"
+    r"([ \t]*.cfi_startproc\n[ \t]*)?"
+    r"(?P<body>.*?)\n"
+    r"[ \t]*\.cfi_endproc\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_THUMBS_DARWIN_RE = re.compile(
-    r'^_(?P<func>[^:]+):\n'
-    r'(?P<body>.*?)\n'
-    r'[ \t]*\.data_region\n',
-    flags=(re.M | re.S))
+    r"^_(?P<func>[^:]+):\n" r"(?P<body>.*?)\n" r"[ \t]*\.data_region\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_THUMB_DARWIN_RE = re.compile(
-    r'^_(?P<func>[^:]+):\n'
-    r'(?P<body>.*?)\n'
-    r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
-    flags=(re.M | re.S))
+    r"^_(?P<func>[^:]+):\n" r"(?P<body>.*?)\n" r"^[ \t]*@[ \t]--[ \t]End[ \t]function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_ARM_IOS_RE = re.compile(
-    r'^_(?P<func>[^:]+):\n'
-    r'(?P<body>.*?)'
-    r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
-    flags=(re.M | re.S))
+    r"^_(?P<func>[^:]+):\n" r"(?P<body>.*?)" r"^[ \t]*@[ \t]--[ \t]End[ \t]function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_WASM_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?P<body>.*?)\n'
-    r'^\s*(\.Lfunc_end[0-9]+:\n|end_function)',
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\n"
+    r"^\s*(\.Lfunc_end[0-9]+:\n|end_function)",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_VE_RE = re.compile(
-    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n'
-    r'(?:\s*\.?L(?P=func)\$local:\n)?'  # optional .L<func>$local: due to -fno-semantic-interposition
-    r'(?:\s*\.type\s+\.?L(?P=func)\$local,@function\n)?'  # optional .type .L<func>$local
-    r'(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n"
+    r"(?:\s*\.?L(?P=func)\$local:\n)?"  # optional .L<func>$local: due to -fno-semantic-interposition
+    r"(?:\s*\.type\s+\.?L(?P=func)\$local, at function\n)?"  # optional .type .L<func>$local
+    r"(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?"
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_CSKY_RE = re.compile(
-    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?"
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_NVPTX_RE = re.compile(
     # function attributes and retval
     # .visible .func (.param .align 16 .b8 func_retval0[32])
-    #r'^(\.visible\s+)?\.func\s+(\([^\)]*\)\s*)?'
-    r'^(\.(func|visible|weak|entry|noreturn|extern)\s+)+(\([^\)]*\)\s*)?'
-
+    # r'^(\.visible\s+)?\.func\s+(\([^\)]*\)\s*)?'
+    r"^(\.(func|visible|weak|entry|noreturn|extern)\s+)+(\([^\)]*\)\s*)?"
     # function name
-    r'(?P<func>[^\(\n]+)'
-
+    r"(?P<func>[^\(\n]+)"
     # function name separator (opening brace)
-    r'(?P<func_name_separator>\()'
-
+    r"(?P<func_name_separator>\()"
     # function parameters
     # (
     #   .param .align 16 .b8 callee_St8x4_param_0[32]
     # ) // -- Begin function callee_St8x4
-    r'[^\)]*\)(\s*//[^\n]*)?\n'
-
+    r"[^\)]*\)(\s*//[^\n]*)?\n"
     # function body
-    r'(?P<body>.*?)\n'
-
+    r"(?P<body>.*?)\n"
     # function body end marker
-    r'\s*// -- End function',
-    flags=(re.M | re.S))
+    r"\s*// -- End function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_LOONGARCH_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
-
-SCRUB_X86_SHUFFLES_RE = (
-    re.compile(
-        r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
-        flags=re.M))
-
-SCRUB_X86_SHUFFLES_NO_MEM_RE = (
-    re.compile(
-        r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = (?!.*(?:mem)).*)$',
-        flags=re.M))
-
-SCRUB_X86_SPILL_RELOAD_RE = (
-    re.compile(
-        r'-?\d+\(%([er])[sb]p\)(.*(?:Spill|Reload))$',
-        flags=re.M))
-SCRUB_X86_SP_RE = re.compile(r'\d+\(%(esp|rsp)\)')
-SCRUB_X86_RIP_RE = re.compile(r'[.\w]+\(%rip\)')
-SCRUB_X86_LCP_RE = re.compile(r'\.?LCPI[0-9]+_[0-9]+')
-SCRUB_X86_RET_RE = re.compile(r'ret[l|q]')
+    r"(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?"
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
+
+SCRUB_X86_SHUFFLES_RE = re.compile(
+    r"^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$", flags=re.M
+)
+
+SCRUB_X86_SHUFFLES_NO_MEM_RE = re.compile(
+    r"^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = (?!.*(?:mem)).*)$",
+    flags=re.M,
+)
+
+SCRUB_X86_SPILL_RELOAD_RE = re.compile(
+    r"-?\d+\(%([er])[sb]p\)(.*(?:Spill|Reload))$", flags=re.M
+)
+SCRUB_X86_SP_RE = re.compile(r"\d+\(%(esp|rsp)\)")
+SCRUB_X86_RIP_RE = re.compile(r"[.\w]+\(%rip\)")
+SCRUB_X86_LCP_RE = re.compile(r"\.?LCPI[0-9]+_[0-9]+")
+SCRUB_X86_RET_RE = re.compile(r"ret[l|q]")
+
 
 def scrub_asm_x86(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-
-  # Detect shuffle asm comments and hide the operands in favor of the comments.
-  if getattr(args, 'no_x86_scrub_mem_shuffle', True):
-    asm = SCRUB_X86_SHUFFLES_NO_MEM_RE.sub(r'\1 {{.*#+}} \2', asm)
-  else:
-    asm = SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', asm)
-
-  # Detect stack spills and reloads and hide their exact offset and whether
-  # they used the stack pointer or frame pointer.
-  asm = SCRUB_X86_SPILL_RELOAD_RE.sub(r'{{[-0-9]+}}(%\1{{[sb]}}p)\2', asm)
-  if getattr(args, 'x86_scrub_sp', True):
-    # Generically match the stack offset of a memory operand.
-    asm = SCRUB_X86_SP_RE.sub(r'{{[0-9]+}}(%\1)', asm)
-  if getattr(args, 'x86_scrub_rip', False):
-    # Generically match a RIP-relative memory operand.
-    asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
-  # Generically match a LCP symbol.
-  asm = SCRUB_X86_LCP_RE.sub(r'{{\.?LCPI[0-9]+_[0-9]+}}', asm)
-  if getattr(args, 'extra_scrub', False):
-    # Avoid generating 
diff erent checks for 32- and 64-bit because of 'retl' vs 'retq'.
-    asm = SCRUB_X86_RET_RE.sub(r'ret{{[l|q]}}', asm)
-  # Strip kill operands inserted into the asm.
-  asm = common.SCRUB_KILL_COMMENT_RE.sub('', asm)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+
+    # Detect shuffle asm comments and hide the operands in favor of the comments.
+    if getattr(args, "no_x86_scrub_mem_shuffle", True):
+        asm = SCRUB_X86_SHUFFLES_NO_MEM_RE.sub(r"\1 {{.*#+}} \2", asm)
+    else:
+        asm = SCRUB_X86_SHUFFLES_RE.sub(r"\1 {{.*#+}} \2", asm)
+
+    # Detect stack spills and reloads and hide their exact offset and whether
+    # they used the stack pointer or frame pointer.
+    asm = SCRUB_X86_SPILL_RELOAD_RE.sub(r"{{[-0-9]+}}(%\1{{[sb]}}p)\2", asm)
+    if getattr(args, "x86_scrub_sp", True):
+        # Generically match the stack offset of a memory operand.
+        asm = SCRUB_X86_SP_RE.sub(r"{{[0-9]+}}(%\1)", asm)
+    if getattr(args, "x86_scrub_rip", False):
+        # Generically match a RIP-relative memory operand.
+        asm = SCRUB_X86_RIP_RE.sub(r"{{.*}}(%rip)", asm)
+    # Generically match a LCP symbol.
+    asm = SCRUB_X86_LCP_RE.sub(r"{{\.?LCPI[0-9]+_[0-9]+}}", asm)
+    if getattr(args, "extra_scrub", False):
+        # Avoid generating different checks for 32- and 64-bit because of 'retl' vs 'retq'.
+        asm = SCRUB_X86_RET_RE.sub(r"ret{{[l|q]}}", asm)
+    # Strip kill operands inserted into the asm.
+    asm = common.SCRUB_KILL_COMMENT_RE.sub("", asm)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_amdgpu(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_arm_eabi(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip kill operands inserted into the asm.
-  asm = common.SCRUB_KILL_COMMENT_RE.sub('', asm)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip kill operands inserted into the asm.
+    asm = common.SCRUB_KILL_COMMENT_RE.sub("", asm)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_bpf(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_hexagon(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_powerpc(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip unimportant comments, but leave the token '#' in place.
-  asm = common.SCRUB_LOOP_COMMENT_RE.sub(r'#', asm)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  # Strip the tailing token '#', except the line only has token '#'.
-  asm = common.SCRUB_TAILING_COMMENT_TOKEN_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip unimportant comments, but leave the token '#' in place.
+    asm = common.SCRUB_LOOP_COMMENT_RE.sub(r"#", asm)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    # Strip the tailing token '#', except the line only has token '#'.
+    asm = common.SCRUB_TAILING_COMMENT_TOKEN_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_m68k(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_mips(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_msp430(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_avr(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_riscv(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_lanai(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_sparc(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_systemz(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_wasm(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_ve(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_csky(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip kill operands inserted into the asm.
-  asm = common.SCRUB_KILL_COMMENT_RE.sub('', asm)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip kill operands inserted into the asm.
+    asm = common.SCRUB_KILL_COMMENT_RE.sub("", asm)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_nvptx(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_loongarch(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
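
All the scrub_asm_* helpers above share the same basic pipeline; per their
comments, the intended effect is roughly (illustrative input; the exact
behavior is defined by the regexes in common.py):

    #   before: "\tvadd.f32\ts0,  s0, s1   "
    #   after:  "  vadd.f32 s0, s0, s1"
    # (interior whitespace runs collapsed, tabs expanded to width 2,
    #  trailing whitespace stripped, leading indentation preserved)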
 
 # Returns a tuple of a scrub function and a function regex. Scrub function is
 # used to alter function body in some way, for example, remove trailing spaces.
 # Function regex is used to match function name, body, etc. in raw llc output.
 def get_run_handler(triple):
-  target_handlers = {
-      'i686': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
-      'x86': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
-      'i386': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
-      'arm64_32-apple-ios': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'arm64_32-apple-watchos2.0.0': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'aarch64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
-      'aarch64-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'aarch64-apple-ios': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'bpf': (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
-      'bpfel': (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
-      'bpfeb': (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
-      'hexagon': (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE),
-      'r600': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
-      'amdgcn': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
-      'arm': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
-      'arm64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
-      'arm64e': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'arm64ec': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
-      'arm64-apple-ios': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'armv7-apple-ios' : (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
-      'armv7-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE),
-      'thumb': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
-      'thumb-macho': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
-      'thumbv5-macho': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
-      'thumbv7s-apple-darwin' : (scrub_asm_arm_eabi, ASM_FUNCTION_THUMBS_DARWIN_RE),
-      'thumbv7-apple-darwin' : (scrub_asm_arm_eabi, ASM_FUNCTION_THUMB_DARWIN_RE),
-      'thumbv7-apple-ios' : (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
-      'm68k': (scrub_asm_m68k, ASM_FUNCTION_M68K_RE),
-      'mips': (scrub_asm_mips, ASM_FUNCTION_MIPS_RE),
-      'msp430': (scrub_asm_msp430, ASM_FUNCTION_MSP430_RE),
-      'avr': (scrub_asm_avr, ASM_FUNCTION_AVR_RE),
-      'ppc32': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
-      'ppc64': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
-      'powerpc': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
-      'riscv32': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
-      'riscv64': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
-      'lanai': (scrub_asm_lanai, ASM_FUNCTION_LANAI_RE),
-      'sparc': (scrub_asm_sparc, ASM_FUNCTION_SPARC_RE),
-      's390x': (scrub_asm_systemz, ASM_FUNCTION_SYSTEMZ_RE),
-      'wasm32': (scrub_asm_wasm, ASM_FUNCTION_WASM_RE),
-      'wasm64': (scrub_asm_wasm, ASM_FUNCTION_WASM_RE),
-      've': (scrub_asm_ve, ASM_FUNCTION_VE_RE),
-      'csky': (scrub_asm_csky, ASM_FUNCTION_CSKY_RE),
-      'nvptx': (scrub_asm_nvptx, ASM_FUNCTION_NVPTX_RE),
-      'loongarch32': (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
-      'loongarch64': (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE)
-  }
-  handler = None
-  best_prefix = ''
-  for prefix, s in target_handlers.items():
-    if triple.startswith(prefix) and len(prefix) > len(best_prefix):
-      handler = s
-      best_prefix = prefix
-
-  if handler is None:
-    raise KeyError('Triple %r is not supported' % (triple))
-
-  return handler
+    target_handlers = {
+        "i686": (scrub_asm_x86, ASM_FUNCTION_X86_RE),
+        "x86": (scrub_asm_x86, ASM_FUNCTION_X86_RE),
+        "i386": (scrub_asm_x86, ASM_FUNCTION_X86_RE),
+        "arm64_32-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "arm64_32-apple-watchos2.0.0": (
+            scrub_asm_arm_eabi,
+            ASM_FUNCTION_AARCH64_DARWIN_RE,
+        ),
+        "aarch64": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
+        "aarch64-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "aarch64-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "bpf": (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
+        "bpfel": (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
+        "bpfeb": (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
+        "hexagon": (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE),
+        "r600": (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
+        "amdgcn": (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
+        "arm": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
+        "arm64": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
+        "arm64e": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "arm64ec": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
+        "arm64-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "armv7-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
+        "armv7-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE),
+        "thumb": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
+        "thumb-macho": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
+        "thumbv5-macho": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
+        "thumbv7s-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_THUMBS_DARWIN_RE),
+        "thumbv7-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_THUMB_DARWIN_RE),
+        "thumbv7-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
+        "m68k": (scrub_asm_m68k, ASM_FUNCTION_M68K_RE),
+        "mips": (scrub_asm_mips, ASM_FUNCTION_MIPS_RE),
+        "msp430": (scrub_asm_msp430, ASM_FUNCTION_MSP430_RE),
+        "avr": (scrub_asm_avr, ASM_FUNCTION_AVR_RE),
+        "ppc32": (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
+        "ppc64": (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
+        "powerpc": (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
+        "riscv32": (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
+        "riscv64": (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
+        "lanai": (scrub_asm_lanai, ASM_FUNCTION_LANAI_RE),
+        "sparc": (scrub_asm_sparc, ASM_FUNCTION_SPARC_RE),
+        "s390x": (scrub_asm_systemz, ASM_FUNCTION_SYSTEMZ_RE),
+        "wasm32": (scrub_asm_wasm, ASM_FUNCTION_WASM_RE),
+        "wasm64": (scrub_asm_wasm, ASM_FUNCTION_WASM_RE),
+        "ve": (scrub_asm_ve, ASM_FUNCTION_VE_RE),
+        "csky": (scrub_asm_csky, ASM_FUNCTION_CSKY_RE),
+        "nvptx": (scrub_asm_nvptx, ASM_FUNCTION_NVPTX_RE),
+        "loongarch32": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
+        "loongarch64": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
+    }
+    handler = None
+    best_prefix = ""
+    for prefix, s in target_handlers.items():
+        if triple.startswith(prefix) and len(prefix) > len(best_prefix):
+            handler = s
+            best_prefix = prefix
+
+    if handler is None:
+        raise KeyError("Triple %r is not supported" % (triple))
+
+    return handler
+
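
As a worked illustration of the longest-prefix dispatch above (a minimal
sketch with a made-up handler table standing in for the real
(scrubber, regex) pairs):

    handlers = {"arm": "ARM", "arm64": "AArch64", "arm64-apple-ios": "AArch64-Darwin"}

    def pick(triple):
        best, best_prefix = None, ""
        for prefix, h in handlers.items():
            if triple.startswith(prefix) and len(prefix) > len(best_prefix):
                best, best_prefix = h, prefix
        if best is None:
            raise KeyError("Triple %r is not supported" % (triple))
        return best

    assert pick("arm64-apple-ios14.0") == "AArch64-Darwin"  # longest prefix wins

Scanning every key and keeping the longest match means "arm64-apple-ios"
beats both "arm" and "arm64" for Darwin triples, so dictionary ordering
never matters.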
 
 ##### Generator of assembly CHECK lines
 
-def add_checks(output_lines, comment_marker, prefix_list, func_dict,
-               func_name, global_vars_seen_dict, is_filtered):
-  # Label format is based on ASM string.
-  check_label_format = '{} %s-LABEL: %s%s%s%s'.format(comment_marker)
-  return common.add_checks(output_lines, comment_marker, prefix_list, func_dict,
-                           func_name, check_label_format, True, False, 1,
-                           global_vars_seen_dict, is_filtered=is_filtered)
+
+def add_checks(
+    output_lines,
+    comment_marker,
+    prefix_list,
+    func_dict,
+    func_name,
+    global_vars_seen_dict,
+    is_filtered,
+):
+    # Label format is based on ASM string.
+    check_label_format = "{} %s-LABEL: %s%s%s%s".format(comment_marker)
+    return common.add_checks(
+        output_lines,
+        comment_marker,
+        prefix_list,
+        func_dict,
+        func_name,
+        check_label_format,
+        True,
+        False,
+        1,
+        global_vars_seen_dict,
+        is_filtered=is_filtered,
+    )
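
The label format above is built in two stages; a small sketch with
illustrative arguments (the real values are supplied later by
common.add_checks):

    comment_marker = ";"
    check_label_format = "{} %s-LABEL: %s%s%s%s".format(comment_marker)
    # str.format() fills the comment marker now -> "; %s-LABEL: %s%s%s%s"
    # the %s slots are filled later with prefix, function name, etc.
    assert check_label_format % ("CHECK", "@", "foo", "", "") == "; CHECK-LABEL: @foo"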

diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py

index aa0b12812e205..b22d7e3276abf 100644
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -16,7 +16,7 @@
 
 
 _verbose = False
-_prefix_filecheck_ir_name = ''
+_prefix_filecheck_ir_name = ""
 
 """
 Version changelog:
@@ -27,792 +27,1028 @@
 """
 DEFAULT_VERSION = 2
 
+
 class Regex(object):
-  """Wrap a compiled regular expression object to allow deep copy of a regexp.
-  This is required for the deep copy done in do_scrub.
+    """Wrap a compiled regular expression object to allow deep copy of a regexp.
+    This is required for the deep copy done in do_scrub.
 
-  """
-  def __init__(self, regex):
-    self.regex = regex
+    """
 
-  def __deepcopy__(self, memo):
-    result = copy.copy(self)
-    result.regex = self.regex
-    return result
+    def __init__(self, regex):
+        self.regex = regex
+
+    def __deepcopy__(self, memo):
+        result = copy.copy(self)
+        result.regex = self.regex
+        return result
+
+    def search(self, line):
+        return self.regex.search(line)
 
-  def search(self, line):
-    return self.regex.search(line)
+    def sub(self, repl, line):
+        return self.regex.sub(repl, line)
 
-  def sub(self, repl, line):
-    return self.regex.sub(repl, line)
+    def pattern(self):
+        return self.regex.pattern
 
-  def pattern(self):
-    return self.regex.pattern
+    def flags(self):
+        return self.regex.flags
 
-  def flags(self):
-    return self.regex.flags
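
A minimal sketch of why the wrapper above needs __deepcopy__: do_scrub
deep-copies scrubber arguments, and compiled patterns could not be
deep-copied on older Pythons, so the wrapper shares the compiled object
instead of copying it:

    import copy, re

    class Regex(object):
        def __init__(self, regex):
            self.regex = regex
        def __deepcopy__(self, memo):
            result = copy.copy(self)
            result.regex = self.regex  # share, don't copy, the compiled pattern
            return result

    r1 = Regex(re.compile(r"\d+"))
    r2 = copy.deepcopy(r1)
    assert r2.regex is r1.regex  # the compiled pattern itself is shared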
 
 class Filter(Regex):
-  """Augment a Regex object with a flag indicating whether a match should be
+    """Augment a Regex object with a flag indicating whether a match should be
     added (!is_filter_out) or removed (is_filter_out) from the generated checks.
 
-  """
-  def __init__(self, regex, is_filter_out):
-    super(Filter, self).__init__(regex)
-    self.is_filter_out = is_filter_out
+    """
+
+    def __init__(self, regex, is_filter_out):
+        super(Filter, self).__init__(regex)
+        self.is_filter_out = is_filter_out
+
+    def __deepcopy__(self, memo):
+        result = copy.deepcopy(super(Filter, self), memo)
+        result.is_filter_out = copy.deepcopy(self.is_filter_out, memo)
+        return result
 
-  def __deepcopy__(self, memo):
-    result = copy.deepcopy(super(Filter, self), memo)
-    result.is_filter_out = copy.deepcopy(self.is_filter_out, memo)
-    return result
 
 def parse_commandline_args(parser):
-  class RegexAction(argparse.Action):
-    """Add a regular expression option value to a list of regular expressions.
-    This compiles the expression, wraps it in a Regex and adds it to the option
-    value list."""
-    def __init__(self, option_strings, dest, nargs=None, **kwargs):
-      if nargs is not None:
-        raise ValueError('nargs not allowed')
-      super(RegexAction, self).__init__(option_strings, dest, **kwargs)
-
-    def do_call(self, namespace, values, flags):
-      value_list = getattr(namespace, self.dest)
-      if value_list is None:
-        value_list = []
-
-      try:
-        value_list.append(Regex(re.compile(values, flags)))
-      except re.error as error:
-        raise ValueError('{}: Invalid regular expression \'{}\' ({})'.format(
-          option_string, error.pattern, error.msg))
-
-      setattr(namespace, self.dest, value_list)
-
-    def __call__(self, parser, namespace, values, option_string=None):
-      self.do_call(namespace, values, 0)
-
-  class FilterAction(RegexAction):
-    """Add a filter to a list of filter option values."""
-    def __init__(self, option_strings, dest, nargs=None, **kwargs):
-      super(FilterAction, self).__init__(option_strings, dest, nargs, **kwargs)
-
-    def __call__(self, parser, namespace, values, option_string=None):
-      super(FilterAction, self).__call__(parser, namespace, values, option_string)
-
-      value_list = getattr(namespace, self.dest)
-
-      is_filter_out = ( option_string == '--filter-out' )
-
-      value_list[-1] = Filter(value_list[-1].regex, is_filter_out)
-
-      setattr(namespace, self.dest, value_list)
-
-  filter_group = parser.add_argument_group(
-    'filtering',
-    """Filters are applied to each output line according to the order given. The
-    first matching filter terminates filter processing for that current line.""")
-
-  filter_group.add_argument('--filter', action=FilterAction, dest='filters',
-                            metavar='REGEX',
-                            help='Only include lines matching REGEX (may be specified multiple times)')
-  filter_group.add_argument('--filter-out', action=FilterAction, dest='filters',
-                            metavar='REGEX',
-                            help='Exclude lines matching REGEX')
-
-  parser.add_argument('--include-generated-funcs', action='store_true',
-                      help='Output checks for functions not in source')
-  parser.add_argument('-v', '--verbose', action='store_true',
-                      help='Show verbose output')
-  parser.add_argument('-u', '--update-only', action='store_true',
-                      help='Only update test if it was already autogened')
-  parser.add_argument('--force-update', action='store_true',
-                      help='Update test even if it was autogened by a different script')
-  parser.add_argument('--enable', action='store_true', dest='enabled', default=True,
-                       help='Activate CHECK line generation from this point forward')
-  parser.add_argument('--disable', action='store_false', dest='enabled',
-                      help='Deactivate CHECK line generation from this point forward')
-  parser.add_argument('--replace-value-regex', nargs='+', default=[],
-                      help='List of regular expressions to replace matching value names')
-  parser.add_argument('--prefix-filecheck-ir-name', default='',
-                      help='Add a prefix to FileCheck IR value names to avoid conflicts with scripted names')
-  parser.add_argument('--global-value-regex', nargs='+', default=[],
-                      help='List of regular expressions that a global value declaration must match to generate a check (has no effect if checking globals is not enabled)')
-  parser.add_argument('--global-hex-value-regex', nargs='+', default=[],
-                      help='List of regular expressions such that, for matching global value declarations, literal integer values should be encoded in hex in the associated FileCheck directives')
-  # FIXME: in 3.9, we can use argparse.BooleanOptionalAction. At that point,
-  # we need to rename the flag to just -generate-body-for-unused-prefixes.
-  parser.add_argument('--no-generate-body-for-unused-prefixes',
-                      action='store_false',
-                      dest='gen_unused_prefix_body',
-                      default=True,
-                      help='Generate a function body that always matches for unused prefixes. This is useful when unused prefixes are desired, and it avoids needing to annotate each FileCheck as allowing them.')
-  # This is the default when regenerating existing tests. The default when
-  # generating new tests is determined by DEFAULT_VERSION.
-  parser.add_argument('--version', type=int, default=1,
-                      help='The version of output format')
-  args = parser.parse_args()
-  global _verbose, _global_value_regex, _global_hex_value_regex
-  _verbose = args.verbose
-  _global_value_regex = args.global_value_regex
-  _global_hex_value_regex = args.global_hex_value_regex
-  return args
+    class RegexAction(argparse.Action):
+        """Add a regular expression option value to a list of regular expressions.
+        This compiles the expression, wraps it in a Regex and adds it to the option
+        value list."""
+
+        def __init__(self, option_strings, dest, nargs=None, **kwargs):
+            if nargs is not None:
+                raise ValueError("nargs not allowed")
+            super(RegexAction, self).__init__(option_strings, dest, **kwargs)
+
+        def do_call(self, namespace, values, flags):
+            value_list = getattr(namespace, self.dest)
+            if value_list is None:
+                value_list = []
+
+            try:
+                value_list.append(Regex(re.compile(values, flags)))
+            except re.error as error:
+                raise ValueError(
+                    "{}: Invalid regular expression '{}' ({})".format(
+                        option_string, error.pattern, error.msg
+                    )
+                )
+
+            setattr(namespace, self.dest, value_list)
+
+        def __call__(self, parser, namespace, values, option_string=None):
+            self.do_call(namespace, values, 0)
+
+    class FilterAction(RegexAction):
+        """Add a filter to a list of filter option values."""
+
+        def __init__(self, option_strings, dest, nargs=None, **kwargs):
+            super(FilterAction, self).__init__(option_strings, dest, nargs, **kwargs)
+
+        def __call__(self, parser, namespace, values, option_string=None):
+            super(FilterAction, self).__call__(parser, namespace, values, option_string)
+
+            value_list = getattr(namespace, self.dest)
+
+            is_filter_out = option_string == "--filter-out"
+
+            value_list[-1] = Filter(value_list[-1].regex, is_filter_out)
+
+            setattr(namespace, self.dest, value_list)
+
+    filter_group = parser.add_argument_group(
+        "filtering",
+        """Filters are applied to each output line according to the order given. The
+    first matching filter terminates filter processing for that current line.""",
+    )
+
+    filter_group.add_argument(
+        "--filter",
+        action=FilterAction,
+        dest="filters",
+        metavar="REGEX",
+        help="Only include lines matching REGEX (may be specified multiple times)",
+    )
+    filter_group.add_argument(
+        "--filter-out",
+        action=FilterAction,
+        dest="filters",
+        metavar="REGEX",
+        help="Exclude lines matching REGEX",
+    )
+
+    parser.add_argument(
+        "--include-generated-funcs",
+        action="store_true",
+        help="Output checks for functions not in source",
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="store_true", help="Show verbose output"
+    )
+    parser.add_argument(
+        "-u",
+        "--update-only",
+        action="store_true",
+        help="Only update test if it was already autogened",
+    )
+    parser.add_argument(
+        "--force-update",
+        action="store_true",
+        help="Update test even if it was autogened by a 
diff erent script",
+    )
+    parser.add_argument(
+        "--enable",
+        action="store_true",
+        dest="enabled",
+        default=True,
+        help="Activate CHECK line generation from this point forward",
+    )
+    parser.add_argument(
+        "--disable",
+        action="store_false",
+        dest="enabled",
+        help="Deactivate CHECK line generation from this point forward",
+    )
+    parser.add_argument(
+        "--replace-value-regex",
+        nargs="+",
+        default=[],
+        help="List of regular expressions to replace matching value names",
+    )
+    parser.add_argument(
+        "--prefix-filecheck-ir-name",
+        default="",
+        help="Add a prefix to FileCheck IR value names to avoid conflicts with scripted names",
+    )
+    parser.add_argument(
+        "--global-value-regex",
+        nargs="+",
+        default=[],
+        help="List of regular expressions that a global value declaration must match to generate a check (has no effect if checking globals is not enabled)",
+    )
+    parser.add_argument(
+        "--global-hex-value-regex",
+        nargs="+",
+        default=[],
+        help="List of regular expressions such that, for matching global value declarations, literal integer values should be encoded in hex in the associated FileCheck directives",
+    )
+    # FIXME: in 3.9, we can use argparse.BooleanOptionalAction. At that point,
+    # we need to rename the flag to just -generate-body-for-unused-prefixes.
+    parser.add_argument(
+        "--no-generate-body-for-unused-prefixes",
+        action="store_false",
+        dest="gen_unused_prefix_body",
+        default=True,
+        help="Generate a function body that always matches for unused prefixes. This is useful when unused prefixes are desired, and it avoids needing to annotate each FileCheck as allowing them.",
+    )
+    # This is the default when regenerating existing tests. The default when
+    # generating new tests is determined by DEFAULT_VERSION.
+    parser.add_argument(
+        "--version", type=int, default=1, help="The version of output format"
+    )
+    args = parser.parse_args()
+    global _verbose, _global_value_regex, _global_hex_value_regex
+    _verbose = args.verbose
+    _global_value_regex = args.global_value_regex
+    _global_hex_value_regex = args.global_hex_value_regex
+    return args
+
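
A stripped-down sketch (hypothetical option handling, not the full
RegexAction/Filter machinery) of how --filter and --filter-out above
accumulate into one ordered list:

    import argparse

    class FilterAction(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            filters = getattr(namespace, self.dest) or []
            # Record the pattern plus whether it came from --filter-out.
            filters.append((values, option_string == "--filter-out"))
            setattr(namespace, self.dest, filters)

    p = argparse.ArgumentParser()
    p.add_argument("--filter", action=FilterAction, dest="filters", metavar="REGEX")
    p.add_argument("--filter-out", action=FilterAction, dest="filters", metavar="REGEX")
    args = p.parse_args(["--filter", "mul", "--filter-out", "nop"])
    assert args.filters == [("mul", False), ("nop", True)]

Keeping both options in a single dest preserves command-line order, which
matters because the first matching filter decides a line's fate.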
 
 def parse_args(parser, argv):
-  args = parser.parse_args(argv)
-  if args.version >= 2:
-    args.function_signature = True
-  return args
+    args = parser.parse_args(argv)
+    if args.version >= 2:
+        args.function_signature = True
+    return args
+
 
 class InputLineInfo(object):
-  def __init__(self, line, line_number, args, argv):
-    self.line = line
-    self.line_number = line_number
-    self.args = args
-    self.argv = argv
+    def __init__(self, line, line_number, args, argv):
+        self.line = line
+        self.line_number = line_number
+        self.args = args
+        self.argv = argv
 
 
 class TestInfo(object):
-  def __init__(self, test, parser, script_name, input_lines, args, argv,
-               comment_prefix, argparse_callback):
-    self.parser = parser
-    self.argparse_callback = argparse_callback
-    self.path = test
-    self.args = args
-    if args.prefix_filecheck_ir_name:
-      global _prefix_filecheck_ir_name
-      _prefix_filecheck_ir_name = args.prefix_filecheck_ir_name
-    self.argv = argv
-    self.input_lines = input_lines
-    self.run_lines = find_run_lines(test, self.input_lines)
-    self.comment_prefix = comment_prefix
-    if self.comment_prefix is None:
-      if self.path.endswith('.mir'):
-        self.comment_prefix = '#'
-      else:
-        self.comment_prefix = ';'
-    self.autogenerated_note_prefix = self.comment_prefix + ' ' + UTC_ADVERT
-    self.test_autogenerated_note = self.autogenerated_note_prefix + script_name
-    self.test_autogenerated_note += get_autogennote_suffix(parser, self.args)
-    self.test_unused_note = self.comment_prefix + self.comment_prefix + ' ' + UNUSED_NOTE
-
-  def ro_iterlines(self):
-    for line_num, input_line in enumerate(self.input_lines):
-      args, argv = check_for_command(input_line, self.parser,
-                                     self.args, self.argv, self.argparse_callback)
-      yield InputLineInfo(input_line, line_num, args, argv)
-
-  def iterlines(self, output_lines):
-    output_lines.append(self.test_autogenerated_note)
-    for line_info in self.ro_iterlines():
-      input_line = line_info.line
-      # Discard any previous script advertising.
-      if input_line.startswith(self.autogenerated_note_prefix):
-        continue
-      self.args = line_info.args
-      self.argv = line_info.argv
-      if not self.args.enabled:
-        output_lines.append(input_line)
-        continue
-      yield line_info
-
-  def get_checks_for_unused_prefixes(self, run_list, used_prefixes: List[str]) -> List[str]:
-    run_list = [element for element in run_list if element[0] is not None]
-    unused_prefixes = set([
-        prefix for sublist in run_list for prefix in sublist[0]
-    ]).difference(set(used_prefixes))
-
-    ret = []
-    if not unused_prefixes:
-      return ret
-    ret.append(self.test_unused_note)
-    for unused in sorted(unused_prefixes):
-      ret.append('{comment} {prefix}: {match_everything}'.format(
-        comment=self.comment_prefix,
-        prefix=unused,
-        match_everything=r"""{{.*}}"""
-      ))
-    return ret
-
-def itertests(test_patterns, parser, script_name, comment_prefix=None, argparse_callback=None):
-  for pattern in test_patterns:
-    # On Windows we must expand the patterns ourselves.
-    tests_list = glob.glob(pattern)
-    if not tests_list:
-      warn("Test file pattern '%s' was not found. Ignoring it." % (pattern,))
-      continue
-    for test in tests_list:
-      with open(test) as f:
-        input_lines = [l.rstrip() for l in f]
-      first_line = input_lines[0] if input_lines else ""
-      is_regenerate = UTC_ADVERT in first_line
-
-      # If we're generating a new test, set the default version to the latest.
-      argv = sys.argv[:]
-      if not is_regenerate:
-        argv.insert(1, '--version=' + str(DEFAULT_VERSION))
-
-      args = parse_args(parser, argv[1:])
-      if argparse_callback is not None:
-        argparse_callback(args)
-      if is_regenerate:
-        if script_name not in first_line and not args.force_update:
-          warn("Skipping test which wasn't autogenerated by " + script_name, test)
-          continue
-        args, argv = check_for_command(first_line, parser, args, argv, argparse_callback)
-      elif args.update_only:
-        assert UTC_ADVERT not in first_line
-        warn("Skipping test which isn't autogenerated: " + test)
-        continue
-      final_input_lines = []
-      for l in input_lines:
-        if UNUSED_NOTE in l:
-          break
-        final_input_lines.append(l)
-      yield TestInfo(test, parser, script_name, final_input_lines, args, argv,
-                     comment_prefix, argparse_callback)
-
-
-def should_add_line_to_output(input_line, prefix_set, skip_global_checks = False, comment_marker = ';'):
-  # Skip any blank comment lines in the IR.
-  if not skip_global_checks and input_line.strip() == comment_marker:
-    return False
-  # Skip a special double comment line we use as a separator.
-  if input_line.strip() == comment_marker + SEPARATOR:
-    return False
-  # Skip any blank lines in the IR.
-  #if input_line.strip() == '':
-  #  return False
-  # And skip any CHECK lines. We're building our own.
-  m = CHECK_RE.match(input_line)
-  if m and m.group(1) in prefix_set:
-    if skip_global_checks:
-      global_ir_value_re = re.compile(r'\[\[', flags=(re.M))
-      return not global_ir_value_re.search(input_line)
-    return False
-
-  return True
+    def __init__(
+        self,
+        test,
+        parser,
+        script_name,
+        input_lines,
+        args,
+        argv,
+        comment_prefix,
+        argparse_callback,
+    ):
+        self.parser = parser
+        self.argparse_callback = argparse_callback
+        self.path = test
+        self.args = args
+        if args.prefix_filecheck_ir_name:
+            global _prefix_filecheck_ir_name
+            _prefix_filecheck_ir_name = args.prefix_filecheck_ir_name
+        self.argv = argv
+        self.input_lines = input_lines
+        self.run_lines = find_run_lines(test, self.input_lines)
+        self.comment_prefix = comment_prefix
+        if self.comment_prefix is None:
+            if self.path.endswith(".mir"):
+                self.comment_prefix = "#"
+            else:
+                self.comment_prefix = ";"
+        self.autogenerated_note_prefix = self.comment_prefix + " " + UTC_ADVERT
+        self.test_autogenerated_note = self.autogenerated_note_prefix + script_name
+        self.test_autogenerated_note += get_autogennote_suffix(parser, self.args)
+        self.test_unused_note = (
+            self.comment_prefix + self.comment_prefix + " " + UNUSED_NOTE
+        )
+
+    def ro_iterlines(self):
+        for line_num, input_line in enumerate(self.input_lines):
+            args, argv = check_for_command(
+                input_line, self.parser, self.args, self.argv, self.argparse_callback
+            )
+            yield InputLineInfo(input_line, line_num, args, argv)
+
+    def iterlines(self, output_lines):
+        output_lines.append(self.test_autogenerated_note)
+        for line_info in self.ro_iterlines():
+            input_line = line_info.line
+            # Discard any previous script advertising.
+            if input_line.startswith(self.autogenerated_note_prefix):
+                continue
+            self.args = line_info.args
+            self.argv = line_info.argv
+            if not self.args.enabled:
+                output_lines.append(input_line)
+                continue
+            yield line_info
+
+    def get_checks_for_unused_prefixes(
+        self, run_list, used_prefixes: List[str]
+    ) -> List[str]:
+        run_list = [element for element in run_list if element[0] is not None]
+        unused_prefixes = set(
+            [prefix for sublist in run_list for prefix in sublist[0]]
+        ).difference(set(used_prefixes))
+
+        ret = []
+        if not unused_prefixes:
+            return ret
+        ret.append(self.test_unused_note)
+        for unused in sorted(unused_prefixes):
+            ret.append(
+                "{comment} {prefix}: {match_everything}".format(
+                    comment=self.comment_prefix,
+                    prefix=unused,
+                    match_everything=r"""{{.*}}""",
+                )
+            )
+        return ret
+
+
+def itertests(
+    test_patterns, parser, script_name, comment_prefix=None, argparse_callback=None
+):
+    for pattern in test_patterns:
+        # On Windows we must expand the patterns ourselves.
+        tests_list = glob.glob(pattern)
+        if not tests_list:
+            warn("Test file pattern '%s' was not found. Ignoring it." % (pattern,))
+            continue
+        for test in tests_list:
+            with open(test) as f:
+                input_lines = [l.rstrip() for l in f]
+            first_line = input_lines[0] if input_lines else ""
+            is_regenerate = UTC_ADVERT in first_line
+
+            # If we're generating a new test, set the default version to the latest.
+            argv = sys.argv[:]
+            if not is_regenerate:
+                argv.insert(1, "--version=" + str(DEFAULT_VERSION))
+
+            args = parse_args(parser, argv[1:])
+            if argparse_callback is not None:
+                argparse_callback(args)
+            if is_regenerate:
+                if script_name not in first_line and not args.force_update:
+                    warn(
+                        "Skipping test which wasn't autogenerated by " + script_name,
+                        test,
+                    )
+                    continue
+                args, argv = check_for_command(
+                    first_line, parser, args, argv, argparse_callback
+                )
+            elif args.update_only:
+                assert UTC_ADVERT not in first_line
+                warn("Skipping test which isn't autogenerated: " + test)
+                continue
+            final_input_lines = []
+            for l in input_lines:
+                if UNUSED_NOTE in l:
+                    break
+                final_input_lines.append(l)
+            yield TestInfo(
+                test,
+                parser,
+                script_name,
+                final_input_lines,
+                args,
+                argv,
+                comment_prefix,
+                argparse_callback,
+            )
+
+
+def should_add_line_to_output(
+    input_line, prefix_set, skip_global_checks=False, comment_marker=";"
+):
+    # Skip any blank comment lines in the IR.
+    if not skip_global_checks and input_line.strip() == comment_marker:
+        return False
+    # Skip a special double comment line we use as a separator.
+    if input_line.strip() == comment_marker + SEPARATOR:
+        return False
+    # Skip any blank lines in the IR.
+    # if input_line.strip() == '':
+    #  return False
+    # And skip any CHECK lines. We're building our own.
+    m = CHECK_RE.match(input_line)
+    if m and m.group(1) in prefix_set:
+        if skip_global_checks:
+            global_ir_value_re = re.compile(r"\[\[", flags=(re.M))
+            return not global_ir_value_re.search(input_line)
+        return False
+
+    return True
+
 
 # Perform lit-like substitutions
 def getSubstitutions(sourcepath):
-  sourcedir = os.path.dirname(sourcepath)
-  return [('%s', sourcepath),
-          ('%S', sourcedir),
-          ('%p', sourcedir),
-          ('%{pathsep}', os.pathsep)]
+    sourcedir = os.path.dirname(sourcepath)
+    return [
+        ("%s", sourcepath),
+        ("%S", sourcedir),
+        ("%p", sourcedir),
+        ("%{pathsep}", os.pathsep),
+    ]
+
 
 def applySubstitutions(s, substitutions):
-  for a,b in substitutions:
-    s = s.replace(a, b)
-  return s
+    for a, b in substitutions:
+        s = s.replace(a, b)
+    return s
+
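
A worked example of the lit-style substitutions above (the path is
hypothetical):

    import os

    sourcepath = "/tmp/test.ll"
    sourcedir = os.path.dirname(sourcepath)
    substitutions = [("%s", sourcepath), ("%S", sourcedir),
                     ("%p", sourcedir), ("%{pathsep}", os.pathsep)]
    cmd = "opt -S %s -o %S/out.ll"
    for a, b in substitutions:
        cmd = cmd.replace(a, b)
    assert cmd == "opt -S /tmp/test.ll -o /tmp/out.ll"

Note that str.replace is case-sensitive, so replacing "%s" first does not
clobber the later "%S" substitution.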
 
 # Invoke the tool that is being tested.
 def invoke_tool(exe, cmd_args, ir, preprocess_cmd=None, verbose=False):
-  with open(ir) as ir_file:
-    substitutions = getSubstitutions(ir)
-
-    # TODO Remove the str form which is used by update_test_checks.py and
-    # update_llc_test_checks.py
-    # The safer list form is used by update_cc_test_checks.py
-    if preprocess_cmd:
-      # Allow pre-processing the IR file (e.g. using sed):
-      assert isinstance(preprocess_cmd, str)  # TODO: use a list instead of using shell
-      preprocess_cmd = applySubstitutions(preprocess_cmd, substitutions).strip()
-      if verbose:
-        print('Pre-processing input file: ', ir, " with command '",
-              preprocess_cmd, "'", sep="", file=sys.stderr)
-      # Python 2.7 doesn't have subprocess.DEVNULL:
-      with open(os.devnull, 'w') as devnull:
-        pp = subprocess.Popen(preprocess_cmd, shell=True, stdin=devnull,
-                              stdout=subprocess.PIPE)
-        ir_file = pp.stdout
-
-    if isinstance(cmd_args, list):
-      args = [applySubstitutions(a, substitutions) for a in cmd_args]
-      stdout = subprocess.check_output([exe] + args, stdin=ir_file)
-    else:
-      stdout = subprocess.check_output(exe + ' ' + applySubstitutions(cmd_args, substitutions),
-                                       shell=True, stdin=ir_file)
-    if sys.version_info[0] > 2:
-      # FYI, if you crashed here with a decode error, your run line probably
-      # results in bitcode or other binary format being written to the pipe.
-      # For an opt test, you probably want to add -S or -disable-output.
-      stdout = stdout.decode()
-  # Fix line endings to unix LF style.
-  return stdout.replace('\r\n', '\n')
+    with open(ir) as ir_file:
+        substitutions = getSubstitutions(ir)
+
+        # TODO Remove the str form which is used by update_test_checks.py and
+        # update_llc_test_checks.py
+        # The safer list form is used by update_cc_test_checks.py
+        if preprocess_cmd:
+            # Allow pre-processing the IR file (e.g. using sed):
+            assert isinstance(
+                preprocess_cmd, str
+            )  # TODO: use a list instead of using shell
+            preprocess_cmd = applySubstitutions(preprocess_cmd, substitutions).strip()
+            if verbose:
+                print(
+                    "Pre-processing input file: ",
+                    ir,
+                    " with command '",
+                    preprocess_cmd,
+                    "'",
+                    sep="",
+                    file=sys.stderr,
+                )
+            # Python 2.7 doesn't have subprocess.DEVNULL:
+            with open(os.devnull, "w") as devnull:
+                pp = subprocess.Popen(
+                    preprocess_cmd, shell=True, stdin=devnull, stdout=subprocess.PIPE
+                )
+                ir_file = pp.stdout
+
+        if isinstance(cmd_args, list):
+            args = [applySubstitutions(a, substitutions) for a in cmd_args]
+            stdout = subprocess.check_output([exe] + args, stdin=ir_file)
+        else:
+            stdout = subprocess.check_output(
+                exe + " " + applySubstitutions(cmd_args, substitutions),
+                shell=True,
+                stdin=ir_file,
+            )
+        if sys.version_info[0] > 2:
+            # FYI, if you crashed here with a decode error, your run line probably
+            # results in bitcode or other binary format being written to the pipe.
+            # For an opt test, you probably want to add -S or -disable-output.
+            stdout = stdout.decode()
+    # Fix line endings to unix LF style.
+    return stdout.replace("\r\n", "\n")
 
-##### LLVM IR parser
-RUN_LINE_RE = re.compile(r'^\s*(?://|[;#])\s*RUN:\s*(.*)$')
-CHECK_PREFIX_RE = re.compile(r'--?check-prefix(?:es)?[= ](\S+)')
-PREFIX_RE = re.compile('^[a-zA-Z0-9_-]+$')
-CHECK_RE = re.compile(r'^\s*(?://|[;#])\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL|-SAME|-EMPTY)?:')
 
-UTC_ARGS_KEY = 'UTC_ARGS:'
-UTC_ARGS_CMD = re.compile(r'.*' + UTC_ARGS_KEY + '\s*(?P<cmd>.*)\s*$')
-UTC_ADVERT = 'NOTE: Assertions have been autogenerated by '
-UNUSED_NOTE = 'NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:'
+##### LLVM IR parser
+RUN_LINE_RE = re.compile(r"^\s*(?://|[;#])\s*RUN:\s*(.*)$")
+CHECK_PREFIX_RE = re.compile(r"--?check-prefix(?:es)?[= ](\S+)")
+PREFIX_RE = re.compile("^[a-zA-Z0-9_-]+$")
+CHECK_RE = re.compile(
+    r"^\s*(?://|[;#])\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL|-SAME|-EMPTY)?:"
+)
+
+UTC_ARGS_KEY = "UTC_ARGS:"
+UTC_ARGS_CMD = re.compile(r".*" + UTC_ARGS_KEY + "\s*(?P<cmd>.*)\s*$")
+UTC_ADVERT = "NOTE: Assertions have been autogenerated by "
+UNUSED_NOTE = "NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:"
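
For reference, an illustrative match against CHECK_RE above: group(1)
captures the prefix with any -NEXT/-LABEL/... suffix stripped off:

    import re

    CHECK_RE = re.compile(
        r"^\s*(?://|[;#])\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL|-SAME|-EMPTY)?:"
    )
    assert CHECK_RE.match("; CHECK-NEXT:    ret i32 0").group(1) == "CHECK"
    assert CHECK_RE.match("// CHECK-LABEL: @foo(").group(1) == "CHECK"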
 
 OPT_FUNCTION_RE = re.compile(
-    r'^(\s*;\s*Function\sAttrs:\s(?P<attrs>[\w\s():,]+?))?\s*define\s+(?P<funcdef_attrs_and_ret>[^@]*)@(?P<func>[\w.$-]+?)\s*'
-    r'(?P<args_and_sig>\((\)|(.*?[\w.-]+?)\))[^{]*\{)\n(?P<body>.*?)^\}$',
-    flags=(re.M | re.S))
+    r"^(\s*;\s*Function\sAttrs:\s(?P<attrs>[\w\s():,]+?))?\s*define\s+(?P<funcdef_attrs_and_ret>[^@]*)@(?P<func>[\w.$-]+?)\s*"
+    r"(?P<args_and_sig>\((\)|(.*?[\w.-]+?)\))[^{]*\{)\n(?P<body>.*?)^\}$",
+    flags=(re.M | re.S),
+)
 
 ANALYZE_FUNCTION_RE = re.compile(
-    r'^\s*\'(?P<analysis>[\w\s-]+?)\'\s+for\s+function\s+\'(?P<func>[\w.$-]+?)\':'
-    r'\s*\n(?P<body>.*)$',
-    flags=(re.X | re.S))
+    r"^\s*\'(?P<analysis>[\w\s-]+?)\'\s+for\s+function\s+\'(?P<func>[\w.$-]+?)\':"
+    r"\s*\n(?P<body>.*)$",
+    flags=(re.X | re.S),
+)
 
 LV_DEBUG_RE = re.compile(
-    r'^\s*\'(?P<func>[\w.$-]+?)\'[^\n]*'
-    r'\s*\n(?P<body>.*)$',
-    flags=(re.X | re.S))
+    r"^\s*\'(?P<func>[\w.$-]+?)\'[^\n]*" r"\s*\n(?P<body>.*)$", flags=(re.X | re.S)
+)
 
 IR_FUNCTION_RE = re.compile(r'^\s*define\s+(?:internal\s+)?[^@]*@"?([\w.$-]+)"?\s*\(')
 TRIPLE_IR_RE = re.compile(r'^\s*target\s+triple\s*=\s*"([^"]+)"$')
-TRIPLE_ARG_RE = re.compile(r'-mtriple[= ]([^ ]+)')
-MARCH_ARG_RE = re.compile(r'-march[= ]([^ ]+)')
-DEBUG_ONLY_ARG_RE = re.compile(r'-debug-only[= ]([^ ]+)')
+TRIPLE_ARG_RE = re.compile(r"-mtriple[= ]([^ ]+)")
+MARCH_ARG_RE = re.compile(r"-march[= ]([^ ]+)")
+DEBUG_ONLY_ARG_RE = re.compile(r"-debug-only[= ]([^ ]+)")
 
-SCRUB_LEADING_WHITESPACE_RE = re.compile(r'^(\s+)')
-SCRUB_WHITESPACE_RE = re.compile(r'(?!^(|  \w))[ \t]+', flags=re.M)
-SCRUB_TRAILING_WHITESPACE_RE = re.compile(r'[ \t]+$', flags=re.M)
+SCRUB_LEADING_WHITESPACE_RE = re.compile(r"^(\s+)")
+SCRUB_WHITESPACE_RE = re.compile(r"(?!^(|  \w))[ \t]+", flags=re.M)
+SCRUB_TRAILING_WHITESPACE_RE = re.compile(r"[ \t]+$", flags=re.M)
 SCRUB_TRAILING_WHITESPACE_TEST_RE = SCRUB_TRAILING_WHITESPACE_RE
-SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE = re.compile(r'([ \t]|(#[0-9]+))+$', flags=re.M)
-SCRUB_KILL_COMMENT_RE = re.compile(r'^ *#+ +kill:.*\n')
+SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE = re.compile(
+    r"([ \t]|(#[0-9]+))+$", flags=re.M
+)
+SCRUB_KILL_COMMENT_RE = re.compile(r"^ *#+ +kill:.*\n")
 SCRUB_LOOP_COMMENT_RE = re.compile(
-    r'# =>This Inner Loop Header:.*|# in Loop:.*', flags=re.M)
-SCRUB_TAILING_COMMENT_TOKEN_RE = re.compile(r'(?<=\S)+[ \t]*#$', flags=re.M)
+    r"# =>This Inner Loop Header:.*|# in Loop:.*", flags=re.M
+)
+SCRUB_TAILING_COMMENT_TOKEN_RE = re.compile(r"(?<=\S)+[ \t]*#$", flags=re.M)
+
+SEPARATOR = "."
 
-SEPARATOR = '.'
 
 def error(msg, test_file=None):
-  if test_file:
-    msg = '{}: {}'.format(msg, test_file)
-  print('ERROR: {}'.format(msg), file=sys.stderr)
+    if test_file:
+        msg = "{}: {}".format(msg, test_file)
+    print("ERROR: {}".format(msg), file=sys.stderr)
+
 
 def warn(msg, test_file=None):
-  if test_file:
-    msg = '{}: {}'.format(msg, test_file)
-  print('WARNING: {}'.format(msg), file=sys.stderr)
+    if test_file:
+        msg = "{}: {}".format(msg, test_file)
+    print("WARNING: {}".format(msg), file=sys.stderr)
+
 
 def debug(*args, **kwargs):
-  # Python2 does not allow def debug(*args, file=sys.stderr, **kwargs):
-  if 'file' not in kwargs:
-    kwargs['file'] = sys.stderr
-  if _verbose:
-    print(*args, **kwargs)
+    # Python2 does not allow def debug(*args, file=sys.stderr, **kwargs):
+    if "file" not in kwargs:
+        kwargs["file"] = sys.stderr
+    if _verbose:
+        print(*args, **kwargs)
+
 
 def find_run_lines(test, lines):
-  debug('Scanning for RUN lines in test file:', test)
-  raw_lines = [m.group(1)
-               for m in [RUN_LINE_RE.match(l) for l in lines] if m]
-  run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
-  for l in raw_lines[1:]:
-    if run_lines[-1].endswith('\\'):
-      run_lines[-1] = run_lines[-1].rstrip('\\') + ' ' + l
-    else:
-      run_lines.append(l)
-  debug('Found {} RUN lines in {}:'.format(len(run_lines), test))
-  for l in run_lines:
-    debug('  RUN: {}'.format(l))
-  return run_lines
+    debug("Scanning for RUN lines in test file:", test)
+    raw_lines = [m.group(1) for m in [RUN_LINE_RE.match(l) for l in lines] if m]
+    run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
+    for l in raw_lines[1:]:
+        if run_lines[-1].endswith("\\"):
+            run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
+        else:
+            run_lines.append(l)
+    debug("Found {} RUN lines in {}:".format(len(run_lines), test))
+    for l in run_lines:
+        debug("  RUN: {}".format(l))
+    return run_lines
+
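
A small sketch of the backslash-continuation joining in find_run_lines
above (the RUN: prefixes are already stripped; the contents are
illustrative):

    raw_lines = ["opt -S %s |\\", "FileCheck %s"]
    run_lines = [raw_lines[0]]
    for l in raw_lines[1:]:
        if run_lines[-1].endswith("\\"):
            run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
        else:
            run_lines.append(l)
    assert run_lines == ["opt -S %s | FileCheck %s"]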
 
 def get_triple_from_march(march):
-  triples = {
-      'amdgcn': 'amdgcn',
-      'r600': 'r600',
-      'mips': 'mips',
-      'sparc': 'sparc',
-      'hexagon': 'hexagon',
-      've': 've',
-  }
-  for prefix, triple in triples.items():
-    if march.startswith(prefix):
-      return triple
-  print("Cannot find a triple. Assume 'x86'", file=sys.stderr)
-  return 'x86'
+    triples = {
+        "amdgcn": "amdgcn",
+        "r600": "r600",
+        "mips": "mips",
+        "sparc": "sparc",
+        "hexagon": "hexagon",
+        "ve": "ve",
+    }
+    for prefix, triple in triples.items():
+        if march.startswith(prefix):
+            return triple
+    print("Cannot find a triple. Assume 'x86'", file=sys.stderr)
+    return "x86"
+
 
 def apply_filters(line, filters):
-  has_filter = False
-  for f in filters:
-    if not f.is_filter_out:
-      has_filter = True
-    if f.search(line):
-      return False if f.is_filter_out else True
-  # If we only used filter-out, keep the line, otherwise discard it since no
-  # filter matched.
-  return False if has_filter else True
+    has_filter = False
+    for f in filters:
+        if not f.is_filter_out:
+            has_filter = True
+        if f.search(line):
+            return False if f.is_filter_out else True
+    # If we only used filter-out, keep the line, otherwise discard it since no
+    # filter matched.
+    return False if has_filter else True
+
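
The semantics of apply_filters above, restated as a self-contained sketch:
with only --filter-out rules a non-matching line is kept, but once any
positive --filter exists, lines matching no filter are dropped:

    import re

    class F(object):
        def __init__(self, pattern, is_filter_out):
            self.regex = re.compile(pattern)
            self.is_filter_out = is_filter_out
        def search(self, line):
            return self.regex.search(line)

    def apply_filters(line, filters):
        has_filter = False
        for f in filters:
            if not f.is_filter_out:
                has_filter = True
            if f.search(line):
                return not f.is_filter_out
        return not has_filter

    assert apply_filters("add w0, w0, w1", [F("nop", True)])       # kept
    assert not apply_filters("add w0, w0, w1", [F("mul", False)])  # dropped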
 
 def do_filter(body, filters):
-  return body if not filters else '\n'.join(filter(
-    lambda line: apply_filters(line, filters), body.splitlines()))
+    return (
+        body
+        if not filters
+        else "\n".join(
+            filter(lambda line: apply_filters(line, filters), body.splitlines())
+        )
+    )
+
 
 def scrub_body(body):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  body = SCRUB_WHITESPACE_RE.sub(r' ', body)
-  # Expand the tabs used for indentation.
-  body = str.expandtabs(body, 2)
-  # Strip trailing whitespace.
-  body = SCRUB_TRAILING_WHITESPACE_TEST_RE.sub(r'', body)
-  return body
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    body = SCRUB_WHITESPACE_RE.sub(r" ", body)
+    # Expand the tabs used for indentation.
+    body = str.expandtabs(body, 2)
+    # Strip trailing whitespace.
+    body = SCRUB_TRAILING_WHITESPACE_TEST_RE.sub(r"", body)
+    return body
+
 
 def do_scrub(body, scrubber, scrubber_args, extra):
-  if scrubber_args:
-    local_args = copy.deepcopy(scrubber_args)
-    local_args[0].extra_scrub = extra
-    return scrubber(body, *local_args)
-  return scrubber(body, *scrubber_args)
+    if scrubber_args:
+        local_args = copy.deepcopy(scrubber_args)
+        local_args[0].extra_scrub = extra
+        return scrubber(body, *local_args)
+    return scrubber(body, *scrubber_args)
+
 
 # Build up a dictionary of all the function bodies.
 class function_body(object):
-  def __init__(self, string, extra, funcdef_attrs_and_ret, args_and_sig, attrs, func_name_separator):
-    self.scrub = string
-    self.extrascrub = extra
-    self.funcdef_attrs_and_ret = funcdef_attrs_and_ret
-    self.args_and_sig = args_and_sig
-    self.attrs = attrs
-    self.func_name_separator = func_name_separator
-  def is_same_except_arg_names(self, extrascrub, funcdef_attrs_and_ret, args_and_sig, attrs, is_backend):
-    arg_names = set()
-    def drop_arg_names(match):
-      arg_names.add(match.group(variable_group_in_ir_value_match))
-      if match.group(attribute_group_in_ir_value_match):
-        attr = match.group(attribute_group_in_ir_value_match)
-      else:
-        attr = ''
-      return match.group(1) + attr + match.group(match.lastindex)
-    def repl_arg_names(match):
-      if match.group(variable_group_in_ir_value_match) is not None and match.group(variable_group_in_ir_value_match) in arg_names:
-        return match.group(1) + match.group(match.lastindex)
-      return match.group(1) + match.group(2) + match.group(match.lastindex)
-    if self.funcdef_attrs_and_ret != funcdef_attrs_and_ret:
-      return False
-    if self.attrs != attrs:
-      return False
-    ans0 = IR_VALUE_RE.sub(drop_arg_names, self.args_and_sig)
-    ans1 = IR_VALUE_RE.sub(drop_arg_names, args_and_sig)
-    if ans0 != ans1:
-      return False
-    if is_backend:
-      # Check without replacements, the replacements are not applied to the
-      # body for backend checks.
-      return self.extrascrub == extrascrub
-
-    es0 = IR_VALUE_RE.sub(repl_arg_names, self.extrascrub)
-    es1 = IR_VALUE_RE.sub(repl_arg_names, extrascrub)
-    es0 = SCRUB_IR_COMMENT_RE.sub(r'', es0)
-    es1 = SCRUB_IR_COMMENT_RE.sub(r'', es1)
-    return es0 == es1
-
-  def __str__(self):
-    return self.scrub
+    def __init__(
+        self,
+        string,
+        extra,
+        funcdef_attrs_and_ret,
+        args_and_sig,
+        attrs,
+        func_name_separator,
+    ):
+        self.scrub = string
+        self.extrascrub = extra
+        self.funcdef_attrs_and_ret = funcdef_attrs_and_ret
+        self.args_and_sig = args_and_sig
+        self.attrs = attrs
+        self.func_name_separator = func_name_separator
+
+    def is_same_except_arg_names(
+        self, extrascrub, funcdef_attrs_and_ret, args_and_sig, attrs, is_backend
+    ):
+        arg_names = set()
+
+        def drop_arg_names(match):
+            arg_names.add(match.group(variable_group_in_ir_value_match))
+            if match.group(attribute_group_in_ir_value_match):
+                attr = match.group(attribute_group_in_ir_value_match)
+            else:
+                attr = ""
+            return match.group(1) + attr + match.group(match.lastindex)
+
+        def repl_arg_names(match):
+            if (
+                match.group(variable_group_in_ir_value_match) is not None
+                and match.group(variable_group_in_ir_value_match) in arg_names
+            ):
+                return match.group(1) + match.group(match.lastindex)
+            return match.group(1) + match.group(2) + match.group(match.lastindex)
+
+        if self.funcdef_attrs_and_ret != funcdef_attrs_and_ret:
+            return False
+        if self.attrs != attrs:
+            return False
+        ans0 = IR_VALUE_RE.sub(drop_arg_names, self.args_and_sig)
+        ans1 = IR_VALUE_RE.sub(drop_arg_names, args_and_sig)
+        if ans0 != ans1:
+            return False
+        if is_backend:
+            # Check without replacements, the replacements are not applied to the
+            # body for backend checks.
+            return self.extrascrub == extrascrub
+
+        es0 = IR_VALUE_RE.sub(repl_arg_names, self.extrascrub)
+        es1 = IR_VALUE_RE.sub(repl_arg_names, extrascrub)
+        es0 = SCRUB_IR_COMMENT_RE.sub(r"", es0)
+        es1 = SCRUB_IR_COMMENT_RE.sub(r"", es1)
+        return es0 == es1
+
+    def __str__(self):
+        return self.scrub
+
 
 class FunctionTestBuilder:
-  def __init__(self, run_list, flags, scrubber_args, path):
-    self._verbose = flags.verbose
-    self._record_args = flags.function_signature
-    self._check_attributes = flags.check_attributes
-    # Strip double-quotes if input was read by UTC_ARGS
-    self._filters = list(map(lambda f: Filter(re.compile(f.pattern().strip('"'),
-                                                         f.flags()),
-                                              f.is_filter_out),
-                             flags.filters)) if flags.filters else []
-    self._scrubber_args = scrubber_args
-    self._path = path
-    # Strip double-quotes if input was read by UTC_ARGS
-    self._replace_value_regex = list(map(lambda x: x.strip('"'), flags.replace_value_regex))
-    self._func_dict = {}
-    self._func_order = {}
-    self._global_var_dict = {}
-    self._processed_prefixes = set()
-    for tuple in run_list:
-      for prefix in tuple[0]:
-        self._func_dict.update({prefix: dict()})
-        self._func_order.update({prefix: []})
-        self._global_var_dict.update({prefix: dict()})
-
-  def finish_and_get_func_dict(self):
-    for prefix in self.get_failed_prefixes():
-      warn('Prefix %s had conflicting output from different RUN lines for all functions in test %s' % (prefix,self._path,))
-    return self._func_dict
-
-  def func_order(self):
-    return self._func_order
-
-  def global_var_dict(self):
-    return self._global_var_dict
-
-  def is_filtered(self):
-    return bool(self._filters)
-
-  def process_run_line(self, function_re, scrubber, raw_tool_output, prefixes, is_backend):
-    build_global_values_dictionary(self._global_var_dict, raw_tool_output, prefixes)
-    for m in function_re.finditer(raw_tool_output):
-      if not m:
-        continue
-      func = m.group('func')
-      body = m.group('body')
-      # func_name_separator is the string that is placed right after the function name at
-      # the beginning of an assembly function definition. In most assemblies, that is just
-      # a colon: `foo:`. But, for example, in nvptx it is a brace: `foo(`. If is_backend
-      # is False, just assume that the separator is an empty string.
-      if is_backend:
-        # Use ':' as default separator.
-        func_name_separator = m.group('func_name_separator') if 'func_name_separator' in m.groupdict() else ':'
-      else:
-        func_name_separator = ''
-      attrs = m.group('attrs') if self._check_attributes else ''
-      funcdef_attrs_and_ret = m.group('funcdef_attrs_and_ret') if self._record_args else ''
-      # Determine if we print arguments, the opening brace, or nothing after the
-      # function name
-      if self._record_args and 'args_and_sig' in m.groupdict():
-        args_and_sig = scrub_body(m.group('args_and_sig').strip())
-      elif 'args_and_sig' in m.groupdict():
-        args_and_sig = '('
-      else:
-        args_and_sig = ''
-      filtered_body = do_filter(body, self._filters)
-      scrubbed_body = do_scrub(filtered_body, scrubber, self._scrubber_args,
-                               extra=False)
-      scrubbed_extra = do_scrub(filtered_body, scrubber, self._scrubber_args,
-                                extra=True)
-      if 'analysis' in m.groupdict():
-        analysis = m.group('analysis')
-        if analysis.lower() != 'cost model analysis':
-          warn('Unsupported analysis mode: %r!' % (analysis,))
-      if func.startswith('stress'):
-        # We only use the last line of the function body for stress tests.
-        scrubbed_body = '\n'.join(scrubbed_body.splitlines()[-1:])
-      if self._verbose:
-        print('Processing function: ' + func, file=sys.stderr)
-        for l in scrubbed_body.splitlines():
-          print('  ' + l, file=sys.stderr)
-      for prefix in prefixes:
-        # Replace function names matching the regex.
-        for regex in self._replace_value_regex:
-          # Pattern that matches capture groups in the regex in leftmost order.
-          group_regex = re.compile(r'\(.*?\)')
-          # Replace function name with regex.
-          match = re.match(regex, func)
-          if match:
-            func_repl = regex
-            # Replace any capture groups with their matched strings.
-            for g in match.groups():
-              func_repl = group_regex.sub(re.escape(g), func_repl, count=1)
-            func = re.sub(func_repl, '{{' + func_repl + '}}', func)
-
-          # Replace all calls to regex matching functions.
-          matches = re.finditer(regex, scrubbed_body)
-          for match in matches:
-            func_repl = regex
-            # Replace any capture groups with their matched strings.
-            for g in match.groups():
-              func_repl = group_regex.sub(re.escape(g), func_repl, count=1)
-            # Substitute function call names that match the regex with the same
-            # capture groups set.
-            scrubbed_body = re.sub(func_repl, '{{' + func_repl + '}}',
-                                   scrubbed_body)
-
-        if func in self._func_dict[prefix]:
-          if (self._func_dict[prefix][func] is not None and
-              (str(self._func_dict[prefix][func]) != scrubbed_body or
-               self._func_dict[prefix][func].args_and_sig != args_and_sig or
-               self._func_dict[prefix][func].attrs != attrs or
-               self._func_dict[prefix][func].funcdef_attrs_and_ret != funcdef_attrs_and_ret)):
-            if self._func_dict[prefix][func].is_same_except_arg_names(
-                scrubbed_extra,
-                funcdef_attrs_and_ret,
-                args_and_sig,
-                attrs,
-                is_backend):
-              self._func_dict[prefix][func].scrub = scrubbed_extra
-              self._func_dict[prefix][func].args_and_sig = args_and_sig
+    def __init__(self, run_list, flags, scrubber_args, path):
+        self._verbose = flags.verbose
+        self._record_args = flags.function_signature
+        self._check_attributes = flags.check_attributes
+        # Strip double-quotes if input was read by UTC_ARGS
+        self._filters = (
+            list(
+                map(
+                    lambda f: Filter(
+                        re.compile(f.pattern().strip('"'), f.flags()), f.is_filter_out
+                    ),
+                    flags.filters,
+                )
+            )
+            if flags.filters
+            else []
+        )
+        self._scrubber_args = scrubber_args
+        self._path = path
+        # Strip double-quotes if input was read by UTC_ARGS
+        self._replace_value_regex = list(
+            map(lambda x: x.strip('"'), flags.replace_value_regex)
+        )
+        self._func_dict = {}
+        self._func_order = {}
+        self._global_var_dict = {}
+        self._processed_prefixes = set()
+        for tuple in run_list:
+            for prefix in tuple[0]:
+                self._func_dict.update({prefix: dict()})
+                self._func_order.update({prefix: []})
+                self._global_var_dict.update({prefix: dict()})
+
+    def finish_and_get_func_dict(self):
+        for prefix in self.get_failed_prefixes():
+            warn(
+                "Prefix %s had conflicting output from 
diff erent RUN lines for all functions in test %s"
+                % (
+                    prefix,
+                    self._path,
+                )
+            )
+        return self._func_dict
+
+    def func_order(self):
+        return self._func_order
+
+    def global_var_dict(self):
+        return self._global_var_dict
+
+    def is_filtered(self):
+        return bool(self._filters)
+
+    def process_run_line(
+        self, function_re, scrubber, raw_tool_output, prefixes, is_backend
+    ):
+        build_global_values_dictionary(self._global_var_dict, raw_tool_output, prefixes)
+        for m in function_re.finditer(raw_tool_output):
+            if not m:
+                continue
+            func = m.group("func")
+            body = m.group("body")
+            # func_name_separator is the string that is placed right after the function
+            # name at the beginning of an assembly function definition. In most
+            # assemblies, that is just a colon: `foo:`. But, for example, in nvptx it is
+            # a brace: `foo(`. If is_backend is False, just assume that the separator is
+            # an empty string.
+            if is_backend:
+                # Use ':' as default separator.
+                func_name_separator = (
+                    m.group("func_name_separator")
+                    if "func_name_separator" in m.groupdict()
+                    else ":"
+                )
             else:
-              # This means a previous RUN line produced a body for this function
-              # that is different from the one produced by this current RUN line,
-              # so the body can't be common across RUN lines. We use None to
-              # indicate that.
-              self._func_dict[prefix][func] = None
-        else:
-          if prefix not in self._processed_prefixes:
-            self._func_dict[prefix][func] = function_body(
-                scrubbed_body, scrubbed_extra, funcdef_attrs_and_ret,
-                args_and_sig, attrs, func_name_separator)
-            self._func_order[prefix].append(func)
-          else:
-            # An earlier RUN line used this check prefix but didn't produce
-            # a body for this function. This happens in Clang tests that use
-            # preprocessor directives to exclude individual functions from some
-            # RUN lines.
-            self._func_dict[prefix][func] = None
-
-  def processed_prefixes(self, prefixes):
-    """
-    Mark a set of prefixes as having had at least one applicable RUN line fully
-    processed. This is used to filter out function bodies that don't have
-    outputs for all RUN lines.
-    """
-    self._processed_prefixes.update(prefixes)
-
-  def get_failed_prefixes(self):
-    # This returns the list of those prefixes that failed to match any function,
-    # because there were conflicting bodies produced by different RUN lines, in
-    # all instances of the prefix.
-    for prefix in self._func_dict:
-      if (self._func_dict[prefix] and
-          (not [fct for fct in self._func_dict[prefix]
-                if self._func_dict[prefix][fct] is not None])):
-        yield prefix
+                func_name_separator = ""
+            attrs = m.group("attrs") if self._check_attributes else ""
+            funcdef_attrs_and_ret = (
+                m.group("funcdef_attrs_and_ret") if self._record_args else ""
+            )
+            # Determine if we print arguments, the opening brace, or nothing after the
+            # function name
+            if self._record_args and "args_and_sig" in m.groupdict():
+                args_and_sig = scrub_body(m.group("args_and_sig").strip())
+            elif "args_and_sig" in m.groupdict():
+                args_and_sig = "("
+            else:
+                args_and_sig = ""
+            filtered_body = do_filter(body, self._filters)
+            scrubbed_body = do_scrub(
+                filtered_body, scrubber, self._scrubber_args, extra=False
+            )
+            scrubbed_extra = do_scrub(
+                filtered_body, scrubber, self._scrubber_args, extra=True
+            )
+            if "analysis" in m.groupdict():
+                analysis = m.group("analysis")
+                if analysis.lower() != "cost model analysis":
+                    warn("Unsupported analysis mode: %r!" % (analysis,))
+            if func.startswith("stress"):
+                # We only use the last line of the function body for stress tests.
+                scrubbed_body = "\n".join(scrubbed_body.splitlines()[-1:])
+            if self._verbose:
+                print("Processing function: " + func, file=sys.stderr)
+                for l in scrubbed_body.splitlines():
+                    print("  " + l, file=sys.stderr)
+            for prefix in prefixes:
+                # Replace function names matching the regex.
+                for regex in self._replace_value_regex:
+                    # Pattern that matches capture groups in the regex in leftmost order.
+                    group_regex = re.compile(r"\(.*?\)")
+                    # Replace function name with regex.
+                    match = re.match(regex, func)
+                    if match:
+                        func_repl = regex
+                        # Replace any capture groups with their matched strings.
+                        for g in match.groups():
+                            func_repl = group_regex.sub(
+                                re.escape(g), func_repl, count=1
+                            )
+                        func = re.sub(func_repl, "{{" + func_repl + "}}", func)
+
+                    # Replace all calls to regex matching functions.
+                    matches = re.finditer(regex, scrubbed_body)
+                    for match in matches:
+                        func_repl = regex
+                        # Replace any capture groups with their matched strings.
+                        for g in match.groups():
+                            func_repl = group_regex.sub(
+                                re.escape(g), func_repl, count=1
+                            )
+                        # Substitute function call names that match the regex with the same
+                        # capture groups set.
+                        scrubbed_body = re.sub(
+                            func_repl, "{{" + func_repl + "}}", scrubbed_body
+                        )
+
+                if func in self._func_dict[prefix]:
+                    if self._func_dict[prefix][func] is not None and (
+                        str(self._func_dict[prefix][func]) != scrubbed_body
+                        or self._func_dict[prefix][func].args_and_sig != args_and_sig
+                        or self._func_dict[prefix][func].attrs != attrs
+                        or self._func_dict[prefix][func].funcdef_attrs_and_ret
+                        != funcdef_attrs_and_ret
+                    ):
+                        if self._func_dict[prefix][func].is_same_except_arg_names(
+                            scrubbed_extra,
+                            funcdef_attrs_and_ret,
+                            args_and_sig,
+                            attrs,
+                            is_backend,
+                        ):
+                            self._func_dict[prefix][func].scrub = scrubbed_extra
+                            self._func_dict[prefix][func].args_and_sig = args_and_sig
+                        else:
+                            # This means a previous RUN line produced a body for this function
+                            # that is different from the one produced by this current RUN line,
+                            # so the body can't be common across RUN lines. We use None to
+                            # indicate that.
+                            self._func_dict[prefix][func] = None
+                else:
+                    if prefix not in self._processed_prefixes:
+                        self._func_dict[prefix][func] = function_body(
+                            scrubbed_body,
+                            scrubbed_extra,
+                            funcdef_attrs_and_ret,
+                            args_and_sig,
+                            attrs,
+                            func_name_separator,
+                        )
+                        self._func_order[prefix].append(func)
+                    else:
+                        # An earlier RUN line used these check prefixes but didn't produce
+                        # a body for this function. This happens in Clang tests that use
+                        # preprocessor directives to exclude individual functions from some
+                        # RUN lines.
+                        self._func_dict[prefix][func] = None
+
+    def processed_prefixes(self, prefixes):
+        """
+        Mark a set of prefixes as having had at least one applicable RUN line fully
+        processed. This is used to filter out function bodies that don't have
+        outputs for all RUN lines.
+        """
+        self._processed_prefixes.update(prefixes)
+
+    def get_failed_prefixes(self):
+        # This returns the list of those prefixes that failed to match any function,
+        # because there were conflicting bodies produced by different RUN lines, in
+        # all instances of the prefix.
+        for prefix in self._func_dict:
+            if self._func_dict[prefix] and (
+                not [
+                    fct
+                    for fct in self._func_dict[prefix]
+                    if self._func_dict[prefix][fct] is not None
+                ]
+            ):
+                yield prefix
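
(Illustration, not part of the diff: the filtering rule above, sketched with a
plain dict standing in for self._func_dict.)

    func_dict = {"A": {"f": None}, "B": {"f": "body"}, "C": {}}
    failed = [
        p
        for p in func_dict
        if func_dict[p]
        and not [f for f in func_dict[p] if func_dict[p][f] is not None]
    ]
    assert failed == ["A"]  # "B" still has a body; "C" matched no function at all
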
 
 
 ##### Generator of LLVM IR CHECK lines
 
-SCRUB_IR_COMMENT_RE = re.compile(r'\s*;.*')
+SCRUB_IR_COMMENT_RE = re.compile(r"\s*;.*")
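
(Illustration, not part of the diff: the scrubber strips a trailing IR comment
together with the whitespace before it.)

    line = "  %r = add i32 %a, %b ; this comment is dropped"
    assert SCRUB_IR_COMMENT_RE.sub("", line) == "  %r = add i32 %a, %b"
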
 
 # TODO: We should also derive check lines for global, debug, loop declarations, etc..
 
+
 class NamelessValue:
-  def __init__(self, check_prefix, check_key, ir_prefix, ir_regexp,
-               global_ir_rhs_regexp, *, is_before_functions=False, is_number=False,
-               replace_number_with_counter=False):
-    self.check_prefix = check_prefix
-    self.check_key = check_key
-    self.ir_prefix = ir_prefix
-    self.ir_regexp = ir_regexp
-    self.global_ir_rhs_regexp = global_ir_rhs_regexp
-    self.is_before_functions = is_before_functions
-    self.is_number = is_number
-    # Some variable numbers (e.g. MCINST1234) will change based on unrelated
-    # modifications to LLVM, replace those with an incrementing counter.
-    self.replace_number_with_counter = replace_number_with_counter
-    self.variable_mapping = {}
-
-  # Return true if this kind of IR value is "local", basically if it matches '%{{.*}}'.
-  def is_local_def_ir_value_match(self, match):
-    return self.ir_prefix == '%'
-
-  # Return true if this kind of IR value is "global", basically if it matches '#{{.*}}'.
-  def is_global_scope_ir_value_match(self, match):
-    return self.global_ir_rhs_regexp is not None
-
-  # Return the IR prefix and check prefix we use for this kind of IR value,
-  # e.g., (%, TMP) for locals.
-  def get_ir_prefix_from_ir_value_match(self, match):
-    return self.ir_prefix, self.check_prefix
-
-  # Return the IR regexp we use for this kind of IR value, e.g., [\w.-]+? for locals
-  def get_ir_regex_from_ir_value_re_match(self, match):
-    # for backwards compatibility we check locals with '.*'
-    if self.is_local_def_ir_value_match(match):
-      return '.*'
-    return self.ir_regexp
-
-  # Create a FileCheck variable name based on an IR name.
-  def get_value_name(self, var: str, check_prefix: str):
-    var = var.replace('!', '')
-    if self.replace_number_with_counter:
-      assert var.isdigit(), var
-      replacement = self.variable_mapping.get(var, None)
-      if replacement is None:
-        # Replace variable with an incrementing counter
-        replacement = str(len(self.variable_mapping) + 1)
-        self.variable_mapping[var] = replacement
-      var = replacement
-    # This is a nameless value, prepend check_prefix.
-    if var.isdigit():
-      var = check_prefix + var
-    else:
-      # This is a named value that clashes with the check_prefix, prepend with
-      # _prefix_filecheck_ir_name, if it has been defined.
-      if may_clash_with_default_check_prefix_name(check_prefix, var) and _prefix_filecheck_ir_name:
-        var = _prefix_filecheck_ir_name + var
-    var = var.replace('.', '_')
-    var = var.replace('-', '_')
-    return var.upper()
-
-  # Create a FileCheck variable from regex.
-  def get_value_definition(self, var, match):
-    # for backwards compatibility we check locals with '.*'
-    varname = self.get_value_name(var, self.check_prefix)
-    prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
-    if self.is_number:
-      regex = ''  # always capture a number in the default format
-      capture_start = '[[#'
-    else:
-      regex = self.get_ir_regex_from_ir_value_re_match(match)
-      capture_start = '[['
-    if self.is_local_def_ir_value_match(match):
-      return capture_start + varname + ':' + prefix + regex + ']]'
-    return prefix + capture_start + varname + ':' + regex + ']]'
-
-  # Use a FileCheck variable.
-  def get_value_use(self, var, match, var_prefix=None):
-    if var_prefix is None:
-      var_prefix = self.check_prefix
-    capture_start = '[[#' if self.is_number else '[['
-    if self.is_local_def_ir_value_match(match):
-      return capture_start + self.get_value_name(var, var_prefix) + ']]'
-    prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
-    return prefix + capture_start + self.get_value_name(var, var_prefix) + ']]'
+    def __init__(
+        self,
+        check_prefix,
+        check_key,
+        ir_prefix,
+        ir_regexp,
+        global_ir_rhs_regexp,
+        *,
+        is_before_functions=False,
+        is_number=False,
+        replace_number_with_counter=False
+    ):
+        self.check_prefix = check_prefix
+        self.check_key = check_key
+        self.ir_prefix = ir_prefix
+        self.ir_regexp = ir_regexp
+        self.global_ir_rhs_regexp = global_ir_rhs_regexp
+        self.is_before_functions = is_before_functions
+        self.is_number = is_number
+        # Some variable numbers (e.g. MCINST1234) will change based on unrelated
+        # modifications to LLVM, replace those with an incrementing counter.
+        self.replace_number_with_counter = replace_number_with_counter
+        self.variable_mapping = {}
+
+    # Return true if this kind of IR value is "local", basically if it matches '%{{.*}}'.
+    def is_local_def_ir_value_match(self, match):
+        return self.ir_prefix == "%"
+
+    # Return true if this kind of IR value is "global", basically if it matches '#{{.*}}'.
+    def is_global_scope_ir_value_match(self, match):
+        return self.global_ir_rhs_regexp is not None
+
+    # Return the IR prefix and check prefix we use for this kind of IR value,
+    # e.g., (%, TMP) for locals.
+    def get_ir_prefix_from_ir_value_match(self, match):
+        return self.ir_prefix, self.check_prefix
+
+    # Return the IR regexp we use for this kind of IR value, e.g., [\w.-]+? for locals
+    def get_ir_regex_from_ir_value_re_match(self, match):
+        # for backwards compatibility we check locals with '.*'
+        if self.is_local_def_ir_value_match(match):
+            return ".*"
+        return self.ir_regexp
+
+    # Create a FileCheck variable name based on an IR name.
+    def get_value_name(self, var: str, check_prefix: str):
+        var = var.replace("!", "")
+        if self.replace_number_with_counter:
+            assert var.isdigit(), var
+            replacement = self.variable_mapping.get(var, None)
+            if replacement is None:
+                # Replace variable with an incrementing counter
+                replacement = str(len(self.variable_mapping) + 1)
+                self.variable_mapping[var] = replacement
+            var = replacement
+        # This is a nameless value, prepend check_prefix.
+        if var.isdigit():
+            var = check_prefix + var
+        else:
+            # This is a named value that clashes with the check_prefix, prepend with
+            # _prefix_filecheck_ir_name, if it has been defined.
+            if (
+                may_clash_with_default_check_prefix_name(check_prefix, var)
+                and _prefix_filecheck_ir_name
+            ):
+                var = _prefix_filecheck_ir_name + var
+        var = var.replace(".", "_")
+        var = var.replace("-", "_")
+        return var.upper()
+
+    # Create a FileCheck variable from regex.
+    def get_value_definition(self, var, match):
+        # for backwards compatibility we check locals with '.*'
+        varname = self.get_value_name(var, self.check_prefix)
+        prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
+        if self.is_number:
+            regex = ""  # always capture a number in the default format
+            capture_start = "[[#"
+        else:
+            regex = self.get_ir_regex_from_ir_value_re_match(match)
+            capture_start = "[["
+        if self.is_local_def_ir_value_match(match):
+            return capture_start + varname + ":" + prefix + regex + "]]"
+        return prefix + capture_start + varname + ":" + regex + "]]"
+
+    # Use a FileCheck variable.
+    def get_value_use(self, var, match, var_prefix=None):
+        if var_prefix is None:
+            var_prefix = self.check_prefix
+        capture_start = "[[#" if self.is_number else "[["
+        if self.is_local_def_ir_value_match(match):
+            return capture_start + self.get_value_name(var, var_prefix) + "]]"
+        prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
+        return prefix + capture_start + self.get_value_name(var, var_prefix) + "]]"
+
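
(Illustration, not part of the diff: what the definition/use helpers produce
for the local-value kind; the match argument is ignored for locals, so None is
enough here.)

    tmp = NamelessValue(r"TMP", "%", r"%", r"[\w$.-]+?", None)
    assert tmp.get_value_definition("0", None) == "[[TMP0:%.*]]"
    assert tmp.get_value_use("0", None) == "[[TMP0]]"
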
 
 # Description of the different "unnamed" values we match in the IR, e.g.,
 # (local) ssa values, (debug) metadata, etc.
 ir_nameless_values = [
     #            check_prefix   check_key  ir_prefix           ir_regexp                global_ir_rhs_regexp
-    NamelessValue(r'TMP'        , '%' , r'%'                   , r'[\w$.-]+?'           , None                 ) ,
-    NamelessValue(r'ATTR'       , '#' , r'#'                   , r'[0-9]+'              , None                 ) ,
-    NamelessValue(r'ATTR'       , '#' , r'attributes #'        , r'[0-9]+'              , r'{[^}]*}'           ) ,
-    NamelessValue(r'GLOB'       , '@' , r'@'                   , r'[0-9]+'              , None                 ) ,
-    NamelessValue(r'GLOB'       , '@' , r'@'                   , r'[a-zA-Z0-9_$"\\.-]+' , r'.+'                , is_before_functions=True)  ,
-    NamelessValue(r'DBG'        , '!' , r'!dbg '               , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'DIASSIGNID' , '!' , r'!DIAssignID '        , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'PROF'       , '!' , r'!prof '              , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'TBAA'       , '!' , r'!tbaa '              , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'TBAA_STRUCT', '!' , r'!tbaa.struct '       , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'RNG'        , '!' , r'!range '             , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'LOOP'       , '!' , r'!llvm.loop '         , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'META'       , '!' , r'metadata '           , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'META'       , '!' , r''                    , r'![0-9]+'             , r'(?:distinct |)!.*' ) ,
-    NamelessValue(r'ACC_GRP'    , '!' , r'!llvm.access.group ' , r'![0-9]+'             , None                 ) ,
+    NamelessValue(r"TMP", "%", r"%", r"[\w$.-]+?", None),
+    NamelessValue(r"ATTR", "#", r"#", r"[0-9]+", None),
+    NamelessValue(r"ATTR", "#", r"attributes #", r"[0-9]+", r"{[^}]*}"),
+    NamelessValue(r"GLOB", "@", r"@", r"[0-9]+", None),
+    NamelessValue(
+        r"GLOB", "@", r"@", r'[a-zA-Z0-9_$"\\.-]+', r".+", is_before_functions=True
+    ),
+    NamelessValue(r"DBG", "!", r"!dbg ", r"![0-9]+", None),
+    NamelessValue(r"DIASSIGNID", "!", r"!DIAssignID ", r"![0-9]+", None),
+    NamelessValue(r"PROF", "!", r"!prof ", r"![0-9]+", None),
+    NamelessValue(r"TBAA", "!", r"!tbaa ", r"![0-9]+", None),
+    NamelessValue(r"TBAA_STRUCT", "!", r"!tbaa.struct ", r"![0-9]+", None),
+    NamelessValue(r"RNG", "!", r"!range ", r"![0-9]+", None),
+    NamelessValue(r"LOOP", "!", r"!llvm.loop ", r"![0-9]+", None),
+    NamelessValue(r"META", "!", r"metadata ", r"![0-9]+", None),
+    NamelessValue(r"META", "!", r"", r"![0-9]+", r"(?:distinct |)!.*"),
+    NamelessValue(r"ACC_GRP", "!", r"!llvm.access.group ", r"![0-9]+", None),
 ]
 
 asm_nameless_values = [
-    NamelessValue(r'MCINST'     , 'Inst#' , '<MCInst #'        , r'\d+'                 , r'.+', is_number=True, replace_number_with_counter=True),
-    NamelessValue(r'MCREG'      , 'Reg:'  , '<MCOperand Reg:'  , r'\d+'                 , r'.+', is_number=True, replace_number_with_counter=True),
+    NamelessValue(
+        r"MCINST",
+        "Inst#",
+        "<MCInst #",
+        r"\d+",
+        r".+",
+        is_number=True,
+        replace_number_with_counter=True,
+    ),
+    NamelessValue(
+        r"MCREG",
+        "Reg:",
+        "<MCOperand Reg:",
+        r"\d+",
+        r".+",
+        is_number=True,
+        replace_number_with_counter=True,
+    ),
 ]
 
+
 def createOrRegexp(old, new):
-  if not old:
-    return new
-  if not new:
-    return old
-  return old + '|' + new
+    if not old:
+        return new
+    if not new:
+        return old
+    return old + "|" + new
+
 
 def createPrefixMatch(prefix_str, prefix_re):
-  return '(?:' + prefix_str + '(' + prefix_re + '))'
+    return "(?:" + prefix_str + "(" + prefix_re + "))"
+
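
(Illustration, not part of the diff: concrete values returned by the two
helpers above.)

    assert createOrRegexp("a", "b") == "a|b"
    assert createOrRegexp("", "b") == "b"
    assert createPrefixMatch("!dbg ", r"![0-9]+") == r"(?:!dbg (![0-9]+))"
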
 
 # Build the regexp that matches an "IR value". This can be a local variable,
 # argument, global, or metadata, anything that is "named". It is important that
 # the PREFIX and SUFFIX below only contain a single group, if that changes
 # other locations will need adjustment as well.
-IR_VALUE_REGEXP_PREFIX = r'(\s*)'
-IR_VALUE_REGEXP_STRING = r''
+IR_VALUE_REGEXP_PREFIX = r"(\s*)"
+IR_VALUE_REGEXP_STRING = r""
 for nameless_value in ir_nameless_values:
-  match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
-  if nameless_value.global_ir_rhs_regexp is not None:
-    match = '^' + match
-  IR_VALUE_REGEXP_STRING = createOrRegexp(IR_VALUE_REGEXP_STRING, match)
-IR_VALUE_REGEXP_SUFFIX = r'([,\s\(\)]|\Z)'
-IR_VALUE_RE = re.compile(IR_VALUE_REGEXP_PREFIX + r'(' + IR_VALUE_REGEXP_STRING + r')' + IR_VALUE_REGEXP_SUFFIX)
+    match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
+    if nameless_value.global_ir_rhs_regexp is not None:
+        match = "^" + match
+    IR_VALUE_REGEXP_STRING = createOrRegexp(IR_VALUE_REGEXP_STRING, match)
+IR_VALUE_REGEXP_SUFFIX = r"([,\s\(\)]|\Z)"
+IR_VALUE_RE = re.compile(
+    IR_VALUE_REGEXP_PREFIX
+    + r"("
+    + IR_VALUE_REGEXP_STRING
+    + r")"
+    + IR_VALUE_REGEXP_SUFFIX
+)
 
 # Build the regexp that matches an "ASM value" (currently only for --asm-show-inst comments).
-ASM_VALUE_REGEXP_STRING = ''
+ASM_VALUE_REGEXP_STRING = ""
 for nameless_value in asm_nameless_values:
-  match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
-  ASM_VALUE_REGEXP_STRING = createOrRegexp(ASM_VALUE_REGEXP_STRING, match)
-ASM_VALUE_REGEXP_SUFFIX = r'([>\s]|\Z)'
-ASM_VALUE_RE = re.compile(r'((?:#|//)\s*)' + '(' + ASM_VALUE_REGEXP_STRING + ')' + ASM_VALUE_REGEXP_SUFFIX)
+    match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
+    ASM_VALUE_REGEXP_STRING = createOrRegexp(ASM_VALUE_REGEXP_STRING, match)
+ASM_VALUE_REGEXP_SUFFIX = r"([>\s]|\Z)"
+ASM_VALUE_RE = re.compile(
+    r"((?:#|//)\s*)" + "(" + ASM_VALUE_REGEXP_STRING + ")" + ASM_VALUE_REGEXP_SUFFIX
+)
 
 # The entire match is group 0, the prefix has one group (=1), the entire
 # IR_VALUE_REGEXP_STRING is one group (=2), and then the nameless values start.
@@ -825,488 +1061,665 @@ def createPrefixMatch(prefix_str, prefix_re):
 # Check a match for IR_VALUE_RE and inspect it to determine if it was a local
 # value, %..., global @..., debug number !dbg !..., etc. See the PREFIXES above.
 def get_idx_from_ir_value_match(match):
-  for i in range(first_nameless_group_in_ir_value_match, match.lastindex):
-    if match.group(i) is not None:
-      return i - first_nameless_group_in_ir_value_match
-  error("Unable to identify the kind of IR value from the match!")
-  return 0
+    for i in range(first_nameless_group_in_ir_value_match, match.lastindex):
+        if match.group(i) is not None:
+            return i - first_nameless_group_in_ir_value_match
+    error("Unable to identify the kind of IR value from the match!")
+    return 0
+
 
 # See get_idx_from_ir_value_match
 def get_name_from_ir_value_match(match):
-  return match.group(get_idx_from_ir_value_match(match) + first_nameless_group_in_ir_value_match)
+    return match.group(
+        get_idx_from_ir_value_match(match) + first_nameless_group_in_ir_value_match
+    )
+
 
 def get_nameless_value_from_match(match, nameless_values) -> NamelessValue:
-  return nameless_values[get_idx_from_ir_value_match(match)]
+    return nameless_values[get_idx_from_ir_value_match(match)]
+
 
 # Return true if var clashes with the scripted FileCheck check_prefix.
 def may_clash_with_default_check_prefix_name(check_prefix, var):
-  return check_prefix and re.match(r'^' + check_prefix + r'[0-9]+?$', var, re.IGNORECASE)
-
-def generalize_check_lines_common(lines, is_analyze, vars_seen,
-                                  global_vars_seen, nameless_values,
-                                  nameless_value_regex, is_asm):
-  # This gets called for each match that occurs in
-  # a line. We transform variables we haven't seen
-  # into defs, and variables we have seen into uses.
-  def transform_line_vars(match):
-    var = get_name_from_ir_value_match(match)
-    nameless_value = get_nameless_value_from_match(match, nameless_values)
-    if may_clash_with_default_check_prefix_name(nameless_value.check_prefix, var):
-      warn("Change IR value name '%s' or use --prefix-filecheck-ir-name to prevent possible conflict"
-           " with scripted FileCheck name." % (var,))
-    key = (var, nameless_value.check_key)
-    is_local_def = nameless_value.is_local_def_ir_value_match(match)
-    if is_local_def and key in vars_seen:
-      rv = nameless_value.get_value_use(var, match)
-    elif not is_local_def and key in global_vars_seen:
-      # We could have seen a different prefix for the global variables first,
-      # ensure we use that one instead of the prefix for the current match.
-      rv = nameless_value.get_value_use(var, match, global_vars_seen[key])
-    else:
-      if is_local_def:
-        vars_seen.add(key)
-      else:
-        global_vars_seen[key] = nameless_value.check_prefix
-      rv = nameless_value.get_value_definition(var, match)
-    # re.sub replaces the entire regex match
-    # with whatever you return, so we have
-    # to make sure to hand it back everything
-    # including the commas and spaces.
-    return match.group(1) + rv + match.group(match.lastindex)
-
-  lines_with_def = []
-
-  for i, line in enumerate(lines):
-    if not is_asm:
-      # An IR variable named '%.' matches the FileCheck regex string.
-      line = line.replace('%.', '%dot')
-      for regex in _global_hex_value_regex:
-        if re.match('^@' + regex + ' = ', line):
-          line = re.sub(r'\bi([0-9]+) ([0-9]+)',
-              lambda m : 'i' + m.group(1) + ' [[#' + hex(int(m.group(2))) + ']]',
-              line)
-          break
-      # Ignore any comments, since the check lines will too.
-      scrubbed_line = SCRUB_IR_COMMENT_RE.sub(r'', line)
-      lines[i] = scrubbed_line
-    if is_asm or not is_analyze:
-      # It can happen that two matches are back-to-back and for some reason sub
-      # will not replace both of them. For now we work around this by
-      # substituting until there is no more match.
-      changed = True
-      while changed:
-        (lines[i], changed) = nameless_value_regex.subn(transform_line_vars,
-                                                        lines[i], count=1)
-  return lines
+    return check_prefix and re.match(
+        r"^" + check_prefix + r"[0-9]+?$", var, re.IGNORECASE
+    )
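
(Illustration, not part of the diff: the clash check is case-insensitive and
only fires for names shaped like an auto-generated prefix plus a number.)

    assert may_clash_with_default_check_prefix_name("TMP", "tmp4")
    assert not may_clash_with_default_check_prefix_name("TMP", "result")
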
+
+
+def generalize_check_lines_common(
+    lines,
+    is_analyze,
+    vars_seen,
+    global_vars_seen,
+    nameless_values,
+    nameless_value_regex,
+    is_asm,
+):
+    # This gets called for each match that occurs in
+    # a line. We transform variables we haven't seen
+    # into defs, and variables we have seen into uses.
+    def transform_line_vars(match):
+        var = get_name_from_ir_value_match(match)
+        nameless_value = get_nameless_value_from_match(match, nameless_values)
+        if may_clash_with_default_check_prefix_name(nameless_value.check_prefix, var):
+            warn(
+                "Change IR value name '%s' or use --prefix-filecheck-ir-name to prevent possible conflict"
+                " with scripted FileCheck name." % (var,)
+            )
+        key = (var, nameless_value.check_key)
+        is_local_def = nameless_value.is_local_def_ir_value_match(match)
+        if is_local_def and key in vars_seen:
+            rv = nameless_value.get_value_use(var, match)
+        elif not is_local_def and key in global_vars_seen:
+            # We could have seen a different prefix for the global variables first,
+            # ensure we use that one instead of the prefix for the current match.
+            rv = nameless_value.get_value_use(var, match, global_vars_seen[key])
+        else:
+            if is_local_def:
+                vars_seen.add(key)
+            else:
+                global_vars_seen[key] = nameless_value.check_prefix
+            rv = nameless_value.get_value_definition(var, match)
+        # re.sub replaces the entire regex match
+        # with whatever you return, so we have
+        # to make sure to hand it back everything
+        # including the commas and spaces.
+        return match.group(1) + rv + match.group(match.lastindex)
+
+    lines_with_def = []
+
+    for i, line in enumerate(lines):
+        if not is_asm:
+            # An IR variable named '%.' matches the FileCheck regex string.
+            line = line.replace("%.", "%dot")
+            for regex in _global_hex_value_regex:
+                if re.match("^@" + regex + " = ", line):
+                    line = re.sub(
+                        r"\bi([0-9]+) ([0-9]+)",
+                        lambda m: "i"
+                        + m.group(1)
+                        + " [[#"
+                        + hex(int(m.group(2)))
+                        + "]]",
+                        line,
+                    )
+                    break
+            # Ignore any comments, since the check lines will too.
+            scrubbed_line = SCRUB_IR_COMMENT_RE.sub(r"", line)
+            lines[i] = scrubbed_line
+        if is_asm or not is_analyze:
+            # It can happen that two matches are back-to-back and for some reason sub
+            # will not replace both of them. For now we work around this by
+            # substituting until there is no more match.
+            changed = True
+            while changed:
+                (lines[i], changed) = nameless_value_regex.subn(
+                    transform_line_vars, lines[i], count=1
+                )
+    return lines
+
 
 # Replace IR value defs and uses with FileCheck variables.
 def generalize_check_lines(lines, is_analyze, vars_seen, global_vars_seen):
-  return generalize_check_lines_common(lines, is_analyze, vars_seen,
-                                       global_vars_seen, ir_nameless_values,
-                                       IR_VALUE_RE, False)
+    return generalize_check_lines_common(
+        lines,
+        is_analyze,
+        vars_seen,
+        global_vars_seen,
+        ir_nameless_values,
+        IR_VALUE_RE,
+        False,
+    )
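
(Illustration, not part of the diff, and assuming the module globals such as
_prefix_filecheck_ir_name and _global_hex_value_regex are at their defaults:
generalizing one IR line turns each def into a FileCheck capture.)

    out = generalize_check_lines(["  %1 = add i32 %a, %b"], False, set(), {})
    # out[0] == "  [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]"
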
 
-def generalize_asm_check_lines(lines, vars_seen, global_vars_seen):
-  return generalize_check_lines_common(lines, False, vars_seen,
-                                       global_vars_seen, asm_nameless_values,
-                                       ASM_VALUE_RE, True)
-
-def add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name, check_label_format, is_backend, is_analyze, version, global_vars_seen_dict, is_filtered):
-  # prefix_exclusions are prefixes we cannot use to print the function because the function does not exist in all run lines that use these prefixes.
-  prefix_exclusions = set()
-  printed_prefixes = []
-  for p in prefix_list:
-    checkprefixes = p[0]
-    # If not all checkprefixes of this run line produced the function we cannot check for it as it does not
-    # exist for this run line. A subset of the check prefixes might know about the function but only because
-    # other run lines created it.
-    if any(map(lambda checkprefix: func_name not in func_dict[checkprefix], checkprefixes)):
-      prefix_exclusions |= set(checkprefixes)
-      continue
-
-  # prefix_exclusions is constructed, we can now emit the output
-  for p in prefix_list:
-    global_vars_seen = {}
-    checkprefixes = p[0]
-    for checkprefix in checkprefixes:
-      if checkprefix in global_vars_seen_dict:
-        global_vars_seen.update(global_vars_seen_dict[checkprefix])
-      else:
-        global_vars_seen_dict[checkprefix] = {}
-      if checkprefix in printed_prefixes:
-        break
-
-      # Check if the prefix is excluded.
-      if checkprefix in prefix_exclusions:
-        continue
-
-      # If we do not have output for this prefix we skip it.
-      if not func_dict[checkprefix][func_name]:
-        continue
-
-      # Add some space between different check prefixes, but not after the last
-      # check line (before the test code).
-      if is_backend:
-        if len(printed_prefixes) != 0:
-          output_lines.append(comment_marker)
-
-      if checkprefix not in global_vars_seen_dict:
-        global_vars_seen_dict[checkprefix] = {}
-
-      global_vars_seen_before = [key for key in global_vars_seen.keys()]
-
-      vars_seen = set()
-      printed_prefixes.append(checkprefix)
-      attrs = str(func_dict[checkprefix][func_name].attrs)
-      attrs = '' if attrs == 'None' else attrs
-      if version > 1:
-        funcdef_attrs_and_ret = func_dict[checkprefix][func_name].funcdef_attrs_and_ret
-      else:
-        funcdef_attrs_and_ret = ''
-
-      if attrs:
-        output_lines.append('%s %s: Function Attrs: %s' % (comment_marker, checkprefix, attrs))
-      args_and_sig = str(func_dict[checkprefix][func_name].args_and_sig)
-      if args_and_sig:
-        args_and_sig = generalize_check_lines([args_and_sig], is_analyze, vars_seen, global_vars_seen)[0]
-      func_name_separator = func_dict[checkprefix][func_name].func_name_separator
-      if '[[' in args_and_sig:
-        output_lines.append(check_label_format % (checkprefix, funcdef_attrs_and_ret, func_name, '', func_name_separator))
-        output_lines.append('%s %s-SAME: %s' % (comment_marker, checkprefix, args_and_sig))
-      else:
-        output_lines.append(check_label_format % (checkprefix, funcdef_attrs_and_ret, func_name, args_and_sig, func_name_separator))
-      func_body = str(func_dict[checkprefix][func_name]).splitlines()
-      if not func_body:
-        # We have filtered everything.
-        continue
-
-      # For ASM output, just emit the check lines.
-      if is_backend:
-        body_start = 1
-        if is_filtered:
-          # For filtered output we don't add "-NEXT" so don't add extra spaces
-          # before the first line.
-          body_start = 0
-        else:
-          output_lines.append('%s %s:       %s' % (comment_marker, checkprefix, func_body[0]))
-        func_lines = generalize_asm_check_lines(func_body[body_start:],
-                                                vars_seen, global_vars_seen)
-        for func_line in func_lines:
-          if func_line.strip() == '':
-            output_lines.append('%s %s-EMPTY:' % (comment_marker, checkprefix))
-          else:
-            check_suffix = '-NEXT' if not is_filtered else ''
-            output_lines.append('%s %s%s:  %s' % (comment_marker, checkprefix,
-                                                  check_suffix, func_line))
-        # Remember new global variables we have not seen before
-        for key in global_vars_seen:
-          if key not in global_vars_seen_before:
-            global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
-        break
-
-      # For IR output, change all defs to FileCheck variables, so we're immune
-      # to variable naming fashions.
-      func_body = generalize_check_lines(func_body, is_analyze, vars_seen, global_vars_seen)
-
-      # This could be selectively enabled with an optional invocation argument.
-      # Disabled for now: better to check everything. Be safe rather than sorry.
-
-      # Handle the first line of the function body as a special case because
-      # it's often just noise (a useless asm comment or entry label).
-      #if func_body[0].startswith("#") or func_body[0].startswith("entry:"):
-      #  is_blank_line = True
-      #else:
-      #  output_lines.append('%s %s:       %s' % (comment_marker, checkprefix, func_body[0]))
-      #  is_blank_line = False
-
-      is_blank_line = False
-
-      for func_line in func_body:
-        if func_line.strip() == '':
-          is_blank_line = True
-          continue
-        # Do not waste time checking IR comments.
-        func_line = SCRUB_IR_COMMENT_RE.sub(r'', func_line)
-
-        # Skip blank lines instead of checking them.
-        if is_blank_line:
-          output_lines.append('{} {}:       {}'.format(
-              comment_marker, checkprefix, func_line))
-        else:
-          check_suffix = '-NEXT' if not is_filtered else ''
-          output_lines.append('{} {}{}:  {}'.format(
-              comment_marker, checkprefix, check_suffix, func_line))
-        is_blank_line = False
-
-      # Add space between different check prefixes and also before the first
-      # line of code in the test function.
-      output_lines.append(comment_marker)
-
-      # Remember new global variables we have not seen before
-      for key in global_vars_seen:
-        if key not in global_vars_seen_before:
-          global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
-      break
-  return printed_prefixes
-
-def add_ir_checks(output_lines, comment_marker, prefix_list, func_dict,
-                  func_name, preserve_names, function_sig, version,
-                  global_vars_seen_dict, is_filtered):
-  # Label format is based on IR string.
-  if function_sig and version > 1:
-    function_def_regex = 'define %s'
-  elif function_sig:
-    function_def_regex = 'define {{[^@]+}}%s'
-  else:
-    function_def_regex = '%s'
-  check_label_format = '{} %s-LABEL: {}@%s%s%s'.format(comment_marker, function_def_regex)
-  return add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name,
-                    check_label_format, False, preserve_names, version,
-                    global_vars_seen_dict,
-                    is_filtered)
-
-def add_analyze_checks(output_lines, comment_marker, prefix_list, func_dict, func_name, is_filtered):
-  check_label_format = '{} %s-LABEL: \'%s%s%s%s\''.format(comment_marker)
-  global_vars_seen_dict = {}
-  return add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name,
-                    check_label_format, False, True, 1, global_vars_seen_dict,
-                    is_filtered)
 
-def build_global_values_dictionary(glob_val_dict, raw_tool_output, prefixes):
-  for nameless_value in itertools.chain(ir_nameless_values, asm_nameless_values):
-    if nameless_value.global_ir_rhs_regexp is None:
-      continue
-
-    lhs_re_str = nameless_value.ir_prefix + nameless_value.ir_regexp
-    rhs_re_str = nameless_value.global_ir_rhs_regexp
-
-    global_ir_value_re_str = r'^' + lhs_re_str + r'\s=\s' + rhs_re_str + r'$'
-    global_ir_value_re = re.compile(global_ir_value_re_str, flags=(re.M))
-    lines = []
-    for m in global_ir_value_re.finditer(raw_tool_output):
-      lines.append(m.group(0))
-
-    for prefix in prefixes:
-      if glob_val_dict[prefix] is None:
-        continue
-      if nameless_value.check_prefix in glob_val_dict[prefix]:
-        if lines == glob_val_dict[prefix][nameless_value.check_prefix]:
-          continue
-        if prefix == prefixes[-1]:
-          warn('Found conflicting asm under the same prefix: %r!' % (prefix,))
-        else:
-          glob_val_dict[prefix][nameless_value.check_prefix] = None
-          continue
-      glob_val_dict[prefix][nameless_value.check_prefix] = lines
-
-def add_global_checks(glob_val_dict, comment_marker, prefix_list, output_lines, global_vars_seen_dict, is_analyze, is_before_functions):
-  printed_prefixes = set()
-  for nameless_value in ir_nameless_values:
-    if nameless_value.global_ir_rhs_regexp is None:
-      continue
-    if nameless_value.is_before_functions != is_before_functions:
-      continue
+def generalize_asm_check_lines(lines, vars_seen, global_vars_seen):
+    return generalize_check_lines_common(
+        lines,
+        False,
+        vars_seen,
+        global_vars_seen,
+        asm_nameless_values,
+        ASM_VALUE_RE,
+        True,
+    )
+
+
+def add_checks(
+    output_lines,
+    comment_marker,
+    prefix_list,
+    func_dict,
+    func_name,
+    check_label_format,
+    is_backend,
+    is_analyze,
+    version,
+    global_vars_seen_dict,
+    is_filtered,
+):
+    # prefix_exclusions are prefixes we cannot use to print the function because the function does not exist in all run lines that use these prefixes.
+    prefix_exclusions = set()
+    printed_prefixes = []
     for p in prefix_list:
-      global_vars_seen = {}
-      checkprefixes = p[0]
-      if checkprefixes is None:
-        continue
-      for checkprefix in checkprefixes:
-        if checkprefix in global_vars_seen_dict:
-          global_vars_seen.update(global_vars_seen_dict[checkprefix])
-        else:
-          global_vars_seen_dict[checkprefix] = {}
-        if (checkprefix, nameless_value.check_prefix) in printed_prefixes:
-          break
-        if not glob_val_dict[checkprefix]:
-          continue
-        if nameless_value.check_prefix not in glob_val_dict[checkprefix]:
-          continue
-        if not glob_val_dict[checkprefix][nameless_value.check_prefix]:
-          continue
-
-        check_lines = []
-        global_vars_seen_before = [key for key in global_vars_seen.keys()]
-        for line in glob_val_dict[checkprefix][nameless_value.check_prefix]:
-          if _global_value_regex:
-            matched = False
-            for regex in _global_value_regex:
-              if re.match('^@' + regex + ' = ', line):
-                matched = True
+        checkprefixes = p[0]
+        # If not all checkprefixes of this run line produced the function we cannot check for it as it does not
+        # exist for this run line. A subset of the check prefixes might know about the function but only because
+        # other run lines created it.
+        if any(
+            map(
+                lambda checkprefix: func_name not in func_dict[checkprefix],
+                checkprefixes,
+            )
+        ):
+            prefix_exclusions |= set(checkprefixes)
+            continue
+
+    # prefix_exclusions is constructed, we can now emit the output
+    for p in prefix_list:
+        global_vars_seen = {}
+        checkprefixes = p[0]
+        for checkprefix in checkprefixes:
+            if checkprefix in global_vars_seen_dict:
+                global_vars_seen.update(global_vars_seen_dict[checkprefix])
+            else:
+                global_vars_seen_dict[checkprefix] = {}
+            if checkprefix in printed_prefixes:
                 break
-            if not matched:
-              continue
-          tmp = generalize_check_lines([line], is_analyze, set(), global_vars_seen)
-          check_line = '%s %s: %s' % (comment_marker, checkprefix, tmp[0])
-          check_lines.append(check_line)
-        if not check_lines:
-          continue
 
-        output_lines.append(comment_marker + SEPARATOR)
-        for check_line in check_lines:
-          output_lines.append(check_line)
+            # Check if the prefix is excluded.
+            if checkprefix in prefix_exclusions:
+                continue
+
+            # If we do not have output for this prefix we skip it.
+            if not func_dict[checkprefix][func_name]:
+                continue
+
+            # Add some space between different check prefixes, but not after the last
+            # check line (before the test code).
+            if is_backend:
+                if len(printed_prefixes) != 0:
+                    output_lines.append(comment_marker)
+
+            if checkprefix not in global_vars_seen_dict:
+                global_vars_seen_dict[checkprefix] = {}
+
+            global_vars_seen_before = [key for key in global_vars_seen.keys()]
+
+            vars_seen = set()
+            printed_prefixes.append(checkprefix)
+            attrs = str(func_dict[checkprefix][func_name].attrs)
+            attrs = "" if attrs == "None" else attrs
+            if version > 1:
+                funcdef_attrs_and_ret = func_dict[checkprefix][
+                    func_name
+                ].funcdef_attrs_and_ret
+            else:
+                funcdef_attrs_and_ret = ""
+
+            if attrs:
+                output_lines.append(
+                    "%s %s: Function Attrs: %s" % (comment_marker, checkprefix, attrs)
+                )
+            args_and_sig = str(func_dict[checkprefix][func_name].args_and_sig)
+            if args_and_sig:
+                args_and_sig = generalize_check_lines(
+                    [args_and_sig], is_analyze, vars_seen, global_vars_seen
+                )[0]
+            func_name_separator = func_dict[checkprefix][func_name].func_name_separator
+            if "[[" in args_and_sig:
+                output_lines.append(
+                    check_label_format
+                    % (
+                        checkprefix,
+                        funcdef_attrs_and_ret,
+                        func_name,
+                        "",
+                        func_name_separator,
+                    )
+                )
+                output_lines.append(
+                    "%s %s-SAME: %s" % (comment_marker, checkprefix, args_and_sig)
+                )
+            else:
+                output_lines.append(
+                    check_label_format
+                    % (
+                        checkprefix,
+                        funcdef_attrs_and_ret,
+                        func_name,
+                        args_and_sig,
+                        func_name_separator,
+                    )
+                )
+            func_body = str(func_dict[checkprefix][func_name]).splitlines()
+            if not func_body:
+                # We have filtered everything.
+                continue
+
+            # For ASM output, just emit the check lines.
+            if is_backend:
+                body_start = 1
+                if is_filtered:
+                    # For filtered output we don't add "-NEXT" so don't add extra spaces
+                    # before the first line.
+                    body_start = 0
+                else:
+                    output_lines.append(
+                        "%s %s:       %s" % (comment_marker, checkprefix, func_body[0])
+                    )
+                func_lines = generalize_asm_check_lines(
+                    func_body[body_start:], vars_seen, global_vars_seen
+                )
+                for func_line in func_lines:
+                    if func_line.strip() == "":
+                        output_lines.append(
+                            "%s %s-EMPTY:" % (comment_marker, checkprefix)
+                        )
+                    else:
+                        check_suffix = "-NEXT" if not is_filtered else ""
+                        output_lines.append(
+                            "%s %s%s:  %s"
+                            % (comment_marker, checkprefix, check_suffix, func_line)
+                        )
+                # Remember new global variables we have not seen before
+                for key in global_vars_seen:
+                    if key not in global_vars_seen_before:
+                        global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
+                break
 
-        printed_prefixes.add((checkprefix, nameless_value.check_prefix))
+            # For IR output, change all defs to FileCheck variables, so we're immune
+            # to variable naming fashions.
+            func_body = generalize_check_lines(
+                func_body, is_analyze, vars_seen, global_vars_seen
+            )
+
+            # This could be selectively enabled with an optional invocation argument.
+            # Disabled for now: better to check everything. Be safe rather than sorry.
+
+            # Handle the first line of the function body as a special case because
+            # it's often just noise (a useless asm comment or entry label).
+            # if func_body[0].startswith("#") or func_body[0].startswith("entry:"):
+            #  is_blank_line = True
+            # else:
+            #  output_lines.append('%s %s:       %s' % (comment_marker, checkprefix, func_body[0]))
+            #  is_blank_line = False
+
+            is_blank_line = False
+
+            for func_line in func_body:
+                if func_line.strip() == "":
+                    is_blank_line = True
+                    continue
+                # Do not waste time checking IR comments.
+                func_line = SCRUB_IR_COMMENT_RE.sub(r"", func_line)
+
+                # Skip blank lines instead of checking them.
+                if is_blank_line:
+                    output_lines.append(
+                        "{} {}:       {}".format(comment_marker, checkprefix, func_line)
+                    )
+                else:
+                    check_suffix = "-NEXT" if not is_filtered else ""
+                    output_lines.append(
+                        "{} {}{}:  {}".format(
+                            comment_marker, checkprefix, check_suffix, func_line
+                        )
+                    )
+                is_blank_line = False
+
+            # Add space between different check prefixes and also before the first
+            # line of code in the test function.
+            output_lines.append(comment_marker)
+
+            # Remember new global variables we have not seen before
+            for key in global_vars_seen:
+                if key not in global_vars_seen_before:
+                    global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
+            break
+    return printed_prefixes
+
+
+def add_ir_checks(
+    output_lines,
+    comment_marker,
+    prefix_list,
+    func_dict,
+    func_name,
+    preserve_names,
+    function_sig,
+    version,
+    global_vars_seen_dict,
+    is_filtered,
+):
+    # Label format is based on IR string.
+    if function_sig and version > 1:
+        function_def_regex = "define %s"
+    elif function_sig:
+        function_def_regex = "define {{[^@]+}}%s"
+    else:
+        function_def_regex = "%s"
+    check_label_format = "{} %s-LABEL: {}@%s%s%s".format(
+        comment_marker, function_def_regex
+    )
+    return add_checks(
+        output_lines,
+        comment_marker,
+        prefix_list,
+        func_dict,
+        func_name,
+        check_label_format,
+        False,
+        preserve_names,
+        version,
+        global_vars_seen_dict,
+        is_filtered,
+    )
+
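
(Illustration, not part of the diff: the template mixes str.format and %-style
substitution, expanding in two stages; the second-stage values shown are
hypothetical.)

    fmt = "{} %s-LABEL: {}@%s%s%s".format(";", "define %s")
    assert fmt == "; %s-LABEL: define %s@%s%s%s"
    # add_checks later fills in, e.g.:
    # fmt % ("CHECK", "i32 ", "foo", "(i32 %x)", "")
    #   -> "; CHECK-LABEL: define i32 @foo(i32 %x)"
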
+
+def add_analyze_checks(
+    output_lines, comment_marker, prefix_list, func_dict, func_name, is_filtered
+):
+    check_label_format = "{} %s-LABEL: '%s%s%s%s'".format(comment_marker)
+    global_vars_seen_dict = {}
+    return add_checks(
+        output_lines,
+        comment_marker,
+        prefix_list,
+        func_dict,
+        func_name,
+        check_label_format,
+        False,
+        True,
+        1,
+        global_vars_seen_dict,
+        is_filtered,
+    )
 
-        # Remember new global variables we have not seen before
-        for key in global_vars_seen:
-          if key not in global_vars_seen_before:
-            global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
-        break
 
-  if printed_prefixes:
-    output_lines.append(comment_marker + SEPARATOR)
-  return printed_prefixes
+def build_global_values_dictionary(glob_val_dict, raw_tool_output, prefixes):
+    for nameless_value in itertools.chain(ir_nameless_values, asm_nameless_values):
+        if nameless_value.global_ir_rhs_regexp is None:
+            continue
+
+        lhs_re_str = nameless_value.ir_prefix + nameless_value.ir_regexp
+        rhs_re_str = nameless_value.global_ir_rhs_regexp
+
+        global_ir_value_re_str = r"^" + lhs_re_str + r"\s=\s" + rhs_re_str + r"$"
+        global_ir_value_re = re.compile(global_ir_value_re_str, flags=(re.M))
+        lines = []
+        for m in global_ir_value_re.finditer(raw_tool_output):
+            lines.append(m.group(0))
+
+        for prefix in prefixes:
+            if glob_val_dict[prefix] is None:
+                continue
+            if nameless_value.check_prefix in glob_val_dict[prefix]:
+                if lines == glob_val_dict[prefix][nameless_value.check_prefix]:
+                    continue
+                if prefix == prefixes[-1]:
+                    warn("Found conflicting asm under the same prefix: %r!" % (prefix,))
+                else:
+                    glob_val_dict[prefix][nameless_value.check_prefix] = None
+                    continue
+            glob_val_dict[prefix][nameless_value.check_prefix] = lines
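
(Illustration, not part of the diff: the pattern assembled above for the
'attributes #N = { ... }' kind.)

    nv = ir_nameless_values[2]  # the ATTR entry that has a global RHS
    lhs = nv.ir_prefix + nv.ir_regexp
    assert (
        r"^" + lhs + r"\s=\s" + nv.global_ir_rhs_regexp + r"$"
        == r"^attributes #[0-9]+\s=\s{[^}]*}$"
    )
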
+
+
+def add_global_checks(
+    glob_val_dict,
+    comment_marker,
+    prefix_list,
+    output_lines,
+    global_vars_seen_dict,
+    is_analyze,
+    is_before_functions,
+):
+    printed_prefixes = set()
+    for nameless_value in ir_nameless_values:
+        if nameless_value.global_ir_rhs_regexp is None:
+            continue
+        if nameless_value.is_before_functions != is_before_functions:
+            continue
+        for p in prefix_list:
+            global_vars_seen = {}
+            checkprefixes = p[0]
+            if checkprefixes is None:
+                continue
+            for checkprefix in checkprefixes:
+                if checkprefix in global_vars_seen_dict:
+                    global_vars_seen.update(global_vars_seen_dict[checkprefix])
+                else:
+                    global_vars_seen_dict[checkprefix] = {}
+                if (checkprefix, nameless_value.check_prefix) in printed_prefixes:
+                    break
+                if not glob_val_dict[checkprefix]:
+                    continue
+                if nameless_value.check_prefix not in glob_val_dict[checkprefix]:
+                    continue
+                if not glob_val_dict[checkprefix][nameless_value.check_prefix]:
+                    continue
+
+                check_lines = []
+                global_vars_seen_before = [key for key in global_vars_seen.keys()]
+                for line in glob_val_dict[checkprefix][nameless_value.check_prefix]:
+                    if _global_value_regex:
+                        matched = False
+                        for regex in _global_value_regex:
+                            if re.match("^@" + regex + " = ", line):
+                                matched = True
+                                break
+                        if not matched:
+                            continue
+                    tmp = generalize_check_lines(
+                        [line], is_analyze, set(), global_vars_seen
+                    )
+                    check_line = "%s %s: %s" % (comment_marker, checkprefix, tmp[0])
+                    check_lines.append(check_line)
+                if not check_lines:
+                    continue
+
+                output_lines.append(comment_marker + SEPARATOR)
+                for check_line in check_lines:
+                    output_lines.append(check_line)
+
+                printed_prefixes.add((checkprefix, nameless_value.check_prefix))
+
+                # Remember new global variables we have not seen before
+                for key in global_vars_seen:
+                    if key not in global_vars_seen_before:
+                        global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
+                break
+
+    if printed_prefixes:
+        output_lines.append(comment_marker + SEPARATOR)
+    return printed_prefixes
 
 
 def check_prefix(prefix):
-  if not PREFIX_RE.match(prefix):
-    hint = ""
-    if ',' in prefix:
-      hint = " Did you mean '--check-prefixes=" + prefix + "'?"
-    warn(("Supplied prefix '%s' is invalid. Prefix must contain only alphanumeric characters, hyphens and underscores." + hint) %
-         (prefix))
+    if not PREFIX_RE.match(prefix):
+        hint = ""
+        if "," in prefix:
+            hint = " Did you mean '--check-prefixes=" + prefix + "'?"
+        warn(
+            (
+                "Supplied prefix '%s' is invalid. Prefix must contain only alphanumeric characters, hyphens and underscores."
+                + hint
+            )
+            % (prefix)
+        )
+
 
 def get_check_prefixes(filecheck_cmd):
-  check_prefixes = [item for m in CHECK_PREFIX_RE.finditer(filecheck_cmd)
-                           for item in m.group(1).split(',')]
-  if not check_prefixes:
-    check_prefixes = ['CHECK']
-  return check_prefixes
+    check_prefixes = [
+        item
+        for m in CHECK_PREFIX_RE.finditer(filecheck_cmd)
+        for item in m.group(1).split(",")
+    ]
+    if not check_prefixes:
+        check_prefixes = ["CHECK"]
+    return check_prefixes
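
(Illustration, not part of the diff, assuming CHECK_PREFIX_RE from earlier in
this file matches --check-prefix/--check-prefixes options.)

    # get_check_prefixes("FileCheck %s --check-prefixes=CHECK,OPT")
    #   -> ["CHECK", "OPT"]
    # get_check_prefixes("FileCheck %s") -> ["CHECK"]
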
+
 
 def verify_filecheck_prefixes(fc_cmd):
-  fc_cmd_parts = fc_cmd.split()
-  for part in fc_cmd_parts:
-    if "check-prefix=" in part:
-      prefix = part.split('=', 1)[1]
-      check_prefix(prefix)
-    elif "check-prefixes=" in part:
-      prefixes = part.split('=', 1)[1].split(',')
-      for prefix in prefixes:
-        check_prefix(prefix)
-        if prefixes.count(prefix) > 1:
-          warn("Supplied prefix '%s' is not unique in the prefix list." % (prefix,))
+    fc_cmd_parts = fc_cmd.split()
+    for part in fc_cmd_parts:
+        if "check-prefix=" in part:
+            prefix = part.split("=", 1)[1]
+            check_prefix(prefix)
+        elif "check-prefixes=" in part:
+            prefixes = part.split("=", 1)[1].split(",")
+            for prefix in prefixes:
+                check_prefix(prefix)
+                if prefixes.count(prefix) > 1:
+                    warn(
+                        "Supplied prefix '%s' is not unique in the prefix list."
+                        % (prefix,)
+                    )
 
 
 def get_autogennote_suffix(parser, args):
-  autogenerated_note_args = ''
-  for action in parser._actions:
-    if not hasattr(args, action.dest):
-      continue  # Ignore options such as --help that aren't included in args
-    # Ignore parameters such as paths to the binary or the list of tests
-    if action.dest in ('tests', 'update_only', 'tool_binary', 'opt_binary',
-                       'llc_binary', 'clang', 'opt', 'llvm_bin', 'verbose',
-                       'force_update'):
-      continue
-    value = getattr(args, action.dest)
-    if action.const is not None:  # action stores a constant (usually True/False)
-      # Skip actions with different constant values (this happens with boolean
-      # --foo/--no-foo options)
-      if value != action.const:
-        continue
-    if parser.get_default(action.dest) == value:
-      continue  # Don't add default values
-    if action.dest == 'function_signature' and args.version >= 2:
-      continue # Enabled by default in version 2
-    if action.dest == 'filters':
-      # Create a separate option for each filter element.  The value is a list
-      # of Filter objects.
-      for elem in value:
-        opt_name = 'filter-out' if elem.is_filter_out else 'filter'
-        opt_value = elem.pattern()
-        new_arg = '--%s "%s" ' % (opt_name, opt_value.strip('"'))
-        if new_arg not in autogenerated_note_args:
-          autogenerated_note_args += new_arg
-    else:
-      autogenerated_note_args += action.option_strings[0] + ' '
-      if action.const is None:  # action takes a parameter
-        if action.nargs == '+':
-          value = ' '.join(map(lambda v: '"' + v.strip('"') + '"', value))
-        autogenerated_note_args += '%s ' % value
-  if autogenerated_note_args:
-    autogenerated_note_args = ' %s %s' % (UTC_ARGS_KEY, autogenerated_note_args[:-1])
-  return autogenerated_note_args
+    autogenerated_note_args = ""
+    for action in parser._actions:
+        if not hasattr(args, action.dest):
+            continue  # Ignore options such as --help that aren't included in args
+        # Ignore parameters such as paths to the binary or the list of tests
+        if action.dest in (
+            "tests",
+            "update_only",
+            "tool_binary",
+            "opt_binary",
+            "llc_binary",
+            "clang",
+            "opt",
+            "llvm_bin",
+            "verbose",
+            "force_update",
+        ):
+            continue
+        value = getattr(args, action.dest)
+        if action.const is not None:  # action stores a constant (usually True/False)
+        # Skip actions with different constant values (this happens with boolean
+            # --foo/--no-foo options)
+            if value != action.const:
+                continue
+        if parser.get_default(action.dest) == value:
+            continue  # Don't add default values
+        if action.dest == "function_signature" and args.version >= 2:
+            continue  # Enabled by default in version 2
+        if action.dest == "filters":
+            # Create a separate option for each filter element.  The value is a list
+            # of Filter objects.
+            for elem in value:
+                opt_name = "filter-out" if elem.is_filter_out else "filter"
+                opt_value = elem.pattern()
+                new_arg = '--%s "%s" ' % (opt_name, opt_value.strip('"'))
+                if new_arg not in autogenerated_note_args:
+                    autogenerated_note_args += new_arg
+        else:
+            autogenerated_note_args += action.option_strings[0] + " "
+            if action.const is None:  # action takes a parameter
+                if action.nargs == "+":
+                    value = " ".join(map(lambda v: '"' + v.strip('"') + '"', value))
+                autogenerated_note_args += "%s " % value
+    if autogenerated_note_args:
+        autogenerated_note_args = " %s %s" % (
+            UTC_ARGS_KEY,
+            autogenerated_note_args[:-1],
+        )
+    return autogenerated_note_args
 
 
 def check_for_command(line, parser, args, argv, argparse_callback):
-  cmd_m = UTC_ARGS_CMD.match(line)
-  if cmd_m:
-    for option in shlex.split(cmd_m.group('cmd').strip()):
-      if option:
-        argv.append(option)
-    args = parse_args(parser, filter(lambda arg: arg not in args.tests, argv))
-    if argparse_callback is not None:
-      argparse_callback(args)
-  return args, argv
+    cmd_m = UTC_ARGS_CMD.match(line)
+    if cmd_m:
+        for option in shlex.split(cmd_m.group("cmd").strip()):
+            if option:
+                argv.append(option)
+        args = parse_args(parser, filter(lambda arg: arg not in args.tests, argv))
+        if argparse_callback is not None:
+            argparse_callback(args)
+    return args, argv
+
 
 def find_arg_in_test(test_info, get_arg_to_check, arg_string, is_global):
-  result = get_arg_to_check(test_info.args)
-  if not result and is_global:
-    # See if this has been specified via UTC_ARGS.  This is a "global" option
-    # that affects the entire generation of test checks.  If it exists anywhere
-    # in the test, apply it to everything.
-    saw_line = False
-    for line_info in test_info.ro_iterlines():
-      line = line_info.line
-      if not line.startswith(';') and line.strip() != '':
-        saw_line = True
-      result = get_arg_to_check(line_info.args)
-      if result:
-        if warn and saw_line:
-          # We saw the option after already reading some test input lines.
-          # Warn about it.
-          print('WARNING: Found {} in line following test start: '.format(arg_string)
-                + line, file=sys.stderr)
-          print('WARNING: Consider moving {} to top of file'.format(arg_string),
-                file=sys.stderr)
-        break
-  return result
+    result = get_arg_to_check(test_info.args)
+    if not result and is_global:
+        # See if this has been specified via UTC_ARGS.  This is a "global" option
+        # that affects the entire generation of test checks.  If it exists anywhere
+        # in the test, apply it to everything.
+        saw_line = False
+        for line_info in test_info.ro_iterlines():
+            line = line_info.line
+            if not line.startswith(";") and line.strip() != "":
+                saw_line = True
+            result = get_arg_to_check(line_info.args)
+            if result:
+                if warn and saw_line:
+                    # We saw the option after already reading some test input lines.
+                    # Warn about it.
+                    print(
+                        "WARNING: Found {} in line following test start: ".format(
+                            arg_string
+                        )
+                        + line,
+                        file=sys.stderr,
+                    )
+                    print(
+                        "WARNING: Consider moving {} to top of file".format(arg_string),
+                        file=sys.stderr,
+                    )
+                break
+    return result
+
 
 def dump_input_lines(output_lines, test_info, prefix_set, comment_string):
-  for input_line_info in test_info.iterlines(output_lines):
-    line = input_line_info.line
-    args = input_line_info.args
-    if line.strip() == comment_string:
-      continue
-    if line.strip() == comment_string + SEPARATOR:
-      continue
-    if line.lstrip().startswith(comment_string):
-      m = CHECK_RE.match(line)
-      if m and m.group(1) in prefix_set:
-        continue
-    output_lines.append(line.rstrip('\n'))
-
-def add_checks_at_end(output_lines, prefix_list, func_order,
-                      comment_string, check_generator):
-  added = set()
-  generated_prefixes = set()
-  for prefix in prefix_list:
-    prefixes = prefix[0]
-    tool_args = prefix[1]
-    for prefix in prefixes:
-      for func in func_order[prefix]:
-        # The func order can contain the same functions multiple times.
-        # If we see one again we are done.
-        if (func, prefix) in added:
-          continue
-        if added:
-          output_lines.append(comment_string)
-
-        # The add_*_checks routines expect a run list whose items are
-        # tuples that have a list of prefixes as their first element and
-        # tool command args string as their second element.  They output
-        # checks for each prefix in the list of prefixes.  By doing so, it
-        # implicitly assumes that for each function every run line will
-        # generate something for that function.  That is not the case for
-        # generated functions as some run lines might not generate them
-        # (e.g. -fopenmp vs. no -fopenmp).
-        #
-        # Therefore, pass just the prefix we're interested in.  This has
-        # the effect of generating all of the checks for functions of a
-        # single prefix before moving on to the next prefix.  So checks
-        # are ordered by prefix instead of by function as in "normal"
-        # mode.
-        for generated_prefix in check_generator(output_lines,
-                        [([prefix], tool_args)], func):
-          added.add((func, generated_prefix))
-          generated_prefixes.add(generated_prefix)
-  return generated_prefixes
+    for input_line_info in test_info.iterlines(output_lines):
+        line = input_line_info.line
+        args = input_line_info.args
+        if line.strip() == comment_string:
+            continue
+        if line.strip() == comment_string + SEPARATOR:
+            continue
+        if line.lstrip().startswith(comment_string):
+            m = CHECK_RE.match(line)
+            if m and m.group(1) in prefix_set:
+                continue
+        output_lines.append(line.rstrip("\n"))
+
+
+def add_checks_at_end(
+    output_lines, prefix_list, func_order, comment_string, check_generator
+):
+    added = set()
+    generated_prefixes = set()
+    for prefix in prefix_list:
+        prefixes = prefix[0]
+        tool_args = prefix[1]
+        for prefix in prefixes:
+            for func in func_order[prefix]:
+                # The func order can contain the same functions multiple times.
+                # If we see one again we are done.
+                if (func, prefix) in added:
+                    continue
+                if added:
+                    output_lines.append(comment_string)
+
+                # The add_*_checks routines expect a run list whose items are
+                # tuples that have a list of prefixes as their first element and
+                # tool command args string as their second element.  They output
+                # checks for each prefix in the list of prefixes.  By doing so, it
+                # implicitly assumes that for each function every run line will
+                # generate something for that function.  That is not the case for
+                # generated functions as some run lines might not generate them
+                # (e.g. -fopenmp vs. no -fopenmp).
+                #
+                # Therefore, pass just the prefix we're interested in.  This has
+                # the effect of generating all of the checks for functions of a
+                # single prefix before moving on to the next prefix.  So checks
+                # are ordered by prefix instead of by function as in "normal"
+                # mode.
+                for generated_prefix in check_generator(
+                    output_lines, [([prefix], tool_args)], func
+                ):
+                    added.add((func, generated_prefix))
+                    generated_prefixes.add(generated_prefix)
+    return generated_prefixes

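As context for the reformat above: check_for_command() in this file re-parses options embedded in a test's UTC_ARGS comment with shlex. A minimal sketch of that round-trip follows; the UTC_ARGS_CMD pattern below is a simplified stand-in for illustration only (the real regex is defined earlier in common.py):

    import re
    import shlex

    # Simplified stand-in; the real UTC_ARGS_CMD regex lives in common.py.
    UTC_ARGS_CMD = re.compile(r".*utc_args:\s*(?P<cmd>.*)")

    line = '; utc_args: --filter "foo" --version 2'
    m = UTC_ARGS_CMD.match(line)
    argv = []
    if m:
        # Split like a shell would, dropping empty tokens.
        argv = [opt for opt in shlex.split(m.group("cmd").strip()) if opt]
    print(argv)  # ['--filter', 'foo', '--version', '2']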
diff  --git a/llvm/utils/UpdateTestChecks/isel.py b/llvm/utils/UpdateTestChecks/isel.py
index c10a55540efdd..bdb68e5815a33 100644
--- a/llvm/utils/UpdateTestChecks/isel.py
+++ b/llvm/utils/UpdateTestChecks/isel.py
@@ -3,10 +3,12 @@
 import sys
 
 if sys.version_info[0] > 2:
-  class string:
-    expandtabs = str.expandtabs
+
+    class string:
+        expandtabs = str.expandtabs
+
 else:
-  import string
+    import string
 
 # Support of isel debug checks
 # RegEx: this is where the magic happens.
@@ -15,43 +17,64 @@ class string:
 
 # TODO: add function prefix
 ISEL_FUNCTION_DEFAULT_RE = re.compile(
-     r'Selected[\s]*selection[\s]*DAG:[\s]*%bb.0[\s]*\'(?P<func>.*?):[^\']*\'*\n'
-     r'(?P<body>.*?)\n'
-     r'Total[\s]*amount[\s]*of[\s]*phi[\s]*nodes[\s]*to[\s]*update:[\s]*[0-9]+',
-     flags=(re.M | re.S))
+    r"Selected[\s]*selection[\s]*DAG:[\s]*%bb.0[\s]*\'(?P<func>.*?):[^\']*\'*\n"
+    r"(?P<body>.*?)\n"
+    r"Total[\s]*amount[\s]*of[\s]*phi[\s]*nodes[\s]*to[\s]*update:[\s]*[0-9]+",
+    flags=(re.M | re.S),
+)
+
 
 def scrub_isel_default(isel, args):
-  # Scrub runs of whitespace out of the iSel debug output, but leave the leading
-  # whitespace in place.
-  isel = common.SCRUB_WHITESPACE_RE.sub(r' ', isel)
-  # Expand the tabs used for indentation.
-  isel = string.expandtabs(isel, 2)
-  # Strip trailing whitespace.
-  isel = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', isel)
-  return isel
+    # Scrub runs of whitespace out of the iSel debug output, but leave the leading
+    # whitespace in place.
+    isel = common.SCRUB_WHITESPACE_RE.sub(r" ", isel)
+    # Expand the tabs used for indentation.
+    isel = string.expandtabs(isel, 2)
+    # Strip trailing whitespace.
+    isel = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", isel)
+    return isel
+
 
 def get_run_handler(triple):
-  target_handlers = {
-  }
-  handler = None
-  best_prefix = ''
-  for prefix, s in target_handlers.items():
-    if triple.startswith(prefix) and len(prefix) > len(best_prefix):
-      handler = s
-      best_prefix = prefix
+    target_handlers = {}
+    handler = None
+    best_prefix = ""
+    for prefix, s in target_handlers.items():
+        if triple.startswith(prefix) and len(prefix) > len(best_prefix):
+            handler = s
+            best_prefix = prefix
+
+    if handler is None:
+        common.debug("Using default handler.")
+        handler = (scrub_isel_default, ISEL_FUNCTION_DEFAULT_RE)
 
-  if handler is None:
-    common.debug('Using default handler.')
-    handler = (scrub_isel_default, ISEL_FUNCTION_DEFAULT_RE)
+    return handler
 
-  return handler
 
 ##### Generator of iSel CHECK lines
 
-def add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name,
-               global_vars_seen_dict, is_filtered):
-  # Label format is based on iSel string.
-  check_label_format = '{} %s-LABEL: %s%s%s%s'.format(comment_marker)
-  return common.add_checks(output_lines, comment_marker, prefix_list, func_dict,
-                           func_name, check_label_format, True, False, 1,
-                           global_vars_seen_dict, is_filtered=is_filtered)
+
+def add_checks(
+    output_lines,
+    comment_marker,
+    prefix_list,
+    func_dict,
+    func_name,
+    global_vars_seen_dict,
+    is_filtered,
+):
+    # Label format is based on iSel string.
+    check_label_format = "{} %s-LABEL: %s%s%s%s".format(comment_marker)
+    return common.add_checks(
+        output_lines,
+        comment_marker,
+        prefix_list,
+        func_dict,
+        func_name,
+        check_label_format,
+        True,
+        False,
+        1,
+        global_vars_seen_dict,
+        is_filtered=is_filtered,
+    )

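get_run_handler() above picks the scrubber registered for the longest triple prefix it knows, falling back to the default handler when the map is empty (as it currently is). A sketch of that dispatch with hypothetical handler names:

    def pick_handler(triple, target_handlers, default):
        handler, best_prefix = None, ""
        for prefix, h in target_handlers.items():
            # Prefer the most specific (longest) prefix matching the triple.
            if triple.startswith(prefix) and len(prefix) > len(best_prefix):
                handler, best_prefix = h, prefix
        return handler if handler is not None else default

    handlers = {"x86": "x86-handler", "x86_64": "x86_64-handler"}
    print(pick_handler("x86_64-unknown-linux", handlers, "default"))
    # x86_64-handler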
diff  --git a/llvm/utils/abtest.py b/llvm/utils/abtest.py
index 4ca3ca4f730e9..a799c8764b290 100755
--- a/llvm/utils/abtest.py
+++ b/llvm/utils/abtest.py
@@ -57,11 +57,7 @@
 
 
 def find(dir, file_filter=None):
-    files = [
-        walkdir[0]+"/"+file
-        for walkdir in os.walk(dir)
-        for file in walkdir[2]
-    ]
+    files = [walkdir[0] + "/" + file for walkdir in os.walk(dir) for file in walkdir[2]]
     if file_filter is not None:
         files = filter(files, file_filter)
     return sorted(files)
@@ -147,8 +143,10 @@ def test_partition(partition, upcoming_partition):
         picks = dict(all_a)
         for x in partition:
             picks[x] = choice_map[x][1]
-        announce_test("checking %s [<=%d remaining]" %
-                      (format_namelist(partition), max_remaining_steps))
+        announce_test(
+            "checking %s [<=%d remaining]"
+            % (format_namelist(partition), max_remaining_steps)
+        )
         res = perform_test(picks)
         if res is True:
             known_good.update(partition)
@@ -184,7 +182,7 @@ def extract_functions(file):
         if marker != -1:
             if in_function is not None:
                 warn("Missing end of function %s" % (in_function,))
-            funcname = line[marker + 19:-1]
+            funcname = line[marker + 19 : -1]
             in_function = funcname
             text = line
             continue
@@ -210,7 +208,7 @@ def replace_functions(source, dest, replacements):
         if marker != -1:
             if in_function is not None:
                 warn("Missing end of function %s" % (in_function,))
-            funcname = line[marker + 19:-1]
+            funcname = line[marker + 19 : -1]
             in_function = funcname
             replacement = replacements.get(in_function)
             if replacement is not None:
@@ -229,7 +227,10 @@ def replace_functions(source, dest, replacements):
 
 
 def testrun(files):
-    linkline = "%s %s" % (LINKTEST, " ".join(files),)
+    linkline = "%s %s" % (
+        LINKTEST,
+        " ".join(files),
+    )
     res = subprocess.call(linkline, shell=True)
     if res != 0:
         announce_result(FAILED + ": '%s' exitcode != 0" % LINKTEST)
@@ -244,12 +245,13 @@ def prepare_files(gooddir, baddir, rspfile):
     files_b = []
 
     if rspfile is not None:
+
         def get_basename(name):
             # remove prefix
             if name.startswith(gooddir):
-                return name[len(gooddir):]
+                return name[len(gooddir) :]
             if name.startswith(baddir):
-                return name[len(baddir):]
+                return name[len(baddir) :]
             assert False, ""
 
         with open(rspfile, "r") as rf:
@@ -269,15 +271,13 @@ def get_basename(name):
     for name in files_b:
         basename = get_basename(name)
         if basename not in basenames_a:
-            warn("There is no corresponding file to '%s' in %s" %
-                 (name, gooddir))
+            warn("There is no corresponding file to '%s' in %s" % (name, gooddir))
     choices = []
     skipped = []
     for name in files_a:
         basename = get_basename(name)
         if basename not in basenames_b:
-            warn("There is no corresponding file to '%s' in %s" %
-                 (name, baddir))
+            warn("There is no corresponding file to '%s' in %s" % (name, baddir))
 
         file_a = gooddir + "/" + basename
         file_b = baddir + "/" + basename
@@ -307,8 +307,7 @@ def perform_test(picks):
         # If response file is used, create a temporary response file for the
         # picked files.
         if rspfile is not None:
-            with tempfile.NamedTemporaryFile('w', suffix='.rsp',
-                                             delete=False) as tf:
+            with tempfile.NamedTemporaryFile("w", suffix=".rsp", delete=False) as tf:
                 tf.write(" ".join(files))
                 tf.flush()
             ret = testrun([tf.name])
@@ -346,7 +345,7 @@ def prepare_functions(to_check, gooddir, goodfile, badfile):
     if len(skipped) > 0:
         info("Skipped (same content): %s" % format_namelist(skipped))
 
-    combined_file = '/tmp/combined2.s'
+    combined_file = "/tmp/combined2.s"
     files = []
     found_good_file = False
     for c in files_good:
@@ -362,21 +361,21 @@ def perform_test(picks):
             assert x == functions_a_map[name] or x == functions_b_map[name]
         replace_functions(goodfile, combined_file, picks)
         return testrun(files)
+
     return perform_test, choices
 
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--a', dest='dir_a', default='before')
-    parser.add_argument('--b', dest='dir_b', default='after')
-    parser.add_argument('--rsp', default=None)
-    parser.add_argument('--test', default='./link_test')
-    parser.add_argument('--insane', help='Skip sanity check',
-                        action='store_true')
-    parser.add_argument('--seq',
-                        help='Check sequentially instead of bisection',
-                        action='store_true')
-    parser.add_argument('file', metavar='file', nargs='?')
+    parser.add_argument("--a", dest="dir_a", default="before")
+    parser.add_argument("--b", dest="dir_b", default="after")
+    parser.add_argument("--rsp", default=None)
+    parser.add_argument("--test", default="./link_test")
+    parser.add_argument("--insane", help="Skip sanity check", action="store_true")
+    parser.add_argument(
+        "--seq", help="Check sequentially instead of bisection", action="store_true"
+    )
+    parser.add_argument("file", metavar="file", nargs="?")
     config = parser.parse_args()
 
     gooddir = config.dir_a
@@ -391,8 +390,9 @@ def main():
     if config.file is not None:
         goodfile = gooddir + "/" + config.file
         badfile = baddir + "/" + config.file
-        perform_test, choices = prepare_functions(config.file, gooddir,
-                                                  goodfile, badfile)
+        perform_test, choices = prepare_functions(
+            config.file, gooddir, goodfile, badfile
+        )
     else:
         perform_test, choices = prepare_files(gooddir, baddir, rspfile)
 
@@ -423,5 +423,5 @@ def main():
         stderr.write("Could not identify failing parts?!?")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

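abtest.py narrows down which "after" object file (or function) breaks the link by testing subsets of choices. The core search strategy is ordinary bisection; a minimal sketch of just that idea, with is_good standing in for perform_test (the real script additionally handles partitions, response files, and function-level splitting):

    def bisect_bad(items, is_good):
        # is_good(subset) is True when swapping in `subset` still works.
        while len(items) > 1:
            half = items[: len(items) // 2]
            # If the first half alone already fails, the culprit is in it.
            items = half if not is_good(half) else items[len(half) :]
        return items[0]

    print(bisect_bad(list(range(8)), lambda s: 5 not in s))  # 5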
diff  --git a/llvm/utils/add_argument_names.py b/llvm/utils/add_argument_names.py
index 38dde25997941..2860dd4b90bcb 100755
--- a/llvm/utils/add_argument_names.py
+++ b/llvm/utils/add_argument_names.py
@@ -1,55 +1,59 @@
 #!/usr/bin/env python3
 import re, sys
 
+
 def fix_string(s):
-    TYPE = re.compile('\s*(i[0-9]+|float|double|x86_fp80|fp128|ppc_fp128|\[\[.*?\]\]|\[2 x \[\[[A-Z_0-9]+\]\]\]|<.*?>|{.*?}|\[[0-9]+ x .*?\]|%["a-z:A-Z0-9._]+({{.*?}})?|%{{.*?}}|{{.*?}}|\[\[.*?\]\])(\s*(\*|addrspace\(.*?\)|dereferenceable\(.*?\)|byval\(.*?\)|sret|zeroext|inreg|returned|signext|nocapture|align \d+|swiftself|swifterror|readonly|noalias|inalloca|nocapture))*\s*')
+    TYPE = re.compile(
+        '\s*(i[0-9]+|float|double|x86_fp80|fp128|ppc_fp128|\[\[.*?\]\]|\[2 x \[\[[A-Z_0-9]+\]\]\]|<.*?>|{.*?}|\[[0-9]+ x .*?\]|%["a-z:A-Z0-9._]+({{.*?}})?|%{{.*?}}|{{.*?}}|\[\[.*?\]\])(\s*(\*|addrspace\(.*?\)|dereferenceable\(.*?\)|byval\(.*?\)|sret|zeroext|inreg|returned|signext|nocapture|align \d+|swiftself|swifterror|readonly|noalias|inalloca|nocapture))*\s*'
+    )
 
     counter = 0
-    if 'i32{{.*}}' in s:
+    if "i32{{.*}}" in s:
         counter = 1
 
-    at_pos = s.find('@')
+    at_pos = s.find("@")
     if at_pos == -1:
         at_pos = 0
 
-    annoying_pos = s.find('{{[^(]+}}')
+    annoying_pos = s.find("{{[^(]+}}")
     if annoying_pos != -1:
         at_pos = annoying_pos + 9
 
-    paren_pos = s.find('(', at_pos)
+    paren_pos = s.find("(", at_pos)
     if paren_pos == -1:
         return s
 
-    res = s[:paren_pos+1]
-    s = s[paren_pos+1:]
+    res = s[: paren_pos + 1]
+    s = s[paren_pos + 1 :]
 
     m = TYPE.match(s)
     while m:
         res += m.group()
-        s = s[m.end():]
-        if s.startswith(',') or s.startswith(')'):
-            res += f' %{counter}'
+        s = s[m.end() :]
+        if s.startswith(",") or s.startswith(")"):
+            res += f" %{counter}"
             counter += 1
 
-        next_arg = s.find(',')
+        next_arg = s.find(",")
         if next_arg == -1:
             break
 
-        res += s[:next_arg+1]
-        s = s[next_arg+1:]
+        res += s[: next_arg + 1]
+        s = s[next_arg + 1 :]
         m = TYPE.match(s)
 
-    return res+s
+    return res + s
+
 
 def process_file(contents):
-    PREFIX = re.compile(r'check-prefix(es)?(=|\s+)([a-zA-Z0-9,]+)')
-    check_prefixes = ['CHECK']
-    result = ''
-    for line in contents.split('\n'):
-        if 'FileCheck' in line:
+    PREFIX = re.compile(r"check-prefix(es)?(=|\s+)([a-zA-Z0-9,]+)")
+    check_prefixes = ["CHECK"]
+    result = ""
+    for line in contents.split("\n"):
+        if "FileCheck" in line:
             m = PREFIX.search(line)
             if m:
-                check_prefixes.extend(m.group(3).split(','))
+                check_prefixes.extend(m.group(3).split(","))
 
         found_check = False
         for prefix in check_prefixes:
@@ -57,26 +61,28 @@ def process_file(contents):
                 found_check = True
                 break
 
-        if not found_check or 'define' not in line:
-            result += line + '\n'
+        if not found_check or "define" not in line:
+            result += line + "\n"
             continue
 
         # We have a check for a function definition. Number the args.
         line = fix_string(line)
-        result += line + '\n'
+        result += line + "\n"
     return result
 
+
 def main():
-    print(f'Processing {sys.argv[1]}')
+    print(f"Processing {sys.argv[1]}")
     f = open(sys.argv[1])
     content = f.read()
     f.close()
 
     content = process_file(content)
 
-    f = open(sys.argv[1], 'w')
+    f = open(sys.argv[1], "w")
     f.write(content)
     f.close()
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

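A hand-worked example of what fix_string() above does to a FileCheck'd define line (the counter starts at 0 unless the line contains i32{{.*}}); each unnamed argument position gains a %N name:

    before = "; CHECK: define i32 @foo(i32, i32)"
    after = "; CHECK: define i32 @foo(i32 %0, i32 %1)"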
diff  --git a/llvm/utils/bugpoint_gisel_reducer.py b/llvm/utils/bugpoint_gisel_reducer.py
index 4c366efbba497..116ec792e921d 100755
--- a/llvm/utils/bugpoint_gisel_reducer.py
+++ b/llvm/utils/bugpoint_gisel_reducer.py
@@ -24,40 +24,38 @@ def log(msg):
 
 
 def hr():
-    log('-' * 50)
+    log("-" * 50)
 
 
 def log_err(msg):
-    print('ERROR: {}'.format(msg), file=sys.stderr)
+    print("ERROR: {}".format(msg), file=sys.stderr)
 
 
 def check_path(path):
     if not os.path.exists(path):
-        log_err('{} does not exist.'.format(path))
+        log_err("{} does not exist.".format(path))
         raise
     return path
 
 
 def check_bin(build_dir, bin_name):
-    file_name = '{}/bin/{}'.format(build_dir, bin_name)
+    file_name = "{}/bin/{}".format(build_dir, bin_name)
     return check_path(file_name)
 
 
 def run_llc(llc, irfile):
-    pr = subprocess.Popen([llc,
-                           '-o',
-                           '-',
-                           '-global-isel',
-                           '-pass-remarks-missed=gisel',
-                           irfile],
-                          stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE)
+    pr = subprocess.Popen(
+        [llc, "-o", "-", "-global-isel", "-pass-remarks-missed=gisel", irfile],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
     out, err = pr.communicate()
     res = pr.wait()
     if res == 0:
         return 0
     re_err = re.compile(
-        r'LLVM ERROR: ([a-z\s]+):.*(G_INTRINSIC[_A-Z]* <intrinsic:@[a-zA-Z0-9\.]+>|G_[A-Z_]+)')
+        r"LLVM ERROR: ([a-z\s]+):.*(G_INTRINSIC[_A-Z]* <intrinsic:@[a-zA-Z0-9\.]+>|G_[A-Z_]+)"
+    )
     match = re_err.match(err)
     if not match:
         return 0
@@ -66,13 +64,18 @@ def run_llc(llc, irfile):
 
 
 def run_bugpoint(bugpoint_bin, llc_bin, opt_bin, tmp, ir_file):
-    compileCmd = '-compile-command={} -c {} {}'.format(
-        os.path.realpath(__file__), llc_bin, tmp)
-    pr = subprocess.Popen([bugpoint_bin,
-                           '-compile-custom',
-                           compileCmd,
-                           '-opt-command={}'.format(opt_bin),
-                           ir_file])
+    compileCmd = "-compile-command={} -c {} {}".format(
+        os.path.realpath(__file__), llc_bin, tmp
+    )
+    pr = subprocess.Popen(
+        [
+            bugpoint_bin,
+            "-compile-custom",
+            compileCmd,
+            "-opt-command={}".format(opt_bin),
+            ir_file,
+        ]
+    )
     res = pr.wait()
     if res != 0:
         log_err("Unable to reduce the test.")
@@ -83,13 +86,13 @@ def run_bugpoint_check():
     path_to_llc = sys.argv[2]
     path_to_err = sys.argv[3]
     path_to_ir = sys.argv[4]
-    with open(path_to_err, 'r') as f:
+    with open(path_to_err, "r") as f:
         err = f.read()
         res = run_llc(path_to_llc, path_to_ir)
         if res == 0:
             return 0
-        log('GlobalISed failed, {}: {}'.format(res[0], res[1]))
-        if res != err.split(';'):
+        log("GlobalISed failed, {}: {}".format(res[0], res[1]))
+        if res != err.split(";"):
             return 0
         else:
             return 1
@@ -97,50 +100,53 @@ def run_bugpoint_check():
 
 def main():
     # Check if this is called by bugpoint.
-    if len(sys.argv) == 5 and sys.argv[1] == '-c':
+    if len(sys.argv) == 5 and sys.argv[1] == "-c":
         sys.exit(run_bugpoint_check())
 
     # Parse arguments.
     parser = argparse.ArgumentParser(
-        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
-    parser.add_argument('BuildDir', help="Path to LLVM build directory")
-    parser.add_argument('IRFile', help="Path to the input IR file")
+        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument("BuildDir", help="Path to LLVM build directory")
+    parser.add_argument("IRFile", help="Path to the input IR file")
     args = parser.parse_args()
 
     # Check if the binaries exist.
     build_dir = check_path(args.BuildDir)
     ir_file = check_path(args.IRFile)
-    llc_bin = check_bin(build_dir, 'llc')
-    opt_bin = check_bin(build_dir, 'opt')
-    bugpoint_bin = check_bin(build_dir, 'bugpoint')
+    llc_bin = check_bin(build_dir, "llc")
+    opt_bin = check_bin(build_dir, "opt")
+    bugpoint_bin = check_bin(build_dir, "bugpoint")
 
     # Run llc to see if GlobalISel fails.
-    log('Running llc...')
+    log("Running llc...")
     res = run_llc(llc_bin, ir_file)
     if res == 0:
         log_err("Expected failure")
         raise
     hr()
-    log('GlobalISel failed, {}: {}.'.format(res[0], res[1]))
+    log("GlobalISel failed, {}: {}.".format(res[0], res[1]))
     tmp = tempfile.NamedTemporaryFile()
-    log('Writing error to {} for bugpoint.'.format(tmp.name))
-    tmp.write(';'.join(res))
+    log("Writing error to {} for bugpoint.".format(tmp.name))
+    tmp.write(";".join(res))
     tmp.flush()
     hr()
 
     # Run bugpoint.
-    log('Running bugpoint...')
+    log("Running bugpoint...")
     run_bugpoint(bugpoint_bin, llc_bin, opt_bin, tmp.name, ir_file)
     hr()
-    log('Done!')
+    log("Done!")
     hr()
-    output_file = 'bugpoint-reduced-simplified.bc'
-    log('Run llvm-dis to disassemble the output:')
-    log('$ {}/bin/llvm-dis -o - {}'.format(build_dir, output_file))
-    log('Run llc to reproduce the problem:')
-    log('$ {}/bin/llc -o - -global-isel '
-        '-pass-remarks-missed=gisel {}'.format(build_dir, output_file))
+    output_file = "bugpoint-reduced-simplified.bc"
+    log("Run llvm-dis to disassemble the output:")
+    log("$ {}/bin/llvm-dis -o - {}".format(build_dir, output_file))
+    log("Run llc to reproduce the problem:")
+    log(
+        "$ {}/bin/llc -o - -global-isel "
+        "-pass-remarks-missed=gisel {}".format(build_dir, output_file)
+    )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

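The re_err pattern above is what classifies a GlobalISel failure. Applied to a sample llc diagnostic (the sample text is illustrative, not from a real run):

    import re

    re_err = re.compile(
        r"LLVM ERROR: ([a-z\s]+):.*(G_INTRINSIC[_A-Z]* <intrinsic:@[a-zA-Z0-9\.]+>|G_[A-Z_]+)"
    )
    err = "LLVM ERROR: unable to legalize instruction: %2:_(s64) = G_UDIV %0:_, %1:_"
    m = re_err.match(err)
    print(m.group(1), m.group(2))  # unable to legalize instruction G_UDIV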
diff  --git a/llvm/utils/check_ninja_deps.py b/llvm/utils/check_ninja_deps.py
index d19c470d21204..e6b5b1ce008f7 100755
--- a/llvm/utils/check_ninja_deps.py
+++ b/llvm/utils/check_ninja_deps.py
@@ -58,6 +58,7 @@
 import subprocess
 import pygraphviz
 
+
 def toposort(g):
     """Topologically sort a graph.
 
@@ -88,7 +89,8 @@ def toposort(g):
                 # If that counter reaches zero, w is ready to output.
                 ready.add(w)
 
-def ancestors(g, translate = lambda x: x):
+
+def ancestors(g, translate=lambda x: x):
     """Form the set of ancestors for each vertex of a graph.
 
     The input g is a pygraphviz graph object representing a DAG. The function
@@ -107,7 +109,7 @@ def ancestors(g, translate = lambda x: x):
         vm = translate(v)
 
         # Make up a[v], based on a[predecessors of v].
-        a[v] = {vm} # include v itself
+        a[v] = {vm}  # include v itself
         for w in g.in_neighbors(v):
             a[v].update(a[w])
 
@@ -115,14 +117,16 @@ def ancestors(g, translate = lambda x: x):
         # doesn't get the trivial dependency of v on itself.
+        yield vm, a[v].difference({vm})
 
+
 def main():
     parser = argparse.ArgumentParser(
-        description='Find missing formal dependencies on generated include '
-        'files in a build.ninja file.')
-    parser.add_argument("-C", "--build-dir",
-                        help="Build directory (default cwd)")
-    parser.add_argument("-f", "--build-file",
-                        help="Build directory (default build.ninja)")
+        description="Find missing formal dependencies on generated include "
+        "files in a build.ninja file."
+    )
+    parser.add_argument("-C", "--build-dir", help="Build directory (default cwd)")
+    parser.add_argument(
+        "-f", "--build-file", help="Build directory (default build.ninja)"
+    )
     args = parser.parse_args()
 
     errs = 0
@@ -134,8 +138,9 @@ def main():
         ninja_prefix.extend(["-f", args.build_file])
 
     # Get the formal dependency graph and decode it using pygraphviz.
-    g = pygraphviz.AGraph(subprocess.check_output(
-        ninja_prefix + ["-t", "graph"]).decode("UTF-8"))
+    g = pygraphviz.AGraph(
+        subprocess.check_output(ninja_prefix + ["-t", "graph"]).decode("UTF-8")
+    )
 
     # Helper function to ask for the label of a vertex, which is where ninja's
     # Graphviz output keeps the actual file name of the target.
@@ -153,8 +158,11 @@ def main():
     # Fetch the cached dependency data and check it against our formal ancestry
     # data.
     currtarget = None
-    for line in (subprocess.check_output(ninja_prefix + ["-t", "deps"])
-                 .decode("UTF-8").splitlines()):
+    for line in (
+        subprocess.check_output(ninja_prefix + ["-t", "deps"])
+        .decode("UTF-8")
+        .splitlines()
+    ):
         # ninja -t deps output consists of stanzas of the following form,
         # separated by a blank line:
         #
@@ -176,10 +184,15 @@ def main():
             # cache is not cleared when build.ninja changes, so it can contain
             # stale data from targets that existed only in past builds in the
             # same directory.
-            if (dep in targets and currtarget in deps and
-                dep not in deps[currtarget]):
-                print("error:", currtarget, "requires", dep,
-                      "but has no dependency on it", file=sys.stderr)
+            if dep in targets and currtarget in deps and dep not in deps[currtarget]:
+                print(
+                    "error:",
+                    currtarget,
+                    "requires",
+                    dep,
+                    "but has no dependency on it",
+                    file=sys.stderr,
+                )
                 errs += 1
         elif ":" in line:
             currtarget = line.split(":", 1)[0]
@@ -187,5 +200,6 @@ def main():
     if errs:
         sys.exit("{:d} errors found".format(errs))
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

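toposort() above is Kahn's algorithm driven by an in-degree counter over the pygraphviz graph. The same idea on a plain adjacency dict, for reference:

    from collections import deque

    def toposort(succs):
        # succs: {vertex: iterable of successor vertices}
        indegree = {v: 0 for v in succs}
        for v in succs:
            for w in succs[v]:
                indegree[w] = indegree.get(w, 0) + 1
        ready = deque(v for v, d in indegree.items() if d == 0)
        while ready:
            v = ready.popleft()
            yield v
            # Retire v's out-edges; successors with no remaining
            # predecessors become ready.
            for w in succs.get(v, ()):
                indegree[w] -= 1
                if indegree[w] == 0:
                    ready.append(w)

    print(list(toposort({"a": ["b", "c"], "b": ["c"], "c": []})))
    # ['a', 'b', 'c']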
diff  --git a/llvm/utils/chunk-print-before-all.py b/llvm/utils/chunk-print-before-all.py
index b5756c59c2719..fe0eaaea1c20d 100755
--- a/llvm/utils/chunk-print-before-all.py
+++ b/llvm/utils/chunk-print-before-all.py
@@ -1,9 +1,9 @@
 #!/usr/bin/env python
 
-# Given a -print-before-all and/or -print-after-all -print-module-scope log from 
-# an opt invocation, chunk it into a series of individual IR files, one for each 
-# pass invocation. If the log ends with an obvious stack trace, try to split off 
-# a separate "crashinfo.txt" file leaving only the valid input IR in the last 
+# Given a -print-before-all and/or -print-after-all -print-module-scope log from
+# an opt invocation, chunk it into a series of individual IR files, one for each
+# pass invocation. If the log ends with an obvious stack trace, try to split off
+# a separate "crashinfo.txt" file leaving only the valid input IR in the last
 # chunk. Files are written to current working directory.
 
 import sys
@@ -14,8 +14,9 @@
 # This function gets the pass name from the following line:
 # *** IR Dump Before/After PASS_NAME... ***
 def get_pass_name(line, prefix):
-    short_line = line[line.find(prefix) + len(prefix) + 1:]
-    return re.split(' |<', short_line)[0]
+    short_line = line[line.find(prefix) + len(prefix) + 1 :]
+    return re.split(" |<", short_line)[0]
+
 
 def print_chunk(lines, prefix, pass_name):
     global chunk_id
@@ -25,6 +26,7 @@ def print_chunk(lines, prefix, pass_name):
     with open(fname, "w") as f:
         f.writelines(lines)
 
+
 is_dump = False
 cur = []
 for line in sys.stdin:

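get_pass_name() above just slices everything after the Before/After keyword up to the first space or '<'. For example (sample dump header; the exact format can vary by pass manager):

    import re

    def get_pass_name(line, prefix):
        short_line = line[line.find(prefix) + len(prefix) + 1 :]
        return re.split(" |<", short_line)[0]

    line = "*** IR Dump Before LoopVectorizePass on foo ***"
    print(get_pass_name(line, "Before"))  # LoopVectorizePass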
diff  --git a/llvm/utils/collect_and_build_with_pgo.py b/llvm/utils/collect_and_build_with_pgo.py
index 0851b91b0283c..4c30dc876411e 100755
--- a/llvm/utils/collect_and_build_with_pgo.py
+++ b/llvm/utils/collect_and_build_with_pgo.py
@@ -36,7 +36,7 @@
 # in to build more things, if you'd like.
 def _run_benchmark(env, out_dir, include_debug_info):
     """The 'benchmark' we run to generate profile data."""
-    target_dir = env.output_subdir('instrumentation_run')
+    target_dir = env.output_subdir("instrumentation_run")
 
     # `check-llvm` and `check-clang` are cheap ways to increase coverage. The
     # former lets us touch on the non-x86 backends a bit if configured, and the
@@ -44,34 +44,34 @@ def _run_benchmark(env, out_dir, include_debug_info):
     # paths a fair amount, though the `if (stuff_is_broken) { diag() ... }`
     # branches should still heavily be weighted in the not-taken direction,
     # since we built all of LLVM/etc).
-    _build_things_in(env, out_dir, what=['check-llvm', 'check-clang'])
+    _build_things_in(env, out_dir, what=["check-llvm", "check-clang"])
 
     # Building tblgen gets us coverage; don't skip it. (out_dir may also not
     # have them anyway, but that's less of an issue)
-    cmake = _get_cmake_invocation_for_bootstrap_from(
-        env, out_dir, skip_tablegens=False)
+    cmake = _get_cmake_invocation_for_bootstrap_from(env, out_dir, skip_tablegens=False)
 
     if include_debug_info:
-        cmake.add_flag('CMAKE_BUILD_TYPE', 'RelWithDebInfo')
+        cmake.add_flag("CMAKE_BUILD_TYPE", "RelWithDebInfo")
 
     _run_fresh_cmake(env, cmake, target_dir)
 
     # Just build all the things. The more data we have, the better.
-    _build_things_in(env, target_dir, what=['all'])
+    _build_things_in(env, target_dir, what=["all"])
+
 
 ### Script
 
 
 class CmakeInvocation:
-    _cflags = ['CMAKE_C_FLAGS', 'CMAKE_CXX_FLAGS']
+    _cflags = ["CMAKE_C_FLAGS", "CMAKE_CXX_FLAGS"]
     _ldflags = [
-        'CMAKE_EXE_LINKER_FLAGS',
-        'CMAKE_MODULE_LINKER_FLAGS',
-        'CMAKE_SHARED_LINKER_FLAGS',
+        "CMAKE_EXE_LINKER_FLAGS",
+        "CMAKE_MODULE_LINKER_FLAGS",
+        "CMAKE_SHARED_LINKER_FLAGS",
     ]
 
     def __init__(self, cmake, maker, cmake_dir):
-        self._prefix = [cmake, '-G', maker, cmake_dir]
+        self._prefix = [cmake, "-G", maker, cmake_dir]
 
         # Map of str -> (list|str).
         self._flags = {}
@@ -92,7 +92,7 @@ def add_flag(self, key, value, allow_overwrites=True):
             return
 
         if not allow_overwrites:
-            raise ValueError('Invalid overwrite of %s requested' % key)
+            raise ValueError("Invalid overwrite of %s requested" % key)
 
         self._flags[key] = value
 
@@ -115,18 +115,17 @@ def to_args(self):
                 # nothing to add, don't.
                 if not value:
                     continue
-                value = ' '.join(value)
+                value = " ".join(value)
 
-            arg = '-D' + key
-            if value != '':
-                arg += '=' + value
+            arg = "-D" + key
+            if value != "":
+                arg += "=" + value
             args.append(arg)
         return args
 
 
 class Env:
-    def __init__(self, llvm_dir, use_make, output_dir, default_cmake_args,
-                 dry_run):
+    def __init__(self, llvm_dir, use_make, output_dir, default_cmake_args, dry_run):
         self.llvm_dir = llvm_dir
         self.use_make = use_make
         self.output_dir = output_dir
@@ -137,35 +136,30 @@ def get_default_cmake_args_kv(self):
         return self.default_cmake_args.items()
 
     def get_cmake_maker(self):
-        return 'Ninja' if not self.use_make else 'Unix Makefiles'
+        return "Ninja" if not self.use_make else "Unix Makefiles"
 
     def get_make_command(self):
         if self.use_make:
-            return ['make', '-j{}'.format(multiprocessing.cpu_count())]
-        return ['ninja']
+            return ["make", "-j{}".format(multiprocessing.cpu_count())]
+        return ["ninja"]
 
     def output_subdir(self, name):
         return os.path.join(self.output_dir, name)
 
     def has_llvm_subproject(self, name):
-        if name == 'compiler-rt':
-            subdir = '../compiler-rt'
-        elif name == 'clang':
-            subdir = '../clang'
+        if name == "compiler-rt":
+            subdir = "../compiler-rt"
+        elif name == "clang":
+            subdir = "../clang"
         else:
-            raise ValueError('Unknown subproject: %s' % name)
+            raise ValueError("Unknown subproject: %s" % name)
 
         return os.path.isdir(os.path.join(self.llvm_dir, subdir))
 
     # Note that we don't allow capturing stdout/stderr. This works quite nicely
     # with dry_run.
-    def run_command(self,
-                    cmd,
-                    cwd=None,
-                    check=False,
-                    silent_unless_error=False):
-        print(
-            'Running `%s` in %s' % (cmd, shlex.quote(cwd or os.getcwd())))
+    def run_command(self, cmd, cwd=None, check=False, silent_unless_error=False):
+        print("Running `%s` in %s" % (cmd, shlex.quote(cwd or os.getcwd())))
 
         if self.dry_run:
             return
@@ -178,11 +172,8 @@ def run_command(self,
         # Don't use subprocess.run because it's >= py3.5 only, and it's not too
         # much extra effort to get what it gives us anyway.
         popen = subprocess.Popen(
-            cmd,
-            stdin=subprocess.DEVNULL,
-            stdout=stdout,
-            stderr=stderr,
-            cwd=cwd)
+            cmd, stdin=subprocess.DEVNULL, stdout=stdout, stderr=stderr, cwd=cwd
+        )
         stdout, _ = popen.communicate()
         return_code = popen.wait(timeout=0)
 
@@ -190,32 +181,33 @@ def run_command(self,
             return
 
         if silent_unless_error:
-            print(stdout.decode('utf-8', 'ignore'))
+            print(stdout.decode("utf-8", "ignore"))
 
         if check:
             raise subprocess.CalledProcessError(
-                returncode=return_code, cmd=cmd, output=stdout, stderr=None)
+                returncode=return_code, cmd=cmd, output=stdout, stderr=None
+            )
 
 
 def _get_default_cmake_invocation(env):
     inv = CmakeInvocation(
-        cmake='cmake', maker=env.get_cmake_maker(), cmake_dir=env.llvm_dir)
+        cmake="cmake", maker=env.get_cmake_maker(), cmake_dir=env.llvm_dir
+    )
     for key, value in env.get_default_cmake_args_kv():
         inv.add_new_flag(key, value)
     return inv
 
 
-def _get_cmake_invocation_for_bootstrap_from(env, out_dir,
-                                             skip_tablegens=True):
-    clang = os.path.join(out_dir, 'bin', 'clang')
+def _get_cmake_invocation_for_bootstrap_from(env, out_dir, skip_tablegens=True):
+    clang = os.path.join(out_dir, "bin", "clang")
     cmake = _get_default_cmake_invocation(env)
-    cmake.add_new_flag('CMAKE_C_COMPILER', clang)
-    cmake.add_new_flag('CMAKE_CXX_COMPILER', clang + '++')
+    cmake.add_new_flag("CMAKE_C_COMPILER", clang)
+    cmake.add_new_flag("CMAKE_CXX_COMPILER", clang + "++")
 
     # We often get no value out of building new tblgens; the previous build
     # should have them. It's still correct to build them, just slower.
     def add_tablegen(key, binary):
-        path = os.path.join(out_dir, 'bin', binary)
+        path = os.path.join(out_dir, "bin", binary)
 
         # Check that this exists, since the user's allowed to specify their own
         # stage1 directory (which is generally where we'll source everything
@@ -224,8 +216,8 @@ def add_tablegen(key, binary):
             cmake.add_new_flag(key, path)
 
     if skip_tablegens:
-        add_tablegen('LLVM_TABLEGEN', 'llvm-tblgen')
-        add_tablegen('CLANG_TABLEGEN', 'clang-tblgen')
+        add_tablegen("LLVM_TABLEGEN", "llvm-tblgen")
+        add_tablegen("CLANG_TABLEGEN", "clang-tblgen")
 
     return cmake
 
@@ -245,146 +237,160 @@ def _run_fresh_cmake(env, cmake, target_dir):
         os.makedirs(target_dir, mode=0o755)
 
     cmake_args = cmake.to_args()
-    env.run_command(
-        cmake_args, cwd=target_dir, check=True, silent_unless_error=True)
+    env.run_command(cmake_args, cwd=target_dir, check=True, silent_unless_error=True)
 
 
 def _build_stage1_clang(env):
-    target_dir = env.output_subdir('stage1')
+    target_dir = env.output_subdir("stage1")
     cmake = _get_default_cmake_invocation(env)
     _run_fresh_cmake(env, cmake, target_dir)
-    _build_things_in(env, target_dir, what=['clang', 'llvm-profdata', 'profile'])
+    _build_things_in(env, target_dir, what=["clang", "llvm-profdata", "profile"])
     return target_dir
 
 
-def _generate_instrumented_clang_profile(env, stage1_dir, profile_dir,
-                                         output_file):
-    llvm_profdata = os.path.join(stage1_dir, 'bin', 'llvm-profdata')
+def _generate_instrumented_clang_profile(env, stage1_dir, profile_dir, output_file):
+    llvm_profdata = os.path.join(stage1_dir, "bin", "llvm-profdata")
     if env.dry_run:
-        profiles = [os.path.join(profile_dir, '*.profraw')]
+        profiles = [os.path.join(profile_dir, "*.profraw")]
     else:
         profiles = [
-            os.path.join(profile_dir, f) for f in os.listdir(profile_dir)
-            if f.endswith('.profraw')
+            os.path.join(profile_dir, f)
+            for f in os.listdir(profile_dir)
+            if f.endswith(".profraw")
         ]
-    cmd = [llvm_profdata, 'merge', '-output=' + output_file] + profiles
+    cmd = [llvm_profdata, "merge", "-output=" + output_file] + profiles
     env.run_command(cmd, check=True)
 
 
 def _build_instrumented_clang(env, stage1_dir):
     assert os.path.isabs(stage1_dir)
 
-    target_dir = os.path.join(env.output_dir, 'instrumented')
+    target_dir = os.path.join(env.output_dir, "instrumented")
     cmake = _get_cmake_invocation_for_bootstrap_from(env, stage1_dir)
-    cmake.add_new_flag('LLVM_BUILD_INSTRUMENTED', 'IR')
+    cmake.add_new_flag("LLVM_BUILD_INSTRUMENTED", "IR")
 
     # libcxx's configure step messes with our link order: we'll link
     # libclang_rt.profile after libgcc, and the former requires atexit from the
     # latter. So, configure checks fail.
     #
     # Since we don't need libcxx or compiler-rt anyway, just disable them.
-    cmake.add_new_flag('LLVM_BUILD_RUNTIME', 'No')
+    cmake.add_new_flag("LLVM_BUILD_RUNTIME", "No")
 
     _run_fresh_cmake(env, cmake, target_dir)
-    _build_things_in(env, target_dir, what=['clang', 'lld'])
+    _build_things_in(env, target_dir, what=["clang", "lld"])
 
-    profiles_dir = os.path.join(target_dir, 'profiles')
+    profiles_dir = os.path.join(target_dir, "profiles")
     return target_dir, profiles_dir
 
 
 def _build_optimized_clang(env, stage1_dir, profdata_file):
     if not env.dry_run and not os.path.exists(profdata_file):
-        raise ValueError('Looks like the profdata file at %s doesn\'t exist' %
-                         profdata_file)
+        raise ValueError(
+            "Looks like the profdata file at %s doesn't exist" % profdata_file
+        )
 
-    target_dir = os.path.join(env.output_dir, 'optimized')
+    target_dir = os.path.join(env.output_dir, "optimized")
     cmake = _get_cmake_invocation_for_bootstrap_from(env, stage1_dir)
-    cmake.add_new_flag('LLVM_PROFDATA_FILE', os.path.abspath(profdata_file))
+    cmake.add_new_flag("LLVM_PROFDATA_FILE", os.path.abspath(profdata_file))
 
     # We'll get complaints about hash mismatches in `main` in tools/etc. Ignore
     # it.
-    cmake.add_cflags(['-Wno-backend-plugin'])
+    cmake.add_cflags(["-Wno-backend-plugin"])
     _run_fresh_cmake(env, cmake, target_dir)
-    _build_things_in(env, target_dir, what=['clang'])
+    _build_things_in(env, target_dir, what=["clang"])
     return target_dir
 
 
-Args = collections.namedtuple('Args', [
-    'do_optimized_build',
-    'include_debug_info',
-    'profile_location',
-    'stage1_dir',
-])
+Args = collections.namedtuple(
+    "Args",
+    [
+        "do_optimized_build",
+        "include_debug_info",
+        "profile_location",
+        "stage1_dir",
+    ],
+)
 
 
 def _parse_args():
     parser = argparse.ArgumentParser(
-        description='Builds LLVM and Clang with instrumentation, collects '
-        'instrumentation profiles for them, and (optionally) builds things '
-        'with these PGO profiles. By default, it\'s assumed that you\'re '
-        'running this from your LLVM root, and all build artifacts will be '
-        'saved to $PWD/out.')
+        description="Builds LLVM and Clang with instrumentation, collects "
+        "instrumentation profiles for them, and (optionally) builds things "
+        "with these PGO profiles. By default, it's assumed that you're "
+        "running this from your LLVM root, and all build artifacts will be "
+        "saved to $PWD/out."
+    )
     parser.add_argument(
-        '--cmake-extra-arg',
-        action='append',
+        "--cmake-extra-arg",
+        action="append",
         default=[],
-        help='an extra arg to pass to all cmake invocations. Note that this '
-        'is interpreted as a -D argument, e.g. --cmake-extra-arg FOO=BAR will '
-        'be passed as -DFOO=BAR. This may be specified multiple times.')
+        help="an extra arg to pass to all cmake invocations. Note that this "
+        "is interpreted as a -D argument, e.g. --cmake-extra-arg FOO=BAR will "
+        "be passed as -DFOO=BAR. This may be specified multiple times.",
+    )
     parser.add_argument(
-        '--dry-run',
-        action='store_true',
-        help='print commands instead of running them')
+        "--dry-run", action="store_true", help="print commands instead of running them"
+    )
     parser.add_argument(
-        '--llvm-dir',
-        default='.',
-        help='directory containing an LLVM checkout (default: $PWD)')
+        "--llvm-dir",
+        default=".",
+        help="directory containing an LLVM checkout (default: $PWD)",
+    )
     parser.add_argument(
-        '--no-optimized-build',
-        action='store_true',
-        help='disable the final, PGO-optimized build')
+        "--no-optimized-build",
+        action="store_true",
+        help="disable the final, PGO-optimized build",
+    )
     parser.add_argument(
-        '--out-dir',
-        help='directory to write artifacts to (default: $llvm_dir/out)')
+        "--out-dir", help="directory to write artifacts to (default: $llvm_dir/out)"
+    )
     parser.add_argument(
-        '--profile-output',
-        help='where to output the profile (default is $out/pgo_profile.prof)')
+        "--profile-output",
+        help="where to output the profile (default is $out/pgo_profile.prof)",
+    )
     parser.add_argument(
-        '--stage1-dir',
-        help='instead of having an initial build of everything, use the given '
-        'directory. It is expected that this directory will have clang, '
-        'llvm-profdata, and the appropriate libclang_rt.profile already built')
+        "--stage1-dir",
+        help="instead of having an initial build of everything, use the given "
+        "directory. It is expected that this directory will have clang, "
+        "llvm-profdata, and the appropriate libclang_rt.profile already built",
+    )
     parser.add_argument(
-        '--use-debug-info-in-benchmark',
-        action='store_true',
-        help='use a regular build instead of RelWithDebInfo in the benchmark. '
-        'This increases benchmark execution time and disk space requirements, '
-        'but gives more coverage over debuginfo bits in LLVM and clang.')
+        "--use-debug-info-in-benchmark",
+        action="store_true",
+        help="use a regular build instead of RelWithDebInfo in the benchmark. "
+        "This increases benchmark execution time and disk space requirements, "
+        "but gives more coverage over debuginfo bits in LLVM and clang.",
+    )
     parser.add_argument(
-        '--use-make',
-        action='store_true',
-        default=shutil.which('ninja') is None,
-        help='use Makefiles instead of ninja')
+        "--use-make",
+        action="store_true",
+        default=shutil.which("ninja") is None,
+        help="use Makefiles instead of ninja",
+    )
 
     args = parser.parse_args()
 
     llvm_dir = os.path.abspath(args.llvm_dir)
     if args.out_dir is None:
-        output_dir = os.path.join(llvm_dir, 'out')
+        output_dir = os.path.join(llvm_dir, "out")
     else:
         output_dir = os.path.abspath(args.out_dir)
 
-    extra_args = {'CMAKE_BUILD_TYPE': 'Release',
-                  'LLVM_ENABLE_PROJECTS': 'clang;compiler-rt;lld'}
+    extra_args = {
+        "CMAKE_BUILD_TYPE": "Release",
+        "LLVM_ENABLE_PROJECTS": "clang;compiler-rt;lld",
+    }
     for arg in args.cmake_extra_arg:
-        if arg.startswith('-D'):
+        if arg.startswith("-D"):
             arg = arg[2:]
-        elif arg.startswith('-'):
-            raise ValueError('Unknown not- -D arg encountered; you may need '
-                             'to tweak the source...')
-        split = arg.split('=', 1)
+        elif arg.startswith("-"):
+            raise ValueError(
+                "Unknown not- -D arg encountered; you may need "
+                "to tweak the source..."
+            )
+        split = arg.split("=", 1)
         if len(split) == 1:
-            key, val = split[0], ''
+            key, val = split[0], ""
         else:
             key, val = split
         extra_args[key] = val
@@ -400,7 +406,7 @@ def _parse_args():
     if args.profile_output is not None:
         profile_location = args.profile_output
     else:
-        profile_location = os.path.join(env.output_dir, 'pgo_profile.prof')
+        profile_location = os.path.join(env.output_dir, "pgo_profile.prof")
 
     result_args = Args(
         do_optimized_build=not args.no_optimized_build,
@@ -419,26 +425,26 @@ def _looks_like_llvm_dir(directory):
 
     contents = set(os.listdir(directory))
     expected_contents = [
-        'CODE_OWNERS.TXT',
-        'cmake',
-        'docs',
-        'include',
-        'utils',
+        "CODE_OWNERS.TXT",
+        "cmake",
+        "docs",
+        "include",
+        "utils",
     ]
 
     if not all(c in contents for c in expected_contents):
         return False
 
     try:
-        include_listing = os.listdir(os.path.join(directory, 'include'))
+        include_listing = os.listdir(os.path.join(directory, "include"))
     except NotADirectoryError:
         return False
 
-    return 'llvm' in include_listing
+    return "llvm" in include_listing
 
 
 def _die(*args, **kwargs):
-    kwargs['file'] = sys.stderr
+    kwargs["file"] = sys.stderr
     print(*args, **kwargs)
     sys.exit(1)
 
@@ -447,37 +453,36 @@ def _main():
     env, args = _parse_args()
 
     if not _looks_like_llvm_dir(env.llvm_dir):
-        _die('Looks like %s isn\'t an LLVM directory; please see --help' %
-             env.llvm_dir)
-    if not env.has_llvm_subproject('clang'):
-        _die('Need a clang checkout at tools/clang')
-    if not env.has_llvm_subproject('compiler-rt'):
-        _die('Need a compiler-rt checkout at projects/compiler-rt')
+        _die("Looks like %s isn't an LLVM directory; please see --help" % env.llvm_dir)
+    if not env.has_llvm_subproject("clang"):
+        _die("Need a clang checkout at tools/clang")
+    if not env.has_llvm_subproject("compiler-rt"):
+        _die("Need a compiler-rt checkout at projects/compiler-rt")
 
     def status(*args):
         print(*args, file=sys.stderr)
 
     if args.stage1_dir is None:
-        status('*** Building stage1 clang...')
+        status("*** Building stage1 clang...")
         stage1_out = _build_stage1_clang(env)
     else:
         stage1_out = args.stage1_dir
 
-    status('*** Building instrumented clang...')
+    status("*** Building instrumented clang...")
     instrumented_out, profile_dir = _build_instrumented_clang(env, stage1_out)
-    status('*** Running profdata benchmarks...')
+    status("*** Running profdata benchmarks...")
     _run_benchmark(env, instrumented_out, args.include_debug_info)
-    status('*** Generating profile...')
-    _generate_instrumented_clang_profile(env, stage1_out, profile_dir,
-                                         args.profile_location)
+    status("*** Generating profile...")
+    _generate_instrumented_clang_profile(
+        env, stage1_out, profile_dir, args.profile_location
+    )
 
-    print('Final profile:', args.profile_location)
+    print("Final profile:", args.profile_location)
     if args.do_optimized_build:
-        status('*** Building PGO-optimized binaries...')
-        optimized_out = _build_optimized_clang(env, stage1_out,
-                                               args.profile_location)
-        print('Final build directory:', optimized_out)
+        status("*** Building PGO-optimized binaries...")
+        optimized_out = _build_optimized_clang(env, stage1_out, args.profile_location)
+        print("Final build directory:", optimized_out)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     _main()

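CmakeInvocation.to_args() above renders its flag map into -D arguments, joining list-valued flags (the cflags/ldflags accumulators) with spaces first. A hand-worked sketch of that rendering:

    flags = {
        "CMAKE_BUILD_TYPE": "Release",
        "CMAKE_C_FLAGS": ["-Wno-backend-plugin"],  # list-valued: joined with spaces
        "LLVM_BUILD_RUNTIME": "No",
    }
    args = []
    for key, value in flags.items():
        if isinstance(value, list):
            value = " ".join(value)
        # Bare -DKEY when the value is empty, -DKEY=VALUE otherwise.
        args.append("-D" + key + ("=" + value if value != "" else ""))
    print(args)
    # ['-DCMAKE_BUILD_TYPE=Release', '-DCMAKE_C_FLAGS=-Wno-backend-plugin',
    #  '-DLLVM_BUILD_RUNTIME=No']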
diff  --git a/llvm/utils/convert-constraint-log-to-z3.py b/llvm/utils/convert-constraint-log-to-z3.py
index 77b0a3d95b6d4..a3c33f2ef4599 100755
--- a/llvm/utils/convert-constraint-log-to-z3.py
+++ b/llvm/utils/convert-constraint-log-to-z3.py
@@ -34,21 +34,23 @@
 
 def main():
     parser = argparse.ArgumentParser(
-        description='Convert constraint log to script to verify using Z3.')
-    parser.add_argument('log_file', metavar='log', type=str,
-                        help='constraint-system log file')
+        description="Convert constraint log to script to verify using Z3."
+    )
+    parser.add_argument(
+        "log_file", metavar="log", type=str, help="constraint-system log file"
+    )
     args = parser.parse_args()
 
-    content = ''
-    with open(args.log_file, 'rt') as f:
+    content = ""
+    with open(args.log_file, "rt") as f:
         content = f.read()
 
-    groups = content.split('---')
-    var_re = re.compile('x\d+')
+    groups = content.split("---")
+    var_re = re.compile("x\d+")
 
-    print('from z3 import *')
+    print("from z3 import *")
     for group in groups:
-        constraints = [g.strip() for g in group.split('\n') if g.strip() != '']
+        constraints = [g.strip() for g in group.split("\n") if g.strip() != ""]
         variables = set()
         for c in constraints[:-1]:
             for m in var_re.finditer(c):
@@ -57,13 +59,13 @@ def main():
             continue
         for v in variables:
             print('{} = Int("{}")'.format(v, v))
-        print('s = Solver()')
+        print("s = Solver()")
         for c in constraints[:-1]:
-            print('s.add({})'.format(c))
+            print("s.add({})".format(c))
         expected = constraints[-1].strip()
-        print('assert(s.check() == {})'.format(expected))
+        print("assert(s.check() == {})".format(expected))
     print('print("all checks passed")')
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

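For context, the script above emits a standalone Python program that replays a constraint log through Z3. A hand-written sample of the kind of output it generates, assuming the `z3-solver` package provides the `z3` module imported here:

```
from z3 import *

# One constraint group, shaped like the print loop above produces.
x1 = Int("x1")
x2 = Int("x2")
s = Solver()
s.add(x1 >= 0)
s.add(x2 <= x1)
assert s.check() == sat
print("all checks passed")
```
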
diff --git a/llvm/utils/create_ladder_graph.py b/llvm/utils/create_ladder_graph.py
index a5946ff24af5a..f11aaf39bfa96 100755
--- a/llvm/utils/create_ladder_graph.py
+++ b/llvm/utils/create_ladder_graph.py
@@ -13,33 +13,37 @@
 from __future__ import print_function
 
 import argparse
+
+
 def main():
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('rungs', type=int,
-                      help="Number of ladder rungs. Must be a multiple of 2")
-  args = parser.parse_args()
-  if (args.rungs % 2) != 0:
-    print("Rungs must be a multiple of 2")
-    return
-  print("int ladder(int *foo, int *bar, int x) {")
-  rung1 = range(0, args.rungs, 2)
-  rung2 = range(1, args.rungs, 2)
-  for i in rung1:
-    print("rung1%d:" % i)
-    print("*foo = x++;")
-    if i != rung1[-1]:
-      print("if (*bar) goto rung1%d;" % (i+2))
-      print("else goto rung2%d;" % (i+1))
-    else:
-      print("goto rung2%d;" % (i+1))
-  for i in rung2:
-    print("rung2%d:" % i)
-    print("*foo = x++;")
-    if i != rung2[-1]:
-      print("goto rung2%d;" % (i+2))
-    else:
-      print("return *foo;")
-  print("}")
-
-if __name__ == '__main__':
-  main()
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "rungs", type=int, help="Number of ladder rungs. Must be a multiple of 2"
+    )
+    args = parser.parse_args()
+    if (args.rungs % 2) != 0:
+        print("Rungs must be a multiple of 2")
+        return
+    print("int ladder(int *foo, int *bar, int x) {")
+    rung1 = range(0, args.rungs, 2)
+    rung2 = range(1, args.rungs, 2)
+    for i in rung1:
+        print("rung1%d:" % i)
+        print("*foo = x++;")
+        if i != rung1[-1]:
+            print("if (*bar) goto rung1%d;" % (i + 2))
+            print("else goto rung2%d;" % (i + 1))
+        else:
+            print("goto rung2%d;" % (i + 1))
+    for i in rung2:
+        print("rung2%d:" % i)
+        print("*foo = x++;")
+        if i != rung2[-1]:
+            print("goto rung2%d;" % (i + 2))
+        else:
+            print("return *foo;")
+    print("}")
+
+
+if __name__ == "__main__":
+    main()

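The only changes in this file are black's switch from two-space to four-space indentation and spaces around binary operators; the generated C output is untouched. A hypothetical usage sketch (the repository-relative path and interpreter name are illustrative):

```
import subprocess

# Generate a 4-rung ladder function in C and print it.
out = subprocess.check_output(
    ["python", "llvm/utils/create_ladder_graph.py", "4"], text=True
)
print(out)
```
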
diff --git a/llvm/utils/demangle_tree.py b/llvm/utils/demangle_tree.py
index 00de72b2a18d7..9d9603e63b832 100644
--- a/llvm/utils/demangle_tree.py
+++ b/llvm/utils/demangle_tree.py
@@ -18,21 +18,23 @@
 
 args = None
 
+
 def parse_line(line):
-    question = line.find('?')
+    question = line.find("?")
     if question == -1:
         return None, None
 
-    open_paren = line.find('(', question)
+    open_paren = line.find("(", question)
     if open_paren == -1:
         return None, None
-    close_paren = line.rfind(')', open_paren)
+    close_paren = line.rfind(")", open_paren)
     if open_paren == -1:
         return None, None
-    mangled = line[question : open_paren]
-    demangled = line[open_paren+1 : close_paren]
+    mangled = line[question:open_paren]
+    demangled = line[open_paren + 1 : close_paren]
     return mangled.strip(), demangled.strip()
 
+
 class Result(object):
     def __init__(self):
         self.crashed = []
@@ -41,6 +43,7 @@ def __init__(self):
         self.errors = set()
         self.nfiles = 0
 
+
 class MapContext(object):
     def __init__(self):
         self.rincomplete = None
@@ -48,18 +51,19 @@ def __init__(self):
         self.pending_objs = []
         self.npending = 0
 
+
 def process_file(path, objdump):
     r = Result()
     r.file = path
 
-    popen_args = [objdump, '-t', '-demangle', path]
+    popen_args = [objdump, "-t", "-demangle", path]
     p = subprocess.Popen(popen_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stdout, stderr = p.communicate()
     if p.returncode != 0:
         r.crashed = [r.file]
         return r
 
-    output = stdout.decode('utf-8')
+    output = stdout.decode("utf-8")
 
     for line in output.splitlines():
         mangled, demangled = parse_line(line)
@@ -70,15 +74,25 @@ def process_file(path, objdump):
             r.errors.add(mangled)
     return r
 
+
 def add_results(r1, r2):
     r1.crashed.extend(r2.crashed)
     r1.errors.update(r2.errors)
     r1.nsymbols += r2.nsymbols
     r1.nfiles += r2.nfiles
 
+
 def print_result_row(directory, result):
-    print("[{0} files, {1} crashes, {2} errors, {3} symbols]: '{4}'".format(
-        result.nfiles, len(result.crashed), len(result.errors), result.nsymbols, directory))
+    print(
+        "[{0} files, {1} crashes, {2} errors, {3} symbols]: '{4}'".format(
+            result.nfiles,
+            len(result.crashed),
+            len(result.errors),
+            result.nsymbols,
+            directory,
+        )
+    )
+
 
 def process_one_chunk(pool, chunk_size, objdump, context):
     objs = []
@@ -112,7 +126,7 @@ def process_one_chunk(pool, chunk_size, objdump, context):
 
         re.nfiles += ntaken
 
-    assert(len(objs) == chunk_size or context.npending == 0)
+    assert len(objs) == chunk_size or context.npending == 0
 
     copier = functools.partial(process_file, objdump=objdump)
     mapped_results = list(pool.map(copier, objs))
@@ -134,17 +148,18 @@ def process_one_chunk(pool, chunk_size, objdump, context):
         add_results(context.rcumulative, re)
         print_result_row(c, re)
 
+
 def process_pending_files(pool, chunk_size, objdump, context):
     while context.npending >= chunk_size:
         process_one_chunk(pool, chunk_size, objdump, context)
 
+
 def go():
     global args
 
     obj_dir = args.dir
-    extensions = args.extensions.split(',')
-    extensions = [x if x[0] == '.' else '.' + x for x in extensions]
-
+    extensions = args.extensions.split(",")
+    extensions = [x if x[0] == "." else "." + x for x in extensions]
 
     pool_size = 48
     pool = Pool(processes=pool_size)
@@ -178,7 +193,7 @@ def go():
             # `pool_size` tasks remaining.
             process_pending_files(pool, pool_size, args.objdump, context)
 
-        assert(context.npending < pool_size);
+        assert context.npending < pool_size
         process_one_chunk(pool, pool_size, args.objdump, context)
 
         total = context.rcumulative
@@ -186,43 +201,58 @@ def go():
         nsuccess = total.nsymbols - nfailed
         ncrashed = len(total.crashed)
 
-        if (nfailed > 0):
+        if nfailed > 0:
             print("Failures:")
             for m in sorted(total.errors):
                 print("  " + m)
-        if (ncrashed > 0):
+        if ncrashed > 0:
             print("Crashes:")
             for f in sorted(total.crashed):
                 print("  " + f)
         print("Summary:")
-        spct = float(nsuccess)/float(total.nsymbols)
-        fpct = float(nfailed)/float(total.nsymbols)
-        cpct = float(ncrashed)/float(nfiles)
+        spct = float(nsuccess) / float(total.nsymbols)
+        fpct = float(nfailed) / float(total.nsymbols)
+        cpct = float(ncrashed) / float(nfiles)
         print("Processed {0} object files.".format(nfiles))
-        print("{0}/{1} symbols successfully demangled ({2:.4%})".format(nsuccess, total.nsymbols, spct))
+        print(
+            "{0}/{1} symbols successfully demangled ({2:.4%})".format(
+                nsuccess, total.nsymbols, spct
+            )
+        )
         print("{0} symbols could not be demangled ({1:.4%})".format(nfailed, fpct))
         print("{0} files crashed while demangling ({1:.4%})".format(ncrashed, cpct))
-            
+
     except:
         traceback.print_exc()
 
     pool.close()
     pool.join()
 
-if __name__ == "__main__":
-    def_obj = 'obj' if sys.platform == 'win32' else 'o'
 
-    parser = argparse.ArgumentParser(description='Demangle all symbols in a tree of object files, looking for failures.')
-    parser.add_argument('dir', type=str, help='the root directory at which to start crawling')
-    parser.add_argument('--objdump', type=str, help='path to llvm-objdump.  If not specified ' +
-                        'the tool is located as if by `which llvm-objdump`.')
-    parser.add_argument('--extensions', type=str, default=def_obj,
-                        help='comma separated list of extensions to demangle (e.g. `o,obj`).  ' +
-                        'By default this will be `obj` on Windows and `o` otherwise.')
+if __name__ == "__main__":
+    def_obj = "obj" if sys.platform == "win32" else "o"
+
+    parser = argparse.ArgumentParser(
+        description="Demangle all symbols in a tree of object files, looking for failures."
+    )
+    parser.add_argument(
+        "dir", type=str, help="the root directory at which to start crawling"
+    )
+    parser.add_argument(
+        "--objdump",
+        type=str,
+        help="path to llvm-objdump.  If not specified "
+        + "the tool is located as if by `which llvm-objdump`.",
+    )
+    parser.add_argument(
+        "--extensions",
+        type=str,
+        default=def_obj,
+        help="comma separated list of extensions to demangle (e.g. `o,obj`).  "
+        + "By default this will be `obj` on Windows and `o` otherwise.",
+    )
 
     args = parser.parse_args()
 
-
     multiprocessing.freeze_support()
     go()
-

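Besides quote and wrapping changes, this file's hunks drop the redundant parentheses around `assert` conditions. The asserts above are single expressions, so the parens were merely stylistic; the sketch below, with made-up values, shows the pitfall the bare-assert style guards against:

```
nfiles = 0

# BUG for illustration: a parenthesized condition-plus-message is a
# 2-tuple, which is always truthy, so this assert can never fail.
assert (nfiles > 0, "no files processed")

# The bare form attaches the message correctly and fails when intended.
assert nfiles == 0, "bare assert behaves as expected"
```
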
diff --git a/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py b/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
index 1cfbf2b56c709..5fa569aec540d 100755
--- a/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
+++ b/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
@@ -16,183 +16,186 @@
 
 
 def main():
-  parser = ArgumentParser()
-  parser.add_argument(
-      "-v", "--verbose", action="store_true", help="enable debug logging")
-  parser.add_argument(
-      "-c",
-      "--check",
-      metavar="reference_file",
-      help="read checksums from reference_file and " +
-      "check they match checksums of llvm_path.")
-  parser.add_argument(
-      "--partial",
-      action="store_true",
-      help="ignore projects from reference_file " +
-      "that are not checked out in llvm_path.")
-  parser.add_argument(
-      "--multi_dir",
-      action="store_true",
-      help="indicates llvm_path contains llvm, checked out " +
-      "into multiple directories, as opposed to a " +
-      "typical single source tree checkout.")
-  parser.add_argument("llvm_path")
-
-  args = parser.parse_args()
-  if args.check is not None:
-    with open(args.check, "r") as f:
-      reference_checksums = ReadLLVMChecksums(f)
-  else:
-    reference_checksums = None
-
-  if args.verbose:
-    logging.basicConfig(level=logging.DEBUG)
-
-  llvm_projects = CreateLLVMProjects(not args.multi_dir)
-  checksums = ComputeLLVMChecksums(args.llvm_path, llvm_projects)
-
-  if reference_checksums is None:
-    WriteLLVMChecksums(checksums, sys.stdout)
-    sys.exit(0)
-
-  if not ValidateChecksums(reference_checksums, checksums, args.partial):
-    sys.stdout.write("Checksums differ.\nNew checksums:\n")
-    WriteLLVMChecksums(checksums, sys.stdout)
-    sys.stdout.write("Reference checksums:\n")
-    WriteLLVMChecksums(reference_checksums, sys.stdout)
-    sys.exit(1)
-  else:
-    sys.stdout.write("Checksums match.")
+    parser = ArgumentParser()
+    parser.add_argument(
+        "-v", "--verbose", action="store_true", help="enable debug logging"
+    )
+    parser.add_argument(
+        "-c",
+        "--check",
+        metavar="reference_file",
+        help="read checksums from reference_file and "
+        + "check they match checksums of llvm_path.",
+    )
+    parser.add_argument(
+        "--partial",
+        action="store_true",
+        help="ignore projects from reference_file "
+        + "that are not checked out in llvm_path.",
+    )
+    parser.add_argument(
+        "--multi_dir",
+        action="store_true",
+        help="indicates llvm_path contains llvm, checked out "
+        + "into multiple directories, as opposed to a "
+        + "typical single source tree checkout.",
+    )
+    parser.add_argument("llvm_path")
+
+    args = parser.parse_args()
+    if args.check is not None:
+        with open(args.check, "r") as f:
+            reference_checksums = ReadLLVMChecksums(f)
+    else:
+        reference_checksums = None
+
+    if args.verbose:
+        logging.basicConfig(level=logging.DEBUG)
+
+    llvm_projects = CreateLLVMProjects(not args.multi_dir)
+    checksums = ComputeLLVMChecksums(args.llvm_path, llvm_projects)
+
+    if reference_checksums is None:
+        WriteLLVMChecksums(checksums, sys.stdout)
+        sys.exit(0)
+
+    if not ValidateChecksums(reference_checksums, checksums, args.partial):
+        sys.stdout.write("Checksums differ.\nNew checksums:\n")
+        WriteLLVMChecksums(checksums, sys.stdout)
+        sys.stdout.write("Reference checksums:\n")
+        WriteLLVMChecksums(reference_checksums, sys.stdout)
+        sys.exit(1)
+    else:
+        sys.stdout.write("Checksums match.")
 
 
 def ComputeLLVMChecksums(root_path, projects):
-  """Compute checksums for LLVM sources checked out using svn.
-
-  Args:
-    root_path: a directory of llvm checkout.
-    projects: a list of LLVMProject instances, which describe checkout paths,
-      relative to root_path.
-
-  Returns:
-    A dict mapping from project name to project checksum.
-  """
-  hash_algo = hashlib.sha256
-
-  def collapse_svn_substitutions(contents):
-    # Replace svn substitutions for $Date$ and $LastChangedDate$.
-    # Unfortunately, these are locale-specific.
-    return SVN_DATES_REGEX.sub("$\1$", contents)
-
-  def read_and_collapse_svn_subsitutions(file_path):
-    with open(file_path, "rb") as f:
-      contents = f.read()
-      new_contents = collapse_svn_substitutions(contents)
-      if contents != new_contents:
-        logging.debug("Replaced svn keyword substitutions in %s", file_path)
-        logging.debug("\n\tBefore\n%s\n\tAfter\n%s", contents, new_contents)
-      return new_contents
-
-  project_checksums = dict()
-  # Hash each project.
-  for proj in projects:
-    project_root = os.path.join(root_path, proj.relpath)
-    if not os.path.exists(project_root):
-      logging.info("Folder %s doesn't exist, skipping project %s", proj.relpath,
-                   proj.name)
-      continue
-
-    files = list()
-
-    def add_file_hash(file_path):
-      if os.path.islink(file_path) and not os.path.exists(file_path):
-        content = os.readlink(file_path)
-      else:
-        content = read_and_collapse_svn_subsitutions(file_path)
-      hasher = hash_algo()
-      hasher.update(content)
-      file_digest = hasher.hexdigest()
-      logging.debug("Checksum %s for file %s", file_digest, file_path)
-      files.append((file_path, file_digest))
-
-    logging.info("Computing checksum for %s", proj.name)
-    WalkProjectFiles(root_path, projects, proj, add_file_hash)
-
-    # Compute final checksum.
-    files.sort(key=lambda x: x[0])
-    hasher = hash_algo()
-    for file_path, file_digest in files:
-      file_path = os.path.relpath(file_path, project_root)
-      hasher.update(file_path)
-      hasher.update(file_digest)
-    project_checksums[proj.name] = hasher.hexdigest()
-  return project_checksums
+    """Compute checksums for LLVM sources checked out using svn.
+
+    Args:
+      root_path: a directory of llvm checkout.
+      projects: a list of LLVMProject instances, which describe checkout paths,
+        relative to root_path.
+
+    Returns:
+      A dict mapping from project name to project checksum.
+    """
+    hash_algo = hashlib.sha256
+
+    def collapse_svn_substitutions(contents):
+        # Replace svn substitutions for $Date$ and $LastChangedDate$.
+        # Unfortunately, these are locale-specific.
+        return SVN_DATES_REGEX.sub("$\1$", contents)
+
+    def read_and_collapse_svn_subsitutions(file_path):
+        with open(file_path, "rb") as f:
+            contents = f.read()
+            new_contents = collapse_svn_substitutions(contents)
+            if contents != new_contents:
+                logging.debug("Replaced svn keyword substitutions in %s", file_path)
+                logging.debug("\n\tBefore\n%s\n\tAfter\n%s", contents, new_contents)
+            return new_contents
+
+    project_checksums = dict()
+    # Hash each project.
+    for proj in projects:
+        project_root = os.path.join(root_path, proj.relpath)
+        if not os.path.exists(project_root):
+            logging.info(
+                "Folder %s doesn't exist, skipping project %s", proj.relpath, proj.name
+            )
+            continue
+
+        files = list()
+
+        def add_file_hash(file_path):
+            if os.path.islink(file_path) and not os.path.exists(file_path):
+                content = os.readlink(file_path)
+            else:
+                content = read_and_collapse_svn_subsitutions(file_path)
+            hasher = hash_algo()
+            hasher.update(content)
+            file_digest = hasher.hexdigest()
+            logging.debug("Checksum %s for file %s", file_digest, file_path)
+            files.append((file_path, file_digest))
+
+        logging.info("Computing checksum for %s", proj.name)
+        WalkProjectFiles(root_path, projects, proj, add_file_hash)
+
+        # Compute final checksum.
+        files.sort(key=lambda x: x[0])
+        hasher = hash_algo()
+        for file_path, file_digest in files:
+            file_path = os.path.relpath(file_path, project_root)
+            hasher.update(file_path)
+            hasher.update(file_digest)
+        project_checksums[proj.name] = hasher.hexdigest()
+    return project_checksums
 
 
 def WriteLLVMChecksums(checksums, f):
-  """Writes checksums to a text file.
+    """Writes checksums to a text file.
 
-  Args:
-    checksums: a dict mapping from project name to project checksum (result of
-      ComputeLLVMChecksums).
-    f: a file object to write into.
-  """
+    Args:
+      checksums: a dict mapping from project name to project checksum (result of
+        ComputeLLVMChecksums).
+      f: a file object to write into.
+    """
 
-  for proj in sorted(checksums.keys()):
-    f.write("{} {}\n".format(checksums[proj], proj))
+    for proj in sorted(checksums.keys()):
+        f.write("{} {}\n".format(checksums[proj], proj))
 
 
 def ReadLLVMChecksums(f):
-  """Reads checksums from a text file, produced by WriteLLVMChecksums.
-
-  Returns:
-    A dict, mapping from project name to project checksum.
-  """
-  checksums = {}
-  while True:
-    line = f.readline()
-    if line == "":
-      break
-    checksum, proj = line.split()
-    checksums[proj] = checksum
-  return checksums
-
-
-def ValidateChecksums(reference_checksums,
-                      new_checksums,
-                      allow_missing_projects=False):
-  """Validates that reference_checksums and new_checksums match.
-
-  Args:
-    reference_checksums: a dict of reference checksums, mapping from a project
-      name to a project checksum.
-    new_checksums: a dict of checksums to be checked, mapping from a project
-      name to a project checksum.
-    allow_missing_projects:
-      When True, reference_checksums may contain more projects than
-        new_checksums. Projects missing from new_checksums are ignored.
-      When False, new_checksums and reference_checksums must contain checksums
-        for the same set of projects. If there is a project in
-        reference_checksums, missing from new_checksums, ValidateChecksums
-        will return False.
-
-  Returns:
-    True, if checksums match with regards to allow_missing_projects flag value.
-    False, otherwise.
-  """
-  if not allow_missing_projects:
-    if len(new_checksums) != len(reference_checksums):
-      return False
-
-  for proj, checksum in new_checksums.items():
-    # We never computed a checksum for this project.
-    if proj not in reference_checksums:
-      return False
-    # Checksum did not match.
-    if reference_checksums[proj] != checksum:
-      return False
-
-  return True
+    """Reads checksums from a text file, produced by WriteLLVMChecksums.
+
+    Returns:
+      A dict, mapping from project name to project checksum.
+    """
+    checksums = {}
+    while True:
+        line = f.readline()
+        if line == "":
+            break
+        checksum, proj = line.split()
+        checksums[proj] = checksum
+    return checksums
+
+
+def ValidateChecksums(reference_checksums, new_checksums, allow_missing_projects=False):
+    """Validates that reference_checksums and new_checksums match.
+
+    Args:
+      reference_checksums: a dict of reference checksums, mapping from a project
+        name to a project checksum.
+      new_checksums: a dict of checksums to be checked, mapping from a project
+        name to a project checksum.
+      allow_missing_projects:
+        When True, reference_checksums may contain more projects than
+          new_checksums. Projects missing from new_checksums are ignored.
+        When False, new_checksums and reference_checksums must contain checksums
+          for the same set of projects. If there is a project in
+          reference_checksums, missing from new_checksums, ValidateChecksums
+          will return False.
+
+    Returns:
+      True, if checksums match with regards to allow_missing_projects flag value.
+      False, otherwise.
+    """
+    if not allow_missing_projects:
+        if len(new_checksums) != len(reference_checksums):
+            return False
+
+    for proj, checksum in new_checksums.items():
+        # We never computed a checksum for this project.
+        if proj not in reference_checksums:
+            return False
+        # Checksum did not match.
+        if reference_checksums[proj] != checksum:
+            return False
+
+    return True
 
 
 if __name__ == "__main__":
-  main()
+    main()

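The checksum scheme itself is unchanged by the reformat: hash every file, then fold the sorted (path, digest) pairs into a single per-project digest. A self-contained sketch of that two-level hashing, assuming Python 3 (where `hashlib` requires bytes, hence the `.encode()` calls):

```
import hashlib

# Stand-in file contents keyed by relative path.
files = {"a.cpp": b"int x;\n", "b.cpp": b"int y;\n"}

# Level 1: per-file digests, sorted by path for determinism.
digests = sorted(
    (path, hashlib.sha256(data).hexdigest()) for path, data in files.items()
)

# Level 2: fold paths and digests into one project checksum.
hasher = hashlib.sha256()
for path, digest in digests:
    hasher.update(path.encode())
    hasher.update(digest.encode())
print(hasher.hexdigest())
```
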
diff --git a/llvm/utils/docker/scripts/llvm_checksum/project_tree.py b/llvm/utils/docker/scripts/llvm_checksum/project_tree.py
index 17ed475872bd7..337dd449c6c3b 100644
--- a/llvm/utils/docker/scripts/llvm_checksum/project_tree.py
+++ b/llvm/utils/docker/scripts/llvm_checksum/project_tree.py
@@ -11,85 +11,89 @@
 
 
 class LLVMProject(object):
-  """An LLVM project with a descriptive name and a relative checkout path.
-  """
+    """An LLVM project with a descriptive name and a relative checkout path."""
 
-  def __init__(self, name, relpath):
-    self.name = name
-    self.relpath = relpath
+    def __init__(self, name, relpath):
+        self.name = name
+        self.relpath = relpath
 
-  def is_subproject(self, other_project):
-    """ Check if self is checked out as a subdirectory of other_project.
-    """
-    return self.relpath.startswith(other_project.relpath)
+    def is_subproject(self, other_project):
+        """Check if self is checked out as a subdirectory of other_project."""
+        return self.relpath.startswith(other_project.relpath)
 
 
 def WalkProjectFiles(checkout_root, all_projects, project, visitor):
-  """ Walk over all files inside a project without recursing into subprojects, '.git' and '.svn' subfolders.
+    """Walk over all files inside a project without recursing into subprojects, '.git' and '.svn' subfolders.
 
     checkout_root: root of the LLVM checkout.
     all_projects: projects in the LLVM checkout.
     project: a project to walk the files of. Must be inside all_projects.
     visitor: a function called on each visited file.
-  """
-  assert project in all_projects
-
-  ignored_paths = set()
-  for other_project in all_projects:
-    if other_project != project and other_project.is_subproject(project):
-      ignored_paths.add(os.path.join(checkout_root, other_project.relpath))
-
-  def raise_error(err):
-    raise err
-
-  project_root = os.path.join(checkout_root, project.relpath)
-  for root, dirs, files in os.walk(project_root, onerror=raise_error):
-    dirs[:] = [
-        d for d in dirs
-        if d != ".svn" and d != ".git" and
-        os.path.join(root, d) not in ignored_paths
-    ]
-    for f in files:
-      visitor(os.path.join(root, f))
+    """
+    assert project in all_projects
+
+    ignored_paths = set()
+    for other_project in all_projects:
+        if other_project != project and other_project.is_subproject(project):
+            ignored_paths.add(os.path.join(checkout_root, other_project.relpath))
+
+    def raise_error(err):
+        raise err
+
+    project_root = os.path.join(checkout_root, project.relpath)
+    for root, dirs, files in os.walk(project_root, onerror=raise_error):
+        dirs[:] = [
+            d
+            for d in dirs
+            if d != ".svn"
+            and d != ".git"
+            and os.path.join(root, d) not in ignored_paths
+        ]
+        for f in files:
+            visitor(os.path.join(root, f))
 
 
 def CreateLLVMProjects(single_tree_checkout):
-  """Returns a list of LLVMProject instances, describing relative paths of a typical LLVM checkout.
-
-  Args:
-    single_tree_checkout:
-      When True, relative paths for each project points to a typical single
-        source tree checkout.
-      When False, relative paths for each projects points to a separate
-        directory. However, clang-tools-extra is an exception, its relative path
-        will always be 'clang/tools/extra'.
-  """
-  # FIXME: cover all of llvm projects.
-
-  # Projects that reside inside 'projects/' in a single source tree checkout.
-  ORDINARY_PROJECTS = [
-      "compiler-rt", "dragonegg", "libcxx", "libcxxabi", "libunwind",
-      "test-suite"
-  ]
-  # Projects that reside inside 'tools/' in a single source tree checkout.
-  TOOLS_PROJECTS = ["clang", "lld", "lldb"]
-
-  if single_tree_checkout:
-    projects = [LLVMProject("llvm", "")]
-    projects += [
-        LLVMProject(p, os.path.join("projects", p)) for p in ORDINARY_PROJECTS
-    ]
-    projects += [
-        LLVMProject(p, os.path.join("tools", p)) for p in TOOLS_PROJECTS
+    """Returns a list of LLVMProject instances, describing relative paths of a typical LLVM checkout.
+
+    Args:
+      single_tree_checkout:
+        When True, relative paths for each project points to a typical single
+          source tree checkout.
+        When False, relative paths for each projects points to a separate
+          directory. However, clang-tools-extra is an exception, its relative path
+          will always be 'clang/tools/extra'.
+    """
+    # FIXME: cover all of llvm projects.
+
+    # Projects that reside inside 'projects/' in a single source tree checkout.
+    ORDINARY_PROJECTS = [
+        "compiler-rt",
+        "dragonegg",
+        "libcxx",
+        "libcxxabi",
+        "libunwind",
+        "test-suite",
     ]
-    projects.append(
-        LLVMProject("clang-tools-extra",
-                    os.path.join("tools", "clang", "tools", "extra")))
-  else:
-    projects = [LLVMProject("llvm", "llvm")]
-    projects += [LLVMProject(p, p) for p in ORDINARY_PROJECTS]
-    projects += [LLVMProject(p, p) for p in TOOLS_PROJECTS]
-    projects.append(
-        LLVMProject("clang-tools-extra", os.path.join("clang", "tools",
-                                                      "extra")))
-  return projects
+    # Projects that reside inside 'tools/' in a single source tree checkout.
+    TOOLS_PROJECTS = ["clang", "lld", "lldb"]
+
+    if single_tree_checkout:
+        projects = [LLVMProject("llvm", "")]
+        projects += [
+            LLVMProject(p, os.path.join("projects", p)) for p in ORDINARY_PROJECTS
+        ]
+        projects += [LLVMProject(p, os.path.join("tools", p)) for p in TOOLS_PROJECTS]
+        projects.append(
+            LLVMProject(
+                "clang-tools-extra", os.path.join("tools", "clang", "tools", "extra")
+            )
+        )
+    else:
+        projects = [LLVMProject("llvm", "llvm")]
+        projects += [LLVMProject(p, p) for p in ORDINARY_PROJECTS]
+        projects += [LLVMProject(p, p) for p in TOOLS_PROJECTS]
+        projects.append(
+            LLVMProject("clang-tools-extra", os.path.join("clang", "tools", "extra"))
+        )
+    return projects

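The walker in this file relies on an easy-to-miss idiom the reformat preserves: assigning to `dirs[:]` (not `dirs`) mutates the list `os.walk` iterates over, which is the only way to prune subdirectories mid-walk. A minimal sketch:

```
import os

for root, dirs, files in os.walk("."):
    # In-place slice assignment; rebinding `dirs = [...]` would not prune.
    dirs[:] = [d for d in dirs if d not in (".git", ".svn")]
    for f in files:
        print(os.path.join(root, f))
```
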
diff --git a/llvm/utils/extract-section.py b/llvm/utils/extract-section.py
index c8838b4900163..bb4e252155372 100755
--- a/llvm/utils/extract-section.py
+++ b/llvm/utils/extract-section.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 from __future__ import print_function
-'''
+
+"""
 Helper script to print out the raw content of an ELF section.
 Example usages:
 ```
@@ -13,87 +14,133 @@
 ```
 This is merely a wrapper around `llvm-readobj` that focuses on the binary
 content as well as providing more formatting options.
-'''
+"""
 
 # Unfortunately reading binary from stdin is not so trivial in Python...
 def read_raw_stdin():
     import sys
+
     if sys.version_info >= (3, 0):
         reading_source = sys.stdin.buffer
     else:
         # Windows will always read as string so we need some
         # special handling
-        if sys.platform == 'win32':
+        if sys.platform == "win32":
             import os, msvcrt
+
             msvcrt.setformat(sys.stdin.fileno(), os.O_BINARY)
         reading_source = sys.stdin
     return reading_source.read()
 
+
 def get_raw_section_dump(readobj_path, section_name, input_file):
     import subprocess
-    cmd = [readobj_path, '--elf-output-style=GNU', '--hex-dump={}'.format(section_name),
-            input_file]
+
+    cmd = [
+        readobj_path,
+        "--elf-output-style=GNU",
+        "--hex-dump={}".format(section_name),
+        input_file,
+    ]
     proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
 
-    if input_file == '-':
+    if input_file == "-":
         # From stdin
-        out,_ = proc.communicate(input=read_raw_stdin())
+        out, _ = proc.communicate(input=read_raw_stdin())
     else:
-        out,_ = proc.communicate()
+        out, _ = proc.communicate()
+
+    return out.decode("utf-8") if type(out) is not str else out
 
-    return out.decode('utf-8') if type(out) is not str else out
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     import argparse
+
     # The default '-h' (--help) will conflict with our '-h' (hex) format
     arg_parser = argparse.ArgumentParser(add_help=False)
-    arg_parser.add_argument('--readobj-path', metavar='<executable path>', type=str,
-            help='Path to llvm-readobj')
-    arg_parser.add_argument('--input-file', metavar='<file>', type=str,
-            help='Input object file, or \'-\' to read from stdin')
-    arg_parser.add_argument('section', metavar='<name>', type=str,
-            help='Name of the section to extract')
+    arg_parser.add_argument(
+        "--readobj-path",
+        metavar="<executable path>",
+        type=str,
+        help="Path to llvm-readobj",
+    )
+    arg_parser.add_argument(
+        "--input-file",
+        metavar="<file>",
+        type=str,
+        help="Input object file, or '-' to read from stdin",
+    )
+    arg_parser.add_argument(
+        "section", metavar="<name>", type=str, help="Name of the section to extract"
+    )
     # Output format
     format_group = arg_parser.add_mutually_exclusive_group()
-    format_group.add_argument('-b', dest='format', action='store_const', const='bits',
-            help='Print out in bits')
-    arg_parser.add_argument('--byte-indicator', action='store_true',
-            help='Whether to print a \'.\' every 8 bits in bits printing mode')
-    arg_parser.add_argument('--bits-endian', metavar='<little/big>', type=str,
-            choices=['little', 'big'],
-            help='Print out bits in specified endianness (little or big); defaults to big')
-    format_group.add_argument('-h', dest='format', action='store_const', const='hex',
-            help='Print out in hexadecimal')
-    arg_parser.add_argument('--hex-width', metavar='<# of bytes>', type=int,
-            help='The width (in byte) of every element in hex printing mode')
+    format_group.add_argument(
+        "-b",
+        dest="format",
+        action="store_const",
+        const="bits",
+        help="Print out in bits",
+    )
+    arg_parser.add_argument(
+        "--byte-indicator",
+        action="store_true",
+        help="Whether to print a '.' every 8 bits in bits printing mode",
+    )
+    arg_parser.add_argument(
+        "--bits-endian",
+        metavar="<little/big>",
+        type=str,
+        choices=["little", "big"],
+        help="Print out bits in specified endianness (little or big); defaults to big",
+    )
+    format_group.add_argument(
+        "-h",
+        dest="format",
+        action="store_const",
+        const="hex",
+        help="Print out in hexadecimal",
+    )
+    arg_parser.add_argument(
+        "--hex-width",
+        metavar="<# of bytes>",
+        type=int,
+        help="The width (in byte) of every element in hex printing mode",
+    )
 
-    arg_parser.add_argument('--help', action='help')
-    arg_parser.set_defaults(format='bits', tool_path='llvm-readobj', input_file='-',
-            byte_indicator=False, hex_width=4, bits_endian='big')
+    arg_parser.add_argument("--help", action="help")
+    arg_parser.set_defaults(
+        format="bits",
+        tool_path="llvm-readobj",
+        input_file="-",
+        byte_indicator=False,
+        hex_width=4,
+        bits_endian="big",
+    )
     args = arg_parser.parse_args()
 
     raw_section = get_raw_section_dump(args.tool_path, args.section, args.input_file)
 
     results = []
     for line in raw_section.splitlines(False):
-        if line.startswith('Hex dump'):
+        if line.startswith("Hex dump"):
             continue
-        parts = line.strip().split(' ')[1:]
+        parts = line.strip().split(" ")[1:]
         for part in parts[:4]:
             # exclude any non-hex dump string
             try:
                 val = int(part, 16)
-                if args.format == 'bits':
+                if args.format == "bits":
                     # divided into bytes first
                     offsets = (24, 16, 8, 0)
-                    if args.bits_endian == 'little':
+                    if args.bits_endian == "little":
                         offsets = (0, 8, 16, 24)
                     for byte in [(val >> off) & 0xFF for off in offsets]:
                         for bit in [(byte >> off) & 1 for off in range(7, -1, -1)]:
                             results.append(str(bit))
                         if args.byte_indicator:
-                            results.append('.')
-                elif args.format == 'hex':
+                            results.append(".")
+                elif args.format == "hex":
                     assert args.hex_width <= 4 and args.hex_width > 0
                     width_bits = args.hex_width * 8
                     offsets = [off for off in range(32 - width_bits, -1, -width_bits)]
@@ -103,4 +150,4 @@ def get_raw_section_dump(readobj_path, section_name, input_file):
                         results.append(format_str.format(word))
             except:
                 break
-    print(' '.join(results), end='')
+    print(" ".join(results), end="")

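The bit-printing path above is dense even after reflowing; a condensed sketch of the same arithmetic on a made-up 32-bit word, using the default big-endian byte order:

```
val = 0xDEADBEEF
offsets = (24, 16, 8, 0)  # big-endian; (0, 8, 16, 24) for little-endian

bits = []
for byte in [(val >> off) & 0xFF for off in offsets]:
    # Most significant bit of each byte first.
    bits.extend(str((byte >> off) & 1) for off in range(7, -1, -1))
print("".join(bits))  # 11011110101011011011111011101111
```
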
diff --git a/llvm/utils/extract_symbols.py b/llvm/utils/extract_symbols.py
index a2eabd3ab4f4c..9238828d7ce85 100755
--- a/llvm/utils/extract_symbols.py
+++ b/llvm/utils/extract_symbols.py
@@ -36,10 +36,14 @@ def nm_get_symbols(tool, lib):
     #   that llvm-nm do not demangle by default, but the system nm on AIX does
     #   that, so the behavior may change in the future,
     # '-p' do not waste time sorting the symbols.
-    cmd = [tool,'-P','-g','-Xany','--no-demangle','-p']
-    process = subprocess.Popen(cmd+[lib], bufsize=1,
-                               stdout=subprocess.PIPE, stdin=subprocess.PIPE,
-                               universal_newlines=True)
+    cmd = [tool, "-P", "-g", "-Xany", "--no-demangle", "-p"]
+    process = subprocess.Popen(
+        cmd + [lib],
+        bufsize=1,
+        stdout=subprocess.PIPE,
+        stdin=subprocess.PIPE,
+        universal_newlines=True,
+    )
     process.stdin.close()
     for line in process.stdout:
         # Look for external symbols that are defined in some section
@@ -58,26 +62,29 @@ def nm_get_symbols(tool, lib):
             yield (match.group(1), False)
     process.wait()
 
+
 # Define a function which determines if the target is 32-bit Windows (as that's
 # where calling convention name decoration happens).
 def readobj_is_32bit_windows(tool, lib):
-    output = subprocess.check_output([tool,'--file-header',lib],
-                                     universal_newlines=True)
+    output = subprocess.check_output(
+        [tool, "--file-header", lib], universal_newlines=True
+    )
     for line in output.splitlines():
-        match = re.match('Format: (\S+)', line)
+        match = re.match("Format: (\S+)", line)
         if match:
-            return (match.group(1) == 'COFF-i386')
+            return match.group(1) == "COFF-i386"
     return False
 
+
 # MSVC mangles names to ?<identifier_mangling>@<type_mangling>. By examining the
 # identifier/type mangling we can decide which symbols could possibly be
 # required and which we can discard.
 def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
     # Keep unmangled (i.e. extern "C") names
-    if not '?' in symbol:
+    if not "?" in symbol:
         if calling_convention_decoration:
             # Remove calling convention decoration from names
-            match = re.match('[_@]([^@]+)', symbol)
+            match = re.match("[_@]([^@]+)", symbol)
             if match:
                 symbol = match.group(1)
         # Discard floating point/SIMD constants.
@@ -87,15 +94,15 @@ def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
     # Deleting destructors start with ?_G or ?_E and can be discarded because
     # link.exe gives you a warning telling you they can't be exported if you
     # don't
-    elif symbol.startswith('??_G') or symbol.startswith('??_E'):
+    elif symbol.startswith("??_G") or symbol.startswith("??_E"):
         return None
     # An anonymous namespace is mangled as ?A(maybe hex number)@. Any symbol
     # that mentions an anonymous namespace can be discarded, as the anonymous
     # namespace doesn't exist outside of that translation unit.
-    elif re.search('\?A(0x\w+)?@', symbol):
+    elif re.search("\?A(0x\w+)?@", symbol):
         return None
     # Skip X86GenMnemonicTables functions, they are not exposed from llvm/include/.
-    elif re.match('\?is[A-Z0-9]*@X86@llvm', symbol):
+    elif re.match("\?is[A-Z0-9]*@X86@llvm", symbol):
         return None
     # Keep mangled llvm:: and clang:: function symbols. How we detect these is a
     # bit of a mess and imprecise, but that avoids having to completely demangle
@@ -115,23 +122,24 @@ def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
     #                 ::= .+@ (list of types)
     #                 ::= .*Z (list of types, varargs)
     # <throw-spec> ::= exceptions are not allowed
-    elif re.search('(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$', symbol):
+    elif re.search("(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$", symbol):
         return symbol
     return None
 
+
 # Itanium manglings are of the form _Z<identifier_mangling><type_mangling>. We
 # demangle the identifier mangling to identify symbols that can be safely
 # discarded.
 def should_keep_itanium_symbol(symbol, calling_convention_decoration):
     # Start by removing any calling convention decoration (which we expect to
     # see on all symbols, even mangled C++ symbols)
-    if calling_convention_decoration and symbol.startswith('_'):
+    if calling_convention_decoration and symbol.startswith("_"):
         symbol = symbol[1:]
     # Keep unmangled names
-    if not symbol.startswith('_') and not symbol.startswith('.'):
+    if not symbol.startswith("_") and not symbol.startswith("."):
         return symbol
     # Discard manglings that aren't nested names
-    match = re.match('_Z(T[VTIS])?(N.+)', symbol)
+    match = re.match("_Z(T[VTIS])?(N.+)", symbol)
     if not match:
         return None
     # Demangle the name. If the name is too complex then we don't need to keep
@@ -143,89 +151,93 @@ def should_keep_itanium_symbol(symbol, calling_convention_decoration):
     if not names:
         return symbol
     # Keep llvm:: and clang:: names
-    elif names[0][0] == '4llvm' or names[0][0] == '5clang':
+    elif names[0][0] == "4llvm" or names[0][0] == "5clang":
         return symbol
     # Discard everything else
     else:
         return None
 
+
 # Certain kinds of complex manglings we assume cannot be part of a public
 # interface, and we handle them by raising an exception.
 class TooComplexName(Exception):
     pass
 
+
 # Parse an itanium mangled name from the start of a string and return a
 # (name, rest of string) pair.
 def parse_itanium_name(arg):
     # Check for a normal name
-    match = re.match('(\d+)(.+)', arg)
+    match = re.match("(\d+)(.+)", arg)
     if match:
         n = int(match.group(1))
-        name = match.group(1)+match.group(2)[:n]
+        name = match.group(1) + match.group(2)[:n]
         rest = match.group(2)[n:]
         return name, rest
     # Check for constructor/destructor names
-    match = re.match('([CD][123])(.+)', arg)
+    match = re.match("([CD][123])(.+)", arg)
     if match:
         return match.group(1), match.group(2)
     # Assume that a sequence of characters that doesn't end a nesting is an
     # operator (this is very imprecise, but appears to be good enough)
-    match = re.match('([^E]+)(.+)', arg)
+    match = re.match("([^E]+)(.+)", arg)
     if match:
         return match.group(1), match.group(2)
     # Anything else: we can't handle it
     return None, arg
 
+
 # Parse an itanium mangled template argument list from the start of a string
 # and throw it away, returning the rest of the string.
 def skip_itanium_template(arg):
     # A template argument list starts with I
-    assert arg.startswith('I'), arg
+    assert arg.startswith("I"), arg
     tmp = arg[1:]
     while tmp:
         # Check for names
-        match = re.match('(\d+)(.+)', tmp)
+        match = re.match("(\d+)(.+)", tmp)
         if match:
             n = int(match.group(1))
-            tmp =  match.group(2)[n:]
+            tmp = match.group(2)[n:]
             continue
         # Check for substitutions
-        match = re.match('S[A-Z0-9]*_(.+)', tmp)
+        match = re.match("S[A-Z0-9]*_(.+)", tmp)
         if match:
             tmp = match.group(1)
         # Start of a template
-        elif tmp.startswith('I'):
+        elif tmp.startswith("I"):
             tmp = skip_itanium_template(tmp)
         # Start of a nested name
-        elif tmp.startswith('N'):
+        elif tmp.startswith("N"):
             _, tmp = parse_itanium_nested_name(tmp)
         # Start of an expression: assume that it's too complicated
-        elif tmp.startswith('L') or tmp.startswith('X'):
+        elif tmp.startswith("L") or tmp.startswith("X"):
             raise TooComplexName
         # End of the template
-        elif tmp.startswith('E'):
+        elif tmp.startswith("E"):
             return tmp[1:]
         # Something else: probably a type, skip it
         else:
             tmp = tmp[1:]
     return None
 
+
 # Parse an itanium mangled nested name and transform it into a list of pairs of
 # (name, is_template), returning (list, rest of string).
 def parse_itanium_nested_name(arg):
     # A nested name starts with N
-    assert arg.startswith('N'), arg
+    assert arg.startswith("N"), arg
     ret = []
 
     # Skip past the N, and possibly a substitution
-    match = re.match('NS[A-Z0-9]*_(.+)', arg)
+    match = re.match("NS[A-Z0-9]*_(.+)", arg)
     if match:
         tmp = match.group(1)
     else:
         tmp = arg[1:]
 
     # Skip past CV-qualifiers and ref qualifiers
-    match = re.match('[rVKRO]*(.+)', tmp);
+    match = re.match("[rVKRO]*(.+)", tmp)
     if match:
         tmp = match.group(1)
 
@@ -233,7 +245,7 @@ def parse_itanium_nested_name(arg):
     # nested name
     while tmp:
         # An E ends the nested name
-        if tmp.startswith('E'):
+        if tmp.startswith("E"):
             return ret, tmp[1:]
         # Parse a name
         name_part, tmp = parse_itanium_name(tmp)
@@ -243,7 +255,7 @@ def parse_itanium_nested_name(arg):
         is_template = False
         # If this name is a template record that, then skip the template
         # arguments
-        if tmp.startswith('I'):
+        if tmp.startswith("I"):
             tmp = skip_itanium_template(tmp)
             is_template = True
         # Add the name to the list
@@ -252,33 +264,34 @@ def parse_itanium_nested_name(arg):
     # If we get here then something went wrong
     return None, None
 
+
 # Parse a microsoft mangled symbol and return a list of pairs of
 # (name, is_template). This is very rudimentary and does just enough
 # in order to determine if the first or second component is a template.
 def parse_microsoft_mangling(arg):
     # If the name doesn't start with ? this isn't a mangled name
-    if not arg.startswith('?'):
+    if not arg.startswith("?"):
         return [(arg, False)]
     arg = arg[1:]
     components = []
     while len(arg) > 0:
         # If we see an empty component we've reached the end
-        if arg.startswith('@'):
+        if arg.startswith("@"):
             return components
         # Check for a simple name
-        match = re.match('(\w+)@(.+)', arg)
+        match = re.match("(\w+)@(.+)", arg)
         if match:
             components.append((match.group(1), False))
             arg = match.group(2)
             continue
         # Check for a special function name
-        match = re.match('(\?_?\w)(.+)', arg)
+        match = re.match("(\?_?\w)(.+)", arg)
         if match:
             components.append((match.group(1), False))
             arg = match.group(2)
             continue
         # Check for a template name
-        match = re.match('\?\$(\w+)@[^@]+@(.+)', arg)
+        match = re.match("\?\$(\w+)@[^@]+@(.+)", arg)
         if match:
             components.append((match.group(1), True))
             arg = match.group(2)
@@ -288,6 +301,7 @@ def parse_microsoft_mangling(arg):
         return components
     return components
 
+
 def extract_symbols(arg):
     llvm_nm_path, should_keep_symbol, calling_convention_decoration, lib = arg
     symbol_defs = dict()
@@ -296,18 +310,19 @@ def extract_symbols(arg):
         symbol = should_keep_symbol(symbol, calling_convention_decoration)
         if symbol:
             if is_def:
-                symbol_defs[symbol] = 1 + symbol_defs.setdefault(symbol,0)
+                symbol_defs[symbol] = 1 + symbol_defs.setdefault(symbol, 0)
             else:
                 symbol_refs.add(symbol)
     return (symbol_defs, symbol_refs)
 
+
 def get_template_name(sym, mangling):
     # Parse the mangling into a list of (name, is_template)
     try:
-        if mangling == 'microsoft':
+        if mangling == "microsoft":
             names = parse_microsoft_mangling(sym)
         else:
-            match = re.match('_Z(T[VTIS])?(N.+)', sym)
+            match = re.match("_Z(T[VTIS])?(N.+)", sym)
             if match:
                 names, _ = parse_itanium_nested_name(match.group(2))
             else:
@@ -326,41 +341,62 @@ def get_template_name(sym, mangling):
     # Not a template
     return None
 
+
 def parse_tool_path(parser, tool, val):
     try:
         # Close std streams as we don't want any output and we don't
         # want the process to wait for something on stdin.
-        p = subprocess.Popen([val], stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE,
-                                stdin=subprocess.PIPE,
-                                universal_newlines=True)
+        p = subprocess.Popen(
+            [val],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            stdin=subprocess.PIPE,
+            universal_newlines=True,
+        )
         p.stdout.close()
         p.stderr.close()
         p.stdin.close()
         p.wait()
         return val
     except Exception:
-        parser.error(f'Invalid path for {tool}')
+        parser.error(f"Invalid path for {tool}")
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description='Extract symbols to export from libraries')
-    parser.add_argument('--mangling', choices=['itanium','microsoft'],
-                        required=True, help='expected symbol mangling scheme')
-    parser.add_argument('--nm', metavar='path',
-                        type=lambda x: parse_tool_path(parser, 'nm', x),
-                        help='path to the llvm-nm executable')
-    parser.add_argument('--readobj', metavar='path',
-                        type=lambda x: parse_tool_path(parser, 'readobj', x),
-                        help='path to the llvm-readobj executable')
-    parser.add_argument('libs', metavar='lib', type=str, nargs='+',
-                        help='libraries to extract symbols from')
-    parser.add_argument('-o', metavar='file', type=str, help='output to file')
+        description="Extract symbols to export from libraries"
+    )
+    parser.add_argument(
+        "--mangling",
+        choices=["itanium", "microsoft"],
+        required=True,
+        help="expected symbol mangling scheme",
+    )
+    parser.add_argument(
+        "--nm",
+        metavar="path",
+        type=lambda x: parse_tool_path(parser, "nm", x),
+        help="path to the llvm-nm executable",
+    )
+    parser.add_argument(
+        "--readobj",
+        metavar="path",
+        type=lambda x: parse_tool_path(parser, "readobj", x),
+        help="path to the llvm-readobj executable",
+    )
+    parser.add_argument(
+        "libs",
+        metavar="lib",
+        type=str,
+        nargs="+",
+        help="libraries to extract symbols from",
+    )
+    parser.add_argument("-o", metavar="file", type=str, help="output to file")
     args = parser.parse_args()
 
     # How we determine which symbols to keep and which to discard depends on
     # the mangling scheme
-    if args.mangling == 'microsoft':
+    if args.mangling == "microsoft":
         should_keep_symbol = should_keep_microsoft_symbol
     else:
         should_keep_symbol = should_keep_itanium_symbol
@@ -371,17 +407,17 @@ def parse_tool_path(parser, tool, val):
         # When invoked by cmake the arguments are the cmake target names of the
         # libraries, so we need to add .lib/.a to the end and maybe lib to the
         # start to get the filename. Also allow objects.
-        suffixes = ['.lib','.a','.obj','.o']
+        suffixes = [".lib", ".a", ".obj", ".o"]
         if not any([lib.endswith(s) for s in suffixes]):
             for s in suffixes:
-                if os.path.exists(lib+s):
-                    lib = lib+s
+                if os.path.exists(lib + s):
+                    lib = lib + s
                     break
-                if os.path.exists('lib'+lib+s):
-                    lib = 'lib'+lib+s
+                if os.path.exists("lib" + lib + s):
+                    lib = "lib" + lib + s
                     break
         if not any([lib.endswith(s) for s in suffixes]):
-            print("Don't know what to do with argument "+lib, file=sys.stderr)
+            print("Don't know what to do with argument " + lib, file=sys.stderr)
             exit(1)
         libs.append(lib)
 
@@ -398,7 +434,10 @@ def parse_tool_path(parser, tool, val):
         # use a lambda or local function definition as that doesn't work on
         # windows, so create a list of tuples which duplicates the arguments
         # that are the same in all calls.
-        vals = [(args.nm, should_keep_symbol, calling_convention_decoration, x) for x in libs]
+        vals = [
+            (args.nm, should_keep_symbol, calling_convention_decoration, x)
+            for x in libs
+        ]
         # Do an async map then wait for the result to make sure that
         # KeyboardInterrupt gets caught correctly (see
         # http://bugs.python.org/issue8296)
@@ -415,8 +454,8 @@ def parse_tool_path(parser, tool, val):
     symbol_defs = dict()
     symbol_refs = set()
     for (this_lib_defs, this_lib_refs) in libs_symbols:
-        for k,v in list(this_lib_defs.items()):
-            symbol_defs[k] = v + symbol_defs.setdefault(k,0)
+        for k, v in list(this_lib_defs.items()):
+            symbol_defs[k] = v + symbol_defs.setdefault(k, 0)
         for sym in list(this_lib_refs):
             symbol_refs.add(sym)
 
@@ -434,10 +473,10 @@ def parse_tool_path(parser, tool, val):
     #    is because we need to export any explicitly instantiated templates,
     #    and we expect those to be referenced in some object.
     if args.o:
-        outfile = open(args.o,'w')
+        outfile = open(args.o, "w")
     else:
         outfile = sys.stdout
-    for k,v in list(symbol_defs.items()):
+    for k, v in list(symbol_defs.items()):
         template = get_template_name(k, args.mangling)
         if v == 1 and (not template or template in template_instantiation_refs):
             print(k, file=outfile)

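The mangling heuristics above survive the reformat intact. A toy stand-in for the nested-name filter (simplified from `should_keep_itanium_symbol`; the symbols below are illustrative, not from the tree):

```
import re

def is_interesting(symbol):
    # Itanium nested names: _Z, optional T[VTIS], then N<components>...E.
    match = re.match(r"_Z(T[VTIS])?(N.+)", symbol)
    if not match:
        return False
    rest = match.group(2)[1:]  # skip the leading 'N'
    # Keep only llvm:: and clang:: top-level namespaces.
    return rest.startswith(("4llvm", "5clang"))

print(is_interesting("_ZN4llvm9StringRefC1Ev"))  # True
print(is_interesting("_ZN3foo3barEv"))           # False
```
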
diff --git a/llvm/utils/extract_vplan.py b/llvm/utils/extract_vplan.py
index a6f217b85176c..cff6f5074d771 100755
--- a/llvm/utils/extract_vplan.py
+++ b/llvm/utils/extract_vplan.py
@@ -13,36 +13,38 @@
 import subprocess
 
 parser = argparse.ArgumentParser()
-parser.add_argument('--png', action='store_true')
+parser.add_argument("--png", action="store_true")
 args = parser.parse_args()
 
-dot = shutil.which('dot')
+dot = shutil.which("dot")
 if args.png and not dot:
     raise RuntimeError("Can't export to PNG without 'dot' in the system")
 
-pattern = re.compile(r"(digraph VPlan {.*?\n})",re.DOTALL)
+pattern = re.compile(r"(digraph VPlan {.*?\n})", re.DOTALL)
 matches = re.findall(pattern, sys.stdin.read())
 
 for vplan in matches:
     m = re.search("graph \[.+(VF=.+,UF.+)", vplan)
     if not m:
         raise ValueError("Can't get the right VPlan name")
-    name = re.sub('[^a-zA-Z0-9]', '', m.group(1))
+    name = re.sub("[^a-zA-Z0-9]", "", m.group(1))
 
     if args.png:
-        filename = 'VPlan' + name + '.png'
+        filename = "VPlan" + name + ".png"
         print("Exporting " + name + " to PNG via dot: " + filename)
-        p = subprocess.Popen([dot, '-Tpng', '-o', filename],
-                              encoding='utf-8',
-                              stdin=subprocess.PIPE,
-                              stdout=subprocess.PIPE,
-                              stderr=subprocess.PIPE)
+        p = subprocess.Popen(
+            [dot, "-Tpng", "-o", filename],
+            encoding="utf-8",
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
         out, err = p.communicate(input=vplan)
         if err:
             raise RuntimeError("Error running dot: " + err)
 
     else:
-        filename = 'VPlan' + name + '.dot'
+        filename = "VPlan" + name + ".dot"
         print("Exporting " + name + " to DOT: " + filename)
-        with open(filename, 'w') as out:
+        with open(filename, "w") as out:
             out.write(vplan)

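One detail worth calling out in this file: `re.DOTALL` is what lets the pattern span the multi-line graph bodies. A self-contained sketch of the extraction with made-up input:

```
import re

log = 'noise\ndigraph VPlan {\n graph [label="VF=4,UF=1"]\n}\ntrailer\n'

# DOTALL makes '.' match newlines, so each full body is captured.
pattern = re.compile(r"(digraph VPlan {.*?\n})", re.DOTALL)
for vplan in pattern.findall(log):
    name = re.sub("[^a-zA-Z0-9]", "", re.search(r"VF=.+,UF.+", vplan).group(0))
    print(name)  # VF4UF1
```
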
diff --git a/llvm/utils/filecheck_lint/filecheck_lint.py b/llvm/utils/filecheck_lint/filecheck_lint.py
index cae4b3396b3d2..dc054ab76a098 100644
--- a/llvm/utils/filecheck_lint/filecheck_lint.py
+++ b/llvm/utils/filecheck_lint/filecheck_lint.py
@@ -34,146 +34,158 @@
 from typing import Generator, Sequence, Tuple
 
 _distance_threshold = 3
-_prefixes = {'CHECK'}
-_suffixes = {'-DAG', '-COUNT', '-EMPTY', '-LABEL', '-NEXT', '-NOT', '-SAME'}
+_prefixes = {"CHECK"}
+_suffixes = {"-DAG", "-COUNT", "-EMPTY", "-LABEL", "-NEXT", "-NOT", "-SAME"}
 # 'NOTE' and 'TODO' are not directives, but are likely to be false positives
 # if encountered and to generate noise as a result. We filter them out also to
 # avoid this.
 _lit_directives = {
-    'RUN',
-    'REQUIRES',
-    'UNSUPPORTED',
-    'XFAIL',
-    'DEFINE',
-    'REDEFINE',
+    "RUN",
+    "REQUIRES",
+    "UNSUPPORTED",
+    "XFAIL",
+    "DEFINE",
+    "REDEFINE",
 }
 # 'COM' and 'RUN' are default comment prefixes for FileCheck.
-_comment_prefixes = {'COM', 'RUN'}
-_ignore = _lit_directives.union(_comment_prefixes).union({'NOTE', 'TODO'})
+_comment_prefixes = {"COM", "RUN"}
+_ignore = _lit_directives.union(_comment_prefixes).union({"NOTE", "TODO"})
 
 
 def levenshtein(s1: str, s2: str) -> int:  # pylint: disable=g-doc-args
-  """Computes the edit distance between two strings.
-
-  Additions, deletions, and substitutions all count as a single operation.
-  """
-  if not s1:
-    return len(s2)
-  if not s2:
-    return len(s1)
-
-  distances = range(len(s2) + 1)
-  for i in range(len(s1)):
-    new_distances = [i + 1]
-    for j in range(len(s2)):
-      cost = min(distances[j] + int(s1[i] != s2[j]), distances[j + 1] + 1,
-                 new_distances[-1] + 1)
-      new_distances.append(cost)
-    distances = new_distances
-  return distances[-1]
+    """Computes the edit distance between two strings.
+
+    Additions, deletions, and substitutions all count as a single operation.
+    """
+    if not s1:
+        return len(s2)
+    if not s2:
+        return len(s1)
+
+    distances = range(len(s2) + 1)
+    for i in range(len(s1)):
+        new_distances = [i + 1]
+        for j in range(len(s2)):
+            cost = min(
+                distances[j] + int(s1[i] != s2[j]),
+                distances[j + 1] + 1,
+                new_distances[-1] + 1,
+            )
+            new_distances.append(cost)
+        distances = new_distances
+    return distances[-1]
 
 
 class FileRange:
-  """Stores the coordinates of a span on a single line within a file.
+    """Stores the coordinates of a span on a single line within a file.
+
+    Attributes:
+      line:         the line number
+      start_column: the (inclusive) column where the span starts
+      end_column:   the (inclusive) column where the span ends
+    """
 
-  Attributes:
-    line:         the line number
-    start_column: the (inclusive) column where the span starts
-    end_column:   the (inclusive) column where the span ends
-  """
-  line: int
-  start_column: int
-  end_column: int
+    line: int
+    start_column: int
+    end_column: int
 
-  def __init__(self, content: str, start_byte: int, end_byte: int):  # pylint: disable=g-doc-args
-    """Derives a span's coordinates based on a string and start/end bytes.
+    def __init__(
+        self, content: str, start_byte: int, end_byte: int
+    ):  # pylint: disable=g-doc-args
+        """Derives a span's coordinates based on a string and start/end bytes.
 
-    `start_byte` and `end_byte` are assumed to be on the same line.
-    """
-    content_before_span = content[:start_byte]
-    self.line = content_before_span.count('\n') + 1
-    self.start_column = start_byte - content_before_span.rfind('\n')
-    self.end_column = self.start_column + (end_byte - start_byte - 1)
+        `start_byte` and `end_byte` are assumed to be on the same line.
+        """
+        content_before_span = content[:start_byte]
+        self.line = content_before_span.count("\n") + 1
+        self.start_column = start_byte - content_before_span.rfind("\n")
+        self.end_column = self.start_column + (end_byte - start_byte - 1)
 
-  def __str__(self) -> str:
-    return f'{self.line}:{self.start_column}-{self.end_column}'
+    def __str__(self) -> str:
+        return f"{self.line}:{self.start_column}-{self.end_column}"
 
 
 class Diagnostic:
-  """Stores information about one typo and a suggested fix.
-
-  Attributes:
-    filepath:   the path to the file in which the typo was found
-    filerange:  the position at which the typo was found in the file
-    typo:       the typo
-    fix:        a suggested fix
-  """
-
-  filepath: pathlib.Path
-  filerange: FileRange
-  typo: str
-  fix: str
-
-  def __init__(
-      self,
-      filepath: pathlib.Path,
-      filerange: FileRange,
-      typo: str,
-      fix: str  # pylint: disable=redefined-outer-name
-  ):
-    self.filepath = filepath
-    self.filerange = filerange
-    self.typo = typo
-    self.fix = fix
-
-  def __str__(self) -> str:
-    return f'{self.filepath}:' + str(self.filerange) + f': {self.summary()}'
-
-  def summary(self) -> str:
-    return (
-        f'Found potentially misspelled directive "{self.typo}". Did you mean '
-        f'"{self.fix}"?')
+    """Stores information about one typo and a suggested fix.
+
+    Attributes:
+      filepath:   the path to the file in which the typo was found
+      filerange:  the position at which the typo was found in the file
+      typo:       the typo
+      fix:        a suggested fix
+    """
+
+    filepath: pathlib.Path
+    filerange: FileRange
+    typo: str
+    fix: str
+
+    def __init__(
+        self,
+        filepath: pathlib.Path,
+        filerange: FileRange,
+        typo: str,
+        fix: str,  # pylint: disable=redefined-outer-name
+    ):
+        self.filepath = filepath
+        self.filerange = filerange
+        self.typo = typo
+        self.fix = fix
+
+    def __str__(self) -> str:
+        return f"{self.filepath}:" + str(self.filerange) + f": {self.summary()}"
+
+    def summary(self) -> str:
+        return (
+            f'Found potentially misspelled directive "{self.typo}". Did you mean '
+            f'"{self.fix}"?'
+        )
 
 
 def find_potential_directives(
-    content: str,) -> Generator[Tuple[FileRange, str], None, None]:
-  """Extracts all the potential FileCheck directives from a string.
+    content: str,
+) -> Generator[Tuple[FileRange, str], None, None]:
+    """Extracts all the potential FileCheck directives from a string.
 
-  What constitutes a potential directive is loosely defined---we err on the side
-  of capturing more strings than is necessary, rather than missing any.
+    What constitutes a potential directive is loosely defined---we err on the side
+    of capturing more strings than is necessary, rather than missing any.
 
-  Args:
-    content: the string in which to look for directives
+    Args:
+      content: the string in which to look for directives
 
-  Yields:
-    Tuples (p, d) where p is the span where the potential directive occurs
-    within the string and d is the potential directive.
-  """
-  directive_pattern = re.compile(
-      r'(?:^|//|;|#)[^\d\w\-_]*([\d\w\-_][\s\d\w\-_]*):', re.MULTILINE)
-  for match in re.finditer(directive_pattern, content):
-    potential_directive, span = match.group(1), match.span(1)
-    yield (FileRange(content, span[0], span[1]), potential_directive)
+    Yields:
+      Tuples (p, d) where p is the span where the potential directive occurs
+      within the string and d is the potential directive.
+    """
+    directive_pattern = re.compile(
+        r"(?:^|//|;|#)[^\d\w\-_]*([\d\w\-_][\s\d\w\-_]*):", re.MULTILINE
+    )
+    for match in re.finditer(directive_pattern, content):
+        potential_directive, span = match.group(1), match.span(1)
+        yield (FileRange(content, span[0], span[1]), potential_directive)
 
 
 # TODO(bchetioui): also parse comment prefixes to ignore.
-def parse_custom_prefixes(content: str) -> Generator[str, None, None]:  # pylint: disable=g-doc-args
-  """Parses custom prefixes defined in the string provided.
+def parse_custom_prefixes(
+    content: str,
+) -> Generator[str, None, None]:  # pylint: disable=g-doc-args
+    """Parses custom prefixes defined in the string provided.
 
-  For example, given the following file content:
-    RUN: something | FileCheck %s -check-prefixes CHECK1,CHECK2
-    RUN: something_else | FileCheck %s -check-prefix 'CHECK3'
+    For example, given the following file content:
+      RUN: something | FileCheck %s -check-prefixes CHECK1,CHECK2
+      RUN: something_else | FileCheck %s -check-prefix 'CHECK3'
 
-  the custom prefixes are CHECK1, CHECK2, and CHECK3.
-  """
-  param_re = r'|'.join([r"'[^']*'", r'"[^"]*"', r'[^\'"\s]+'])
-  for m in re.finditer(r'-check-prefix(?:es)?(?:\s+|=)({})'.format(param_re),
-                       content):
-    prefixes = m.group(1)
-    if prefixes.startswith('\'') or prefixes.startswith('"'):
-      prefixes = prefixes[1:-1]
-    for prefix in prefixes.split(','):
-      yield prefix
+    the custom prefixes are CHECK1, CHECK2, and CHECK3.
+    """
+    param_re = r"|".join([r"'[^']*'", r'"[^"]*"', r'[^\'"\s]+'])
+    for m in re.finditer(
+        r"-check-prefix(?:es)?(?:\s+|=)({})".format(param_re), content
+    ):
+        prefixes = m.group(1)
+        if prefixes.startswith("'") or prefixes.startswith('"'):
+            prefixes = prefixes[1:-1]
+        for prefix in prefixes.split(","):
+            yield prefix
 
 
 def find_directive_typos(
@@ -181,71 +193,79 @@ def find_directive_typos(
     filepath: pathlib.Path,
     threshold: int = 3,
 ) -> Generator[Diagnostic, None, None]:
-  """Detects potential typos in FileCheck directives.
-
-  Args:
-    content: the content of the file
-    filepath: the path to the file to check for typos in directives
-    threshold: the (inclusive) maximum edit distance between a potential
-      directive and an actual directive, such that the potential directive is
-      classified as a typo
-
-  Yields:
-    Diagnostics, in order from the top of the file.
-  """
-  all_prefixes = _prefixes.union(set(parse_custom_prefixes(content)))
-  all_directives = ([
-      f'{prefix}{suffix}'
-      for prefix, suffix in itertools.product(all_prefixes, _suffixes)
-  ] + list(_ignore) + list(all_prefixes))
-
-  def find_best_match(typo):
-    return min(
-        [(threshold + 1, typo)] + [(levenshtein(typo, d), d)
-                                   for d in all_directives
-                                   if abs(len(d) - len(typo)) <= threshold],
-        key=lambda tup: tup[0],
-    )
+    """Detects potential typos in FileCheck directives.
 
-  potential_directives = find_potential_directives(content)
+    Args:
+      content: the content of the file
+      filepath: the path to the file to check for typos in directives
+      threshold: the (inclusive) maximum edit distance between a potential
+        directive and an actual directive, such that the potential directive is
+        classified as a typo
 
-  for filerange, potential_directive in potential_directives:
-    # TODO(bchetioui): match count directives more finely. We skip directives
-    # starting with 'CHECK-COUNT-' for the moment as they require more complex
-    # logic to be handled correctly.
-    if any(
-        potential_directive.startswith(f'{prefix}-COUNT-')
-        for prefix in all_prefixes):
-      continue
-
-    # Ignoring potential typos that will not be matched later due to a too low
-    # threshold, in order to avoid potentially long computation times.
-    if len(potential_directive) > max(map(len, all_directives)) + threshold:
-      continue
+    Yields:
+      Diagnostics, in order from the top of the file.
+    """
+    all_prefixes = _prefixes.union(set(parse_custom_prefixes(content)))
+    all_directives = (
+        [
+            f"{prefix}{suffix}"
+            for prefix, suffix in itertools.product(all_prefixes, _suffixes)
+        ]
+        + list(_ignore)
+        + list(all_prefixes)
+    )
 
-    score, best_match = find_best_match(potential_directive)
-    if score == 0:  # This is an actual directive, ignore.
-      continue
-    elif score <= threshold and best_match not in _ignore:
-      yield Diagnostic(filepath, filerange, potential_directive, best_match)
+    def find_best_match(typo):
+        return min(
+            [(threshold + 1, typo)]
+            + [
+                (levenshtein(typo, d), d)
+                for d in all_directives
+                if abs(len(d) - len(typo)) <= threshold
+            ],
+            key=lambda tup: tup[0],
+        )
+
+    potential_directives = find_potential_directives(content)
+
+    for filerange, potential_directive in potential_directives:
+        # TODO(bchetioui): match count directives more finely. We skip directives
+        # starting with 'CHECK-COUNT-' for the moment as they require more complex
+        # logic to be handled correctly.
+        if any(
+            potential_directive.startswith(f"{prefix}-COUNT-")
+            for prefix in all_prefixes
+        ):
+            continue
+
+        # Ignoring potential typos that will not be matched later due to a too low
+        # threshold, in order to avoid potentially long computation times.
+        if len(potential_directive) > max(map(len, all_directives)) + threshold:
+            continue
+
+        score, best_match = find_best_match(potential_directive)
+        if score == 0:  # This is an actual directive, ignore.
+            continue
+        elif score <= threshold and best_match not in _ignore:
+            yield Diagnostic(filepath, filerange, potential_directive, best_match)
 
 
 def main(argv: Sequence[str]):
-  if len(argv) < 2:
-    print(f'Usage: {argv[0]} path/to/file/1 ... path/to/file/n')
-    exit(1)
-
-  for filepath in argv[1:]:
-    logging.info('Checking %s', filepath)
-    with open(filepath, 'rt') as f:
-      content = f.read()
-    for diagnostic in find_directive_typos(
-        content,
-        pathlib.Path(filepath),
-        threshold=_distance_threshold,
-    ):
-      print(diagnostic)
-
-
-if __name__ == '__main__':
-  main(sys.argv)
+    if len(argv) < 2:
+        print(f"Usage: {argv[0]} path/to/file/1 ... path/to/file/n")
+        exit(1)
+
+    for filepath in argv[1:]:
+        logging.info("Checking %s", filepath)
+        with open(filepath, "rt") as f:
+            content = f.read()
+        for diagnostic in find_directive_typos(
+            content,
+            pathlib.Path(filepath),
+            threshold=_distance_threshold,
+        ):
+            print(diagnostic)
+
+
+if __name__ == "__main__":
+    main(sys.argv)
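
For context on the linter being reformatted here: levenshtein() is the standard dynamic-programming edit distance, and find_directive_typos() reports any candidate directive whose distance to a known directive is nonzero but within the threshold (3 by default). A hedged usage sketch, assuming filecheck_lint.py is importable from the current directory; the misspelled directive is an invented example:

import filecheck_lint as fcl

# One insertion away from "CHECK-NEXT", so it falls within the default
# threshold of 3 and would be reported with the suggested fix "CHECK-NEXT".
assert fcl.levenshtein("CHCK-NEXT", "CHECK-NEXT") == 1
# Distance 0 marks an actual directive; the linter skips those.
assert fcl.levenshtein("CHECK-NEXT", "CHECK-NEXT") == 0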

diff --git a/llvm/utils/filecheck_lint/filecheck_lint_test.py b/llvm/utils/filecheck_lint/filecheck_lint_test.py
index ddb2c0768c4f8..16f381d5b0455 100644
--- a/llvm/utils/filecheck_lint/filecheck_lint_test.py
+++ b/llvm/utils/filecheck_lint/filecheck_lint_test.py
@@ -11,68 +11,72 @@
 
 
 class TestParser(unittest.TestCase):
+    def test_parse_all_additional_prefixes(self):
+        def run(content, expected_prefixes):
+            prefixes = set(fcl.parse_custom_prefixes(content))
+            for prefix in expected_prefixes:
+                self.assertIn(prefix, prefixes)
 
-  def test_parse_all_additional_prefixes(self):
-
-    def run(content, expected_prefixes):
-      prefixes = set(fcl.parse_custom_prefixes(content))
-      for prefix in expected_prefixes:
-        self.assertIn(prefix, prefixes)
-
-    for content, expected_prefixes in [
-        ('-check-prefix=PREFIX', {'PREFIX'}),
-        ('-check-prefix=\'PREFIX\'', {'PREFIX'}),
-        ('-check-prefix="PREFIX"', {'PREFIX'}),
-        ('-check-prefix PREFIX', {'PREFIX'}),
-        ('-check-prefix      PREFIX', {'PREFIX'}),
-        ('-check-prefixes=PREFIX1,PREFIX2', {'PREFIX1', 'PREFIX2'}),
-        ('-check-prefixes PREFIX1,PREFIX2', {'PREFIX1', 'PREFIX2'}),
-        (
-            """-check-prefix=PREFIX1 -check-prefix PREFIX2
+        for content, expected_prefixes in [
+            ("-check-prefix=PREFIX", {"PREFIX"}),
+            ("-check-prefix='PREFIX'", {"PREFIX"}),
+            ('-check-prefix="PREFIX"', {"PREFIX"}),
+            ("-check-prefix PREFIX", {"PREFIX"}),
+            ("-check-prefix      PREFIX", {"PREFIX"}),
+            ("-check-prefixes=PREFIX1,PREFIX2", {"PREFIX1", "PREFIX2"}),
+            ("-check-prefixes PREFIX1,PREFIX2", {"PREFIX1", "PREFIX2"}),
+            (
+                """-check-prefix=PREFIX1 -check-prefix PREFIX2
             -check-prefixes=PREFIX3,PREFIX4 -check-prefix=PREFIX5
             -check-prefixes PREFIX6,PREFIX7 -check-prefixes=PREFIX8',
          """,  # pylint: disable=bad-continuation
-            {f'PREFIX{i}' for i in range(1, 9)}),
-    ]:
-      run(content, expected_prefixes)
+                {f"PREFIX{i}" for i in range(1, 9)},
+            ),
+        ]:
+            run(content, expected_prefixes)
 
-  def test_additional_prefixes_uniquely(self):
-    lines = ['--check-prefix=SOME-PREFIX', '--check-prefix=SOME-PREFIX']
-    prefixes = set(fcl.parse_custom_prefixes('\n'.join(lines)))
-    assert len(prefixes) == 1
+    def test_additional_prefixes_uniquely(self):
+        lines = ["--check-prefix=SOME-PREFIX", "--check-prefix=SOME-PREFIX"]
+        prefixes = set(fcl.parse_custom_prefixes("\n".join(lines)))
+        assert len(prefixes) == 1
 
 
 class TestTypoDetection(unittest.TestCase):
+    def test_find_potential_directives_comment_prefix(self):
+        lines = ["junk; CHCK1:", "junk// CHCK2:", "SOME CHCK3:"]
+        content = "\n".join(lines)
 
-  def test_find_potential_directives_comment_prefix(self):
-    lines = ['junk; CHCK1:', 'junk// CHCK2:', 'SOME CHCK3:']
-    content = '\n'.join(lines)
-
-    results = list(fcl.find_potential_directives(content))
-    assert len(results) == 3
-    pos, match = results[0]
-    assert (pos.line == 1 and
-            pos.start_column == len('junk; ') + 1 and
-            pos.end_column == len(lines[0]) - 1)
-    assert match == 'CHCK1'
+        results = list(fcl.find_potential_directives(content))
+        assert len(results) == 3
+        pos, match = results[0]
+        assert (
+            pos.line == 1
+            and pos.start_column == len("junk; ") + 1
+            and pos.end_column == len(lines[0]) - 1
+        )
+        assert match == "CHCK1"
 
-    pos, match = results[1]
-    assert (pos.line == 2 and
-            pos.start_column == len('junk// ') + 1 and
-            pos.end_column == len(lines[1]) - 1)
-    assert match == 'CHCK2'
+        pos, match = results[1]
+        assert (
+            pos.line == 2
+            and pos.start_column == len("junk// ") + 1
+            and pos.end_column == len(lines[1]) - 1
+        )
+        assert match == "CHCK2"
 
-    pos, match = results[2]
-    assert (pos.line == 3 and
-            pos.start_column == 1 and
-            pos.end_column == len(lines[2]) - 1)
-    assert match == 'SOME CHCK3'
+        pos, match = results[2]
+        assert (
+            pos.line == 3
+            and pos.start_column == 1
+            and pos.end_column == len(lines[2]) - 1
+        )
+        assert match == "SOME CHCK3"
 
-  def test_levenshtein(self):
-    for s1, s2, distance in [
-        ('Levenshtein', 'Levenstin', 2),  # 2 insertions
-        ('Levenshtein', 'Levenstherin', 3),  # 1 insertion, 2 deletions
-        ('Levenshtein', 'Lenvinshtein', 2),  # 1 deletion, 1 substitution
-        ('Levenshtein', 'Levenshtein', 0),  # identical strings
-    ]:
-      assert fcl.levenshtein(s1, s2) == distance
+    def test_levenshtein(self):
+        for s1, s2, distance in [
+            ("Levenshtein", "Levenstin", 2),  # 2 insertions
+            ("Levenshtein", "Levenstherin", 3),  # 1 insertion, 2 deletions
+            ("Levenshtein", "Lenvinshtein", 2),  # 1 deletion, 1 substitution
+            ("Levenshtein", "Levenshtein", 0),  # identical strings
+        ]:
+            assert fcl.levenshtein(s1, s2) == distance
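
The position assertions in these tests follow from FileRange's byte-to-coordinate arithmetic: columns are one-based and measured from the last newline before the span. A small sketch of the same behaviour, again assuming filecheck_lint is importable:

import filecheck_lint as fcl

filerange, directive = next(fcl.find_potential_directives("junk; CHCK1:"))
print(directive)       # CHCK1
print(str(filerange))  # 1:7-11 -- column 7 is the 'C', column 11 the '1'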

diff --git a/llvm/utils/gdb-scripts/prettyprinters.py b/llvm/utils/gdb-scripts/prettyprinters.py
index 1fdf4fe781c74..1016467fc0974 100644
--- a/llvm/utils/gdb-scripts/prettyprinters.py
+++ b/llvm/utils/gdb-scripts/prettyprinters.py
@@ -5,495 +5,550 @@
 import gdb.printing
 import gdb.types
 
+
 class Iterator:
-  def __iter__(self):
-    return self
+    def __iter__(self):
+        return self
+
+    if sys.version_info.major == 2:
 
-  if sys.version_info.major == 2:
-      def next(self):
-        return self.__next__()
+        def next(self):
+            return self.__next__()
+
+    def children(self):
+        return self
 
-  def children(self):
-    return self
 
 class SmallStringPrinter:
-  """Print an llvm::SmallString object."""
+    """Print an llvm::SmallString object."""
+
+    def __init__(self, val):
+        self.val = val
 
-  def __init__(self, val):
-    self.val = val
+    def to_string(self):
+        data = self.val["BeginX"].cast(gdb.lookup_type("char").pointer())
+        length = self.val["Size"]
+        return data.lazy_string(length=length)
 
-  def to_string(self):
-    data = self.val['BeginX'].cast(gdb.lookup_type('char').pointer())
-    length = self.val['Size']
-    return data.lazy_string(length=length)
+    def display_hint(self):
+        return "string"
 
-  def display_hint (self):
-    return 'string'
 
 class StringRefPrinter:
-  """Print an llvm::StringRef object."""
+    """Print an llvm::StringRef object."""
 
-  def __init__(self, val):
-    self.val = val
+    def __init__(self, val):
+        self.val = val
 
-  def to_string(self):
-    data = self.val['Data']
-    length = self.val['Length']
-    return data.lazy_string(length=length)
+    def to_string(self):
+        data = self.val["Data"]
+        length = self.val["Length"]
+        return data.lazy_string(length=length)
+
+    def display_hint(self):
+        return "string"
 
-  def display_hint(self):
-    return 'string'
 
 class SmallVectorPrinter(Iterator):
-  """Print an llvm::SmallVector object."""
+    """Print an llvm::SmallVector object."""
+
+    def __init__(self, val):
+        self.val = val
+        t = val.type.template_argument(0).pointer()
+        self.begin = val["BeginX"].cast(t)
+        self.size = val["Size"]
+        self.i = 0
 
-  def __init__(self, val):
-    self.val = val
-    t = val.type.template_argument(0).pointer()
-    self.begin = val['BeginX'].cast(t)
-    self.size = val['Size']
-    self.i = 0
+    def __next__(self):
+        if self.i == self.size:
+            raise StopIteration
+        ret = "[{}]".format(self.i), (self.begin + self.i).dereference()
+        self.i += 1
+        return ret
 
-  def __next__(self):
-    if self.i == self.size:
-      raise StopIteration
-    ret = '[{}]'.format(self.i), (self.begin+self.i).dereference()
-    self.i += 1
-    return ret
+    def to_string(self):
+        return "llvm::SmallVector of Size {}, Capacity {}".format(
+            self.size, self.val["Capacity"]
+        )
 
-  def to_string(self):
-    return 'llvm::SmallVector of Size {}, Capacity {}'.format(self.size, self.val['Capacity'])
+    def display_hint(self):
+        return "array"
 
-  def display_hint (self):
-    return 'array'
 
 class ArrayRefPrinter:
-  """Print an llvm::ArrayRef object."""
+    """Print an llvm::ArrayRef object."""
 
-  class _iterator:
-    def __init__(self, begin, end):
-      self.cur = begin
-      self.end = end
-      self.count = 0
+    class _iterator:
+        def __init__(self, begin, end):
+            self.cur = begin
+            self.end = end
+            self.count = 0
 
-    def __iter__(self):
-      return self
+        def __iter__(self):
+            return self
 
-    def __next__(self):
-      if self.cur == self.end:
-        raise StopIteration
-      count = self.count
-      self.count = self.count + 1
-      cur = self.cur
-      self.cur = self.cur + 1
-      return '[%d]' % count, cur.dereference()
+        def __next__(self):
+            if self.cur == self.end:
+                raise StopIteration
+            count = self.count
+            self.count = self.count + 1
+            cur = self.cur
+            self.cur = self.cur + 1
+            return "[%d]" % count, cur.dereference()
 
-    if sys.version_info.major == 2:
-        next = __next__
+        if sys.version_info.major == 2:
+            next = __next__
 
-  def __init__(self, val):
-    self.val = val
+    def __init__(self, val):
+        self.val = val
 
-  def children(self):
-    data = self.val['Data']
-    return self._iterator(data, data + self.val['Length'])
+    def children(self):
+        data = self.val["Data"]
+        return self._iterator(data, data + self.val["Length"])
 
-  def to_string(self):
-    return 'llvm::ArrayRef of length %d' % (self.val['Length'])
+    def to_string(self):
+        return "llvm::ArrayRef of length %d" % (self.val["Length"])
+
+    def display_hint(self):
+        return "array"
 
-  def display_hint (self):
-    return 'array'
 
 class ExpectedPrinter(Iterator):
-  """Print an llvm::Expected object."""
+    """Print an llvm::Expected object."""
 
-  def __init__(self, val):
-    self.val = val
+    def __init__(self, val):
+        self.val = val
 
-  def __next__(self):
-    val = self.val
-    if val is None:
-      raise StopIteration
-    self.val = None
-    if val['HasError']:
-      return ('error', val['ErrorStorage'].address.cast(
-          gdb.lookup_type('llvm::ErrorInfoBase').pointer()).dereference())
-    return ('value', val['TStorage'].address.cast(
-        val.type.template_argument(0).pointer()).dereference())
+    def __next__(self):
+        val = self.val
+        if val is None:
+            raise StopIteration
+        self.val = None
+        if val["HasError"]:
+            return (
+                "error",
+                val["ErrorStorage"]
+                .address.cast(gdb.lookup_type("llvm::ErrorInfoBase").pointer())
+                .dereference(),
+            )
+        return (
+            "value",
+            val["TStorage"]
+            .address.cast(val.type.template_argument(0).pointer())
+            .dereference(),
+        )
+
+    def to_string(self):
+        return "llvm::Expected{}".format(" is error" if self.val["HasError"] else "")
 
-  def to_string(self):
-    return 'llvm::Expected{}'.format(' is error' if self.val['HasError'] else '')
 
 class OptionalPrinter(Iterator):
-  """Print an llvm::Optional object."""
-
-  def __init__(self, val):
-    self.val = val
-
-  def __next__(self):
-    val = self.val
-    if val is None:
-      raise StopIteration
-    self.val = None
-    if not val['Storage']['hasVal']:
-      raise StopIteration
-    return ('value', val['Storage']['val'])
+    """Print an llvm::Optional object."""
 
-  def to_string(self):
-    return 'llvm::Optional{}'.format('' if self.val['Storage']['hasVal'] else ' is not initialized')
-
-class DenseMapPrinter:
-  "Print a DenseMap"
-
-  class _iterator:
-    def __init__(self, key_info_t, begin, end):
-      self.key_info_t = key_info_t
-      self.cur = begin
-      self.end = end
-      self.advancePastEmptyBuckets()
-      self.first = True
-
-    def __iter__(self):
-      return self
-
-    def advancePastEmptyBuckets(self):
-      # disabled until the comments below can be addressed
-      # keeping as notes/posterity/hints for future contributors
-      return
-      n = self.key_info_t.name
-      is_equal = gdb.parse_and_eval(n + '::isEqual')
-      empty = gdb.parse_and_eval(n + '::getEmptyKey()')
-      tombstone = gdb.parse_and_eval(n + '::getTombstoneKey()')
-      # the following is invalid, GDB fails with:
-      #   Python Exception <class 'gdb.error'> Attempt to take address of value
-      #   not located in memory.
-      # because isEqual took parameter (for the unsigned long key I was testing)
-      # by const ref, and GDB
-      # It's also not entirely general - we should be accessing the "getFirst()"
-      # member function, not the 'first' member variable, but I've yet to figure
-      # out how to find/call member functions (especially (const) overloaded
-      # ones) on a gdb.Value.
-      while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
-        self.cur = self.cur + 1
+    def __init__(self, val):
+        self.val = val
 
     def __next__(self):
-      if self.cur == self.end:
-        raise StopIteration
-      cur = self.cur
-      v = cur.dereference()['first' if self.first else 'second']
-      if not self.first:
-        self.cur = self.cur + 1
-        self.advancePastEmptyBuckets()
-        self.first = True
-      else:
-        self.first = False
-      return 'x', v
-
-    if sys.version_info.major == 2:
-        next = __next__
+        val = self.val
+        if val is None:
+            raise StopIteration
+        self.val = None
+        if not val["Storage"]["hasVal"]:
+            raise StopIteration
+        return ("value", val["Storage"]["val"])
 
-  def __init__(self, val):
-    self.val = val
+    def to_string(self):
+        return "llvm::Optional{}".format(
+            "" if self.val["Storage"]["hasVal"] else " is not initialized"
+        )
 
-  def children(self):
-    t = self.val.type.template_argument(3).pointer()
-    begin = self.val['Buckets'].cast(t)
-    end = (begin + self.val['NumBuckets']).cast(t)
-    return self._iterator(self.val.type.template_argument(2), begin, end)
 
-  def to_string(self):
-    return 'llvm::DenseMap with %d elements' % (self.val['NumEntries'])
+class DenseMapPrinter:
+    "Print a DenseMap"
+
+    class _iterator:
+        def __init__(self, key_info_t, begin, end):
+            self.key_info_t = key_info_t
+            self.cur = begin
+            self.end = end
+            self.advancePastEmptyBuckets()
+            self.first = True
+
+        def __iter__(self):
+            return self
+
+        def advancePastEmptyBuckets(self):
+            # disabled until the comments below can be addressed
+            # keeping as notes/posterity/hints for future contributors
+            return
+            n = self.key_info_t.name
+            is_equal = gdb.parse_and_eval(n + "::isEqual")
+            empty = gdb.parse_and_eval(n + "::getEmptyKey()")
+            tombstone = gdb.parse_and_eval(n + "::getTombstoneKey()")
+            # the following is invalid, GDB fails with:
+            #   Python Exception <class 'gdb.error'> Attempt to take address of value
+            #   not located in memory.
+            # because isEqual took parameter (for the unsigned long key I was testing)
+            # by const ref, and GDB
+            # It's also not entirely general - we should be accessing the "getFirst()"
+            # member function, not the 'first' member variable, but I've yet to figure
+            # out how to find/call member functions (especially (const) overloaded
+            # ones) on a gdb.Value.
+            while self.cur != self.end and (
+                is_equal(self.cur.dereference()["first"], empty)
+                or is_equal(self.cur.dereference()["first"], tombstone)
+            ):
+                self.cur = self.cur + 1
+
+        def __next__(self):
+            if self.cur == self.end:
+                raise StopIteration
+            cur = self.cur
+            v = cur.dereference()["first" if self.first else "second"]
+            if not self.first:
+                self.cur = self.cur + 1
+                self.advancePastEmptyBuckets()
+                self.first = True
+            else:
+                self.first = False
+            return "x", v
+
+        if sys.version_info.major == 2:
+            next = __next__
+
+    def __init__(self, val):
+        self.val = val
+
+    def children(self):
+        t = self.val.type.template_argument(3).pointer()
+        begin = self.val["Buckets"].cast(t)
+        end = (begin + self.val["NumBuckets"]).cast(t)
+        return self._iterator(self.val.type.template_argument(2), begin, end)
+
+    def to_string(self):
+        return "llvm::DenseMap with %d elements" % (self.val["NumEntries"])
+
+    def display_hint(self):
+        return "map"
 
-  def display_hint(self):
-    return 'map'
 
 class StringMapPrinter:
-  "Print a StringMap"
+    "Print a StringMap"
 
-  def __init__(self, val):
-    self.val = val
+    def __init__(self, val):
+        self.val = val
 
-  def children(self):
-    it = self.val['TheTable']
-    end = (it + self.val['NumBuckets'])
-    value_ty = self.val.type.template_argument(0)
-    entry_base_ty = gdb.lookup_type('llvm::StringMapEntryBase')
-    tombstone = gdb.parse_and_eval('llvm::StringMapImpl::TombstoneIntVal');
+    def children(self):
+        it = self.val["TheTable"]
+        end = it + self.val["NumBuckets"]
+        value_ty = self.val.type.template_argument(0)
+        entry_base_ty = gdb.lookup_type("llvm::StringMapEntryBase")
+        tombstone = gdb.parse_and_eval("llvm::StringMapImpl::TombstoneIntVal")
 
-    while it != end:
-      it_deref = it.dereference()
-      if it_deref == 0 or it_deref == tombstone:
-        it = it + 1
-        continue
+        while it != end:
+            it_deref = it.dereference()
+            if it_deref == 0 or it_deref == tombstone:
+                it = it + 1
+                continue
 
-      entry_ptr = it_deref.cast(entry_base_ty.pointer())
-      entry = entry_ptr.dereference()
+            entry_ptr = it_deref.cast(entry_base_ty.pointer())
+            entry = entry_ptr.dereference()
 
-      str_len = entry['keyLength']
-      value_ptr = (entry_ptr + 1).cast(value_ty.pointer())
-      str_data = (entry_ptr + 1).cast(gdb.lookup_type('uintptr_t')) + max(value_ty.sizeof, entry_base_ty.alignof)
-      str_data = str_data.cast(gdb.lookup_type('char').const().pointer())
-      string_ref = gdb.Value(struct.pack('PN', int(str_data), int(str_len)), gdb.lookup_type('llvm::StringRef'))
-      yield 'key', string_ref
+            str_len = entry["keyLength"]
+            value_ptr = (entry_ptr + 1).cast(value_ty.pointer())
+            str_data = (entry_ptr + 1).cast(gdb.lookup_type("uintptr_t")) + max(
+                value_ty.sizeof, entry_base_ty.alignof
+            )
+            str_data = str_data.cast(gdb.lookup_type("char").const().pointer())
+            string_ref = gdb.Value(
+                struct.pack("PN", int(str_data), int(str_len)),
+                gdb.lookup_type("llvm::StringRef"),
+            )
+            yield "key", string_ref
 
-      value = value_ptr.dereference()
-      yield 'value', value
+            value = value_ptr.dereference()
+            yield "value", value
 
-      it = it + 1
+            it = it + 1
 
-  def to_string(self):
-    return 'llvm::StringMap with %d elements' % (self.val['NumItems'])
+    def to_string(self):
+        return "llvm::StringMap with %d elements" % (self.val["NumItems"])
+
+    def display_hint(self):
+        return "map"
 
-  def display_hint(self):
-    return 'map'
 
 class TwinePrinter:
-  "Print a Twine"
+    "Print a Twine"
+
+    def __init__(self, val):
+        self._val = val
 
-  def __init__(self, val):
-    self._val = val
+    def display_hint(self):
+        return "string"
 
-  def display_hint(self):
-    return 'string'
+    def string_from_pretty_printer_lookup(self, val):
+        """Lookup the default pretty-printer for val and use it.
 
-  def string_from_pretty_printer_lookup(self, val):
-    '''Lookup the default pretty-printer for val and use it.
+        If no pretty-printer is defined for the type of val, print an error and
+        return a placeholder string."""
 
-    If no pretty-printer is defined for the type of val, print an error and
-    return a placeholder string.'''
+        pp = gdb.default_visualizer(val)
+        if pp:
+            s = pp.to_string()
 
-    pp = gdb.default_visualizer(val)
-    if pp:
-      s = pp.to_string()
+            # The pretty-printer may return a LazyString instead of an actual Python
+            # string.  Convert it to a Python string.  However, GDB doesn't seem to
+            # register the LazyString type, so we can't check
+            # "type(s) == gdb.LazyString".
+            if "LazyString" in type(s).__name__:
+                s = s.value().string()
 
-      # The pretty-printer may return a LazyString instead of an actual Python
-      # string.  Convert it to a Python string.  However, GDB doesn't seem to
-      # register the LazyString type, so we can't check
-      # "type(s) == gdb.LazyString".
-      if 'LazyString' in type(s).__name__:
-        s = s.value().string()
+        else:
+            print(
+                (
+                    "No pretty printer for {} found. The resulting Twine "
+                    + "representation will be incomplete."
+                ).format(val.type.name)
+            )
+            s = "(missing {})".format(val.type.name)
 
-    else:
-      print(('No pretty printer for {} found. The resulting Twine ' +
-             'representation will be incomplete.').format(val.type.name))
-      s = '(missing {})'.format(val.type.name)
+        return s
 
-    return s
+    def is_twine_kind(self, kind, expected):
+        if not kind.endswith(expected):
+            return False
+        # apparently some GDB versions add the NodeKind:: namespace
+        # (happens for me on GDB 7.11)
+        return kind in (
+            "llvm::Twine::" + expected,
+            "llvm::Twine::NodeKind::" + expected,
+        )
 
-  def is_twine_kind(self, kind, expected):
-    if not kind.endswith(expected):
-      return False
-    # apparently some GDB versions add the NodeKind:: namespace
-    # (happens for me on GDB 7.11)
-    return kind in ('llvm::Twine::' + expected,
-                    'llvm::Twine::NodeKind::' + expected)
+    def string_from_child(self, child, kind):
+        """Return the string representation of the Twine::Child child."""
 
-  def string_from_child(self, child, kind):
-    '''Return the string representation of the Twine::Child child.'''
+        if self.is_twine_kind(kind, "EmptyKind") or self.is_twine_kind(
+            kind, "NullKind"
+        ):
+            return ""
 
-    if self.is_twine_kind(kind, 'EmptyKind') or self.is_twine_kind(kind, 'NullKind'):
-      return ''
+        if self.is_twine_kind(kind, "TwineKind"):
+            return self.string_from_twine_object(child["twine"].dereference())
 
-    if self.is_twine_kind(kind, 'TwineKind'):
-      return self.string_from_twine_object(child['twine'].dereference())
+        if self.is_twine_kind(kind, "CStringKind"):
+            return child["cString"].string()
 
-    if self.is_twine_kind(kind, 'CStringKind'):
-      return child['cString'].string()
+        if self.is_twine_kind(kind, "StdStringKind"):
+            val = child["stdString"].dereference()
+            return self.string_from_pretty_printer_lookup(val)
 
-    if self.is_twine_kind(kind, 'StdStringKind'):
-      val = child['stdString'].dereference()
-      return self.string_from_pretty_printer_lookup(val)
+        if self.is_twine_kind(kind, "PtrAndLengthKind"):
+            val = child["ptrAndLength"]
+            data = val["ptr"]
+            length = val["length"]
+            return data.string(length=length)
 
-    if self.is_twine_kind(kind, 'PtrAndLengthKind'):
-      val = child['ptrAndLength']
-      data = val['ptr']
-      length = val['length']
-      return data.string(length=length)
+        if self.is_twine_kind(kind, "CharKind"):
+            return chr(child["character"])
 
-    if self.is_twine_kind(kind, 'CharKind'):
-      return chr(child['character'])
+        if self.is_twine_kind(kind, "DecUIKind"):
+            return str(child["decUI"])
 
-    if self.is_twine_kind(kind, 'DecUIKind'):
-      return str(child['decUI'])
+        if self.is_twine_kind(kind, "DecIKind"):
+            return str(child["decI"])
 
-    if self.is_twine_kind(kind, 'DecIKind'):
-      return str(child['decI'])
+        if self.is_twine_kind(kind, "DecULKind"):
+            return str(child["decUL"].dereference())
 
-    if self.is_twine_kind(kind, 'DecULKind'):
-      return str(child['decUL'].dereference())
+        if self.is_twine_kind(kind, "DecLKind"):
+            return str(child["decL"].dereference())
 
-    if self.is_twine_kind(kind, 'DecLKind'):
-      return str(child['decL'].dereference())
+        if self.is_twine_kind(kind, "DecULLKind"):
+            return str(child["decULL"].dereference())
 
-    if self.is_twine_kind(kind, 'DecULLKind'):
-      return str(child['decULL'].dereference())
+        if self.is_twine_kind(kind, "DecLLKind"):
+            return str(child["decLL"].dereference())
 
-    if self.is_twine_kind(kind, 'DecLLKind'):
-      return str(child['decLL'].dereference())
+        if self.is_twine_kind(kind, "UHexKind"):
+            val = child["uHex"].dereference()
+            return hex(int(val))
 
-    if self.is_twine_kind(kind, 'UHexKind'):
-      val = child['uHex'].dereference()
-      return hex(int(val))
+        print(
+            (
+                "Unhandled NodeKind {} in Twine pretty-printer. The result will be "
+                "incomplete."
+            ).format(kind)
+        )
 
-    print(('Unhandled NodeKind {} in Twine pretty-printer. The result will be '
-           'incomplete.').format(kind))
+        return "(unhandled {})".format(kind)
 
-    return '(unhandled {})'.format(kind)
+    def string_from_twine_object(self, twine):
+        """Return the string representation of the Twine object twine."""
 
-  def string_from_twine_object(self, twine):
-    '''Return the string representation of the Twine object twine.'''
+        lhs = twine["LHS"]
+        rhs = twine["RHS"]
 
-    lhs = twine['LHS']
-    rhs = twine['RHS']
+        lhs_kind = str(twine["LHSKind"])
+        rhs_kind = str(twine["RHSKind"])
 
-    lhs_kind = str(twine['LHSKind'])
-    rhs_kind = str(twine['RHSKind'])
+        lhs_str = self.string_from_child(lhs, lhs_kind)
+        rhs_str = self.string_from_child(rhs, rhs_kind)
 
-    lhs_str = self.string_from_child(lhs, lhs_kind)
-    rhs_str = self.string_from_child(rhs, rhs_kind)
+        return lhs_str + rhs_str
 
-    return lhs_str + rhs_str
+    def to_string(self):
+        return self.string_from_twine_object(self._val)
 
-  def to_string(self):
-    return self.string_from_twine_object(self._val)
+    def display_hint(self):
+        return "string"
 
-  def display_hint(self):
-    return 'string'
 
 def get_pointer_int_pair(val):
-  """Get tuple from llvm::PointerIntPair."""
-  info_name = val.type.template_argument(4).strip_typedefs().name
-  # Note: this throws a gdb.error if the info type is not used (by means of a
-  # call to getPointer() or similar) in the current translation unit.
-  enum_type = gdb.lookup_type(info_name + '::MaskAndShiftConstants')
-  enum_dict = gdb.types.make_enum_dict(enum_type)
-  ptr_mask = enum_dict[info_name + '::PointerBitMask']
-  int_shift = enum_dict[info_name + '::IntShift']
-  int_mask = enum_dict[info_name + '::IntMask']
-  pair_union = val['Value']
-  pointer = (pair_union & ptr_mask)
-  value = ((pair_union >> int_shift) & int_mask)
-  return (pointer, value)
+    """Get tuple from llvm::PointerIntPair."""
+    info_name = val.type.template_argument(4).strip_typedefs().name
+    # Note: this throws a gdb.error if the info type is not used (by means of a
+    # call to getPointer() or similar) in the current translation unit.
+    enum_type = gdb.lookup_type(info_name + "::MaskAndShiftConstants")
+    enum_dict = gdb.types.make_enum_dict(enum_type)
+    ptr_mask = enum_dict[info_name + "::PointerBitMask"]
+    int_shift = enum_dict[info_name + "::IntShift"]
+    int_mask = enum_dict[info_name + "::IntMask"]
+    pair_union = val["Value"]
+    pointer = pair_union & ptr_mask
+    value = (pair_union >> int_shift) & int_mask
+    return (pointer, value)
+
 
 class PointerIntPairPrinter:
-  """Print a PointerIntPair."""
+    """Print a PointerIntPair."""
+
+    def __init__(self, pointer, value):
+        self.pointer = pointer
+        self.value = value
 
-  def __init__(self, pointer, value):
-    self.pointer = pointer
-    self.value = value
+    def children(self):
+        yield ("pointer", self.pointer)
+        yield ("value", self.value)
 
-  def children(self):
-    yield ('pointer', self.pointer)
-    yield ('value', self.value)
+    def to_string(self):
+        return "(%s, %s)" % (self.pointer.type, self.value.type)
 
-  def to_string(self):
-    return '(%s, %s)' % (self.pointer.type, self.value.type)
 
 def make_pointer_int_pair_printer(val):
-  """Factory for an llvm::PointerIntPair printer."""
-  try:
-    pointer, value = get_pointer_int_pair(val)
-  except gdb.error:
-    return None  # If PointerIntPair cannot be analyzed, print as raw value.
-  pointer_type = val.type.template_argument(0)
-  value_type = val.type.template_argument(2)
-  return PointerIntPairPrinter(pointer.cast(pointer_type),
-                               value.cast(value_type))
+    """Factory for an llvm::PointerIntPair printer."""
+    try:
+        pointer, value = get_pointer_int_pair(val)
+    except gdb.error:
+        return None  # If PointerIntPair cannot be analyzed, print as raw value.
+    pointer_type = val.type.template_argument(0)
+    value_type = val.type.template_argument(2)
+    return PointerIntPairPrinter(pointer.cast(pointer_type), value.cast(value_type))
+
 
 class PointerUnionPrinter:
-  """Print a PointerUnion."""
+    """Print a PointerUnion."""
+
+    def __init__(self, pointer):
+        self.pointer = pointer
 
-  def __init__(self, pointer):
-    self.pointer = pointer
+    def children(self):
+        yield ("pointer", self.pointer)
 
-  def children(self):
-    yield ('pointer', self.pointer)
+    def to_string(self):
+        return "Containing %s" % self.pointer.type
 
-  def to_string(self):
-    return "Containing %s" % self.pointer.type
 
 def make_pointer_union_printer(val):
-  """Factory for an llvm::PointerUnion printer."""
-  try:
-    pointer, value = get_pointer_int_pair(val['Val'])
-  except gdb.error:
-    return None  # If PointerIntPair cannot be analyzed, print as raw value.
-  pointer_type = val.type.template_argument(int(value))
-  return PointerUnionPrinter(pointer.cast(pointer_type))
+    """Factory for an llvm::PointerUnion printer."""
+    try:
+        pointer, value = get_pointer_int_pair(val["Val"])
+    except gdb.error:
+        return None  # If PointerIntPair cannot be analyzed, print as raw value.
+    pointer_type = val.type.template_argument(int(value))
+    return PointerUnionPrinter(pointer.cast(pointer_type))
+
 
 class IlistNodePrinter:
-  """Print an llvm::ilist_node object."""
-
-  def __init__(self, val):
-    impl_type = val.type.fields()[0].type
-    base_type = impl_type.fields()[0].type
-    derived_type = val.type.template_argument(0)
-
-    def get_prev_and_sentinel(base):
-      # One of Prev and PrevAndSentinel exists. Depending on #defines used to
-      # compile LLVM, the base_type's template argument is either true or false.
-      if base_type.template_argument(0):
-        return get_pointer_int_pair(base['PrevAndSentinel'])
-      return base['Prev'], None
-
-    # Casts a base_type pointer to the appropriate derived type.
-    def cast_pointer(pointer):
-      sentinel = get_prev_and_sentinel(pointer.dereference())[1]
-      pointer = pointer.cast(impl_type.pointer())
-      if sentinel:
-          return pointer
-      return pointer.cast(derived_type.pointer())
-
-    # Repeated cast because val.type's base_type is ambiguous when using tags.
-    base = val.cast(impl_type).cast(base_type)
-    (prev, sentinel) = get_prev_and_sentinel(base)
-    prev = prev.cast(base_type.pointer())
-    self.prev = cast_pointer(prev)
-    self.next = cast_pointer(val['Next'])
-    self.sentinel = sentinel
-
-  def children(self):
-    if self.sentinel:
-      yield 'sentinel', 'yes'
-    yield 'prev', self.prev
-    yield 'next', self.next
+    """Print an llvm::ilist_node object."""
+
+    def __init__(self, val):
+        impl_type = val.type.fields()[0].type
+        base_type = impl_type.fields()[0].type
+        derived_type = val.type.template_argument(0)
+
+        def get_prev_and_sentinel(base):
+            # One of Prev and PrevAndSentinel exists. Depending on #defines used to
+            # compile LLVM, the base_type's template argument is either true or false.
+            if base_type.template_argument(0):
+                return get_pointer_int_pair(base["PrevAndSentinel"])
+            return base["Prev"], None
+
+        # Casts a base_type pointer to the appropriate derived type.
+        def cast_pointer(pointer):
+            sentinel = get_prev_and_sentinel(pointer.dereference())[1]
+            pointer = pointer.cast(impl_type.pointer())
+            if sentinel:
+                return pointer
+            return pointer.cast(derived_type.pointer())
+
+        # Repeated cast because val.type's base_type is ambiguous when using tags.
+        base = val.cast(impl_type).cast(base_type)
+        (prev, sentinel) = get_prev_and_sentinel(base)
+        prev = prev.cast(base_type.pointer())
+        self.prev = cast_pointer(prev)
+        self.next = cast_pointer(val["Next"])
+        self.sentinel = sentinel
+
+    def children(self):
+        if self.sentinel:
+            yield "sentinel", "yes"
+        yield "prev", self.prev
+        yield "next", self.next
+
 
 class IlistPrinter:
-  """Print an llvm::simple_ilist or llvm::iplist object."""
+    """Print an llvm::simple_ilist or llvm::iplist object."""
 
-  def __init__(self, val):
-    self.node_type = val.type.template_argument(0)
-    sentinel = val['Sentinel']
-    # First field is common base type of sentinel and ilist_node.
-    base_type = sentinel.type.fields()[0].type
-    self.sentinel = sentinel.address.cast(base_type.pointer())
+    def __init__(self, val):
+        self.node_type = val.type.template_argument(0)
+        sentinel = val["Sentinel"]
+        # First field is common base type of sentinel and ilist_node.
+        base_type = sentinel.type.fields()[0].type
+        self.sentinel = sentinel.address.cast(base_type.pointer())
 
-  def _pointers(self):
-    pointer = self.sentinel
-    while True:
-      pointer = pointer['Next'].cast(pointer.type)
-      if pointer == self.sentinel:
-        return
-      yield pointer.cast(self.node_type.pointer())
+    def _pointers(self):
+        pointer = self.sentinel
+        while True:
+            pointer = pointer["Next"].cast(pointer.type)
+            if pointer == self.sentinel:
+                return
+            yield pointer.cast(self.node_type.pointer())
 
-  def children(self):
-    for k, v in enumerate(self._pointers()):
-      yield ('[%d]' % k, v.dereference())
+    def children(self):
+        for k, v in enumerate(self._pointers()):
+            yield ("[%d]" % k, v.dereference())
 
 
 pp = gdb.printing.RegexpCollectionPrettyPrinter("LLVMSupport")
-pp.add_printer('llvm::SmallString', '^llvm::SmallString<.*>$', SmallStringPrinter)
-pp.add_printer('llvm::StringRef', '^llvm::StringRef$', StringRefPrinter)
-pp.add_printer('llvm::SmallVectorImpl', '^llvm::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
-pp.add_printer('llvm::ArrayRef', '^llvm::(Mutable)?ArrayRef<.*>$', ArrayRefPrinter)
-pp.add_printer('llvm::Expected', '^llvm::Expected<.*>$', ExpectedPrinter)
-pp.add_printer('llvm::Optional', '^llvm::Optional<.*>$', OptionalPrinter)
-pp.add_printer('llvm::DenseMap', '^llvm::DenseMap<.*>$', DenseMapPrinter)
-pp.add_printer('llvm::StringMap', '^llvm::StringMap<.*>$', StringMapPrinter)
-pp.add_printer('llvm::Twine', '^llvm::Twine$', TwinePrinter)
-pp.add_printer('llvm::PointerIntPair', '^llvm::PointerIntPair<.*>$', make_pointer_int_pair_printer)
-pp.add_printer('llvm::PointerUnion', '^llvm::PointerUnion<.*>$', make_pointer_union_printer)
-pp.add_printer('llvm::ilist_node', '^llvm::ilist_node<.*>$', IlistNodePrinter)
-pp.add_printer('llvm::iplist', '^llvm::iplist<.*>$', IlistPrinter)
-pp.add_printer('llvm::simple_ilist', '^llvm::simple_ilist<.*>$', IlistPrinter)
+pp.add_printer("llvm::SmallString", "^llvm::SmallString<.*>$", SmallStringPrinter)
+pp.add_printer("llvm::StringRef", "^llvm::StringRef$", StringRefPrinter)
+pp.add_printer(
+    "llvm::SmallVectorImpl", "^llvm::SmallVector(Impl)?<.*>$", SmallVectorPrinter
+)
+pp.add_printer("llvm::ArrayRef", "^llvm::(Mutable)?ArrayRef<.*>$", ArrayRefPrinter)
+pp.add_printer("llvm::Expected", "^llvm::Expected<.*>$", ExpectedPrinter)
+pp.add_printer("llvm::Optional", "^llvm::Optional<.*>$", OptionalPrinter)
+pp.add_printer("llvm::DenseMap", "^llvm::DenseMap<.*>$", DenseMapPrinter)
+pp.add_printer("llvm::StringMap", "^llvm::StringMap<.*>$", StringMapPrinter)
+pp.add_printer("llvm::Twine", "^llvm::Twine$", TwinePrinter)
+pp.add_printer(
+    "llvm::PointerIntPair", "^llvm::PointerIntPair<.*>$", make_pointer_int_pair_printer
+)
+pp.add_printer(
+    "llvm::PointerUnion", "^llvm::PointerUnion<.*>$", make_pointer_union_printer
+)
+pp.add_printer("llvm::ilist_node", "^llvm::ilist_node<.*>$", IlistNodePrinter)
+pp.add_printer("llvm::iplist", "^llvm::iplist<.*>$", IlistPrinter)
+pp.add_printer("llvm::simple_ilist", "^llvm::simple_ilist<.*>$", IlistPrinter)
 gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
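
One detail worth spelling out in get_pointer_int_pair() above: llvm::PointerIntPair packs both fields into a single word, storing the integer in the pointer's spare low alignment bits, so unpacking is pure mask-and-shift. A standalone sketch with invented constants (the real masks are read from the pair's MaskAndShiftConstants enum at debug time; an 8-byte-aligned pointer leaves three spare low bits):

PTR_MASK = ~0x7    # clear the three low bits to recover the pointer
INT_SHIFT = 0      # all three spare bits hold the int here, so no shift
INT_MASK = 0x7

packed = 0xDEADBEE8 | 0x5                  # hypothetical pointer plus int 5
pointer = packed & PTR_MASK                # -> 0xDEADBEE8
value = (packed >> INT_SHIFT) & INT_MASK   # -> 5
assert (pointer, value) == (0xDEADBEE8, 5)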

diff --git a/llvm/utils/git/github-automation.py b/llvm/utils/git/github-automation.py
index 5d84a7a9d26c0..1790de1152a93 100755
--- a/llvm/utils/git/github-automation.py
+++ b/llvm/utils/git/github-automation.py
@@ -9,7 +9,7 @@
 # ==-------------------------------------------------------------------------==#
 
 import argparse
-from git import Repo # type: ignore
+from git import Repo  # type: ignore
 import github
 import os
 import re
@@ -18,8 +18,7 @@
 import time
 from typing import List, Optional
 
-beginner_comment = \
-"""
+beginner_comment = """
 Hi!
 
 This issue may be a good introductory issue for people new to working on LLVM. If you would like to work on this issue, your first steps are:
@@ -39,54 +38,58 @@
 If you have any further questions about this issue, don't hesitate to ask via a comment on this Github issue.
 """
 
-class IssueSubscriber:
 
+class IssueSubscriber:
     @property
     def team_name(self) -> str:
         return self._team_name
 
-    def __init__(self, token:str, repo:str, issue_number:int, label_name:str):
+    def __init__(self, token: str, repo: str, issue_number: int, label_name: str):
         self.repo = github.Github(token).get_repo(repo)
         self.org = github.Github(token).get_organization(self.repo.organization.login)
         self.issue = self.repo.get_issue(issue_number)
-        self._team_name = 'issue-subscribers-{}'.format(label_name).lower()
+        self._team_name = "issue-subscribers-{}".format(label_name).lower()
 
     def run(self) -> bool:
         for team in self.org.get_teams():
             if self.team_name != team.name.lower():
                 continue
 
-            comment = ''
-            if team.slug == 'issue-subscribers-good-first-issue':
-                comment = '{}\n'.format(beginner_comment)
+            comment = ""
+            if team.slug == "issue-subscribers-good-first-issue":
+                comment = "{}\n".format(beginner_comment)
 
-            comment += '@llvm/{}'.format(team.slug)
+            comment += "@llvm/{}".format(team.slug)
             self.issue.create_comment(comment)
             return True
         return False
 
-def setup_llvmbot_git(git_dir = '.'):
+
+def setup_llvmbot_git(git_dir="."):
     """
     Configure the git repo in `git_dir` with the llvmbot account so
     commits are attributed to llvmbot.
     """
     repo = Repo(git_dir)
     with repo.config_writer() as config:
-        config.set_value('user', 'name', 'llvmbot')
-        config.set_value('user', 'email', 'llvmbot at llvm.org')
+        config.set_value("user", "name", "llvmbot")
+        config.set_value("user", "email", "llvmbot at llvm.org")
+
 
-def phab_api_call(phab_token:str, url:str, args:dict) -> dict:
+def phab_api_call(phab_token: str, url: str, args: dict) -> dict:
     """
     Make an API call to the Phabricator web service and return a dictionary
     containing the json response.
     """
-    data = { "api.token" : phab_token }
+    data = {"api.token": phab_token}
     data.update(args)
-    response = requests.post(url, data = data)
+    response = requests.post(url, data=data)
     return response.json()
 
 
-def phab_login_to_github_login(phab_token:str, repo:github.Repository.Repository, phab_login:str) -> Optional[str]:
+def phab_login_to_github_login(
+    phab_token: str, repo: github.Repository.Repository, phab_login: str
+) -> Optional[str]:
     """
     Tries to translate a Phabricator login to a github login by
     finding a commit made in Phabricator's Differential.
@@ -99,19 +102,21 @@ def phab_login_to_github_login(phab_token:str, repo:github.Repository.Repository
     """
 
     args = {
-        "constraints[authors][0]" : phab_login,
+        "constraints[authors][0]": phab_login,
         # PHID for "LLVM Github Monorepo" repository
-        "constraints[repositories][0]" : "PHID-REPO-f4scjekhnkmh7qilxlcy",
-        "limit" : 1
+        "constraints[repositories][0]": "PHID-REPO-f4scjekhnkmh7qilxlcy",
+        "limit": 1,
     }
     # API documentation: https://reviews.llvm.org/conduit/method/diffusion.commit.search/
-    r = phab_api_call(phab_token, "https://reviews.llvm.org/api/diffusion.commit.search", args)
-    data = r['result']['data']
+    r = phab_api_call(
+        phab_token, "https://reviews.llvm.org/api/diffusion.commit.search", args
+    )
+    data = r["result"]["data"]
     if len(data) == 0:
         # Can't find any commits associated with this user
         return None
 
-    commit_sha = data[0]['fields']['identifier']
+    commit_sha = data[0]["fields"]["identifier"]
     committer = repo.get_commit(commit_sha).committer
     if not committer:
         # This committer had an email address GitHub could not recognize, so
@@ -120,36 +125,39 @@ def phab_login_to_github_login(phab_token:str, repo:github.Repository.Repository
         return None
     return committer.login
 
-def phab_get_commit_approvers(phab_token:str, commit:github.Commit.Commit) -> list:
-    args = { "corpus" : commit.commit.message }
+
+def phab_get_commit_approvers(phab_token: str, commit: github.Commit.Commit) -> list:
+    args = {"corpus": commit.commit.message}
     # API documentation: https://reviews.llvm.org/conduit/method/differential.parsecommitmessage/
-    r = phab_api_call(phab_token, "https://reviews.llvm.org/api/differential.parsecommitmessage", args)
-    review_id = r['result']['revisionIDFieldInfo']['value']
+    r = phab_api_call(
+        phab_token, "https://reviews.llvm.org/api/differential.parsecommitmessage", args
+    )
+    review_id = r["result"]["revisionIDFieldInfo"]["value"]
     if not review_id:
         # No Phabricator revision for this commit
         return []
 
-    args = {
-        'constraints[ids][0]' : review_id,
-        'attachments[reviewers]' : True
-    }
+    args = {"constraints[ids][0]": review_id, "attachments[reviewers]": True}
     # API documentation: https://reviews.llvm.org/conduit/method/differential.revision.search/
-    r = phab_api_call(phab_token, "https://reviews.llvm.org/api/differential.revision.search", args)
-    reviewers = r['result']['data'][0]['attachments']['reviewers']['reviewers']
+    r = phab_api_call(
+        phab_token, "https://reviews.llvm.org/api/differential.revision.search", args
+    )
+    reviewers = r["result"]["data"][0]["attachments"]["reviewers"]["reviewers"]
     accepted = []
     for reviewer in reviewers:
-        if reviewer['status'] != 'accepted':
+        if reviewer["status"] != "accepted":
             continue
-        phid = reviewer['reviewerPHID']
-        args = { 'constraints[phids][0]' : phid }
+        phid = reviewer["reviewerPHID"]
+        args = {"constraints[phids][0]": phid}
         # API documentation: https://reviews.llvm.org/conduit/method/user.search/
         r = phab_api_call(phab_token, "https://reviews.llvm.org/api/user.search", args)
-        accepted.append(r['result']['data'][0]['fields']['username'])
+        accepted.append(r["result"]["data"][0]["fields"]["username"])
     return accepted
 
+
 class ReleaseWorkflow:
 
-    CHERRY_PICK_FAILED_LABEL = 'release:cherry-pick-failed'
+    CHERRY_PICK_FAILED_LABEL = "release:cherry-pick-failed"
 
     """
     This class implements the sub-commands for the release-workflow command.
@@ -161,9 +169,16 @@ class ReleaseWorkflow:
     based on the text in stdin.
     """
 
-    def __init__(self, token:str, repo:str, issue_number:int,
-                       branch_repo_name:str, branch_repo_token:str,
-                       llvm_project_dir:str, phab_token:str) -> None:
+    def __init__(
+        self,
+        token: str,
+        repo: str,
+        issue_number: int,
+        branch_repo_name: str,
+        branch_repo_token: str,
+        llvm_project_dir: str,
+        phab_token: str,
+    ) -> None:
         self._token = token
         self._repo_name = repo
         self._issue_number = issue_number
@@ -213,11 +228,13 @@ def issue(self) -> github.Issue.Issue:
 
     @property
     def push_url(self) -> str:
-        return 'https://{}@github.com/{}'.format(self.branch_repo_token, self.branch_repo_name)
+        return "https://{}@github.com/{}".format(
+            self.branch_repo_token, self.branch_repo_name
+        )
 
     @property
     def branch_name(self) -> str:
-        return 'issue{}'.format(self.issue_number)
+        return "issue{}".format(self.issue_number)
 
     @property
     def release_branch_for_issue(self) -> Optional[str]:
@@ -225,7 +242,7 @@ def release_branch_for_issue(self) -> Optional[str]:
         milestone = issue.milestone
         if milestone is None:
             return None
-        m = re.search('branch: (.+)',milestone.description)
+        m = re.search("branch: (.+)", milestone.description)
         if m:
             return m.group(1)
         return None
@@ -234,10 +251,14 @@ def print_release_branch(self) -> None:
         print(self.release_branch_for_issue)
 
     def issue_notify_branch(self) -> None:
-        self.issue.create_comment('/branch {}/{}'.format(self.branch_repo_name, self.branch_name))
+        self.issue.create_comment(
+            "/branch {}/{}".format(self.branch_repo_name, self.branch_name)
+        )
 
-    def issue_notify_pull_request(self, pull:github.PullRequest.PullRequest) -> None:
-        self.issue.create_comment('/pull-request {}#{}'.format(self.branch_repo_name, pull.number))
+    def issue_notify_pull_request(self, pull: github.PullRequest.PullRequest) -> None:
+        self.issue.create_comment(
+            "/pull-request {}#{}".format(self.branch_repo_name, pull.number)
+        )
 
     def make_ignore_comment(self, comment: str) -> str:
         """
@@ -246,20 +267,28 @@ def make_ignore_comment(self, comment: str) -> str:
 
         :param str comment: The comment to ignore
         """
-        return "<!--IGNORE-->\n"+comment
+        return "<!--IGNORE-->\n" + comment
 
-    def issue_notify_no_milestone(self, comment:List[str]) -> None:
-        message = "{}\n\nError: Command failed due to missing milestone.".format(''.join(['>' + line for line in comment]))
+    def issue_notify_no_milestone(self, comment: List[str]) -> None:
+        message = "{}\n\nError: Command failed due to missing milestone.".format(
+            "".join([">" + line for line in comment])
+        )
         self.issue.create_comment(self.make_ignore_comment(message))
 
     @property
     def action_url(self) -> str:
-        if os.getenv('CI'):
-            return 'https://github.com/{}/actions/runs/{}'.format(os.getenv('GITHUB_REPOSITORY'), os.getenv('GITHUB_RUN_ID'))
+        if os.getenv("CI"):
+            return "https://github.com/{}/actions/runs/{}".format(
+                os.getenv("GITHUB_REPOSITORY"), os.getenv("GITHUB_RUN_ID")
+            )
         return ""
 
-    def issue_notify_cherry_pick_failure(self, commit:str) -> github.IssueComment.IssueComment:
-        message = self.make_ignore_comment("Failed to cherry-pick: {}\n\n".format(commit))
+    def issue_notify_cherry_pick_failure(
+        self, commit: str
+    ) -> github.IssueComment.IssueComment:
+        message = self.make_ignore_comment(
+            "Failed to cherry-pick: {}\n\n".format(commit)
+        )
         action_url = self.action_url
         if action_url:
             message += action_url + "\n\n"
@@ -269,7 +298,9 @@ def issue_notify_cherry_pick_failure(self, commit:str) -> github.IssueComment.Is
         issue.add_to_labels(self.CHERRY_PICK_FAILED_LABEL)
         return comment
 
-    def issue_notify_pull_request_failure(self, branch:str) -> github.IssueComment.IssueComment:
+    def issue_notify_pull_request_failure(
+        self, branch: str
+    ) -> github.IssueComment.IssueComment:
         message = "Failed to create pull request for {} ".format(branch)
         message += self.action_url
         return self.issue.create_comment(message)
@@ -278,7 +309,7 @@ def issue_remove_cherry_pick_failed_label(self):
         if self.CHERRY_PICK_FAILED_LABEL in [l.name for l in self.issue.labels]:
             self.issue.remove_from_labels(self.CHERRY_PICK_FAILED_LABEL)
 
-    def pr_request_review(self, pr:github.PullRequest.PullRequest):
+    def pr_request_review(self, pr: github.PullRequest.PullRequest):
         """
         This function will try to find the best reviewers for `commits` and
         then add a comment requesting review of the backport and assign the
@@ -297,11 +328,12 @@ def pr_request_review(self, pr:github.PullRequest.PullRequest):
                 reviewers.append(login)
         if len(reviewers):
             message = "{} What do you think about merging this PR to the release branch?".format(
-                    " ".join(["@" + r for r in reviewers]))
+                " ".join(["@" + r for r in reviewers])
+            )
             pr.create_issue_comment(message)
             pr.add_to_assignees(*reviewers)
 
-    def create_branch(self, commits:List[str]) -> bool:
+    def create_branch(self, commits: List[str]) -> bool:
         """
         This function attempts to backport `commits` into the branch associated
         with `self.issue_number`.
@@ -312,31 +344,33 @@ def create_branch(self, commits:List[str]) -> bool:
         :param list commits: List of commits to cherry-pick.
 
         """
-        print('cherry-picking', commits)
+        print("cherry-picking", commits)
         branch_name = self.branch_name
         local_repo = Repo(self.llvm_project_dir)
         local_repo.git.checkout(self.release_branch_for_issue)
 
         for c in commits:
             try:
-                local_repo.git.cherry_pick('-x', c)
+                local_repo.git.cherry_pick("-x", c)
             except Exception as e:
                 self.issue_notify_cherry_pick_failure(c)
                 raise e
 
         push_url = self.push_url
-        print('Pushing to {} {}'.format(push_url, branch_name))
-        local_repo.git.push(push_url, 'HEAD:{}'.format(branch_name), force=True)
+        print("Pushing to {} {}".format(push_url, branch_name))
+        local_repo.git.push(push_url, "HEAD:{}".format(branch_name), force=True)
 
         self.issue_notify_branch()
         self.issue_remove_cherry_pick_failed_label()
         return True
 
-    def check_if_pull_request_exists(self, repo:github.Repository.Repository, head:str) -> bool:
+    def check_if_pull_request_exists(
+        self, repo: github.Repository.Repository, head: str
+    ) -> bool:
         pulls = repo.get_pulls(head=head)
         return pulls.totalCount != 0
 
-    def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
+    def create_pull_request(self, owner: str, repo_name: str, branch: str) -> bool:
         """
         Create a pull request in `self.branch_repo_name`.  The base branch of the
         pull request will be chosen based on the milestone attached to
@@ -347,7 +381,7 @@ def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
         https://docs.github.com/en/get-started/quickstart/github-glossary#compare-branch
         """
         repo = github.Github(self.token).get_repo(self.branch_repo_name)
-        issue_ref = '{}#{}'.format(self.repo_name, self.issue_number)
+        issue_ref = "{}#{}".format(self.repo_name, self.issue_number)
         pull = None
         release_branch_for_issue = self.release_branch_for_issue
         if release_branch_for_issue is None:
@@ -357,13 +391,17 @@ def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
             # If the target repo is not a fork of llvm-project, we need to copy
             # the branch into the target repo.  GitHub only supports cross-repo pull
             # requests on forked repos.
-            head_branch = f'{owner}-{branch}'
+            head_branch = f"{owner}-{branch}"
             local_repo = Repo(self.llvm_project_dir)
             push_done = False
-            for _ in range(0,5):
+            for _ in range(0, 5):
                 try:
-                    local_repo.git.fetch(f'https://github.com/{owner}/{repo_name}', f'{branch}:{branch}')
-                    local_repo.git.push(self.push_url, f'{branch}:{head_branch}', force=True)
+                    local_repo.git.fetch(
+                        f"https://github.com/{owner}/{repo_name}", f"{branch}:{branch}"
+                    )
+                    local_repo.git.push(
+                        self.push_url, f"{branch}:{head_branch}", force=True
+                    )
                     push_done = True
                     break
                 except Exception as e:
@@ -379,11 +417,13 @@ def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
             print("PR already exists...")
             return True
         try:
-            pull = repo.create_pull(title=f"PR for {issue_ref}",
-                                    body='resolves {}'.format(issue_ref),
-                                    base=release_branch_for_issue,
-                                    head=head,
-                                    maintainer_can_modify=False)
+            pull = repo.create_pull(
+                title=f"PR for {issue_ref}",
+                body="resolves {}".format(issue_ref),
+                base=release_branch_for_issue,
+                head=head,
+                maintainer_can_modify=False,
+            )
 
             try:
                 if self.phab_token:
@@ -404,7 +444,6 @@ def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
         # TODO(tstellar): Do you really want to always return True?
         return True
 
-
     def execute_command(self) -> bool:
         """
         This function reads lines from STDIN and executes the first command
@@ -420,11 +459,11 @@ def execute_command(self) -> bool:
             command = m.group(1)
             args = m.group(2)
 
-            if command == 'cherry-pick':
+            if command == "cherry-pick":
                 return self.create_branch(args.split())
 
-            if command == 'branch':
-                m = re.match('([^/]+)/([^/]+)/(.+)', args)
+            if command == "branch":
+                m = re.match("([^/]+)/([^/]+)/(.+)", args)
                 if m:
                     owner = m.group(1)
                     repo = m.group(2)
@@ -435,45 +474,85 @@ def execute_command(self) -> bool:
         print(sys.stdin.readlines())
         return False
 
+
 parser = argparse.ArgumentParser()
-parser.add_argument('--token', type=str, required=True, help='GitHub authentiation token')
-parser.add_argument('--repo', type=str, default=os.getenv('GITHUB_REPOSITORY', 'llvm/llvm-project'),
-                    help='The GitHub repository that we are working with in the form of <owner>/<repo> (e.g. llvm/llvm-project)')
-subparsers = parser.add_subparsers(dest='command')
-
-issue_subscriber_parser = subparsers.add_parser('issue-subscriber')
-issue_subscriber_parser.add_argument('--label-name', type=str, required=True)
-issue_subscriber_parser.add_argument('--issue-number', type=int, required=True)
-
-release_workflow_parser = subparsers.add_parser('release-workflow')
-release_workflow_parser.add_argument('--llvm-project-dir', type=str, default='.', help='directory containing the llvm-project checout')
-release_workflow_parser.add_argument('--issue-number', type=int, required=True, help='The issue number to update')
-release_workflow_parser.add_argument('--phab-token', type=str, help='Phabricator conduit API token. See https://reviews.llvm.org/settings/user/<USER>/page/apitokens/')
-release_workflow_parser.add_argument('--branch-repo-token', type=str,
-                                     help='GitHub authentication token to use for the repository where new branches will be pushed. Defaults to TOKEN.')
-release_workflow_parser.add_argument('--branch-repo', type=str, default='llvm/llvm-project-release-prs',
-                                     help='The name of the repo where new branches will be pushed (e.g. llvm/llvm-project)')
-release_workflow_parser.add_argument('sub_command', type=str, choices=['print-release-branch', 'auto'],
-                                     help='Print to stdout the name of the release branch ISSUE_NUMBER should be backported to')
-
-llvmbot_git_config_parser = subparsers.add_parser('setup-llvmbot-git', help='Set the default user and email for the git repo in LLVM_PROJECT_DIR to llvmbot')
+parser.add_argument(
+    "--token", type=str, required=True, help="GitHub authentiation token"
+)
+parser.add_argument(
+    "--repo",
+    type=str,
+    default=os.getenv("GITHUB_REPOSITORY", "llvm/llvm-project"),
+    help="The GitHub repository that we are working with in the form of <owner>/<repo> (e.g. llvm/llvm-project)",
+)
+subparsers = parser.add_subparsers(dest="command")
+
+issue_subscriber_parser = subparsers.add_parser("issue-subscriber")
+issue_subscriber_parser.add_argument("--label-name", type=str, required=True)
+issue_subscriber_parser.add_argument("--issue-number", type=int, required=True)
+
+release_workflow_parser = subparsers.add_parser("release-workflow")
+release_workflow_parser.add_argument(
+    "--llvm-project-dir",
+    type=str,
+    default=".",
+    help="directory containing the llvm-project checout",
+)
+release_workflow_parser.add_argument(
+    "--issue-number", type=int, required=True, help="The issue number to update"
+)
+release_workflow_parser.add_argument(
+    "--phab-token",
+    type=str,
+    help="Phabricator conduit API token. See https://reviews.llvm.org/settings/user/<USER>/page/apitokens/",
+)
+release_workflow_parser.add_argument(
+    "--branch-repo-token",
+    type=str,
+    help="GitHub authentication token to use for the repository where new branches will be pushed. Defaults to TOKEN.",
+)
+release_workflow_parser.add_argument(
+    "--branch-repo",
+    type=str,
+    default="llvm/llvm-project-release-prs",
+    help="The name of the repo where new branches will be pushed (e.g. llvm/llvm-project)",
+)
+release_workflow_parser.add_argument(
+    "sub_command",
+    type=str,
+    choices=["print-release-branch", "auto"],
+    help="Print to stdout the name of the release branch ISSUE_NUMBER should be backported to",
+)
+
+llvmbot_git_config_parser = subparsers.add_parser(
+    "setup-llvmbot-git",
+    help="Set the default user and email for the git repo in LLVM_PROJECT_DIR to llvmbot",
+)
 
 args = parser.parse_args()
 
-if args.command == 'issue-subscriber':
-    issue_subscriber = IssueSubscriber(args.token, args.repo, args.issue_number, args.label_name)
+if args.command == "issue-subscriber":
+    issue_subscriber = IssueSubscriber(
+        args.token, args.repo, args.issue_number, args.label_name
+    )
     issue_subscriber.run()
-elif args.command == 'release-workflow':
-    release_workflow = ReleaseWorkflow(args.token, args.repo, args.issue_number,
-                                       args.branch_repo, args.branch_repo_token,
-                                       args.llvm_project_dir, args.phab_token)
+elif args.command == "release-workflow":
+    release_workflow = ReleaseWorkflow(
+        args.token,
+        args.repo,
+        args.issue_number,
+        args.branch_repo,
+        args.branch_repo_token,
+        args.llvm_project_dir,
+        args.phab_token,
+    )
     if not release_workflow.release_branch_for_issue:
         release_workflow.issue_notify_no_milestone(sys.stdin.readlines())
         sys.exit(1)
-    if args.sub_command == 'print-release-branch':
+    if args.sub_command == "print-release-branch":
         release_workflow.print_release_branch()
     else:
         if not release_workflow.execute_command():
             sys.exit(1)
-elif args.command == 'setup-llvmbot-git':
+elif args.command == "setup-llvmbot-git":
     setup_llvmbot_git()
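
For readers following the release automation above: the reformatted constructor
and properties can be exercised directly without touching the network until a
sub-command runs. A minimal sketch, assuming placeholder tokens and a made-up
issue number (none of the values below are real credentials):

    # Hypothetical usage; all argument values are placeholders.
    wf = ReleaseWorkflow(
        token="<github-token>",
        repo="llvm/llvm-project",
        issue_number=12345,
        branch_repo_name="llvm/llvm-project-release-prs",
        branch_repo_token="<github-token>",
        llvm_project_dir=".",
        phab_token="<phab-token>",
    )
    print(wf.branch_name)  # prints "issue12345"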

diff --git a/llvm/utils/git/pre-push.py b/llvm/utils/git/pre-push.py
index e50a913709634..d7ae3767d2923 100755
--- a/llvm/utils/git/pre-push.py
+++ b/llvm/utils/git/pre-push.py
@@ -37,7 +37,7 @@
 VERBOSE = False
 QUIET = False
 dev_null_fd = None
-z40 = '0000000000000000000000000000000000000000'
+z40 = "0000000000000000000000000000000000000000"
 
 
 def eprint(*args, **kwargs):
@@ -63,29 +63,37 @@ def die(msg):
 
 def ask_confirm(prompt):
     while True:
-        query = input('%s (y/N): ' % (prompt))
-        if query.lower() not in ['y', 'n', '']:
-           print('Expect y or n!')
-           continue
-        return query.lower() == 'y'
+        query = input("%s (y/N): " % (prompt))
+        if query.lower() not in ["y", "n", ""]:
+            print("Expect y or n!")
+            continue
+        return query.lower() == "y"
 
 
 def get_dev_null():
     """Lazily create a /dev/null fd for use in shell()"""
     global dev_null_fd
     if dev_null_fd is None:
-        dev_null_fd = open(os.devnull, 'w')
+        dev_null_fd = open(os.devnull, "w")
     return dev_null_fd
 
 
-def shell(cmd, strip=True, cwd=None, stdin=None, die_on_failure=True,
-          ignore_errors=False, text=True, print_raw_stderr=False):
+def shell(
+    cmd,
+    strip=True,
+    cwd=None,
+    stdin=None,
+    die_on_failure=True,
+    ignore_errors=False,
+    text=True,
+    print_raw_stderr=False,
+):
     # Escape args when logging for easy repro.
     quoted_cmd = [quote(arg) for arg in cmd]
-    cwd_msg = ''
+    cwd_msg = ""
     if cwd:
-      cwd_msg = ' in %s' % cwd
-    log_verbose('Running%s: %s' % (cwd_msg, ' '.join(quoted_cmd)))
+        cwd_msg = " in %s" % cwd
+    log_verbose("Running%s: %s" % (cwd_msg, " ".join(quoted_cmd)))
 
     err_pipe = subprocess.PIPE
     if ignore_errors:
@@ -93,29 +101,34 @@ def shell(cmd, strip=True, cwd=None, stdin=None, die_on_failure=True,
         err_pipe = get_dev_null()
 
     start = time.time()
-    p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=err_pipe,
-                         stdin=subprocess.PIPE,
-                         universal_newlines=text)
+    p = subprocess.Popen(
+        cmd,
+        cwd=cwd,
+        stdout=subprocess.PIPE,
+        stderr=err_pipe,
+        stdin=subprocess.PIPE,
+        universal_newlines=text,
+    )
     stdout, stderr = p.communicate(input=stdin)
     elapsed = time.time() - start
 
-    log_verbose('Command took %0.1fs' % elapsed)
+    log_verbose("Command took %0.1fs" % elapsed)
 
     if p.returncode == 0 or ignore_errors:
         if stderr and not ignore_errors:
             if not print_raw_stderr:
-                eprint('`%s` printed to stderr:' % ' '.join(quoted_cmd))
+                eprint("`%s` printed to stderr:" % " ".join(quoted_cmd))
             eprint(stderr.rstrip())
         if strip:
             if text:
-                stdout = stdout.rstrip('\r\n')
+                stdout = stdout.rstrip("\r\n")
             else:
-                stdout = stdout.rstrip(b'\r\n')
+                stdout = stdout.rstrip(b"\r\n")
         if VERBOSE:
             for l in stdout.splitlines():
-                log_verbose('STDOUT: %s' % l)
+                log_verbose("STDOUT: %s" % l)
         return stdout
-    err_msg = '`%s` returned %s' % (' '.join(quoted_cmd), p.returncode)
+    err_msg = "`%s` returned %s" % (" ".join(quoted_cmd), p.returncode)
     eprint(err_msg)
     if stderr:
         eprint(stderr.rstrip())
@@ -125,40 +138,47 @@ def shell(cmd, strip=True, cwd=None, stdin=None, die_on_failure=True,
 
 
 def git(*cmd, **kwargs):
-    return shell(['git'] + list(cmd), **kwargs)
+    return shell(["git"] + list(cmd), **kwargs)
 
 
 def get_revs_to_push(range):
-    commits = git('rev-list', range).splitlines()
+    commits = git("rev-list", range).splitlines()
     # Reverse the order so we print the oldest commit first
     commits.reverse()
     return commits
 
 
 def handle_push(args, local_ref, local_sha, remote_ref, remote_sha):
-    '''Check a single push request (which can include multiple revisions)'''
-    log_verbose('Handle push, reproduce with '
-                '`echo %s %s %s %s | pre-push.py %s %s'
-                 % (local_ref, local_sha, remote_ref, remote_sha, args.remote,
-                    args.url))
+    """Check a single push request (which can include multiple revisions)"""
+    log_verbose(
+        "Handle push, reproduce with "
+        "`echo %s %s %s %s | pre-push.py %s %s"
+        % (local_ref, local_sha, remote_ref, remote_sha, args.remote, args.url)
+    )
     # Handle request to delete
     if local_sha == z40:
-        if not ask_confirm('Are you sure you want to delete "%s" on remote "%s"?' % (remote_ref, args.url)):
+        if not ask_confirm(
+            'Are you sure you want to delete "%s" on remote "%s"?'
+            % (remote_ref, args.url)
+        ):
             die("Aborting")
         return
 
     # Push a new branch
     if remote_sha == z40:
-      if not ask_confirm('Are you sure you want to push a new branch/tag "%s" on remote "%s"?' % (remote_ref, args.url)):
-        die("Aborting")
-      range=local_sha
-      return
+        if not ask_confirm(
+            'Are you sure you want to push a new branch/tag "%s" on remote "%s"?'
+            % (remote_ref, args.url)
+        ):
+            die("Aborting")
+        range = local_sha
+        return
     else:
-      # Update to existing branch, examine new commits
-      range='%s..%s' % (remote_sha, local_sha)
-      # Check that the remote commit exists, otherwise let git proceed
-      if "commit" not in git('cat-file','-t', remote_sha, ignore_errors=True):
-          return
+        # Update to existing branch, examine new commits
+        range = "%s..%s" % (remote_sha, local_sha)
+        # Check that the remote commit exists, otherwise let git proceed
+        if "commit" not in git("cat-file", "-t", remote_sha, ignore_errors=True):
+            return
 
     revs = get_revs_to_push(range)
     if not revs:
@@ -168,51 +188,57 @@ def handle_push(args, local_ref, local_sha, remote_ref, remote_sha):
     # Print the revisions about to be pushed
     print('Pushing to "%s" on remote "%s"' % (remote_ref, args.url))
     for sha in revs:
-      print(' - ' + git('show', '--oneline', '--quiet', sha))
+        print(" - " + git("show", "--oneline", "--quiet", sha))
 
     if len(revs) > 1:
-      if not ask_confirm('Are you sure you want to push %d commits?' % len(revs)):
-          die('Aborting')
-
+        if not ask_confirm("Are you sure you want to push %d commits?" % len(revs)):
+            die("Aborting")
 
     for sha in revs:
-      msg = git('log', '--format=%B', '-n1', sha)
-      if 'Differential Revision' not in msg:
-          continue
-      for line in msg.splitlines():
-          for tag in ['Summary', 'Reviewers', 'Subscribers', 'Tags']:
-            if line.startswith(tag + ':'):
-              eprint('Please remove arcanist tags from the commit message (found "%s" tag in %s)' % (tag, sha[:12]))
-              if len(revs) == 1:
-                  eprint('Try running: llvm/utils/git/arcfilter.sh')
-              die('Aborting (force push by adding "--no-verify")')
+        msg = git("log", "--format=%B", "-n1", sha)
+        if "Differential Revision" not in msg:
+            continue
+        for line in msg.splitlines():
+            for tag in ["Summary", "Reviewers", "Subscribers", "Tags"]:
+                if line.startswith(tag + ":"):
+                    eprint(
+                        'Please remove arcanist tags from the commit message (found "%s" tag in %s)'
+                        % (tag, sha[:12])
+                    )
+                    if len(revs) == 1:
+                        eprint("Try running: llvm/utils/git/arcfilter.sh")
+                    die('Aborting (force push by adding "--no-verify")')
 
     return
 
 
-if __name__ == '__main__':
-    if not shutil.which('git'):
-        die('error: cannot find git command')
+if __name__ == "__main__":
+    if not shutil.which("git"):
+        die("error: cannot find git command")
 
     argv = sys.argv[1:]
     p = argparse.ArgumentParser(
-        prog='pre-push', formatter_class=argparse.RawDescriptionHelpFormatter,
-        description=__doc__)
+        prog="pre-push",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        description=__doc__,
+    )
     verbosity_group = p.add_mutually_exclusive_group()
-    verbosity_group.add_argument('-q', '--quiet', action='store_true',
-                                 help='print less information')
-    verbosity_group.add_argument('-v', '--verbose', action='store_true',
-                                 help='print more information')
+    verbosity_group.add_argument(
+        "-q", "--quiet", action="store_true", help="print less information"
+    )
+    verbosity_group.add_argument(
+        "-v", "--verbose", action="store_true", help="print more information"
+    )
 
-    p.add_argument('remote', type=str, help='Name of the remote')
-    p.add_argument('url', type=str, help='URL for the remote')
+    p.add_argument("remote", type=str, help="Name of the remote")
+    p.add_argument("url", type=str, help="URL for the remote")
 
     args = p.parse_args(argv)
     VERBOSE = args.verbose
     QUIET = args.quiet
 
     lines = sys.stdin.readlines()
-    sys.stdin = open('/dev/tty', 'r')
+    sys.stdin = open("/dev/tty", "r")
     for line in lines:
-      local_ref, local_sha, remote_ref, remote_sha = line.split()
-      handle_push(args, local_ref, local_sha, remote_ref, remote_sha)
+        local_ref, local_sha, remote_ref, remote_sha = line.split()
+        handle_push(args, local_ref, local_sha, remote_ref, remote_sha)

diff --git a/llvm/utils/gn/build/run_built_binary.py b/llvm/utils/gn/build/run_built_binary.py
index faac5654873b9..abe606f022b58 100755
--- a/llvm/utils/gn/build/run_built_binary.py
+++ b/llvm/utils/gn/build/run_built_binary.py
@@ -5,4 +5,4 @@
 import sys
 
 # Prefix with ./ to run built binary, not arbitrary stuff from PATH.
-sys.exit(subprocess.call(['./' + sys.argv[1]] + sys.argv[2:]))
+sys.exit(subprocess.call(["./" + sys.argv[1]] + sys.argv[2:]))

diff --git a/llvm/utils/gn/build/symbol_exports.py b/llvm/utils/gn/build/symbol_exports.py
index 5126e7071e70d..379a999d4c778 100755
--- a/llvm/utils/gn/build/symbol_exports.py
+++ b/llvm/utils/gn/build/symbol_exports.py
@@ -16,29 +16,30 @@
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('--format', required=True,
-                        choices=('linux','mac','win'))
-    parser.add_argument('source')
-    parser.add_argument('output')
+    parser.add_argument("--format", required=True, choices=("linux", "mac", "win"))
+    parser.add_argument("source")
+    parser.add_argument("output")
     args = parser.parse_args()
 
     symbols = open(args.source).readlines()
 
-    if args.format == 'linux':
-        output_lines = (['LLVM_0 {\n',
-                         '  global:\n',] +
-                        ['    %s;\n' % s.rstrip() for s in symbols] +
-                        ['  local:\n',
-                         '    *;\n',
-                         '};\n'])
-    elif args.format == 'mac':
-        output_lines = ['_' + s for s in symbols]
+    if args.format == "linux":
+        output_lines = (
+            [
+                "LLVM_0 {\n",
+                "  global:\n",
+            ]
+            + ["    %s;\n" % s.rstrip() for s in symbols]
+            + ["  local:\n", "    *;\n", "};\n"]
+        )
+    elif args.format == "mac":
+        output_lines = ["_" + s for s in symbols]
     else:
-        assert args.format == 'win'
-        output_lines = ['EXPORTS\n'] + ['  ' + s for s in symbols]
+        assert args.format == "win"
+        output_lines = ["EXPORTS\n"] + ["  " + s for s in symbols]
 
-    open(args.output, 'w').writelines(output_lines)
+    open(args.output, "w").writelines(output_lines)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())
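
To illustrate the three format branches above: given a source file listing the
symbols foo and bar (hypothetical names), --format=linux emits a version
script,

    LLVM_0 {
      global:
        foo;
        bar;
      local:
        *;
    };

--format=mac emits the underscore-prefixed list _foo, _bar, and --format=win
emits an EXPORTS line followed by the indented symbols.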

diff --git a/llvm/utils/gn/build/symlink_or_copy.py b/llvm/utils/gn/build/symlink_or_copy.py
index d5fbf32c1a6de..cbc559a6778f9 100755
--- a/llvm/utils/gn/build/symlink_or_copy.py
+++ b/llvm/utils/gn/build/symlink_or_copy.py
@@ -13,15 +13,16 @@
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('--stamp', required=True,
-                        help='name of a file whose mtime is updated on run')
-    parser.add_argument('source')
-    parser.add_argument('output')
+    parser.add_argument(
+        "--stamp", required=True, help="name of a file whose mtime is updated on run"
+    )
+    parser.add_argument("source")
+    parser.add_argument("output")
     args = parser.parse_args()
 
     # FIXME: This should not check the host platform but the target platform
     # (which needs to be passed in as an arg), for cross builds.
-    if sys.platform != 'win32':
+    if sys.platform != "win32":
         try:
             os.makedirs(os.path.dirname(args.output))
         except OSError as e:
@@ -37,12 +38,13 @@ def main():
                 raise
     else:
         import shutil
+
         output = args.output + ".exe"
         source = args.source + ".exe"
         shutil.copyfile(os.path.join(os.path.dirname(output), source), output)
 
-    open(args.stamp, 'w') # Update mtime on stamp file.
+    open(args.stamp, "w")  # Update mtime on stamp file.
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff --git a/llvm/utils/gn/build/sync_source_lists_from_cmake.py b/llvm/utils/gn/build/sync_source_lists_from_cmake.py
index fe7e8ed10bcf5..6b48ca7de869f 100755
--- a/llvm/utils/gn/build/sync_source_lists_from_cmake.py
+++ b/llvm/utils/gn/build/sync_source_lists_from_cmake.py
@@ -25,77 +25,87 @@ def patch_gn_file(gn_file, add, remove):
     with open(gn_file) as f:
         gn_contents = f.read()
     if add:
-        srcs_tok = 'sources = ['
+        srcs_tok = "sources = ["
         tokloc = gn_contents.find(srcs_tok)
-        while gn_contents.startswith('sources = []', tokloc):
+        while gn_contents.startswith("sources = []", tokloc):
             tokloc = gn_contents.find(srcs_tok, tokloc + 1)
-        if tokloc == -1: raise ValueError(gn_file + ': No source list')
+        if tokloc == -1:
+            raise ValueError(gn_file + ": No source list")
         if gn_contents.find(srcs_tok, tokloc + 1) != -1:
-            raise ValueError(gn_file + ': Multiple source lists')
-        if gn_contents.find('# NOSORT', 0, tokloc) != -1:
-            raise ValueError(gn_file + ': Found # NOSORT, needs manual merge')
+            raise ValueError(gn_file + ": Multiple source lists")
+        if gn_contents.find("# NOSORT", 0, tokloc) != -1:
+            raise ValueError(gn_file + ": Found # NOSORT, needs manual merge")
         tokloc += len(srcs_tok)
         for a in add:
-            gn_contents = (gn_contents[:tokloc] + ('"%s",' % a) +
-                           gn_contents[tokloc:])
+            gn_contents = gn_contents[:tokloc] + ('"%s",' % a) + gn_contents[tokloc:]
     for r in remove:
-        gn_contents = gn_contents.replace('"%s",' % r, '')
-    with open(gn_file, 'w') as f:
+        gn_contents = gn_contents.replace('"%s",' % r, "")
+    with open(gn_file, "w") as f:
         f.write(gn_contents)
 
     # Run `gn format`.
-    gn = os.path.join(os.path.dirname(__file__), '..', 'gn.py')
-    subprocess.check_call([sys.executable, gn, 'format', '-q', gn_file])
+    gn = os.path.join(os.path.dirname(__file__), "..", "gn.py")
+    subprocess.check_call([sys.executable, gn, "format", "-q", gn_file])
 
 
 def sync_source_lists(write):
     # Use shell=True on Windows in case git is a bat file.
-    def git(args): subprocess.check_call(['git'] + args, shell=os.name == 'nt')
+    def git(args):
+        subprocess.check_call(["git"] + args, shell=os.name == "nt")
+
     def git_out(args):
-        return subprocess.check_output(['git'] + args, shell=os.name == 'nt',
-                                       universal_newlines=True)
-    gn_files = git_out(['ls-files', '*BUILD.gn']).splitlines()
+        return subprocess.check_output(
+            ["git"] + args, shell=os.name == "nt", universal_newlines=True
+        )
+
+    gn_files = git_out(["ls-files", "*BUILD.gn"]).splitlines()
 
     # Matches e.g. |   "foo.cpp",|, captures |foo| in group 1.
     gn_cpp_re = re.compile(r'^\s*"([^$"]+\.(?:cpp|c|h|S))",$', re.MULTILINE)
     # Matches e.g. |   bar_sources = [ "foo.cpp" ]|, captures |foo.cpp| in group 1.
     gn_cpp_re2 = re.compile(
-        r'^\s*(?:.*_)?sources \+?= \[ "([^$"]+\.(?:cpp|c|h|S))" ]$',
-        re.MULTILINE)
+        r'^\s*(?:.*_)?sources \+?= \[ "([^$"]+\.(?:cpp|c|h|S))" ]$', re.MULTILINE
+    )
     # Matches e.g. |   foo.cpp|, captures |foo.cpp| in group 1.
-    cmake_cpp_re = re.compile(r'^\s*([A-Za-z_0-9./-]+\.(?:cpp|c|h|S))$',
-                              re.MULTILINE)
+    cmake_cpp_re = re.compile(r"^\s*([A-Za-z_0-9./-]+\.(?:cpp|c|h|S))$", re.MULTILINE)
 
     changes_by_rev = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
 
     def find_gitrev(touched_line, in_file):
         # re.escape() escapes e.g. '-', which works in practice but has
         # undefined behavior according to the POSIX extended regex spec.
-        posix_re_escape = lambda s: re.sub(r'([.[{()\\*+?|^$])', r'\\\1', s)
-        cmd = ['log', '--format=%h', '-1', '--pickaxe-regex',
-               # `\<` / `\>` cause issues on Windows (and is a GNU extension).
-               # `\b` is a GNU extension and stopped working in Apple Git-143
-               # (Xcode 13.3).
-               # `[:space:]` is over 10x faster than `^[:alnum:]` and hopefully
-               # good enough.
-               r'-S[[:space:]]%s[[:space:]]' % posix_re_escape(touched_line),
-               in_file]
+        posix_re_escape = lambda s: re.sub(r"([.[{()\\*+?|^$])", r"\\\1", s)
+        cmd = [
+            "log",
+            "--format=%h",
+            "-1",
+            "--pickaxe-regex",
+            # `\<` / `\>` cause issues on Windows (and is a GNU extension).
+            # `\b` is a GNU extension and stopped working in Apple Git-143
+            # (Xcode 13.3).
+            # `[:space:]` is over 10x faster than `^[:alnum:]` and hopefully
+            # good enough.
+            r"-S[[:space:]]%s[[:space:]]" % posix_re_escape(touched_line),
+            in_file,
+        ]
         return git_out(cmd).rstrip()
 
     # Collect changes to gn files, grouped by revision.
     for gn_file in gn_files:
         # The CMakeLists.txt for llvm/utils/gn/secondary/foo/BUILD.gn is
         # at foo/CMakeLists.txt.
-        strip_prefix = 'llvm/utils/gn/secondary/'
+        strip_prefix = "llvm/utils/gn/secondary/"
         if not gn_file.startswith(strip_prefix):
             continue
         cmake_file = os.path.join(
-                os.path.dirname(gn_file[len(strip_prefix):]), 'CMakeLists.txt')
+            os.path.dirname(gn_file[len(strip_prefix) :]), "CMakeLists.txt"
+        )
         if not os.path.exists(cmake_file):
             continue
 
         def get_sources(source_re, text):
             return set([m.group(1) for m in source_re.finditer(text)])
+
         gn_cpp = get_sources(gn_cpp_re, open(gn_file).read())
         gn_cpp |= get_sources(gn_cpp_re2, open(gn_file).read())
         cmake_cpp = get_sources(cmake_cpp_re, open(cmake_file).read())
@@ -107,28 +117,28 @@ def by_rev(files, key):
             for f in files:
                 rev = find_gitrev(f, cmake_file)
                 changes_by_rev[rev][gn_file][key].append(f)
-        by_rev(sorted(cmake_cpp - gn_cpp), 'add')
-        by_rev(sorted(gn_cpp - cmake_cpp), 'remove')
+
+        by_rev(sorted(cmake_cpp - gn_cpp), "add")
+        by_rev(sorted(gn_cpp - cmake_cpp), "remove")
 
     # Output necessary changes grouped by revision.
     for rev in sorted(changes_by_rev):
-        print('[gn build] Port {0} -- https://reviews.llvm.org/rG{0}'
-            .format(rev))
+        print("[gn build] Port {0} -- https://reviews.llvm.org/rG{0}".format(rev))
         for gn_file, data in sorted(changes_by_rev[rev].items()):
-            add = data.get('add', [])
-            remove = data.get('remove', [])
+            add = data.get("add", [])
+            remove = data.get("remove", [])
             if write:
                 patch_gn_file(gn_file, add, remove)
-                git(['add', gn_file])
+                git(["add", gn_file])
             else:
-                print('  ' + gn_file)
+                print("  " + gn_file)
                 if add:
-                    print('   add:\n' + '\n'.join('    "%s",' % a for a in add))
+                    print("   add:\n" + "\n".join('    "%s",' % a for a in add))
                 if remove:
-                    print('   remove:\n    ' + '\n    '.join(remove))
+                    print("   remove:\n    " + "\n    ".join(remove))
                 print()
         if write:
-            git(['commit', '-m', '[gn build] Port %s' % rev])
+            git(["commit", "-m", "[gn build] Port %s" % rev])
         else:
             print()
 
@@ -137,31 +147,33 @@ def by_rev(files, key):
 
 def sync_unittests():
     # Matches e.g. |add_llvm_unittest_with_input_files|.
-    unittest_re = re.compile(r'^add_\S+_unittest', re.MULTILINE)
+    unittest_re = re.compile(r"^add_\S+_unittest", re.MULTILINE)
 
-    checked = [ 'bolt', 'clang', 'clang-tools-extra', 'lld', 'llvm' ]
+    checked = ["bolt", "clang", "clang-tools-extra", "lld", "llvm"]
     changed = False
     for c in checked:
-        for root, _, _ in os.walk(os.path.join(c, 'unittests')):
-            cmake_file = os.path.join(root, 'CMakeLists.txt')
+        for root, _, _ in os.walk(os.path.join(c, "unittests")):
+            cmake_file = os.path.join(root, "CMakeLists.txt")
             if not os.path.exists(cmake_file):
                 continue
             if not unittest_re.search(open(cmake_file).read()):
                 continue  # Skip CMake files that just add subdirectories.
-            gn_file = os.path.join('llvm/utils/gn/secondary', root, 'BUILD.gn')
+            gn_file = os.path.join("llvm/utils/gn/secondary", root, "BUILD.gn")
             if not os.path.exists(gn_file):
                 changed = True
-                print('missing GN file %s for unittest CMake file %s' %
-                      (gn_file, cmake_file))
+                print(
+                    "missing GN file %s for unittest CMake file %s"
+                    % (gn_file, cmake_file)
+                )
     return changed
 
 
 def main():
-    src = sync_source_lists(len(sys.argv) > 1 and sys.argv[1] == '--write')
+    src = sync_source_lists(len(sys.argv) > 1 and sys.argv[1] == "--write")
     tests = sync_unittests()
     if src or tests:
         sys.exit(1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
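
When run without --write, sync_source_lists prints the pending edits grouped by
the upstream revision that introduced them; a hypothetical sample (revision,
path, and file name invented for illustration):

    [gn build] Port 0123abcd -- https://reviews.llvm.org/rG0123abcd
      llvm/utils/gn/secondary/llvm/lib/Foo/BUILD.gn
       add:
        "Bar.cpp",

With --write it instead patches each BUILD.gn and commits the result with the
same "[gn build] Port <rev>" message.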

diff --git a/llvm/utils/gn/build/write_cmake_config.py b/llvm/utils/gn/build/write_cmake_config.py
index ff69079bbef76..a14df6b2cdeab 100755
--- a/llvm/utils/gn/build/write_cmake_config.py
+++ b/llvm/utils/gn/build/write_cmake_config.py
@@ -40,70 +40,72 @@
 
 def main():
     parser = argparse.ArgumentParser(
-                 epilog=__doc__,
-                 formatter_class=argparse.RawDescriptionHelpFormatter)
-    parser.add_argument('input', help='input file')
-    parser.add_argument('values', nargs='*', help='several KEY=VALUE pairs')
-    parser.add_argument('-o', '--output', required=True,
-                        help='output file')
+        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("input", help="input file")
+    parser.add_argument("values", nargs="*", help="several KEY=VALUE pairs")
+    parser.add_argument("-o", "--output", required=True, help="output file")
     args = parser.parse_args()
 
     values = {}
     for value in args.values:
-        key, val = value.split('=', 1)
+        key, val = value.split("=", 1)
         if key in values:
             print('duplicate key "%s" in args' % key, file=sys.stderr)
             return 1
-        values[key] = val.replace('\\n', '\n')
+        values[key] = val.replace("\\n", "\n")
     unused_values = set(values.keys())
 
     # Matches e.g. '${FOO}' or '@FOO@' and captures FOO in group 1 or 2.
-    var_re = re.compile(r'\$\{([^}]*)\}|@([^@]*)@')
+    var_re = re.compile(r"\$\{([^}]*)\}|@([^@]*)@")
 
     with open(args.input) as f:
         in_lines = f.readlines()
     out_lines = []
     for in_line in in_lines:
+
         def repl(m):
             key = m.group(1) or m.group(2)
             unused_values.discard(key)
             return values[key]
+
         in_line = var_re.sub(repl, in_line)
-        if in_line.startswith('#cmakedefine01 '):
+        if in_line.startswith("#cmakedefine01 "):
             _, var = in_line.split()
-            if values[var] == '0':
+            if values[var] == "0":
                 print('error: "%s=0" used with #cmakedefine01 %s' % (var, var))
                 print("       '0' evaluates as truthy with #cmakedefine01")
                 print('       use "%s=" instead' % var)
                 return 1
-            in_line = '#define %s %d\n' % (var, 1 if values[var] else 0)
+            in_line = "#define %s %d\n" % (var, 1 if values[var] else 0)
             unused_values.discard(var)
-        elif in_line.startswith('#cmakedefine '):
+        elif in_line.startswith("#cmakedefine "):
             _, var = in_line.split(None, 1)
             try:
                 var, val = var.split(None, 1)
-                in_line = '#define %s %s' % (var, val)  # val ends in \n.
+                in_line = "#define %s %s" % (var, val)  # val ends in \n.
             except:
                 var = var.rstrip()
-                in_line = '#define %s\n' % var
+                in_line = "#define %s\n" % var
             if not values[var]:
-                in_line = '/* #undef %s */\n' % var
+                in_line = "/* #undef %s */\n" % var
             unused_values.discard(var)
         out_lines.append(in_line)
 
     if unused_values:
-        print('unused values args:', file=sys.stderr)
-        print('    ' + '\n    '.join(unused_values), file=sys.stderr)
+        print("unused values args:", file=sys.stderr)
+        print("    " + "\n    ".join(unused_values), file=sys.stderr)
         return 1
 
-    output = ''.join(out_lines)
+    output = "".join(out_lines)
 
     leftovers = var_re.findall(output)
     if leftovers:
         print(
-            'unprocessed values:\n',
-            '\n'.join([x[0] or x[1] for x in leftovers]),
-            file=sys.stderr)
+            "unprocessed values:\n",
+            "\n".join([x[0] or x[1] for x in leftovers]),
+            file=sys.stderr,
+        )
         return 1
 
     def read(filename):
@@ -111,10 +113,10 @@ def read(filename):
             return f.read()
 
     if not os.path.exists(args.output) or read(args.output) != output:
-        with open(args.output, 'w') as f:
+        with open(args.output, "w") as f:
             f.write(output)
         os.chmod(args.output, os.stat(args.input).st_mode & 0o777)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())
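
A worked example of the substitution rules above (variable names are
hypothetical): given a template containing

    #cmakedefine01 HAVE_FOO
    #cmakedefine BAR_PATH "${BAR_PATH}"

and the arguments HAVE_FOO=1 BAR_PATH=/opt/bar, the output is

    #define HAVE_FOO 1
    #define BAR_PATH "/opt/bar"

Passing HAVE_FOO= (empty) yields "#define HAVE_FOO 0", and an empty BAR_PATH
yields "/* #undef BAR_PATH */" instead.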

diff --git a/llvm/utils/gn/build/write_file.py b/llvm/utils/gn/build/write_file.py
index 96545b8ec1b4c..112164f208e54 100644
--- a/llvm/utils/gn/build/write_file.py
+++ b/llvm/utils/gn/build/write_file.py
@@ -8,14 +8,14 @@
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('filepath')
-    parser.add_argument('content')
+    parser.add_argument("filepath")
+    parser.add_argument("content")
 
     args = parser.parse_args()
 
-    with open(args.filepath, 'w') as f:
+    with open(args.filepath, "w") as f:
         f.write(args.content)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff --git a/llvm/utils/gn/build/write_library_dependencies.py b/llvm/utils/gn/build/write_library_dependencies.py
index d9fcc326df7f9..ba1a8a0157314 100644
--- a/llvm/utils/gn/build/write_library_dependencies.py
+++ b/llvm/utils/gn/build/write_library_dependencies.py
@@ -97,12 +97,12 @@
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-o', '--output', required=True, help='output file')
+    parser.add_argument("-o", "--output", required=True, help="output file")
     args = parser.parse_args()
 
-    with open(args.output, 'w') as f:
+    with open(args.output, "w") as f:
         f.write(OUTPUT)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff --git a/llvm/utils/gn/build/write_vcsrevision.py b/llvm/utils/gn/build/write_vcsrevision.py
index 6398b94df76bb..afd6aae60f6d7 100755
--- a/llvm/utils/gn/build/write_vcsrevision.py
+++ b/llvm/utils/gn/build/write_vcsrevision.py
@@ -24,59 +24,87 @@ def which(program):
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('-d', '--depfile',
-                        help='if set, writes a depfile that causes this script '
-                             'to re-run each time the current revision changes')
-    parser.add_argument('--write-git-rev', action='store_true',
-                        help='if set, writes git revision, else writes #undef')
-    parser.add_argument('--name', action='append',
-                        help='if set, writes a depfile that causes this script '
-                             'to re-run each time the current revision changes')
-    parser.add_argument('vcs_header', help='path to the output file to write')
+    parser.add_argument(
+        "-d",
+        "--depfile",
+        help="if set, writes a depfile that causes this script "
+        "to re-run each time the current revision changes",
+    )
+    parser.add_argument(
+        "--write-git-rev",
+        action="store_true",
+        help="if set, writes git revision, else writes #undef",
+    )
+    parser.add_argument(
+        "--name",
+        action="append",
+        help="if set, writes a depfile that causes this script "
+        "to re-run each time the current revision changes",
+    )
+    parser.add_argument("vcs_header", help="path to the output file to write")
     args = parser.parse_args()
 
-    vcsrevision_contents = ''
+    vcsrevision_contents = ""
     if args.write_git_rev:
-        git, use_shell = which('git'), False
-        if not git: git = which('git.exe')
-        if not git: git, use_shell = which('git.bat'), True
-        git_dir = subprocess.check_output(
-                [git, 'rev-parse', '--git-dir'],
-                cwd=LLVM_DIR, shell=use_shell).decode().strip()
+        git, use_shell = which("git"), False
+        if not git:
+            git = which("git.exe")
+        if not git:
+            git, use_shell = which("git.bat"), True
+        git_dir = (
+            subprocess.check_output(
+                [git, "rev-parse", "--git-dir"], cwd=LLVM_DIR, shell=use_shell
+            )
+            .decode()
+            .strip()
+        )
         if not os.path.isdir(git_dir):
             print('.git dir not found at "%s"' % git_dir, file=sys.stderr)
             return 1
 
-        rev = subprocess.check_output(
-                [git, 'rev-parse', '--short', 'HEAD'],
-                cwd=git_dir, shell=use_shell).decode().strip()
-        url = subprocess.check_output(
-                [git, 'remote', 'get-url', 'origin'],
-                cwd=git_dir, shell=use_shell).decode().strip()
+        rev = (
+            subprocess.check_output(
+                [git, "rev-parse", "--short", "HEAD"], cwd=git_dir, shell=use_shell
+            )
+            .decode()
+            .strip()
+        )
+        url = (
+            subprocess.check_output(
+                [git, "remote", "get-url", "origin"], cwd=git_dir, shell=use_shell
+            )
+            .decode()
+            .strip()
+        )
         for name in args.name:
             vcsrevision_contents += '#define %s_REVISION "%s"\n' % (name, rev)
             vcsrevision_contents += '#define %s_REPOSITORY "%s"\n' % (name, url)
     else:
         for name in args.name:
-            vcsrevision_contents += '#undef %s_REVISION\n' % name
-            vcsrevision_contents += '#undef %s_REPOSITORY\n' % name
+            vcsrevision_contents += "#undef %s_REVISION\n" % name
+            vcsrevision_contents += "#undef %s_REPOSITORY\n" % name
 
     # If the output already exists and is identical to what we'd write,
     # return to not perturb the existing file's timestamp.
-    if os.path.exists(args.vcs_header) and \
-            open(args.vcs_header).read() == vcsrevision_contents:
+    if (
+        os.path.exists(args.vcs_header)
+        and open(args.vcs_header).read() == vcsrevision_contents
+    ):
         return 0
 
     # http://neugierig.org/software/blog/2014/11/binary-revisions.html
     if args.depfile:
         build_dir = os.getcwd()
-        with open(args.depfile, 'w') as depfile:
-            depfile.write('%s: %s\n' % (
-                args.vcs_header,
-                os.path.relpath(os.path.join(git_dir, 'logs', 'HEAD'),
-                                build_dir)))
-    open(args.vcs_header, 'w').write(vcsrevision_contents)
+        with open(args.depfile, "w") as depfile:
+            depfile.write(
+                "%s: %s\n"
+                % (
+                    args.vcs_header,
+                    os.path.relpath(os.path.join(git_dir, "logs", "HEAD"), build_dir),
+                )
+            )
+    open(args.vcs_header, "w").write(vcsrevision_contents)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())
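
For reference, running with --write-git-rev --name LLVM produces a header of
the form (the revision and URL shown are illustrative, not real output):

    #define LLVM_REVISION "abc1234"
    #define LLVM_REPOSITORY "https://github.com/llvm/llvm-project"

while omitting --write-git-rev writes the matching #undef pair for each --name.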

diff --git a/llvm/utils/gn/get.py b/llvm/utils/gn/get.py
index 0d08f5f60a229..d32685acf550c 100755
--- a/llvm/utils/gn/get.py
+++ b/llvm/utils/gn/get.py
@@ -10,53 +10,56 @@
 
 def download_and_unpack(url, output_dir, gn):
     """Download an archive from url and extract gn from it into output_dir."""
-    print('downloading %s ...' % url, end='')
+    print("downloading %s ..." % url, end="")
     sys.stdout.flush()
     data = urllib.request.urlopen(url).read()
-    print(' done')
+    print(" done")
     zipfile.ZipFile(io.BytesIO(data)).extract(gn, path=output_dir)
 
 
 def set_executable_bit(path):
     mode = os.stat(path).st_mode
-    mode |= (mode & 0o444) >> 2 # Copy R bits to X.
-    os.chmod(path, mode) # No-op on Windows.
+    mode |= (mode & 0o444) >> 2  # Copy R bits to X.
+    os.chmod(path, mode)  # No-op on Windows.
 
 
 def get_platform():
     import platform
-    if sys.platform == 'darwin':
-        return 'mac-amd64' if platform.machine() != 'arm64' else 'mac-arm64'
-    if platform.machine() not in ('AMD64', 'x86_64'):
+
+    if sys.platform == "darwin":
+        return "mac-amd64" if platform.machine() != "arm64" else "mac-arm64"
+    if platform.machine() not in ("AMD64", "x86_64"):
         return None
-    if sys.platform.startswith('linux'):
-        return 'linux-amd64'
-    if sys.platform == 'win32':
-        return 'windows-amd64'
+    if sys.platform.startswith("linux"):
+        return "linux-amd64"
+    if sys.platform == "win32":
+        return "windows-amd64"
 
 
 def main():
     platform = get_platform()
     if not platform:
-        print('no prebuilt binary for', sys.platform)
-        print('build it yourself with:')
-        print('  rm -rf /tmp/gn &&')
-        print('  pushd /tmp && git clone https://gn.googlesource.com/gn &&')
-        print('  cd gn && build/gen.py && ninja -C out gn && popd &&')
-        print('  cp /tmp/gn/out/gn somewhere/on/PATH')
+        print("no prebuilt binary for", sys.platform)
+        print("build it yourself with:")
+        print("  rm -rf /tmp/gn &&")
+        print("  pushd /tmp && git clone https://gn.googlesource.com/gn &&")
+        print("  cd gn && build/gen.py && ninja -C out gn && popd &&")
+        print("  cp /tmp/gn/out/gn somewhere/on/PATH")
         return 1
-    dirname = os.path.join(os.path.dirname(__file__), 'bin', platform)
+    dirname = os.path.join(os.path.dirname(__file__), "bin", platform)
     if not os.path.exists(dirname):
         os.makedirs(dirname)
 
-    url = 'https://chrome-infra-packages.appspot.com/dl/gn/gn/%s/+/latest'
-    gn = 'gn' + ('.exe' if sys.platform == 'win32' else '')
-    if platform == 'mac-arm64': # For https://openradar.appspot.com/FB8914243
-        try: os.remove(os.path.join(dirname, gn))
-        except OSError: pass
+    url = "https://chrome-infra-packages.appspot.com/dl/gn/gn/%s/+/latest"
+    gn = "gn" + (".exe" if sys.platform == "win32" else "")
+    if platform == "mac-arm64":  # For https://openradar.appspot.com/FB8914243
+        try:
+            os.remove(os.path.join(dirname, gn))
+        except OSError:
+            pass
     download_and_unpack(url % platform, dirname, gn)
     set_executable_bit(os.path.join(dirname, gn))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff --git a/llvm/utils/gn/gn.py b/llvm/utils/gn/gn.py
index b2936f88bf3fa..290c6941bceea 100755
--- a/llvm/utils/gn/gn.py
+++ b/llvm/utils/gn/gn.py
@@ -12,57 +12,64 @@
 
 
 THIS_DIR = os.path.dirname(__file__)
-ROOT_DIR = os.path.join(THIS_DIR, '..', '..', '..')
+ROOT_DIR = os.path.join(THIS_DIR, "..", "..", "..")
 
 
 def get_platform():
     import platform
-    if sys.platform == 'darwin':
-        return 'mac-amd64' if platform.machine() != 'arm64' else 'mac-arm64'
-    if platform.machine() not in ('AMD64', 'x86_64'):
+
+    if sys.platform == "darwin":
+        return "mac-amd64" if platform.machine() != "arm64" else "mac-arm64"
+    if platform.machine() not in ("AMD64", "x86_64"):
         return None
-    if sys.platform.startswith('linux'):
-        return 'linux-amd64'
-    if sys.platform == 'win32':
-        return 'windows-amd64'
+    if sys.platform.startswith("linux"):
+        return "linux-amd64"
+    if sys.platform == "win32":
+        return "windows-amd64"
 
 
 def print_no_gn(mention_get):
-    print('gn binary not found in PATH')
+    print("gn binary not found in PATH")
     if mention_get:
-        print('run llvm/utils/gn/get.py to download a binary and try again, or')
-    print('follow https://gn.googlesource.com/gn/#getting-started')
+        print("run llvm/utils/gn/get.py to download a binary and try again, or")
+    print("follow https://gn.googlesource.com/gn/#getting-started")
     return 1
 
 
 def main():
     # Find real gn executable.
-    gn = 'gn'
-    if subprocess.call('gn --version', stdout=open(os.devnull, 'w'),
-                                       stderr=subprocess.STDOUT,
-                                       shell=True) != 0:
+    gn = "gn"
+    if (
+        subprocess.call(
+            "gn --version",
+            stdout=open(os.devnull, "w"),
+            stderr=subprocess.STDOUT,
+            shell=True,
+        )
+        != 0
+    ):
         # Not on path. See if get.py downloaded a prebuilt binary and run that
         # if it's there, or suggest to run get.py if it isn't.
         platform = get_platform()
         if not platform:
             return print_no_gn(mention_get=False)
-        gn = os.path.join(os.path.dirname(__file__), 'bin', platform, 'gn')
-        if not os.path.exists(gn + ('.exe' if sys.platform == 'win32' else '')):
+        gn = os.path.join(os.path.dirname(__file__), "bin", platform, "gn")
+        if not os.path.exists(gn + (".exe" if sys.platform == "win32" else "")):
             return print_no_gn(mention_get=True)
 
     # Compute --dotfile= and --root= args to add.
     extra_args = []
-    gn_main_arg = next((x for x in sys.argv[1:] if not x.startswith('-')), None)
-    if gn_main_arg != 'help':  # `gn help` gets confused by the switches.
+    gn_main_arg = next((x for x in sys.argv[1:] if not x.startswith("-")), None)
+    if gn_main_arg != "help":  # `gn help` gets confused by the switches.
         cwd = os.getcwd()
-        dotfile = os.path.relpath(os.path.join(THIS_DIR, '.gn'), cwd)
+        dotfile = os.path.relpath(os.path.join(THIS_DIR, ".gn"), cwd)
         root = os.path.relpath(ROOT_DIR, cwd)
-        extra_args = [ '--dotfile=' + dotfile, '--root=' + root ]
+        extra_args = ["--dotfile=" + dotfile, "--root=" + root]
 
     # Run GN command with --dotfile= and --root= added.
     cmd = [gn] + extra_args + sys.argv[1:]
     sys.exit(subprocess.call(cmd))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
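
The wrapper only injects --dotfile= and --root= before delegating to the real gn binary. A sketch of the resulting command when invoked from the repository root (paths are illustrative):

    # Mirrors the argument injection in main() above.
    extra_args = ["--dotfile=llvm/utils/gn/.gn", "--root=."]
    cmd = ["gn"] + extra_args + ["gen", "out/gn"]
    print(" ".join(cmd))
    # -> gn --dotfile=llvm/utils/gn/.gn --root=. gen out/gn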

diff --git a/llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py b/llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py
index 3b162a9123b5b..7a584b1b5dcbb 100755
--- a/llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py
+++ b/llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 
 """
 Generate a linker script that links libc++ to the proper ABI library.
@@ -20,31 +20,31 @@
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument("--input", help="Path to libc++ library", required=True)
-    parser.add_argument("--output", help="Path to libc++ linker script",
-                        required=True)
-    parser.add_argument("libraries", nargs="+",
-                        help="List of libraries libc++ depends on")
+    parser.add_argument("--output", help="Path to libc++ linker script", required=True)
+    parser.add_argument(
+        "libraries", nargs="+", help="List of libraries libc++ depends on"
+    )
     args = parser.parse_args()
 
     # Use the relative path for the libc++ library.
     libcxx = os.path.relpath(args.input, os.path.dirname(args.output))
 
     # Prepare the list of public libraries to link.
-    public_libs = ['-l%s' % l for l in args.libraries]
+    public_libs = ["-l%s" % l for l in args.libraries]
 
     # Generate the linker script contents.
-    contents = "INPUT(%s)" % ' '.join([libcxx] + public_libs)
+    contents = "INPUT(%s)" % " ".join([libcxx] + public_libs)
 
     # Remove the existing libc++ symlink if it exists.
     if os.path.islink(args.output):
         os.unlink(args.output)
 
     # Replace it with the linker script.
-    with open(args.output, 'w') as f:
+    with open(args.output, "w") as f:
         f.write(contents + "\n")
 
     return 0
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff --git a/llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py b/llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py
index c31f35ce87416..b33517a2d21a2 100755
--- a/llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py
+++ b/llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py
@@ -7,16 +7,16 @@
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('exts', nargs='*', help='list of supported extensions')
-    parser.add_argument('-o', '--output', required=True, help='output file')
+    parser.add_argument("exts", nargs="*", help="list of supported extensions")
+    parser.add_argument("-o", "--output", required=True, help="output file")
     args = parser.parse_args()
 
-    output = ''.join(['HANDLE_EXTENSION(%s)\n' % ext for ext in args.exts])
-    output += '#undef HANDLE_EXTENSION\n'
+    output = "".join(["HANDLE_EXTENSION(%s)\n" % ext for ext in args.exts])
+    output += "#undef HANDLE_EXTENSION\n"
 
     if not os.path.exists(args.output) or open(args.output).read() != output:
-        open(args.output, 'w').write(output)
+        open(args.output, "w").write(output)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())
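
The generated file is a simple macro list; a sketch with hypothetical extension names, fed through the same generation loop as above:

    exts = ["Polly", "Bye"]  # hypothetical extension names
    output = "".join(["HANDLE_EXTENSION(%s)\n" % ext for ext in exts])
    output += "#undef HANDLE_EXTENSION\n"
    print(output, end="")
    # HANDLE_EXTENSION(Polly)
    # HANDLE_EXTENSION(Bye)
    # #undef HANDLE_EXTENSION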

diff --git a/llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py b/llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py
index 68a341c18bd84..5f0c91cea689f 100644
--- a/llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py
+++ b/llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py
@@ -9,8 +9,7 @@
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('-o', '--output', required=True,
-                        help='output file')
+    parser.add_argument("-o", "--output", required=True, help="output file")
     args = parser.parse_args()
 
     source = """\
@@ -21,8 +20,8 @@ def main():
 };
 std::array<ExtensionDescriptor, 0>  AvailableExtensions{};
 """
-    open(args.output, 'w').write(source)
+    open(args.output, "w").write(source)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff --git a/llvm/utils/indirect_calls.py b/llvm/utils/indirect_calls.py
index e460ff7d08362..2bdabc8c4d74f 100755
--- a/llvm/utils/indirect_calls.py
+++ b/llvm/utils/indirect_calls.py
@@ -18,26 +18,29 @@
 import subprocess
 import optparse
 
-# Look for indirect calls/jmps in a binary. re: (call|jmp).*\* 
+# Look for indirect calls/jmps in a binary. re: (call|jmp).*\*
 def look_for_indirect(file):
-    args = ['llvm-objdump']
+    args = ["llvm-objdump"]
     args.extend(["-d"])
     args.extend([file])
 
-    p = subprocess.Popen(args=args, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
-    (stdout,stderr) = p.communicate()
+    p = subprocess.Popen(
+        args=args, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE
+    )
+    (stdout, stderr) = p.communicate()
 
     function = ""
     for line in stdout.splitlines():
-        if line.startswith(' ') == False:
+        if line.startswith(" ") == False:
             function = line
-        result = re.search('(call|jmp).*\*', line)
+        result = re.search("(call|jmp).*\*", line)
         if result != None:
             # TODO: Perhaps use cxxfilt to demangle functions?
             print(function)
             print(line)
     return
 
+
 def main(args):
     # No options currently other than the binary.
     parser = optparse.OptionParser("%prog [options] <binary>")
@@ -46,5 +49,6 @@ def main(args):
         parser.error("invalid number of arguments: %s" % len(args))
     look_for_indirect(args[1])
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main(sys.argv)
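
The scan is just the regex from the comment applied to each disassembly line; an illustrative match (the objdump output is hypothetical):

    import re

    # The "*" marks an indirect operand in AT&T syntax.
    line = "  401234: ff d0   callq  *%rax"
    print(re.search(r"(call|jmp).*\*", line) is not None)  # True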

diff --git a/llvm/utils/lint/common_lint.py b/llvm/utils/lint/common_lint.py
index 641048b7ee5b4..1bf1695659d88 100644
--- a/llvm/utils/lint/common_lint.py
+++ b/llvm/utils/lint/common_lint.py
@@ -5,94 +5,101 @@
 from __future__ import print_function
 import re
 
+
 def VerifyLineLength(filename, lines, max_length):
-  """Checks to make sure the file has no lines with lines exceeding the length
-  limit.
-
-  Args:
-    filename: the file under consideration as string
-    lines: contents of the file as string array
-    max_length: maximum acceptable line length as number
-
-  Returns:
-    A list of tuples with format [(filename, line number, msg), ...] with any
-    violations found.
-  """
-  lint = []
-  line_num = 1
-  for line in lines:
-    length = len(line.rstrip('\n'))
-    if length > max_length:
-      lint.append((filename, line_num,
-                   'Line exceeds %d chars (%d)' % (max_length, length)))
-    line_num += 1
-  return lint
+    """Checks to make sure the file has no lines with lines exceeding the length
+    limit.
+
+    Args:
+      filename: the file under consideration as string
+      lines: contents of the file as string array
+      max_length: maximum acceptable line length as number
+
+    Returns:
+      A list of tuples with format [(filename, line number, msg), ...] with any
+      violations found.
+    """
+    lint = []
+    line_num = 1
+    for line in lines:
+        length = len(line.rstrip("\n"))
+        if length > max_length:
+            lint.append(
+                (
+                    filename,
+                    line_num,
+                    "Line exceeds %d chars (%d)" % (max_length, length),
+                )
+            )
+        line_num += 1
+    return lint
+
 
 def VerifyTabs(filename, lines):
-  """Checks to make sure the file has no tab characters.
-
-  Args:
-    filename: the file under consideration as string
-    lines: contents of the file as string array
-
-  Returns:
-    A list of tuples with format [(filename, line number, msg), ...] with any violations
-    found.
-  """
-  lint = []
-  tab_re = re.compile(r'\t')
-  line_num = 1
-  for line in lines:
-    if tab_re.match(line.rstrip('\n')):
-      lint.append((filename, line_num, 'Tab found instead of whitespace'))
-    line_num += 1
-  return lint
+    """Checks to make sure the file has no tab characters.
+
+    Args:
+      filename: the file under consideration as string
+      lines: contents of the file as string array
+
+    Returns:
+      A list of tuples with format [(filename, line number, msg), ...] with any violations
+      found.
+    """
+    lint = []
+    tab_re = re.compile(r"\t")
+    line_num = 1
+    for line in lines:
+        if tab_re.match(line.rstrip("\n")):
+            lint.append((filename, line_num, "Tab found instead of whitespace"))
+        line_num += 1
+    return lint
 
 
 def VerifyTrailingWhitespace(filename, lines):
-  """Checks to make sure the file has no lines with trailing whitespace.
-
-  Args:
-    filename: the file under consideration as string
-    lines: contents of the file as string array
-
-  Returns:
-    A list of tuples with format [(filename, line number, msg), ...] with any
-    violations found.
-  """
-  lint = []
-  trailing_whitespace_re = re.compile(r'\s+$')
-  line_num = 1
-  for line in lines:
-    if trailing_whitespace_re.match(line.rstrip('\n')):
-      lint.append((filename, line_num, 'Trailing whitespace'))
-    line_num += 1
-  return lint
+    """Checks to make sure the file has no lines with trailing whitespace.
+
+    Args:
+      filename: the file under consideration as string
+      lines: contents of the file as string array
+
+    Returns:
+      A list of tuples with format [(filename, line number, msg), ...] with any
+      violations found.
+    """
+    lint = []
+    trailing_whitespace_re = re.compile(r"\s+$")
+    line_num = 1
+    for line in lines:
+        if trailing_whitespace_re.match(line.rstrip("\n")):
+            lint.append((filename, line_num, "Trailing whitespace"))
+        line_num += 1
+    return lint
 
 
 class BaseLint:
-  def RunOnFile(filename, lines):
-    raise Exception('RunOnFile() unimplemented')
+    def RunOnFile(filename, lines):
+        raise Exception("RunOnFile() unimplemented")
 
 
 def RunLintOverAllFiles(linter, filenames):
-  """Runs linter over the contents of all files.
-
-  Args:
-    linter: subclass of BaseLint, implementing RunOnFile()
-    filenames: list of all files whose contents will be linted
-
-  Returns:
-    A list of tuples with format [(filename, line number, msg), ...] with any
-    violations found.
-  """
-  lint = []
-  for filename in filenames:
-    file = open(filename, 'r')
-    if not file:
-      print('Could not open %s' % filename)
-      continue
-    lines = file.readlines()
-    lint.extend(linter.RunOnFile(filename, lines))
-
-  return lint
+    """Runs linter over the contents of all files.
+
+    Args:
+      linter: subclass of BaseLint, implementing RunOnFile()
+      filenames: list of all files whose contents will be linted
+
+    Returns:
+      A list of tuples with format [(filename, line number, msg), ...] with any
+      violations found.
+    """
+    lint = []
+    for filename in filenames:
+        file = open(filename, "r")
+        if not file:
+            print("Cound not open %s" % filename)
+            continue
+        lines = file.readlines()
+        lint.extend(linter.RunOnFile(filename, lines))
+
+    return lint
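
A small illustrative driver for these helpers (the file contents are hypothetical):

    import common_lint  # assumes llvm/utils/lint/ is on sys.path

    lines = ["\tindented with a tab\n", "   \n", "x" * 100 + "\n"]
    problems = common_lint.VerifyTabs("example.txt", lines)
    problems += common_lint.VerifyTrailingWhitespace("example.txt", lines)
    problems += common_lint.VerifyLineLength("example.txt", lines, 80)
    for filename, line_num, msg in problems:
        print("%s:%d:%s" % (filename, line_num, msg))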

diff --git a/llvm/utils/lint/cpp_lint.py b/llvm/utils/lint/cpp_lint.py
index 4ef457e83daa5..3734ba456626f 100755
--- a/llvm/utils/lint/cpp_lint.py
+++ b/llvm/utils/lint/cpp_lint.py
@@ -11,85 +11,102 @@
 import re
 import sys
 
+
 def VerifyIncludes(filename, lines):
-  """Makes sure the #includes are in proper order and no disallows files are
-  #included.
-
-  Args:
-    filename: the file under consideration as string
-    lines: contents of the file as string array
-  """
-  lint = []
-
-  include_gtest_re = re.compile(r'^#include "gtest/(.*)"')
-  include_llvm_re = re.compile(r'^#include "llvm/(.*)"')
-  include_support_re = re.compile(r'^#include "(Support/.*)"')
-  include_config_re = re.compile(r'^#include "(Config/.*)"')
-  include_system_re = re.compile(r'^#include <(.*)>')
-
-  DISALLOWED_SYSTEM_HEADERS = ['iostream']
-
-  line_num = 1
-  prev_config_header = None
-  prev_system_header = None
-  for line in lines:
-    # TODO: implement private headers
-    # TODO: implement gtest headers
-    # TODO: implement top-level llvm/* headers
-    # TODO: implement llvm/Support/* headers
-
-    # Process Config/* headers
-    config_header = include_config_re.match(line)
-    if config_header:
-      curr_config_header = config_header.group(1)
-      if prev_config_header:
-        if prev_config_header > curr_config_header:
-          lint.append((filename, line_num,
-                       'Config headers not in order: "%s" before "%s"' % (
-                         prev_config_header, curr_config_header)))
-
-    # Process system headers
-    system_header = include_system_re.match(line)
-    if system_header:
-      curr_system_header = system_header.group(1)
-
-      # Is it disallowed?
-      if curr_system_header in DISALLOWED_SYSTEM_HEADERS:
-        lint.append((filename, line_num,
-                     'Disallowed system header: <%s>' % curr_system_header))
-      elif prev_system_header:
-        # Make sure system headers are alphabetized amongst themselves
-        if prev_system_header > curr_system_header:
-          lint.append((filename, line_num,
-                       'System headers not in order: <%s> before <%s>' % (
-                         prev_system_header, curr_system_header)))
-
-      prev_system_header = curr_system_header
-
-    line_num += 1
-
-  return lint
+    """Makes sure the #includes are in proper order and no disallows files are
+    #included.
 
+    Args:
+      filename: the file under consideration as string
+      lines: contents of the file as string array
+    """
+    lint = []
 
-class CppLint(common_lint.BaseLint):
-  MAX_LINE_LENGTH = 80
+    include_gtest_re = re.compile(r'^#include "gtest/(.*)"')
+    include_llvm_re = re.compile(r'^#include "llvm/(.*)"')
+    include_support_re = re.compile(r'^#include "(Support/.*)"')
+    include_config_re = re.compile(r'^#include "(Config/.*)"')
+    include_system_re = re.compile(r"^#include <(.*)>")
+
+    DISALLOWED_SYSTEM_HEADERS = ["iostream"]
+
+    line_num = 1
+    prev_config_header = None
+    prev_system_header = None
+    for line in lines:
+        # TODO: implement private headers
+        # TODO: implement gtest headers
+        # TODO: implement top-level llvm/* headers
+        # TODO: implement llvm/Support/* headers
+
+        # Process Config/* headers
+        config_header = include_config_re.match(line)
+        if config_header:
+            curr_config_header = config_header.group(1)
+            if prev_config_header:
+                if prev_config_header > curr_config_header:
+                    lint.append(
+                        (
+                            filename,
+                            line_num,
+                            'Config headers not in order: "%s" before "%s"'
+                            % (prev_config_header, curr_config_header),
+                        )
+                    )
+
+        # Process system headers
+        system_header = include_system_re.match(line)
+        if system_header:
+            curr_system_header = system_header.group(1)
+
+            # Is it disallowed?
+            if curr_system_header in DISALLOWED_SYSTEM_HEADERS:
+                lint.append(
+                    (
+                        filename,
+                        line_num,
+                        "Disallowed system header: <%s>" % curr_system_header,
+                    )
+                )
+            elif prev_system_header:
+                # Make sure system headers are alphabetized amongst themselves
+                if prev_system_header > curr_system_header:
+                    lint.append(
+                        (
+                            filename,
+                            line_num,
+                            "System headers not in order: <%s> before <%s>"
+                            % (prev_system_header, curr_system_header),
+                        )
+                    )
+
+            prev_system_header = curr_system_header
+
+        line_num += 1
 
-  def RunOnFile(self, filename, lines):
-    lint = []
-    lint.extend(VerifyIncludes(filename, lines))
-    lint.extend(common_lint.VerifyLineLength(filename, lines,
-                                             CppLint.MAX_LINE_LENGTH))
-    lint.extend(common_lint.VerifyTabs(filename, lines))
-    lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
     return lint
 
 
+class CppLint(common_lint.BaseLint):
+    MAX_LINE_LENGTH = 80
+
+    def RunOnFile(self, filename, lines):
+        lint = []
+        lint.extend(VerifyIncludes(filename, lines))
+        lint.extend(
+            common_lint.VerifyLineLength(filename, lines, CppLint.MAX_LINE_LENGTH)
+        )
+        lint.extend(common_lint.VerifyTabs(filename, lines))
+        lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
+        return lint
+
+
 def CppLintMain(filenames):
-  all_lint = common_lint.RunLintOverAllFiles(CppLint(), filenames)
-  for lint in all_lint:
-    print('%s:%d:%s' % (lint[0], lint[1], lint[2]))
-  return 0
+    all_lint = common_lint.RunLintOverAllFiles(CppLint(), filenames)
+    for lint in all_lint:
+        print("%s:%d:%s" % (lint[0], lint[1], lint[2]))
+    return 0
 
 
-if __name__ == '__main__':
-  sys.exit(CppLintMain(sys.argv[1:]))
+if __name__ == "__main__":
+    sys.exit(CppLintMain(sys.argv[1:]))
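
VerifyIncludes can be exercised directly; a sketch with hypothetical file contents:

    import cpp_lint  # assumes llvm/utils/lint/ is on sys.path

    # <iostream> is disallowed, and <algorithm> sorts before it.
    lines = ["#include <iostream>\n", "#include <algorithm>\n"]
    for record in cpp_lint.VerifyIncludes("widget.cpp", lines):
        print(record)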

diff --git a/llvm/utils/lint/generic_lint.py b/llvm/utils/lint/generic_lint.py
index 06218d7ea3c19..18e6e67e29a6e 100755
--- a/llvm/utils/lint/generic_lint.py
+++ b/llvm/utils/lint/generic_lint.py
@@ -6,19 +6,19 @@
 import common_lint
 import sys
 
+
 class GenericCodeLint(common_lint.BaseLint):
-  MAX_LINE_LENGTH = 80
+    MAX_LINE_LENGTH = 80
 
-  def RunOnFile(self, filename, lines):
-    common_lint.VerifyLineLength(filename, lines,
-                                 GenericCodeLint.MAX_LINE_LENGTH)
-    common_lint.VerifyTrailingWhitespace(filename, lines)
+    def RunOnFile(self, filename, lines):
+        common_lint.VerifyLineLength(filename, lines, GenericCodeLint.MAX_LINE_LENGTH)
+        common_lint.VerifyTrailingWhitespace(filename, lines)
 
 
 def GenericCodeLintMain(filenames):
-  common_lint.RunLintOverAllFiles(GenericCodeLint(), filenames)
-  return 0
+    common_lint.RunLintOverAllFiles(GenericCodeLint(), filenames)
+    return 0
 
 
-if __name__ == '__main__':
-  sys.exit(GenericCodeLintMain(sys.argv[1:]))
+if __name__ == "__main__":
+    sys.exit(GenericCodeLintMain(sys.argv[1:]))

diff --git a/llvm/utils/lit/examples/many-tests/lit.cfg b/llvm/utils/lit/examples/many-tests/lit.cfg
index 3477fef01ef98..da01c935c8e66 100644
--- a/llvm/utils/lit/examples/many-tests/lit.cfg
+++ b/llvm/utils/lit/examples/many-tests/lit.cfg
@@ -2,6 +2,7 @@
 
 import sys
 import os
+
 sys.path.insert(0, os.path.dirname(__file__))
 import ManyTests
 

diff --git a/llvm/utils/lit/lit.py b/llvm/utils/lit/lit.py
index 381228ea39877..5c1c953e8bc75 100755
--- a/llvm/utils/lit/lit.py
+++ b/llvm/utils/lit/lit.py
@@ -2,5 +2,5 @@
 
 from lit.main import main
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff --git a/llvm/utils/lit/lit/BooleanExpression.py b/llvm/utils/lit/lit/BooleanExpression.py
index ba8453d60e98a..9b9573d2f3f14 100644
--- a/llvm/utils/lit/lit/BooleanExpression.py
+++ b/llvm/utils/lit/lit/BooleanExpression.py
@@ -1,5 +1,6 @@
 import re
 
+
 class BooleanExpression:
     # A simple evaluator of boolean expressions.
     #
@@ -30,14 +31,14 @@ def evaluate(string, variables):
             parser = BooleanExpression(string, set(variables))
             return parser.parseAll()
         except ValueError as e:
-            raise ValueError(str(e) + ('\nin expression: %r' % string))
+            raise ValueError(str(e) + ("\nin expression: %r" % string))
 
     #####
 
     def __init__(self, string, variables):
         self.tokens = BooleanExpression.tokenize(string)
         self.variables = variables
-        self.variables.add('true')
+        self.variables.add("true")
         self.value = None
         self.token = None
 
@@ -45,7 +46,9 @@ def __init__(self, string, variables):
     END = object()
 
     # Tokenization pattern.
-    Pattern = re.compile(r'\A\s*([()]|&&|\|\||!|(?:[-+=._a-zA-Z0-9]+|\{\{.+?\}\})+)\s*(.*)\Z')
+    Pattern = re.compile(
+        r"\A\s*([()]|&&|\|\||!|(?:[-+=._a-zA-Z0-9]+|\{\{.+?\}\})+)\s*(.*)\Z"
+    )
 
     @staticmethod
     def tokenize(string):
@@ -53,7 +56,7 @@ def tokenize(string):
             m = re.match(BooleanExpression.Pattern, string)
             if m is None:
                 if string == "":
-                    yield BooleanExpression.END;
+                    yield BooleanExpression.END
                     return
                 else:
                     raise ValueError("couldn't parse text: %r" % string)
@@ -64,7 +67,7 @@ def tokenize(string):
 
     def quote(self, token):
         if token is BooleanExpression.END:
-            return '<end of expression>'
+            return "<end of expression>"
         else:
             return repr(token)
 
@@ -80,22 +83,29 @@ def expect(self, t):
             if self.token != BooleanExpression.END:
                 self.token = next(self.tokens)
         else:
-            raise ValueError("expected: %s\nhave: %s" %
-                             (self.quote(t), self.quote(self.token)))
+            raise ValueError(
+                "expected: %s\nhave: %s" % (self.quote(t), self.quote(self.token))
+            )
 
     @staticmethod
     def isMatchExpression(token):
-        if (token is BooleanExpression.END or token == '&&' or token == '||' or
-            token == '!' or token == '(' or token == ')'):
+        if (
+            token is BooleanExpression.END
+            or token == "&&"
+            or token == "||"
+            or token == "!"
+            or token == "("
+            or token == ")"
+        ):
             return False
         return True
 
     def parseMATCH(self):
-        regex = ''
-        for part in filter(None, re.split(r'(\{\{.+?\}\})', self.token)):
-            if part.startswith('{{'):
-                assert part.endswith('}}')
-                regex += '(?:{})'.format(part[2:-2])
+        regex = ""
+        for part in filter(None, re.split(r"(\{\{.+?\}\})", self.token)):
+            if part.startswith("{{"):
+                assert part.endswith("}}")
+                regex += "(?:{})".format(part[2:-2])
             else:
                 regex += re.escape(part)
         regex = re.compile(regex)
@@ -103,21 +113,23 @@ def parseMATCH(self):
         self.token = next(self.tokens)
 
     def parseNOT(self):
-        if self.accept('!'):
+        if self.accept("!"):
             self.parseNOT()
             self.value = not self.value
-        elif self.accept('('):
+        elif self.accept("("):
             self.parseOR()
-            self.expect(')')
+            self.expect(")")
         elif not BooleanExpression.isMatchExpression(self.token):
-            raise ValueError("expected: '!', '(', '{{', or identifier\nhave: %s" %
-                             self.quote(self.token))
+            raise ValueError(
+                "expected: '!', '(', '{{', or identifier\nhave: %s"
+                % self.quote(self.token)
+            )
         else:
             self.parseMATCH()
 
     def parseAND(self):
         self.parseNOT()
-        while self.accept('&&'):
+        while self.accept("&&"):
             left = self.value
             self.parseNOT()
             right = self.value
@@ -127,7 +139,7 @@ def parseAND(self):
 
     def parseOR(self):
         self.parseAND()
-        while self.accept('||'):
+        while self.accept("||"):
             left = self.value
             self.parseAND()
             right = self.value
@@ -147,61 +159,90 @@ def parseAll(self):
 
 import unittest
 
+
 class TestBooleanExpression(unittest.TestCase):
     def test_variables(self):
-        variables = {'its-true', 'false-lol-true', 'under_score',
-                     'e=quals', 'd1g1ts'}
-        self.assertTrue(BooleanExpression.evaluate('true', variables))
-        self.assertTrue(BooleanExpression.evaluate('its-true', variables))
-        self.assertTrue(BooleanExpression.evaluate('false-lol-true', variables))
-        self.assertTrue(BooleanExpression.evaluate('under_score', variables))
-        self.assertTrue(BooleanExpression.evaluate('e=quals', variables))
-        self.assertTrue(BooleanExpression.evaluate('d1g1ts', variables))
-        self.assertTrue(BooleanExpression.evaluate('{{its.+}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('{{false-[lo]+-true}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('{{(true|false)-lol-(true|false)}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('d1g{{[0-9]}}ts', variables))
-        self.assertTrue(BooleanExpression.evaluate('d1g{{[0-9]}}t{{[a-z]}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('{{d}}1g{{[0-9]}}t{{[a-z]}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('d1{{(g|1)+}}ts', variables))
-
-        self.assertFalse(BooleanExpression.evaluate('false', variables))
-        self.assertFalse(BooleanExpression.evaluate('True', variables))
-        self.assertFalse(BooleanExpression.evaluate('true-ish', variables))
-        self.assertFalse(BooleanExpression.evaluate('not_true', variables))
-        self.assertFalse(BooleanExpression.evaluate('tru', variables))
-        self.assertFalse(BooleanExpression.evaluate('{{its-true.+}}', variables))
+        variables = {"its-true", "false-lol-true", "under_score", "e=quals", "d1g1ts"}
+        self.assertTrue(BooleanExpression.evaluate("true", variables))
+        self.assertTrue(BooleanExpression.evaluate("its-true", variables))
+        self.assertTrue(BooleanExpression.evaluate("false-lol-true", variables))
+        self.assertTrue(BooleanExpression.evaluate("under_score", variables))
+        self.assertTrue(BooleanExpression.evaluate("e=quals", variables))
+        self.assertTrue(BooleanExpression.evaluate("d1g1ts", variables))
+        self.assertTrue(BooleanExpression.evaluate("{{its.+}}", variables))
+        self.assertTrue(BooleanExpression.evaluate("{{false-[lo]+-true}}", variables))
+        self.assertTrue(
+            BooleanExpression.evaluate("{{(true|false)-lol-(true|false)}}", variables)
+        )
+        self.assertTrue(BooleanExpression.evaluate("d1g{{[0-9]}}ts", variables))
+        self.assertTrue(BooleanExpression.evaluate("d1g{{[0-9]}}t{{[a-z]}}", variables))
+        self.assertTrue(
+            BooleanExpression.evaluate("{{d}}1g{{[0-9]}}t{{[a-z]}}", variables)
+        )
+        self.assertTrue(BooleanExpression.evaluate("d1{{(g|1)+}}ts", variables))
+
+        self.assertFalse(BooleanExpression.evaluate("false", variables))
+        self.assertFalse(BooleanExpression.evaluate("True", variables))
+        self.assertFalse(BooleanExpression.evaluate("true-ish", variables))
+        self.assertFalse(BooleanExpression.evaluate("not_true", variables))
+        self.assertFalse(BooleanExpression.evaluate("tru", variables))
+        self.assertFalse(BooleanExpression.evaluate("{{its-true.+}}", variables))
 
     def test_matching(self):
-        expr1 = 'linux && (target={{aarch64-.+}} || target={{x86_64-.+}})'
-        self.assertTrue(BooleanExpression.evaluate(expr1, {'linux', 'target=x86_64-unknown-linux-gnu'}))
-        self.assertFalse(BooleanExpression.evaluate(expr1, {'linux', 'target=i386-unknown-linux-gnu'}))
-
-        expr2 = 'use_system_cxx_lib && target={{.+}}-apple-macosx10.{{9|10|11|12}} && !no-exceptions'
-        self.assertTrue(BooleanExpression.evaluate(expr2, {'use_system_cxx_lib', 'target=arm64-apple-macosx10.12'}))
-        self.assertFalse(BooleanExpression.evaluate(expr2, {'use_system_cxx_lib', 'target=arm64-apple-macosx10.12', 'no-exceptions'}))
-        self.assertFalse(BooleanExpression.evaluate(expr2, {'use_system_cxx_lib', 'target=arm64-apple-macosx10.15'}))
+        expr1 = "linux && (target={{aarch64-.+}} || target={{x86_64-.+}})"
+        self.assertTrue(
+            BooleanExpression.evaluate(
+                expr1, {"linux", "target=x86_64-unknown-linux-gnu"}
+            )
+        )
+        self.assertFalse(
+            BooleanExpression.evaluate(
+                expr1, {"linux", "target=i386-unknown-linux-gnu"}
+            )
+        )
+
+        expr2 = "use_system_cxx_lib && target={{.+}}-apple-macosx10.{{9|10|11|12}} && !no-exceptions"
+        self.assertTrue(
+            BooleanExpression.evaluate(
+                expr2, {"use_system_cxx_lib", "target=arm64-apple-macosx10.12"}
+            )
+        )
+        self.assertFalse(
+            BooleanExpression.evaluate(
+                expr2,
+                {
+                    "use_system_cxx_lib",
+                    "target=arm64-apple-macosx10.12",
+                    "no-exceptions",
+                },
+            )
+        )
+        self.assertFalse(
+            BooleanExpression.evaluate(
+                expr2, {"use_system_cxx_lib", "target=arm64-apple-macosx10.15"}
+            )
+        )
 
     def test_operators(self):
-        self.assertTrue(BooleanExpression.evaluate('true || true', {}))
-        self.assertTrue(BooleanExpression.evaluate('true || false', {}))
-        self.assertTrue(BooleanExpression.evaluate('false || true', {}))
-        self.assertFalse(BooleanExpression.evaluate('false || false', {}))
-
-        self.assertTrue(BooleanExpression.evaluate('true && true', {}))
-        self.assertFalse(BooleanExpression.evaluate('true && false', {}))
-        self.assertFalse(BooleanExpression.evaluate('false && true', {}))
-        self.assertFalse(BooleanExpression.evaluate('false && false', {}))
-
-        self.assertFalse(BooleanExpression.evaluate('!true', {}))
-        self.assertTrue(BooleanExpression.evaluate('!false', {}))
-
-        self.assertTrue(BooleanExpression.evaluate('   ((!((false) ))   ) ', {}))
-        self.assertTrue(BooleanExpression.evaluate('true && (true && (true))', {}))
-        self.assertTrue(BooleanExpression.evaluate('!false && !false && !! !false', {}))
-        self.assertTrue(BooleanExpression.evaluate('false && false || true', {}))
-        self.assertTrue(BooleanExpression.evaluate('(false && false) || true', {}))
-        self.assertFalse(BooleanExpression.evaluate('false && (false || true)', {}))
+        self.assertTrue(BooleanExpression.evaluate("true || true", {}))
+        self.assertTrue(BooleanExpression.evaluate("true || false", {}))
+        self.assertTrue(BooleanExpression.evaluate("false || true", {}))
+        self.assertFalse(BooleanExpression.evaluate("false || false", {}))
+
+        self.assertTrue(BooleanExpression.evaluate("true && true", {}))
+        self.assertFalse(BooleanExpression.evaluate("true && false", {}))
+        self.assertFalse(BooleanExpression.evaluate("false && true", {}))
+        self.assertFalse(BooleanExpression.evaluate("false && false", {}))
+
+        self.assertFalse(BooleanExpression.evaluate("!true", {}))
+        self.assertTrue(BooleanExpression.evaluate("!false", {}))
+
+        self.assertTrue(BooleanExpression.evaluate("   ((!((false) ))   ) ", {}))
+        self.assertTrue(BooleanExpression.evaluate("true && (true && (true))", {}))
+        self.assertTrue(BooleanExpression.evaluate("!false && !false && !! !false", {}))
+        self.assertTrue(BooleanExpression.evaluate("false && false || true", {}))
+        self.assertTrue(BooleanExpression.evaluate("(false && false) || true", {}))
+        self.assertFalse(BooleanExpression.evaluate("false && (false || true)", {}))
 
     # Evaluate boolean expression `expr`.
     # Fail if it does not throw a ValueError containing the text `error`.
@@ -211,74 +252,99 @@ def checkException(self, expr, error):
             self.fail("expression %r didn't cause an exception" % expr)
         except ValueError as e:
             if -1 == str(e).find(error):
-                self.fail(("expression %r caused the wrong ValueError\n" +
-                           "actual error was:\n%s\n" +
-                           "expected error was:\n%s\n") % (expr, e, error))
+                self.fail(
+                    (
+                        "expression %r caused the wrong ValueError\n"
+                        + "actual error was:\n%s\n"
+                        + "expected error was:\n%s\n"
+                    )
+                    % (expr, e, error)
+                )
         except BaseException as e:
-            self.fail(("expression %r caused the wrong exception; actual " +
-                      "exception was: \n%r") % (expr, e))
+            self.fail(
+                (
+                    "expression %r caused the wrong exception; actual "
+                    + "exception was: \n%r"
+                )
+                % (expr, e)
+            )
 
     def test_errors(self):
-        self.checkException("ba#d",
-                            "couldn't parse text: '#d'\n" +
-                            "in expression: 'ba#d'")
-
-        self.checkException("true and true",
-                            "expected: <end of expression>\n" +
-                            "have: 'and'\n" +
-                            "in expression: 'true and true'")
-
-        self.checkException("|| true",
-                            "expected: '!', '(', '{{', or identifier\n" +
-                            "have: '||'\n" +
-                            "in expression: '|| true'")
-
-        self.checkException("true &&",
-                            "expected: '!', '(', '{{', or identifier\n" +
-                            "have: <end of expression>\n" +
-                            "in expression: 'true &&'")
-
-        self.checkException("",
-                            "expected: '!', '(', '{{', or identifier\n" +
-                            "have: <end of expression>\n" +
-                            "in expression: ''")
-
-        self.checkException("*",
-                            "couldn't parse text: '*'\n" +
-                            "in expression: '*'")
-
-        self.checkException("no wait stop",
-                            "expected: <end of expression>\n" +
-                            "have: 'wait'\n" +
-                            "in expression: 'no wait stop'")
-
-        self.checkException("no-$-please",
-                            "couldn't parse text: '$-please'\n" +
-                            "in expression: 'no-$-please'")
-
-        self.checkException("(((true && true) || true)",
-                            "expected: ')'\n" +
-                            "have: <end of expression>\n" +
-                            "in expression: '(((true && true) || true)'")
-
-        self.checkException("true (true)",
-                            "expected: <end of expression>\n" +
-                            "have: '('\n" +
-                            "in expression: 'true (true)'")
-
-        self.checkException("( )",
-                            "expected: '!', '(', '{{', or identifier\n" +
-                            "have: ')'\n" +
-                            "in expression: '( )'")
-
-        self.checkException("abc{{def",
-                            "couldn't parse text: '{{def'\n" +
-                            "in expression: 'abc{{def'")
-
-        self.checkException("{{}}",
-                            "couldn't parse text: '{{}}'\n" +
-                            "in expression: '{{}}'")
-
-
-if __name__ == '__main__':
+        self.checkException(
+            "ba#d", "couldn't parse text: '#d'\n" + "in expression: 'ba#d'"
+        )
+
+        self.checkException(
+            "true and true",
+            "expected: <end of expression>\n"
+            + "have: 'and'\n"
+            + "in expression: 'true and true'",
+        )
+
+        self.checkException(
+            "|| true",
+            "expected: '!', '(', '{{', or identifier\n"
+            + "have: '||'\n"
+            + "in expression: '|| true'",
+        )
+
+        self.checkException(
+            "true &&",
+            "expected: '!', '(', '{{', or identifier\n"
+            + "have: <end of expression>\n"
+            + "in expression: 'true &&'",
+        )
+
+        self.checkException(
+            "",
+            "expected: '!', '(', '{{', or identifier\n"
+            + "have: <end of expression>\n"
+            + "in expression: ''",
+        )
+
+        self.checkException("*", "couldn't parse text: '*'\n" + "in expression: '*'")
+
+        self.checkException(
+            "no wait stop",
+            "expected: <end of expression>\n"
+            + "have: 'wait'\n"
+            + "in expression: 'no wait stop'",
+        )
+
+        self.checkException(
+            "no-$-please",
+            "couldn't parse text: '$-please'\n" + "in expression: 'no-$-please'",
+        )
+
+        self.checkException(
+            "(((true && true) || true)",
+            "expected: ')'\n"
+            + "have: <end of expression>\n"
+            + "in expression: '(((true && true) || true)'",
+        )
+
+        self.checkException(
+            "true (true)",
+            "expected: <end of expression>\n"
+            + "have: '('\n"
+            + "in expression: 'true (true)'",
+        )
+
+        self.checkException(
+            "( )",
+            "expected: '!', '(', '{{', or identifier\n"
+            + "have: ')'\n"
+            + "in expression: '( )'",
+        )
+
+        self.checkException(
+            "abc{{def", "couldn't parse text: '{{def'\n" + "in expression: 'abc{{def'"
+        )
+
+        self.checkException(
+            "{{}}", "couldn't parse text: '{{}}'\n" + "in expression: '{{}}'"
+        )
+
+
+if __name__ == "__main__":
     unittest.main()
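
Typical use mirrors the tests above, evaluating a lit feature expression against a feature set:

    # Assumes llvm/utils/lit/ is on sys.path.
    from lit.BooleanExpression import BooleanExpression

    features = {"linux", "target=x86_64-unknown-linux-gnu"}
    expr = "linux && (target={{aarch64-.+}} || target={{x86_64-.+}})"
    print(BooleanExpression.evaluate(expr, features))  # True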

diff --git a/llvm/utils/lit/lit/LitConfig.py b/llvm/utils/lit/lit/LitConfig.py
index d6287d06034f3..fcd6825f1d6da 100644
--- a/llvm/utils/lit/lit/LitConfig.py
+++ b/llvm/utils/lit/lit/LitConfig.py
@@ -20,13 +20,24 @@ class LitConfig(object):
     easily.
     """
 
-    def __init__(self, progname, path, quiet,
-                 useValgrind, valgrindLeakCheck, valgrindArgs,
-                 noExecute, debug, isWindows, order,
-                 params, config_prefix = None,
-                 maxIndividualTestTime = 0,
-                 parallelism_groups = {},
-                 echo_all_commands = False):
+    def __init__(
+        self,
+        progname,
+        path,
+        quiet,
+        useValgrind,
+        valgrindLeakCheck,
+        valgrindArgs,
+        noExecute,
+        debug,
+        isWindows,
+        order,
+        params,
+        config_prefix=None,
+        maxIndividualTestTime=0,
+        parallelism_groups={},
+        echo_all_commands=False,
+    ):
         # The name of the test runner.
         self.progname = progname
         # The items to add to the PATH environment variable.
@@ -43,25 +54,34 @@ def __init__(self, progname, path, quiet,
         self.bashPath = None
 
         # Configuration files to look for when discovering test suites.
-        self.config_prefix = config_prefix or 'lit'
-        self.suffixes = ['cfg.py', 'cfg']
-        self.config_names = ['%s.%s' % (self.config_prefix,x) for x in self.suffixes]
-        self.site_config_names = ['%s.site.%s' % (self.config_prefix,x) for x in self.suffixes]
-        self.local_config_names = ['%s.local.%s' % (self.config_prefix,x) for x in self.suffixes]
+        self.config_prefix = config_prefix or "lit"
+        self.suffixes = ["cfg.py", "cfg"]
+        self.config_names = ["%s.%s" % (self.config_prefix, x) for x in self.suffixes]
+        self.site_config_names = [
+            "%s.site.%s" % (self.config_prefix, x) for x in self.suffixes
+        ]
+        self.local_config_names = [
+            "%s.local.%s" % (self.config_prefix, x) for x in self.suffixes
+        ]
 
         self.numErrors = 0
         self.numWarnings = 0
 
         self.valgrindArgs = []
         if self.useValgrind:
-            self.valgrindArgs = ['valgrind', '-q', '--run-libc-freeres=no',
-                                 '--tool=memcheck', '--trace-children=yes',
-                                 '--error-exitcode=123']
+            self.valgrindArgs = [
+                "valgrind",
+                "-q",
+                "--run-libc-freeres=no",
+                "--tool=memcheck",
+                "--trace-children=yes",
+                "--error-exitcode=123",
+            ]
             if self.valgrindLeakCheck:
-                self.valgrindArgs.append('--leak-check=full')
+                self.valgrindArgs.append("--leak-check=full")
             else:
                 # The default is 'summary'.
-                self.valgrindArgs.append('--leak-check=no')
+                self.valgrindArgs.append("--leak-check=no")
             self.valgrindArgs.extend(self.valgrindUserArgs)
 
         self.maxIndividualTestTime = maxIndividualTestTime
@@ -71,32 +91,32 @@ def __init__(self, progname, path, quiet,
     @property
     def maxIndividualTestTime(self):
         """
-            Interface for getting maximum time to spend executing
-            a single test
+        Interface for getting maximum time to spend executing
+        a single test
         """
         return self._maxIndividualTestTime
 
     @property
     def maxIndividualTestTimeIsSupported(self):
         """
-            Returns a tuple (<supported> , <error message>)
-            where
-            `<supported>` is True if setting maxIndividualTestTime is supported
-                on the current host, returns False otherwise.
-            `<error message>` is an empty string if `<supported>` is True,
-                otherwise is contains a string describing why setting
-                maxIndividualTestTime is not supported.
+        Returns a tuple (<supported> , <error message>)
+        where
+        `<supported>` is True if setting maxIndividualTestTime is supported
+            on the current host, returns False otherwise.
+        `<error message>` is an empty string if `<supported>` is True,
+            otherwise it contains a string describing why setting
+            maxIndividualTestTime is not supported.
         """
         return lit.util.killProcessAndChildrenIsSupported()
 
     @maxIndividualTestTime.setter
     def maxIndividualTestTime(self, value):
         """
-            Interface for setting maximum time to spend executing
-            a single test
+        Interface for setting maximum time to spend executing
+        a single test
         """
         if not isinstance(value, int):
-            self.fatal('maxIndividualTestTime must be set to a value of type int.')
+            self.fatal("maxIndividualTestTime must set to a value of type int.")
         self._maxIndividualTestTime = value
         if self.maxIndividualTestTime > 0:
             # The current implementation needs psutil on some platforms to set
@@ -104,16 +124,15 @@ def maxIndividualTestTime(self, value):
             # See lit.util.killProcessAndChildren()
             supported, errormsg = self.maxIndividualTestTimeIsSupported
             if not supported:
-                self.fatal('Setting a timeout per test not supported. ' +
-                           errormsg)
+                self.fatal("Setting a timeout per test not supported. " + errormsg)
         elif self.maxIndividualTestTime < 0:
-            self.fatal('The timeout per test must be >= 0 seconds')
+            self.fatal("The timeout per test must be >= 0 seconds")
 
     def load_config(self, config, path):
         """load_config(config, path) - Load a config object from an alternate
         path."""
         if self.debug:
-            self.note('load_config from %r' % path)
+            self.note("load_config from %r" % path)
         config.load_from_path(path, self)
         return config
 
@@ -122,28 +141,32 @@ def getBashPath(self):
         if self.bashPath is not None:
             return self.bashPath
 
-        self.bashPath = lit.util.which('bash', os.pathsep.join(self.path))
+        self.bashPath = lit.util.which("bash", os.pathsep.join(self.path))
         if self.bashPath is None:
-            self.bashPath = lit.util.which('bash')
+            self.bashPath = lit.util.which("bash")
 
         if self.bashPath is None:
-            self.bashPath = ''
+            self.bashPath = ""
 
         # Check whether the found version of bash is able to cope with paths in
         # the host path format. If not, don't return it as it can't be used to
         # run scripts. For example, WSL's bash.exe requires '/mnt/c/foo' rather
         # than 'C:\\foo' or 'C:/foo'.
         if self.isWindows and self.bashPath:
-            command = [self.bashPath, '-c',
-                       '[[ -f "%s" ]]' % self.bashPath.replace('\\', '\\\\')]
+            command = [
+                self.bashPath,
+                "-c",
+                '[[ -f "%s" ]]' % self.bashPath.replace("\\", "\\\\"),
+            ]
             _, _, exitCode = lit.util.executeCommand(command)
             if exitCode:
-                self.note('bash command failed: %s' % (
-                    ' '.join('"%s"' % c for c in command)))
-                self.bashPath = ''
+                self.note(
+                    "bash command failed: %s" % (" ".join('"%s"' % c for c in command))
+                )
+                self.bashPath = ""
 
         if not self.bashPath:
-            self.warning('Unable to find a usable version of bash.')
+            self.warning("Unable to find a usable version of bash.")
 
         return self.bashPath
 
@@ -155,9 +178,9 @@ def getToolsPath(self, dir, paths, tools):
             dir = lit.util.whichTools(tools, paths)
 
         # bash
-        self.bashPath = lit.util.which('bash', dir)
+        self.bashPath = lit.util.which("bash", dir)
         if self.bashPath is None:
-            self.bashPath = ''
+            self.bashPath = ""
 
         return dir
 
@@ -168,8 +191,9 @@ def _write_message(self, kind, message):
         f = f.f_back.f_back
         file = os.path.abspath(inspect.getsourcefile(f))
         line = inspect.getlineno(f)
-        sys.stderr.write('%s: %s:%d: %s: %s\n' % (self.progname, file, line,
-                                                  kind, message))
+        sys.stderr.write(
+            "%s: %s:%d: %s: %s\n" % (self.progname, file, line, kind, message)
+        )
         if self.isWindows:
             # In a git bash terminal, the writes to sys.stderr aren't visible
             # on screen immediately. Flush them here to avoid broken/misordered
@@ -179,25 +203,26 @@ def _write_message(self, kind, message):
     def substitute(self, string):
         """substitute - Interpolate params into a string"""
         try:
-          return string % self.params
+            return string % self.params
         except KeyError as e:
-          key, = e.args
-          self.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (
-              key,key))
+            (key,) = e.args
+            self.fatal(
+                "unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)
+            )
 
     def note(self, message):
         if not self.quiet:
-            self._write_message('note', message)
+            self._write_message("note", message)
 
     def warning(self, message):
         if not self.quiet:
-            self._write_message('warning', message)
+            self._write_message("warning", message)
         self.numWarnings += 1
 
     def error(self, message):
-        self._write_message('error', message)
+        self._write_message("error", message)
         self.numErrors += 1
 
     def fatal(self, message):
-        self._write_message('fatal', message)
+        self._write_message("fatal", message)
         sys.exit(2)
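
substitute() above is plain %-interpolation against self.params, so --param values drop straight into strings; an equivalent standalone sketch:

    # Equivalent to LitConfig.substitute() with --param build_mode=Release.
    params = {"build_mode": "Release"}
    print("mode=%(build_mode)s" % params)  # mode=Release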

diff --git a/llvm/utils/lit/lit/LitTestCase.py b/llvm/utils/lit/lit/LitTestCase.py
index 2e9b64953bd15..d44b76a0a0415 100644
--- a/llvm/utils/lit/lit/LitTestCase.py
+++ b/llvm/utils/lit/lit/LitTestCase.py
@@ -39,11 +39,12 @@ def runTest(self):
 
 def load_test_suite(inputs):
     import platform
-    windows = platform.system() == 'Windows'
+
+    windows = platform.system() == "Windows"
 
     # Create the global config object.
     lit_config = lit.LitConfig.LitConfig(
-        progname='lit',
+        progname="lit",
         path=[],
         quiet=False,
         useValgrind=False,
@@ -52,8 +53,9 @@ def load_test_suite(inputs):
         noExecute=False,
         debug=False,
         isWindows=windows,
-        order='smart',
-        params={})
+        order="smart",
+        params={},
+    )
 
     # Perform test discovery.
     tests = lit.discovery.find_tests_for_inputs(lit_config, inputs, False)

diff --git a/llvm/utils/lit/lit/ProgressBar.py b/llvm/utils/lit/lit/ProgressBar.py
index fd721db780b5c..382b8f2e52540 100644
--- a/llvm/utils/lit/lit/ProgressBar.py
+++ b/llvm/utils/lit/lit/ProgressBar.py
@@ -5,15 +5,17 @@
 
 import sys, re, time
 
+
 def to_bytes(str):
     # Encode to UTF-8 to get binary data.
-    return str.encode('utf-8')
+    return str.encode("utf-8")
+
 
 class TerminalController:
     """
     A class that can be used to portably generate formatted output to
-    a terminal.  
-    
+    a terminal.
+
     `TerminalController` defines a set of instance variables whose
     values are initialized to the control sequence necessary to
     perform a given action.  These can be simply included in normal
@@ -43,41 +45,42 @@ class TerminalController:
     Finally, if the width and height of the terminal are known, then
     they will be stored in the `COLS` and `LINES` attributes.
     """
+
     # Cursor movement:
-    BOL = ''             #: Move the cursor to the beginning of the line
-    UP = ''              #: Move the cursor up one line
-    DOWN = ''            #: Move the cursor down one line
-    LEFT = ''            #: Move the cursor left one char
-    RIGHT = ''           #: Move the cursor right one char
+    BOL = ""  #: Move the cursor to the beginning of the line
+    UP = ""  #: Move the cursor up one line
+    DOWN = ""  #: Move the cursor down one line
+    LEFT = ""  #: Move the cursor left one char
+    RIGHT = ""  #: Move the cursor right one char
 
     # Deletion:
-    CLEAR_SCREEN = ''    #: Clear the screen and move to home position
-    CLEAR_EOL = ''       #: Clear to the end of the line.
-    CLEAR_BOL = ''       #: Clear to the beginning of the line.
-    CLEAR_EOS = ''       #: Clear to the end of the screen
+    CLEAR_SCREEN = ""  #: Clear the screen and move to home position
+    CLEAR_EOL = ""  #: Clear to the end of the line.
+    CLEAR_BOL = ""  #: Clear to the beginning of the line.
+    CLEAR_EOS = ""  #: Clear to the end of the screen
 
     # Output modes:
-    BOLD = ''            #: Turn on bold mode
-    BLINK = ''           #: Turn on blink mode
-    DIM = ''             #: Turn on half-bright mode
-    REVERSE = ''         #: Turn on reverse-video mode
-    NORMAL = ''          #: Turn off all modes
+    BOLD = ""  #: Turn on bold mode
+    BLINK = ""  #: Turn on blink mode
+    DIM = ""  #: Turn on half-bright mode
+    REVERSE = ""  #: Turn on reverse-video mode
+    NORMAL = ""  #: Turn off all modes
 
     # Cursor display:
-    HIDE_CURSOR = ''     #: Make the cursor invisible
-    SHOW_CURSOR = ''     #: Make the cursor visible
+    HIDE_CURSOR = ""  #: Make the cursor invisible
+    SHOW_CURSOR = ""  #: Make the cursor visible
 
     # Terminal size:
-    COLS = None          #: Width of the terminal (None for unknown)
-    LINES = None         #: Height of the terminal (None for unknown)
+    COLS = None  #: Width of the terminal (None for unknown)
+    LINES = None  #: Height of the terminal (None for unknown)
 
     # Foreground colors:
-    BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
-    
+    BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ""
+
     # Background colors:
-    BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
-    BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
-    
+    BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ""
+    BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ""
+
     _STRING_CAPABILITIES = """
     BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
     CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
@@ -95,60 +98,67 @@ def __init__(self, term_stream=sys.stdout):
         assumed to be a dumb terminal (i.e., have no capabilities).
         """
         # Curses isn't available on all platforms
-        try: import curses
-        except: return
+        try:
+            import curses
+        except:
+            return
 
         # If the stream isn't a tty, then assume it has no capabilities.
-        if not term_stream.isatty(): return
+        if not term_stream.isatty():
+            return
 
         # Check the terminal type.  If we fail, then assume that the
         # terminal has no capabilities.
-        try: curses.setupterm()
-        except: return
+        try:
+            curses.setupterm()
+        except:
+            return
 
         # Look up numeric capabilities.
-        self.COLS = curses.tigetnum('cols')
-        self.LINES = curses.tigetnum('lines')
-        self.XN = curses.tigetflag('xenl')
-        
+        self.COLS = curses.tigetnum("cols")
+        self.LINES = curses.tigetnum("lines")
+        self.XN = curses.tigetflag("xenl")
+
         # Look up string capabilities.
         for capability in self._STRING_CAPABILITIES:
-            (attrib, cap_name) = capability.split('=')
-            setattr(self, attrib, self._tigetstr(cap_name) or '')
+            (attrib, cap_name) = capability.split("=")
+            setattr(self, attrib, self._tigetstr(cap_name) or "")
 
         # Colors
-        set_fg = self._tigetstr('setf')
+        set_fg = self._tigetstr("setf")
         if set_fg:
-            for i,color in zip(range(len(self._COLORS)), self._COLORS):
+            for i, color in zip(range(len(self._COLORS)), self._COLORS):
                 setattr(self, color, self._tparm(set_fg, i))
-        set_fg_ansi = self._tigetstr('setaf')
+        set_fg_ansi = self._tigetstr("setaf")
         if set_fg_ansi:
-            for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
+            for i, color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                 setattr(self, color, self._tparm(set_fg_ansi, i))
-        set_bg = self._tigetstr('setb')
+        set_bg = self._tigetstr("setb")
         if set_bg:
-            for i,color in zip(range(len(self._COLORS)), self._COLORS):
-                setattr(self, 'BG_'+color, self._tparm(set_bg, i))
-        set_bg_ansi = self._tigetstr('setab')
+            for i, color in zip(range(len(self._COLORS)), self._COLORS):
+                setattr(self, "BG_" + color, self._tparm(set_bg, i))
+        set_bg_ansi = self._tigetstr("setab")
         if set_bg_ansi:
-            for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
-                setattr(self, 'BG_'+color, self._tparm(set_bg_ansi, i))
+            for i, color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
+                setattr(self, "BG_" + color, self._tparm(set_bg_ansi, i))
 
     def _tparm(self, arg, index):
         import curses
-        return curses.tparm(to_bytes(arg), index).decode('utf-8') or ''
+
+        return curses.tparm(to_bytes(arg), index).decode("utf-8") or ""
 
     def _tigetstr(self, cap_name):
         # String capabilities can include "delays" of the form "$<2>".
         # For any modern terminal, we should be able to just ignore
         # these, so strip them out.
         import curses
+
         cap = curses.tigetstr(cap_name)
         if cap is None:
-            cap = ''
+            cap = ""
         else:
-            cap = cap.decode('utf-8')
-        return re.sub(r'\$<\d+>[/*]?', '', cap)
+            cap = cap.decode("utf-8")
+        return re.sub(r"\$<\d+>[/*]?", "", cap)
 
     def render(self, template):
         """
@@ -156,17 +166,21 @@ def render(self, template):
         the corresponding terminal control string (if it's defined) or
         '' (if it's not).
         """
-        return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
+        return re.sub(r"\$\$|\${\w+}", self._render_sub, template)
 
     def _render_sub(self, match):
         s = match.group()
-        if s == '$$': return s
-        else: return getattr(self, s[2:-1])
+        if s == "$$":
+            return s
+        else:
+            return getattr(self, s[2:-1])
+
 
 #######################################################################
 # Example use case: progress bar
 #######################################################################
 
+
 class SimpleProgressBar:
     """
     A simple progress bar which doesn't need any terminal support.
@@ -184,33 +198,34 @@ def update(self, percent, message):
             sys.stdout.write(self.header)
             self.atIndex = 0
 
-        next = int(percent*50)
+        next = int(percent * 50)
         if next == self.atIndex:
             return
 
         for i in range(self.atIndex, next):
             idx = i % 5
             if idx == 0:
-                sys.stdout.write('%2d' % (i*2))
+                sys.stdout.write("%2d" % (i * 2))
             elif idx == 1:
-                pass # Skip second char
+                pass  # Skip second char
             elif idx < 4:
-                sys.stdout.write('.')
+                sys.stdout.write(".")
             else:
-                sys.stdout.write(' ')
+                sys.stdout.write(" ")
         sys.stdout.flush()
         self.atIndex = next
 
     def clear(self, interrupted):
         if self.atIndex is not None and not interrupted:
-            sys.stdout.write('\n')
+            sys.stdout.write("\n")
             sys.stdout.flush()
             self.atIndex = None
 
+
 class ProgressBar:
     """
     A 3-line progress bar, which looks like::
-    
+
                                 Header
         20% [===========----------------------------------]
                            progress message
@@ -218,26 +233,29 @@ class ProgressBar:
     The progress bar is colored, if the terminal supports color
     output; and adjusts to the width of the terminal.
     """
-    BAR = '%s${%s}[${BOLD}%s%s${NORMAL}${%s}]${NORMAL}%s'
-    HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
-        
+
+    BAR = "%s${%s}[${BOLD}%s%s${NORMAL}${%s}]${NORMAL}%s"
+    HEADER = "${BOLD}${CYAN}%s${NORMAL}\n\n"
+
     def __init__(self, term, header, useETA=True):
         self.term = term
         if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
-            raise ValueError("Terminal isn't capable enough -- you "
-                             "should use a simpler progress dispaly.")
-        self.BOL = self.term.BOL # BoL from col#79
-        self.XNL = "\n" # Newline from col#79
+            raise ValueError(
+                "Terminal isn't capable enough -- you "
+                "should use a simpler progress display."
+            )
+        self.BOL = self.term.BOL  # BoL from col#79
+        self.XNL = "\n"  # Newline from col#79
         if self.term.COLS:
             self.width = self.term.COLS
             if not self.term.XN:
                 self.BOL = self.term.UP + self.term.BOL
-                self.XNL = "" # Cursor must be fed to the next line
+                self.XNL = ""  # Cursor must be fed to the next line
         else:
             self.width = 75
-        self.barColor = 'GREEN'
+        self.barColor = "GREEN"
         self.header = self.term.render(self.HEADER % header.center(self.width))
-        self.cleared = 1 #: true if we haven't drawn the bar yet.
+        self.cleared = 1  #: true if we haven't drawn the bar yet.
         self.useETA = useETA
         if self.useETA:
             self.startTime = time.time()
@@ -247,51 +265,62 @@ def update(self, percent, message):
         if self.cleared:
             sys.stdout.write(self.header)
             self.cleared = 0
-        prefix = '%3d%% ' % (percent*100,)
-        suffix = ''
+        prefix = "%3d%% " % (percent * 100,)
+        suffix = ""
         if self.useETA:
             elapsed = time.time() - self.startTime
-            if percent > .0001 and elapsed > 1:
+            if percent > 0.0001 and elapsed > 1:
                 total = elapsed / percent
                 eta = total - elapsed
-                h = eta//3600.
-                m = (eta//60) % 60
+                h = eta // 3600.0
+                m = (eta // 60) % 60
                 s = eta % 60
-                suffix = ' ETA: %02d:%02d:%02d'%(h,m,s)
+                suffix = " ETA: %02d:%02d:%02d" % (h, m, s)
         barWidth = self.width - len(prefix) - len(suffix) - 2
-        n = int(barWidth*percent)
+        n = int(barWidth * percent)
         if len(message) < self.width:
-            message = message + ' '*(self.width - len(message))
+            message = message + " " * (self.width - len(message))
         else:
-            message = '... ' + message[-(self.width-4):]
+            message = "... " + message[-(self.width - 4) :]
         bc = self.barColor
-        bar = self.BAR % (prefix, bc, '='*n, '-'*(barWidth-n), bc, suffix)
+        bar = self.BAR % (prefix, bc, "=" * n, "-" * (barWidth - n), bc, suffix)
         bar = self.term.render(bar)
         sys.stdout.write(
-            self.BOL + self.term.UP + self.term.CLEAR_EOL +
-            bar +
-            self.XNL +
-            self.term.CLEAR_EOL + message)
+            self.BOL
+            + self.term.UP
+            + self.term.CLEAR_EOL
+            + bar
+            + self.XNL
+            + self.term.CLEAR_EOL
+            + message
+        )
         if not self.term.XN:
             sys.stdout.flush()
 
     def clear(self, interrupted):
         if not self.cleared:
-            sys.stdout.write(self.BOL + self.term.CLEAR_EOL +
-                             self.term.UP + self.term.CLEAR_EOL +
-                             self.term.UP + self.term.CLEAR_EOL)
+            sys.stdout.write(
+                self.BOL
+                + self.term.CLEAR_EOL
+                + self.term.UP
+                + self.term.CLEAR_EOL
+                + self.term.UP
+                + self.term.CLEAR_EOL
+            )
             if interrupted:  # ^C creates extra line. Gobble it up!
                 sys.stdout.write(self.term.UP + self.term.CLEAR_EOL)
-                sys.stdout.write('^C')
+                sys.stdout.write("^C")
             sys.stdout.flush()
             self.cleared = 1
 
+
 def test():
     tc = TerminalController()
-    p = ProgressBar(tc, 'Tests')
+    p = ProgressBar(tc, "Tests")
     for i in range(101):
-        p.update(i/100., str(i))        
-        time.sleep(.3)
+        p.update(i / 100.0, str(i))
+        time.sleep(0.3)
+
 
-if __name__=='__main__':
+if __name__ == "__main__":
     test()
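
For reference, here is a minimal sketch of how the classes above are typically
driven (assuming an ANSI-capable terminal; the try/except fallback and the
SimpleProgressBar header argument are illustrative assumptions, not part of
this diff):

    import sys
    import time

    tc = TerminalController(sys.stdout)
    # ${GREEN}/${NORMAL} render to control sequences, or '' on dumb terminals.
    sys.stdout.write(tc.render("${GREEN}ok${NORMAL}\n"))

    try:
        bar = ProgressBar(tc, "Demo")  # requires CLEAR_EOL, UP and BOL
    except ValueError:
        bar = SimpleProgressBar("Demo: ")  # assumed signature: header string
    for i in range(101):
        bar.update(i / 100.0, "step %d" % i)
        time.sleep(0.01)
    bar.clear(interrupted=False)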

diff --git a/llvm/utils/lit/lit/ShCommands.py b/llvm/utils/lit/lit/ShCommands.py
index 01e91c55da989..68655a41d7934 100644
--- a/llvm/utils/lit/lit/ShCommands.py
+++ b/llvm/utils/lit/lit/ShCommands.py
@@ -4,30 +4,30 @@ def __init__(self, args, redirects):
         self.redirects = list(redirects)
 
     def __repr__(self):
-        return 'Command(%r, %r)' % (self.args, self.redirects)
+        return "Command(%r, %r)" % (self.args, self.redirects)
 
     def __eq__(self, other):
         if not isinstance(other, Command):
             return False
 
-        return ((self.args, self.redirects) ==
-                (other.args, other.redirects))
+        return (self.args, self.redirects) == (other.args, other.redirects)
 
     def toShell(self, file):
         for arg in self.args:
             if "'" not in arg:
                 quoted = "'%s'" % arg
-            elif '"' not in arg and '$' not in arg:
+            elif '"' not in arg and "$" not in arg:
                 quoted = '"%s"' % arg
             else:
-                raise NotImplementedError('Unable to quote %r' % arg)
+                raise NotImplementedError("Unable to quote %r" % arg)
             file.write(quoted)
 
             # For debugging / validation.
             import ShUtil
+
             dequoted = list(ShUtil.ShLexer(quoted).lex())
             if dequoted != [arg]:
-                raise NotImplementedError('Unable to quote %r' % arg)
+                raise NotImplementedError("Unable to quote %r" % arg)
 
         for r in self.redirects:
             if len(r[0]) == 1:
@@ -35,6 +35,7 @@ def toShell(self, file):
             else:
                 file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
 
+
 class GlobItem:
     def __init__(self, pattern):
         self.pattern = pattern
@@ -46,18 +47,20 @@ def __eq__(self, other):
         if not isinstance(other, Command):
             return False
 
-        return (self.pattern == other.pattern)
+        return self.pattern == other.pattern
 
     def resolve(self, cwd):
         import glob
         import os
+
         if os.path.isabs(self.pattern):
-           abspath = self.pattern
+            abspath = self.pattern
         else:
             abspath = os.path.join(cwd, self.pattern)
         results = glob.glob(abspath)
         return [self.pattern] if len(results) == 0 else results
 
+
 class Pipeline:
     def __init__(self, commands, negate=False, pipe_err=False):
         self.commands = commands
@@ -65,44 +68,46 @@ def __init__(self, commands, negate=False, pipe_err=False):
         self.pipe_err = pipe_err
 
     def __repr__(self):
-        return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
-                                         self.pipe_err)
+        return "Pipeline(%r, %r, %r)" % (self.commands, self.negate, self.pipe_err)
 
     def __eq__(self, other):
         if not isinstance(other, Pipeline):
             return False
 
-        return ((self.commands, self.negate, self.pipe_err) ==
-                (other.commands, other.negate, self.pipe_err))
+        return (self.commands, self.negate, self.pipe_err) == (
+            other.commands,
+            other.negate,
+            self.pipe_err,
+        )
 
     def toShell(self, file, pipefail=False):
         if pipefail != self.pipe_err:
             raise ValueError('Inconsistent "pipefail" attribute!')
         if self.negate:
-            file.write('! ')
+            file.write("! ")
         for cmd in self.commands:
             cmd.toShell(file)
             if cmd is not self.commands[-1]:
-                file.write('|\n  ')
+                file.write("|\n  ")
+
 
 class Seq:
     def __init__(self, lhs, op, rhs):
-        assert op in (';', '&', '||', '&&')
+        assert op in (";", "&", "||", "&&")
         self.op = op
         self.lhs = lhs
         self.rhs = rhs
 
     def __repr__(self):
-        return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
+        return "Seq(%r, %r, %r)" % (self.lhs, self.op, self.rhs)
 
     def __eq__(self, other):
         if not isinstance(other, Seq):
             return False
 
-        return ((self.lhs, self.op, self.rhs) ==
-                (other.lhs, other.op, other.rhs))
+        return (self.lhs, self.op, self.rhs) == (other.lhs, other.op, other.rhs)
 
     def toShell(self, file, pipefail=False):
         self.lhs.toShell(file, pipefail)
-        file.write(' %s\n' % self.op)
+        file.write(" %s\n" % self.op)
         self.rhs.toShell(file, pipefail)
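
For orientation, a minimal sketch of how these AST nodes compose by hand (the
shell fragment is hypothetical; the printed form follows the __repr__ methods
above):

    from lit.ShCommands import Command, Pipeline, Seq

    # Roughly "echo hi | grep h > out.txt && cat out.txt":
    lhs = Pipeline(
        [Command(["echo", "hi"], []),
         Command(["grep", "h"], [((">",), "out.txt")])],
        negate=False, pipe_err=False)
    rhs = Pipeline([Command(["cat", "out.txt"], [])], negate=False, pipe_err=False)
    tree = Seq(lhs, "&&", rhs)
    print(tree)  # Seq(Pipeline(...), '&&', Pipeline(...))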

diff --git a/llvm/utils/lit/lit/ShUtil.py b/llvm/utils/lit/lit/ShUtil.py
index 00ec8ab004936..fa13167cad1be 100644
--- a/llvm/utils/lit/lit/ShUtil.py
+++ b/llvm/utils/lit/lit/ShUtil.py
@@ -4,8 +4,9 @@
 import lit.util
 from lit.ShCommands import Command, GlobItem, Pipeline, Seq
 
+
 class ShLexer:
-    def __init__(self, data, win32Escapes = False):
+    def __init__(self, data, win32Escapes=False):
         self.data = data
         self.pos = 0
         self.end = len(data)
@@ -22,7 +23,7 @@ def look(self):
     def maybe_eat(self, c):
         """
         maybe_eat(c) - Consume the character c if it is the next character,
-        returning True if a character was consumed. """
+        returning True if a character was consumed."""
         if self.data[self.pos] == c:
             self.pos += 1
             return True
@@ -30,18 +31,24 @@ def maybe_eat(self, c):
 
     def lex_arg_fast(self, c):
         # Get the leading whitespace free section.
-        chunk = self.data[self.pos - 1:].split(None, 1)[0]
-        
+        chunk = self.data[self.pos - 1 :].split(None, 1)[0]
+
         # If it has special characters, the fast path failed.
-        if ('|' in chunk or '&' in chunk or 
-            '<' in chunk or '>' in chunk or
-            "'" in chunk or '"' in chunk or
-            ';' in chunk or '\\' in chunk):
+        if (
+            "|" in chunk
+            or "&" in chunk
+            or "<" in chunk
+            or ">" in chunk
+            or "'" in chunk
+            or '"' in chunk
+            or ";" in chunk
+            or "\\" in chunk
+        ):
             return None
-        
+
         self.pos = self.pos - 1 + len(chunk)
-        return GlobItem(chunk) if '*' in chunk or '?' in chunk else chunk
-        
+        return GlobItem(chunk) if "*" in chunk or "?" in chunk else chunk
+
     def lex_arg_slow(self, c):
         if c in "'\"":
             str = self.lex_arg_quoted(c)
@@ -53,7 +60,7 @@ def lex_arg_slow(self, c):
             c = self.look()
             if c.isspace() or c in "|&;":
                 break
-            elif c in '><':
+            elif c in "><":
                 # This is an annoying case; we treat '2>' as a single token so
                 # we don't have to track whitespace tokens.
 
@@ -66,22 +73,23 @@ def lex_arg_slow(self, c):
                 num = int(str)
                 tok = self.lex_one_token()
                 assert isinstance(tok, tuple) and len(tok) == 1
-                return (tok[0], num)                    
+                return (tok[0], num)
             elif c == '"' or c == "'":
                 self.eat()
                 quoted_arg = self.lex_arg_quoted(c)
-                if '*' in quoted_arg or '?' in quoted_arg:
+                if "*" in quoted_arg or "?" in quoted_arg:
                     quoted_glob_char = True
                 str += quoted_arg
-            elif not self.win32Escapes and c == '\\':
+            elif not self.win32Escapes and c == "\\":
                 # Outside of a string, '\\' escapes everything.
                 self.eat()
                 if self.pos == self.end:
                     lit.util.warning(
-                        "escape at end of quoted argument in: %r" % self.data)
+                        "escape at end of quoted argument in: %r" % self.data
+                    )
                     return str
                 str += self.eat()
-            elif c in '*?':
+            elif c in "*?":
                 unquoted_glob_char = True
                 str += self.eat()
             else:
@@ -102,30 +110,31 @@ def lex_arg_slow(self, c):
         return GlobItem(str) if unquoted_glob_char else str
 
     def lex_arg_quoted(self, delim):
-        str = ''
+        str = ""
         while self.pos != self.end:
             c = self.eat()
             if c == delim:
                 return str
-            elif c == '\\' and delim == '"':
+            elif c == "\\" and delim == '"':
                 # Inside a '"' quoted string, '\\' only escapes the quote
                 # character and backslash, otherwise it is preserved.
                 if self.pos == self.end:
                     lit.util.warning(
-                        "escape at end of quoted argument in: %r" % self.data)
+                        "escape at end of quoted argument in: %r" % self.data
+                    )
                     return str
                 c = self.eat()
-                if c == '"': # 
+                if c == '"':  #
                     str += '"'
-                elif c == '\\':
-                    str += '\\'
+                elif c == "\\":
+                    str += "\\"
                 else:
-                    str += '\\' + c
+                    str += "\\" + c
             else:
                 str += c
         lit.util.warning("missing quote character in %r" % self.data)
         return str
-    
+
     def lex_arg_checked(self, c):
         pos = self.pos
         res = self.lex_arg_fast(c)
@@ -135,44 +144,42 @@ def lex_arg_checked(self, c):
         reference = self.lex_arg_slow(c)
         if res is not None:
             if res != reference:
-                raise ValueError("Fast path failure: %r != %r" % (
-                        res, reference))
+                raise ValueError("Fast path failure: %r != %r" % (res, reference))
             if self.pos != end:
-                raise ValueError("Fast path failure: %r != %r" % (
-                        self.pos, end))
+                raise ValueError("Fast path failure: %r != %r" % (self.pos, end))
         return reference
-        
+
     def lex_arg(self, c):
         return self.lex_arg_fast(c) or self.lex_arg_slow(c)
-        
+
     def lex_one_token(self):
         """
-        lex_one_token - Lex a single 'sh' token. """
+        lex_one_token - Lex a single 'sh' token."""
 
         c = self.eat()
-        if c == ';':
+        if c == ";":
             return (c,)
-        if c == '|':
-            if self.maybe_eat('|'):
-                return ('||',)
+        if c == "|":
+            if self.maybe_eat("|"):
+                return ("||",)
             return (c,)
-        if c == '&':
-            if self.maybe_eat('&'):
-                return ('&&',)
-            if self.maybe_eat('>'): 
-                return ('&>',)
+        if c == "&":
+            if self.maybe_eat("&"):
+                return ("&&",)
+            if self.maybe_eat(">"):
+                return ("&>",)
             return (c,)
-        if c == '>':
-            if self.maybe_eat('&'):
-                return ('>&',)
-            if self.maybe_eat('>'):
-                return ('>>',)
+        if c == ">":
+            if self.maybe_eat("&"):
+                return (">&",)
+            if self.maybe_eat(">"):
+                return (">>",)
             return (c,)
-        if c == '<':
-            if self.maybe_eat('&'):
-                return ('<&',)
-            if self.maybe_eat('>'):
-                return ('<<',)
+        if c == "<":
+            if self.maybe_eat("&"):
+                return ("<&",)
+            if self.maybe_eat(">"):
+                return ("<<",)
             return (c,)
 
         return self.lex_arg(c)
@@ -184,32 +191,34 @@ def lex(self):
             else:
                 yield self.lex_one_token()
 
+
 ###
- 
+
+
 class ShParser:
-    def __init__(self, data, win32Escapes = False, pipefail = False):
+    def __init__(self, data, win32Escapes=False, pipefail=False):
         self.data = data
         self.pipefail = pipefail
-        self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
-    
+        self.tokens = ShLexer(data, win32Escapes=win32Escapes).lex()
+
     def lex(self):
         for item in self.tokens:
             return item
         return None
-    
+
     def look(self):
         token = self.lex()
         if token is not None:
             self.tokens = itertools.chain([token], self.tokens)
         return token
-    
+
     def parse_command(self):
         tok = self.lex()
         if not tok:
             raise ValueError("empty command!")
         if isinstance(tok, tuple):
             raise ValueError("syntax error near unexpected token %r" % tok[0])
-        
+
         args = [tok]
         redirects = []
         while 1:
@@ -226,9 +235,9 @@ def parse_command(self):
 
             # Otherwise see if it is a terminator.
             assert isinstance(tok, tuple)
-            if tok[0] in ('|',';','&','||','&&'):
+            if tok[0] in ("|", ";", "&", "||", "&&"):
                 break
-            
+
             # Otherwise it must be a redirection.
             op = self.lex()
             arg = self.lex()
@@ -242,11 +251,11 @@ def parse_pipeline(self):
         negate = False
 
         commands = [self.parse_command()]
-        while self.look() == ('|',):
+        while self.look() == ("|",):
             self.lex()
             commands.append(self.parse_command())
         return Pipeline(commands, negate, self.pipefail)
-            
+
     def parse(self):
         lhs = self.parse_pipeline()
 
@@ -255,11 +264,9 @@ def parse(self):
             assert isinstance(operator, tuple) and len(operator) == 1
 
             if not self.look():
-                raise ValueError(
-                    "missing argument to operator %r" % operator[0])
-            
+                raise ValueError("missing argument to operator %r" % operator[0])
+
             # FIXME: Operator precedence!!
             lhs = Seq(lhs, operator[0], self.parse_pipeline())
 
         return lhs
-
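
As a quick illustration of the lexer/parser pair above, parsing a small
fragment yields the ShCommands AST directly (a minimal sketch; the repr is
shown approximately):

    from lit.ShUtil import ShParser

    ast = ShParser("echo hi | grep h && rm -f out.txt", pipefail=False).parse()
    print(ast)
    # Seq(Pipeline([Command(['echo', 'hi'], []), Command(['grep', 'h'], [])],
    #              False, False),
    #     '&&',
    #     Pipeline([Command(['rm', '-f', 'out.txt'], [])], False, False))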

diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
index 6c72359440b9c..051062706f856 100644
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -7,6 +7,7 @@
 
 # Test result codes.
 
+
 class ResultCode(object):
     """Test result codes."""
 
@@ -37,26 +38,26 @@ def __init__(self, name, label, isFailure):
         ResultCode._all_codes.append(self)
 
     def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.name, self.isFailure))
+        return "%s%r" % (self.__class__.__name__, (self.name, self.isFailure))
 
 
 # Successes
-EXCLUDED    = ResultCode('EXCLUDED',    'Excluded', False)
-SKIPPED     = ResultCode('SKIPPED',     'Skipped', False)
-UNSUPPORTED = ResultCode('UNSUPPORTED', 'Unsupported', False)
-PASS        = ResultCode('PASS',        'Passed', False)
-FLAKYPASS   = ResultCode('FLAKYPASS',   'Passed With Retry', False)
-XFAIL       = ResultCode('XFAIL',       'Expectedly Failed', False)
+EXCLUDED = ResultCode("EXCLUDED", "Excluded", False)
+SKIPPED = ResultCode("SKIPPED", "Skipped", False)
+UNSUPPORTED = ResultCode("UNSUPPORTED", "Unsupported", False)
+PASS = ResultCode("PASS", "Passed", False)
+FLAKYPASS = ResultCode("FLAKYPASS", "Passed With Retry", False)
+XFAIL = ResultCode("XFAIL", "Expectedly Failed", False)
 # Failures
-UNRESOLVED  = ResultCode('UNRESOLVED',  'Unresolved', True)
-TIMEOUT     = ResultCode('TIMEOUT',     'Timed Out', True)
-FAIL        = ResultCode('FAIL',        'Failed', True)
-XPASS       = ResultCode('XPASS',       'Unexpectedly Passed', True)
+UNRESOLVED = ResultCode("UNRESOLVED", "Unresolved", True)
+TIMEOUT = ResultCode("TIMEOUT", "Timed Out", True)
+FAIL = ResultCode("FAIL", "Failed", True)
+XPASS = ResultCode("XPASS", "Unexpectedly Passed", True)
 
 
 # Test metric values.
 
+
 class MetricValue(object):
     def format(self):
         """
@@ -76,6 +77,7 @@ def todata(self):
         """
         raise RuntimeError("abstract method")
 
+
 class IntMetricValue(MetricValue):
     def __init__(self, value):
         self.value = value
@@ -86,21 +88,24 @@ def format(self):
     def todata(self):
         return self.value
 
+
 class RealMetricValue(MetricValue):
     def __init__(self, value):
         self.value = value
 
     def format(self):
-        return '%.4f' % self.value
+        return "%.4f" % self.value
 
     def todata(self):
         return self.value
 
+
 class JSONMetricValue(MetricValue):
     """
-        JSONMetricValue is used for types that are representable in the output
-        but that are otherwise uninterpreted.
+    JSONMetricValue is used for types that are representable in the output
+    but that are otherwise uninterpreted.
     """
+
     def __init__(self, value):
         # Ensure the value is a serializable by trying to encode it.
         # WARNING: The value may change before it is encoded again, and may
@@ -119,6 +124,7 @@ def format(self):
     def todata(self):
         return self.value
 
+
 def toMetricValue(value):
     if isinstance(value, MetricValue):
         return value
@@ -141,10 +147,11 @@ def toMetricValue(value):
 
 # Test results.
 
+
 class Result(object):
     """Wrapper for the results of executing an individual test."""
 
-    def __init__(self, code, output='', elapsed=None):
+    def __init__(self, code, output="", elapsed=None):
         # The result code.
         self.code = code
         # The test output.
@@ -169,8 +176,7 @@ def addMetric(self, name, value):
         Each value must be an instance of a MetricValue subclass.
         """
         if name in self.metrics:
-            raise ValueError("result already includes metrics for %r" % (
-                    name,))
+            raise ValueError("result already includes metrics for %r" % (name,))
         if not isinstance(value, MetricValue):
             raise TypeError("unexpected metric value: %r" % (value,))
         self.metrics[name] = value
@@ -186,8 +192,7 @@ def addMicroResult(self, name, microResult):
         Each micro-test result must be an instance of the Result class.
         """
         if name in self.microResults:
-            raise ValueError("Result already includes microResult for %r" % (
-                   name,))
+            raise ValueError("Result already includes microResult for %r" % (name,))
         if not isinstance(microResult, Result):
             raise TypeError("unexpected MicroResult value %r" % (microResult,))
         self.microResults[name] = microResult
@@ -195,6 +200,7 @@ def addMicroResult(self, name, microResult):
 
 # Test classes.
 
+
 class TestSuite:
     """TestSuite - Information on a group of tests.
 
@@ -216,10 +222,13 @@ def getSourcePath(self, components):
     def getExecPath(self, components):
         return os.path.join(self.exec_root, *components)
 
+
 class Test:
     """Test - Information on a single test instance."""
 
-    def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_file = None):
+    def __init__(
+        self, suite, path_in_suite, config, file_path=None, gtest_json_file=None
+    ):
         self.suite = suite
         self.path_in_suite = path_in_suite
         self.config = config
@@ -247,7 +256,7 @@ def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_fi
 
         # An optional number of retries allowed before the test finally succeeds.
         # The test is run at most once plus the number of retries specified here.
-        self.allowed_retries = getattr(config, 'test_retry_attempts', 0)
+        self.allowed_retries = getattr(config, "test_retry_attempts", 0)
 
         # The test result, once complete.
         self.result = None
@@ -258,12 +267,11 @@ def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_fi
         # The previous test elapsed time, if applicable.
         self.previous_elapsed = 0.0
 
-        if suite.test_times and '/'.join(path_in_suite) in suite.test_times:
-            time = suite.test_times['/'.join(path_in_suite)]
+        if suite.test_times and "/".join(path_in_suite) in suite.test_times:
+            time = suite.test_times["/".join(path_in_suite)]
             self.previous_elapsed = abs(time)
             self.previous_failure = time < 0
 
-
     def setResult(self, result):
         assert self.result is None, "result already set"
         assert isinstance(result, Result), "unexpected result type"
@@ -288,7 +296,7 @@ def isFailure(self):
         return self.result.code.isFailure
 
     def getFullName(self):
-        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)
+        return self.suite.config.name + " :: " + "/".join(self.path_in_suite)
 
     def getFilePath(self):
         if self.file_path:
@@ -313,14 +321,14 @@ def isExpectedToFail(self):
         """
 
         if self.xfail_not:
-          return False
+            return False
 
         features = self.config.available_features
 
         # Check if any of the xfails match an available feature.
         for item in self.xfails:
             # If this is the wildcard, it always fails.
-            if item == '*':
+            if item == "*":
                 return True
 
             # If this is a True expression of features, it fails.
@@ -328,7 +336,7 @@ def isExpectedToFail(self):
                 if BooleanExpression.evaluate(item, features):
                     return True
             except ValueError as e:
-                raise ValueError('Error in XFAIL list:\n%s' % str(e))
+                raise ValueError("Error in XFAIL list:\n%s" % str(e))
 
         return False
 
@@ -352,8 +360,11 @@ def isWithinFeatureLimits(self):
             return False
 
         # Check the requirements after removing the limiting features (#2)
-        featuresMinusLimits = [f for f in self.config.available_features
-                               if not f in self.config.limit_to_features]
+        featuresMinusLimits = [
+            f
+            for f in self.config.available_features
+            if not f in self.config.limit_to_features
+        ]
         if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
             return False
 
@@ -361,10 +372,13 @@ def isWithinFeatureLimits(self):
 
     def getMissingRequiredFeaturesFromList(self, features):
         try:
-            return [item for item in self.requires
-                    if not BooleanExpression.evaluate(item, features)]
+            return [
+                item
+                for item in self.requires
+                if not BooleanExpression.evaluate(item, features)
+            ]
         except ValueError as e:
-            raise ValueError('Error in REQUIRES list:\n%s' % str(e))
+            raise ValueError("Error in REQUIRES list:\n%s" % str(e))
 
     def getMissingRequiredFeatures(self):
         """
@@ -389,10 +403,13 @@ def getUnsupportedFeatures(self):
         features = self.config.available_features
 
         try:
-            return [item for item in self.unsupported
-                    if BooleanExpression.evaluate(item, features)]
+            return [
+                item
+                for item in self.unsupported
+                if BooleanExpression.evaluate(item, features)
+            ]
         except ValueError as e:
-            raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))
+            raise ValueError("Error in UNSUPPORTED list:\n%s" % str(e))
 
     def getUsedFeatures(self):
         """
@@ -402,14 +419,18 @@ def getUsedFeatures(self):
         REQUIRES annotations for this test.
         """
         import lit.TestRunner
-        parsed = lit.TestRunner._parseKeywords(self.getSourcePath(), require_script=False)
-        feature_keywords = ('UNSUPPORTED:', 'REQUIRES:', 'XFAIL:')
+
+        parsed = lit.TestRunner._parseKeywords(
+            self.getSourcePath(), require_script=False
+        )
+        feature_keywords = ("UNSUPPORTED:", "REQUIRES:", "XFAIL:")
         boolean_expressions = itertools.chain.from_iterable(
             parsed[k] or [] for k in feature_keywords
         )
         tokens = itertools.chain.from_iterable(
-            BooleanExpression.tokenize(expr) for expr in
-                boolean_expressions if expr != '*'
+            BooleanExpression.tokenize(expr)
+            for expr in boolean_expressions
+            if expr != "*"
         )
         matchExpressions = set(filter(BooleanExpression.isMatchExpression, tokens))
         return matchExpressions
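
To make the result API concrete, a minimal sketch of assembling a test result
(assuming lit is importable, and assuming toMetricValue wraps a plain float as
a RealMetricValue; that dispatch is not shown in this hunk):

    from lit.Test import FAIL, PASS, IntMetricValue, Result, toMetricValue

    result = Result(PASS, output="all good", elapsed=0.42)
    result.addMetric("compile_time", toMetricValue(1.25))  # assumed RealMetricValue
    result.addMetric("object_bytes", IntMetricValue(8192))
    result.addMicroResult("subtest", Result(FAIL, output="boom"))
    print(result.code.isFailure)  # False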

diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py
index f3499bb122d82..24670610e3a57 100644
--- a/llvm/utils/lit/lit/TestRunner.py
+++ b/llvm/utils/lit/lit/TestRunner.py
@@ -13,6 +13,7 @@
 import threading
 
 import io
+
 try:
     from StringIO import StringIO
 except ImportError:
@@ -25,12 +26,14 @@
 from lit.util import to_bytes, to_string, to_unicode
 from lit.BooleanExpression import BooleanExpression
 
+
 class InternalShellError(Exception):
     def __init__(self, command, message):
         self.command = command
         self.message = message
 
-kIsWindows = platform.system() == 'Windows'
+
+kIsWindows = platform.system() == "Windows"
 
 # Don't use close_fds on Windows.
 kUseCloseFDs = not kIsWindows
@@ -51,7 +54,8 @@ def __init__(self, command, message):
 #
 # COMMAND that follows %dbg(ARG) is also captured. COMMAND can be
 # empty as a result of conditional substitution.
-kPdbgRegex = '%dbg\\(([^)\'"]*)\\)(.*)'
+kPdbgRegex = "%dbg\\(([^)'\"]*)\\)(.*)"
+
 
 class ShellEnvironment(object):
 
@@ -72,13 +76,15 @@ def change_dir(self, newdir):
         else:
             self.cwd = os.path.realpath(os.path.join(self.cwd, newdir))
 
+
 class TimeoutHelper(object):
     """
-        Object used to helper manage enforcing a timeout in
-        _executeShCmd(). It is passed through recursive calls
-        to collect processes that have been executed so that when
-        the timeout happens they can be killed.
+    Object used to help manage enforcing a timeout in
+    _executeShCmd(). It is passed through recursive calls
+    to collect processes that have been executed so that when
+    the timeout happens they can be killed.
     """
+
     def __init__(self, timeout):
         self.timeout = timeout
         self._procs = []
@@ -135,24 +141,26 @@ def timeoutReached(self):
 
     def _kill(self):
         """
-            This method may be called multiple times as we might get unlucky
-            and be in the middle of creating a new process in _executeShCmd()
-            which won't yet be in ``self._procs``. By locking here and in
-            addProcess() we should be able to kill processes launched after
-            the initial call to _kill()
+        This method may be called multiple times as we might get unlucky
+        and be in the middle of creating a new process in _executeShCmd()
+        which won't yet be in ``self._procs``. By locking here and in
+        addProcess() we should be able to kill processes launched after
+        the initial call to _kill()
         """
         with self._lock:
             for p in self._procs:
                 lit.util.killProcessAndChildren(p.pid)
             # Empty the list and note that we've done a pass over the list
-            self._procs = [] # Python2 doesn't have list.clear()
+            self._procs = []  # Python2 doesn't have list.clear()
             self._doneKillPass = True
 
+
 class ShellCommandResult(object):
     """Captures the result of an individual command."""
 
-    def __init__(self, command, stdout, stderr, exitCode, timeoutReached,
-                 outputFiles = []):
+    def __init__(
+        self, command, stdout, stderr, exitCode, timeoutReached, outputFiles=[]
+    ):
         self.command = command
         self.stdout = stdout
         self.stderr = stderr
@@ -160,10 +168,11 @@ def __init__(self, command, stdout, stderr, exitCode, timeoutReached,
         self.timeoutReached = timeoutReached
         self.outputFiles = list(outputFiles)
 
+
 def executeShCmd(cmd, shenv, results, timeout=0):
     """
-        Wrapper around _executeShCmd that handles
-        timeout
+    Wrapper around _executeShCmd that handles
+    timeout
     """
     # Use the helper even when no timeout is required to make
     # other code simpler (i.e. avoid bunch of ``!= None`` checks)
@@ -174,21 +183,24 @@ def executeShCmd(cmd, shenv, results, timeout=0):
     timeoutHelper.cancel()
     timeoutInfo = None
     if timeoutHelper.timeoutReached():
-        timeoutInfo = 'Reached timeout of {} seconds'.format(timeout)
+        timeoutInfo = "Reached timeout of {} seconds".format(timeout)
 
     return (finalExitCode, timeoutInfo)
 
+
 def expand_glob(arg, cwd):
     if isinstance(arg, GlobItem):
         return sorted(arg.resolve(cwd))
     return [arg]
 
+
 def expand_glob_expressions(args, cwd):
     result = [args[0]]
     for arg in args[1:]:
         result.extend(expand_glob(arg, cwd))
     return result
 
+
 def quote_windows_command(seq):
     """
     Reimplement Python's private subprocess.list2cmdline for MSys compatibility
@@ -218,20 +230,27 @@ def quote_windows_command(seq):
 
         # Add a space to separate this argument from the others
         if result:
-            result.append(' ')
+            result.append(" ")
 
         # This logic differs from upstream list2cmdline.
-        needquote = (" " in arg) or ("\t" in arg) or ("\"" in arg) or ("[" in arg) or (";" in arg) or not arg
+        needquote = (
+            (" " in arg)
+            or ("\t" in arg)
+            or ('"' in arg)
+            or ("[" in arg)
+            or (";" in arg)
+            or not arg
+        )
         if needquote:
             result.append('"')
 
         for c in arg:
-            if c == '\\':
+            if c == "\\":
                 # Don't know if we need to double yet.
                 bs_buf.append(c)
             elif c == '"':
                 # Double backslashes.
-                result.append('\\' * len(bs_buf)*2)
+                result.append("\\" * len(bs_buf) * 2)
                 bs_buf = []
                 result.append('\\"')
             else:
@@ -249,7 +268,8 @@ def quote_windows_command(seq):
             result.extend(bs_buf)
             result.append('"')
 
-    return ''.join(result)
+    return "".join(result)
+
 
 # args are from 'export' or 'env' command.
 # Skips the command, and parses its arguments.
@@ -262,7 +282,7 @@ def updateEnv(env, args):
         # Support for the -u flag (unsetting) for env command
         # e.g., env -u FOO -u BAR will remove both FOO and BAR
         # from the environment.
-        if arg == '-u':
+        if arg == "-u":
             unset_next_env_var = True
             continue
         if unset_next_env_var:
@@ -272,14 +292,15 @@ def updateEnv(env, args):
             continue
 
         # Partition the string into KEY=VALUE.
-        key, eq, val = arg.partition('=')
+        key, eq, val = arg.partition("=")
         # Stop if there was no equals.
-        if eq == '':
+        if eq == "":
             arg_idx_next = arg_idx + 1
             break
         env.env[key] = val
     return args[arg_idx_next:]
 
+
 def executeBuiltinCd(cmd, shenv):
     """executeBuiltinCd - Change the current directory."""
     if len(cmd.args) != 2:
@@ -290,6 +311,7 @@ def executeBuiltinCd(cmd, shenv):
     # following Popen calls will fail instead.
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def executeBuiltinPushd(cmd, shenv):
     """executeBuiltinPushd - Change the current dir and save the old."""
     if len(cmd.args) != 2:
@@ -298,6 +320,7 @@ def executeBuiltinPushd(cmd, shenv):
     shenv.change_dir(cmd.args[1])
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def executeBuiltinPopd(cmd, shenv):
     """executeBuiltinPopd - Restore a previously saved working directory."""
     if len(cmd.args) != 1:
@@ -307,6 +330,7 @@ def executeBuiltinPopd(cmd, shenv):
     shenv.cwd = shenv.dirStack.pop()
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def executeBuiltinExport(cmd, shenv):
     """executeBuiltinExport - Set an environment variable."""
     if len(cmd.args) != 2:
@@ -314,19 +338,20 @@ def executeBuiltinExport(cmd, shenv):
     updateEnv(shenv, cmd.args)
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def executeBuiltinEcho(cmd, shenv):
     """Interpret a redirected echo command"""
     opened_files = []
-    stdin, stdout, stderr = processRedirects(cmd, subprocess.PIPE, shenv,
-                                             opened_files)
+    stdin, stdout, stderr = processRedirects(cmd, subprocess.PIPE, shenv, opened_files)
     if stdin != subprocess.PIPE or stderr != subprocess.PIPE:
         raise InternalShellError(
-                cmd, "stdin and stderr redirects not supported for echo")
+            cmd, "stdin and stderr redirects not supported for echo"
+        )
 
     # Some tests have un-redirected echo commands to help debug test failures.
     # Buffer our output and return it to the caller.
     is_redirected = True
-    encode = lambda x : x
+    encode = lambda x: x
     if stdout == subprocess.PIPE:
         is_redirected = False
         stdout = StringIO()
@@ -337,7 +362,7 @@ def executeBuiltinEcho(cmd, shenv):
         # When we open as binary, however, this also means that we have to write
         # 'bytes' objects to stdout instead of 'str' objects.
         encode = lit.util.to_bytes
-        stdout = open(stdout.name, stdout.mode + 'b')
+        stdout = open(stdout.name, stdout.mode + "b")
         opened_files.append((None, None, stdout, None))
 
     # Implement echo flags. We only support -e and -n, and not yet in
@@ -346,12 +371,12 @@ def executeBuiltinEcho(cmd, shenv):
     args = cmd.args[1:]
     interpret_escapes = False
     write_newline = True
-    while len(args) >= 1 and args[0] in ('-e', '-n'):
+    while len(args) >= 1 and args[0] in ("-e", "-n"):
         flag = args[0]
         args = args[1:]
-        if flag == '-e':
+        if flag == "-e":
             interpret_escapes = True
-        elif flag == '-n':
+        elif flag == "-n":
             write_newline = False
 
     def maybeUnescape(arg):
@@ -359,16 +384,16 @@ def maybeUnescape(arg):
             return arg
 
         arg = lit.util.to_bytes(arg)
-        codec = 'string_escape' if sys.version_info < (3,0) else 'unicode_escape'
+        codec = "string_escape" if sys.version_info < (3, 0) else "unicode_escape"
         return arg.decode(codec)
 
     if args:
         for arg in args[:-1]:
             stdout.write(encode(maybeUnescape(arg)))
-            stdout.write(encode(' '))
+            stdout.write(encode(" "))
         stdout.write(encode(maybeUnescape(args[-1])))
     if write_newline:
-        stdout.write(encode('\n'))
+        stdout.write(encode("\n"))
 
     for (name, mode, f, path) in opened_files:
         f.close()
@@ -376,11 +401,12 @@ def maybeUnescape(arg):
     output = "" if is_redirected else stdout.getvalue()
     return ShellCommandResult(cmd, output, "", 0, False)
 
+
 def executeBuiltinMkdir(cmd, cmd_shenv):
     """executeBuiltinMkdir - Create new directories."""
     args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
     try:
-        opts, args = getopt.gnu_getopt(args, 'p')
+        opts, args = getopt.gnu_getopt(args, "p")
     except getopt.GetoptError as err:
         raise InternalShellError(cmd, "Unsupported: 'mkdir':  %s" % str(err))
 
@@ -412,6 +438,7 @@ def executeBuiltinMkdir(cmd, cmd_shenv):
                 exitCode = 1
     return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
 
+
 def executeBuiltinRm(cmd, cmd_shenv):
     """executeBuiltinRm - Removes (deletes) files or directories."""
     args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
@@ -436,7 +463,7 @@ def executeBuiltinRm(cmd, cmd_shenv):
     def on_rm_error(func, path, exc_info):
         # path contains the path of the file that couldn't be removed
         # let's just assume that it's read-only and remove it.
-        os.chmod(path, stat.S_IMODE( os.stat(path).st_mode) | stat.S_IWRITE)
+        os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | stat.S_IWRITE)
         os.remove(path)
 
     stderr = StringIO()
@@ -454,7 +481,7 @@ def on_rm_error(func, path, exc_info):
                 if not recursive:
                     stderr.write("Error: %s is a directory\n" % path)
                     exitCode = 1
-                if platform.system() == 'Windows':
+                if platform.system() == "Windows":
                     # NOTE: use ctypes to access `SHFileOperationsW` on Windows to
                     # use the NT style path to get access to long file paths which
                     # cannot be removed otherwise.
@@ -465,14 +492,14 @@ def on_rm_error(func, path, exc_info):
 
                     class SHFILEOPSTRUCTW(Structure):
                         _fields_ = [
-                                ('hWnd', HWND),
-                                ('wFunc', UINT),
-                                ('pFrom', LPCWSTR),
-                                ('pTo', LPCWSTR),
-                                ('fFlags', WORD),
-                                ('fAnyOperationsAborted', BOOL),
-                                ('hNameMappings', c_void_p),
-                                ('lpszProgressTitle', LPCWSTR),
+                            ("hWnd", HWND),
+                            ("wFunc", UINT),
+                            ("pFrom", LPCWSTR),
+                            ("pTo", LPCWSTR),
+                            ("fFlags", WORD),
+                            ("fAnyOperationsAborted", BOOL),
+                            ("hNameMappings", c_void_p),
+                            ("lpszProgressTitle", LPCWSTR),
                         ]
 
                     FO_MOVE, FO_COPY, FO_DELETE, FO_RENAME = range(1, 5)
@@ -482,7 +509,12 @@ class SHFILEOPSTRUCTW(Structure):
                     FOF_NOCONFIRMMKDIR = 512
                     FOF_NOERRORUI = 1024
 
-                    FOF_NO_UI = FOF_SILENT | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_NOCONFIRMMKDIR
+                    FOF_NO_UI = (
+                        FOF_SILENT
+                        | FOF_NOCONFIRMATION
+                        | FOF_NOERRORUI
+                        | FOF_NOCONFIRMMKDIR
+                    )
 
                     SHFileOperationW = windll.shell32.SHFileOperationW
                     SHFileOperationW.argtypes = [POINTER(SHFILEOPSTRUCTW)]
@@ -490,29 +522,32 @@ class SHFILEOPSTRUCTW(Structure):
                     path = os.path.abspath(path)
 
                     pFrom = create_unicode_buffer(path, len(path) + 2)
-                    pFrom[len(path)] = pFrom[len(path) + 1] = '\0'
-                    operation = SHFILEOPSTRUCTW(wFunc=UINT(FO_DELETE),
-                                                pFrom=LPCWSTR(addressof(pFrom)),
-                                                fFlags=FOF_NO_UI)
+                    pFrom[len(path)] = pFrom[len(path) + 1] = "\0"
+                    operation = SHFILEOPSTRUCTW(
+                        wFunc=UINT(FO_DELETE),
+                        pFrom=LPCWSTR(addressof(pFrom)),
+                        fFlags=FOF_NO_UI,
+                    )
                     result = SHFileOperationW(byref(operation))
                     if result:
                         raise WinError(result)
                 else:
-                    shutil.rmtree(path, onerror = on_rm_error if force else None)
+                    shutil.rmtree(path, onerror=on_rm_error if force else None)
             else:
                 if force and not os.access(path, os.W_OK):
-                    os.chmod(path,
-                             stat.S_IMODE(os.stat(path).st_mode) | stat.S_IWRITE)
+                    os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | stat.S_IWRITE)
                 os.remove(path)
         except OSError as err:
             stderr.write("Error: 'rm' command failed, %s" % str(err))
             exitCode = 1
     return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
 
+
 def executeBuiltinColon(cmd, cmd_shenv):
     """executeBuiltinColon - Discard arguments and exit with status 0."""
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def processRedirects(cmd, stdin_source, cmd_shenv, opened_files):
     """Return the standard fds for cmd after applying redirects
 
@@ -527,22 +562,24 @@ def processRedirects(cmd, stdin_source, cmd_shenv, opened_files):
     # where file-object is initially None.
     redirects = [(0,), (1,), (2,)]
     for (op, filename) in cmd.redirects:
-        if op == ('>',2):
-            redirects[2] = [filename, 'w', None]
-        elif op == ('>>',2):
-            redirects[2] = [filename, 'a', None]
-        elif op == ('>&',2) and filename in '012':
+        if op == (">", 2):
+            redirects[2] = [filename, "w", None]
+        elif op == (">>", 2):
+            redirects[2] = [filename, "a", None]
+        elif op == (">&", 2) and filename in "012":
             redirects[2] = redirects[int(filename)]
-        elif op == ('>&',) or op == ('&>',):
-            redirects[1] = redirects[2] = [filename, 'w', None]
-        elif op == ('>',):
-            redirects[1] = [filename, 'w', None]
-        elif op == ('>>',):
-            redirects[1] = [filename, 'a', None]
-        elif op == ('<',):
-            redirects[0] = [filename, 'r', None]
+        elif op == (">&",) or op == ("&>",):
+            redirects[1] = redirects[2] = [filename, "w", None]
+        elif op == (">",):
+            redirects[1] = [filename, "w", None]
+        elif op == (">>",):
+            redirects[1] = [filename, "a", None]
+        elif op == ("<",):
+            redirects[0] = [filename, "r", None]
         else:
-            raise InternalShellError(cmd, "Unsupported redirect: %r" % ((op, filename),))
+            raise InternalShellError(
+                cmd, "Unsupported redirect: %r" % ((op, filename),)
+            )
 
     # Open file descriptors in a second pass.
     std_fds = [None, None, None]
@@ -578,25 +615,27 @@ def processRedirects(cmd, stdin_source, cmd_shenv, opened_files):
         redir_filename = None
         name = expand_glob(filename, cmd_shenv.cwd)
         if len(name) != 1:
-           raise InternalShellError(cmd, "Unsupported: glob in "
-                                    "redirect expanded to multiple files")
+            raise InternalShellError(
+                cmd, "Unsupported: glob in " "redirect expanded to multiple files"
+            )
         name = name[0]
         if kAvoidDevNull and name == kDevNull:
             fd = tempfile.TemporaryFile(mode=mode)
-        elif kIsWindows and name == '/dev/tty':
+        elif kIsWindows and name == "/dev/tty":
             # Simulate /dev/tty on Windows.
             # "CON" is a special filename for the console.
             fd = open("CON", mode)
         else:
             # Make sure relative paths are relative to the cwd.
             redir_filename = os.path.join(cmd_shenv.cwd, name)
-            redir_filename = to_unicode(redir_filename) \
-                    if kIsWindows else to_bytes(redir_filename)
+            redir_filename = (
+                to_unicode(redir_filename) if kIsWindows else to_bytes(redir_filename)
+            )
             fd = open(redir_filename, mode)
         # Workaround a Win32 and/or subprocess bug when appending.
         #
         # FIXME: Actually, this is probably an instance of PR6753.
-        if mode == 'a':
+        if mode == "a":
             fd.seek(0, 2)
         # Mutate the underlying redirect list so that we can redirect stdout
         # and stderr to the same place without opening the file twice.
@@ -606,6 +645,7 @@ def processRedirects(cmd, stdin_source, cmd_shenv, opened_files):
 
     return std_fds
 
+
 def _executeShCmd(cmd, shenv, results, timeoutHelper):
     if timeoutHelper.timeoutReached():
         # Prevent further recursion if the timeout has been hit
@@ -613,20 +653,20 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
         return None
 
     if isinstance(cmd, ShUtil.Seq):
-        if cmd.op == ';':
+        if cmd.op == ";":
             res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
             return _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
 
-        if cmd.op == '&':
-            raise InternalShellError(cmd,"unsupported shell operator: '&'")
+        if cmd.op == "&":
+            raise InternalShellError(cmd, "unsupported shell operator: '&'")
 
-        if cmd.op == '||':
+        if cmd.op == "||":
             res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
             if res != 0:
                 res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
             return res
 
-        if cmd.op == '&&':
+        if cmd.op == "&&":
             res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
             if res is None:
                 return res
@@ -635,7 +675,7 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
                 res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
             return res
 
-        raise ValueError('Unknown shell command: %r' % cmd.op)
+        raise ValueError("Unknown shell command: %r" % cmd.op)
     assert isinstance(cmd, ShUtil.Pipeline)
 
     procs = []
@@ -644,20 +684,24 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
     stderrTempFiles = []
     opened_files = []
     named_temp_files = []
-    builtin_commands = set(['cat', 'diff'])
-    builtin_commands_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "builtin_commands")
-    inproc_builtins = {'cd': executeBuiltinCd,
-                       'export': executeBuiltinExport,
-                       'echo': executeBuiltinEcho,
-                       'mkdir': executeBuiltinMkdir,
-                       'popd': executeBuiltinPopd,
-                       'pushd': executeBuiltinPushd,
-                       'rm': executeBuiltinRm,
-                       ':': executeBuiltinColon}
+    builtin_commands = set(["cat", "diff"])
+    builtin_commands_dir = os.path.join(
+        os.path.dirname(os.path.abspath(__file__)), "builtin_commands"
+    )
+    inproc_builtins = {
+        "cd": executeBuiltinCd,
+        "export": executeBuiltinExport,
+        "echo": executeBuiltinEcho,
+        "mkdir": executeBuiltinMkdir,
+        "popd": executeBuiltinPopd,
+        "pushd": executeBuiltinPushd,
+        "rm": executeBuiltinRm,
+        ":": executeBuiltinColon,
+    }
     # To avoid deadlock, we use a single stderr stream for piped
     # output. This is null until we have seen some output using
     # stderr.
-    for i,j in enumerate(cmd.commands):
+    for i, j in enumerate(cmd.commands):
         # Reference the global environment by default.
         cmd_shenv = shenv
         args = list(j.args)
@@ -665,7 +709,7 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
         not_count = 0
         not_crash = False
         while True:
-            if args[0] == 'env':
+            if args[0] == "env":
                 # Create a copy of the global environment and modify it for
                 # this one command. There might be multiple envs in a pipeline,
                 # and there might be multiple envs in a command (usually when
@@ -676,23 +720,20 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
                     cmd_shenv = ShellEnvironment(shenv.cwd, shenv.env)
                 args = updateEnv(cmd_shenv, args)
                 if not args:
-                    raise InternalShellError(j, "Error: 'env' requires a"
-                                                " subcommand")
-            elif args[0] == 'not':
+                    raise InternalShellError(j, "Error: 'env' requires a" " subcommand")
+            elif args[0] == "not":
                 not_args.append(args.pop(0))
                 not_count += 1
-                if args and args[0] == '--crash':
+                if args and args[0] == "--crash":
                     not_args.append(args.pop(0))
                     not_crash = True
                 if not args:
-                    raise InternalShellError(j, "Error: 'not' requires a"
-                                                " subcommand")
-            elif args[0] == '!':
+                    raise InternalShellError(j, "Error: 'not' requires a" " subcommand")
+            elif args[0] == "!":
                 not_args.append(args.pop(0))
                 not_count += 1
                 if not args:
-                    raise InternalShellError(j, "Error: '!' requires a"
-                                                " subcommand")
+                    raise InternalShellError(j, "Error: '!' requires a" " subcommand")
             else:
                 break
 
@@ -704,22 +745,26 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
         # FIXME: Standardize on the builtin echo implementation. We can use a
         # temporary file to sidestep blocking pipe write issues.
         inproc_builtin = inproc_builtins.get(args[0], None)
-        if inproc_builtin and (args[0] != 'echo' or len(cmd.commands) == 1):
+        if inproc_builtin and (args[0] != "echo" or len(cmd.commands) == 1):
             # env calling an in-process builtin is useless, so we take the safe
             # approach of complaining.
             if not cmd_shenv is shenv:
-                raise InternalShellError(j, "Error: 'env' cannot call '{}'"
-                                            .format(args[0]))
+                raise InternalShellError(
+                    j, "Error: 'env' cannot call '{}'".format(args[0])
+                )
             if not_crash:
-                raise InternalShellError(j, "Error: 'not --crash' cannot call"
-                                            " '{}'".format(args[0]))
+                raise InternalShellError(
+                    j, "Error: 'not --crash' cannot call" " '{}'".format(args[0])
+                )
             if len(cmd.commands) != 1:
-                raise InternalShellError(j, "Unsupported: '{}' cannot be part"
-                                            " of a pipeline".format(args[0]))
+                raise InternalShellError(
+                    j,
+                    "Unsupported: '{}' cannot be part" " of a pipeline".format(args[0]),
+                )
             result = inproc_builtin(Command(args, j.redirects), cmd_shenv)
             if not_count % 2:
                 result.exitCode = int(not result.exitCode)
-            result.command.args = j.args;
+            result.command.args = j.args
             results.append(result)
             return result.exitCode
 
@@ -727,8 +772,7 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
         # commands.
         if args[0] in builtin_commands:
             args.insert(0, sys.executable)
-            cmd_shenv.env['PYTHONPATH'] = \
-                os.path.dirname(os.path.abspath(__file__))
+            cmd_shenv.env["PYTHONPATH"] = os.path.dirname(os.path.abspath(__file__))
             args[1] = os.path.join(builtin_commands_dir, args[1] + ".py")
 
         # We had to search through the 'not' commands to find all the 'env'
@@ -749,12 +793,13 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
         else:
             not_args = []
 
-        stdin, stdout, stderr = processRedirects(j, default_stdin, cmd_shenv,
-                                                 opened_files)
+        stdin, stdout, stderr = processRedirects(
+            j, default_stdin, cmd_shenv, opened_files
+        )
 
         # If stderr wants to come from stdout, but stdout isn't a pipe, then put
         # stderr on a pipe and treat it as stdout.
-        if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE):
+        if stderr == subprocess.STDOUT and stdout != subprocess.PIPE:
             stderr = subprocess.PIPE
             stderrIsStdout = True
         else:
@@ -765,20 +810,20 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
             #
             # FIXME: This is slow, but so is deadlock.
             if stderr == subprocess.PIPE and j != cmd.commands[-1]:
-                stderr = tempfile.TemporaryFile(mode='w+b')
+                stderr = tempfile.TemporaryFile(mode="w+b")
                 stderrTempFiles.append((i, stderr))
 
         # Resolve the executable path ourselves.
         executable = None
         # For paths relative to cwd, use the cwd of the shell environment.
-        if args[0].startswith('.'):
+        if args[0].startswith("."):
             exe_in_cwd = os.path.join(cmd_shenv.cwd, args[0])
             if os.path.isfile(exe_in_cwd):
                 executable = exe_in_cwd
         if not executable:
-            executable = lit.util.which(args[0], cmd_shenv.env['PATH'])
+            executable = lit.util.which(args[0], cmd_shenv.env["PATH"])
         if not executable:
-            raise InternalShellError(j, '%r: command not found' % args[0])
+            raise InternalShellError(j, "%r: command not found" % args[0])
 
         # Replace uses of /dev/null with temporary files.
         if kAvoidDevNull:
@@ -788,7 +833,7 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
                 str_type = basestring
             except NameError:
                 str_type = str
-            for i,arg in enumerate(args):
+            for i, arg in enumerate(args):
                 if isinstance(arg, str_type) and kDevNull in arg:
                     f = tempfile.NamedTemporaryFile(delete=False)
                     f.close()
@@ -804,20 +849,27 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
             args = quote_windows_command(args)
 
         try:
-            procs.append(subprocess.Popen(args, cwd=cmd_shenv.cwd,
-                                          executable = executable,
-                                          stdin = stdin,
-                                          stdout = stdout,
-                                          stderr = stderr,
-                                          env = cmd_shenv.env,
-                                          close_fds = kUseCloseFDs,
-                                          universal_newlines = True,
-                                          errors = 'replace'))
+            procs.append(
+                subprocess.Popen(
+                    args,
+                    cwd=cmd_shenv.cwd,
+                    executable=executable,
+                    stdin=stdin,
+                    stdout=stdout,
+                    stderr=stderr,
+                    env=cmd_shenv.env,
+                    close_fds=kUseCloseFDs,
+                    universal_newlines=True,
+                    errors="replace",
+                )
+            )
             proc_not_counts.append(not_count)
             # Let the helper know about this process
             timeoutHelper.addProcess(procs[-1])
         except OSError as e:
-            raise InternalShellError(j, 'Could not create process ({}) due to {}'.format(executable, e))
+            raise InternalShellError(
+                j, "Could not create process ({}) due to {}".format(executable, e)
+            )
 
         # Immediately close stdin for any process taking stdin from us.
         if stdin == subprocess.PIPE:
@@ -847,21 +899,21 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
         if procs[i].stdout is not None:
             out = procs[i].stdout.read()
         else:
-            out = ''
+            out = ""
         if procs[i].stderr is not None:
             err = procs[i].stderr.read()
         else:
-            err = ''
-        procData[i] = (out,err)
+            err = ""
+        procData[i] = (out, err)
 
     # Read stderr out of the temp files.
-    for i,f in stderrTempFiles:
+    for i, f in stderrTempFiles:
         f.seek(0, 0)
         procData[i] = (procData[i][0], f.read())
         f.close()
 
     exitCode = None
-    for i,(out,err) in enumerate(procData):
+    for i, (out, err) in enumerate(procData):
         res = procs[i].wait()
         # Detect Ctrl-C in subprocess.
         if res == -signal.SIGINT:
@@ -874,16 +926,16 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
         # Ensure the resulting output is always of string type.
         try:
             if out is None:
-                out = ''
+                out = ""
             else:
-                out = to_string(out.decode('utf-8', errors='replace'))
+                out = to_string(out.decode("utf-8", errors="replace"))
         except:
             out = str(out)
         try:
             if err is None:
-                err = ''
+                err = ""
             else:
-                err = to_string(err.decode('utf-8', errors='replace'))
+                err = to_string(err.decode("utf-8", errors="replace"))
         except:
             err = str(err)
 
@@ -891,18 +943,25 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
         output_files = []
         if res != 0:
             for (name, mode, f, path) in sorted(opened_files):
-                if path is not None and mode in ('w', 'a'):
+                if path is not None and mode in ("w", "a"):
                     try:
-                        with open(path, 'rb') as f:
+                        with open(path, "rb") as f:
                             data = f.read()
                     except:
                         data = None
                     if data is not None:
                         output_files.append((name, path, data))
 
-        results.append(ShellCommandResult(
-            cmd.commands[i], out, err, res, timeoutHelper.timeoutReached(),
-            output_files))
+        results.append(
+            ShellCommandResult(
+                cmd.commands[i],
+                out,
+                err,
+                res,
+                timeoutHelper.timeoutReached(),
+                output_files,
+            )
+        )
         if cmd.pipe_err:
             # Take the last failing exit code from the pipeline.
             if not exitCode or res != 0:
@@ -922,45 +981,49 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
 
     return exitCode
 
+
 def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
     cmds = []
     for i, ln in enumerate(commands):
         match = re.match(kPdbgRegex, ln)
         if match:
             command = match.group(2)
-            ln = commands[i] = \
-                match.expand(": '\\1'; \\2" if command else ": '\\1'")
+            ln = commands[i] = match.expand(": '\\1'; \\2" if command else ": '\\1'")
         try:
-            cmds.append(ShUtil.ShParser(ln, litConfig.isWindows,
-                                        test.config.pipefail).parse())
+            cmds.append(
+                ShUtil.ShParser(ln, litConfig.isWindows, test.config.pipefail).parse()
+            )
         except:
             return lit.Test.Result(Test.FAIL, "shell parser error on: %r" % ln)
 
     cmd = cmds[0]
     for c in cmds[1:]:
-        cmd = ShUtil.Seq(cmd, '&&', c)
+        cmd = ShUtil.Seq(cmd, "&&", c)
 
     results = []
     timeoutInfo = None
     try:
         shenv = ShellEnvironment(cwd, test.config.environment)
-        exitCode, timeoutInfo = executeShCmd(cmd, shenv, results, timeout=litConfig.maxIndividualTestTime)
+        exitCode, timeoutInfo = executeShCmd(
+            cmd, shenv, results, timeout=litConfig.maxIndividualTestTime
+        )
     except InternalShellError:
         e = sys.exc_info()[1]
         exitCode = 127
-        results.append(
-            ShellCommandResult(e.command, '', e.message, exitCode, False))
+        results.append(ShellCommandResult(e.command, "", e.message, exitCode, False))
 
-    out = err = ''
-    for i,result in enumerate(results):
+    out = err = ""
+    for i, result in enumerate(results):
         # Write the command line run.
-        out += '$ %s\n' % (' '.join('"%s"' % s
-                                    for s in result.command.args),)
+        out += "$ %s\n" % (" ".join('"%s"' % s for s in result.command.args),)
 
         # If nothing interesting happened, move on.
-        if litConfig.maxIndividualTestTime == 0 and \
-               result.exitCode == 0 and \
-               not result.stdout.strip() and not result.stderr.strip():
+        if (
+            litConfig.maxIndividualTestTime == 0
+            and result.exitCode == 0
+            and not result.stdout.strip()
+            and not result.stderr.strip()
+        ):
             continue
 
         # Otherwise, something failed or was printed, show it.
@@ -969,7 +1032,7 @@ def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
         for (name, path, data) in result.outputFiles:
             if data.strip():
                 out += "# redirected output from %r:\n" % (name,)
-                data = to_string(data.decode('utf-8', errors='replace'))
+                data = to_string(data.decode("utf-8", errors="replace"))
                 if len(data) > 1024:
                     out += data[:1024] + "\n...\n"
                     out += "note: data was truncated\n"
@@ -978,9 +1041,9 @@ def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
                 out += "\n"
 
         if result.stdout.strip():
-            out += '# command output:\n%s\n' % (result.stdout,)
+            out += "# command output:\n%s\n" % (result.stdout,)
         if result.stderr.strip():
-            out += '# command stderr:\n%s\n' % (result.stderr,)
+            out += "# command stderr:\n%s\n" % (result.stderr,)
         if not result.stdout.strip() and not result.stderr.strip():
             out += "note: command had no output on stdout or stderr\n"
 
@@ -992,80 +1055,84 @@ def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
                 codeStr = hex(int(result.exitCode & 0xFFFFFFFF)).rstrip("L")
             else:
                 codeStr = str(result.exitCode)
-            out += "error: command failed with exit status: %s\n" % (
-                codeStr,)
+            out += "error: command failed with exit status: %s\n" % (codeStr,)
         if litConfig.maxIndividualTestTime > 0 and result.timeoutReached:
-            out += 'error: command reached timeout: %s\n' % (
-                str(result.timeoutReached),)
+            out += "error: command reached timeout: %s\n" % (
+                str(result.timeoutReached),
+            )
 
     return out, err, exitCode, timeoutInfo
 
+
 def executeScript(test, litConfig, tmpBase, commands, cwd):
     bashPath = litConfig.getBashPath()
-    isWin32CMDEXE = (litConfig.isWindows and not bashPath)
-    script = tmpBase + '.script'
+    isWin32CMDEXE = litConfig.isWindows and not bashPath
+    script = tmpBase + ".script"
     if isWin32CMDEXE:
-        script += '.bat'
+        script += ".bat"
 
     # Write script file
-    mode = 'w'
+    mode = "w"
     open_kwargs = {}
     if litConfig.isWindows and not isWin32CMDEXE:
-        mode += 'b'  # Avoid CRLFs when writing bash scripts.
-    elif sys.version_info > (3,0):
-        open_kwargs['encoding'] = 'utf-8'
+        mode += "b"  # Avoid CRLFs when writing bash scripts.
+    elif sys.version_info > (3, 0):
+        open_kwargs["encoding"] = "utf-8"
     f = open(script, mode, **open_kwargs)
     if isWin32CMDEXE:
         for i, ln in enumerate(commands):
             match = re.match(kPdbgRegex, ln)
             if match:
                 command = match.group(2)
-                commands[i] = \
-                    match.expand("echo '\\1' > nul && " if command
-                                 else "echo '\\1' > nul")
+                commands[i] = match.expand(
+                    "echo '\\1' > nul && " if command else "echo '\\1' > nul"
+                )
         if litConfig.echo_all_commands:
-            f.write('@echo on\n')
+            f.write("@echo on\n")
         else:
-            f.write('@echo off\n')
-        f.write('\n@if %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
+            f.write("@echo off\n")
+        f.write("\n at if %ERRORLEVEL% NEQ 0 EXIT\n".join(commands))
     else:
         for i, ln in enumerate(commands):
             match = re.match(kPdbgRegex, ln)
             if match:
                 command = match.group(2)
-                commands[i] = match.expand(": '\\1'; \\2" if command
-                                           else ": '\\1'")
+                commands[i] = match.expand(": '\\1'; \\2" if command else ": '\\1'")
         if test.config.pipefail:
-            f.write(b'set -o pipefail;' if mode == 'wb' else 'set -o pipefail;')
+            f.write(b"set -o pipefail;" if mode == "wb" else "set -o pipefail;")
         if litConfig.echo_all_commands:
-            f.write(b'set -x;' if mode == 'wb' else 'set -x;')
-        if sys.version_info > (3,0) and mode == 'wb':
-            f.write(bytes('{ ' + '; } &&\n{ '.join(commands) + '; }', 'utf-8'))
+            f.write(b"set -x;" if mode == "wb" else "set -x;")
+        if sys.version_info > (3, 0) and mode == "wb":
+            f.write(bytes("{ " + "; } &&\n{ ".join(commands) + "; }", "utf-8"))
         else:
-            f.write('{ ' + '; } &&\n{ '.join(commands) + '; }')
-    f.write(b'\n' if mode == 'wb' else '\n')
+            f.write("{ " + "; } &&\n{ ".join(commands) + "; }")
+    f.write(b"\n" if mode == "wb" else "\n")
     f.close()
 
     if isWin32CMDEXE:
-        command = ['cmd','/c', script]
+        command = ["cmd", "/c", script]
     else:
         if bashPath:
             command = [bashPath, script]
         else:
-            command = ['/bin/sh', script]
+            command = ["/bin/sh", script]
         if litConfig.useValgrind:
             # FIXME: Running valgrind on sh is overkill. We probably could just
             # run on clang with no real loss.
             command = litConfig.valgrindArgs + command
 
     try:
-        out, err, exitCode = lit.util.executeCommand(command, cwd=cwd,
-                                       env=test.config.environment,
-                                       timeout=litConfig.maxIndividualTestTime)
+        out, err, exitCode = lit.util.executeCommand(
+            command,
+            cwd=cwd,
+            env=test.config.environment,
+            timeout=litConfig.maxIndividualTestTime,
+        )
         return (out, err, exitCode, None)
     except lit.util.ExecuteCommandTimeoutException as e:
         return (e.out, e.err, e.exitCode, e.msg)
 
+
 def parseIntegratedTestScriptCommands(source_path, keywords):
     """
     parseIntegratedTestScriptCommands(source_path) -> commands
@@ -1086,16 +1153,17 @@ def parseIntegratedTestScriptCommands(source_path, keywords):
     # version.
 
     keywords_re = re.compile(
-        to_bytes("(%s)(.*)\n" % ("|".join(re.escape(k) for k in keywords),)))
+        to_bytes("(%s)(.*)\n" % ("|".join(re.escape(k) for k in keywords),))
+    )
 
-    f = open(source_path, 'rb')
+    f = open(source_path, "rb")
     try:
         # Read the entire file contents.
         data = f.read()
 
         # Ensure the data ends with a newline.
-        if not data.endswith(to_bytes('\n')):
-            data = data + to_bytes('\n')
+        if not data.endswith(to_bytes("\n")):
+            data = data + to_bytes("\n")
 
         # Iterate over the matches.
         line_number = 1
@@ -1104,8 +1172,9 @@ def parseIntegratedTestScriptCommands(source_path, keywords):
             # Compute the updated line number by counting the intervening
             # newlines.
             match_position = match.start()
-            line_number += data.count(to_bytes('\n'), last_match_position,
-                                      match_position)
+            line_number += data.count(
+                to_bytes("\n"), last_match_position, match_position
+            )
             last_match_position = match_position
 
             # Convert the keyword and line to UTF-8 strings and yield the
@@ -1116,109 +1185,131 @@ def parseIntegratedTestScriptCommands(source_path, keywords):
             # Opening the file in binary mode prevented Windows \r newline
             # characters from being converted to Unix \n newlines, so manually
             # strip those from the yielded lines.
-            keyword,ln = match.groups()
-            yield (line_number, to_string(keyword.decode('utf-8')),
-                   to_string(ln.decode('utf-8').rstrip('\r')))
+            keyword, ln = match.groups()
+            yield (
+                line_number,
+                to_string(keyword.decode("utf-8")),
+                to_string(ln.decode("utf-8").rstrip("\r")),
+            )
     finally:
         f.close()
 
+
 def getTempPaths(test):
     """Get the temporary location, this is always relative to the test suite
     root, not test source root."""
     execpath = test.getExecPath()
-    execdir,execbase = os.path.split(execpath)
-    tmpDir = os.path.join(execdir, 'Output')
+    execdir, execbase = os.path.split(execpath)
+    tmpDir = os.path.join(execdir, "Output")
     tmpBase = os.path.join(tmpDir, execbase)
     return tmpDir, tmpBase
 
+
 def colonNormalizePath(path):
     if kIsWindows:
-        return re.sub(r'^(.):', r'\1', path.replace('\\', '/'))
+        return re.sub(r"^(.):", r"\1", path.replace("\\", "/"))
     else:
-        assert path[0] == '/'
+        assert path[0] == "/"
         return path[1:]
 
+
 def getDefaultSubstitutions(test, tmpDir, tmpBase, normalize_slashes=False):
     sourcepath = test.getSourcePath()
     sourcedir = os.path.dirname(sourcepath)
 
     # Normalize slashes, if requested.
     if normalize_slashes:
-        sourcepath = sourcepath.replace('\\', '/')
-        sourcedir = sourcedir.replace('\\', '/')
-        tmpDir = tmpDir.replace('\\', '/')
-        tmpBase = tmpBase.replace('\\', '/')
+        sourcepath = sourcepath.replace("\\", "/")
+        sourcedir = sourcedir.replace("\\", "/")
+        tmpDir = tmpDir.replace("\\", "/")
+        tmpBase = tmpBase.replace("\\", "/")
 
     substitutions = []
     substitutions.extend(test.config.substitutions)
-    tmpName = tmpBase + '.tmp'
+    tmpName = tmpBase + ".tmp"
     baseName = os.path.basename(tmpBase)
-    substitutions.extend([('%s', sourcepath),
-                          ('%S', sourcedir),
-                          ('%p', sourcedir),
-                          ('%{pathsep}', os.pathsep),
-                          ('%t', tmpName),
-                          ('%basename_t', baseName),
-                          ('%T', tmpDir)])
-
-    substitutions.extend([
-        ('%{fs-src-root}', pathlib.Path(sourcedir).anchor),
-        ('%{fs-tmp-root}', pathlib.Path(tmpBase).anchor),
-        ('%{fs-sep}', os.path.sep),
-    ])
+    substitutions.extend(
+        [
+            ("%s", sourcepath),
+            ("%S", sourcedir),
+            ("%p", sourcedir),
+            ("%{pathsep}", os.pathsep),
+            ("%t", tmpName),
+            ("%basename_t", baseName),
+            ("%T", tmpDir),
+        ]
+    )
+
+    substitutions.extend(
+        [
+            ("%{fs-src-root}", pathlib.Path(sourcedir).anchor),
+            ("%{fs-tmp-root}", pathlib.Path(tmpBase).anchor),
+            ("%{fs-sep}", os.path.sep),
+        ]
+    )
 
     # "%/[STpst]" should be normalized.
-    substitutions.extend([
-            ('%/s', sourcepath.replace('\\', '/')),
-            ('%/S', sourcedir.replace('\\', '/')),
-            ('%/p', sourcedir.replace('\\', '/')),
-            ('%/t', tmpBase.replace('\\', '/') + '.tmp'),
-            ('%/T', tmpDir.replace('\\', '/')),
-            ('%/et',tmpName.replace('\\', '\\\\\\\\\\\\\\\\')),
-            ])
+    substitutions.extend(
+        [
+            ("%/s", sourcepath.replace("\\", "/")),
+            ("%/S", sourcedir.replace("\\", "/")),
+            ("%/p", sourcedir.replace("\\", "/")),
+            ("%/t", tmpBase.replace("\\", "/") + ".tmp"),
+            ("%/T", tmpDir.replace("\\", "/")),
+            ("%/et", tmpName.replace("\\", "\\\\\\\\\\\\\\\\")),
+        ]
+    )
 
     # "%{/[STpst]:regex_replacement}" should be normalized like "%/[STpst]" but we're
     # also in a regex replacement context of a s@@@ regex.
     def regex_escape(s):
-        s = s.replace('@', r'\@')
-        s = s.replace('&', r'\&')
+        s = s.replace("@", r"\@")
+        s = s.replace("&", r"\&")
         return s
-    substitutions.extend([
-            ('%{/s:regex_replacement}',
-             regex_escape(sourcepath.replace('\\', '/'))),
-            ('%{/S:regex_replacement}',
-             regex_escape(sourcedir.replace('\\', '/'))),
-            ('%{/p:regex_replacement}',
-             regex_escape(sourcedir.replace('\\', '/'))),
-            ('%{/t:regex_replacement}',
-             regex_escape(tmpBase.replace('\\', '/')) + '.tmp'),
-            ('%{/T:regex_replacement}',
-             regex_escape(tmpDir.replace('\\', '/'))),
-            ])
+
+    substitutions.extend(
+        [
+            ("%{/s:regex_replacement}", regex_escape(sourcepath.replace("\\", "/"))),
+            ("%{/S:regex_replacement}", regex_escape(sourcedir.replace("\\", "/"))),
+            ("%{/p:regex_replacement}", regex_escape(sourcedir.replace("\\", "/"))),
+            (
+                "%{/t:regex_replacement}",
+                regex_escape(tmpBase.replace("\\", "/")) + ".tmp",
+            ),
+            ("%{/T:regex_replacement}", regex_escape(tmpDir.replace("\\", "/"))),
+        ]
+    )
 
     # "%:[STpst]" are normalized paths without colons and without a leading
     # slash.
-    substitutions.extend([
-            ('%:s', colonNormalizePath(sourcepath)),
-            ('%:S', colonNormalizePath(sourcedir)),
-            ('%:p', colonNormalizePath(sourcedir)),
-            ('%:t', colonNormalizePath(tmpBase + '.tmp')),
-            ('%:T', colonNormalizePath(tmpDir)),
-            ])
+    substitutions.extend(
+        [
+            ("%:s", colonNormalizePath(sourcepath)),
+            ("%:S", colonNormalizePath(sourcedir)),
+            ("%:p", colonNormalizePath(sourcedir)),
+            ("%:t", colonNormalizePath(tmpBase + ".tmp")),
+            ("%:T", colonNormalizePath(tmpDir)),
+        ]
+    )
     return substitutions
 
+
 def _memoize(f):
     cache = {}  # Intentionally unbounded, see applySubstitutions()
+
     def memoized(x):
         if x not in cache:
             cache[x] = f(x)
         return cache[x]
+
     return memoized
 
+
 @_memoize
 def _caching_re_compile(r):
     return re.compile(r)
 
+
 class ExpandableScriptDirective(object):
     """
     Common interface for lit directives for which any lit substitutions must be
@@ -1270,8 +1361,9 @@ def get_location(self):
         this directive and any line continuations.
         """
         if self.start_line_number == self.end_line_number:
-            return f'at line {self.start_line_number}'
-        return f'from line {self.start_line_number} to {self.end_line_number}'
+            return f"at line {self.start_line_number}"
+        return f"from line {self.start_line_number} to {self.end_line_number}"
+
 
 class CommandDirective(ExpandableScriptDirective):
     """
@@ -1296,7 +1388,8 @@ def add_continuation(self, line_number, keyword, line):
     def needs_continuation(self):
         # Trailing whitespace is stripped immediately when each line is added,
         # so '\' is never hidden here.
-        return self.command[-1] == '\\'
+        return self.command[-1] == "\\"
+
 
 class SubstDirective(ExpandableScriptDirective):
     """
@@ -1313,8 +1406,7 @@ class SubstDirective(ExpandableScriptDirective):
         still required.
     """
 
-    def __init__(self, start_line_number, end_line_number, keyword, new_subst,
-                 line):
+    def __init__(self, start_line_number, end_line_number, keyword, new_subst, line):
         super().__init__(start_line_number, end_line_number, keyword)
         self.new_subst = new_subst
         self.body = line
@@ -1329,13 +1421,13 @@ def add_continuation(self, line_number, keyword, line):
             raise ValueError("Substitution's continuation is empty")
         # Append line.  Replace the '\' and any adjacent whitespace with a
         # single space.
-        self.body = self.body.rstrip()[:-1].rstrip() + ' ' + line.lstrip()
+        self.body = self.body.rstrip()[:-1].rstrip() + " " + line.lstrip()
         self.end_line_number = line_number
         self._parse_body()
         return True
 
     def needs_continuation(self):
-        return self.body.rstrip()[-1:] == '\\'
+        return self.body.rstrip()[-1:] == "\\"
 
     def _parse_body(self):
         """
@@ -1348,7 +1440,7 @@ def _parse_body(self):
 
         # Extract the left-hand side and value, and discard any whitespace
         # enclosing each.
-        parts = self.body.split('=', 1)
+        parts = self.body.split("=", 1)
         if len(parts) == 1:
             raise ValueError("Substitution's definition does not contain '='")
         self.name = parts[0].strip()
@@ -1369,31 +1461,34 @@ def _parse_body(self):
         #
         # Actually, '{' and '}' are special if they contain only digits possibly
         # separated by a comma.  Requiring a leading letter avoids that.
-        if not re.fullmatch(r'%{[_a-zA-Z][-_:0-9a-zA-Z]*}', self.name):
+        if not re.fullmatch(r"%{[_a-zA-Z][-_:0-9a-zA-Z]*}", self.name):
             raise ValueError(
                 f"Substitution name '{self.name}' is malformed as it must "
                 f"start with '%{{', it must end with '}}', and the rest must "
                 f"start with a letter or underscore and contain only "
-                f"alphanumeric characters, hyphens, underscores, and colons")
+                f"alphanumeric characters, hyphens, underscores, and colons"
+            )
 
     def adjust_substitutions(self, substitutions):
         """
         Modify the specified substitution list as specified by this directive.
         """
-        assert not self.needs_continuation(), \
-               "expected directive continuations to be parsed before applying"
-        value_repl = self.value.replace('\\', '\\\\')
-        existing = [i for i, subst in enumerate(substitutions)
-                    if self.name in subst[0]]
-        existing_res = ''.join("\nExisting pattern: " + substitutions[i][0]
-                               for i in existing)
+        assert (
+            not self.needs_continuation()
+        ), "expected directive continuations to be parsed before applying"
+        value_repl = self.value.replace("\\", "\\\\")
+        existing = [i for i, subst in enumerate(substitutions) if self.name in subst[0]]
+        existing_res = "".join(
+            "\nExisting pattern: " + substitutions[i][0] for i in existing
+        )
         if self.new_subst:
             if existing:
                 raise ValueError(
                     f"Substitution whose pattern contains '{self.name}' is "
                     f"already defined before '{self.keyword}' directive "
                     f"{self.get_location()}"
-                    f"{existing_res}")
+                    f"{existing_res}"
+                )
             substitutions.insert(0, (self.name, value_repl))
             return
         if len(existing) > 1:
@@ -1401,23 +1496,25 @@ def adjust_substitutions(self, substitutions):
                 f"Multiple substitutions whose patterns contain '{self.name}' "
                 f"are defined before '{self.keyword}' directive "
                 f"{self.get_location()}"
-                f"{existing_res}")
+                f"{existing_res}"
+            )
         if not existing:
             raise ValueError(
                 f"No substitution for '{self.name}' is defined before "
-                f"'{self.keyword}' directive {self.get_location()}")
+                f"'{self.keyword}' directive {self.get_location()}"
+            )
         if substitutions[existing[0]][0] != self.name:
             raise ValueError(
                 f"Existing substitution whose pattern contains '{self.name}' "
                 f"does not have the pattern specified by '{self.keyword}' "
                 f"directive {self.get_location()}\n"
                 f"Expected pattern: {self.name}"
-                f"{existing_res}")
+                f"{existing_res}"
+            )
         substitutions[existing[0]] = (self.name, value_repl)
 
 
-def applySubstitutions(script, substitutions, conditions={},
-                       recursion_limit=None):
+def applySubstitutions(script, substitutions, conditions={}, recursion_limit=None):
     """
     Apply substitutions to the script.  Allow full regular expression syntax.
     Replace each matching occurrence of regular expression pattern a with
@@ -1432,44 +1529,44 @@ def applySubstitutions(script, substitutions, conditions={},
 
     # We use #_MARKER_# to hide %% while we do the other substitutions.
     def escapePercents(ln):
-        return _caching_re_compile('%%').sub('#_MARKER_#', ln)
+        return _caching_re_compile("%%").sub("#_MARKER_#", ln)
 
     def unescapePercents(ln):
-        return _caching_re_compile('#_MARKER_#').sub('%', ln)
+        return _caching_re_compile("#_MARKER_#").sub("%", ln)
 
     def substituteIfElse(ln):
         # early exit to avoid wasting time on lines without
         # conditional substitutions
-        if ln.find('%if ') == -1:
+        if ln.find("%if ") == -1:
             return ln
 
         def tryParseIfCond(ln):
             # space is important to not conflict with other (possible)
             # substitutions
-            if not ln.startswith('%if '):
+            if not ln.startswith("%if "):
                 return None, ln
             ln = ln[4:]
 
             # stop at '%{'
-            match = _caching_re_compile('%{').search(ln)
+            match = _caching_re_compile("%{").search(ln)
             if not match:
                 raise ValueError("'%{' is missing for %if substitution")
-            cond = ln[:match.start()]
+            cond = ln[: match.start()]
 
             # eat '%{' as well
-            ln = ln[match.end():]
+            ln = ln[match.end() :]
             return cond, ln
 
         def tryParseElse(ln):
-            match = _caching_re_compile('^\s*%else\s*(%{)?').search(ln)
+            match = _caching_re_compile("^\s*%else\s*(%{)?").search(ln)
             if not match:
                 return False, ln
             if not match.group(1):
                 raise ValueError("'%{' is missing for %else substitution")
-            return True, ln[match.end():]
+            return True, ln[match.end() :]
 
         def tryParseEnd(ln):
-            if ln.startswith('%}'):
+            if ln.startswith("%}"):
                 return True, ln[2:]
             return False, ln
 
@@ -1477,16 +1574,17 @@ def parseText(ln, isNested):
             # parse everything until %if, or %} if we're parsing a
             # nested expression.
             match = _caching_re_compile(
-                '(.*?)(?:%if|%})' if isNested else '(.*?)(?:%if)').search(ln)
+                "(.*?)(?:%if|%})" if isNested else "(.*?)(?:%if)"
+            ).search(ln)
             if not match:
                 # there is no terminating pattern, so treat the whole
                 # line as text
-                return ln, ''
+                return ln, ""
             text_end = match.end(1)
             return ln[:text_end], ln[text_end:]
 
         def parseRecursive(ln, isNested):
-            result = ''
+            result = ""
             while len(ln):
                 if isNested:
                     found_end, _ = tryParseEnd(ln)
@@ -1501,7 +1599,7 @@ def parseRecursive(ln, isNested):
                     if not found_end:
                         raise ValueError("'%}' is missing for %if substitution")
 
-                    branch_else = ''
+                    branch_else = ""
                     found_else, ln = tryParseElse(ln)
                     if found_else:
                         branch_else, ln = parseRecursive(ln, isNested=True)
@@ -1528,9 +1626,9 @@ def parseRecursive(ln, isNested):
     def processLine(ln):
         # Apply substitutions
         ln = substituteIfElse(escapePercents(ln))
-        for a,b in substitutions:
+        for a, b in substitutions:
             if kIsWindows:
-                b = b.replace("\\","\\\\")
+                b = b.replace("\\", "\\\\")
             # re.compile() has a built-in LRU cache with 512 entries. In some
             # test suites lit ends up thrashing that cache, which made e.g.
             # check-llvm run 50% slower.  Use an explicit, unbounded cache
@@ -1554,9 +1652,10 @@ def processLineToFixedPoint(ln):
             steps += 1
 
         if processed != ln:
-            raise ValueError("Recursive substitution of '%s' did not complete "
-                             "in the provided recursion limit (%s)" % \
-                             (origLine, recursion_limit))
+            raise ValueError(
+                "Recursive substitution of '%s' did not complete "
+                "in the provided recursion limit (%s)" % (origLine, recursion_limit)
+            )
 
         return processed
 
@@ -1594,6 +1693,7 @@ class ParserKind(object):
     REDEFINE: A keyword taking a lit substitution redefinition. Ex
         'REDEFINE: %{name}=value'
     """
+
     TAG = 0
     COMMAND = 1
     LIST = 2
@@ -1605,27 +1705,29 @@ class ParserKind(object):
 
     @staticmethod
     def allowedKeywordSuffixes(value):
-        return { ParserKind.TAG:          ['.'],
-                 ParserKind.COMMAND:      [':'],
-                 ParserKind.LIST:         [':'],
-                 ParserKind.BOOLEAN_EXPR: [':'],
-                 ParserKind.INTEGER:      [':'],
-                 ParserKind.CUSTOM:       [':', '.'],
-                 ParserKind.DEFINE:       [':'],
-                 ParserKind.REDEFINE:     [':']
-               } [value]
+        return {
+            ParserKind.TAG: ["."],
+            ParserKind.COMMAND: [":"],
+            ParserKind.LIST: [":"],
+            ParserKind.BOOLEAN_EXPR: [":"],
+            ParserKind.INTEGER: [":"],
+            ParserKind.CUSTOM: [":", "."],
+            ParserKind.DEFINE: [":"],
+            ParserKind.REDEFINE: [":"],
+        }[value]
 
     @staticmethod
     def str(value):
-        return { ParserKind.TAG:          'TAG',
-                 ParserKind.COMMAND:      'COMMAND',
-                 ParserKind.LIST:         'LIST',
-                 ParserKind.BOOLEAN_EXPR: 'BOOLEAN_EXPR',
-                 ParserKind.INTEGER:      'INTEGER',
-                 ParserKind.CUSTOM:       'CUSTOM',
-                 ParserKind.DEFINE:       'DEFINE',
-                 ParserKind.REDEFINE:     'REDEFINE'
-               } [value]
+        return {
+            ParserKind.TAG: "TAG",
+            ParserKind.COMMAND: "COMMAND",
+            ParserKind.LIST: "LIST",
+            ParserKind.BOOLEAN_EXPR: "BOOLEAN_EXPR",
+            ParserKind.INTEGER: "INTEGER",
+            ParserKind.CUSTOM: "CUSTOM",
+            ParserKind.DEFINE: "DEFINE",
+            ParserKind.REDEFINE: "REDEFINE",
+        }[value]
 
 
 class IntegratedTestKeywordParser(object):
@@ -1636,22 +1738,26 @@ class IntegratedTestKeywordParser(object):
     parser: A custom parser. This value may only be specified with
             ParserKind.CUSTOM.
     """
+
     def __init__(self, keyword, kind, parser=None, initial_value=None):
         allowedSuffixes = ParserKind.allowedKeywordSuffixes(kind)
         if len(keyword) == 0 or keyword[-1] not in allowedSuffixes:
             if len(allowedSuffixes) == 1:
-                raise ValueError("Keyword '%s' of kind '%s' must end in '%s'"
-                                 % (keyword, ParserKind.str(kind),
-                                    allowedSuffixes[0]))
+                raise ValueError(
+                    "Keyword '%s' of kind '%s' must end in '%s'"
+                    % (keyword, ParserKind.str(kind), allowedSuffixes[0])
+                )
             else:
-                raise ValueError("Keyword '%s' of kind '%s' must end in "
-                                 " one of '%s'"
-                                 % (keyword, ParserKind.str(kind),
-                                    ' '.join(allowedSuffixes)))
+                raise ValueError(
+                    "Keyword '%s' of kind '%s' must end in "
+                    " one of '%s'"
+                    % (keyword, ParserKind.str(kind), " ".join(allowedSuffixes))
+                )
 
         if parser is not None and kind != ParserKind.CUSTOM:
-            raise ValueError("custom parsers can only be specified with "
-                             "ParserKind.CUSTOM")
+            raise ValueError(
+                "custom parsers can only be specified with " "ParserKind.CUSTOM"
+            )
         self.keyword = keyword
         self.kind = kind
         self.parsed_lines = []
@@ -1659,9 +1765,9 @@ def __init__(self, keyword, kind, parser=None, initial_value=None):
         self.parser = parser
 
         if kind == ParserKind.COMMAND:
-            self.parser = lambda line_number, line, output: \
-                                 self._handleCommand(line_number, line, output,
-                                                     self.keyword)
+            self.parser = lambda line_number, line, output: self._handleCommand(
+                line_number, line, output, self.keyword
+            )
         elif kind == ParserKind.LIST:
             self.parser = self._handleList
         elif kind == ParserKind.BOOLEAN_EXPR:
@@ -1675,14 +1781,13 @@ def __init__(self, keyword, kind, parser=None, initial_value=None):
                 raise ValueError("ParserKind.CUSTOM requires a custom parser")
             self.parser = parser
         elif kind == ParserKind.DEFINE:
-            self.parser = lambda line_number, line, output: \
-                                 self._handleSubst(line_number, line, output,
-                                                   self.keyword, new_subst=True)
+            self.parser = lambda line_number, line, output: self._handleSubst(
+                line_number, line, output, self.keyword, new_subst=True
+            )
         elif kind == ParserKind.REDEFINE:
-            self.parser = lambda line_number, line, output: \
-                                 self._handleSubst(line_number, line, output,
-                                                   self.keyword,
-                                                   new_subst=False)
+            self.parser = lambda line_number, line, output: self._handleSubst(
+                line_number, line, output, self.keyword, new_subst=False
+            )
         else:
             raise ValueError("Unknown kind '%s'" % kind)
 
@@ -1691,8 +1796,10 @@ def parseLine(self, line_number, line):
             self.parsed_lines += [(line_number, line)]
             self.value = self.parser(line_number, line, self.value)
         except ValueError as e:
-            raise ValueError(str(e) + ("\nin %s directive on test line %d" %
-                                       (self.keyword, line_number)))
+            raise ValueError(
+                str(e)
+                + ("\nin %s directive on test line %d" % (self.keyword, line_number))
+            )
 
     def getValue(self):
         return self.value
@@ -1700,17 +1807,19 @@ def getValue(self):
     @staticmethod
     def _handleTag(line_number, line, output):
         """A helper for parsing TAG type keywords"""
-        return (not line.strip() or output)
+        return not line.strip() or output
 
     @staticmethod
     def _substituteLineNumbers(line_number, line):
-        line = re.sub(r'%\(line\)', str(line_number), line)
+        line = re.sub(r"%\(line\)", str(line_number), line)
+
         def replace_line_number(match):
-            if match.group(1) == '+':
+            if match.group(1) == "+":
                 return str(line_number + int(match.group(2)))
-            if match.group(1) == '-':
+            if match.group(1) == "-":
                 return str(line_number - int(match.group(2)))
-        return re.sub(r'%\(line *([\+-]) *(\d+)\)', replace_line_number, line)
+
+        return re.sub(r"%\(line *([\+-]) *(\d+)\)", replace_line_number, line)
 
     @classmethod
     def _handleCommand(cls, line_number, line, output, keyword):
@@ -1720,20 +1829,17 @@ def _handleCommand(cls, line_number, line, output, keyword):
 
         # Collapse lines with trailing '\\', or add line with line number to
         # start a new pipeline.
-        if not output or not output[-1].add_continuation(line_number, keyword,
-                                                         line):
+        if not output or not output[-1].add_continuation(line_number, keyword, line):
             if output is None:
                 output = []
             pdbg = "%dbg({keyword} at line {line_number})".format(
-                keyword=keyword,
-                line_number=line_number)
-            assert re.match(kPdbgRegex + "$", pdbg), \
-                   "kPdbgRegex expected to match actual %dbg usage"
-            line = "{pdbg} {real_command}".format(
-                pdbg=pdbg,
-                real_command=line)
-            output.append(CommandDirective(line_number, line_number, keyword,
-                                           line))
+                keyword=keyword, line_number=line_number
+            )
+            assert re.match(
+                kPdbgRegex + "$", pdbg
+            ), "kPdbgRegex expected to match actual %dbg usage"
+            line = "{pdbg} {real_command}".format(pdbg=pdbg, real_command=line)
+            output.append(CommandDirective(line_number, line_number, keyword, line))
         return output
 
     @staticmethod
@@ -1741,7 +1847,7 @@ def _handleList(line_number, line, output):
         """A parser for LIST type keywords"""
         if output is None:
             output = []
-        output.extend([s.strip() for s in line.split(',')])
+        output.extend([s.strip() for s in line.split(",")])
         return output
 
     @staticmethod
@@ -1752,15 +1858,19 @@ def _handleSingleInteger(line_number, line, output):
         try:
             n = int(line)
         except ValueError:
-            raise ValueError("INTEGER parser requires the input to be an integer (got {})".format(line))
+            raise ValueError(
+                "INTEGER parser requires the input to be an integer (got {})".format(
+                    line
+                )
+            )
         output.append(n)
         return output
 
     @staticmethod
     def _handleBooleanExpr(line_number, line, output):
         """A parser for BOOLEAN_EXPR type keywords"""
-        parts = [s.strip() for s in line.split(',') if s.strip() != '']
-        if output and output[-1][-1] == '\\':
+        parts = [s.strip() for s in line.split(",") if s.strip() != ""]
+        if output and output[-1][-1] == "\\":
             output[-1] = output[-1][:-1] + parts[0]
             del parts[0]
         if output is None:
@@ -1769,7 +1879,7 @@ def _handleBooleanExpr(line_number, line, output):
         # Evaluate each expression to verify syntax.
         # We don't want any results, just the raised ValueError.
         for s in output:
-            if s != '*' and not s.endswith('\\'):
+            if s != "*" and not s.endswith("\\"):
                 BooleanExpression.evaluate(s, [])
         return output
 
@@ -1781,13 +1891,13 @@ def _handleSubst(cls, line_number, line, output, keyword, new_subst):
             return output
         if output is None:
             output = []
-        output.append(SubstDirective(line_number, line_number, keyword,
-                                     new_subst, line))
+        output.append(
+            SubstDirective(line_number, line_number, keyword, new_subst, line)
+        )
         return output
 
 
-def _parseKeywords(sourcepath, additional_parsers=[],
-                   require_script=True):
+def _parseKeywords(sourcepath, additional_parsers=[], require_script=True):
     """_parseKeywords
 
     Scan an LLVM/Clang style integrated test script and extract all the lines
@@ -1801,41 +1911,43 @@ def _parseKeywords(sourcepath, additional_parsers=[],
     # Install the built-in keyword parsers.
     script = []
     builtin_parsers = [
-        IntegratedTestKeywordParser('RUN:', ParserKind.COMMAND, initial_value=script),
-        IntegratedTestKeywordParser('XFAIL:', ParserKind.BOOLEAN_EXPR),
-        IntegratedTestKeywordParser('REQUIRES:', ParserKind.BOOLEAN_EXPR),
-        IntegratedTestKeywordParser('UNSUPPORTED:', ParserKind.BOOLEAN_EXPR),
-        IntegratedTestKeywordParser('ALLOW_RETRIES:', ParserKind.INTEGER),
-        IntegratedTestKeywordParser('END.', ParserKind.TAG),
-        IntegratedTestKeywordParser('DEFINE:', ParserKind.DEFINE,
-                                    initial_value=script),
-        IntegratedTestKeywordParser('REDEFINE:', ParserKind.REDEFINE,
-                                    initial_value=script)
+        IntegratedTestKeywordParser("RUN:", ParserKind.COMMAND, initial_value=script),
+        IntegratedTestKeywordParser("XFAIL:", ParserKind.BOOLEAN_EXPR),
+        IntegratedTestKeywordParser("REQUIRES:", ParserKind.BOOLEAN_EXPR),
+        IntegratedTestKeywordParser("UNSUPPORTED:", ParserKind.BOOLEAN_EXPR),
+        IntegratedTestKeywordParser("ALLOW_RETRIES:", ParserKind.INTEGER),
+        IntegratedTestKeywordParser("END.", ParserKind.TAG),
+        IntegratedTestKeywordParser("DEFINE:", ParserKind.DEFINE, initial_value=script),
+        IntegratedTestKeywordParser(
+            "REDEFINE:", ParserKind.REDEFINE, initial_value=script
+        ),
     ]
     keyword_parsers = {p.keyword: p for p in builtin_parsers}
 
     # Install user-defined additional parsers.
     for parser in additional_parsers:
         if not isinstance(parser, IntegratedTestKeywordParser):
-            raise ValueError('Additional parser must be an instance of '
-                             'IntegratedTestKeywordParser')
+            raise ValueError(
+                "Additional parser must be an instance of "
+                "IntegratedTestKeywordParser"
+            )
         if parser.keyword in keyword_parsers:
-            raise ValueError("Parser for keyword '%s' already exists"
-                             % parser.keyword)
+            raise ValueError("Parser for keyword '%s' already exists" % parser.keyword)
         keyword_parsers[parser.keyword] = parser
 
     # Collect the test lines from the script.
-    for line_number, command_type, ln in \
-            parseIntegratedTestScriptCommands(sourcepath,
-                                              keyword_parsers.keys()):
+    for line_number, command_type, ln in parseIntegratedTestScriptCommands(
+        sourcepath, keyword_parsers.keys()
+    ):
         parser = keyword_parsers[command_type]
         parser.parseLine(line_number, ln)
-        if command_type == 'END.' and parser.getValue() is True:
+        if command_type == "END." and parser.getValue() is True:
             break
 
     # Verify the script contains a run line.
-    if require_script and not any(isinstance(directive, CommandDirective)
-                                  for directive in script):
+    if require_script and not any(
+        isinstance(directive, CommandDirective) for directive in script
+    ):
         raise ValueError("Test has no 'RUN:' line")
 
     # Check for unterminated run or subst lines.
@@ -1846,9 +1958,11 @@ def _parseKeywords(sourcepath, additional_parsers=[],
     # here.
     for directive in script:
         if directive.needs_continuation():
-            raise ValueError(f"Test has unterminated '{directive.keyword}' "
-                             f"directive (with '\\') "
-                             f"{directive.get_location()}")
+            raise ValueError(
+                f"Test has unterminated '{directive.keyword}' "
+                f"directive (with '\\') "
+                f"{directive.get_location()}"
+            )
 
     # Check boolean expressions for unterminated lines.
     for key in keyword_parsers:
@@ -1856,20 +1970,20 @@ def _parseKeywords(sourcepath, additional_parsers=[],
         if kp.kind != ParserKind.BOOLEAN_EXPR:
             continue
         value = kp.getValue()
-        if value and value[-1][-1] == '\\':
-            raise ValueError("Test has unterminated '{key}' lines (with '\\')"
-                             .format(key=key))
+        if value and value[-1][-1] == "\\":
+            raise ValueError(
+                "Test has unterminated '{key}' lines (with '\\')".format(key=key)
+            )
 
     # Make sure there's at most one ALLOW_RETRIES: line
-    allowed_retries = keyword_parsers['ALLOW_RETRIES:'].getValue()
+    allowed_retries = keyword_parsers["ALLOW_RETRIES:"].getValue()
     if allowed_retries and len(allowed_retries) > 1:
         raise ValueError("Test has more than one ALLOW_RETRIES lines")
 
     return {p.keyword: p.getValue() for p in keyword_parsers.values()}
 
 
-def parseIntegratedTestScript(test, additional_parsers=[],
-                              require_script=True):
+def parseIntegratedTestScript(test, additional_parsers=[], require_script=True):
     """parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
     script and extract the lines to 'RUN' as well as 'XFAIL', 'REQUIRES',
     'UNSUPPORTED' and 'ALLOW_RETRIES' information into the given test.
@@ -1883,42 +1997,46 @@ def parseIntegratedTestScript(test, additional_parsers=[],
     """
     # Parse the test sources and extract test properties
     try:
-        parsed = _parseKeywords(test.getSourcePath(), additional_parsers,
-                                require_script)
+        parsed = _parseKeywords(
+            test.getSourcePath(), additional_parsers, require_script
+        )
     except ValueError as e:
         return lit.Test.Result(Test.UNRESOLVED, str(e))
-    script = parsed['RUN:'] or []
-    assert parsed['DEFINE:'] == script
-    assert parsed['REDEFINE:'] == script
-    test.xfails += parsed['XFAIL:'] or []
-    test.requires += parsed['REQUIRES:'] or []
-    test.unsupported += parsed['UNSUPPORTED:'] or []
-    if parsed['ALLOW_RETRIES:']:
-        test.allowed_retries = parsed['ALLOW_RETRIES:'][0]
+    script = parsed["RUN:"] or []
+    assert parsed["DEFINE:"] == script
+    assert parsed["REDEFINE:"] == script
+    test.xfails += parsed["XFAIL:"] or []
+    test.requires += parsed["REQUIRES:"] or []
+    test.unsupported += parsed["UNSUPPORTED:"] or []
+    if parsed["ALLOW_RETRIES:"]:
+        test.allowed_retries = parsed["ALLOW_RETRIES:"][0]
 
     # Enforce REQUIRES:
     missing_required_features = test.getMissingRequiredFeatures()
     if missing_required_features:
-        msg = ', '.join(missing_required_features)
-        return lit.Test.Result(Test.UNSUPPORTED,
-                               "Test requires the following unavailable "
-                               "features: %s" % msg)
+        msg = ", ".join(missing_required_features)
+        return lit.Test.Result(
+            Test.UNSUPPORTED,
+            "Test requires the following unavailable " "features: %s" % msg,
+        )
 
     # Enforce UNSUPPORTED:
     unsupported_features = test.getUnsupportedFeatures()
     if unsupported_features:
-        msg = ', '.join(unsupported_features)
+        msg = ", ".join(unsupported_features)
         return lit.Test.Result(
             Test.UNSUPPORTED,
-            "Test does not support the following features "
-            "and/or targets: %s" % msg)
+            "Test does not support the following features " "and/or targets: %s" % msg,
+        )
 
     # Enforce limit_to_features.
     if not test.isWithinFeatureLimits():
-        msg = ', '.join(test.config.limit_to_features)
-        return lit.Test.Result(Test.UNSUPPORTED,
-                               "Test does not require any of the features "
-                               "specified in limit_to_features: %s" % msg)
+        msg = ", ".join(test.config.limit_to_features)
+        return lit.Test.Result(
+            Test.UNSUPPORTED,
+            "Test does not require any of the features "
+            "specified in limit_to_features: %s" % msg,
+        )
 
     return script
 
@@ -1932,7 +2050,7 @@ def runOnce(execdir):
         if isinstance(res, lit.Test.Result):
             return res
 
-        out,err,exitCode,timeoutInfo = res
+        out, err, exitCode, timeoutInfo = res
         if exitCode == 0:
             status = Test.PASS
         else:
@@ -1940,7 +2058,7 @@ def runOnce(execdir):
                 status = Test.FAIL
             else:
                 status = Test.TIMEOUT
-        return out,err,exitCode,timeoutInfo,status
+        return out, err, exitCode, timeoutInfo, status
 
     # Create the output directory if it does not already exist.
     lit.util.mkdir_p(os.path.dirname(tmpBase))
@@ -1953,7 +2071,7 @@ def runOnce(execdir):
         if isinstance(res, lit.Test.Result):
             return res
 
-        out,err,exitCode,timeoutInfo,status = res
+        out, err, exitCode, timeoutInfo, status = res
         if status != Test.FAIL:
             break
 
@@ -1963,8 +2081,7 @@ def runOnce(execdir):
         status = Test.FLAKYPASS
 
     # Form the output log.
-    output = """Script:\n--\n%s\n--\nExit Code: %d\n""" % (
-        '\n'.join(script), exitCode)
+    output = """Script:\n--\n%s\n--\nExit Code: %d\n""" % ("\n".join(script), exitCode)
 
     if timeoutInfo is not None:
         output += """Timeout: %s\n""" % (timeoutInfo,)
@@ -1979,11 +2096,11 @@ def runOnce(execdir):
     return lit.Test.Result(status, output)
 
 
-def executeShTest(test, litConfig, useExternalSh,
-                  extra_substitutions=[],
-                  preamble_commands=[]):
+def executeShTest(
+    test, litConfig, useExternalSh, extra_substitutions=[], preamble_commands=[]
+):
     if test.config.unsupported:
-        return lit.Test.Result(Test.UNSUPPORTED, 'Test is unsupported')
+        return lit.Test.Result(Test.UNSUPPORTED, "Test is unsupported")
 
     script = list(preamble_commands)
     parsed = parseIntegratedTestScript(test, require_script=not script)
@@ -1996,10 +2113,15 @@ def executeShTest(test, litConfig, useExternalSh,
 
     tmpDir, tmpBase = getTempPaths(test)
     substitutions = list(extra_substitutions)
-    substitutions += getDefaultSubstitutions(test, tmpDir, tmpBase,
-                                             normalize_slashes=useExternalSh)
-    conditions = { feature: True for feature in test.config.available_features }
-    script = applySubstitutions(script, substitutions, conditions,
-                                recursion_limit=test.config.recursiveExpansionLimit)
+    substitutions += getDefaultSubstitutions(
+        test, tmpDir, tmpBase, normalize_slashes=useExternalSh
+    )
+    conditions = {feature: True for feature in test.config.available_features}
+    script = applySubstitutions(
+        script,
+        substitutions,
+        conditions,
+        recursion_limit=test.config.recursiveExpansionLimit,
+    )
 
     return _runShTest(test, litConfig, useExternalSh, script, tmpBase)
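
A self-contained sketch (a toy parser, not lit's actual _parseKeywords) of the 'RUN:' continuation handling checked above: lines ending in '\' are joined onto the next 'RUN:' line, and a trailing '\' at end of file is the unterminated-directive error raised in the hunks above.

    # Toy 'RUN:' parser; simplified format, not the real lit implementation.
    def parse_run_lines(text):
        script = []
        for line in text.splitlines():
            _, found, rest = line.partition("RUN:")
            if not found:
                continue
            rest = rest.strip()
            if script and script[-1].endswith("\\"):
                # Join this command onto the unterminated previous one.
                script[-1] = script[-1][:-1] + rest
            else:
                script.append(rest)
        if script and script[-1].endswith("\\"):
            raise ValueError("Test has unterminated 'RUN:' directive (with '\\')")
        if not script:
            raise ValueError("Test has no 'RUN:' line")
        return script

    # parse_run_lines("; RUN: echo a \\\n; RUN: b") == ["echo a b"]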

diff  --git a/llvm/utils/lit/lit/TestTimes.py b/llvm/utils/lit/lit/TestTimes.py
index 3fc22203141f0..a2c0e0527b84b 100644
--- a/llvm/utils/lit/lit/TestTimes.py
+++ b/llvm/utils/lit/lit/TestTimes.py
@@ -3,15 +3,14 @@
 
 def read_test_times(suite):
     test_times = {}
-    test_times_file = os.path.join(suite.exec_root, '.lit_test_times.txt')
+    test_times_file = os.path.join(suite.exec_root, ".lit_test_times.txt")
     if not os.path.exists(test_times_file):
-        test_times_file = os.path.join(
-            suite.source_root, '.lit_test_times.txt')
+        test_times_file = os.path.join(suite.source_root, ".lit_test_times.txt")
     if os.path.exists(test_times_file):
-        with open(test_times_file, 'r') as time_file:
+        with open(test_times_file, "r") as time_file:
             for line in time_file:
                 time, path = line.split(maxsplit=1)
-                test_times[path.strip('\n')] = float(time)
+                test_times[path.strip("\n")] = float(time)
     return test_times
 
 
@@ -28,14 +27,14 @@ def record_test_times(tests, lit_config):
         # used as an actual path to a filesystem API, therefore we use '/' as
         # the canonical separator so that Unix and Windows machines can share
         # timing data.
-        times_by_suite[t.suite.exec_root]['/'.join(t.path_in_suite)] = time
+        times_by_suite[t.suite.exec_root]["/".join(t.path_in_suite)] = time
 
     for s, value in times_by_suite.items():
         try:
-            path = os.path.join(s, '.lit_test_times.txt')
-            with open(path, 'w') as time_file:
+            path = os.path.join(s, ".lit_test_times.txt")
+            with open(path, "w") as time_file:
                 for name, time in value.items():
-                    time_file.write(("%e" % time) + ' ' + name + '\n')
+                    time_file.write(("%e" % time) + " " + name + "\n")
         except:
-            lit_config.warning('Could not save test time: ' + path)
+            lit_config.warning("Could not save test time: " + path)
             continue
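
The record format read and written above is one line per test: a float in '%e' notation, a space, then the test path, with '/' as the canonical separator so Unix and Windows hosts can share timing data. A minimal round-trip sketch of that format:

    import os

    def write_times(root, times):  # times: {"suite/a.test": seconds}
        with open(os.path.join(root, ".lit_test_times.txt"), "w") as f:
            for name, t in times.items():
                f.write(("%e" % t) + " " + name + "\n")

    def read_times(root):
        times = {}
        with open(os.path.join(root, ".lit_test_times.txt"), "r") as f:
            for line in f:
                t, path = line.split(maxsplit=1)
                times[path.strip("\n")] = float(t)
        return times

    # write_times(".", {"suite/a.test": 0.25}) then read_times(".")
    # round-trips the dictionary.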

diff  --git a/llvm/utils/lit/lit/TestingConfig.py b/llvm/utils/lit/lit/TestingConfig.py
index 01f5ca4946264..76fd665020097 100644
--- a/llvm/utils/lit/lit/TestingConfig.py
+++ b/llvm/utils/lit/lit/TestingConfig.py
@@ -3,7 +3,7 @@
 
 
 class TestingConfig(object):
-    """"
+    """ "
     TestingConfig - Information on the tests inside a suite.
     """
 
@@ -16,65 +16,64 @@ def fromdefaults(litConfig):
         """
         # Set the environment based on the command line arguments.
         environment = {
-            'PATH' : os.pathsep.join(litConfig.path +
-                                     [os.environ.get('PATH','')]),
-            'LLVM_DISABLE_CRASH_REPORT' : '1',
-            }
+            "PATH": os.pathsep.join(litConfig.path + [os.environ.get("PATH", "")]),
+            "LLVM_DISABLE_CRASH_REPORT": "1",
+        }
 
         pass_vars = [
-            'LIBRARY_PATH',
-            'LD_LIBRARY_PATH',
-            'SYSTEMROOT',
-            'TERM',
-            'CLANG',
-            'LLDB',
-            'LD_PRELOAD',
-            'LLVM_SYMBOLIZER_PATH',
-            'LLVM_PROFILE_FILE',
-            'ASAN_SYMBOLIZER_PATH',
-            'HWASAN_SYMBOLIZER_PATH',
-            'LSAN_SYMBOLIZER_PATH',
-            'MSAN_SYMBOLIZER_PATH',
-            'TSAN_SYMBOLIZER_PATH',
-            'UBSAN_SYMBOLIZER_PATH',
-            'ASAN_OPTIONS',
-            'LSAN_OPTIONS',
-            'HWASAN_OPTIONS',
-            'MSAN_OPTIONS',
-            'TSAN_OPTIONS',
-            'UBSAN_OPTIONS',
-            'ADB',
-            'ANDROID_SERIAL',
-            'SSH_AUTH_SOCK',
-            'SANITIZER_IGNORE_CVE_2016_2143',
-            'TMPDIR',
-            'TMP',
-            'TEMP',
-            'TEMPDIR',
-            'AVRLIT_BOARD',
-            'AVRLIT_PORT',
-            'FILECHECK_OPTS',
-            'VCINSTALLDIR',
-            'VCToolsinstallDir',
-            'VSINSTALLDIR',
-            'WindowsSdkDir',
-            'WindowsSDKLibVersion',
-            'SOURCE_DATE_EPOCH',
-            'GTEST_FILTER',
-            'DFLTCC',
+            "LIBRARY_PATH",
+            "LD_LIBRARY_PATH",
+            "SYSTEMROOT",
+            "TERM",
+            "CLANG",
+            "LLDB",
+            "LD_PRELOAD",
+            "LLVM_SYMBOLIZER_PATH",
+            "LLVM_PROFILE_FILE",
+            "ASAN_SYMBOLIZER_PATH",
+            "HWASAN_SYMBOLIZER_PATH",
+            "LSAN_SYMBOLIZER_PATH",
+            "MSAN_SYMBOLIZER_PATH",
+            "TSAN_SYMBOLIZER_PATH",
+            "UBSAN_SYMBOLIZER_PATH",
+            "ASAN_OPTIONS",
+            "LSAN_OPTIONS",
+            "HWASAN_OPTIONS",
+            "MSAN_OPTIONS",
+            "TSAN_OPTIONS",
+            "UBSAN_OPTIONS",
+            "ADB",
+            "ANDROID_SERIAL",
+            "SSH_AUTH_SOCK",
+            "SANITIZER_IGNORE_CVE_2016_2143",
+            "TMPDIR",
+            "TMP",
+            "TEMP",
+            "TEMPDIR",
+            "AVRLIT_BOARD",
+            "AVRLIT_PORT",
+            "FILECHECK_OPTS",
+            "VCINSTALLDIR",
+            "VCToolsinstallDir",
+            "VSINSTALLDIR",
+            "WindowsSdkDir",
+            "WindowsSDKLibVersion",
+            "SOURCE_DATE_EPOCH",
+            "GTEST_FILTER",
+            "DFLTCC",
         ]
 
-        if sys.platform.startswith('aix'):
-            pass_vars += ['LIBPATH']
-        elif sys.platform == 'win32':
+        if sys.platform.startswith("aix"):
+            pass_vars += ["LIBPATH"]
+        elif sys.platform == "win32":
             pass_vars += [
-                'COMSPEC',
-                'INCLUDE',
-                'LIB',
-                'PATHEXT',
-                'USERPROFILE',
+                "COMSPEC",
+                "INCLUDE",
+                "LIB",
+                "PATHEXT",
+                "USERPROFILE",
             ]
-            environment['PYTHONBUFFERED'] = '1'
+            environment["PYTHONBUFFERED"] = "1"
             # Avoid Windows heuristics which try to detect potential installer
             # programs (which may need to run with elevated privileges) and ask
             # if the user wants to run them in that way. This heuristic may
@@ -82,10 +81,10 @@ def fromdefaults(litConfig):
             # a substring of "dispatch"), "update", "setup", etc. Set this
             # environment variable indicating that we want to execute them with
             # the current user.
-            environment['__COMPAT_LAYER'] = 'RunAsInvoker'
+            environment["__COMPAT_LAYER"] = "RunAsInvoker"
 
         for var in pass_vars:
-            val = os.environ.get(var, '')
+            val = os.environ.get(var, "")
             # Check for empty string as some variables such as LD_PRELOAD cannot be empty
             # ('') for OS's such as OpenBSD.
             if val:
@@ -94,23 +93,25 @@ def fromdefaults(litConfig):
         # Set the default available features based on the LitConfig.
         available_features = []
         if litConfig.useValgrind:
-            available_features.append('valgrind')
+            available_features.append("valgrind")
             if litConfig.valgrindLeakCheck:
-                available_features.append('vg_leak')
-
-        return TestingConfig(None,
-                             name = '<unnamed>',
-                             suffixes = set(),
-                             test_format = None,
-                             environment = environment,
-                             substitutions = [],
-                             unsupported = False,
-                             test_exec_root = None,
-                             test_source_root = None,
-                             excludes = [],
-                             available_features = available_features,
-                             pipefail = True,
-                             standalone_tests = False)
+                available_features.append("vg_leak")
+
+        return TestingConfig(
+            None,
+            name="<unnamed>",
+            suffixes=set(),
+            test_format=None,
+            environment=environment,
+            substitutions=[],
+            unsupported=False,
+            test_exec_root=None,
+            test_source_root=None,
+            excludes=[],
+            available_features=available_features,
+            pipefail=True,
+            standalone_tests=False,
+        )
 
     def load_from_path(self, path, litConfig):
         """
@@ -126,18 +127,18 @@ def load_from_path(self, path, litConfig):
         try:
             data = f.read()
         except:
-            litConfig.fatal('unable to load config file: %r' % (path,))
+            litConfig.fatal("unable to load config file: %r" % (path,))
         f.close()
 
         # Execute the config script to initialize the object.
         cfg_globals = dict(globals())
-        cfg_globals['config'] = self
-        cfg_globals['lit_config'] = litConfig
-        cfg_globals['__file__'] = path
+        cfg_globals["config"] = self
+        cfg_globals["lit_config"] = litConfig
+        cfg_globals["__file__"] = path
         try:
-            exec(compile(data, path, 'exec'), cfg_globals, None)
+            exec(compile(data, path, "exec"), cfg_globals, None)
             if litConfig.debug:
-                litConfig.note('... loaded config %r' % path)
+                litConfig.note("... loaded config %r" % path)
         except SystemExit:
             e = sys.exc_info()[1]
             # We allow normal system exit inside a config file to just
@@ -146,17 +147,32 @@ def load_from_path(self, path, litConfig):
                 raise
         except:
             import traceback
+
             litConfig.fatal(
-                'unable to parse config file %r, traceback: %s' % (
-                    path, traceback.format_exc()))
+                "unable to parse config file %r, traceback: %s"
+                % (path, traceback.format_exc())
+            )
         self.finish(litConfig)
 
-    def __init__(self, parent, name, suffixes, test_format,
-                 environment, substitutions, unsupported,
-                 test_exec_root, test_source_root, excludes,
-                 available_features, pipefail, limit_to_features = [],
-                 is_early = False, parallelism_group = None,
-                 standalone_tests = False):
+    def __init__(
+        self,
+        parent,
+        name,
+        suffixes,
+        test_format,
+        environment,
+        substitutions,
+        unsupported,
+        test_exec_root,
+        test_source_root,
+        excludes,
+        available_features,
+        pipefail,
+        limit_to_features=[],
+        is_early=False,
+        parallelism_group=None,
+        standalone_tests=False,
+    ):
         self.parent = parent
         self.name = str(name)
         self.suffixes = set(suffixes)
@@ -184,9 +200,17 @@ def recursiveExpansionLimit(self):
     @recursiveExpansionLimit.setter
     def recursiveExpansionLimit(self, value):
         if value is not None and not isinstance(value, int):
-            raise ValueError('recursiveExpansionLimit must be either None or an integer (got <{}>)'.format(value))
+            raise ValueError(
+                "recursiveExpansionLimit must be either None or an integer (got <{}>)".format(
+                    value
+                )
+            )
         if isinstance(value, int) and value < 0:
-            raise ValueError('recursiveExpansionLimit must be a non-negative integer (got <{}>)'.format(value))
+            raise ValueError(
+                "recursiveExpansionLimit must be a non-negative integer (got <{}>)".format(
+                    value
+                )
+            )
         self._recursiveExpansionLimit = value
 
     def finish(self, litConfig):
@@ -214,6 +238,7 @@ def root(self):
         else:
             return self.parent.root
 
+
 class SubstituteCaptures:
     """
     Helper class to indicate that the substitutions contains backreferences.
@@ -224,6 +249,7 @@ class SubstituteCaptures:
         config.substutions.append(('\b[^ ]*.cpp', SubstituteCaptures('\0.txt')))
 
     """
+
     def __init__(self, substitution):
         self.substitution = substitution
 
@@ -238,4 +264,3 @@ def __len__(self):
 
     def __getitem__(self, item):
         return self.substitution.__getitem__(item)
-
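
The load_from_path() hunk above keeps lit's config-loading model intact: a config file is ordinary Python executed with 'config', 'lit_config' and '__file__' pre-bound in its globals. A self-contained sketch of that mechanism (toy Config class, not lit's TestingConfig):

    class Config:
        pass

    config = Config()
    cfg_globals = dict(globals())
    cfg_globals["config"] = config
    cfg_globals["__file__"] = "<lit.cfg>"

    # A lit.cfg is just Python that mutates the injected 'config' object.
    source = 'config.name = "MySuite"\nconfig.suffixes = {".ll", ".c"}'
    exec(compile(source, "<lit.cfg>", "exec"), cfg_globals, None)

    assert config.name == "MySuite"
    assert config.suffixes == {".ll", ".c"}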

diff  --git a/llvm/utils/lit/lit/__init__.py b/llvm/utils/lit/lit/__init__.py
index 85c3e88042ea8..a90e103d8a1a1 100644
--- a/llvm/utils/lit/lit/__init__.py
+++ b/llvm/utils/lit/lit/__init__.py
@@ -1,8 +1,8 @@
 """'lit' Testing Tool"""
 
-__author__ = 'Daniel Dunbar'
-__email__ = 'daniel at minormatter.com'
+__author__ = "Daniel Dunbar"
+__email__ = "daniel at minormatter.com"
 __versioninfo__ = (17, 0, 0)
-__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
+__version__ = ".".join(str(v) for v in __versioninfo__) + "dev"
 
 __all__ = []
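
The version string assembled above joins the tuple with dots and appends a 'dev' suffix:

    # (17, 0, 0) -> "17.0.0dev"
    __versioninfo__ = (17, 0, 0)
    print(".".join(str(v) for v in __versioninfo__) + "dev")  # 17.0.0dev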

diff  --git a/llvm/utils/lit/lit/builtin_commands/cat.py b/llvm/utils/lit/lit/builtin_commands/cat.py
index fab9dccda27b8..37f55c0aef210 100644
--- a/llvm/utils/lit/lit/builtin_commands/cat.py
+++ b/llvm/utils/lit/lit/builtin_commands/cat.py
@@ -1,38 +1,40 @@
 import getopt
 import sys
+
 try:
     from StringIO import StringIO
 except ImportError:
     from io import StringIO
 
+
 def convertToCaretAndMNotation(data):
-   newdata = StringIO()
-   if isinstance(data, str):
-       data = bytearray(data)
+    newdata = StringIO()
+    if isinstance(data, str):
+        data = bytearray(data)
 
-   for intval in data:
-       if intval == 9 or intval == 10:
-           newdata.write(chr(intval))
-           continue
-       if intval > 127:
-           intval = intval -128
-           newdata.write("M-")
-       if intval < 32:
-           newdata.write("^")
-           newdata.write(chr(intval+64))
-       elif intval == 127:
-           newdata.write("^?")
-       else:
-           newdata.write(chr(intval))
+    for intval in data:
+        if intval == 9 or intval == 10:
+            newdata.write(chr(intval))
+            continue
+        if intval > 127:
+            intval = intval - 128
+            newdata.write("M-")
+        if intval < 32:
+            newdata.write("^")
+            newdata.write(chr(intval + 64))
+        elif intval == 127:
+            newdata.write("^?")
+        else:
+            newdata.write(chr(intval))
 
-   return newdata.getvalue().encode()
+    return newdata.getvalue().encode()
 
 
 def main(argv):
     arguments = argv[1:]
     short_options = "v"
     long_options = ["show-nonprinting"]
-    show_nonprinting = False;
+    show_nonprinting = False
 
     try:
         options, filenames = getopt.gnu_getopt(arguments, short_options, long_options)
@@ -42,17 +44,18 @@ def main(argv):
 
     for option, value in options:
         if option == "-v" or option == "--show-nonprinting":
-            show_nonprinting = True;
+            show_nonprinting = True
 
-    writer = getattr(sys.stdout, 'buffer', None)
+    writer = getattr(sys.stdout, "buffer", None)
     if writer is None:
         writer = sys.stdout
         if sys.platform == "win32":
             import os, msvcrt
-            msvcrt.setmode(sys.stdout.fileno(),os.O_BINARY)
+
+            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     for filename in filenames:
         try:
-            fileToCat = open(filename,"rb")
+            fileToCat = open(filename, "rb")
             contents = fileToCat.read()
             if show_nonprinting:
                 contents = convertToCaretAndMNotation(contents)
@@ -63,5 +66,6 @@ def main(argv):
             sys.stderr.write(str(error))
             sys.exit(1)
 
+
 if __name__ == "__main__":
     main(sys.argv)
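
A usage note for convertToCaretAndMNotation as defined above: tab (9) and newline (10) pass through, other control bytes render as ^X, DEL as ^?, and bytes above 127 gain an "M-" prefix, matching `cat -v`:

    # Worked example, assuming convertToCaretAndMNotation is in scope:
    # 0x09 passes through, 0x01 -> "^A", 0xC1 (193) -> "M-A".
    print(convertToCaretAndMNotation(b"\tA\x01\xc1"))  # b'\tA^AM-A'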

diff  --git a/llvm/utils/lit/lit/builtin_commands/diff.py b/llvm/utils/lit/lit/builtin_commands/diff.py
index 1b96a1504e5c1..3a91920f9b5ed 100644
--- a/llvm/utils/lit/lit/builtin_commands/diff.py
+++ b/llvm/utils/lit/lit/builtin_commands/diff.py
@@ -10,7 +10,8 @@
 import util
 from util import to_string
 
-class DiffFlags():
+
+class DiffFlags:
     def __init__(self):
         self.ignore_all_space = False
         self.ignore_space_change = False
@@ -21,6 +22,7 @@ def __init__(self):
         self.recursive_diff = False
         self.strip_trailing_cr = False
 
+
 def getDirTree(path, basedir=""):
     # Tree is a tuple of form (dirname, child_trees).
     # An empty dir has child_trees = [], a file has child_trees = None.
@@ -32,34 +34,41 @@ def getDirTree(path, basedir=""):
             child_trees.append((filename, None))
         return path, sorted(child_trees)
 
+
 def compareTwoFiles(flags, filepaths):
     filelines = []
     for file in filepaths:
         if file == "-":
             stdin_fileno = sys.stdin.fileno()
-            with os.fdopen(os.dup(stdin_fileno), 'rb') as stdin_bin:
+            with os.fdopen(os.dup(stdin_fileno), "rb") as stdin_bin:
                 filelines.append(stdin_bin.readlines())
         else:
-            with open(file, 'rb') as file_bin:
+            with open(file, "rb") as file_bin:
                 filelines.append(file_bin.readlines())
 
     try:
-        return compareTwoTextFiles(flags, filepaths, filelines,
-                                   locale.getpreferredencoding(False))
+        return compareTwoTextFiles(
+            flags, filepaths, filelines, locale.getpreferredencoding(False)
+        )
     except UnicodeDecodeError:
         try:
             return compareTwoTextFiles(flags, filepaths, filelines, "utf-8")
         except:
             return compareTwoBinaryFiles(flags, filepaths, filelines)
 
+
 def compareTwoBinaryFiles(flags, filepaths, filelines):
     exitCode = 0
-    if hasattr(difflib, 'diff_bytes'):
+    if hasattr(difflib, "diff_bytes"):
         # python 3.5 or newer
-        diffs = difflib.diff_bytes(difflib.unified_diff, filelines[0],
-                                   filelines[1], filepaths[0].encode(),
-                                   filepaths[1].encode(),
-                                   n = flags.num_context_lines)
+        diffs = difflib.diff_bytes(
+            difflib.unified_diff,
+            filelines[0],
+            filelines[1],
+            filepaths[0].encode(),
+            filepaths[1].encode(),
+            n=flags.num_context_lines,
+        )
         diffs = [diff.decode(errors="backslashreplace") for diff in diffs]
     else:
         # python 2.7
@@ -67,14 +76,20 @@ def compareTwoBinaryFiles(flags, filepaths, filelines):
             func = difflib.unified_diff
         else:
             func = difflib.context_diff
-        diffs = func(filelines[0], filelines[1], filepaths[0], filepaths[1],
-                     n = flags.num_context_lines)
+        diffs = func(
+            filelines[0],
+            filelines[1],
+            filepaths[0],
+            filepaths[1],
+            n=flags.num_context_lines,
+        )
 
     for diff in diffs:
         sys.stdout.write(to_string(diff))
         exitCode = 1
     return exitCode
 
+
 def compareTwoTextFiles(flags, filepaths, filelines_bin, encoding):
     filelines = []
     for lines_bin in filelines_bin:
@@ -85,30 +100,43 @@ def compareTwoTextFiles(flags, filepaths, filelines_bin, encoding):
         filelines.append(lines)
 
     exitCode = 0
+
     def compose2(f, g):
         return lambda x: f(g(x))
 
     f = lambda x: x
     if flags.strip_trailing_cr:
-        f = compose2(lambda line: line.replace('\r\n', '\n'), f)
+        f = compose2(lambda line: line.replace("\r\n", "\n"), f)
     if flags.ignore_all_space or flags.ignore_space_change:
-        ignoreSpace = lambda line, separator: \
-                          separator.join(line.split()) + "\n"
-        ignoreAllSpaceOrSpaceChange = functools.partial(ignoreSpace, separator='' if flags.ignore_all_space else ' ')
+        ignoreSpace = lambda line, separator: separator.join(line.split()) + "\n"
+        ignoreAllSpaceOrSpaceChange = functools.partial(
+            ignoreSpace, separator="" if flags.ignore_all_space else " "
+        )
         f = compose2(ignoreAllSpaceOrSpaceChange, f)
 
     for idx, lines in enumerate(filelines):
         if flags.ignore_matching_lines:
-            lines = filter(lambda x: not re.match(r"{}".format(flags.ignore_matching_lines_regex), x), lines)
-        filelines[idx]= [f(line) for line in lines]
+            lines = filter(
+                lambda x: not re.match(
+                    r"{}".format(flags.ignore_matching_lines_regex), x
+                ),
+                lines,
+            )
+        filelines[idx] = [f(line) for line in lines]
 
     func = difflib.unified_diff if flags.unified_diff else difflib.context_diff
-    for diff in func(filelines[0], filelines[1], filepaths[0], filepaths[1],
-                     n = flags.num_context_lines):
+    for diff in func(
+        filelines[0],
+        filelines[1],
+        filepaths[0],
+        filepaths[1],
+        n=flags.num_context_lines,
+    ):
         sys.stdout.write(to_string(diff))
         exitCode = 1
     return exitCode
 
+
 def printDirVsFile(dir_path, file_path):
     if os.path.getsize(file_path):
         msg = "File %s is a directory while file %s is a regular file"
@@ -116,6 +144,7 @@ def printDirVsFile(dir_path, file_path):
         msg = "File %s is a directory while file %s is a regular empty file"
     sys.stdout.write(msg % (dir_path, file_path) + "\n")
 
+
 def printFileVsDir(file_path, dir_path):
     if os.path.getsize(file_path):
         msg = "File %s is a regular file while file %s is a directory"
@@ -123,9 +152,11 @@ def printFileVsDir(file_path, dir_path):
         msg = "File %s is a regular empty file while file %s is a directory"
     sys.stdout.write(msg % (file_path, dir_path) + "\n")
 
+
 def printOnlyIn(basedir, path, name):
     sys.stdout.write("Only in %s: %s\n" % (os.path.join(basedir, path), name))
 
+
 def compareDirTrees(flags, dir_trees, base_paths=["", ""]):
     # Dirnames of the trees are not checked, it's caller's responsibility,
     # as top-level dirnames are always different. Base paths are important
@@ -136,18 +167,26 @@ def compareDirTrees(flags, dir_trees, base_paths=["", ""]):
 
     # Compare two files or report file vs. directory mismatch.
     if left_tree[1] is None and right_tree[1] is None:
-        return compareTwoFiles(flags,
-                               [os.path.join(left_base, left_tree[0]),
-                                os.path.join(right_base, right_tree[0])])
+        return compareTwoFiles(
+            flags,
+            [
+                os.path.join(left_base, left_tree[0]),
+                os.path.join(right_base, right_tree[0]),
+            ],
+        )
 
     if left_tree[1] is None and right_tree[1] is not None:
-        printFileVsDir(os.path.join(left_base, left_tree[0]),
-                       os.path.join(right_base, right_tree[0]))
+        printFileVsDir(
+            os.path.join(left_base, left_tree[0]),
+            os.path.join(right_base, right_tree[0]),
+        )
         return 1
 
     if left_tree[1] is not None and right_tree[1] is None:
-        printDirVsFile(os.path.join(left_base, left_tree[0]),
-                       os.path.join(right_base, right_tree[0]))
+        printDirVsFile(
+            os.path.join(left_base, left_tree[0]),
+            os.path.join(right_base, right_tree[0]),
+        )
         return 1
 
     # Compare two directories via recursive use of compareDirTrees.
@@ -166,10 +205,14 @@ def compareDirTrees(flags, dir_trees, base_paths=["", ""]):
             printOnlyIn(right_base, right_tree[0], right_names[r])
             r += 1
         else:
-            exitCode |= compareDirTrees(flags,
-                                        [left_tree[1][l], right_tree[1][r]],
-                                        [os.path.join(left_base, left_tree[0]),
-                                        os.path.join(right_base, right_tree[0])])
+            exitCode |= compareDirTrees(
+                flags,
+                [left_tree[1][l], right_tree[1][r]],
+                [
+                    os.path.join(left_base, left_tree[0]),
+                    os.path.join(right_base, right_tree[0]),
+                ],
+            )
             l += 1
             r += 1
 
@@ -184,14 +227,16 @@ def compareDirTrees(flags, dir_trees, base_paths=["", ""]):
         r += 1
     return exitCode
 
+
 def main(argv):
     if sys.platform == "win32":
-        if hasattr(sys.stdout, 'buffer'):
+        if hasattr(sys.stdout, "buffer"):
             # python 3
-            sys.stdout = io.TextIOWrapper(sys.stdout.buffer, newline='\n')
+            sys.stdout = io.TextIOWrapper(sys.stdout.buffer, newline="\n")
         else:
             # python 2.7
             import msvcrt
+
             msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
     args = argv[1:]
     try:
@@ -216,8 +261,7 @@ def main(argv):
                 if flags.num_context_lines < 0:
                     raise ValueException
             except:
-                sys.stderr.write("Error: invalid '-U' argument: {}\n"
-                                 .format(a))
+                sys.stderr.write("Error: invalid '-U' argument: {}\n".format(a))
                 sys.exit(1)
         elif o == "-I":
             flags.ignore_matching_lines = True
@@ -258,5 +302,6 @@ def main(argv):
 
     sys.exit(exitCode)
 
+
 if __name__ == "__main__":
     main(sys.argv)
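
The flag handling in compareTwoTextFiles above builds its line normalizer by function composition: each enabled flag wraps the previous normalizer (identity by default). A self-contained sketch of that pattern:

    def compose2(f, g):
        return lambda x: f(g(x))

    def make_normalizer(strip_trailing_cr=False, ignore_all_space=False,
                        ignore_space_change=False):
        f = lambda x: x
        if strip_trailing_cr:
            f = compose2(lambda line: line.replace("\r\n", "\n"), f)
        if ignore_all_space or ignore_space_change:
            sep = "" if ignore_all_space else " "
            f = compose2(lambda line: sep.join(line.split()) + "\n", f)
        return f

    norm = make_normalizer(ignore_space_change=True, strip_trailing_cr=True)
    print(repr(norm("a   b\r\n")))  # 'a b\n' -- spacing changes ignored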

diff  --git a/llvm/utils/lit/lit/cl_arguments.py b/llvm/utils/lit/lit/cl_arguments.py
index 43bbd6fdaa96c..c20bbee93d1c8 100644
--- a/llvm/utils/lit/lit/cl_arguments.py
+++ b/llvm/utils/lit/lit/cl_arguments.py
@@ -10,75 +10,102 @@
 
 @enum.unique
 class TestOrder(enum.Enum):
-    LEXICAL = 'lexical'
-    RANDOM = 'random'
-    SMART = 'smart'
+    LEXICAL = "lexical"
+    RANDOM = "random"
+    SMART = "smart"
 
 
 def parse_args():
-    parser = argparse.ArgumentParser(prog='lit', fromfile_prefix_chars='@')
-    parser.add_argument('test_paths',
-            nargs='+',
-            metavar="TEST_PATH",
-            help='File or path to include in the test suite')
+    parser = argparse.ArgumentParser(prog="lit", fromfile_prefix_chars="@")
+    parser.add_argument(
+        "test_paths",
+        nargs="+",
+        metavar="TEST_PATH",
+        help="File or path to include in the test suite",
+    )
 
-    parser.add_argument('--version',
-            action='version',
-            version='%(prog)s ' + lit.__version__)
+    parser.add_argument(
+        "--version", action="version", version="%(prog)s " + lit.__version__
+    )
 
-    parser.add_argument("-j", "--threads", "--workers",
-            dest="workers",
-            metavar="N",
-            help="Number of workers used for testing",
-            type=_positive_int,
-            default=lit.util.usable_core_count())
-    parser.add_argument("--config-prefix",
-            dest="configPrefix",
-            metavar="NAME",
-            help="Prefix for 'lit' config files")
-    parser.add_argument("-D", "--param",
-            dest="user_params",
-            metavar="NAME=VAL",
-            help="Add 'NAME' = 'VAL' to the user defined parameters",
-            action="append",
-            default=[])
+    parser.add_argument(
+        "-j",
+        "--threads",
+        "--workers",
+        dest="workers",
+        metavar="N",
+        help="Number of workers used for testing",
+        type=_positive_int,
+        default=lit.util.usable_core_count(),
+    )
+    parser.add_argument(
+        "--config-prefix",
+        dest="configPrefix",
+        metavar="NAME",
+        help="Prefix for 'lit' config files",
+    )
+    parser.add_argument(
+        "-D",
+        "--param",
+        dest="user_params",
+        metavar="NAME=VAL",
+        help="Add 'NAME' = 'VAL' to the user defined parameters",
+        action="append",
+        default=[],
+    )
 
     format_group = parser.add_argument_group("Output Format")
     # FIXME: I find these names very confusing, although I like the
     # functionality.
-    format_group.add_argument("-q", "--quiet",
-            help="Suppress no error output",
-            action="store_true")
-    format_group.add_argument("-s", "--succinct",
-            help="Reduce amount of output."
-                 " Additionally, show a progress bar,"
-                 " unless --no-progress-bar is specified.",
-            action="store_true")
-    format_group.add_argument("-v", "--verbose",
-            dest="showOutput",
-            help="Show test output for failures",
-            action="store_true")
-    format_group.add_argument("-vv", "--echo-all-commands",
-            dest="echoAllCommands",
-            action="store_true",
-            help="Echo all commands as they are executed to stdout. In case of "
-                 "failure, last command shown will be the failing one.")
-    format_group.add_argument("-a", "--show-all",
-            dest="showAllOutput",
-            help="Display all commandlines and output",
-            action="store_true")
-    format_group.add_argument("-o", "--output",
-            type=lit.reports.JsonReport,
-            help="Write test results to the provided path",
-            metavar="PATH")
-    format_group.add_argument("--no-progress-bar",
-            dest="useProgressBar",
-            help="Do not use curses based progress bar",
-            action="store_false")
+    format_group.add_argument(
+        "-q", "--quiet", help="Suppress no error output", action="store_true"
+    )
+    format_group.add_argument(
+        "-s",
+        "--succinct",
+        help="Reduce amount of output."
+        " Additionally, show a progress bar,"
+        " unless --no-progress-bar is specified.",
+        action="store_true",
+    )
+    format_group.add_argument(
+        "-v",
+        "--verbose",
+        dest="showOutput",
+        help="Show test output for failures",
+        action="store_true",
+    )
+    format_group.add_argument(
+        "-vv",
+        "--echo-all-commands",
+        dest="echoAllCommands",
+        action="store_true",
+        help="Echo all commands as they are executed to stdout. In case of "
+        "failure, last command shown will be the failing one.",
+    )
+    format_group.add_argument(
+        "-a",
+        "--show-all",
+        dest="showAllOutput",
+        help="Display all commandlines and output",
+        action="store_true",
+    )
+    format_group.add_argument(
+        "-o",
+        "--output",
+        type=lit.reports.JsonReport,
+        help="Write test results to the provided path",
+        metavar="PATH",
+    )
+    format_group.add_argument(
+        "--no-progress-bar",
+        dest="useProgressBar",
+        help="Do not use curses based progress bar",
+        action="store_false",
+    )
 
     # Note: this does not generate flags for user-defined result codes.
-    success_codes = [c for c in lit.Test.ResultCode.all_codes()
-                     if not c.isFailure]
+    success_codes = [c for c in lit.Test.ResultCode.all_codes() if not c.isFailure]
     for code in success_codes:
         format_group.add_argument(
             "--show-{}".format(code.name.lower()),
@@ -86,134 +113,187 @@ def parse_args():
             help="Show {} tests ({})".format(code.label.lower(), code.name),
             action="append_const",
             const=code,
-            default=[])
+            default=[],
+        )
 
     execution_group = parser.add_argument_group("Test Execution")
-    execution_group.add_argument("--path",
-            help="Additional paths to add to testing environment",
-            action="append",
-            default=[],
-            type=os.path.abspath)
-    execution_group.add_argument("--vg",
-            dest="useValgrind",
-            help="Run tests under valgrind",
-            action="store_true")
-    execution_group.add_argument("--vg-leak",
-            dest="valgrindLeakCheck",
-            help="Check for memory leaks under valgrind",
-            action="store_true")
-    execution_group.add_argument("--vg-arg",
-            dest="valgrindArgs",
-            metavar="ARG",
-            help="Specify an extra argument for valgrind",
-            action="append",
-            default=[])
-    execution_group.add_argument("--time-tests",
-            help="Track elapsed wall time for each test",
-            action="store_true")
-    execution_group.add_argument("--no-execute",
-            dest="noExecute",
-            help="Don't execute any tests (assume PASS)",
-            action="store_true")
-    execution_group.add_argument("--xunit-xml-output",
-            type=lit.reports.XunitReport,
-            help="Write XUnit-compatible XML test reports to the specified file")
-    execution_group.add_argument("--resultdb-output",
-            type=lit.reports.ResultDBReport,
-            help="Write LuCI ResuldDB compatible JSON to the specified file")
-    execution_group.add_argument("--time-trace-output",
-            type=lit.reports.TimeTraceReport,
-            help="Write Chrome tracing compatible JSON to the specified file")
-    execution_group.add_argument("--timeout",
-            dest="maxIndividualTestTime",
-            help="Maximum time to spend running a single test (in seconds). "
-                 "0 means no time limit. [Default: 0]",
-            type=_non_negative_int)
-    execution_group.add_argument("--max-failures",
-            help="Stop execution after the given number of failures.",
-            type=_positive_int)
-    execution_group.add_argument("--allow-empty-runs",
-            help="Do not fail the run if all tests are filtered out",
-            action="store_true")
-    execution_group.add_argument("--ignore-fail",
-            dest="ignoreFail",
-            action="store_true",
-            help="Exit with status zero even if some tests fail")
-    execution_group.add_argument("--no-indirectly-run-check",
-            dest="indirectlyRunCheck",
-            help="Do not error if a test would not be run if the user had "
-                 "specified the containing directory instead of naming the "
-                 "test directly.",
-            action="store_false")
+    execution_group.add_argument(
+        "--path",
+        help="Additional paths to add to testing environment",
+        action="append",
+        default=[],
+        type=os.path.abspath,
+    )
+    execution_group.add_argument(
+        "--vg", dest="useValgrind", help="Run tests under valgrind", action="store_true"
+    )
+    execution_group.add_argument(
+        "--vg-leak",
+        dest="valgrindLeakCheck",
+        help="Check for memory leaks under valgrind",
+        action="store_true",
+    )
+    execution_group.add_argument(
+        "--vg-arg",
+        dest="valgrindArgs",
+        metavar="ARG",
+        help="Specify an extra argument for valgrind",
+        action="append",
+        default=[],
+    )
+    execution_group.add_argument(
+        "--time-tests",
+        help="Track elapsed wall time for each test",
+        action="store_true",
+    )
+    execution_group.add_argument(
+        "--no-execute",
+        dest="noExecute",
+        help="Don't execute any tests (assume PASS)",
+        action="store_true",
+    )
+    execution_group.add_argument(
+        "--xunit-xml-output",
+        type=lit.reports.XunitReport,
+        help="Write XUnit-compatible XML test reports to the specified file",
+    )
+    execution_group.add_argument(
+        "--resultdb-output",
+        type=lit.reports.ResultDBReport,
+        help="Write LuCI ResuldDB compatible JSON to the specified file",
+    )
+    execution_group.add_argument(
+        "--time-trace-output",
+        type=lit.reports.TimeTraceReport,
+        help="Write Chrome tracing compatible JSON to the specified file",
+    )
+    execution_group.add_argument(
+        "--timeout",
+        dest="maxIndividualTestTime",
+        help="Maximum time to spend running a single test (in seconds). "
+        "0 means no time limit. [Default: 0]",
+        type=_non_negative_int,
+    )
+    execution_group.add_argument(
+        "--max-failures",
+        help="Stop execution after the given number of failures.",
+        type=_positive_int,
+    )
+    execution_group.add_argument(
+        "--allow-empty-runs",
+        help="Do not fail the run if all tests are filtered out",
+        action="store_true",
+    )
+    execution_group.add_argument(
+        "--ignore-fail",
+        dest="ignoreFail",
+        action="store_true",
+        help="Exit with status zero even if some tests fail",
+    )
+    execution_group.add_argument(
+        "--no-indirectly-run-check",
+        dest="indirectlyRunCheck",
+        help="Do not error if a test would not be run if the user had "
+        "specified the containing directory instead of naming the "
+        "test directly.",
+        action="store_false",
+    )
 
     selection_group = parser.add_argument_group("Test Selection")
-    selection_group.add_argument("--max-tests",
-            metavar="N",
-            help="Maximum number of tests to run",
-            type=_positive_int)
-    selection_group.add_argument("--max-time",
-            dest="timeout",
-            metavar="N",
-            help="Maximum time to spend testing (in seconds)",
-            type=_positive_int)
-    selection_group.add_argument("--order",
-            choices=[x.value for x in TestOrder],
-            default=TestOrder.SMART,
-            help="Test order to use (default: smart)")
-    selection_group.add_argument("--shuffle",
-            dest="order",
-            help="Run tests in random order (DEPRECATED: use --order=random)",
-            action="store_const",
-            const=TestOrder.RANDOM)
-    selection_group.add_argument("-i", "--incremental",
-            help="Run failed tests first (DEPRECATED: use --order=smart)",
-            action="store_true")
-    selection_group.add_argument("--filter",
-            metavar="REGEX",
-            type=_case_insensitive_regex,
-            help="Only run tests with paths matching the given regular expression",
-            default=os.environ.get("LIT_FILTER", ".*"))
-    selection_group.add_argument("--filter-out",
-            metavar="REGEX",
-            type=_case_insensitive_regex,
-            help="Filter out tests with paths matching the given regular expression",
-            default=os.environ.get("LIT_FILTER_OUT", "^$"))
-    selection_group.add_argument("--xfail",
-            metavar="LIST",
-            type=_semicolon_list,
-            help="XFAIL tests with paths in the semicolon separated list",
-            default=os.environ.get("LIT_XFAIL", ""))
-    selection_group.add_argument("--xfail-not",
-            metavar="LIST",
-            type=_semicolon_list,
-            help="do not XFAIL tests with paths in the semicolon separated list",
-            default=os.environ.get("LIT_XFAIL_NOT", ""))
-    selection_group.add_argument("--num-shards",
-            dest="numShards",
-            metavar="M",
-            help="Split testsuite into M pieces and only run one",
-            type=_positive_int,
-            default=os.environ.get("LIT_NUM_SHARDS"))
-    selection_group.add_argument("--run-shard",
-            dest="runShard",
-            metavar="N",
-            help="Run shard #N of the testsuite",
-            type=_positive_int,
-            default=os.environ.get("LIT_RUN_SHARD"))
+    selection_group.add_argument(
+        "--max-tests",
+        metavar="N",
+        help="Maximum number of tests to run",
+        type=_positive_int,
+    )
+    selection_group.add_argument(
+        "--max-time",
+        dest="timeout",
+        metavar="N",
+        help="Maximum time to spend testing (in seconds)",
+        type=_positive_int,
+    )
+    selection_group.add_argument(
+        "--order",
+        choices=[x.value for x in TestOrder],
+        default=TestOrder.SMART,
+        help="Test order to use (default: smart)",
+    )
+    selection_group.add_argument(
+        "--shuffle",
+        dest="order",
+        help="Run tests in random order (DEPRECATED: use --order=random)",
+        action="store_const",
+        const=TestOrder.RANDOM,
+    )
+    selection_group.add_argument(
+        "-i",
+        "--incremental",
+        help="Run failed tests first (DEPRECATED: use --order=smart)",
+        action="store_true",
+    )
+    selection_group.add_argument(
+        "--filter",
+        metavar="REGEX",
+        type=_case_insensitive_regex,
+        help="Only run tests with paths matching the given regular expression",
+        default=os.environ.get("LIT_FILTER", ".*"),
+    )
+    selection_group.add_argument(
+        "--filter-out",
+        metavar="REGEX",
+        type=_case_insensitive_regex,
+        help="Filter out tests with paths matching the given regular expression",
+        default=os.environ.get("LIT_FILTER_OUT", "^$"),
+    )
+    selection_group.add_argument(
+        "--xfail",
+        metavar="LIST",
+        type=_semicolon_list,
+        help="XFAIL tests with paths in the semicolon separated list",
+        default=os.environ.get("LIT_XFAIL", ""),
+    )
+    selection_group.add_argument(
+        "--xfail-not",
+        metavar="LIST",
+        type=_semicolon_list,
+        help="do not XFAIL tests with paths in the semicolon separated list",
+        default=os.environ.get("LIT_XFAIL_NOT", ""),
+    )
+    selection_group.add_argument(
+        "--num-shards",
+        dest="numShards",
+        metavar="M",
+        help="Split testsuite into M pieces and only run one",
+        type=_positive_int,
+        default=os.environ.get("LIT_NUM_SHARDS"),
+    )
+    selection_group.add_argument(
+        "--run-shard",
+        dest="runShard",
+        metavar="N",
+        help="Run shard #N of the testsuite",
+        type=_positive_int,
+        default=os.environ.get("LIT_RUN_SHARD"),
+    )
 
     debug_group = parser.add_argument_group("Debug and Experimental Options")
-    debug_group.add_argument("--debug",
-            help="Enable debugging (for 'lit' development)",
-            action="store_true")
-    debug_group.add_argument("--show-suites",
-            help="Show discovered test suites and exit",
-            action="store_true")
-    debug_group.add_argument("--show-tests",
-            help="Show all discovered tests and exit",
-            action="store_true")
-    debug_group.add_argument("--show-used-features",
-            help="Show all features used in the test suite (in XFAIL, UNSUPPORTED and REQUIRES) and exit",
-            action="store_true")
+    debug_group.add_argument(
+        "--debug", help="Enable debugging (for 'lit' development)", action="store_true"
+    )
+    debug_group.add_argument(
+        "--show-suites",
+        help="Show discovered test suites and exit",
+        action="store_true",
+    )
+    debug_group.add_argument(
+        "--show-tests", help="Show all discovered tests and exit", action="store_true"
+    )
+    debug_group.add_argument(
+        "--show-used-features",
+        help="Show all features used in the test suite (in XFAIL, UNSUPPORTED and REQUIRES) and exit",
+        action="store_true",
+    )
 
     # LIT is special: environment variables override command line arguments.
     env_args = shlex.split(os.environ.get("LIT_OPTS", ""))
@@ -225,7 +305,9 @@ def parse_args():
         opts.showOutput = True
 
     if opts.incremental:
-        print('WARNING: --incremental is deprecated. Failing tests now always run first.')
+        print(
+            "WARNING: --incremental is deprecated. Failing tests now always run first."
+        )
 
     if opts.numShards or opts.runShard:
         if not opts.numShards or not opts.runShard:
@@ -236,17 +318,25 @@ def parse_args():
     else:
         opts.shard = None
 
-    opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.resultdb_output, opts.time_trace_output])
+    opts.reports = filter(
+        None,
+        [
+            opts.output,
+            opts.xunit_xml_output,
+            opts.resultdb_output,
+            opts.time_trace_output,
+        ],
+    )
 
     return opts
 
 
 def _positive_int(arg):
-    return _int(arg, 'positive', lambda i: i > 0)
+    return _int(arg, "positive", lambda i: i > 0)
 
 
 def _non_negative_int(arg):
-    return _int(arg, 'non-negative', lambda i: i >= 0)
+    return _int(arg, "non-negative", lambda i: i >= 0)
 
 
 def _int(arg, kind, pred):
@@ -262,6 +352,7 @@ def _int(arg, kind, pred):
 
 def _case_insensitive_regex(arg):
     import re
+
     try:
         return re.compile(arg, re.IGNORECASE)
     except re.error as reason:
@@ -269,7 +360,7 @@ def _case_insensitive_regex(arg):
 
 
 def _semicolon_list(arg):
-    return arg.split(';')
+    return arg.split(";")
 
 
 def _error(desc, *args):
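
The "environment variables override command line arguments" behavior noted above works because argparse lets later occurrences win for store-type options: LIT_OPTS is shlex-split and appended after argv. A sketch of the trick (stand-in option and value, not lit's full parser):

    import argparse, os, shlex

    parser = argparse.ArgumentParser(prog="demo")
    parser.add_argument("-j", "--workers", type=int, default=1)

    env_args = shlex.split(os.environ.get("LIT_OPTS", "-j 8"))  # stand-in
    opts = parser.parse_args(["-j", "2"] + env_args)
    print(opts.workers)  # 8 -- the LIT_OPTS value overrides argv's -j 2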

diff  --git a/llvm/utils/lit/lit/discovery.py b/llvm/utils/lit/lit/discovery.py
index bb77aa310c713..5bfe1eb5acd71 100644
--- a/llvm/utils/lit/lit/discovery.py
+++ b/llvm/utils/lit/lit/discovery.py
@@ -9,6 +9,7 @@
 from lit.TestingConfig import TestingConfig
 from lit import LitConfig, Test
 
+
 def chooseConfigFileFromDir(dir, config_names):
     for name in config_names:
         p = os.path.join(dir, name)
@@ -16,12 +17,14 @@ def chooseConfigFileFromDir(dir, config_names):
             return p
     return None
 
+
 def dirContainsTestSuite(path, lit_config):
     cfgpath = chooseConfigFileFromDir(path, lit_config.site_config_names)
     if not cfgpath:
         cfgpath = chooseConfigFileFromDir(path, lit_config.config_names)
     return cfgpath
 
+
 def getTestSuite(item, litConfig, cache):
     """getTestSuite(item, litConfig, cache) -> (suite, relative_path)
 
@@ -31,13 +34,14 @@ def getTestSuite(item, litConfig, cache):
     @retval (suite, relative_path) - The suite that @arg item is in, and its
     relative path inside that suite.
     """
+
     def search1(path):
         # Check for a site config or a lit config.
         cfgpath = dirContainsTestSuite(path, litConfig)
 
         # If we didn't find a config file, keep looking.
         if not cfgpath:
-            parent,base = os.path.split(path)
+            parent, base = os.path.split(path)
             if parent == path:
                 return (None, ())
 
@@ -50,7 +54,7 @@ def search1(path):
         # when it finds a configuration it is about to load.  If the given
         # path is in the map, the value of that key is a path to the
         # configuration to load instead.
-        config_map = litConfig.params.get('config_map')
+        config_map = litConfig.params.get("config_map")
         if config_map:
             cfgpath = os.path.realpath(cfgpath)
             target = config_map.get(os.path.normcase(cfgpath))
@@ -59,7 +63,7 @@ def search1(path):
 
         # We found a test suite, create a new config for it and load it.
         if litConfig.debug:
-            litConfig.note('loading suite config %r' % cfgpath)
+            litConfig.note("loading suite config %r" % cfgpath)
 
         cfg = TestingConfig.fromdefaults(litConfig)
         cfg.load_from_path(cfgpath, litConfig)
@@ -81,7 +85,7 @@ def search(path):
     # Skip files and virtual components.
     components = []
     while not os.path.isdir(item):
-        parent,base = os.path.split(item)
+        parent, base = os.path.split(item)
         if parent == item:
             return (None, ())
         components.append(base)
@@ -91,6 +95,7 @@ def search(path):
     ts, relative = search(item)
     return ts, tuple(relative + tuple(components))
 
+
 def getLocalConfig(ts, path_in_suite, litConfig, cache):
     def search1(path_in_suite):
         # Get the parent config.
@@ -111,7 +116,7 @@ def search1(path_in_suite):
         # file into it.
         config = copy.deepcopy(parent)
         if litConfig.debug:
-            litConfig.note('loading local config %r' % cfgpath)
+            litConfig.note("loading local config %r" % cfgpath)
         config.load_from_path(cfgpath, litConfig)
         return config
 
@@ -124,23 +129,30 @@ def search(path_in_suite):
 
     return search(path_in_suite)
 
-def getTests(path, litConfig, testSuiteCache,
-             localConfigCache, indirectlyRunCheck):
+
+def getTests(path, litConfig, testSuiteCache, localConfigCache, indirectlyRunCheck):
     # Find the test suite for this input and its relative path.
-    ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
+    ts, path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
     if ts is None:
-        litConfig.warning('unable to find test suite for %r' % path)
-        return (),()
+        litConfig.warning("unable to find test suite for %r" % path)
+        return (), ()
 
     if litConfig.debug:
-        litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
-                                                        path_in_suite))
+        litConfig.note("resolved input %r to %r::%r" % (path, ts.name, path_in_suite))
 
-    return ts, getTestsInSuite(ts, path_in_suite, litConfig,
-                               testSuiteCache, localConfigCache, indirectlyRunCheck)
+    return ts, getTestsInSuite(
+        ts,
+        path_in_suite,
+        litConfig,
+        testSuiteCache,
+        localConfigCache,
+        indirectlyRunCheck,
+    )
 
-def getTestsInSuite(ts, path_in_suite, litConfig,
-                    testSuiteCache, localConfigCache, indirectlyRunCheck):
+
+def getTestsInSuite(
+    ts, path_in_suite, litConfig, testSuiteCache, localConfigCache, indirectlyRunCheck
+):
     # Check that the source path exists (errors here are reported by the
     # caller).
     source_path = ts.getSourcePath(path_in_suite)
@@ -167,16 +179,17 @@ def getTestsInSuite(ts, path_in_suite, litConfig,
             and not lc.standalone_tests
         ):
             found = False
-            for res in lc.test_format.getTestsInDirectory(ts, test_dir_in_suite,
-                                                          litConfig, lc):
+            for res in lc.test_format.getTestsInDirectory(
+                ts, test_dir_in_suite, litConfig, lc
+            ):
                 if test.getFullName() == res.getFullName():
                     found = True
                     break
             if not found:
                 litConfig.error(
-                    '%r would not be run indirectly: change name or LIT config'
-                    '(e.g. suffixes or standalone_tests variables)'
-                    % test.getFullName())
+                    "%r would not be run indirectly: change name or LIT config"
+                    "(e.g. suffixes or standalone_tests variables)" % test.getFullName()
+                )
 
         yield test
         return
@@ -189,21 +202,20 @@ def getTestsInSuite(ts, path_in_suite, litConfig,
     if lc.standalone_tests:
         if lc.suffixes or lc.excludes:
             litConfig.warning(
-                'standalone_tests set in LIT config but suffixes or excludes'
-                    ' are also set'
+                "standalone_tests set in LIT config but suffixes or excludes"
+                " are also set"
             )
         return
 
     # Search for tests.
     if lc.test_format is not None:
-        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
-                                                      litConfig, lc):
+        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite, litConfig, lc):
             yield res
 
     # Search subdirectories.
     for filename in os.listdir(source_path):
         # FIXME: This doesn't belong here?
-        if filename in ('Output', '.svn', '.git') or filename in lc.excludes:
+        if filename in ("Output", ".svn", ".git") or filename in lc.excludes:
             continue
 
         # Ignore non-directories.
@@ -216,11 +228,13 @@ def getTestsInSuite(ts, path_in_suite, litConfig,
         subpath = path_in_suite + (filename,)
         file_execpath = ts.getExecPath(subpath)
         if dirContainsTestSuite(file_execpath, litConfig):
-            sub_ts, subpath_in_suite = getTestSuite(file_execpath, litConfig,
-                                                    testSuiteCache)
+            sub_ts, subpath_in_suite = getTestSuite(
+                file_execpath, litConfig, testSuiteCache
+            )
         elif dirContainsTestSuite(file_sourcepath, litConfig):
-            sub_ts, subpath_in_suite = getTestSuite(file_sourcepath, litConfig,
-                                                    testSuiteCache)
+            sub_ts, subpath_in_suite = getTestSuite(
+                file_sourcepath, litConfig, testSuiteCache
+            )
         else:
             sub_ts = None
 
@@ -232,19 +246,31 @@ def getTestsInSuite(ts, path_in_suite, litConfig,
 
         # Otherwise, load from the nested test suite, if present.
         if sub_ts is not None:
-            subiter = getTestsInSuite(sub_ts, subpath_in_suite, litConfig,
-                                      testSuiteCache, localConfigCache,
-                                      indirectlyRunCheck)
+            subiter = getTestsInSuite(
+                sub_ts,
+                subpath_in_suite,
+                litConfig,
+                testSuiteCache,
+                localConfigCache,
+                indirectlyRunCheck,
+            )
         else:
-            subiter = getTestsInSuite(ts, subpath, litConfig, testSuiteCache,
-                                      localConfigCache, indirectlyRunCheck)
+            subiter = getTestsInSuite(
+                ts,
+                subpath,
+                litConfig,
+                testSuiteCache,
+                localConfigCache,
+                indirectlyRunCheck,
+            )
 
         N = 0
         for res in subiter:
             N += 1
             yield res
         if sub_ts and not N:
-            litConfig.warning('test suite %r contained no tests' % sub_ts.name)
+            litConfig.warning("test suite %r contained no tests" % sub_ts.name)
+
 
 def find_tests_for_inputs(lit_config, inputs, indirectlyRunCheck):
     """
@@ -257,7 +283,7 @@ def find_tests_for_inputs(lit_config, inputs, indirectlyRunCheck):
     # Expand '@...' form in inputs.
     actual_inputs = []
     for input in inputs:
-        if input.startswith('@'):
+        if input.startswith("@"):
             f = open(input[1:])
             try:
                 for ln in f:
@@ -275,20 +301,27 @@ def find_tests_for_inputs(lit_config, inputs, indirectlyRunCheck):
     local_config_cache = {}
     for input in actual_inputs:
         prev = len(tests)
-        tests.extend(getTests(input, lit_config, test_suite_cache,
-                              local_config_cache, indirectlyRunCheck)[1])
+        tests.extend(
+            getTests(
+                input,
+                lit_config,
+                test_suite_cache,
+                local_config_cache,
+                indirectlyRunCheck,
+            )[1]
+        )
         if prev == len(tests):
-            lit_config.warning('input %r contained no tests' % input)
+            lit_config.warning("input %r contained no tests" % input)
 
     # This data is no longer needed but keeping it around causes awful
     # performance problems while the test suites run.
     for k, suite in test_suite_cache.items():
-      if suite[0]:
-        suite[0].test_times = None
+        if suite[0]:
+            suite[0].test_times = None
 
     # If there were any errors during test discovery, exit now.
     if lit_config.numErrors:
-        sys.stderr.write('%d errors, exiting.\n' % lit_config.numErrors)
+        sys.stderr.write("%d errors, exiting.\n" % lit_config.numErrors)
         sys.exit(2)
 
     return tests

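The discovery code above resolves each input to a (suite, path) pair and then walks the source tree recursively. A minimal self-contained sketch of that walk, using a hypothetical helper rather than lit's actual API:

    import os

    # Sketch of the recursive walk getTestsInSuite performs: skip the
    # entries lit always ignores, recurse into subdirectories, and yield
    # files whose extension matches the suite's suffixes. (Illustration
    # only; the real version also handles nested suites, local configs,
    # and standalone_tests.)
    def find_tests(root, suffixes, excludes=()):
        for name in sorted(os.listdir(root)):
            if name in ("Output", ".svn", ".git") or name in excludes:
                continue
            path = os.path.join(root, name)
            if os.path.isdir(path):
                yield from find_tests(path, suffixes, excludes)
            elif os.path.splitext(name)[1] in suffixes:
                yield path

For example, find_tests("llvm/test", {".ll", ".py"}) would yield the candidate test files under that tree.
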
diff --git a/llvm/utils/lit/lit/display.py b/llvm/utils/lit/lit/display.py
index 51a05e8abaf42..7de5a298d2302 100644
--- a/llvm/utils/lit/lit/display.py
+++ b/llvm/utils/lit/lit/display.py
@@ -6,19 +6,19 @@ def create_display(opts, tests, total_tests, workers):
         return NopDisplay()
 
     num_tests = len(tests)
-    of_total = (' of %d' % total_tests) if (num_tests != total_tests) else ''
-    header = '-- Testing: %d%s tests, %d workers --' % (
-        num_tests, of_total, workers)
+    of_total = (" of %d" % total_tests) if (num_tests != total_tests) else ""
+    header = "-- Testing: %d%s tests, %d workers --" % (num_tests, of_total, workers)
 
     progress_bar = None
     if opts.succinct and opts.useProgressBar:
         import lit.ProgressBar
+
         try:
             tc = lit.ProgressBar.TerminalController()
             progress_bar = lit.ProgressBar.ProgressBar(tc, header)
             header = None
         except ValueError:
-            progress_bar = lit.ProgressBar.SimpleProgressBar('Testing: ')
+            progress_bar = lit.ProgressBar.SimpleProgressBar("Testing: ")
 
     return Display(opts, tests, header, progress_bar)
 
@@ -49,11 +49,15 @@ def update(self, test):
             self.unpredictable_tests_remaining -= 1
 
         # NOTE: median would be more precise, but might be too slow.
-        average_test_time = (self.time_elapsed + self.predictable_time_remaining) / \
-            (self.completed + self.predictable_tests_remaining)
-        unpredictable_time_remaining = average_test_time * \
-            self.unpredictable_tests_remaining
-        total_time_remaining = self.predictable_time_remaining + unpredictable_time_remaining
+        average_test_time = (self.time_elapsed + self.predictable_time_remaining) / (
+            self.completed + self.predictable_tests_remaining
+        )
+        unpredictable_time_remaining = (
+            average_test_time * self.unpredictable_tests_remaining
+        )
+        total_time_remaining = (
+            self.predictable_time_remaining + unpredictable_time_remaining
+        )
         total_time = self.time_elapsed + total_time_remaining
 
         if total_time > 0:
@@ -62,9 +66,14 @@ def update(self, test):
 
 
 class NopDisplay(object):
-    def print_header(self): pass
-    def update(self, test): pass
-    def clear(self, interrupted): pass
+    def print_header(self):
+        pass
+
+    def update(self, test):
+        pass
+
+    def clear(self, interrupted):
+        pass
 
 
 class Display(object):
@@ -72,8 +81,7 @@ def __init__(self, opts, tests, header, progress_bar):
         self.opts = opts
         self.num_tests = len(tests)
         self.header = header
-        self.progress_predictor = ProgressPredictor(
-            tests) if progress_bar else None
+        self.progress_predictor = ProgressPredictor(tests) if progress_bar else None
         self.progress_bar = progress_bar
         self.completed = 0
 
@@ -81,14 +89,16 @@ def print_header(self):
         if self.header:
             print(self.header)
         if self.progress_bar:
-            self.progress_bar.update(0.0, '')
+            self.progress_bar.update(0.0, "")
 
     def update(self, test):
         self.completed += 1
 
-        show_result = test.isFailure() or \
-                self.opts.showAllOutput or \
-                (not self.opts.quiet and not self.opts.succinct)
+        show_result = (
+            test.isFailure()
+            or self.opts.showAllOutput
+            or (not self.opts.quiet and not self.opts.succinct)
+        )
         if show_result:
             if self.progress_bar:
                 self.progress_bar.clear(interrupted=False)
@@ -96,7 +106,7 @@ def update(self, test):
 
         if self.progress_bar:
             if test.isFailure():
-                self.progress_bar.barColor = 'RED'
+                self.progress_bar.barColor = "RED"
             percent = self.progress_predictor.update(test)
             self.progress_bar.update(percent, test.getFullName())
 
@@ -107,15 +117,17 @@ def clear(self, interrupted):
     def print_result(self, test):
         # Show the test result line.
         test_name = test.getFullName()
-        print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
-                                     self.completed, self.num_tests))
+        print(
+            "%s: %s (%d of %d)"
+            % (test.result.code.name, test_name, self.completed, self.num_tests)
+        )
 
         # Show the test failure output, if requested.
-        if (test.isFailure() and self.opts.showOutput) or \
-           self.opts.showAllOutput:
+        if (test.isFailure() and self.opts.showOutput) or self.opts.showAllOutput:
             if test.isFailure():
-                print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
-                                                  '*'*20))
+                print(
+                    "%s TEST '%s' FAILED %s" % ("*" * 20, test.getFullName(), "*" * 20)
+                )
             out = test.result.output
             # Encode/decode so that, when using Python 3.6.5 in Windows 10,
             # print(out) doesn't raise UnicodeEncodeError if out contains
@@ -124,8 +136,7 @@ def print_result(self, test):
             # encoding if it raises UnicodeDecodeError.
             if sys.stdout.encoding:
                 try:
-                    out = out.encode(encoding=sys.stdout.encoding,
-                                     errors="replace")
+                    out = out.encode(encoding=sys.stdout.encoding, errors="replace")
                 except UnicodeDecodeError:
                     pass
                 # Python 2 can raise UnicodeDecodeError here too in cases
@@ -137,24 +148,22 @@ def print_result(self, test):
 
         # Report test metrics, if present.
         if test.result.metrics:
-            print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
-                                               '*'*10))
+            print("%s TEST '%s' RESULTS %s" % ("*" * 10, test.getFullName(), "*" * 10))
             items = sorted(test.result.metrics.items())
             for metric_name, value in items:
-                print('%s: %s ' % (metric_name, value.format()))
+                print("%s: %s " % (metric_name, value.format()))
             print("*" * 10)
 
         # Report micro-tests, if present
         if test.result.microResults:
             items = sorted(test.result.microResults.items())
             for micro_test_name, micro_test in items:
-                print("%s MICRO-TEST: %s" %
-                         ('*'*3, micro_test_name))
+                print("%s MICRO-TEST: %s" % ("*" * 3, micro_test_name))
 
                 if micro_test.metrics:
                     sorted_metrics = sorted(micro_test.metrics.items())
                     for metric_name, value in sorted_metrics:
-                        print('    %s:  %s ' % (metric_name, value.format()))
+                        print("    %s:  %s " % (metric_name, value.format()))
 
         # Ensure the output is flushed.
         sys.stdout.flush()

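The estimate in ProgressPredictor.update above is a plain average: tests with recorded historical times are "predictable", and each remaining test without timing data is assumed to take the average time observed so far. A worked example with hypothetical numbers:

    # Hypothetical inputs to ProgressPredictor.update's estimate.
    time_elapsed = 30.0                # seconds spent so far
    completed = 10                     # tests already finished
    predictable_time_remaining = 60.0  # summed historical times of queued tests
    predictable_tests_remaining = 20   # queued tests with historical times
    unpredictable_tests_remaining = 5  # queued tests without timing data

    average_test_time = (time_elapsed + predictable_time_remaining) / (
        completed + predictable_tests_remaining
    )  # (30 + 60) / (10 + 20) = 3.0 s/test
    unpredictable_time_remaining = average_test_time * unpredictable_tests_remaining  # 15.0
    total_time_remaining = predictable_time_remaining + unpredictable_time_remaining  # 75.0
    total_time = time_elapsed + total_time_remaining  # 105.0
    # The progress bar is then updated with time_elapsed / total_time,
    # roughly 0.29 here, i.e. about 29% done.
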
diff --git a/llvm/utils/lit/lit/formats/__init__.py b/llvm/utils/lit/lit/formats/__init__.py
index 7a357657670f3..d0a3c026eccd0 100644
--- a/llvm/utils/lit/lit/formats/__init__.py
+++ b/llvm/utils/lit/lit/formats/__init__.py
@@ -2,7 +2,7 @@
     TestFormat,
     FileBasedTest,
     OneCommandPerFileTest,
-    ExecutableTest
+    ExecutableTest,
 )
 
 from lit.formats.googletest import GoogleTest  # noqa: F401

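The only change in this file is a trailing comma, but it is meaningful to black: a "magic trailing comma" makes black keep a bracketed construct exploded one element per line even when it would fit within the line limit. A small illustration:

    # Without a trailing comma, black collapses what fits in 88 columns:
    short = ["TestFormat", "FileBasedTest"]

    # With a magic trailing comma, the list stays one element per line:
    exploded = [
        "TestFormat",
        "FileBasedTest",
        "OneCommandPerFileTest",
        "ExecutableTest",
    ]
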
diff --git a/llvm/utils/lit/lit/formats/base.py b/llvm/utils/lit/lit/formats/base.py
index b44a606e76a82..0f8e984b2ab48 100644
--- a/llvm/utils/lit/lit/formats/base.py
+++ b/llvm/utils/lit/lit/formats/base.py
@@ -4,39 +4,42 @@
 import lit.Test
 import lit.util
 
+
 class TestFormat(object):
     pass
 
+
 ###
 
+
 class FileBasedTest(TestFormat):
-    def getTestsInDirectory(self, testSuite, path_in_suite,
-                            litConfig, localConfig):
+    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
         source_path = testSuite.getSourcePath(path_in_suite)
         for filename in os.listdir(source_path):
             # Ignore dot files and excluded tests.
-            if (filename.startswith('.') or
-                filename in localConfig.excludes):
+            if filename.startswith(".") or filename in localConfig.excludes:
                 continue
 
             filepath = os.path.join(source_path, filename)
             if not os.path.isdir(filepath):
-                base,ext = os.path.splitext(filename)
+                base, ext = os.path.splitext(filename)
                 if ext in localConfig.suffixes:
-                    yield lit.Test.Test(testSuite, path_in_suite + (filename,),
-                                        localConfig)
+                    yield lit.Test.Test(
+                        testSuite, path_in_suite + (filename,), localConfig
+                    )
+
 
 ###
 
 import re
 import tempfile
 
+
 class OneCommandPerFileTest(TestFormat):
     # FIXME: Refactor into generic test for running some command on a directory
     # of inputs.
 
-    def __init__(self, command, dir, recursive=False,
-                 pattern=".*", useTempInput=False):
+    def __init__(self, command, dir, recursive=False, pattern=".*", useTempInput=False):
         if isinstance(command, str):
             self.command = [command]
         else:
@@ -48,54 +51,55 @@ def __init__(self, command, dir, recursive=False,
         self.pattern = re.compile(pattern)
         self.useTempInput = useTempInput
 
-    def getTestsInDirectory(self, testSuite, path_in_suite,
-                            litConfig, localConfig):
+    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
         dir = self.dir
         if dir is None:
             dir = testSuite.getSourcePath(path_in_suite)
 
-        for dirname,subdirs,filenames in os.walk(dir):
+        for dirname, subdirs, filenames in os.walk(dir):
             if not self.recursive:
                 subdirs[:] = []
 
-            subdirs[:] = [d for d in subdirs
-                          if (d != '.svn' and
-                              d not in localConfig.excludes)]
+            subdirs[:] = [
+                d for d in subdirs if (d != ".svn" and d not in localConfig.excludes)
+            ]
 
             for filename in filenames:
-                if (filename.startswith('.') or
-                    not self.pattern.match(filename) or
-                    filename in localConfig.excludes):
+                if (
+                    filename.startswith(".")
+                    or not self.pattern.match(filename)
+                    or filename in localConfig.excludes
+                ):
                     continue
 
-                path = os.path.join(dirname,filename)
-                suffix = path[len(dir):]
+                path = os.path.join(dirname, filename)
+                suffix = path[len(dir) :]
                 if suffix.startswith(os.sep):
                     suffix = suffix[1:]
                 test = lit.Test.Test(
-                    testSuite, path_in_suite + tuple(suffix.split(os.sep)),
-                    localConfig)
+                    testSuite, path_in_suite + tuple(suffix.split(os.sep)), localConfig
+                )
                 # FIXME: Hack?
                 test.source_path = path
                 yield test
 
     def createTempInput(self, tmp, test):
-        raise NotImplementedError('This is an abstract method.')
+        raise NotImplementedError("This is an abstract method.")
 
     def execute(self, test, litConfig):
         if test.config.unsupported:
-            return (lit.Test.UNSUPPORTED, 'Test is unsupported')
+            return (lit.Test.UNSUPPORTED, "Test is unsupported")
 
         cmd = list(self.command)
 
         # If using temp input, create a temporary file and hand it to the
         # subclass.
         if self.useTempInput:
-            tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
+            tmp = tempfile.NamedTemporaryFile(suffix=".cpp")
             self.createTempInput(tmp, test)
             tmp.flush()
             cmd.append(tmp.name)
-        elif hasattr(test, 'source_path'):
+        elif hasattr(test, "source_path"):
             cmd.append(test.source_path)
         else:
             cmd.append(test.getSourcePath())
@@ -104,14 +108,13 @@ def execute(self, test, litConfig):
 
         diags = out + err
         if not exitCode and not diags.strip():
-            return lit.Test.PASS,''
+            return lit.Test.PASS, ""
 
         # Try to include some useful information.
-        report = """Command: %s\n""" % ' '.join(["'%s'" % a
-                                                 for a in cmd])
+        report = """Command: %s\n""" % " ".join(["'%s'" % a for a in cmd])
         if self.useTempInput:
             report += """Temporary File: %s\n""" % tmp.name
-            report += "--\n%s--\n""" % open(tmp.name).read()
+            report += "--\n%s--\n" "" % open(tmp.name).read()
         report += """Output:\n--\n%s--""" % diags
 
         return lit.Test.FAIL, report
@@ -128,7 +131,6 @@ def execute(self, test, litConfig):
         out, err, exitCode = lit.util.executeCommand(test.getSourcePath())
 
         if not exitCode:
-            return lit.Test.PASS, ''
-
-        return lit.Test.FAIL, out+err
+            return lit.Test.PASS, ""
 
+        return lit.Test.FAIL, out + err

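OneCommandPerFileTest.execute above passes a test only when the command exits 0 and prints nothing at all. A minimal sketch of that policy, using subprocess directly instead of lit.util.executeCommand:

    import subprocess

    # Sketch of the pass/fail policy: exit code 0 with empty
    # stdout+stderr is PASS; anything else is FAIL with a report that
    # quotes the command and its output. (Illustration only.)
    def run_one(cmd, source_path):
        proc = subprocess.run(cmd + [source_path], capture_output=True, text=True)
        diags = proc.stdout + proc.stderr
        if proc.returncode == 0 and not diags.strip():
            return "PASS", ""
        report = "Command: %s\n" % " ".join("'%s'" % a for a in cmd + [source_path])
        report += "Output:\n--\n%s--" % diags
        return "FAIL", report
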
diff --git a/llvm/utils/lit/lit/formats/googletest.py b/llvm/utils/lit/lit/formats/googletest.py
index 8209f503a1b31..f8304cbd05453 100644
--- a/llvm/utils/lit/lit/formats/googletest.py
+++ b/llvm/utils/lit/lit/formats/googletest.py
@@ -11,39 +11,43 @@
 import lit.util
 from .base import TestFormat
 
-kIsWindows = sys.platform in ['win32', 'cygwin']
+kIsWindows = sys.platform in ["win32", "cygwin"]
+
 
 class GoogleTest(TestFormat):
-    def __init__(self, test_sub_dirs, test_suffix, run_under = []):
+    def __init__(self, test_sub_dirs, test_suffix, run_under=[]):
         self.seen_executables = set()
-        self.test_sub_dirs = str(test_sub_dirs).split(';')
+        self.test_sub_dirs = str(test_sub_dirs).split(";")
 
         # On Windows, assume tests will also end in '.exe'.
         exe_suffix = str(test_suffix)
         if kIsWindows:
-            exe_suffix += '.exe'
+            exe_suffix += ".exe"
 
         # Also check for .py files for testing purposes.
-        self.test_suffixes = {exe_suffix, test_suffix + '.py'}
+        self.test_suffixes = {exe_suffix, test_suffix + ".py"}
         self.run_under = run_under
 
     def get_num_tests(self, path, litConfig, localConfig):
         list_test_cmd = self.prepareCmd(
-            [path, '--gtest_list_tests', '--gtest_filter=-*DISABLED_*'])
+            [path, "--gtest_list_tests", "--gtest_filter=-*DISABLED_*"]
+        )
         try:
-            out = subprocess.check_output(list_test_cmd,
-                                          env=localConfig.environment)
+            out = subprocess.check_output(list_test_cmd, env=localConfig.environment)
         except subprocess.CalledProcessError as exc:
             litConfig.warning(
                 "unable to discover google-tests in %r: %s. Process output: %s"
-                % (path, sys.exc_info()[1], exc.output))
+                % (path, sys.exc_info()[1], exc.output)
+            )
             return None
         return sum(
-            map(lambda line: lit.util.to_string(line).startswith('  '),
-                out.splitlines(False)))
+            map(
+                lambda line: lit.util.to_string(line).startswith("  "),
+                out.splitlines(False),
+            )
+        )
 
-    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
-                            localConfig):
+    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
         init_shard_size = 512  # number of tests in a shard
         core_count = lit.util.usable_core_count()
         source_path = testSuite.getSourcePath(path_in_suite)
@@ -51,18 +55,18 @@ def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
             dir_path = os.path.join(source_path, subdir)
             if not os.path.isdir(dir_path):
                 continue
-            for fn in lit.util.listdir_files(dir_path,
-                                             suffixes=self.test_suffixes):
+            for fn in lit.util.listdir_files(dir_path, suffixes=self.test_suffixes):
                 # Discover the tests in this executable.
                 execpath = os.path.join(source_path, subdir, fn)
                 if execpath in self.seen_executables:
                     litConfig.warning(
-                        "Skip adding %r since it has been added to the test pool" % execpath)
+                        "Skip adding %r since it has been added to the test pool"
+                        % execpath
+                    )
                     continue
                 else:
                     self.seen_executables.add(execpath)
-                num_tests = self.get_num_tests(execpath, litConfig,
-                                               localConfig)
+                num_tests = self.get_num_tests(execpath, litConfig, localConfig)
                 if num_tests is not None:
                     # Compute the number of shards.
                     shard_size = init_shard_size
@@ -73,19 +77,26 @@ def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
 
                     # Create one lit test for each shard.
                     for idx in range(nshard):
-                        testPath = path_in_suite + (subdir, fn, str(idx),
-                                                    str(nshard))
-                        json_file = '-'.join([
-                            execpath, testSuite.config.name,
-                            str(os.getpid()),
-                            str(idx),
-                            str(nshard)
-                        ]) + '.json'
-                        yield lit.Test.Test(testSuite,
-                                            testPath,
-                                            localConfig,
-                                            file_path=execpath,
-                                            gtest_json_file=json_file)
+                        testPath = path_in_suite + (subdir, fn, str(idx), str(nshard))
+                        json_file = (
+                            "-".join(
+                                [
+                                    execpath,
+                                    testSuite.config.name,
+                                    str(os.getpid()),
+                                    str(idx),
+                                    str(nshard),
+                                ]
+                            )
+                            + ".json"
+                        )
+                        yield lit.Test.Test(
+                            testSuite,
+                            testPath,
+                            localConfig,
+                            file_path=execpath,
+                            gtest_json_file=json_file,
+                        )
                 else:
                     # This doesn't look like a valid gtest file.  This can
                     # have a number of causes, none of them good.  For
@@ -95,32 +106,35 @@ def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
                     # failures will get reported, so return a dummy test name
                     # so that the failure is reported later.
                     testPath = path_in_suite + (
-                        subdir, fn, 'failed_to_discover_tests_from_gtest')
-                    yield lit.Test.Test(testSuite,
-                                        testPath,
-                                        localConfig,
-                                        file_path=execpath)
+                        subdir,
+                        fn,
+                        "failed_to_discover_tests_from_gtest",
+                    )
+                    yield lit.Test.Test(
+                        testSuite, testPath, localConfig, file_path=execpath
+                    )
 
     def execute(self, test, litConfig):
         if test.gtest_json_file is None:
-            return lit.Test.FAIL, ''
+            return lit.Test.FAIL, ""
 
-        testPath,testName = os.path.split(test.getSourcePath())
+        testPath, testName = os.path.split(test.getSourcePath())
         while not os.path.exists(testPath):
             # Handle GTest parametrized and typed tests, whose name includes
             # some '/'s.
             testPath, namePrefix = os.path.split(testPath)
-            testName = namePrefix + '/' + testName
+            testName = namePrefix + "/" + testName
 
-        testName,total_shards = os.path.split(testName)
-        testName,shard_idx = os.path.split(testName)
+        testName, total_shards = os.path.split(testName)
+        testName, shard_idx = os.path.split(testName)
         from lit.cl_arguments import TestOrder
+
         use_shuffle = TestOrder(litConfig.order) == TestOrder.RANDOM
         shard_env = {
-            'GTEST_OUTPUT': 'json:' + test.gtest_json_file,
-            'GTEST_SHUFFLE': '1' if use_shuffle else '0',
-            'GTEST_TOTAL_SHARDS': os.environ.get("GTEST_TOTAL_SHARDS", total_shards),
-            'GTEST_SHARD_INDEX': os.environ.get("GTEST_SHARD_INDEX", shard_idx)
+            "GTEST_OUTPUT": "json:" + test.gtest_json_file,
+            "GTEST_SHUFFLE": "1" if use_shuffle else "0",
+            "GTEST_TOTAL_SHARDS": os.environ.get("GTEST_TOTAL_SHARDS", total_shards),
+            "GTEST_SHARD_INDEX": os.environ.get("GTEST_SHARD_INDEX", shard_idx),
         }
         test.config.environment.update(shard_env)
 
@@ -130,75 +144,82 @@ def execute(self, test, litConfig):
             cmd = litConfig.valgrindArgs + cmd
 
         if litConfig.noExecute:
-            return lit.Test.PASS, ''
+            return lit.Test.PASS, ""
 
         def get_shard_header(shard_env):
-            shard_envs = ' '.join([k + '=' + v for k, v in shard_env.items()])
-            return f"Script(shard):\n--\n%s %s\n--\n" % (shard_envs, ' '.join(cmd))
+            shard_envs = " ".join([k + "=" + v for k, v in shard_env.items()])
+            return f"Script(shard):\n--\n%s %s\n--\n" % (shard_envs, " ".join(cmd))
 
         shard_header = get_shard_header(shard_env)
 
         try:
             out, _, exitCode = lit.util.executeCommand(
-                cmd, env=test.config.environment,
-                timeout=litConfig.maxIndividualTestTime, redirect_stderr=True)
+                cmd,
+                env=test.config.environment,
+                timeout=litConfig.maxIndividualTestTime,
+                redirect_stderr=True,
+            )
         except lit.util.ExecuteCommandTimeoutException as e:
             stream_msg = f"\n{e.out}\n--\nexit: {e.exitCode}\n--\n"
-            return (lit.Test.TIMEOUT, f'{shard_header}{stream_msg}Reached '
-                    f'timeout of {litConfig.maxIndividualTestTime} seconds')
+            return (
+                lit.Test.TIMEOUT,
+                f"{shard_header}{stream_msg}Reached "
+                f"timeout of {litConfig.maxIndividualTestTime} seconds",
+            )
 
         if not os.path.exists(test.gtest_json_file):
-            errmsg = f"shard JSON output does not exist: %s" % (
-                test.gtest_json_file)
+            errmsg = f"shard JSON output does not exist: %s" % (test.gtest_json_file)
             stream_msg = f"\n{out}\n--\nexit: {exitCode}\n--\n"
             return lit.Test.FAIL, shard_header + stream_msg + errmsg
 
         if exitCode == 0:
-            return lit.Test.PASS, ''
+            return lit.Test.PASS, ""
 
         def get_test_stdout(test_name):
             res = []
-            header = f'[ RUN      ] ' + test_name
-            footer = f'[  FAILED  ] ' + test_name
+            header = f"[ RUN      ] " + test_name
+            footer = f"[  FAILED  ] " + test_name
             in_range = False
             for l in out.splitlines():
                 if l.startswith(header):
                     in_range = True
                 elif l.startswith(footer):
-                    return f'' if len(res) == 0 else '\n'.join(res)
+                    return f"" if len(res) == 0 else "\n".join(res)
                 elif in_range:
                     res.append(l)
-            assert False, f'gtest did not report the result for ' + test_name
+            assert False, f"gtest did not report the result for " + test_name
 
         found_failed_test = False
 
-        with open(test.gtest_json_file, encoding='utf-8') as f:
+        with open(test.gtest_json_file, encoding="utf-8") as f:
             jf = json.load(f)
 
             if use_shuffle:
-                shard_env['GTEST_RANDOM_SEED'] = str(jf['random_seed'])
-            output = get_shard_header(shard_env) + '\n'
+                shard_env["GTEST_RANDOM_SEED"] = str(jf["random_seed"])
+            output = get_shard_header(shard_env) + "\n"
 
-            for testcase in jf['testsuites']:
-                for testinfo in testcase['testsuite']:
-                    result = testinfo['result']
-                    if result == 'SUPPRESSED' or result == 'SKIPPED':
+            for testcase in jf["testsuites"]:
+                for testinfo in testcase["testsuite"]:
+                    result = testinfo["result"]
+                    if result == "SUPPRESSED" or result == "SKIPPED":
                         continue
-                    testname = testcase['name'] + '.' + testinfo['name']
+                    testname = testcase["name"] + "." + testinfo["name"]
                     header = f"Script:\n--\n%s --gtest_filter=%s\n--\n" % (
-                        ' '.join(cmd), testname)
-                    if 'failures' in testinfo:
+                        " ".join(cmd),
+                        testname,
+                    )
+                    if "failures" in testinfo:
                         found_failed_test = True
                         output += header
                         test_out = get_test_stdout(testname)
                         if test_out:
-                            output += test_out + '\n\n'
-                        for fail in testinfo['failures']:
-                            output += fail['failure'] + '\n'
-                        output += '\n'
-                    elif result != 'COMPLETED':
+                            output += test_out + "\n\n"
+                        for fail in testinfo["failures"]:
+                            output += fail["failure"] + "\n"
+                        output += "\n"
+                    elif result != "COMPLETED":
                         output += header
-                        output += 'unresolved test result\n'
+                        output += "unresolved test result\n"
 
         # In some situations, like running tests with sanitizers, all test passes but
         # the shard could still fail due to memory issues.
@@ -216,7 +237,7 @@ def prepareCmd(self, cmd):
         Windows, so add the python executable to the command if this is a .py
         script.
         """
-        if cmd[0].endswith('.py'):
+        if cmd[0].endswith(".py"):
             cmd = [sys.executable] + cmd
         if self.run_under:
             if isinstance(self.run_under, list):
@@ -245,43 +266,52 @@ def remove_gtest(tests):
             has_failure_in_shard = False
 
             # Load json file to retrieve results.
-            with open(test.gtest_json_file, encoding='utf-8') as f:
+            with open(test.gtest_json_file, encoding="utf-8") as f:
                 try:
-                    testsuites = json.load(f)['testsuites']
+                    testsuites = json.load(f)["testsuites"]
                 except json.JSONDecodeError as e:
-                    raise RuntimeError("Failed to parse json file: " +
-                                       test.gtest_json_file + "\n" + e.doc)
+                    raise RuntimeError(
+                        "Failed to parse json file: "
+                        + test.gtest_json_file
+                        + "\n"
+                        + e.doc
+                    )
                 for testcase in testsuites:
-                    for testinfo in testcase['testsuite']:
+                    for testinfo in testcase["testsuite"]:
                         # Ignore disabled tests.
-                        if testinfo['result'] == 'SUPPRESSED':
+                        if testinfo["result"] == "SUPPRESSED":
                             continue
 
-                        testPath = test.path_in_suite[:-2] + (testcase['name'],
-                                                              testinfo['name'])
-                        subtest = lit.Test.Test(test.suite, testPath,
-                                                test.config, test.file_path)
+                        testPath = test.path_in_suite[:-2] + (
+                            testcase["name"],
+                            testinfo["name"],
+                        )
+                        subtest = lit.Test.Test(
+                            test.suite, testPath, test.config, test.file_path
+                        )
 
-                        testname = testcase['name'] + '.' + testinfo['name']
+                        testname = testcase["name"] + "." + testinfo["name"]
                         header = f"Script:\n--\n%s --gtest_filter=%s\n--\n" % (
-                            test.file_path, testname)
+                            test.file_path,
+                            testname,
+                        )
 
-                        output = ''
-                        if testinfo['result'] == 'SKIPPED':
+                        output = ""
+                        if testinfo["result"] == "SKIPPED":
                             returnCode = lit.Test.SKIPPED
-                        elif 'failures' in testinfo:
+                        elif "failures" in testinfo:
                             has_failure_in_shard = True
                             returnCode = lit.Test.FAIL
                             output = header
-                            for fail in testinfo['failures']:
-                                output += fail['failure'] + '\n'
-                        elif testinfo['result'] == 'COMPLETED':
+                            for fail in testinfo["failures"]:
+                                output += fail["failure"] + "\n"
+                        elif testinfo["result"] == "COMPLETED":
                             returnCode = lit.Test.PASS
                         else:
                             returnCode = lit.Test.UNRESOLVED
-                            output = header + 'unresolved test result\n'
+                            output = header + "unresolved test result\n"
 
-                        elapsed_time = float(testinfo['time'][:-1])
+                        elapsed_time = float(testinfo["time"][:-1])
                         res = lit.Test.Result(returnCode, output, elapsed_time)
                         res.pid = test.result.pid or 0
                         res.start = start_time

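The sharding above leans on googletest's own environment-variable protocol rather than splitting the test list inside lit. A minimal sketch of driving one shard (hypothetical helper; the real code goes through lit.util.executeCommand and also honors valgrind, run_under, and timeouts):

    import os
    import subprocess

    # gtest partitions its tests across GTEST_TOTAL_SHARDS processes,
    # runs only the subset for GTEST_SHARD_INDEX, and writes JSON
    # results to the file named by GTEST_OUTPUT.
    def run_shard(executable, idx, nshard, json_file):
        env = dict(os.environ)
        env.update(
            {
                "GTEST_OUTPUT": "json:" + json_file,
                "GTEST_TOTAL_SHARDS": str(nshard),
                "GTEST_SHARD_INDEX": str(idx),
            }
        )
        return subprocess.run([executable], env=env).returncode
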
diff --git a/llvm/utils/lit/lit/formats/shtest.py b/llvm/utils/lit/lit/formats/shtest.py
index e1be48cbd37f1..b4dc6f9ed32e2 100644
--- a/llvm/utils/lit/lit/formats/shtest.py
+++ b/llvm/utils/lit/lit/formats/shtest.py
@@ -17,14 +17,19 @@ class ShTest(FileBasedTest):
     The ShTest files contain some number of shell-like command pipelines, along
     with assertions about what should be in the output.
     """
-    def __init__(self, execute_external=False, extra_substitutions=[],
-                 preamble_commands=[]):
+
+    def __init__(
+        self, execute_external=False, extra_substitutions=[], preamble_commands=[]
+    ):
         self.execute_external = execute_external
         self.extra_substitutions = extra_substitutions
         self.preamble_commands = preamble_commands
 
     def execute(self, test, litConfig):
-        return lit.TestRunner.executeShTest(test, litConfig,
-                                            self.execute_external,
-                                            self.extra_substitutions,
-                                            self.preamble_commands)
+        return lit.TestRunner.executeShTest(
+            test,
+            litConfig,
+            self.execute_external,
+            self.extra_substitutions,
+            self.preamble_commands,
+        )

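ShTest is the format most lit.cfg files select. A typical configuration fragment (hypothetical suite name and suffixes; config is the object lit injects when it loads the file):

    import lit.formats

    # Each test file is scanned for shell-like RUN: pipelines, which
    # are executed with the configured substitutions applied; the test
    # passes when every pipeline exits 0.
    config.name = "MySuite"
    config.suffixes = [".ll", ".c"]
    config.test_format = lit.formats.ShTest(execute_external=False)
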
diff --git a/llvm/utils/lit/lit/llvm/config.py b/llvm/utils/lit/lit/llvm/config.py
index fd76f8d9c3633..619db4e369404 100644
--- a/llvm/utils/lit/lit/llvm/config.py
+++ b/llvm/utils/lit/lit/llvm/config.py
@@ -12,8 +12,8 @@
 
 lit_path_displayed = False
 
-class LLVMConfig(object):
 
+class LLVMConfig(object):
     def __init__(self, lit_config, config):
         self.lit_config = lit_config
         self.config = config
@@ -22,23 +22,23 @@ def __init__(self, lit_config, config):
 
         self.use_lit_shell = False
         # Tweak PATH for Win32 to decide to use bash.exe or not.
-        if sys.platform == 'win32':
+        if sys.platform == "win32":
             # Seek necessary tools in directories and set to $PATH.
             path = None
-            lit_tools_dir = getattr(config, 'lit_tools_dir', None)
-            required_tools = [
-                'cmp.exe', 'grep.exe', 'sed.exe', 'diff.exe', 'echo.exe']
-            path = self.lit_config.getToolsPath(lit_tools_dir,
-                                                config.environment['PATH'],
-                                                required_tools)
+            lit_tools_dir = getattr(config, "lit_tools_dir", None)
+            required_tools = ["cmp.exe", "grep.exe", "sed.exe", "
diff .exe", "echo.exe"]
+            path = self.lit_config.getToolsPath(
+                lit_tools_dir, config.environment["PATH"], required_tools
+            )
             if path is None:
                 path = self._find_git_windows_unix_tools(required_tools)
             if path is not None:
-                self.with_environment('PATH', path, append_path=True)
+                self.with_environment("PATH", path, append_path=True)
             # Many tools behave strangely if these environment variables aren't
             # set.
             self.with_system_environment(
-                ['SystemDrive', 'SystemRoot', 'TEMP', 'TMP', 'PLATFORM'])
+                ["SystemDrive", "SystemRoot", "TEMP", "TMP", "PLATFORM"]
+            )
             self.use_lit_shell = True
 
             global lit_path_displayed
@@ -49,114 +49,117 @@ def __init__(self, lit_config, config):
         # Choose between lit's internal shell pipeline runner and a real shell.
         # If LIT_USE_INTERNAL_SHELL is in the environment, we use that as an
         # override.
-        lit_shell_env = os.environ.get('LIT_USE_INTERNAL_SHELL')
+        lit_shell_env = os.environ.get("LIT_USE_INTERNAL_SHELL")
         if lit_shell_env:
             self.use_lit_shell = lit.util.pythonize_bool(lit_shell_env)
 
         if not self.use_lit_shell:
-            features.add('shell')
-
-        self.with_system_environment([
-            'ASAN_SYMBOLIZER_PATH',
-            'HWASAN_SYMBOLIZER_PATH',
-            'MSAN_SYMBOLIZER_PATH',
-            'TSAN_SYMBOLIZER_PATH',
-            'UBSAN_SYMBOLIZER_PATH'
-            'ASAN_OPTIONS',
-            'HWASAN_OPTIONS',
-            'MSAN_OPTIONS',
-            'TSAN_OPTIONS',
-            'UBSAN_OPTIONS',
-        ])
+            features.add("shell")
+
+        self.with_system_environment(
+            [
+                "ASAN_SYMBOLIZER_PATH",
+                "HWASAN_SYMBOLIZER_PATH",
+                "MSAN_SYMBOLIZER_PATH",
+                "TSAN_SYMBOLIZER_PATH",
+                "UBSAN_SYMBOLIZER_PATH" "ASAN_OPTIONS",
+                "HWASAN_OPTIONS",
+                "MSAN_OPTIONS",
+                "TSAN_OPTIONS",
+                "UBSAN_OPTIONS",
+            ]
+        )
 
         # Running on Darwin OS
-        if platform.system() == 'Darwin':
+        if platform.system() == "Darwin":
             # FIXME: lld uses the first, other projects use the second.
             # We should standardize on the former.
-            features.add('system-linker-mach-o')
-            features.add('system-darwin')
-        elif platform.system() == 'Windows':
+            features.add("system-linker-mach-o")
+            features.add("system-darwin")
+        elif platform.system() == "Windows":
             # For tests that require Windows to run.
-            features.add('system-windows')
-        elif platform.system() == 'Linux':
-            features.add('system-linux')
-        elif platform.system() in ['FreeBSD']:
-            features.add('system-freebsd')
-        elif platform.system() == 'NetBSD':
-            features.add('system-netbsd')
-        elif platform.system() == 'AIX':
-            features.add('system-aix')
-        elif platform.system() == 'SunOS':
-            features.add('system-solaris')
-        elif platform.system() == 'OS/390':
-            features.add('system-zos')
+            features.add("system-windows")
+        elif platform.system() == "Linux":
+            features.add("system-linux")
+        elif platform.system() in ["FreeBSD"]:
+            features.add("system-freebsd")
+        elif platform.system() == "NetBSD":
+            features.add("system-netbsd")
+        elif platform.system() == "AIX":
+            features.add("system-aix")
+        elif platform.system() == "SunOS":
+            features.add("system-solaris")
+        elif platform.system() == "OS/390":
+            features.add("system-zos")
 
         # Native compilation: host arch == default triple arch
         # Both of these values should probably be in every site config (e.g. as
         # part of the standard header.  But currently they aren't)
-        host_triple = getattr(config, 'host_triple', None)
-        target_triple = getattr(config, 'target_triple', None)
-        features.add('target=%s' % target_triple)
+        host_triple = getattr(config, "host_triple", None)
+        target_triple = getattr(config, "target_triple", None)
+        features.add("target=%s" % target_triple)
         if host_triple and host_triple == target_triple:
-            features.add('native')
+            features.add("native")
 
         # Sanitizers.
-        sanitizers = getattr(config, 'llvm_use_sanitizer', '')
-        sanitizers = frozenset(x.lower() for x in sanitizers.split(';'))
-        if 'address' in sanitizers:
-            features.add('asan')
-        if 'hwaddress' in sanitizers:
-            features.add('hwasan')
-        if 'memory' in sanitizers or 'memorywithorigins' in sanitizers:
-            features.add('msan')
-        if 'undefined' in sanitizers:
-            features.add('ubsan')
-
-        have_zlib = getattr(config, 'have_zlib', None)
+        sanitizers = getattr(config, "llvm_use_sanitizer", "")
+        sanitizers = frozenset(x.lower() for x in sanitizers.split(";"))
+        if "address" in sanitizers:
+            features.add("asan")
+        if "hwaddress" in sanitizers:
+            features.add("hwasan")
+        if "memory" in sanitizers or "memorywithorigins" in sanitizers:
+            features.add("msan")
+        if "undefined" in sanitizers:
+            features.add("ubsan")
+
+        have_zlib = getattr(config, "have_zlib", None)
         if have_zlib:
-            features.add('zlib')
-        have_zstd = getattr(config, 'have_zstd', None)
+            features.add("zlib")
+        have_zstd = getattr(config, "have_zstd", None)
         if have_zstd:
-            features.add('zstd')
+            features.add("zstd")
 
         # Check if we should run long running tests.
-        long_tests = lit_config.params.get('run_long_tests', None)
+        long_tests = lit_config.params.get("run_long_tests", None)
         if lit.util.pythonize_bool(long_tests):
-            features.add('long_tests')
+            features.add("long_tests")
 
         if target_triple:
-            if re.match(r'^x86_64.*-apple', target_triple):
-                features.add('x86_64-apple')
-                host_cxx = getattr(config, 'host_cxx', None)
-                if ('address' in sanitizers and
-                        self.get_clang_has_lsan(host_cxx, target_triple)):
+            if re.match(r"^x86_64.*-apple", target_triple):
+                features.add("x86_64-apple")
+                host_cxx = getattr(config, "host_cxx", None)
+                if "address" in sanitizers and self.get_clang_has_lsan(
+                    host_cxx, target_triple
+                ):
                     self.with_environment(
-                        'ASAN_OPTIONS', 'detect_leaks=1', append_path=True)
-            if re.match(r'^x86_64.*-linux', target_triple):
-                features.add('x86_64-linux')
-            if re.match(r'^i.86.*', target_triple):
-                features.add('target-x86')
-            elif re.match(r'^x86_64.*', target_triple):
-                features.add('target-x86_64')
-            elif re.match(r'^aarch64.*', target_triple):
-                features.add('target-aarch64')
-            elif re.match(r'^arm64.*', target_triple):
-                features.add('target-aarch64')
-            elif re.match(r'^arm.*', target_triple):
-                features.add('target-arm')
-
-        use_gmalloc = lit_config.params.get('use_gmalloc', None)
+                        "ASAN_OPTIONS", "detect_leaks=1", append_path=True
+                    )
+            if re.match(r"^x86_64.*-linux", target_triple):
+                features.add("x86_64-linux")
+            if re.match(r"^i.86.*", target_triple):
+                features.add("target-x86")
+            elif re.match(r"^x86_64.*", target_triple):
+                features.add("target-x86_64")
+            elif re.match(r"^aarch64.*", target_triple):
+                features.add("target-aarch64")
+            elif re.match(r"^arm64.*", target_triple):
+                features.add("target-aarch64")
+            elif re.match(r"^arm.*", target_triple):
+                features.add("target-arm")
+
+        use_gmalloc = lit_config.params.get("use_gmalloc", None)
         if lit.util.pythonize_bool(use_gmalloc):
             # Allow use of an explicit path for gmalloc library.
             # Will default to '/usr/lib/libgmalloc.dylib' if not set.
             gmalloc_path_str = lit_config.params.get(
-                'gmalloc_path', '/usr/lib/libgmalloc.dylib')
+                "gmalloc_path", "/usr/lib/libgmalloc.dylib"
+            )
             if gmalloc_path_str is not None:
-                self.with_environment(
-                    'DYLD_INSERT_LIBRARIES', gmalloc_path_str)
+                self.with_environment("DYLD_INSERT_LIBRARIES", gmalloc_path_str)
 
     def _find_git_windows_unix_tools(self, tools_needed):
-        assert(sys.platform == 'win32')
+        assert sys.platform == "win32"
         if sys.version_info.major >= 3:
             import winreg
         else:
@@ -167,15 +170,15 @@ def _find_git_windows_unix_tools(self, tools_needed):
         hives = [winreg.HKEY_LOCAL_MACHINE, winreg.HKEY_CURRENT_USER]
         for mask, hive in itertools.product(masks, hives):
             try:
-                with winreg.OpenKey(hive, r"SOFTWARE\GitForWindows", 0,
-                                    winreg.KEY_READ | mask) as key:
-                    install_root, _ = winreg.QueryValueEx(key, 'InstallPath')
+                with winreg.OpenKey(
+                    hive, r"SOFTWARE\GitForWindows", 0, winreg.KEY_READ | mask
+                ) as key:
+                    install_root, _ = winreg.QueryValueEx(key, "InstallPath")
 
                     if not install_root:
                         continue
-                    candidate_path = os.path.join(install_root, 'usr', 'bin')
-                    if not lit.util.checkToolsPath(
-                               candidate_path, tools_needed):
+                    candidate_path = os.path.join(install_root, "usr", "bin")
+                    if not lit.util.checkToolsPath(candidate_path, tools_needed):
                         continue
 
                     # We found it, stop enumerating.
@@ -234,22 +237,25 @@ def clear_environment(self, variables):
     def get_process_output(self, command):
         try:
             cmd = subprocess.Popen(
-                command, stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE, env=self.config.environment)
+                command,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                env=self.config.environment,
+            )
             stdout, stderr = cmd.communicate()
             stdout = lit.util.to_string(stdout)
             stderr = lit.util.to_string(stderr)
             return (stdout, stderr)
         except OSError:
-            self.lit_config.fatal('Could not run process %s' % command)
+            self.lit_config.fatal("Could not run process %s" % command)
 
     def feature_config(self, features):
         # Ask llvm-config about the specified feature.
         arguments = [x for (x, _) in features]
-        config_path = os.path.join(self.config.llvm_tools_dir, 'llvm-config')
+        config_path = os.path.join(self.config.llvm_tools_dir, "llvm-config")
 
         output, _ = self.get_process_output([config_path] + arguments)
-        lines = output.split('\n')
+        lines = output.split("\n")
 
         for (feature_line, (_, patterns)) in zip(lines, features):
             # We should have either a callable or a dictionary.  If it's a
@@ -269,18 +275,18 @@ def feature_config(self, features):
     def get_clang_builtin_include_dir(self, clang):
         # FIXME: Rather than just getting the version, we should have clang
         # print out its resource dir here in an easy to scrape form.
-        clang_dir, _ = self.get_process_output(
-            [clang, '-print-file-name=include'])
+        clang_dir, _ = self.get_process_output([clang, "-print-file-name=include"])
 
         if not clang_dir:
             print(clang)
             self.lit_config.fatal(
-                "Couldn't find the include dir for Clang ('%s')" % clang)
+                "Couldn't find the include dir for Clang ('%s')" % clang
+            )
 
         clang_dir = clang_dir.strip()
-        if sys.platform in ['win32'] and not self.use_lit_shell:
+        if sys.platform in ["win32"] and not self.use_lit_shell:
             # Don't pass dosish path separator to msys bash.exe.
-            clang_dir = clang_dir.replace('\\', '/')
+            clang_dir = clang_dir.replace("\\", "/")
         # Ensure the result is an ascii string, across Python2.5+ - Python3.
         return clang_dir
 
@@ -288,30 +294,31 @@ def get_clang_builtin_include_dir(self, clang):
     def get_clang_has_lsan(self, clang, triple):
         if not clang:
             self.lit_config.warning(
-                'config.host_cxx is unset but test suite is configured '
-                'to use sanitizers.')
+                "config.host_cxx is unset but test suite is configured "
+                "to use sanitizers."
+            )
             return False
 
         clang_binary = clang.split()[0]
-        version_string, _ = self.get_process_output(
-            [clang_binary, '--version'])
-        if not 'clang' in version_string:
+        version_string, _ = self.get_process_output([clang_binary, "--version"])
+        if not "clang" in version_string:
             self.lit_config.warning(
-                "compiler '%s' does not appear to be clang, " % clang_binary +
-                'but test suite is configured to use sanitizers.')
+                "compiler '%s' does not appear to be clang, " % clang_binary
+                + "but test suite is configured to use sanitizers."
+            )
             return False
 
-        if re.match(r'.*-linux', triple):
+        if re.match(r".*-linux", triple):
             return True
 
-        if re.match(r'^x86_64.*-apple', triple):
-            version_regex = re.search(r'version ([0-9]+)\.([0-9]+).([0-9]+)',
-                                      version_string)
+        if re.match(r"^x86_64.*-apple", triple):
+            version_regex = re.search(
+                r"version ([0-9]+)\.([0-9]+).([0-9]+)", version_string
+            )
             major_version_number = int(version_regex.group(1))
             minor_version_number = int(version_regex.group(2))
             patch_version_number = int(version_regex.group(3))
-            if ('Apple LLVM' in version_string or
-                'Apple clang' in version_string):
+            if "Apple LLVM" in version_string or "Apple clang" in version_string:
                 # Apple clang doesn't yet support LSan
                 return False
             return major_version_number >= 5
@@ -319,31 +326,31 @@ def get_clang_has_lsan(self, clang, triple):
         return False
 
     def make_itanium_abi_triple(self, triple):
-        m = re.match(r'(\w+)-(\w+)-(\w+)', triple)
+        m = re.match(r"(\w+)-(\w+)-(\w+)", triple)
         if not m:
             self.lit_config.fatal(
-                "Could not turn '%s' into Itanium ABI triple" % triple)
-        if m.group(3).lower() != 'windows':
+                "Could not turn '%s' into Itanium ABI triple" % triple
+            )
+        if m.group(3).lower() != "windows":
             # All non-windows triples use the Itanium ABI.
             return triple
-        return m.group(1) + '-' + m.group(2) + '-' + m.group(3) + '-gnu'
+        return m.group(1) + "-" + m.group(2) + "-" + m.group(3) + "-gnu"
 
     def make_msabi_triple(self, triple):
-        m = re.match(r'(\w+)-(\w+)-(\w+)', triple)
+        m = re.match(r"(\w+)-(\w+)-(\w+)", triple)
         if not m:
-            self.lit_config.fatal(
-                "Could not turn '%s' into MS ABI triple" % triple)
+            self.lit_config.fatal("Could not turn '%s' into MS ABI triple" % triple)
         isa = m.group(1).lower()
         vendor = m.group(2).lower()
         os = m.group(3).lower()
-        if os == 'windows' and re.match(r'.*-msvc$', triple):
+        if os == "windows" and re.match(r".*-msvc$", triple):
             # If the OS is windows and environment is msvc, we're done.
             return triple
-        if isa.startswith('x86') or isa == 'amd64' or re.match(r'i\d86', isa):
+        if isa.startswith("x86") or isa == "amd64" or re.match(r"i\d86", isa):
             # For x86 ISAs, adjust the OS.
-            return isa + '-' + vendor + '-windows-msvc'
+            return isa + "-" + vendor + "-windows-msvc"
         # -msvc is not supported for non-x86 targets; use a default.
-        return 'i686-pc-windows-msvc'
+        return "i686-pc-windows-msvc"
 
     def add_tool_substitutions(self, tools, search_dirs=None):
         if not search_dirs:
@@ -352,8 +359,7 @@ def add_tool_substitutions(self, tools, search_dirs=None):
         if lit.util.is_string(search_dirs):
             search_dirs = [search_dirs]
 
-        tools = [x if isinstance(x, ToolSubst) else ToolSubst(x)
-                 for x in tools]
+        tools = [x if isinstance(x, ToolSubst) else ToolSubst(x) for x in tools]
 
         search_dirs = os.pathsep.join(search_dirs)
         substitutions = []
@@ -385,47 +391,63 @@ def add_err_msg_substitutions(self):
         # messages for ENOENT, EISDIR, EINVAL and EACCES as a semi colon
         # separated string. LLVM testsuites can use get_errc_messages in cmake
         # to automatically get the messages and pass them into lit.
-        errc_messages = getattr(self.config, 'errc_messages', '')
+        errc_messages = getattr(self.config, "errc_messages", "")
         if len(errc_messages) != 0:
-            (errc_enoent, errc_eisdir,
-             errc_einval, errc_eacces) = errc_messages.split(';')
-            self.config.substitutions.append(
-                ('%errc_ENOENT', '\'' + errc_enoent + '\''))
-            self.config.substitutions.append(
-                ('%errc_EISDIR', '\'' + errc_eisdir + '\''))
-            self.config.substitutions.append(
-                ('%errc_EINVAL', '\'' + errc_einval + '\''))
-            self.config.substitutions.append(
-                ('%errc_EACCES', '\'' + errc_eacces + '\''))
+            (errc_enoent, errc_eisdir, errc_einval, errc_eacces) = errc_messages.split(
+                ";"
+            )
+            self.config.substitutions.append(("%errc_ENOENT", "'" + errc_enoent + "'"))
+            self.config.substitutions.append(("%errc_EISDIR", "'" + errc_eisdir + "'"))
+            self.config.substitutions.append(("%errc_EINVAL", "'" + errc_einval + "'"))
+            self.config.substitutions.append(("%errc_EACCES", "'" + errc_eacces + "'"))
         else:
             self.config.substitutions.append(
-                ('%errc_ENOENT', '\'' + os.strerror(errno.ENOENT) + '\''))
+                ("%errc_ENOENT", "'" + os.strerror(errno.ENOENT) + "'")
+            )
             self.config.substitutions.append(
-                ('%errc_EISDIR', '\'' + os.strerror(errno.EISDIR) + '\''))
+                ("%errc_EISDIR", "'" + os.strerror(errno.EISDIR) + "'")
+            )
             self.config.substitutions.append(
-                ('%errc_EINVAL', '\'' + os.strerror(errno.EINVAL) + '\''))
+                ("%errc_EINVAL", "'" + os.strerror(errno.EINVAL) + "'")
+            )
             self.config.substitutions.append(
-                ('%errc_EACCES', '\'' + os.strerror(errno.EACCES) + '\''))
+                ("%errc_EACCES", "'" + os.strerror(errno.EACCES) + "'")
+            )
 
     def use_default_substitutions(self):
         tool_patterns = [
-            ToolSubst('FileCheck', unresolved='fatal'),
+            ToolSubst("FileCheck", unresolved="fatal"),
             # Handle these specially as they are strings searched for during
             # testing.
-            ToolSubst(r'\| \bcount\b', command=FindTool('count'),
-                verbatim=True, unresolved='fatal'),
-            ToolSubst(r'\| \bnot\b', command=FindTool('not'),
-                verbatim=True, unresolved='fatal')]
-
-        self.config.substitutions.append(('%python', '"%s"' % (sys.executable)))
-
-        self.add_tool_substitutions(
-            tool_patterns, [self.config.llvm_tools_dir])
+            ToolSubst(
+                r"\| \bcount\b",
+                command=FindTool("count"),
+                verbatim=True,
+                unresolved="fatal",
+            ),
+            ToolSubst(
+                r"\| \bnot\b",
+                command=FindTool("not"),
+                verbatim=True,
+                unresolved="fatal",
+            ),
+        ]
+
+        self.config.substitutions.append(("%python", '"%s"' % (sys.executable)))
+
+        self.add_tool_substitutions(tool_patterns, [self.config.llvm_tools_dir])
 
         self.add_err_msg_substitutions()
 
-    def use_llvm_tool(self, name, search_env=None, required=False, quiet=False,
-                      search_paths=None, use_installed=False):
+    def use_llvm_tool(
+        self,
+        name,
+        search_env=None,
+        required=False,
+        quiet=False,
+        search_paths=None,
+        use_installed=False,
+    ):
         """Find the executable program 'name', optionally using the specified
         environment variable as an override before searching the build directory
         and then optionally the configuration's PATH."""
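
The docstring above describes a three-stage lookup. A rough standalone sketch of that order (the directory name and helper are hypothetical; the real method also honors search_paths and the quiet flag):

    import os
    import shutil

    def find_tool(name, search_env=None, use_installed=False):
        # 1. An environment variable override, if set, wins outright.
        if search_env and os.environ.get(search_env):
            return os.environ[search_env]
        # 2. Otherwise try the build tree's tool directory (hypothetical path).
        candidate = os.path.join("/path/to/build/bin", name)
        if os.path.isfile(candidate):
            return candidate
        # 3. Finally fall back to PATH, but only if installed tools are allowed.
        return shutil.which(name) if use_installed else None

    print(find_tool("FileCheck", use_installed=True))  # a path, or None
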
@@ -444,23 +466,29 @@ def use_llvm_tool(self, name, search_env=None, required=False, quiet=False,
 
         if not tool and use_installed:
             # Otherwise look in the path, if enabled.
-            tool = lit.util.which(name, self.config.environment['PATH'])
+            tool = lit.util.which(name, self.config.environment["PATH"])
 
         if required and not tool:
             message = "couldn't find '{}' program".format(name)
             if search_env:
-                message = message + \
-                    ', try setting {} in your environment'.format(search_env)
+                message = message + ", try setting {} in your environment".format(
+                    search_env
+                )
             self.lit_config.fatal(message)
 
         if tool:
             tool = os.path.normpath(tool)
             if not self.lit_config.quiet and not quiet:
-                self.lit_config.note('using {}: {}'.format(name, tool))
+                self.lit_config.note("using {}: {}".format(name, tool))
         return tool
 
-    def use_clang(self, additional_tool_dirs=[], additional_flags=[],
-                  required=True, use_installed=False):
+    def use_clang(
+        self,
+        additional_tool_dirs=[],
+        additional_flags=[],
+        required=True,
+        use_installed=False,
+    ):
         """Configure the test suite to be able to invoke clang.
 
         Sets up some environment variables important to clang, locates a
@@ -482,154 +510,205 @@ def use_clang(self, additional_tool_dirs=[], additional_flags=[],
         #     'VCINSTALLDIR', 'VC100COMNTOOLS', 'VC90COMNTOOLS',
         #     'VC80COMNTOOLS')
         possibly_dangerous_env_vars = [
-            'COMPILER_PATH', 'RC_DEBUG_OPTIONS',
-            'CINDEXTEST_PREAMBLE_FILE', 'LIBRARY_PATH',
-            'CPATH', 'C_INCLUDE_PATH', 'CPLUS_INCLUDE_PATH',
-            'OBJC_INCLUDE_PATH', 'OBJCPLUS_INCLUDE_PATH',
-            'LIBCLANG_TIMING', 'LIBCLANG_OBJTRACKING',
-            'LIBCLANG_LOGGING', 'LIBCLANG_BGPRIO_INDEX',
-            'LIBCLANG_BGPRIO_EDIT', 'LIBCLANG_NOTHREADS',
-            'LIBCLANG_RESOURCE_USAGE',
-            'LIBCLANG_CODE_COMPLETION_LOGGING',
-            ]
+            "COMPILER_PATH",
+            "RC_DEBUG_OPTIONS",
+            "CINDEXTEST_PREAMBLE_FILE",
+            "LIBRARY_PATH",
+            "CPATH",
+            "C_INCLUDE_PATH",
+            "CPLUS_INCLUDE_PATH",
+            "OBJC_INCLUDE_PATH",
+            "OBJCPLUS_INCLUDE_PATH",
+            "LIBCLANG_TIMING",
+            "LIBCLANG_OBJTRACKING",
+            "LIBCLANG_LOGGING",
+            "LIBCLANG_BGPRIO_INDEX",
+            "LIBCLANG_BGPRIO_EDIT",
+            "LIBCLANG_NOTHREADS",
+            "LIBCLANG_RESOURCE_USAGE",
+            "LIBCLANG_CODE_COMPLETION_LOGGING",
+        ]
         # Clang/Win32 may refer to %INCLUDE%. vsvarsall.bat sets it.
-        if platform.system() != 'Windows':
-            possibly_dangerous_env_vars.append('INCLUDE')
+        if platform.system() != "Windows":
+            possibly_dangerous_env_vars.append("INCLUDE")
 
         self.clear_environment(possibly_dangerous_env_vars)
 
         # Tweak the PATH to include the tools dir and the scripts dir.
         # Put Clang first to avoid LLVM from overriding out-of-tree clang
         # builds.
-        exe_dir_props = [self.config.name.lower() + '_tools_dir',
-                         'clang_tools_dir', 'llvm_tools_dir']
-        paths = [getattr(self.config, pp) for pp in exe_dir_props
-                 if getattr(self.config, pp, None)]
+        exe_dir_props = [
+            self.config.name.lower() + "_tools_dir",
+            "clang_tools_dir",
+            "llvm_tools_dir",
+        ]
+        paths = [
+            getattr(self.config, pp)
+            for pp in exe_dir_props
+            if getattr(self.config, pp, None)
+        ]
         paths = additional_tool_dirs + paths
-        self.with_environment('PATH', paths, append_path=True)
+        self.with_environment("PATH", paths, append_path=True)
 
         lib_dir_props = [
-            self.config.name.lower() + '_libs_dir',
-            'llvm_shlib_dir',
-            'llvm_libs_dir',
-            ]
-        lib_paths = [getattr(self.config, pp) for pp in lib_dir_props
-                     if getattr(self.config, pp, None)]
-
-        self.with_environment('LD_LIBRARY_PATH', lib_paths, append_path=True)
-
-        shl = getattr(self.config, 'llvm_shlib_dir', None)
-        pext = getattr(self.config, 'llvm_plugin_ext', None)
+            self.config.name.lower() + "_libs_dir",
+            "llvm_shlib_dir",
+            "llvm_libs_dir",
+        ]
+        lib_paths = [
+            getattr(self.config, pp)
+            for pp in lib_dir_props
+            if getattr(self.config, pp, None)
+        ]
+
+        self.with_environment("LD_LIBRARY_PATH", lib_paths, append_path=True)
+
+        shl = getattr(self.config, "llvm_shlib_dir", None)
+        pext = getattr(self.config, "llvm_plugin_ext", None)
         if shl:
-            self.config.substitutions.append(('%llvmshlibdir', shl))
+            self.config.substitutions.append(("%llvmshlibdir", shl))
         if pext:
-            self.config.substitutions.append(('%pluginext', pext))
+            self.config.substitutions.append(("%pluginext", pext))
 
         # Discover the 'clang' and 'clangcc' to use.
         self.config.clang = self.use_llvm_tool(
-            'clang', search_env='CLANG', required=required,
-            search_paths=paths, use_installed=use_installed)
+            "clang",
+            search_env="CLANG",
+            required=required,
+            search_paths=paths,
+            use_installed=use_installed,
+        )
         if self.config.clang:
-          self.config.available_features.add('clang')
-          builtin_include_dir = self.get_clang_builtin_include_dir(
-              self.config.clang)
-          tool_substitutions = [
-              ToolSubst('%clang', command=self.config.clang,
-                        extra_args=additional_flags),
-              ToolSubst('%clang_analyze_cc1', command='%clang_cc1',
-                        extra_args=['-analyze', '%analyze',
-                                    '-setup-static-analyzer']+additional_flags),
-              ToolSubst('%clang_cc1', command=self.config.clang,
-                        extra_args=['-cc1', '-internal-isystem',
-                                    builtin_include_dir, '-nostdsysteminc'] +
-                                   additional_flags),
-              ToolSubst('%clang_cpp', command=self.config.clang,
-                        extra_args=['--driver-mode=cpp']+additional_flags),
-              ToolSubst('%clang_cl', command=self.config.clang,
-                        extra_args=['--driver-mode=cl']+additional_flags),
-              ToolSubst('%clang_dxc', command=self.config.clang,
-                        extra_args=['--driver-mode=dxc']+additional_flags),
-              ToolSubst('%clangxx', command=self.config.clang,
-                        extra_args=['--driver-mode=g++']+additional_flags),
-              ]
-          self.add_tool_substitutions(tool_substitutions)
-          self.config.substitutions.append(
-              ('%resource_dir', builtin_include_dir))
+            self.config.available_features.add("clang")
+            builtin_include_dir = self.get_clang_builtin_include_dir(self.config.clang)
+            tool_substitutions = [
+                ToolSubst(
+                    "%clang", command=self.config.clang, extra_args=additional_flags
+                ),
+                ToolSubst(
+                    "%clang_analyze_cc1",
+                    command="%clang_cc1",
+                    extra_args=["-analyze", "%analyze", "-setup-static-analyzer"]
+                    + additional_flags,
+                ),
+                ToolSubst(
+                    "%clang_cc1",
+                    command=self.config.clang,
+                    extra_args=[
+                        "-cc1",
+                        "-internal-isystem",
+                        builtin_include_dir,
+                        "-nostdsysteminc",
+                    ]
+                    + additional_flags,
+                ),
+                ToolSubst(
+                    "%clang_cpp",
+                    command=self.config.clang,
+                    extra_args=["--driver-mode=cpp"] + additional_flags,
+                ),
+                ToolSubst(
+                    "%clang_cl",
+                    command=self.config.clang,
+                    extra_args=["--driver-mode=cl"] + additional_flags,
+                ),
+                ToolSubst(
+                    "%clang_dxc",
+                    command=self.config.clang,
+                    extra_args=["--driver-mode=dxc"] + additional_flags,
+                ),
+                ToolSubst(
+                    "%clangxx",
+                    command=self.config.clang,
+                    extra_args=["--driver-mode=g++"] + additional_flags,
+                ),
+            ]
+            self.add_tool_substitutions(tool_substitutions)
+            self.config.substitutions.append(("%resource_dir", builtin_include_dir))
 
         self.config.substitutions.append(
-            ('%itanium_abi_triple',
-             self.make_itanium_abi_triple(self.config.target_triple)))
+            (
+                "%itanium_abi_triple",
+                self.make_itanium_abi_triple(self.config.target_triple),
+            )
+        )
         self.config.substitutions.append(
-            ('%ms_abi_triple',
-             self.make_msabi_triple(self.config.target_triple)))
+            ("%ms_abi_triple", self.make_msabi_triple(self.config.target_triple))
+        )
 
         # The host triple might not be set, at least if we're compiling clang
         # from an already installed llvm.
-        if (self.config.host_triple and
-                self.config.host_triple != '@LLVM_HOST_TRIPLE@'):
+        if self.config.host_triple and self.config.host_triple != "@LLVM_HOST_TRIPLE@":
             self.config.substitutions.append(
-                ('%target_itanium_abi_host_triple',
-                 '--target=' + self.make_itanium_abi_triple(
-                                   self.config.host_triple)))
+                (
+                    "%target_itanium_abi_host_triple",
+                    "--target=" + self.make_itanium_abi_triple(self.config.host_triple),
+                )
+            )
         else:
-            self.config.substitutions.append(
-                ('%target_itanium_abi_host_triple', ''))
+            self.config.substitutions.append(("%target_itanium_abi_host_triple", ""))
 
         # TODO: Many tests work across many language standards. Before
         # https://discourse.llvm.org/t/lit-run-a-run-line-multiple-times-with-different-replacements/64932
         # has a solution, provide substitutions to conveniently try every standard with LIT_CLANG_STD_GROUP.
-        clang_std_group = int(os.environ.get('LIT_CLANG_STD_GROUP', '0'))
-        clang_std_values = ('98', '11', '14', '17', '20', '2b')
+        clang_std_group = int(os.environ.get("LIT_CLANG_STD_GROUP", "0"))
+        clang_std_values = ("98", "11", "14", "17", "20", "2b")
+
         def add_std_cxx(s):
             t = s[8:]
-            if t.endswith('-'):
+            if t.endswith("-"):
                 t += clang_std_values[-1]
-            l = clang_std_values.index(t[0:2] if t[0:2] != '23' else '2b')
+            l = clang_std_values.index(t[0:2] if t[0:2] != "23" else "2b")
             h = clang_std_values.index(t[3:5])
             # Let LIT_CLANG_STD_GROUP=0 pick the highest value (likely the most relevant
             # standard).
-            l = h - clang_std_group % (h-l+1)
-            self.config.substitutions.append((s, '-std=c++' + clang_std_values[l]))
-
-        add_std_cxx('%std_cxx98-14')
-        add_std_cxx('%std_cxx98-')
-        add_std_cxx('%std_cxx11-14')
-        add_std_cxx('%std_cxx11-')
-        add_std_cxx('%std_cxx14-')
-        add_std_cxx('%std_cxx17-20')
-        add_std_cxx('%std_cxx17-')
-        add_std_cxx('%std_cxx20-')
-        add_std_cxx('%std_cxx23-')
+            l = h - clang_std_group % (h - l + 1)
+            self.config.substitutions.append((s, "-std=c++" + clang_std_values[l]))
+
+        add_std_cxx("%std_cxx98-14")
+        add_std_cxx("%std_cxx98-")
+        add_std_cxx("%std_cxx11-14")
+        add_std_cxx("%std_cxx11-")
+        add_std_cxx("%std_cxx14-")
+        add_std_cxx("%std_cxx17-20")
+        add_std_cxx("%std_cxx17-")
+        add_std_cxx("%std_cxx20-")
+        add_std_cxx("%std_cxx23-")
 
         # FIXME: Find nicer way to prohibit this.
         def prefer(this, to):
-            return '''\"*** Do not use '%s' in tests, use '%s'. ***\"''' % (
-                to, this)
-        self.config.substitutions.append(
-            (' clang ', prefer('%clang', 'clang')))
+            return '''\"*** Do not use '%s' in tests, use '%s'. ***\"''' % (to, this)
+
+        self.config.substitutions.append((" clang ", prefer("%clang", "clang")))
         self.config.substitutions.append(
-            (r' clang\+\+ ', prefer('%clangxx', 'clang++')))
+            (r" clang\+\+ ", prefer("%clangxx", "clang++"))
+        )
         self.config.substitutions.append(
-            (' clang-cc ', prefer('%clang_cc1', 'clang-cc')))
+            (" clang-cc ", prefer("%clang_cc1", "clang-cc"))
+        )
         self.config.substitutions.append(
-            (' clang-cl ', prefer('%clang_cl', 'clang-cl')))
+            (" clang-cl ", prefer("%clang_cl", "clang-cl"))
+        )
         self.config.substitutions.append(
-            (' clang -cc1 -analyze ',
-             prefer('%clang_analyze_cc1', 'clang -cc1 -analyze')))
+            (
+                " clang -cc1 -analyze ",
+                prefer("%clang_analyze_cc1", "clang -cc1 -analyze"),
+            )
+        )
         self.config.substitutions.append(
-            (' clang -cc1 ', prefer('%clang_cc1', 'clang -cc1')))
+            (" clang -cc1 ", prefer("%clang_cc1", "clang -cc1"))
+        )
         self.config.substitutions.append(
-            (' %clang-cc1 ',
-             '''\"*** invalid substitution, use '%clang_cc1'. ***\"'''))
+            (" %clang-cc1 ", '''\"*** invalid substitution, use '%clang_cc1'. ***\"''')
+        )
         self.config.substitutions.append(
-            (' %clang-cpp ',
-             '''\"*** invalid substitution, use '%clang_cpp'. ***\"'''))
+            (" %clang-cpp ", '''\"*** invalid substitution, use '%clang_cpp'. ***\"''')
+        )
         self.config.substitutions.append(
-            (' %clang-cl ',
-             '''\"*** invalid substitution, use '%clang_cl'. ***\"'''))
+            (" %clang-cl ", '''\"*** invalid substitution, use '%clang_cl'. ***\"''')
+        )
 
-    def use_lld(self, additional_tool_dirs=[], required=True,
-                use_installed=False):
+    def use_lld(self, additional_tool_dirs=[], required=True, use_installed=False):
         """Configure the test suite to be able to invoke lld.
 
         Sets up some environment variables important to lld, locates a
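
The arithmetic in the %std_cxx block earlier in this hunk is easier to see with concrete values; a self-contained copy of the selection logic and what it picks for a few LIT_CLANG_STD_GROUP values:

    clang_std_values = ("98", "11", "14", "17", "20", "2b")

    def pick(substitution, group):
        # Same arithmetic as add_std_cxx above, with the group passed in.
        t = substitution[8:]
        if t.endswith("-"):
            t += clang_std_values[-1]
        l = clang_std_values.index(t[0:2] if t[0:2] != "23" else "2b")
        h = clang_std_values.index(t[3:5])
        l = h - group % (h - l + 1)
        return "-std=c++" + clang_std_values[l]

    print(pick("%std_cxx98-14", 0))  # -std=c++14 (group 0 picks the highest)
    print(pick("%std_cxx98-14", 1))  # -std=c++11
    print(pick("%std_cxx98-14", 2))  # -std=c++98
    print(pick("%std_cxx17-", 0))    # -std=c++2b (open range ends at 2b)
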
@@ -639,49 +718,71 @@ def use_lld(self, additional_tool_dirs=[], required=True,
         """
 
         # Tweak the PATH to include the tools dir and the scripts dir.
-        exe_dir_props = [self.config.name.lower() + '_tools_dir',
-                         'lld_tools_dir', 'llvm_tools_dir']
-        paths = [getattr(self.config, pp) for pp in exe_dir_props
-                 if getattr(self.config, pp, None)]
+        exe_dir_props = [
+            self.config.name.lower() + "_tools_dir",
+            "lld_tools_dir",
+            "llvm_tools_dir",
+        ]
+        paths = [
+            getattr(self.config, pp)
+            for pp in exe_dir_props
+            if getattr(self.config, pp, None)
+        ]
         paths = additional_tool_dirs + paths
-        self.with_environment('PATH', paths, append_path=True)
+        self.with_environment("PATH", paths, append_path=True)
 
-        lib_dir_props = [self.config.name.lower() + '_libs_dir',
-                         'lld_libs_dir', 'llvm_shlib_dir', 'llvm_libs_dir']
-        lib_paths = [getattr(self.config, pp) for pp in lib_dir_props
-                     if getattr(self.config, pp, None)]
-
-        self.with_environment('LD_LIBRARY_PATH', lib_paths, append_path=True)
+        lib_dir_props = [
+            self.config.name.lower() + "_libs_dir",
+            "lld_libs_dir",
+            "llvm_shlib_dir",
+            "llvm_libs_dir",
+        ]
+        lib_paths = [
+            getattr(self.config, pp)
+            for pp in lib_dir_props
+            if getattr(self.config, pp, None)
+        ]
+
+        self.with_environment("LD_LIBRARY_PATH", lib_paths, append_path=True)
 
         # Discover the LLD executables to use.
 
-        ld_lld = self.use_llvm_tool('ld.lld', required=required,
-                                    search_paths=paths,
-                                    use_installed=use_installed)
-        lld_link = self.use_llvm_tool('lld-link', required=required,
-                                      search_paths=paths,
-                                      use_installed=use_installed)
-        ld64_lld = self.use_llvm_tool('ld64.lld', required=required,
-                                      search_paths=paths,
-                                      use_installed=use_installed)
-        wasm_ld = self.use_llvm_tool('wasm-ld', required=required,
-                                     search_paths=paths,
-                                     use_installed=use_installed)
+        ld_lld = self.use_llvm_tool(
+            "ld.lld", required=required, search_paths=paths, use_installed=use_installed
+        )
+        lld_link = self.use_llvm_tool(
+            "lld-link",
+            required=required,
+            search_paths=paths,
+            use_installed=use_installed,
+        )
+        ld64_lld = self.use_llvm_tool(
+            "ld64.lld",
+            required=required,
+            search_paths=paths,
+            use_installed=use_installed,
+        )
+        wasm_ld = self.use_llvm_tool(
+            "wasm-ld",
+            required=required,
+            search_paths=paths,
+            use_installed=use_installed,
+        )
 
         was_found = ld_lld and lld_link and ld64_lld and wasm_ld
         tool_substitutions = []
         if ld_lld:
-            tool_substitutions.append(ToolSubst(r'ld\.lld', command=ld_lld))
-            self.config.available_features.add('ld.lld')
+            tool_substitutions.append(ToolSubst(r"ld\.lld", command=ld_lld))
+            self.config.available_features.add("ld.lld")
         if lld_link:
-            tool_substitutions.append(ToolSubst('lld-link', command=lld_link))
-            self.config.available_features.add('lld-link')
+            tool_substitutions.append(ToolSubst("lld-link", command=lld_link))
+            self.config.available_features.add("lld-link")
         if ld64_lld:
-            tool_substitutions.append(ToolSubst(r'ld64\.lld', command=ld64_lld))
-            self.config.available_features.add('ld64.lld')
+            tool_substitutions.append(ToolSubst(r"ld64\.lld", command=ld64_lld))
+            self.config.available_features.add("ld64.lld")
         if wasm_ld:
-            tool_substitutions.append(ToolSubst('wasm-ld', command=wasm_ld))
-            self.config.available_features.add('wasm-ld')
+            tool_substitutions.append(ToolSubst("wasm-ld", command=wasm_ld))
+            self.config.available_features.add("wasm-ld")
         self.add_tool_substitutions(tool_substitutions)
 
         return was_found
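
One subtlety worth calling out in the hunk above: each linker flavor that is found still registers its feature and substitution, but the return value reports success only when all four are present. In effect:

    # was_found is a conjunction; partial installs still get per-tool features.
    found = {"ld.lld": True, "lld-link": True, "ld64.lld": False, "wasm-ld": True}
    print(all(found.values()))  # False, even though three linkers are usable
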

diff --git a/llvm/utils/lit/lit/llvm/subst.py b/llvm/utils/lit/lit/llvm/subst.py
index f2cc266dec34e..e00fdaa516502 100644
--- a/llvm/utils/lit/lit/llvm/subst.py
+++ b/llvm/utils/lit/lit/llvm/subst.py
@@ -21,8 +21,8 @@ def resolve(self, config, dirs):
             if not command:
                 return None
 
-        if self.name == 'llc' and os.environ.get('LLVM_ENABLE_MACHINE_VERIFIER') == '1':
-            command += ' -verify-machineinstrs'
+        if self.name == "llc" and os.environ.get("LLVM_ENABLE_MACHINE_VERIFIER") == "1":
+            command += " -verify-machineinstrs"
         return command
 
 
@@ -35,8 +35,16 @@ class ToolSubst(object):
 
     """
 
-    def __init__(self, key, command=None, pre=r'.-^/\<', post='-.', verbatim=False,
-                 unresolved='warn', extra_args=None):
+    def __init__(
+        self,
+        key,
+        command=None,
+        pre=r".-^/\<",
+        post="-.",
+        verbatim=False,
+        unresolved="warn",
+        extra_args=None,
+    ):
         """Construct a ToolSubst.
 
         key: The text which is to be substituted.
@@ -81,19 +89,19 @@ def __init__(self, key, command=None, pre=r'.-^/\<', post='-.', verbatim=False,
             self.regex = key
             return
 
-        def not_in(chars, where=''):
+        def not_in(chars, where=""):
             if not chars:
-                return ''
-            pattern_str = '|'.join(re.escape(x) for x in chars)
-            return r'(?{}!({}))'.format(where, pattern_str)
+                return ""
+            pattern_str = "|".join(re.escape(x) for x in chars)
+            return r"(?{}!({}))".format(where, pattern_str)
 
         def wordify(word):
             match = wordifier.match(word)
             introducer = match.group(1)
             word = match.group(2)
-            return introducer + r'\b' + word + r'\b'
+            return introducer + r"\b" + word + r"\b"
 
-        self.regex = not_in(pre, '<') + wordify(key) + not_in(post)
+        self.regex = not_in(pre, "<") + wordify(key) + not_in(post)
 
     def resolve(self, config, search_dirs):
         # Extract the tool name from the pattern.  This relies on the tool
@@ -115,29 +123,30 @@ def resolve(self, config, search_dirs):
 
         if command_str:
             if self.extra_args:
-                command_str = ' '.join([command_str] + self.extra_args)
+                command_str = " ".join([command_str] + self.extra_args)
         else:
-            if self.unresolved == 'warn':
+            if self.unresolved == "warn":
                 # Warn, but still provide a substitution.
                 config.lit_config.note(
-                    'Did not find ' + tool_name + ' in %s' % search_dirs)
-                command_str = os.path.join(
-                    config.config.llvm_tools_dir, tool_name)
-            elif self.unresolved == 'fatal':
+                    "Did not find " + tool_name + " in %s" % search_dirs
+                )
+                command_str = os.path.join(config.config.llvm_tools_dir, tool_name)
+            elif self.unresolved == "fatal":
                 # The function won't even return in this case, this leads to
                 # sys.exit
                 config.lit_config.fatal(
-                    'Did not find ' + tool_name + ' in %s' % search_dirs)
-            elif self.unresolved == 'break':
+                    "Did not find " + tool_name + " in %s" % search_dirs
+                )
+            elif self.unresolved == "break":
                 # By returning a valid result with an empty command, the
                 # caller treats this as a failure.
                 pass
-            elif self.unresolved == 'ignore':
+            elif self.unresolved == "ignore":
                 # By returning None, the caller just assumes there was no
                 # match in the first place.
                 return None
             else:
-                raise 'Unexpected value for ToolSubst.unresolved'
+                raise "Unexpected value for ToolSubst.unresolved"
         if command_str:
             self.was_resolved = True
         return (self.regex, tool_pipe, command_str)
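
For readers puzzling over not_in/wordify earlier in this file: the generated pattern wraps the key in word boundaries plus negative look-arounds so near-miss spellings are not substituted. A standalone demonstration (wordify reduced to plain \b anchors for simplicity):

    import re

    def not_in(chars, where=""):
        if not chars:
            return ""
        pattern_str = "|".join(re.escape(x) for x in chars)
        return r"(?{}!({}))".format(where, pattern_str)

    regex = not_in(r".-^/\<", "<") + r"\bFileCheck\b" + not_in("-.")
    print(bool(re.search(regex, "FileCheck %s")))        # True
    print(bool(re.search(regex, "/bin/FileCheck %s")))   # False: '/' before key
    print(bool(re.search(regex, "FileCheck-14 %s")))     # False: '-' after key
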

diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
index dc90321ddfc9f..236b2e7624841 100755
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -25,7 +25,7 @@
 def main(builtin_params={}):
     opts = lit.cl_arguments.parse_args()
     params = create_params(builtin_params, opts.user_params)
-    is_windows = platform.system() == 'Windows'
+    is_windows = platform.system() == "Windows"
 
     lit_config = lit.LitConfig.LitConfig(
         progname=os.path.basename(sys.argv[0]),
@@ -40,12 +40,14 @@ def main(builtin_params={}):
         order=opts.order,
         params=params,
         config_prefix=opts.configPrefix,
-        echo_all_commands=opts.echoAllCommands)
+        echo_all_commands=opts.echoAllCommands,
+    )
 
-    discovered_tests = lit.discovery.find_tests_for_inputs(lit_config, opts.test_paths,
-                                                           opts.indirectlyRunCheck)
+    discovered_tests = lit.discovery.find_tests_for_inputs(
+        lit_config, opts.test_paths, opts.indirectlyRunCheck
+    )
     if not discovered_tests:
-        sys.stderr.write('error: did not discover any tests for provided path(s)\n')
+        sys.stderr.write("error: did not discover any tests for provided path(s)\n")
         sys.exit(2)
 
     if opts.show_suites or opts.show_tests:
@@ -53,37 +55,50 @@ def main(builtin_params={}):
         sys.exit(0)
 
     if opts.show_used_features:
-        features = set(itertools.chain.from_iterable(t.getUsedFeatures() for t in discovered_tests if t.gtest_json_file is None))
-        print(' '.join(sorted(features)))
+        features = set(
+            itertools.chain.from_iterable(
+                t.getUsedFeatures()
+                for t in discovered_tests
+                if t.gtest_json_file is None
+            )
+        )
+        print(" ".join(sorted(features)))
         sys.exit(0)
 
     # Command line overrides configuration for maxIndividualTestTime.
     if opts.maxIndividualTestTime is not None:  # `not None` is important (default: 0)
         if opts.maxIndividualTestTime != lit_config.maxIndividualTestTime:
-            lit_config.note(('The test suite configuration requested an individual'
-                ' test timeout of {0} seconds but a timeout of {1} seconds was'
-                ' requested on the command line. Forcing timeout to be {1}'
-                ' seconds')
-                .format(lit_config.maxIndividualTestTime,
-                        opts.maxIndividualTestTime))
+            lit_config.note(
+                (
+                    "The test suite configuration requested an individual"
+                    " test timeout of {0} seconds but a timeout of {1} seconds was"
+                    " requested on the command line. Forcing timeout to be {1}"
+                    " seconds"
+                ).format(lit_config.maxIndividualTestTime, opts.maxIndividualTestTime)
+            )
             lit_config.maxIndividualTestTime = opts.maxIndividualTestTime
 
     determine_order(discovered_tests, opts.order)
 
-    selected_tests = [t for t in discovered_tests if
-        opts.filter.search(t.getFullName()) and not
-        opts.filter_out.search(t.getFullName())]
+    selected_tests = [
+        t
+        for t in discovered_tests
+        if opts.filter.search(t.getFullName())
+        and not opts.filter_out.search(t.getFullName())
+    ]
 
     if not selected_tests:
-        sys.stderr.write('error: filter did not match any tests '
-                         '(of %d discovered).  ' % len(discovered_tests))
+        sys.stderr.write(
+            "error: filter did not match any tests "
+            "(of %d discovered).  " % len(discovered_tests)
+        )
         if opts.allow_empty_runs:
-            sys.stderr.write("Suppressing error because '--allow-empty-runs' "
-                             'was specified.\n')
+            sys.stderr.write(
+                "Suppressing error because '--allow-empty-runs' " "was specified.\n"
+            )
             sys.exit(0)
         else:
-            sys.stderr.write("Use '--allow-empty-runs' to suppress this "
-                             'error.\n')
+            sys.stderr.write("Use '--allow-empty-runs' to suppress this " "error.\n")
             sys.exit(2)
 
     # When running multiple shards, don't include skipped tests in the xunit
@@ -92,11 +107,13 @@ def main(builtin_params={}):
         (run, shards) = opts.shard
         selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
         if not selected_tests:
-            sys.stderr.write('warning: shard does not contain any tests.  '
-                             'Consider decreasing the number of shards.\n')
+            sys.stderr.write(
+                "warning: shard does not contain any tests.  "
+                "Consider decreasing the number of shards.\n"
+            )
             sys.exit(0)
 
-    selected_tests = selected_tests[:opts.max_tests]
+    selected_tests = selected_tests[: opts.max_tests]
 
     mark_xfail(discovered_tests, opts)
 
@@ -109,7 +126,8 @@ def main(builtin_params={}):
     record_test_times(selected_tests, lit_config)
 
     selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
-        selected_tests, discovered_tests)
+        selected_tests, discovered_tests
+    )
 
     if opts.time_tests:
         print_histogram(discovered_tests)
@@ -121,23 +139,26 @@ def main(builtin_params={}):
         report.write_results(tests_for_report, elapsed)
 
     if lit_config.numErrors:
-        sys.stderr.write('\n%d error(s) in tests\n' % lit_config.numErrors)
+        sys.stderr.write("\n%d error(s) in tests\n" % lit_config.numErrors)
         sys.exit(2)
 
     if lit_config.numWarnings:
-        sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)
+        sys.stderr.write("\n%d warning(s) in tests\n" % lit_config.numWarnings)
 
     has_failure = any(t.isFailure() for t in discovered_tests)
     if has_failure:
         if opts.ignoreFail:
-            sys.stderr.write("\nExiting with status 0 instead of 1 because "
-                             "'--ignore-fail' was specified.\n")
+            sys.stderr.write(
+                "\nExiting with status 0 instead of 1 because "
+                "'--ignore-fail' was specified.\n"
+            )
         else:
             sys.exit(1)
 
+
 def create_params(builtin_params, user_params):
     def parse(p):
-        return p.split('=', 1) if '=' in p else (p, '')
+        return p.split("=", 1) if "=" in p else (p, "")
 
     params = dict(builtin_params)
     params.update([parse(p) for p in user_params])
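
The parse helper above splits only on the first '=', so values may themselves contain '='; bare names map to the empty string:

    def parse(p):
        return p.split("=", 1) if "=" in p else (p, "")

    print(parse("FOO=bar=baz"))  # ['FOO', 'bar=baz'] - split once, keep the rest
    print(parse("FLAG"))         # ('FLAG', '') - bare names get an empty value
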
@@ -149,36 +170,40 @@ def print_discovered(tests, show_suites, show_tests):
 
     if show_suites:
         tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
-        print('-- Test Suites --')
+        print("-- Test Suites --")
         for suite, test_iter in tests_by_suite:
             test_count = sum(1 for _ in test_iter)
-            print('  %s - %d tests' % (suite.name, test_count))
-            print('    Source Root: %s' % suite.source_root)
-            print('    Exec Root  : %s' % suite.exec_root)
-            features = ' '.join(sorted(suite.config.available_features))
-            print('    Available Features: %s' % features)
+            print("  %s - %d tests" % (suite.name, test_count))
+            print("    Source Root: %s" % suite.source_root)
+            print("    Exec Root  : %s" % suite.exec_root)
+            features = " ".join(sorted(suite.config.available_features))
+            print("    Available Features: %s" % features)
             substitutions = sorted(suite.config.substitutions)
-            substitutions = ('%s => %s' % (x, y) for (x, y) in substitutions)
-            substitutions = '\n'.ljust(30).join(substitutions)
-            print('    Available Substitutions: %s' % substitutions)
+            substitutions = ("%s => %s" % (x, y) for (x, y) in substitutions)
+            substitutions = "\n".ljust(30).join(substitutions)
+            print("    Available Substitutions: %s" % substitutions)
 
     if show_tests:
-        print('-- Available Tests --')
+        print("-- Available Tests --")
         for t in tests:
-            print('  %s' % t.getFullName())
+            print("  %s" % t.getFullName())
 
 
 def determine_order(tests, order):
     from lit.cl_arguments import TestOrder
+
     enum_order = TestOrder(order)
     if enum_order == TestOrder.RANDOM:
         import random
+
         random.shuffle(tests)
     elif enum_order == TestOrder.LEXICAL:
         tests.sort(key=lambda t: t.getFullName())
     else:
-        assert enum_order == TestOrder.SMART, 'Unknown TestOrder value'
-        tests.sort(key=lambda t: (not t.previous_failure, -t.previous_elapsed, t.getFullName()))
+        assert enum_order == TestOrder.SMART, "Unknown TestOrder value"
+        tests.sort(
+            key=lambda t: (not t.previous_failure, -t.previous_elapsed, t.getFullName())
+        )
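
The SMART sort key above runs previously failing tests first, then slower tests first, then alphabetically; with hypothetical (name, previous_failure, previous_elapsed) tuples:

    tests = [
        ("a", False, 2.0),
        ("b", True, 0.1),
        ("c", False, 5.0),
    ]
    tests.sort(key=lambda t: (not t[1], -t[2], t[0]))
    print([t[0] for t in tests])  # ['b', 'c', 'a']
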
 
 
 def filter_by_shard(tests, run, shards, lit_config):
@@ -188,12 +213,14 @@ def filter_by_shard(tests, run, shards, lit_config):
     # For clarity, generate a preview of the first few test indices in the shard
     # to accompany the arithmetic expression.
     preview_len = 3
-    preview = ', '.join([str(i + 1) for i in test_ixs[:preview_len]])
+    preview = ", ".join([str(i + 1) for i in test_ixs[:preview_len]])
     if len(test_ixs) > preview_len:
-        preview += ', ...'
-    msg = f'Selecting shard {run}/{shards} = ' \
-          f'size {len(selected_tests)}/{len(tests)} = ' \
-          f'tests #({shards}*k)+{run} = [{preview}]'
+        preview += ", ..."
+    msg = (
+        f"Selecting shard {run}/{shards} = "
+        f"size {len(selected_tests)}/{len(tests)} = "
+        f"tests #({shards}*k)+{run} = [{preview}]"
+    )
     lit_config.note(msg)
     return selected_tests
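
A standalone sketch of the shard arithmetic whose preview is built above, assuming the stride selection implied by the "tests #(shards*k)+run" message (the selection itself sits outside this hunk):

    def shard(tests, run, shards):
        # 1-based shard `run` keeps tests at indices run-1, run-1+shards, ...
        return [tests[i] for i in range(run - 1, len(tests), shards)]

    tests = list("abcdefgh")
    print(shard(tests, 1, 3))  # ['a', 'd', 'g']
    print(shard(tests, 2, 3))  # ['b', 'e', 'h']
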
 
@@ -203,10 +230,11 @@ def mark_xfail(selected_tests, opts):
         test_file = os.sep.join(t.path_in_suite)
         test_full_name = t.getFullName()
         if test_file in opts.xfail or test_full_name in opts.xfail:
-            t.xfails += '*'
+            t.xfails += "*"
         if test_file in opts.xfail_not or test_full_name in opts.xfail_not:
             t.xfail_not = True
 
+
 def mark_excluded(discovered_tests, selected_tests):
     excluded_tests = set(discovered_tests) - set(selected_tests)
     result = lit.Test.Result(lit.Test.EXCLUDED)
@@ -218,8 +246,9 @@ def run_tests(tests, lit_config, opts, discovered_tests):
     workers = min(len(tests), opts.workers)
     display = lit.display.create_display(opts, tests, discovered_tests, workers)
 
-    run = lit.run.Run(tests, lit_config, workers, display.update,
-                      opts.max_failures, opts.timeout)
+    run = lit.run.Run(
+        tests, lit_config, workers, display.update, opts.max_failures, opts.timeout
+    )
 
     display.print_header()
 
@@ -229,15 +258,15 @@ def run_tests(tests, lit_config, opts, discovered_tests):
         execute_in_tmp_dir(run, lit_config)
     except KeyboardInterrupt:
         interrupted = True
-        error = '  interrupted by user'
+        error = "  interrupted by user"
     except lit.run.MaxFailuresError:
-        error = 'warning: reached maximum number of test failures'
+        error = "warning: reached maximum number of test failures"
     except lit.run.TimeoutError:
-        error = 'warning: reached timeout'
+        error = "warning: reached timeout"
 
     display.clear(interrupted)
     if error:
-        sys.stderr.write('%s, skipping remaining tests\n' % error)
+        sys.stderr.write("%s, skipping remaining tests\n" % error)
 
 
 def execute_in_tmp_dir(run, lit_config):
@@ -247,11 +276,12 @@ def execute_in_tmp_dir(run, lit_config):
     # their own temp directory to monitor temporary file leaks or handle them at
     # the buildbot level.
     tmp_dir = None
-    if 'LIT_PRESERVES_TMP' not in os.environ:
+    if "LIT_PRESERVES_TMP" not in os.environ:
         import tempfile
+
         # z/OS linker does not support '_' in paths, so use '-'.
-        tmp_dir = tempfile.mkdtemp(prefix='lit-tmp-')
-        tmp_dir_envs = {k: tmp_dir for k in ['TMP', 'TMPDIR', 'TEMP', 'TEMPDIR']}
+        tmp_dir = tempfile.mkdtemp(prefix="lit-tmp-")
+        tmp_dir_envs = {k: tmp_dir for k in ["TMP", "TMPDIR", "TEMP", "TEMPDIR"]}
         os.environ.update(tmp_dir_envs)
         for cfg in {t.config for t in run.tests}:
             cfg.environment.update(tmp_dir_envs)
@@ -261,16 +291,21 @@ def execute_in_tmp_dir(run, lit_config):
         if tmp_dir:
             try:
                 import shutil
+
                 shutil.rmtree(tmp_dir)
-            except Exception as e: 
-                lit_config.warning("Failed to delete temp directory '%s', try upgrading your version of Python to fix this" % tmp_dir)
+            except Exception as e:
+                lit_config.warning(
+                    "Failed to delete temp directory '%s', try upgrading your version of Python to fix this"
+                    % tmp_dir
+                )
 
 
 def print_histogram(tests):
-    test_times = [(t.getFullName(), t.result.elapsed)
-                  for t in tests if t.result.elapsed]
+    test_times = [
+        (t.getFullName(), t.result.elapsed) for t in tests if t.result.elapsed
+    ]
     if test_times:
-        lit.util.printHistogram(test_times, title='Tests')
+        lit.util.printHistogram(test_times, title="Tests")
 
 
 def print_results(tests, elapsed, opts):
@@ -279,7 +314,11 @@ def print_results(tests, elapsed, opts):
         tests_by_code[test.result.code].append(test)
 
     for code in lit.Test.ResultCode.all_codes():
-        print_group(sorted(tests_by_code[code], key=lambda t: t.getFullName()), code, opts.shown_codes)
+        print_group(
+            sorted(tests_by_code[code], key=lambda t: t.getFullName()),
+            code,
+            opts.shown_codes,
+        )
 
     print_summary(tests_by_code, opts.quiet, elapsed)
 
@@ -289,19 +328,18 @@ def print_group(tests, code, shown_codes):
         return
     if not code.isFailure and code not in shown_codes:
         return
-    print('*' * 20)
-    print('{} Tests ({}):'.format(code.label, len(tests)))
+    print("*" * 20)
+    print("{} Tests ({}):".format(code.label, len(tests)))
     for test in tests:
-        print('  %s' % test.getFullName())
-    sys.stdout.write('\n')
+        print("  %s" % test.getFullName())
+    sys.stdout.write("\n")
 
 
 def print_summary(tests_by_code, quiet, elapsed):
     if not quiet:
-        print('\nTesting Time: %.2fs' % elapsed)
+        print("\nTesting Time: %.2fs" % elapsed)
 
-    codes = [c for c in lit.Test.ResultCode.all_codes()
-             if not quiet or c.isFailure]
+    codes = [c for c in lit.Test.ResultCode.all_codes() if not quiet or c.isFailure]
     groups = [(c.label, len(tests_by_code[c])) for c in codes]
     groups = [(label, count) for label, count in groups if count]
     if not groups:
@@ -313,4 +351,4 @@ def print_summary(tests_by_code, quiet, elapsed):
     for (label, count) in groups:
         label = label.ljust(max_label_len)
         count = str(count).rjust(max_count_len)
-        print('  %s: %s' % (label, count))
+        print("  %s: %s" % (label, count))

diff --git a/llvm/utils/lit/lit/reports.py b/llvm/utils/lit/lit/reports.py
index c8add0e5e593b..2ac44b0c0ce86 100755
--- a/llvm/utils/lit/lit/reports.py
+++ b/llvm/utils/lit/lit/reports.py
@@ -24,23 +24,24 @@ def write_results(self, tests, elapsed):
         # Construct the data we will write.
         data = {}
         # Encode the current lit version as a schema version.
-        data['__version__'] = lit.__versioninfo__
-        data['elapsed'] = elapsed
+        data["__version__"] = lit.__versioninfo__
+        data["elapsed"] = elapsed
         # FIXME: Record some information on the lit configuration used?
         # FIXME: Record information from the individual test suites?
 
         # Encode the tests.
-        data['tests'] = tests_data = []
+        data["tests"] = tests_data = []
         for test in tests:
             test_data = {
-                'name': test.getFullName(),
-                'code': test.result.code.name,
-                'output': test.result.output,
-                'elapsed': test.result.elapsed}
+                "name": test.getFullName(),
+                "code": test.result.code.name,
+                "output": test.result.output,
+                "elapsed": test.result.elapsed,
+            }
 
             # Add test metrics, if present.
             if test.result.metrics:
-                test_data['metrics'] = metrics_data = {}
+                test_data["metrics"] = metrics_data = {}
                 for key, value in test.result.metrics.items():
                     metrics_data[key] = value.todata()
 
@@ -49,15 +50,16 @@ def write_results(self, tests, elapsed):
                 for key, micro_test in test.result.microResults.items():
                     # Expand parent test name with micro test name
                     parent_name = test.getFullName()
-                    micro_full_name = parent_name + ':' + key
+                    micro_full_name = parent_name + ":" + key
 
                     micro_test_data = {
-                        'name': micro_full_name,
-                        'code': micro_test.code.name,
-                        'output': micro_test.output,
-                        'elapsed': micro_test.elapsed}
+                        "name": micro_full_name,
+                        "code": micro_test.code.name,
+                        "output": micro_test.output,
+                        "elapsed": micro_test.elapsed,
+                    }
                     if micro_test.metrics:
-                        micro_test_data['metrics'] = micro_metrics_data = {}
+                        micro_test_data["metrics"] = micro_metrics_data = {}
                         for key, value in micro_test.metrics.items():
                             micro_metrics_data[key] = value.todata()
 
@@ -65,12 +67,14 @@ def write_results(self, tests, elapsed):
 
             tests_data.append(test_data)
 
-        with open(self.output_file, 'w') as file:
+        with open(self.output_file, "w") as file:
             json.dump(data, file, indent=2, sort_keys=True)
-            file.write('\n')
+            file.write("\n")
 
 
-_invalid_xml_chars_dict = {c: None for c in range(32) if chr(c) not in ('\t', '\n', '\r')}
+_invalid_xml_chars_dict = {
+    c: None for c in range(32) if chr(c) not in ("\t", "\n", "\r")
+}
 
 
 def remove_invalid_xml_chars(s):
@@ -87,44 +91,47 @@ def remove_invalid_xml_chars(s):
 class XunitReport(object):
     def __init__(self, output_file):
         self.output_file = output_file
-        self.skipped_codes = {lit.Test.EXCLUDED,
-                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
+        self.skipped_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
 
     def write_results(self, tests, elapsed):
         tests.sort(key=by_suite_and_test_path)
         tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
 
-        with open(self.output_file, 'w') as file:
+        with open(self.output_file, "w") as file:
             file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
             file.write('<testsuites time="{time:.2f}">\n'.format(time=elapsed))
             for suite, test_iter in tests_by_suite:
                 self._write_testsuite(file, suite, list(test_iter))
-            file.write('</testsuites>\n')
+            file.write("</testsuites>\n")
 
     def _write_testsuite(self, file, suite, tests):
         skipped = sum(1 for t in tests if t.result.code in self.skipped_codes)
         failures = sum(1 for t in tests if t.isFailure())
 
-        name = suite.config.name.replace('.', '-')
-        file.write(f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}">\n')
+        name = suite.config.name.replace(".", "-")
+        file.write(
+            f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}">\n'
+        )
         for test in tests:
             self._write_test(file, test, name)
-        file.write('</testsuite>\n')
+        file.write("</testsuite>\n")
 
     def _write_test(self, file, test, suite_name):
-        path = '/'.join(test.path_in_suite[:-1]).replace('.', '_')
-        class_name = f'{suite_name}.{path or suite_name}'
+        path = "/".join(test.path_in_suite[:-1]).replace(".", "_")
+        class_name = f"{suite_name}.{path or suite_name}"
         name = test.path_in_suite[-1]
         time = test.result.elapsed or 0.0
-        file.write(f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"')
+        file.write(
+            f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"'
+        )
 
         if test.isFailure():
-            file.write('>\n  <failure><![CDATA[')
+            file.write(">\n  <failure><![CDATA[")
             # In the unlikely case that the output contains the CDATA
             # terminator we wrap it by creating a new CDATA block.
-            output = test.result.output.replace(']]>', ']]]]><![CDATA[>')
+            output = test.result.output.replace("]]>", "]]]]><![CDATA[>")
             if isinstance(output, bytes):
-                output = output.decode("utf-8", 'ignore')
+                output = output.decode("utf-8", "ignore")
 
             # Failing test  output sometimes contains control characters like
             # \x1b (e.g. if there was some -fcolor-diagnostics output) which are
@@ -134,60 +141,60 @@ def _write_test(self, file, test, suite_name):
             # characters and similar problems also occur with GitLab CI.
             output = remove_invalid_xml_chars(output)
             file.write(output)
-            file.write(']]></failure>\n</testcase>\n')
+            file.write("]]></failure>\n</testcase>\n")
         elif test.result.code in self.skipped_codes:
             reason = self._get_skip_reason(test)
-            file.write(f'>\n  <skipped message={quo(reason)}/>\n</testcase>\n')
+            file.write(f">\n  <skipped message={quo(reason)}/>\n</testcase>\n")
         else:
-            file.write('/>\n')
+            file.write("/>\n")
 
     def _get_skip_reason(self, test):
         code = test.result.code
         if code == lit.Test.EXCLUDED:
-            return 'Test not selected (--filter, --max-tests)'
+            return "Test not selected (--filter, --max-tests)"
         if code == lit.Test.SKIPPED:
-            return 'User interrupt'
+            return "User interrupt"
 
         assert code == lit.Test.UNSUPPORTED
         features = test.getMissingRequiredFeatures()
         if features:
-            return 'Missing required feature(s): ' + ', '.join(features)
-        return 'Unsupported configuration'
+            return "Missing required feature(s): " + ", ".join(features)
+        return "Unsupported configuration"
 
 
 def gen_resultdb_test_entry(
     test_name, start_time, elapsed_time, test_output, result_code, is_expected
 ):
     test_data = {
-        'testId': test_name,
-        'start_time': datetime.datetime.fromtimestamp(start_time).isoformat() + 'Z',
-        'duration': '%.9fs' % elapsed_time,
-        'summary_html': '<p><text-artifact artifact-id="artifact-content-in-request"></p>',
-        'artifacts': {
-            'artifact-content-in-request': {
-                'contents': base64.b64encode(test_output.encode('utf-8')).decode(
-                    'utf-8'
+        "testId": test_name,
+        "start_time": datetime.datetime.fromtimestamp(start_time).isoformat() + "Z",
+        "duration": "%.9fs" % elapsed_time,
+        "summary_html": '<p><text-artifact artifact-id="artifact-content-in-request"></p>',
+        "artifacts": {
+            "artifact-content-in-request": {
+                "contents": base64.b64encode(test_output.encode("utf-8")).decode(
+                    "utf-8"
                 ),
             },
         },
-        'expected': is_expected,
+        "expected": is_expected,
     }
     if (
         result_code == lit.Test.PASS
         or result_code == lit.Test.XPASS
         or result_code == lit.Test.FLAKYPASS
     ):
-        test_data['status'] = 'PASS'
+        test_data["status"] = "PASS"
     elif result_code == lit.Test.FAIL or result_code == lit.Test.XFAIL:
-        test_data['status'] = 'FAIL'
+        test_data["status"] = "FAIL"
     elif (
         result_code == lit.Test.UNSUPPORTED
         or result_code == lit.Test.SKIPPED
         or result_code == lit.Test.EXCLUDED
     ):
-        test_data['status'] = 'SKIP'
+        test_data["status"] = "SKIP"
     elif result_code == lit.Test.UNRESOLVED or result_code == lit.Test.TIMEOUT:
-        test_data['status'] = 'ABORT'
+        test_data["status"] = "ABORT"
     return test_data
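
Summarizing the branches above, gen_resultdb_test_entry buckets lit result codes into four ResultDB statuses; the separate "expected" field is what distinguishes XFAIL from an unexpected FAIL. Shown here with code names as strings for brevity (the real code compares ResultCode objects):

    status_for = {
        "PASS": "PASS", "XPASS": "PASS", "FLAKYPASS": "PASS",
        "FAIL": "FAIL", "XFAIL": "FAIL",
        "UNSUPPORTED": "SKIP", "SKIPPED": "SKIP", "EXCLUDED": "SKIP",
        "UNRESOLVED": "ABORT", "TIMEOUT": "ABORT",
    }
    print(status_for["XFAIL"])  # FAIL, but reported with expected=True
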
 
 
@@ -199,10 +206,10 @@ def write_results(self, tests, elapsed):
         unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
         tests = [t for t in tests if t.result.code not in unexecuted_codes]
         data = {}
-        data['__version__'] = lit.__versioninfo__
-        data['elapsed'] = elapsed
+        data["__version__"] = lit.__versioninfo__
+        data["elapsed"] = elapsed
         # Encode the tests.
-        data['tests'] = tests_data = []
+        data["tests"] = tests_data = []
         for test in tests:
             tests_data.append(
                 gen_resultdb_test_entry(
@@ -218,7 +225,7 @@ def write_results(self, tests, elapsed):
                 for key, micro_test in test.result.microResults.items():
                     # Expand parent test name with micro test name
                     parent_name = test.getFullName()
-                    micro_full_name = parent_name + ':' + key + 'microres'
+                    micro_full_name = parent_name + ":" + key + "microres"
                     tests_data.append(
                         gen_resultdb_test_entry(
                             test_name=micro_full_name,
@@ -234,24 +241,26 @@ def write_results(self, tests, elapsed):
                         )
                     )
 
-        with open(self.output_file, 'w') as file:
+        with open(self.output_file, "w") as file:
             json.dump(data, file, indent=2, sort_keys=True)
-            file.write('\n')
+            file.write("\n")
 
 
 class TimeTraceReport(object):
     def __init__(self, output_file):
         self.output_file = output_file
-        self.skipped_codes = {lit.Test.EXCLUDED,
-                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
+        self.skipped_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
 
     def write_results(self, tests, elapsed):
         # Find when first test started so we can make start times relative.
         first_start_time = min([t.result.start for t in tests])
-        events = [self._get_test_event(
-            x, first_start_time) for x in tests if x.result.code not in self.skipped_codes]
+        events = [
+            self._get_test_event(x, first_start_time)
+            for x in tests
+            if x.result.code not in self.skipped_codes
+        ]
 
-        json_data = {'traceEvents': events}
+        json_data = {"traceEvents": events}
 
         with open(self.output_file, "w") as time_trace_file:
             json.dump(json_data, time_trace_file, indent=2, sort_keys=True)
@@ -262,10 +271,10 @@ def _get_test_event(self, test, first_start_time):
         start_time = test.result.start - first_start_time if test.result.start else 0.0
         pid = test.result.pid or 0
         return {
-            'pid': pid,
-            'tid': 1,
-            'ph': 'X',
-            'ts': int(start_time * 1000000.),
-            'dur': int(elapsed_time * 1000000.),
-            'name': test_name,
+            "pid": pid,
+            "tid": 1,
+            "ph": "X",
+            "ts": int(start_time * 1000000.0),
+            "dur": int(elapsed_time * 1000000.0),
+            "name": test_name,
         }
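
The dict assembled above follows the Chrome trace event format ("ph": "X" is a complete event), with start and duration converted from seconds to integer microseconds; the test name below is hypothetical:

    event = {
        "pid": 0, "tid": 1, "ph": "X",
        "ts": int(0.25 * 1000000.0),  # started 0.25 s after the first test
        "dur": int(1.5 * 1000000.0),  # ran for 1.5 s
        "name": "suite :: some/test.ll",
    }
    print(event["ts"], event["dur"])  # 250000 1500000
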

diff --git a/llvm/utils/lit/lit/run.py b/llvm/utils/lit/lit/run.py
index 255e633294327..535c859352cc4 100644
--- a/llvm/utils/lit/lit/run.py
+++ b/llvm/utils/lit/lit/run.py
@@ -9,6 +9,8 @@
 
 class MaxFailuresError(Exception):
     pass
+
+
 class TimeoutError(Exception):
     pass
 
@@ -16,8 +18,9 @@ class TimeoutError(Exception):
 class Run(object):
     """A concrete, configured testing run."""
 
-    def __init__(self, tests, lit_config, workers, progress_callback,
-                 max_failures, timeout):
+    def __init__(
+        self, tests, lit_config, workers, progress_callback, max_failures, timeout
+    ):
         self.tests = tests
         self.lit_config = lit_config
         self.workers = workers
@@ -62,17 +65,22 @@ def execute(self):
     def _execute(self, deadline):
         self._increase_process_limit()
 
-        semaphores = {k: multiprocessing.BoundedSemaphore(v)
-                      for k, v in self.lit_config.parallelism_groups.items()
-                      if v is not None}
+        semaphores = {
+            k: multiprocessing.BoundedSemaphore(v)
+            for k, v in self.lit_config.parallelism_groups.items()
+            if v is not None
+        }
 
-        pool = multiprocessing.Pool(self.workers, lit.worker.initialize,
-                                    (self.lit_config, semaphores))
+        pool = multiprocessing.Pool(
+            self.workers, lit.worker.initialize, (self.lit_config, semaphores)
+        )
 
         async_results = [
-            pool.apply_async(lit.worker.execute, args=[test],
-                             callback=self.progress_callback)
-            for test in self.tests]
+            pool.apply_async(
+                lit.worker.execute, args=[test], callback=self.progress_callback
+            )
+            for test in self.tests
+        ]
         pool.close()
 
         try:
@@ -111,11 +119,12 @@ def _update_test(self, local_test, remote_test):
     # process limit so that tests don't fail due to resource exhaustion.
     def _increase_process_limit(self):
         ncpus = lit.util.usable_core_count()
-        desired_limit = self.workers * ncpus * 2 # the 2 is a safety factor
+        desired_limit = self.workers * ncpus * 2  # the 2 is a safety factor
 
         # Importing the resource module will likely fail on Windows.
         try:
             import resource
+
             NPROC = resource.RLIMIT_NPROC
 
             soft_limit, hard_limit = resource.getrlimit(NPROC)
@@ -123,9 +132,10 @@ def _increase_process_limit(self):
 
             if soft_limit < desired_limit:
                 resource.setrlimit(NPROC, (desired_limit, hard_limit))
-                self.lit_config.note('Raised process limit from %d to %d' % \
-                                        (soft_limit, desired_limit))
+                self.lit_config.note(
+                    "Raised process limit from %d to %d" % (soft_limit, desired_limit)
+                )
         except Exception as ex:
             # Warn, unless this is Windows, in which case this is expected.
-            if os.name != 'nt':
-                self.lit_config.warning('Failed to raise process limit: %s' % ex)
+            if os.name != "nt":
+                self.lit_config.warning("Failed to raise process limit: %s" % ex)

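The _increase_process_limit hunk keeps lit's existing pattern: import resource lazily (the module does not exist on Windows) and raise only the soft RLIMIT_NPROC, since raising the hard limit requires privileges. A standalone sketch of that pattern, with an illustrative limit in place of lit's workers * cores * 2:

    import os

    desired_limit = 4096  # illustrative; lit computes workers * usable cores * 2

    try:
        import resource  # raises ImportError on Windows

        NPROC = resource.RLIMIT_NPROC
        soft_limit, hard_limit = resource.getrlimit(NPROC)
        if soft_limit < desired_limit:
            # Only the soft limit is raised; it may never exceed the hard limit.
            resource.setrlimit(NPROC, (min(desired_limit, hard_limit), hard_limit))
    except Exception as ex:
        if os.name != "nt":  # on Windows the ImportError is expected
            print("Failed to raise process limit: %s" % ex)
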
diff --git a/llvm/utils/lit/lit/util.py b/llvm/utils/lit/lit/util.py
index 75cface848390..882deb3d03efc 100644
--- a/llvm/utils/lit/lit/util.py
+++ b/llvm/utils/lit/lit/util.py
@@ -28,15 +28,15 @@ def pythonize_bool(value):
     if isinstance(value, numbers.Number):
         return value != 0
     if is_string(value):
-        if value.lower() in ('1', 'true', 'on', 'yes'):
+        if value.lower() in ("1", "true", "on", "yes"):
             return True
-        if value.lower() in ('', '0', 'false', 'off', 'no'):
+        if value.lower() in ("", "0", "false", "off", "no"):
             return False
     raise ValueError('"{}" is not a valid boolean'.format(value))
 
 
 def make_word_regex(word):
-    return r'\b' + word + r'\b'
+    return r"\b" + word + r"\b"
 
 
 def to_bytes(s):
@@ -53,7 +53,7 @@ def to_bytes(s):
     # In Python2, 's' is a 'unicode' object.
     # In Python3, 's' is a 'str' object.
     # Encode to UTF-8 to get 'bytes' data.
-    return s.encode('utf-8')
+    return s.encode("utf-8")
 
 
 def to_string(b):
@@ -72,7 +72,7 @@ def to_string(b):
         # In Python2, this branch is never taken ('bytes' is handled as 'str').
         # In Python3, this is true only for 'bytes'.
         try:
-            return b.decode('utf-8')
+            return b.decode("utf-8")
         except UnicodeDecodeError:
             # If the value is not valid Unicode, return the default
             # repr-line encoding.
@@ -90,9 +90,9 @@ def to_string(b):
     # 'unicode' type in Python3 (all the Python3 cases were already handled). In
     # order to get a 'str' object, we need to encode the 'unicode' object.
     try:
-        return b.encode('utf-8')
+        return b.encode("utf-8")
     except AttributeError:
-        raise TypeError('not sure how to convert %s to %s' % (type(b), str))
+        raise TypeError("not sure how to convert %s to %s" % (type(b), str))
 
 
 def to_unicode(s):
@@ -105,7 +105,7 @@ def to_unicode(s):
     if isinstance(s, bytes):
         # In Python2, this branch is taken for both 'str' and 'bytes'.
         # In Python3, this branch is taken only for 'bytes'.
-        return s.decode('utf-8')
+        return s.decode("utf-8")
     return s
 
 
@@ -122,7 +122,7 @@ def usable_core_count():
 
     # On Windows with more than 60 processes, multiprocessing's call to
     # _winapi.WaitForMultipleObjects() prints an error and lit hangs.
-    if platform.system() == 'Windows':
+    if platform.system() == "Windows":
         return min(n, 60)
 
     return n
@@ -130,7 +130,7 @@ def usable_core_count():
 
 def mkdir(path):
     try:
-        if platform.system() == 'Windows':
+        if platform.system() == "Windows":
             from ctypes import windll
             from ctypes import GetLastError, WinError
 
@@ -138,8 +138,8 @@ def mkdir(path):
             # Make sure that the path uses backslashes here, in case
             # python would have happened to use forward slashes, as the
             # NT path format only supports backslashes.
-            path = path.replace('/', '\\')
-            NTPath = to_unicode(r'\\?\%s' % path)
+            path = path.replace("/", "\\")
+            NTPath = to_unicode(r"\\?\%s" % path)
             if not windll.kernel32.CreateDirectoryW(NTPath, None):
                 raise WinError(GetLastError())
         else:
@@ -195,12 +195,14 @@ def listdir_files(dirname, suffixes=None, exclude_filenames=None):
     if exclude_filenames is None:
         exclude_filenames = set()
     if suffixes is None:
-        suffixes = {''}
+        suffixes = {""}
     for filename in os.listdir(dirname):
-        if (os.path.isdir(os.path.join(dirname, filename)) or
-            filename.startswith('.') or
-            filename in exclude_filenames or
-                not any(filename.endswith(sfx) for sfx in suffixes)):
+        if (
+            os.path.isdir(os.path.join(dirname, filename))
+            or filename.startswith(".")
+            or filename in exclude_filenames
+            or not any(filename.endswith(sfx) for sfx in suffixes)
+        ):
             continue
         yield filename
 
@@ -210,7 +212,7 @@ def which(command, paths=None):
     (or the PATH environment variable, if unspecified)."""
 
     if paths is None:
-        paths = os.environ.get('PATH', '')
+        paths = os.environ.get("PATH", "")
 
     # Check for absolute match first.
     if os.path.isabs(command) and os.path.isfile(command):
@@ -222,10 +224,10 @@ def which(command, paths=None):
 
     # Get suffixes to search.
     # On Cygwin, 'PATHEXT' may exist but it should not be used.
-    if os.pathsep == ';':
-        pathext = os.environ.get('PATHEXT', '').split(';')
+    if os.pathsep == ";":
+        pathext = os.environ.get("PATHEXT", "").split(";")
     else:
-        pathext = ['']
+        pathext = [""]
 
     # Search the paths...
     for path in paths.split(os.pathsep):
@@ -251,7 +253,7 @@ def whichTools(tools, paths):
     return None
 
 
-def printHistogram(items, title='Items'):
+def printHistogram(items, title="Items"):
     items.sort(key=lambda item: item[1])
 
     maxValue = max([v for _, v in items])
@@ -272,28 +274,47 @@ def printHistogram(items, title='Items'):
         histo[bin].add(name)
 
     barW = 40
-    hr = '-' * (barW + 34)
-    print('Slowest %s:' % title)
+    hr = "-" * (barW + 34)
+    print("Slowest %s:" % title)
     print(hr)
     for name, value in reversed(items[-20:]):
-        print('%.2fs: %s' % (value, name))
-    print('\n%s Times:' % title)
+        print("%.2fs: %s" % (value, name))
+    print("\n%s Times:" % title)
     print(hr)
     pDigits = int(math.ceil(math.log(maxValue, 10)))
     pfDigits = max(0, 3 - pDigits)
     if pfDigits:
         pDigits += pfDigits + 1
     cDigits = int(math.ceil(math.log(len(items), 10)))
-    print('[%s] :: [%s] :: [%s]' % ('Range'.center((pDigits + 1) * 2 + 3),
-                                    'Percentage'.center(barW),
-                                    'Count'.center(cDigits * 2 + 1)))
+    print(
+        "[%s] :: [%s] :: [%s]"
+        % (
+            "Range".center((pDigits + 1) * 2 + 3),
+            "Percentage".center(barW),
+            "Count".center(cDigits * 2 + 1),
+        )
+    )
     print(hr)
     for i, row in reversed(list(enumerate(histo))):
         pct = float(len(row)) / len(items)
         w = int(barW * pct)
-        print('[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]' % (
-            pDigits, pfDigits, i * barH, pDigits, pfDigits, (i + 1) * barH,
-            '*' * w, ' ' * (barW - w), cDigits, len(row), cDigits, len(items)))
+        print(
+            "[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]"
+            % (
+                pDigits,
+                pfDigits,
+                i * barH,
+                pDigits,
+                pfDigits,
+                (i + 1) * barH,
+                "*" * w,
+                " " * (barW - w),
+                cDigits,
+                len(row),
+                cDigits,
+                len(items),
+            )
+        )
     print(hr)
 
 
@@ -311,11 +332,12 @@ def __init__(self, msg, out, err, exitCode):
 
 # Close extra file handles on UNIX (on Windows this cannot be done while
 # also redirecting input).
-kUseCloseFDs = not (platform.system() == 'Windows')
+kUseCloseFDs = not (platform.system() == "Windows")
 
 
-def executeCommand(command, cwd=None, env=None, input=None, timeout=0,
-                   redirect_stderr=False):
+def executeCommand(
+    command, cwd=None, env=None, input=None, timeout=0, redirect_stderr=False
+):
     """Execute command ``command`` (list of arguments or string) with.
 
     * working directory ``cwd`` (str), use None to use the current
@@ -338,11 +360,15 @@ def executeCommand(command, cwd=None, env=None, input=None, timeout=0,
     if input is not None:
         input = to_bytes(input)
     err_out = subprocess.STDOUT if redirect_stderr else subprocess.PIPE
-    p = subprocess.Popen(command, cwd=cwd,
-                         stdin=subprocess.PIPE,
-                         stdout=subprocess.PIPE,
-                         stderr=err_out,
-                         env=env, close_fds=kUseCloseFDs)
+    p = subprocess.Popen(
+        command,
+        cwd=cwd,
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=err_out,
+        env=env,
+        close_fds=kUseCloseFDs,
+    )
     timerObject = None
     # FIXME: Because of the way nested function scopes work in Python 2.x we
     # need to use a reference to a mutable object rather than a plain
@@ -351,6 +377,7 @@ def executeCommand(command, cwd=None, env=None, input=None, timeout=0,
     hitTimeOut = [False]
     try:
         if timeout > 0:
+
             def killProcess():
                 # We may be invoking a shell so we need to kill the
                 # process and all its children.
@@ -368,14 +395,14 @@ def killProcess():
 
     # Ensure the resulting output is always of string type.
     out = to_string(out)
-    err = '' if redirect_stderr else to_string(err)
+    err = "" if redirect_stderr else to_string(err)
 
     if hitTimeOut[0]:
         raise ExecuteCommandTimeoutException(
-            msg='Reached timeout of {} seconds'.format(timeout),
+            msg="Reached timeout of {} seconds".format(timeout),
             out=out,
             err=err,
-            exitCode=exitCode
+            exitCode=exitCode,
         )
 
     # Detect Ctrl-C in subprocess.
@@ -387,9 +414,9 @@ def killProcess():
 
 def isMacOSTriple(target_triple):
     """Whether the given target triple is for macOS,
-       e.g. x86_64-apple-darwin, arm64-apple-macos
+    e.g. x86_64-apple-darwin, arm64-apple-macos
     """
-    return 'darwin' in target_triple or 'macos' in target_triple
+    return "darwin" in target_triple or "macos" in target_triple
 
 
 def usePlatformSdkOnDarwin(config, lit_config):
@@ -397,8 +424,11 @@ def usePlatformSdkOnDarwin(config, lit_config):
     # default system root path.
     if isMacOSTriple(config.target_triple):
         try:
-            cmd = subprocess.Popen(['xcrun', '--show-sdk-path', '--sdk', 'macosx'],
-                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            cmd = subprocess.Popen(
+                ["xcrun", "--show-sdk-path", "--sdk", "macosx"],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+            )
             out, err = cmd.communicate()
             out = out.strip()
             res = cmd.wait()
@@ -406,15 +436,18 @@ def usePlatformSdkOnDarwin(config, lit_config):
             res = -1
         if res == 0 and out:
             sdk_path = out.decode()
-            lit_config.note('using SDKROOT: %r' % sdk_path)
-            config.environment['SDKROOT'] = sdk_path
+            lit_config.note("using SDKROOT: %r" % sdk_path)
+            config.environment["SDKROOT"] = sdk_path
 
 
 def findPlatformSdkVersionOnMacOS(config, lit_config):
     if isMacOSTriple(config.target_triple):
         try:
-            cmd = subprocess.Popen(['xcrun', '--show-sdk-version', '--sdk', 'macosx'],
-                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            cmd = subprocess.Popen(
+                ["xcrun", "--show-sdk-version", "--sdk", "macosx"],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+            )
             out, err = cmd.communicate()
             out = out.strip()
             res = cmd.wait()
@@ -424,25 +457,31 @@ def findPlatformSdkVersionOnMacOS(config, lit_config):
             return out.decode()
     return None
 
+
 def killProcessAndChildrenIsSupported():
     """
-        Returns a tuple (<supported> , <error message>)
-        where
-        `<supported>` is True if `killProcessAndChildren()` is supported on
-            the current host, returns False otherwise.
-        `<error message>` is an empty string if `<supported>` is True,
-            otherwise is contains a string describing why the function is
-            not supported.
+    Returns a tuple (<supported> , <error message>)
+    where
+    `<supported>` is True if `killProcessAndChildren()` is supported on
+        the current host, returns False otherwise.
+    `<error message>` is an empty string if `<supported>` is True,
+        otherwise is contains a string describing why the function is
+        not supported.
     """
-    if platform.system() == 'AIX':
+    if platform.system() == "AIX":
         return (True, "")
     try:
         import psutil  # noqa: F401
+
         return (True, "")
     except ImportError:
-        return (False,  "Requires the Python psutil module but it could"
-                        " not be found. Try installing it via pip or via"
-                        " your operating system's package manager.")
+        return (
+            False,
+            "Requires the Python psutil module but it could"
+            " not be found. Try installing it via pip or via"
+            " your operating system's package manager.",
+        )
+
 
 def killProcessAndChildren(pid):
     """This function kills a process with ``pid`` and all its running children
@@ -453,10 +492,11 @@ def killProcessAndChildren(pid):
     remove our dependency on it.
 
     """
-    if platform.system() == 'AIX':
-        subprocess.call('kill -kill $(ps -o pid= -L{})'.format(pid), shell=True)
+    if platform.system() == "AIX":
+        subprocess.call("kill -kill $(ps -o pid= -L{})".format(pid), shell=True)
     else:
         import psutil
+
         try:
             psutilProc = psutil.Process(pid)
            # Handle the different psutil API versions

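One detail worth calling out in the executeCommand hunks above: the timeout is implemented with a threading.Timer that kills the process, and the hitTimeOut flag is a one-element list because, as the FIXME in the code notes, Python 2 closures could not rebind outer variables (no nonlocal). A self-contained sketch of that pattern, using a plain kill() where lit calls its psutil-based killProcessAndChildren():

    import subprocess
    import threading

    def run_with_timeout(command, timeout):
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        hit_timeout = [False]  # mutable cell, writable from the nested function

        def kill_process():
            hit_timeout[0] = True
            p.kill()  # lit kills the whole process tree instead

        timer = threading.Timer(timeout, kill_process)
        timer.start()
        try:
            out, err = p.communicate()
        finally:
            timer.cancel()
        if hit_timeout[0]:
            raise RuntimeError("Reached timeout of {} seconds".format(timeout))
        return out, err, p.returncode
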
diff --git a/llvm/utils/lit/lit/worker.py b/llvm/utils/lit/lit/worker.py
index ba9b919f50ebb..8e78bfd45d38b 100644
--- a/llvm/utils/lit/lit/worker.py
+++ b/llvm/utils/lit/lit/worker.py
@@ -78,9 +78,9 @@ def _execute_test_handle_errors(test, lit_config):
     except:
         if lit_config.debug:
             raise
-        output = 'Exception during script execution:\n'
+        output = "Exception during script execution:\n"
         output += traceback.format_exc()
-        output += '\n'
+        output += "\n"
         return lit.Test.Result(lit.Test.UNRESOLVED, output)
 
 

diff --git a/llvm/utils/lit/setup.py b/llvm/utils/lit/setup.py
index 69a38edef83d6..b11e3eafb2a35 100644
--- a/llvm/utils/lit/setup.py
+++ b/llvm/utils/lit/setup.py
@@ -16,35 +16,31 @@
     long_description = f.read()
 
 setup(
-    name = "lit",
-    version = lit.__version__,
-
-    author = lit.__author__,
-    author_email = lit.__email__,
-    url = 'http://llvm.org',
-    license = 'Apache-2.0 with LLVM exception',
-    license_files = ['LICENSE.TXT'],
-
-    description = "A Software Testing Tool",
-    keywords = 'test C++ automatic discovery',
-    long_description = long_description,
-
+    name="lit",
+    version=lit.__version__,
+    author=lit.__author__,
+    author_email=lit.__email__,
+    url="http://llvm.org",
+    license="Apache-2.0 with LLVM exception",
+    license_files=["LICENSE.TXT"],
+    description="A Software Testing Tool",
+    keywords="test C++ automatic discovery",
+    long_description=long_description,
     classifiers=[
-        'Development Status :: 3 - Alpha',
-        'Environment :: Console',
-        'Intended Audience :: Developers',
-        'License :: OSI Approved :: Apache Software License',
-        'Natural Language :: English',
-        'Operating System :: OS Independent',
-        'Programming Language :: Python',
-        'Topic :: Software Development :: Testing',
+        "Development Status :: 3 - Alpha",
+        "Environment :: Console",
+        "Intended Audience :: Developers",
+        "License :: OSI Approved :: Apache Software License",
+        "Natural Language :: English",
+        "Operating System :: OS Independent",
+        "Programming Language :: Python",
+        "Topic :: Software Development :: Testing",
+    ],
+    zip_safe=False,
+    packages=find_packages(),
+    entry_points={
+        "console_scripts": [
+            "lit = lit.main:main",
         ],
-
-    zip_safe = False,
-    packages = find_packages(),
-    entry_points = {
-        'console_scripts': [
-            'lit = lit.main:main',
-            ],
-        }
+    },
 )

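Beyond quoting, the setup.py hunk shows a second normalization black applies: keyword arguments lose the spaces around "=", per PEP 8. The console_scripts entry point it declares means an installed `lit` binary is roughly equivalent to this small driver (a sketch; as the config-map driver further down shows, lit.main.main also accepts a dict of builtin parameters):

    # Rough equivalent of the generated `lit` console script.
    import sys

    from lit.main import main

    if __name__ == "__main__":
        sys.exit(main())
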
diff --git a/llvm/utils/lit/tests/Inputs/allow-retries/lit.cfg b/llvm/utils/lit/tests/Inputs/allow-retries/lit.cfg
index eed69f389ed07..e1de4577ff3d8 100644
--- a/llvm/utils/lit/tests/Inputs/allow-retries/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/allow-retries/lit.cfg
@@ -1,9 +1,10 @@
 import lit.formats
-config.name = 'allow-retries'
-config.suffixes = ['.py']
+
+config.name = "allow-retries"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
 
-config.substitutions.append(('%python', lit_config.params.get('python', '')))
-config.substitutions.append(('%counter', lit_config.params.get('counter', '')))
+config.substitutions.append(("%python", lit_config.params.get("python", "")))
+config.substitutions.append(("%counter", lit_config.params.get("counter", "")))

diff --git a/llvm/utils/lit/tests/Inputs/allow-retries/succeeds-within-limit.py b/llvm/utils/lit/tests/Inputs/allow-retries/succeeds-within-limit.py
index 45ac9433fc7ef..af80e3ece74bb 100644
--- a/llvm/utils/lit/tests/Inputs/allow-retries/succeeds-within-limit.py
+++ b/llvm/utils/lit/tests/Inputs/allow-retries/succeeds-within-limit.py
@@ -9,16 +9,16 @@
 
 # The first time the test is run, initialize the counter to 1.
 if not os.path.exists(counter_file):
-    with open(counter_file, 'w') as counter:
+    with open(counter_file, "w") as counter:
         counter.write("1")
 
 # Succeed if this is the fourth time we're being run.
-with open(counter_file, 'r') as counter:
+with open(counter_file, "r") as counter:
     num = int(counter.read())
     if num == 4:
         sys.exit(0)
 
 # Otherwise, increment the counter and fail
-with open(counter_file, 'w') as counter:
+with open(counter_file, "w") as counter:
     counter.write(str(num + 1))
     sys.exit(1)

diff --git a/llvm/utils/lit/tests/Inputs/config-map-discovery/driver.py b/llvm/utils/lit/tests/Inputs/config-map-discovery/driver.py
index db9141b9b1bf9..d22b84d6a15cf 100644
--- a/llvm/utils/lit/tests/Inputs/config-map-discovery/driver.py
+++ b/llvm/utils/lit/tests/Inputs/config-map-discovery/driver.py
@@ -6,11 +6,12 @@
 main_config = os.path.realpath(main_config)
 main_config = os.path.normcase(main_config)
 
-config_map = {main_config : sys.argv[2]}
-builtin_parameters = {'config_map' : config_map}
+config_map = {main_config: sys.argv[2]}
+builtin_parameters = {"config_map": config_map}
 
-if __name__=='__main__':
+if __name__ == "__main__":
     from lit.main import main
+
     main_config_dir = os.path.dirname(main_config)
     sys.argv = [sys.argv[0]] + sys.argv[3:] + [main_config_dir]
     main(builtin_parameters)

diff --git a/llvm/utils/lit/tests/Inputs/config-map-discovery/main-config/lit.cfg b/llvm/utils/lit/tests/Inputs/config-map-discovery/main-config/lit.cfg
index 380a05beb4a83..d092be7ea71da 100644
--- a/llvm/utils/lit/tests/Inputs/config-map-discovery/main-config/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/config-map-discovery/main-config/lit.cfg
@@ -1 +1 @@
-print("ERROR: lit.cfg invoked!")
\ No newline at end of file
+print("ERROR: lit.cfg invoked!")

diff --git a/llvm/utils/lit/tests/Inputs/custom-result-category/format.py b/llvm/utils/lit/tests/Inputs/custom-result-category/format.py
index 0ef1bf24f833e..d402c6c45e751 100644
--- a/llvm/utils/lit/tests/Inputs/custom-result-category/format.py
+++ b/llvm/utils/lit/tests/Inputs/custom-result-category/format.py
@@ -1,8 +1,8 @@
 import lit
 import lit.formats
 
-CUSTOM_PASS = lit.Test.ResultCode('CUSTOM_PASS', 'My Passed', False)
-CUSTOM_FAILURE = lit.Test.ResultCode('CUSTOM_FAILURE', 'My Failed', True)
+CUSTOM_PASS = lit.Test.ResultCode("CUSTOM_PASS", "My Passed", False)
+CUSTOM_FAILURE = lit.Test.ResultCode("CUSTOM_FAILURE", "My Failed", True)
 
 
 class MyFormat(lit.formats.ShTest):

diff --git a/llvm/utils/lit/tests/Inputs/custom-result-category/lit.cfg b/llvm/utils/lit/tests/Inputs/custom-result-category/lit.cfg
index 842ea612ebf03..1db7be6834c79 100644
--- a/llvm/utils/lit/tests/Inputs/custom-result-category/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/custom-result-category/lit.cfg
@@ -1,10 +1,11 @@
 import lit
 import site
+
 site.addsitedir(os.path.dirname(__file__))
 import format
 
-config.name = 'custom-result-category'
-config.suffixes = ['.txt']
+config.name = "custom-result-category"
+config.suffixes = [".txt"]
 config.test_format = format.MyFormat()
 config.test_source_root = None
 config.test_exec_root = None

diff --git a/llvm/utils/lit/tests/Inputs/discovery/lit.cfg b/llvm/utils/lit/tests/Inputs/discovery/lit.cfg
index f64d905888be9..e28753415f74a 100644
--- a/llvm/utils/lit/tests/Inputs/discovery/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/discovery/lit.cfg
@@ -1,20 +1,21 @@
 import lit.formats
-config.name = 'top-level-suite'
-config.suffixes = ['.txt']
+
+config.name = "top-level-suite"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 
 # We intentionally don't set the source root or exec root directories here,
 # because this suite gets reused for testing the exec root behavior (in
 # ../exec-discovery).
 #
-#config.test_source_root = None
-#config.test_exec_root = None
+# config.test_source_root = None
+# config.test_exec_root = None
 
 # Check that arbitrary config values are copied (tested by subdir/lit.local.cfg).
 config.an_extra_variable = False
 
 # Check that available_features are printed by --show-suites (and in the right order)
-config.available_features = ['feature2', 'feature1']
+config.available_features = ["feature2", "feature1"]
 
 # Check that substitutions are printed by --show-suites (and in the right order)
-config.substitutions = [('%key2', 'value2'), ('%key1', 'value1')]
+config.substitutions = [("%key2", "value2"), ("%key1", "value1")]

diff --git a/llvm/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg b/llvm/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
index b49329abfde61..cc8a0cac6a527 100644
--- a/llvm/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'sub-suite'
-config.suffixes = ['.txt']
+
+config.name = "sub-suite"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff --git a/llvm/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg b/llvm/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg
index ae25b4f4acb42..5421c53f73c5f 100644
--- a/llvm/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg
@@ -4,6 +4,6 @@ import lit.formats
 if config.test_source_root is None or config.test_exec_root is None:
     lit_config.fatal("No site specific configuration")
 
-config.name = 'exec-discovery-in-tree-suite'
-config.suffixes = ['.txt']
+config.name = "exec-discovery-in-tree-suite"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()

diff --git a/llvm/utils/lit/tests/Inputs/fake-externals/fake_external.py b/llvm/utils/lit/tests/Inputs/fake-externals/fake_external.py
index 1fdc2ee063ca4..eb736abae72ce 100644
--- a/llvm/utils/lit/tests/Inputs/fake-externals/fake_external.py
+++ b/llvm/utils/lit/tests/Inputs/fake-externals/fake_external.py
@@ -1,7 +1,11 @@
 import os
 import sys
 
+
 def execute(fileName):
-  sys.stderr.write("error: external '{}' command called unexpectedly\n"
-                   .format(os.path.basename(fileName)));
-  sys.exit(1)
+    sys.stderr.write(
+        "error: external '{}' command called unexpectedly\n".format(
+            os.path.basename(fileName)
+        )
+    )
+    sys.exit(1)

diff --git a/llvm/utils/lit/tests/Inputs/googletest-cmd-wrapper/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-cmd-wrapper/lit.cfg
index 68252532962d6..9f93bac51456d 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-cmd-wrapper/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-cmd-wrapper/lit.cfg
@@ -1,3 +1,6 @@
 import lit.formats
-config.name = 'googletest-cmd-wrapper'
-config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test' if 'win32' in sys.platform else '.exe', [sys.executable])
+
+config.name = "googletest-cmd-wrapper"
+config.test_format = lit.formats.GoogleTest(
+    "DummySubDir", "Test" if "win32" in sys.platform else ".exe", [sys.executable]
+)

diff --git a/llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py
index 61530493caa51..f8fe95cbfbb30 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py
+++ b/llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py
@@ -4,9 +4,10 @@
 import sys
 
 if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
-    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+    if sys.argv[2] != "--gtest_filter=-*DISABLED_*":
         raise ValueError("unexpected argument: %s" % (sys.argv[2]))
-    print("""\
+    print(
+        """\
 FirstTest.
   subTestA
   subTestB
@@ -15,18 +16,19 @@
 ParameterizedTest/0.
   subTest
 ParameterizedTest/1.
-  subTest""")
+  subTest"""
+    )
     sys.exit(0)
 elif len(sys.argv) != 1:
     # sharding and json output are specified using environment variables
-    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
+    raise ValueError("unexpected argument: %r" % (" ".join(sys.argv[1:])))
 
-for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+for e in ["GTEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_OUTPUT"]:
     if e not in os.environ:
         raise ValueError("missing environment variables: " + e)
 
-if not os.environ['GTEST_OUTPUT'].startswith('json:'):
-    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+if not os.environ["GTEST_OUTPUT"].startswith("json:"):
+    raise ValueError("must emit json output: " + os.environ["GTEST_OUTPUT"])
 
 dummy_output = """\
 {
@@ -34,17 +36,20 @@
 ]
 }"""
 
-if os.environ['GTEST_SHARD_INDEX'] == '0':
-    print("""\
+if os.environ["GTEST_SHARD_INDEX"] == "0":
+    print(
+        """\
 [----------] 4 test from FirstTest
 [ RUN      ] FirstTest.subTestA
 [       OK ] FirstTest.subTestA (18 ms)
-[ RUN      ] FirstTest.subTestB""", flush=True)
-    print('I am about to crash', file=sys.stderr, flush=True)
+[ RUN      ] FirstTest.subTestB""",
+        flush=True,
+    )
+    print("I am about to crash", file=sys.stderr, flush=True)
     exit_code = 1
 else:
-    json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
-    with open(json_filename, 'w', encoding='utf-8') as f:
+    json_filename = os.environ["GTEST_OUTPUT"].split(":", 1)[1]
+    with open(json_filename, "w", encoding="utf-8") as f:
         f.write(dummy_output)
     exit_code = 0
 

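The DummySubDir/OneTest.py scripts in these googletest inputs stand in for real gtest binaries: lit's GoogleTest format communicates with the test executable purely through environment variables, which is what the checks above assert. A sketch of that contract from the runner's side (the path and shard counts are illustrative, not from the commit):

    import os
    import subprocess

    env = dict(os.environ)
    env["GTEST_TOTAL_SHARDS"] = "2"  # split the binary's tests into two shards
    env["GTEST_SHARD_INDEX"] = "0"  # this invocation runs shard 0
    env["GTEST_OUTPUT"] = "json:/tmp/shard0.json"  # machine-readable results

    # Hypothetical gtest executable; lit discovers these under DummySubDir.
    subprocess.run(["./DummySubDir/OneTest"], env=env, check=False)
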
diff --git a/llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
index 8048411a70882..5d51a6098f76a 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
@@ -1,3 +1,4 @@
 import lit.formats
-config.name = 'googletest-crash'
-config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
+
+config.name = "googletest-crash"
+config.test_format = lit.formats.GoogleTest("DummySubDir", "Test")

diff --git a/llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/DummySubDir/OneTest.py
index e9ce90e0a8864..bd48ccc6d63a6 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/DummySubDir/OneTest.py
+++ b/llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/DummySubDir/OneTest.py
@@ -4,22 +4,24 @@
 import sys
 
 if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
-    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+    if sys.argv[2] != "--gtest_filter=-*DISABLED_*":
         raise ValueError("unexpected argument: %s" % (sys.argv[2]))
-    print("""\
+    print(
+        """\
 FirstTest.
-  subTestA""")
+  subTestA"""
+    )
     sys.exit(0)
 elif len(sys.argv) != 1:
     # sharding and json output are specified using environment variables
-    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
+    raise ValueError("unexpected argument: %r" % (" ".join(sys.argv[1:])))
 
-for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+for e in ["GTEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_OUTPUT"]:
     if e not in os.environ:
         raise ValueError("missing environment variables: " + e)
 
-if not os.environ['GTEST_OUTPUT'].startswith('json:'):
-    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+if not os.environ["GTEST_OUTPUT"].startswith("json:"):
+    raise ValueError("must emit json output: " + os.environ["GTEST_OUTPUT"])
 
 output = """\
 {
@@ -44,9 +46,9 @@
 ]
 }"""
 
-json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
-with open(json_filename, 'w', encoding='utf-8') as f:
-    if os.environ['GTEST_SHARD_INDEX'] == '0':
+json_filename = os.environ["GTEST_OUTPUT"].split(":", 1)[1]
+with open(json_filename, "w", encoding="utf-8") as f:
+    if os.environ["GTEST_SHARD_INDEX"] == "0":
         f.write(output)
     else:
         f.write(dummy_output)

diff --git a/llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/lit.cfg
index 20b6db9b4134d..57ef41b7073fe 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/lit.cfg
@@ -1,3 +1,4 @@
 import lit.formats
-config.name = 'googletest-detect-duplicate'
-config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
+
+config.name = "googletest-detect-duplicate"
+config.test_format = lit.formats.GoogleTest("DummySubDir", "Test")

diff --git a/llvm/utils/lit/tests/Inputs/googletest-discovery-failed/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-discovery-failed/lit.cfg
index 66be8ec333881..d96f93e798b3f 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-discovery-failed/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-discovery-failed/lit.cfg
@@ -1,3 +1,4 @@
 import lit.formats
-config.name = 'googletest-discovery-failed'
-config.test_format = lit.formats.GoogleTest('subdir', 'Test')
+
+config.name = "googletest-discovery-failed"
+config.test_format = lit.formats.GoogleTest("subdir", "Test")

diff --git a/llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/DummySubDir/OneTest.py
index e35a9cbeafcb8..0aa5d0ba39cbf 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/DummySubDir/OneTest.py
+++ b/llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/DummySubDir/OneTest.py
@@ -4,9 +4,10 @@
 import sys
 
 if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
-    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+    if sys.argv[2] != "--gtest_filter=-*DISABLED_*":
         raise ValueError("unexpected argument: %s" % (sys.argv[2]))
-    print("""\
+    print(
+        """\
 FirstTest.
   subTestA
   subTestB
@@ -15,18 +16,19 @@
 ParameterizedTest/0.
   subTest
 ParameterizedTest/1.
-  subTest""")
+  subTest"""
+    )
     sys.exit(0)
 elif len(sys.argv) != 1:
     # sharding and json output are specified using environment variables
-    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
+    raise ValueError("unexpected argument: %r" % (" ".join(sys.argv[1:])))
 
-for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+for e in ["GTEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_OUTPUT"]:
     if e not in os.environ:
         raise ValueError("missing environment variables: " + e)
 
-if not os.environ['GTEST_OUTPUT'].startswith('json:'):
-    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+if not os.environ["GTEST_OUTPUT"].startswith("json:"):
+    raise ValueError("must emit json output: " + os.environ["GTEST_OUTPUT"])
 
 output = """\
 {
@@ -92,12 +94,12 @@
 ]
 }"""
 
-json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
-with open(json_filename, 'w', encoding='utf-8') as f:
-    if os.environ['GTEST_TOTAL_SHARDS'] == '1':
-        print('[ RUN      ] FirstTest.subTestB', flush=True)
-        print('I am subTest B output', file=sys.stderr, flush=True)
-        print('[  FAILED  ] FirstTest.subTestB (8 ms)', flush=True)
+json_filename = os.environ["GTEST_OUTPUT"].split(":", 1)[1]
+with open(json_filename, "w", encoding="utf-8") as f:
+    if os.environ["GTEST_TOTAL_SHARDS"] == "1":
+        print("[ RUN      ] FirstTest.subTestB", flush=True)
+        print("I am subTest B output", file=sys.stderr, flush=True)
+        print("[  FAILED  ] FirstTest.subTestB (8 ms)", flush=True)
 
         f.write(output)
         exit_code = 1

diff --git a/llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/lit.cfg
index f2f6cda8db6c0..ff6577443fb45 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/lit.cfg
@@ -1,3 +1,4 @@
 import lit.formats
-config.name = 'googletest-format'
-config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
+
+config.name = "googletest-format"
+config.test_format = lit.formats.GoogleTest("DummySubDir", "Test")

diff --git a/llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
index 120d7c0d67e2c..c45a710d52dae 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
+++ b/llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
@@ -4,9 +4,10 @@
 import sys
 
 if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
-    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+    if sys.argv[2] != "--gtest_filter=-*DISABLED_*":
         raise ValueError("unexpected argument: %s" % (sys.argv[2]))
-    print("""\
+    print(
+        """\
 FirstTest.
   subTestA
   subTestB
@@ -15,18 +16,19 @@
 ParameterizedTest/0.
   subTest
 ParameterizedTest/1.
-  subTest""")
+  subTest"""
+    )
     sys.exit(0)
 elif len(sys.argv) != 1:
     # sharding and json output are specified using environment variables
-    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
+    raise ValueError("unexpected argument: %r" % (" ".join(sys.argv[1:])))
 
-for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+for e in ["GTEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_OUTPUT"]:
     if e not in os.environ:
         raise ValueError("missing environment variables: " + e)
 
-if not os.environ['GTEST_OUTPUT'].startswith('json:'):
-    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+if not os.environ["GTEST_OUTPUT"].startswith("json:"):
+    raise ValueError("must emit json output: " + os.environ["GTEST_OUTPUT"])
 
 output = """\
 {
@@ -92,12 +94,12 @@
 ]
 }"""
 
-json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
-with open(json_filename, 'w', encoding='utf-8') as f:
-    if os.environ['GTEST_SHARD_INDEX'] == '0':
-        print('[ RUN      ] FirstTest.subTestB', flush=True)
-        print('I am subTest B output', file=sys.stderr, flush=True)
-        print('[  FAILED  ] FirstTest.subTestB (8 ms)', flush=True)
+json_filename = os.environ["GTEST_OUTPUT"].split(":", 1)[1]
+with open(json_filename, "w", encoding="utf-8") as f:
+    if os.environ["GTEST_SHARD_INDEX"] == "0":
+        print("[ RUN      ] FirstTest.subTestB", flush=True)
+        print("I am subTest B output", file=sys.stderr, flush=True)
+        print("[  FAILED  ] FirstTest.subTestB (8 ms)", flush=True)
 
         f.write(output)
         exit_code = 1

diff --git a/llvm/utils/lit/tests/Inputs/googletest-format/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-format/lit.cfg
index f2f6cda8db6c0..ff6577443fb45 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-format/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-format/lit.cfg
@@ -1,3 +1,4 @@
 import lit.formats
-config.name = 'googletest-format'
-config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
+
+config.name = "googletest-format"
+config.test_format = lit.formats.GoogleTest("DummySubDir", "Test")

diff --git a/llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/DummySubDir/OneTest.py
index 558117c7c9c08..0811149b1110c 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/DummySubDir/OneTest.py
+++ b/llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/DummySubDir/OneTest.py
@@ -4,22 +4,24 @@
 import sys
 
 if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
-    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+    if sys.argv[2] != "--gtest_filter=-*DISABLED_*":
         raise ValueError("unexpected argument: %s" % (sys.argv[2]))
-    print("""\
+    print(
+        """\
 FirstTest.
-  subTestA""")
+  subTestA"""
+    )
     sys.exit(0)
 elif len(sys.argv) != 1:
     # sharding and json output are specified using environment variables
-    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
+    raise ValueError("unexpected argument: %r" % (" ".join(sys.argv[1:])))
 
-for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+for e in ["GTEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_OUTPUT"]:
     if e not in os.environ:
         raise ValueError("missing environment variables: " + e)
 
-if not os.environ['GTEST_OUTPUT'].startswith('json:'):
-    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+if not os.environ["GTEST_OUTPUT"].startswith("json:"):
+    raise ValueError("must emit json output: " + os.environ["GTEST_OUTPUT"])
 
 output = """\
 {
@@ -44,11 +46,11 @@
 ]
 }"""
 
-json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
-with open(json_filename, 'w', encoding='utf-8') as f:
-    if os.environ['GTEST_SHARD_INDEX'] == '0':
-        print('[ RUN      ] FirstTest.subTestA', flush=True)
-        print('[       OK ] FirstTest.subTestA (8 ms)', flush=True)
+json_filename = os.environ["GTEST_OUTPUT"].split(":", 1)[1]
+with open(json_filename, "w", encoding="utf-8") as f:
+    if os.environ["GTEST_SHARD_INDEX"] == "0":
+        print("[ RUN      ] FirstTest.subTestA", flush=True)
+        print("[       OK ] FirstTest.subTestA (8 ms)", flush=True)
 
         f.write(output)
         exit_code = 1

diff --git a/llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/lit.cfg
index 43a8aabd96b3f..1086f84d273f5 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/lit.cfg
@@ -1,3 +1,4 @@
 import lit.formats
-config.name = 'googletest-sanitizer-error'
-config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
+
+config.name = "googletest-sanitizer-error"
+config.test_format = lit.formats.GoogleTest("DummySubDir", "Test")

diff --git a/llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py b/llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
index 73f659ee0b43f..d1698c69f4d12 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
+++ b/llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
@@ -4,24 +4,26 @@
 import sys
 
 if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
-    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+    if sys.argv[2] != "--gtest_filter=-*DISABLED_*":
         raise ValueError("unexpected argument: %s" % (sys.argv[2]))
-    print("""\
+    print(
+        """\
 T.
   QuickSubTest
   InfiniteLoopSubTest
-""")
+"""
+    )
     sys.exit(0)
 elif len(sys.argv) != 1:
     # sharding and json output are specified using environment variables
-    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
+    raise ValueError("unexpected argument: %r" % (" ".join(sys.argv[1:])))
 
-for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT', 'GTEST_FILTER']:
+for e in ["GTEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_OUTPUT", "GTEST_FILTER"]:
     if e not in os.environ:
         raise ValueError("missing environment variables: " + e)
 
-if not os.environ['GTEST_OUTPUT'].startswith('json:'):
-    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+if not os.environ["GTEST_OUTPUT"].startswith("json:"):
+    raise ValueError("must emit json output: " + os.environ["GTEST_OUTPUT"])
 
 output = """\
 {
@@ -45,21 +47,21 @@
 ]
 }"""
 
-json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
+json_filename = os.environ["GTEST_OUTPUT"].split(":", 1)[1]
 
-if os.environ['GTEST_SHARD_INDEX'] == '0':
-    test_name = os.environ['GTEST_FILTER']
-    if test_name == 'QuickSubTest':
-        with open(json_filename, 'w', encoding='utf-8') as f:
+if os.environ["GTEST_SHARD_INDEX"] == "0":
+    test_name = os.environ["GTEST_FILTER"]
+    if test_name == "QuickSubTest":
+        with open(json_filename, "w", encoding="utf-8") as f:
             f.write(output)
         exit_code = 0
-    elif test_name == 'InfiniteLoopSubTest':
+    elif test_name == "InfiniteLoopSubTest":
         while True:
             pass
     else:
         raise SystemExit("error: invalid test name: %r" % (test_name,))
 else:
-    with open(json_filename, 'w', encoding='utf-8') as f:
+    with open(json_filename, "w", encoding="utf-8") as f:
         f.write(dummy_output)
     exit_code = 0
 

diff --git a/llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg b/llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
index af6a4846af56a..c1eab40f47e8d 100644
--- a/llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
@@ -1,10 +1,11 @@
 import lit.formats
-config.name = 'googletest-timeout'
-config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
 
-configSetTimeout = lit_config.params.get('set_timeout', '0')
-config.environment['GTEST_FILTER'] = lit_config.params.get('gtest_filter')
+config.name = "googletest-timeout"
+config.test_format = lit.formats.GoogleTest("DummySubDir", "Test")
 
-if configSetTimeout == '1':
+configSetTimeout = lit_config.params.get("set_timeout", "0")
+config.environment["GTEST_FILTER"] = lit_config.params.get("gtest_filter")
+
+if configSetTimeout == "1":
     # Try setting the max individual test time in the configuration
     lit_config.maxIndividualTestTime = 1

diff --git a/llvm/utils/lit/tests/Inputs/ignore-fail/lit.cfg b/llvm/utils/lit/tests/Inputs/ignore-fail/lit.cfg
index d908833764161..57fca26f5fe9c 100644
--- a/llvm/utils/lit/tests/Inputs/ignore-fail/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/ignore-fail/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'ignore-fail'
-config.suffixes = ['.txt']
+
+config.name = "ignore-fail"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff --git a/llvm/utils/lit/tests/Inputs/lit-opts/lit.cfg b/llvm/utils/lit/tests/Inputs/lit-opts/lit.cfg
index d8dccdb6f4e93..cf1a4f1ba1ec7 100644
--- a/llvm/utils/lit/tests/Inputs/lit-opts/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/lit-opts/lit.cfg
@@ -1,7 +1,8 @@
 import lit.formats
-config.name = 'lit-opts'
-config.suffixes = ['.txt']
+
+config.name = "lit-opts"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.substitutions.append(('%var', lit_config.params.get('var', '')))
+config.substitutions.append(("%var", lit_config.params.get("var", "")))

diff --git a/llvm/utils/lit/tests/Inputs/lld-features/lit.cfg b/llvm/utils/lit/tests/Inputs/lld-features/lit.cfg
index f9249baa1ced0..f5e75690f93c7 100644
--- a/llvm/utils/lit/tests/Inputs/lld-features/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/lld-features/lit.cfg
@@ -1,17 +1,20 @@
 import lit.formats
-config.name = 'search-env'
-config.suffixes = ['.txt']
+
+config.name = "search-env"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.llvm_tools_dir = ''
+config.llvm_tools_dir = ""
 import lit.llvm
+
 lit.llvm.initialize(lit_config, config)
 import os.path
+
 curdir = os.path.dirname(__file__)
 # The current directory contains files for each version of LLD, both with and
 # without a .exe extension. The .exe versions will be found on Windows and the
 # ones without will be found on Linux. Note that all files are just empty files,
 # since the test doesn't actually use them.
-lit.llvm.llvm_config.with_environment('PATH', curdir, append_path=True)
+lit.llvm.llvm_config.with_environment("PATH", curdir, append_path=True)
 lit.llvm.llvm_config.use_lld(use_installed=True)

diff --git a/llvm/utils/lit/tests/Inputs/max-failures/lit.cfg b/llvm/utils/lit/tests/Inputs/max-failures/lit.cfg
index f0f7649700eff..1335d22c77384 100644
--- a/llvm/utils/lit/tests/Inputs/max-failures/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/max-failures/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'max-failures'
-config.suffixes = ['.txt']
+
+config.name = "max-failures"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff --git a/llvm/utils/lit/tests/Inputs/max-time/lit.cfg b/llvm/utils/lit/tests/Inputs/max-time/lit.cfg
index 01784452bd9b3..0600565e24371 100644
--- a/llvm/utils/lit/tests/Inputs/max-time/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/max-time/lit.cfg
@@ -1,7 +1,8 @@
 import lit.formats
-config.name = 'max-time'
-config.suffixes = ['.txt', '.py']
+
+config.name = "max-time"
+config.suffixes = [".txt", ".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
+config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))

diff --git a/llvm/utils/lit/tests/Inputs/parallelism-groups/lit.cfg b/llvm/utils/lit/tests/Inputs/parallelism-groups/lit.cfg
index de511877ee00b..35815943ff5f8 100644
--- a/llvm/utils/lit/tests/Inputs/parallelism-groups/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/parallelism-groups/lit.cfg
@@ -1,14 +1,15 @@
 import lit.formats
-config.name = 'parallelism-groups'
-config.suffixes = ['.txt']
+
+config.name = "parallelism-groups"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
 
 # Should not crash
-lit_config.parallelism_groups['my_group'] = None
+lit_config.parallelism_groups["my_group"] = None
 
-config.parallelism_group = 'my_group'
+config.parallelism_group = "my_group"
 
 # TODO(yln): we should have a nice property setter that doesn't allow setting
 # to a non-existent group and do Math.min(old_group, new_group) when we

diff --git a/llvm/utils/lit/tests/Inputs/progress-bar/lit.cfg b/llvm/utils/lit/tests/Inputs/progress-bar/lit.cfg
index 17488a26e4f13..d43d1aecbd4f1 100644
--- a/llvm/utils/lit/tests/Inputs/progress-bar/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/progress-bar/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'progress-bar'
-config.suffixes = ['.txt']
+
+config.name = "progress-bar"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff --git a/llvm/utils/lit/tests/Inputs/py-config-discovery/lit.site.cfg.py b/llvm/utils/lit/tests/Inputs/py-config-discovery/lit.site.cfg.py
index ac273c797c5f3..5536fe718fad4 100644
--- a/llvm/utils/lit/tests/Inputs/py-config-discovery/lit.site.cfg.py
+++ b/llvm/utils/lit/tests/Inputs/py-config-discovery/lit.site.cfg.py
@@ -1,5 +1,8 @@
 # Load the discovery suite, but with a separate exec root.
 import os
+
 config.test_exec_root = os.path.dirname(__file__)
-config.test_source_root = os.path.join(os.path.dirname(config.test_exec_root), "discovery")
+config.test_source_root = os.path.join(
+    os.path.dirname(config.test_exec_root), "discovery"
+)
 lit_config.load_config(config, os.path.join(config.test_source_root, "lit.cfg"))

diff --git a/llvm/utils/lit/tests/Inputs/reorder/lit.cfg b/llvm/utils/lit/tests/Inputs/reorder/lit.cfg
index 6320609a1e6c6..fc1b60c6fedbf 100644
--- a/llvm/utils/lit/tests/Inputs/reorder/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/reorder/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'reorder'
-config.suffixes = ['.txt']
+
+config.name = "reorder"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff --git a/llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg b/llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg
index 2aa84326bcea8..277af73dd5ddd 100644
--- a/llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'show-result-codes'
-config.suffixes = ['.txt']
+
+config.name = "show-result-codes"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff --git a/llvm/utils/lit/tests/Inputs/show-used-features/lit.cfg b/llvm/utils/lit/tests/Inputs/show-used-features/lit.cfg
index 7ee2154d2e19b..4707cb4445003 100644
--- a/llvm/utils/lit/tests/Inputs/show-used-features/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/show-used-features/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'show-used-features'
-config.suffixes = ['.txt']
+
+config.name = "show-used-features"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff --git a/llvm/utils/lit/tests/Inputs/shtest-define/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-define/lit.cfg
index 7f0d1f7c2e463..a29755eb2b600 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-define/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-define/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'shtest-define'
-config.suffixes = ['.txt']
+
+config.name = "shtest-define"
+config.suffixes = [".txt"]
 # Use lit's internal shell to avoid shell portability issues within RUN lines
 # (e.g., for 'echo' commands in Windows).  Those issues should be orthogonal to
 # the substitution behavior we are trying to test.
@@ -16,10 +17,11 @@ config.test_exec_root = None
 # of %{global:what}, so we make sure the former expands before the latter.
 # If we always insert at the beginning of the substitution list (as DEFINE
 # does), then the rule is simple: define a substitution before you refer to it.
-config.substitutions.insert(0, ('%{global:what}', 'World'))
-config.substitutions.insert(0, ('%{global:greeting}', ''))
-config.substitutions.insert(0,
-    ('%{global:echo}', "echo GLOBAL: %{global:greeting} %{global:what}"))
+config.substitutions.insert(0, ("%{global:what}", "World"))
+config.substitutions.insert(0, ("%{global:greeting}", ""))
+config.substitutions.insert(
+    0, ("%{global:echo}", "echo GLOBAL: %{global:greeting} %{global:what}")
+)
 
 # The following substitution definitions are confusing and should be avoided.
 # We define them here so we can test that 'DEFINE:' and 'REDEFINE:' directives
@@ -30,21 +32,21 @@ config.substitutions.insert(0,
 # 'DEFINE:' and 'REDEFINE:' will refuse to (re)define a substitution with that
 # pattern because it is a substring of one of the following substitution's
 # patterns.
-config.substitutions.insert(0, ('<%{global:inside}>', '<@>'))
-config.substitutions.insert(0, (r'%{global:prefix}\((.*)\)', r'@(\g<1>)'))
-config.substitutions.insert(0, ('@%{global:suffix}', '@@'))
+config.substitutions.insert(0, ("<%{global:inside}>", "<@>"))
+config.substitutions.insert(0, (r"%{global:prefix}\((.*)\)", r"@(\g<1>)"))
+config.substitutions.insert(0, ("@%{global:suffix}", "@@"))
 
 # These cannot be redefined by 'REDEFINE:', which doesn't know which one to
 # redefine.
-config.substitutions.insert(0, ('%{global:multiple-exact}', 'first'))
-config.substitutions.insert(0, ('%{global:multiple-exact}', 'second'))
+config.substitutions.insert(0, ("%{global:multiple-exact}", "first"))
+config.substitutions.insert(0, ("%{global:multiple-exact}", "second"))
 
 # Even though '%{global:multiple-once-exact}' is the exact pattern of only one
 # existing substitution, 'REDEFINE:' will refuse to redefine that substitution
 # because that string is a substring of another substitution's pattern.
-config.substitutions.insert(0, ('%{global:multiple-once-exact}', '@'))
-config.substitutions.insert(0, ('<%{global:multiple-once-exact}>', '<@>'))
+config.substitutions.insert(0, ("%{global:multiple-once-exact}", "@"))
+config.substitutions.insert(0, ("<%{global:multiple-once-exact}>", "<@>"))
 
-recur = lit_config.params.get('recur', None)
+recur = lit_config.params.get("recur", None)
 if recur:
-  config.recursiveExpansionLimit = int(recur)
+    config.recursiveExpansionLimit = int(recur)

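The shtest-define config above leans on substitution ordering: lit applies config.substitutions in list order, so inserting at index 0 guarantees a pattern expands before anything defined earlier, which is why %{global:echo} can refer to %{global:what}. A toy illustration of that ordering rule (simplified to escaped literal patterns; lit itself treats patterns as regexes):

    import re

    substitutions = []
    substitutions.insert(0, ("%{global:what}", "World"))
    substitutions.insert(0, ("%{global:echo}", "echo GLOBAL: %{global:what}"))

    line = "RUN: %{global:echo}"
    for pattern, replacement in substitutions:
        line = re.sub(re.escape(pattern), replacement, line)
    print(line)  # RUN: echo GLOBAL: World
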
diff --git a/llvm/utils/lit/tests/Inputs/shtest-env/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-env/lit.cfg
index 1e2d050754a79..df9df7da81daa 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-env/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-env/lit.cfg
@@ -1,9 +1,10 @@
 import lit.formats
-config.name = 'shtest-env'
-config.suffixes = ['.txt']
+
+config.name = "shtest-env"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.environment['FOO'] = '1'
-config.environment['BAR'] = '2'
-config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
+config.environment["FOO"] = "1"
+config.environment["BAR"] = "2"
+config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))

diff --git a/llvm/utils/lit/tests/Inputs/shtest-env/print_environment.py b/llvm/utils/lit/tests/Inputs/shtest-env/print_environment.py
index af6127670b8b5..e39bd73e44a10 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-env/print_environment.py
+++ b/llvm/utils/lit/tests/Inputs/shtest-env/print_environment.py
@@ -5,5 +5,5 @@
 
 sorted_environment = sorted(os.environ.items())
 
-for name,value in sorted_environment:
-    print(name,'=',value)
+for name, value in sorted_environment:
+    print(name, "=", value)

diff --git a/llvm/utils/lit/tests/Inputs/shtest-format-argv0/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-format-argv0/lit.cfg
index a5ee22c731d7c..a0780499cdc32 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-format-argv0/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-format-argv0/lit.cfg
@@ -1,7 +1,8 @@
 import lit.formats
-config.name = 'shtest-format-argv0'
-config.suffixes = ['.txt']
+
+config.name = "shtest-format-argv0"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.target_triple = 'x86_64-unknown-unknown'
+config.target_triple = "x86_64-unknown-unknown"

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-format/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-format/lit.cfg
index 799b518180844..f84002090f82e 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-format/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-format/lit.cfg
@@ -1,10 +1,11 @@
 import lit.formats
-config.name = 'shtest-format'
-config.suffixes = ['.txt']
+
+config.name = "shtest-format"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.target_triple = 'x86_64-unknown-unknown'
-config.available_features.add('target=%s' % config.target_triple)
-config.available_features.add('a-present-feature')
-config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
+config.target_triple = "x86_64-unknown-unknown"
+config.available_features.add("target=%s" % config.target_triple)
+config.available_features.add("a-present-feature")
+config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-if-else/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-if-else/lit.cfg
index b2243df51c20c..65b1651c95504 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-if-else/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-if-else/lit.cfg
@@ -1,8 +1,9 @@
 import lit.formats
-config.name = 'shtest-if-else'
+
+config.name = "shtest-if-else"
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.suffixes = ['.txt']
-config.available_features.add('feature')
-config.substitutions.append(('%{sub}', 'ok'))
+config.suffixes = [".txt"]
+config.available_features.add("feature")
+config.substitutions.append(("%{sub}", "ok"))

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-inject/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-inject/lit.cfg
index 755edf8b5bfce..b3a86e73d21ac 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-inject/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-inject/lit.cfg
@@ -1,12 +1,9 @@
 import lit
 
-preamble_commands = [
-    'echo "THIS WAS"',
-    'echo "INJECTED"'
-]
+preamble_commands = ['echo "THIS WAS"', 'echo "INJECTED"']
 
-config.name = 'shtest-inject'
-config.suffixes = ['.txt']
+config.name = "shtest-inject"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest(preamble_commands=preamble_commands)
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-keyword-parse-errors/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-keyword-parse-errors/lit.cfg
index 5a28fb03ece38..04ead8b83a7f2 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-keyword-parse-errors/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-keyword-parse-errors/lit.cfg
@@ -1,4 +1,5 @@
 import lit.formats
-config.name = 'shtest-keyword-parse-errors'
-config.suffixes = ['.txt']
+
+config.name = "shtest-keyword-parse-errors"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-not/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-not/lit.cfg
index 8609fd2c51378..0197c6ee780dd 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-not/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-not/lit.cfg
@@ -1,7 +1,8 @@
 import lit.formats
-config.name = 'shtest-not'
-config.suffixes = ['.txt']
+
+config.name = "shtest-not"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
+config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-not/print_environment.py b/llvm/utils/lit/tests/Inputs/shtest-not/print_environment.py
index 7397889d5e764..c7070d4141702 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-not/print_environment.py
+++ b/llvm/utils/lit/tests/Inputs/shtest-not/print_environment.py
@@ -1,6 +1,7 @@
 from __future__ import print_function
 import os
 
+
 def execute():
-    for name in ['FOO', 'BAR']:
-        print(name, '=', os.environ.get(name, '[undefined]'))
+    for name in ["FOO", "BAR"]:
+        print(name, "=", os.environ.get(name, "[undefined]"))

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-output-printing/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-output-printing/lit.cfg
index 4fe698d73368e..b872854d21e63 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-output-printing/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-output-printing/lit.cfg
@@ -1,4 +1,5 @@
 import lit.formats
-config.name = 'shtest-output-printing'
-config.suffixes = ['.txt']
+
+config.name = "shtest-output-printing"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest(execute_external=False)

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-pushd-popd/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-pushd-popd/lit.cfg
index e7c209220ec06..1c30b97d82ac0 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-pushd-popd/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-pushd-popd/lit.cfg
@@ -1,4 +1,5 @@
 import lit.formats
-config.name = 'shtest-pushd-popd'
-config.suffixes = ['.txt']
+
+config.name = "shtest-pushd-popd"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest(execute_external=False)

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-no-limit/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-no-limit/lit.cfg
index 80c5f88c2ebd8..5eb912d43b71b 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-no-limit/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-no-limit/lit.cfg
@@ -1,10 +1,15 @@
 import lit.formats
-config.name = 'does-not-substitute-no-limit'
-config.suffixes = ['.py']
+
+config.name = "does-not-substitute-no-limit"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
 
-config.substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
-                        ("%rec3", "%rec2"), ("%rec4", "%rec3"),
-                        ("%rec5", "%rec4")]
+config.substitutions = [
+    ("%rec1", "STOP"),
+    ("%rec2", "%rec1"),
+    ("%rec3", "%rec2"),
+    ("%rec4", "%rec3"),
+    ("%rec5", "%rec4"),
+]

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-within-limit/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-within-limit/lit.cfg
index 69e20ceef5839..f772b01b4d4d3 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-within-limit/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-within-limit/lit.cfg
@@ -1,12 +1,17 @@
 import lit.formats
-config.name = 'does-not-substitute-within-limit'
-config.suffixes = ['.py']
+
+config.name = "does-not-substitute-within-limit"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
 
-config.substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
-                        ("%rec3", "%rec2"), ("%rec4", "%rec3"),
-                        ("%rec5", "%rec4")]
+config.substitutions = [
+    ("%rec1", "STOP"),
+    ("%rec2", "%rec1"),
+    ("%rec3", "%rec2"),
+    ("%rec4", "%rec3"),
+    ("%rec5", "%rec4"),
+]
 
 config.recursiveExpansionLimit = 2

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/escaping/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/escaping/lit.cfg
index 97a4faac23870..71161f055ed83 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/escaping/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/escaping/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'escaping'
-config.suffixes = ['.py']
+
+config.name = "escaping"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/negative-integer/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/negative-integer/lit.cfg
index 164e4a162fecf..d3efab5bef9af 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/negative-integer/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/negative-integer/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'negative-integer'
-config.suffixes = ['.py']
+
+config.name = "negative-integer"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/not-an-integer/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/not-an-integer/lit.cfg
index 6407d23b6b4ea..0f50d9548d990 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/not-an-integer/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/not-an-integer/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'not-an-integer'
-config.suffixes = ['.py']
+
+config.name = "not-an-integer"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/set-to-none/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/set-to-none/lit.cfg
index 9c2583383fb62..6f8a1e7b2cabd 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/set-to-none/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/set-to-none/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'set-to-none'
-config.suffixes = ['.py']
+
+config.name = "set-to-none"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/substitutes-within-limit/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/substitutes-within-limit/lit.cfg
index 6ff0ee5ba1f4f..1fccdd798ea83 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/substitutes-within-limit/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/substitutes-within-limit/lit.cfg
@@ -1,12 +1,17 @@
 import lit.formats
-config.name = 'substitutes-within-limit'
-config.suffixes = ['.py']
+
+config.name = "substitutes-within-limit"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
 
-config.substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
-                        ("%rec3", "%rec2"), ("%rec4", "%rec3"),
-                        ("%rec5", "%rec4")]
+config.substitutions = [
+    ("%rec1", "STOP"),
+    ("%rec2", "%rec1"),
+    ("%rec3", "%rec2"),
+    ("%rec4", "%rec3"),
+    ("%rec5", "%rec4"),
+]
 
 config.recursiveExpansionLimit = 5

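The five %recN substitutions above form a chain: fully resolving "%rec5" to
"STOP" takes five rewrite passes, which is what the limits of 2 and 5 in these
sibling suites probe. A minimal sketch of an iterate-to-fixed-point expansion
loop (illustrative only, not lit's actual implementation):

    import re

    def expand(line, substitutions, limit):
        # Apply every (pattern, replacement) pair per pass until the line
        # stops changing or the pass limit is exhausted.
        for _ in range(limit):
            expanded = line
            for pattern, replacement in substitutions:
                expanded = re.sub(re.escape(pattern), replacement, expanded)
            if expanded == line:  # fixed point: nothing left to substitute
                break
            line = expanded
        return line

    subs = [("%rec1", "STOP"), ("%rec2", "%rec1"), ("%rec3", "%rec2"),
            ("%rec4", "%rec3"), ("%rec5", "%rec4")]
    print(expand("%rec5", subs, 5))  # STOP
    print(expand("%rec5", subs, 2))  # %rec3 -- this sketch stops early
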
diff  --git a/llvm/utils/lit/tests/Inputs/shtest-run-at-line/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-run-at-line/lit.cfg
index f4c7921b73269..e015516ad08d4 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-run-at-line/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-run-at-line/lit.cfg
@@ -1,2 +1,2 @@
-config.name = 'shtest-run-at-line'
-config.suffixes = ['.txt']
+config.name = "shtest-run-at-line"
+config.suffixes = [".txt"]

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-shell/check_args.py b/llvm/utils/lit/tests/Inputs/shtest-shell/check_args.py
index 2f7a2503b9762..2ad14c4017eab 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-shell/check_args.py
+++ b/llvm/utils/lit/tests/Inputs/shtest-shell/check_args.py
@@ -8,7 +8,8 @@
 
 args = parser.parse_args()
 
-answer = (platform.system() == "Windows" and
-          args.my_arg == "/dev/null" and "ERROR") or "OK"
+answer = (
+    platform.system() == "Windows" and args.my_arg == "/dev/null" and "ERROR"
+) or "OK"
 
 print(answer)

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-shell/check_path.py b/llvm/utils/lit/tests/Inputs/shtest-shell/check_path.py
index 467505b7f293f..62c906846fbb5 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-shell/check_path.py
+++ b/llvm/utils/lit/tests/Inputs/shtest-shell/check_path.py
@@ -15,10 +15,10 @@ def check_path(argv):
     paths = argv[2:]
     exit_code = 0
 
-    if type == 'dir':
+    if type == "dir":
         for idx, dir in enumerate(paths):
             print(os.path.isdir(dir))
-    elif type == 'file':
+    elif type == "file":
         for idx, file in enumerate(paths):
             print(os.path.isfile(file))
     else:
@@ -26,5 +26,6 @@ def check_path(argv):
         exit_code = 1
     return exit_code
 
-if __name__ == '__main__':
-    sys.exit (check_path (sys.argv))
+
+if __name__ == "__main__":
+    sys.exit(check_path(sys.argv))

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-shell/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-shell/lit.cfg
index 3231dedc71464..cf71edc1d54d2 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-shell/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-shell/lit.cfg
@@ -1,7 +1,8 @@
 import lit.formats
-config.name = 'shtest-shell'
-config.suffixes = ['.txt']
+
+config.name = "shtest-shell"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
-config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
+config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))

diff  --git a/llvm/utils/lit/tests/Inputs/shtest-timeout/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-timeout/lit.cfg
index 6256f5a9911a0..b78c0eac31d88 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-timeout/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-timeout/lit.cfg
@@ -4,37 +4,37 @@ import sys
 
 import lit.formats
 
-config.name = 'per_test_timeout'
+config.name = "per_test_timeout"
 
-shellType = lit_config.params.get('external', '1')
+shellType = lit_config.params.get("external", "1")
 
-if shellType == '0':
-    lit_config.note('Using internal shell')
+if shellType == "0":
+    lit_config.note("Using internal shell")
     externalShell = False
 else:
-    lit_config.note('Using external shell')
+    lit_config.note("Using external shell")
     externalShell = True
 
-configSetTimeout = lit_config.params.get('set_timeout', '0')
+configSetTimeout = lit_config.params.get("set_timeout", "0")
 
-if configSetTimeout != '0':
+if configSetTimeout != "0":
     # Try setting the max individual test time in the configuration
     lit_config.maxIndividualTestTime = int(configSetTimeout)
 
 config.test_format = lit.formats.ShTest(execute_external=externalShell)
-config.suffixes = ['.py']
+config.suffixes = [".py"]
 
 config.test_source_root = os.path.dirname(__file__)
 config.test_exec_root = config.test_source_root
-config.target_triple = '(unused)'
-src_root = os.path.join(config.test_source_root, '..')
+config.target_triple = "(unused)"
+src_root = os.path.join(config.test_source_root, "..")
 
 pythonpath_list = [src_root]
 # Ensure the user's PYTHONPATH is included.
-if 'PYTHONPATH' in os.environ:
-    pythonpath_list.append(os.environ['PYTHONPATH'])
-if 'PYTHONPATH' in config.environment:
-    pythonpath_list.append(config.environment['PYTHONPATH'])
-config.environment['PYTHONPATH'] = os.pathsep.join(pythonpath_list)
+if "PYTHONPATH" in os.environ:
+    pythonpath_list.append(os.environ["PYTHONPATH"])
+if "PYTHONPATH" in config.environment:
+    pythonpath_list.append(config.environment["PYTHONPATH"])
+config.environment["PYTHONPATH"] = os.pathsep.join(pythonpath_list)
 
-config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
+config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))

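The suite above drives .py tests whose runtime exceeds the configured limit.
A hypothetical example of the kind of test file involved (the actual inputs
are not part of this hunk); the RUN line uses the %{python} substitution
appended at the end of the config:

    # RUN: %{python} %s
    # Deliberately slow: with maxIndividualTestTime set (via the set_timeout
    # param above or lit's --timeout flag), lit kills the process and reports
    # a timeout instead of waiting for it to finish.
    import time

    time.sleep(3600)
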
diff  --git a/llvm/utils/lit/tests/Inputs/standalone-tests-with-excludes/lit.cfg b/llvm/utils/lit/tests/Inputs/standalone-tests-with-excludes/lit.cfg
index 37d96ddaebfee..e277348245df5 100644
--- a/llvm/utils/lit/tests/Inputs/standalone-tests-with-excludes/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/standalone-tests-with-excludes/lit.cfg
@@ -1,5 +1,6 @@
 import lit.formats
-config.name = 'Standalone tests'
+
+config.name = "Standalone tests"
 config.test_format = lit.formats.ShTest()
-config.excludes = ['.test']
+config.excludes = [".test"]
 config.standalone_tests = True

diff  --git a/llvm/utils/lit/tests/Inputs/standalone-tests-with-suffixes/lit.cfg b/llvm/utils/lit/tests/Inputs/standalone-tests-with-suffixes/lit.cfg
index 41434c9855b38..22d981fa8310d 100644
--- a/llvm/utils/lit/tests/Inputs/standalone-tests-with-suffixes/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/standalone-tests-with-suffixes/lit.cfg
@@ -1,5 +1,6 @@
 import lit.formats
-config.name = 'Standalone tests'
+
+config.name = "Standalone tests"
 config.test_format = lit.formats.ShTest()
-config.suffixes = ['.txt']
+config.suffixes = [".txt"]
 config.standalone_tests = True

diff  --git a/llvm/utils/lit/tests/Inputs/standalone-tests/lit.cfg b/llvm/utils/lit/tests/Inputs/standalone-tests/lit.cfg
index 12ed7b0ce067b..e77ce30c4b0aa 100644
--- a/llvm/utils/lit/tests/Inputs/standalone-tests/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/standalone-tests/lit.cfg
@@ -1,4 +1,5 @@
 import lit.formats
-config.name = 'Standalone tests'
+
+config.name = "Standalone tests"
 config.test_format = lit.formats.ShTest()
 config.standalone_tests = True

diff  --git a/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py b/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
index 5842f5a5ba33b..b400083a0d967 100644
--- a/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
+++ b/llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
@@ -1,4 +1,5 @@
 import os
+
 try:
     import ConfigParser
 except ImportError:
@@ -7,6 +8,7 @@
 import lit.formats
 import lit.Test
 
+
 class DummyFormat(lit.formats.FileBasedTest):
     def execute(self, test, lit_config):
         # In this dummy format, expect that each test file is actually just a
@@ -18,13 +20,12 @@ def execute(self, test, lit_config):
         cfg.read(source_path)
 
         # Create the basic test result.
-        result_code = cfg.get('global', 'result_code')
-        result_output = cfg.get('global', 'result_output')
-        result = lit.Test.Result(getattr(lit.Test, result_code),
-                                 result_output)
+        result_code = cfg.get("global", "result_code")
+        result_output = cfg.get("global", "result_output")
+        result = lit.Test.Result(getattr(lit.Test, result_code), result_output)
 
         # Load additional metrics.
-        for key,value_str in cfg.items('results'):
+        for key, value_str in cfg.items("results"):
             value = eval(value_str)
             if isinstance(value, int):
                 metric = lit.Test.IntMetricValue(value)
@@ -35,10 +36,10 @@ def execute(self, test, lit_config):
             result.addMetric(key, metric)
 
         # Create micro test results
-        for key,micro_name in cfg.items('micro-tests'):
-            micro_result = lit.Test.Result(getattr(lit.Test, result_code, ''))
+        for key, micro_name in cfg.items("micro-tests"):
+            micro_result = lit.Test.Result(getattr(lit.Test, result_code, ""))
             # Load micro test additional metrics
-            for key,value_str in cfg.items('micro-results'):
+            for key, value_str in cfg.items("micro-results"):
                 value = eval(value_str)
                 if isinstance(value, int):
                     metric = lit.Test.IntMetricValue(value)

diff  --git a/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg b/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg
index 3fc1e8597360f..a565db13be148 100644
--- a/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg
@@ -1,9 +1,10 @@
 import site
+
 site.addsitedir(os.path.dirname(__file__))
 import dummy_format
 
-config.name = 'test-data-micro'
-config.suffixes = ['.ini']
+config.name = "test-data-micro"
+config.suffixes = [".ini"]
 config.test_format = dummy_format.DummyFormat()
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/Inputs/test-data/dummy_format.py b/llvm/utils/lit/tests/Inputs/test-data/dummy_format.py
index 93e48eeb83960..a2d314fdb1a8d 100644
--- a/llvm/utils/lit/tests/Inputs/test-data/dummy_format.py
+++ b/llvm/utils/lit/tests/Inputs/test-data/dummy_format.py
@@ -1,4 +1,5 @@
 import os
+
 try:
     import ConfigParser
 except ImportError:
@@ -7,6 +8,7 @@
 import lit.formats
 import lit.Test
 
+
 class DummyFormat(lit.formats.FileBasedTest):
     def execute(self, test, lit_config):
         # In this dummy format, expect that each test file is actually just a
@@ -18,13 +20,12 @@ def execute(self, test, lit_config):
         cfg.read(source_path)
 
         # Create the basic test result.
-        result_code = cfg.get('global', 'result_code')
-        result_output = cfg.get('global', 'result_output')
-        result = lit.Test.Result(getattr(lit.Test, result_code),
-                                 result_output)
+        result_code = cfg.get("global", "result_code")
+        result_output = cfg.get("global", "result_output")
+        result = lit.Test.Result(getattr(lit.Test, result_code), result_output)
 
         # Load additional metrics.
-        for key,value_str in cfg.items('results'):
+        for key, value_str in cfg.items("results"):
             value = eval(value_str)
             if isinstance(value, int):
                 metric = lit.Test.IntMetricValue(value)
@@ -35,4 +36,3 @@ def execute(self, test, lit_config):
             result.addMetric(key, metric)
 
         return result
-

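Per the comment in execute() above, each test this format discovers is just a
config file. A reconstructed example of the shape it expects (section and key
names come from the code above; the values are illustrative):

    import configparser
    import textwrap

    # DummyFormat turns result_code into a lit.Test result object and each
    # [results] entry into an Int/Real metric after eval().
    sample = textwrap.dedent(
        """\
        [global]
        result_code = PASS
        result_output = example output

        [results]
        value0 = 1
        value1 = 2.3456
        """
    )

    cfg = configparser.ConfigParser()
    cfg.read_string(sample)
    print(cfg.get("global", "result_code"))  # PASS
    print(dict(cfg.items("results")))        # {'value0': '1', 'value1': '2.3456'}
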
diff  --git a/llvm/utils/lit/tests/Inputs/test-data/lit.cfg b/llvm/utils/lit/tests/Inputs/test-data/lit.cfg
index 0191cc2188843..6de70eabb0137 100644
--- a/llvm/utils/lit/tests/Inputs/test-data/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/test-data/lit.cfg
@@ -1,9 +1,10 @@
 import site
+
 site.addsitedir(os.path.dirname(__file__))
 import dummy_format
 
-config.name = 'test-data'
-config.suffixes = ['.ini']
+config.name = "test-data"
+config.suffixes = [".ini"]
 config.test_format = dummy_format.DummyFormat()
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/Inputs/test_retry_attempts/lit.cfg b/llvm/utils/lit/tests/Inputs/test_retry_attempts/lit.cfg
index a3b660fbaef32..cf15854b1799b 100644
--- a/llvm/utils/lit/tests/Inputs/test_retry_attempts/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/test_retry_attempts/lit.cfg
@@ -1,10 +1,11 @@
 import lit.formats
-config.name = 'test_retry_attempts'
-config.suffixes = ['.py']
+
+config.name = "test_retry_attempts"
+config.suffixes = [".py"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
 
 config.test_retry_attempts = 5
-config.substitutions.append(('%python', lit_config.params.get('python', '')))
-config.substitutions.append(('%counter', lit_config.params.get('counter', '')))
+config.substitutions.append(("%python", lit_config.params.get("python", "")))
+config.substitutions.append(("%counter", lit_config.params.get("counter", "")))

diff  --git a/llvm/utils/lit/tests/Inputs/test_retry_attempts/test.py b/llvm/utils/lit/tests/Inputs/test_retry_attempts/test.py
index ee8a92cc5d8ff..a139976cc49ec 100644
--- a/llvm/utils/lit/tests/Inputs/test_retry_attempts/test.py
+++ b/llvm/utils/lit/tests/Inputs/test_retry_attempts/test.py
@@ -7,16 +7,16 @@
 
 # The first time the test is run, initialize the counter to 1.
 if not os.path.exists(counter_file):
-    with open(counter_file, 'w') as counter:
+    with open(counter_file, "w") as counter:
         counter.write("1")
 
 # Succeed if this is the fourth time we're being run.
-with open(counter_file, 'r') as counter:
+with open(counter_file, "r") as counter:
     num = int(counter.read())
     if num == 4:
         sys.exit(0)
 
 # Otherwise, increment the counter and fail
-with open(counter_file, 'w') as counter:
+with open(counter_file, "w") as counter:
     counter.write(str(num + 1))
     sys.exit(1)

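Taken together with the config above (config.test_retry_attempts = 5), this
test fails on its first three runs and exits 0 on the fourth, so it passes
within the allowed attempts. A rough sketch of that interaction (illustrative
only; lit's real scheduler also reports such passes distinctly):

    def run_with_retries(run_once, retry_attempts):
        # One initial run plus up to retry_attempts re-runs; success on any
        # attempt counts as a pass.
        for attempt in range(1 + retry_attempts):
            if run_once() == 0:
                return "PASS" if attempt == 0 else "PASS (after retries)"
        return "FAIL"

    attempts = iter([1, 1, 1, 0])  # exit codes of the test above
    print(run_with_retries(lambda: next(attempts), 5))  # PASS (after retries)
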
diff  --git a/llvm/utils/lit/tests/Inputs/testrunner-custom-parsers/lit.cfg b/llvm/utils/lit/tests/Inputs/testrunner-custom-parsers/lit.cfg
index cf46c1674e572..29ff8dfef6e0a 100644
--- a/llvm/utils/lit/tests/Inputs/testrunner-custom-parsers/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/testrunner-custom-parsers/lit.cfg
@@ -2,13 +2,15 @@ import lit.formats
 import os
 import lit.Test
 
+
 class TestParserFormat(lit.formats.FileBasedTest):
-  def execute(self, test, lit_config):
-      return lit.Test.PASS, ''
+    def execute(self, test, lit_config):
+        return lit.Test.PASS, ""
+
 
-config.name = 'custom-parsers'
-config.suffixes = ['.txt']
+config.name = "custom-parsers"
+config.suffixes = [".txt"]
 config.test_format = TestParserFormat()
 config.test_source_root = None
 config.test_exec_root = None
-config.target_triple = 'x86_64-unknown-unknown'
+config.target_triple = "x86_64-unknown-unknown"

diff  --git a/llvm/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg b/llvm/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
index 9e08a8629a435..045575f20c285 100644
--- a/llvm/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
@@ -1,6 +1,7 @@
 import lit.formats
-config.name = 'unittest-adaptor'
-config.suffixes = ['.txt']
+
+config.name = "unittest-adaptor"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/Inputs/use-llvm-tool-required/lit.cfg b/llvm/utils/lit/tests/Inputs/use-llvm-tool-required/lit.cfg
index b1c510e05ab74..e41207bc2f05d 100644
--- a/llvm/utils/lit/tests/Inputs/use-llvm-tool-required/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/use-llvm-tool-required/lit.cfg
@@ -1,12 +1,15 @@
 import lit.formats
-config.name = 'use-llvm-tool-required'
-config.suffixes = ['.txt']
+
+config.name = "use-llvm-tool-required"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
 import os.path
+
 config.llvm_tools_dir = os.path.realpath(os.path.dirname(__file__))
 import lit.llvm
+
 lit.llvm.initialize(lit_config, config)
-lit.llvm.llvm_config.use_llvm_tool('found', required=True)
-lit.llvm.llvm_config.use_llvm_tool('not-found', required=True)
+lit.llvm.llvm_config.use_llvm_tool("found", required=True)
+lit.llvm.llvm_config.use_llvm_tool("not-found", required=True)

diff  --git a/llvm/utils/lit/tests/Inputs/use-llvm-tool/lit.cfg b/llvm/utils/lit/tests/Inputs/use-llvm-tool/lit.cfg
index 2bd401aacde35..8fe62d98c1349 100644
--- a/llvm/utils/lit/tests/Inputs/use-llvm-tool/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/use-llvm-tool/lit.cfg
@@ -1,25 +1,34 @@
 import lit.formats
-config.name = 'use-llvm-tool'
-config.suffixes = ['.txt']
+
+config.name = "use-llvm-tool"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()
 config.test_source_root = None
 config.test_exec_root = None
 import os.path
+
 this_dir = os.path.realpath(os.path.dirname(__file__))
-config.llvm_tools_dir = os.path.join(this_dir, 'build')
+config.llvm_tools_dir = os.path.join(this_dir, "build")
 import lit.llvm
+
 lit.llvm.initialize(lit_config, config)
-lit.llvm.llvm_config.with_environment('CASE1', os.path.join(this_dir, 'env-case1'))
-lit.llvm.llvm_config.with_environment('CASE6', os.path.join(this_dir, 'env-case6'))
-lit.llvm.llvm_config.with_environment('PATH', os.path.join(this_dir, 'path'), append_path=True)
-lit.llvm.llvm_config.use_llvm_tool('case1', search_env='CASE1')
-lit.llvm.llvm_config.use_llvm_tool('case2', search_env='CASE2')
-lit.llvm.llvm_config.use_llvm_tool('case3')
-lit.llvm.llvm_config.use_llvm_tool('case4', use_installed=True)
-lit.llvm.llvm_config.use_llvm_tool('case5')
-lit.llvm.llvm_config.use_llvm_tool('case6', search_env='CASE6', use_installed=True)
-lit.llvm.llvm_config.use_llvm_tool('case7', use_installed=True)
-lit.llvm.llvm_config.use_llvm_tool('case8', use_installed=True)
-paths = [os.path.join(this_dir, 'search1'), os.path.join(this_dir, 'search2'), os.path.join(this_dir, 'search3')]
-lit.llvm.llvm_config.use_llvm_tool('case9', search_paths=paths)
-lit.llvm.llvm_config.use_llvm_tool('case10', search_paths=paths, use_installed=True)
+lit.llvm.llvm_config.with_environment("CASE1", os.path.join(this_dir, "env-case1"))
+lit.llvm.llvm_config.with_environment("CASE6", os.path.join(this_dir, "env-case6"))
+lit.llvm.llvm_config.with_environment(
+    "PATH", os.path.join(this_dir, "path"), append_path=True
+)
+lit.llvm.llvm_config.use_llvm_tool("case1", search_env="CASE1")
+lit.llvm.llvm_config.use_llvm_tool("case2", search_env="CASE2")
+lit.llvm.llvm_config.use_llvm_tool("case3")
+lit.llvm.llvm_config.use_llvm_tool("case4", use_installed=True)
+lit.llvm.llvm_config.use_llvm_tool("case5")
+lit.llvm.llvm_config.use_llvm_tool("case6", search_env="CASE6", use_installed=True)
+lit.llvm.llvm_config.use_llvm_tool("case7", use_installed=True)
+lit.llvm.llvm_config.use_llvm_tool("case8", use_installed=True)
+paths = [
+    os.path.join(this_dir, "search1"),
+    os.path.join(this_dir, "search2"),
+    os.path.join(this_dir, "search3"),
+]
+lit.llvm.llvm_config.use_llvm_tool("case9", search_paths=paths)
+lit.llvm.llvm_config.use_llvm_tool("case10", search_paths=paths, use_installed=True)

diff  --git a/llvm/utils/lit/tests/Inputs/xfail-cl/a/lit.cfg b/llvm/utils/lit/tests/Inputs/xfail-cl/a/lit.cfg
index 09f49c32a173a..9d68c96290bef 100644
--- a/llvm/utils/lit/tests/Inputs/xfail-cl/a/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/xfail-cl/a/lit.cfg
@@ -1,4 +1,5 @@
 import lit.formats
-config.name = 'top-level-suite :: a'
-config.suffixes = ['.txt']
+
+config.name = "top-level-suite :: a"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()

diff  --git a/llvm/utils/lit/tests/Inputs/xfail-cl/b/lit.cfg b/llvm/utils/lit/tests/Inputs/xfail-cl/b/lit.cfg
index 62f721c671392..3aeafcc9f3a4e 100644
--- a/llvm/utils/lit/tests/Inputs/xfail-cl/b/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/xfail-cl/b/lit.cfg
@@ -1,4 +1,5 @@
 import lit.formats
-config.name = 'top-level-suite :: b'
-config.suffixes = ['.txt']
+
+config.name = "top-level-suite :: b"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()

diff  --git a/llvm/utils/lit/tests/Inputs/xfail-cl/lit.cfg b/llvm/utils/lit/tests/Inputs/xfail-cl/lit.cfg
index 4568d61bfbcfc..50f4de93a396b 100644
--- a/llvm/utils/lit/tests/Inputs/xfail-cl/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/xfail-cl/lit.cfg
@@ -1,4 +1,5 @@
 import lit.formats
-config.name = 'top-level-suite'
-config.suffixes = ['.txt']
+
+config.name = "top-level-suite"
+config.suffixes = [".txt"]
 config.test_format = lit.formats.ShTest()

diff  --git a/llvm/utils/lit/tests/Inputs/xunit-output/dummy_format.py b/llvm/utils/lit/tests/Inputs/xunit-output/dummy_format.py
index 59e75eb6860c5..efac0b561c44b 100644
--- a/llvm/utils/lit/tests/Inputs/xunit-output/dummy_format.py
+++ b/llvm/utils/lit/tests/Inputs/xunit-output/dummy_format.py
@@ -1,4 +1,5 @@
 import os
+
 try:
     import ConfigParser
 except ImportError:
@@ -7,6 +8,7 @@
 import lit.formats
 import lit.Test
 
+
 class DummyFormat(lit.formats.FileBasedTest):
     def execute(self, test, lit_config):
         # In this dummy format, expect that each test file is actually just a
@@ -18,18 +20,17 @@ def execute(self, test, lit_config):
         cfg.read(source_path)
 
         # Create the basic test result.
-        result_code = cfg.get('global', 'result_code')
-        result_output = cfg.get('global', 'result_output')
-        result = lit.Test.Result(getattr(lit.Test, result_code),
-                                 result_output)
+        result_code = cfg.get("global", "result_code")
+        result_output = cfg.get("global", "result_output")
+        result = lit.Test.Result(getattr(lit.Test, result_code), result_output)
 
-        if cfg.has_option('global', 'required_feature'):
-            required_feature = cfg.get('global', 'required_feature')
+        if cfg.has_option("global", "required_feature"):
+            required_feature = cfg.get("global", "required_feature")
             if required_feature:
                 test.requires.append(required_feature)
 
         # Load additional metrics.
-        for key,value_str in cfg.items('results'):
+        for key, value_str in cfg.items("results"):
             value = eval(value_str)
             if isinstance(value, int):
                 metric = lit.Test.IntMetricValue(value)
@@ -40,4 +41,3 @@ def execute(self, test, lit_config):
             result.addMetric(key, metric)
 
         return result
-

diff  --git a/llvm/utils/lit/tests/Inputs/xunit-output/lit.cfg b/llvm/utils/lit/tests/Inputs/xunit-output/lit.cfg
index 0191cc2188843..6de70eabb0137 100644
--- a/llvm/utils/lit/tests/Inputs/xunit-output/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/xunit-output/lit.cfg
@@ -1,9 +1,10 @@
 import site
+
 site.addsitedir(os.path.dirname(__file__))
 import dummy_format
 
-config.name = 'test-data'
-config.suffixes = ['.ini']
+config.name = "test-data"
+config.suffixes = [".ini"]
 config.test_format = dummy_format.DummyFormat()
 config.test_source_root = None
 config.test_exec_root = None

diff  --git a/llvm/utils/lit/tests/lit.cfg b/llvm/utils/lit/tests/lit.cfg
index b9a846ac4784d..438fdbd93c398 100644
--- a/llvm/utils/lit/tests/lit.cfg
+++ b/llvm/utils/lit/tests/lit.cfg
@@ -11,48 +11,48 @@ from lit.llvm import llvm_config
 # Configuration file for the 'lit' test runner.
 
 # name: The name of this test suite.
-config.name = 'lit'
+config.name = "lit"
 
 # testFormat: The test format to use to interpret tests.
 config.test_format = lit.formats.ShTest(execute_external=False)
 
 # suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.py']
+config.suffixes = [".py"]
 
 # excludes: A list of individual files to exclude.
-config.excludes = ['Inputs']
+config.excludes = ["Inputs"]
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = os.path.dirname(__file__)
 config.test_exec_root = config.test_source_root
 
-config.target_triple = '(unused)'
+config.target_triple = "(unused)"
 
-llvm_src_root = getattr(config, 'llvm_src_root', None)
+llvm_src_root = getattr(config, "llvm_src_root", None)
 if llvm_src_root:
-  # ``test_source_root`` may be in LLVM's binary build directory which does not contain
-  # ``lit.py``, so use `llvm_src_root` instead.
-  lit_path = os.path.join(llvm_src_root, 'utils', 'lit')
+    # ``test_source_root`` may be in LLVM's binary build directory which does not contain
+    # ``lit.py``, so use `llvm_src_root` instead.
+    lit_path = os.path.join(llvm_src_root, "utils", "lit")
 else:
-  lit_path = os.path.join(config.test_source_root, '..')
+    lit_path = os.path.join(config.test_source_root, "..")
 lit_path = os.path.abspath(lit_path)
 
 # Required because some tests import the lit module
 if llvm_config:
-  llvm_config.with_environment('PYTHONPATH', lit_path, append_path=True)
+    llvm_config.with_environment("PYTHONPATH", lit_path, append_path=True)
 else:
-  config.environment['PYTHONPATH'] = lit_path
+    config.environment["PYTHONPATH"] = lit_path
 # Do not add user-site packages directory to the python search path. This avoids test failures if there's an
 # incompatible lit module installed inside the user-site packages directory, as it gets prioritized over the lit
 # from the PYTHONPATH.
-config.environment['PYTHONNOUSERSITE'] = '1'
+config.environment["PYTHONNOUSERSITE"] = "1"
 
 # Add llvm and lit tools directories if this config is being loaded indirectly.
 # In this case, we can also expect llvm_config to have been imported correctly.
-for attribute in ('llvm_tools_dir', 'lit_tools_dir'):
+for attribute in ("llvm_tools_dir", "lit_tools_dir"):
     directory = getattr(config, attribute, None)
     if directory:
-        llvm_config.with_environment('PATH', directory, append_path=True)
+        llvm_config.with_environment("PATH", directory, append_path=True)
 
 # This test suite calls %{lit} to test lit's behavior for the sample test
 # suites in %{inputs}.  This test suite's results are then determined in part
@@ -61,46 +61,58 @@ for attribute in ('llvm_tools_dir', 'lit_tools_dir'):
 # that can affect FileCheck's output.  It also includes "--order=lexical -j1"
 # to ensure predictable test order, as it is often required for FileCheck
 # matches.
-config.substitutions.append(('%{inputs}', 'Inputs'))
-config.substitutions.append(('%{lit}', '%{lit-no-order-opt} --order=lexical'))
-config.substitutions.append(('%{lit-no-order-opt}',
-    "{env} %{{python}} {lit} -j1".format(
-        env="env -u FILECHECK_OPTS",
-        lit=os.path.join(lit_path, 'lit.py'))))
-config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
+config.substitutions.append(("%{inputs}", "Inputs"))
+config.substitutions.append(("%{lit}", "%{lit-no-order-opt} --order=lexical"))
+config.substitutions.append(
+    (
+        "%{lit-no-order-opt}",
+        "{env} %{{python}} {lit} -j1".format(
+            env="env -u FILECHECK_OPTS", lit=os.path.join(lit_path, "lit.py")
+        ),
+    )
+)
+config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))
 
 # Enable coverage.py reporting, assuming the coverage module has been installed
 # and sitecustomize.py in the virtualenv has been modified appropriately.
-if lit_config.params.get('check-coverage', None):
-    config.environment['COVERAGE_PROCESS_START'] = os.path.join(
-        os.path.dirname(__file__), ".coveragerc")
+if lit_config.params.get("check-coverage", None):
+    config.environment["COVERAGE_PROCESS_START"] = os.path.join(
+        os.path.dirname(__file__), ".coveragerc"
+    )
 
 # Add a feature to detect if test cancellation is available. Check the ability
 # to do cancellation in the same environment as where RUN commands are run.
 # The reason is that on most systems cancellation depends on psutil being
 # available and RUN commands are run with a cleared PYTHONPATH and user site
 # packages disabled.
-testing_script_path = "/".join((os.path.dirname(__file__),
-                                "check-tested-lit-timeout-ability"))
-proc = subprocess.run([sys.executable, testing_script_path],
-                      stderr=subprocess.PIPE, env=config.environment,
-                      universal_newlines=True)
+testing_script_path = "/".join(
+    (os.path.dirname(__file__), "check-tested-lit-timeout-ability")
+)
+proc = subprocess.run(
+    [sys.executable, testing_script_path],
+    stderr=subprocess.PIPE,
+    env=config.environment,
+    universal_newlines=True,
+)
 if proc.returncode == 0:
     config.available_features.add("lit-max-individual-test-time")
 else:
     errormsg = proc.stderr
-    lit_config.warning('Setting a timeout per test not supported. ' + errormsg
-                       + ' Some tests will be skipped and the --timeout'
-                         ' command line argument will not work.')
+    lit_config.warning(
+        "Setting a timeout per test not supported. "
+        + errormsg
+        + " Some tests will be skipped and the --timeout"
+        " command line argument will not work."
+    )
 
 # When running the lit tests standalone, we want to define the same features
 # that the llvm_config defines. This means that the 'system-windows' feature
 # (and any others) need to match the names in llvm_config for consistency
 if not llvm_config:
-  if sys.platform.startswith('win') or sys.platform.startswith('cygwin'):
-    config.available_features.add('system-windows')
-  if platform.system() == 'AIX':
-    config.available_features.add('system-aix')
+    if sys.platform.startswith("win") or sys.platform.startswith("cygwin"):
+        config.available_features.add("system-windows")
+    if platform.system() == "AIX":
+        config.available_features.add("system-aix")
 
 # For each of lit's internal shell commands ('env', 'cd', 'diff', etc.), put
 # a fake command that always fails at the start of PATH.  This helps us check
@@ -110,6 +122,7 @@ if not llvm_config:
 # pipeline.  Don't do this for ':' because it doesn't appear to be a valid file
 # name under Windows. Don't do this for 'not' because lit uses the external
 # 'not' throughout a RUN line that calls 'not --crash'.
-test_bin = os.path.join(os.path.dirname(__file__), 'Inputs', 'fake-externals')
-config.environment['PATH'] = os.path.pathsep.join((test_bin,
-                                                   config.environment['PATH']))
+test_bin = os.path.join(os.path.dirname(__file__), "Inputs", "fake-externals")
+config.environment["PATH"] = os.path.pathsep.join(
+    (test_bin, config.environment["PATH"])
+)

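A sketch of how such a fake-externals directory could be generated
(illustrative only; the checked-in Inputs/fake-externals content is not shown
in this commit, and this POSIX version ignores the Windows caveats above):

    import os
    import stat

    def make_fake_externals(directory, names=("env", "cd", "diff", "export")):
        # Each stand-in fails immediately, so a lit self-test notices if a
        # command that should run as a builtin reaches the external PATH.
        os.makedirs(directory, exist_ok=True)
        for name in names:
            path = os.path.join(directory, name)
            with open(path, "w") as f:
                f.write('#!/bin/sh\necho "fake %s called" >&2\nexit 1\n' % name)
            os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR)
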
diff  --git a/llvm/utils/lit/tests/unit/ShUtil.py b/llvm/utils/lit/tests/unit/ShUtil.py
index 9cf42dbbcefa4..c5f2e3b99ae13 100644
--- a/llvm/utils/lit/tests/unit/ShUtil.py
+++ b/llvm/utils/lit/tests/unit/ShUtil.py
@@ -10,103 +10,133 @@ def lex(self, str, *args, **kwargs):
         return list(ShLexer(str, *args, **kwargs).lex())
 
     def test_basic(self):
-        self.assertEqual(self.lex('a|b>c&d<e;f'),
-                         ['a', ('|',), 'b', ('>',), 'c', ('&',), 'd', 
-                          ('<',), 'e', (';',), 'f'])
+        self.assertEqual(
+            self.lex("a|b>c&d<e;f"),
+            ["a", ("|",), "b", (">",), "c", ("&",), "d", ("<",), "e", (";",), "f"],
+        )
 
     def test_redirection_tokens(self):
-        self.assertEqual(self.lex('a2>c'),
-                         ['a2', ('>',), 'c'])
-        self.assertEqual(self.lex('a 2>c'),
-                         ['a', ('>',2), 'c'])
+        self.assertEqual(self.lex("a2>c"), ["a2", (">",), "c"])
+        self.assertEqual(self.lex("a 2>c"), ["a", (">", 2), "c"])
 
     def test_quoting(self):
-        self.assertEqual(self.lex(""" 'a' """),
-                         ['a'])
-        self.assertEqual(self.lex(""" "hello\\"world" """),
-                         ['hello"world'])
-        self.assertEqual(self.lex(""" "hello\\'world" """),
-                         ["hello\\'world"])
-        self.assertEqual(self.lex(""" "hello\\\\world" """),
-                         ["hello\\world"])
-        self.assertEqual(self.lex(""" he"llo wo"rld """),
-                         ["hello world"])
-        self.assertEqual(self.lex(""" a\\ b a\\\\b """),
-                         ["a b", "a\\b"])
-        self.assertEqual(self.lex(""" "" "" """),
-                         ["", ""])
-        self.assertEqual(self.lex(""" a\\ b """, win32Escapes = True),
-                         ['a\\', 'b'])
+        self.assertEqual(self.lex(""" 'a' """), ["a"])
+        self.assertEqual(self.lex(""" "hello\\"world" """), ['hello"world'])
+        self.assertEqual(self.lex(""" "hello\\'world" """), ["hello\\'world"])
+        self.assertEqual(self.lex(""" "hello\\\\world" """), ["hello\\world"])
+        self.assertEqual(self.lex(""" he"llo wo"rld """), ["hello world"])
+        self.assertEqual(self.lex(""" a\\ b a\\\\b """), ["a b", "a\\b"])
+        self.assertEqual(self.lex(""" "" "" """), ["", ""])
+        self.assertEqual(self.lex(""" a\\ b """, win32Escapes=True), ["a\\", "b"])
+
 
 class TestShParse(unittest.TestCase):
     def parse(self, str):
         return ShParser(str).parse()
 
     def test_basic(self):
-        self.assertEqual(self.parse('echo hello'),
-                         Pipeline([Command(['echo', 'hello'], [])], False))
-        self.assertEqual(self.parse('echo ""'),
-                         Pipeline([Command(['echo', ''], [])], False))
-        self.assertEqual(self.parse("""echo -DFOO='a'"""),
-                         Pipeline([Command(['echo', '-DFOO=a'], [])], False))
-        self.assertEqual(self.parse('echo -DFOO="a"'),
-                         Pipeline([Command(['echo', '-DFOO=a'], [])], False))
+        self.assertEqual(
+            self.parse("echo hello"), Pipeline([Command(["echo", "hello"], [])], False)
+        )
+        self.assertEqual(
+            self.parse('echo ""'), Pipeline([Command(["echo", ""], [])], False)
+        )
+        self.assertEqual(
+            self.parse("""echo -DFOO='a'"""),
+            Pipeline([Command(["echo", "-DFOO=a"], [])], False),
+        )
+        self.assertEqual(
+            self.parse('echo -DFOO="a"'),
+            Pipeline([Command(["echo", "-DFOO=a"], [])], False),
+        )
 
     def test_redirection(self):
-        self.assertEqual(self.parse('echo hello > c'),
-                         Pipeline([Command(['echo', 'hello'], 
-                                           [((('>'),), 'c')])], False))
-        self.assertEqual(self.parse('echo hello > c >> d'),
-                         Pipeline([Command(['echo', 'hello'], [(('>',), 'c'),
-                                                     (('>>',), 'd')])], False))
-        self.assertEqual(self.parse('a 2>&1'),
-                         Pipeline([Command(['a'], [(('>&',2), '1')])], False))
+        self.assertEqual(
+            self.parse("echo hello > c"),
+            Pipeline([Command(["echo", "hello"], [(((">"),), "c")])], False),
+        )
+        self.assertEqual(
+            self.parse("echo hello > c >> d"),
+            Pipeline(
+                [Command(["echo", "hello"], [((">",), "c"), ((">>",), "d")])], False
+            ),
+        )
+        self.assertEqual(
+            self.parse("a 2>&1"), Pipeline([Command(["a"], [((">&", 2), "1")])], False)
+        )
 
     def test_pipeline(self):
-        self.assertEqual(self.parse('a | b'),
-                         Pipeline([Command(['a'], []),
-                                   Command(['b'], [])],
-                                  False))
-
-        self.assertEqual(self.parse('a | b | c'),
-                         Pipeline([Command(['a'], []),
-                                   Command(['b'], []),
-                                   Command(['c'], [])],
-                                  False))
+        self.assertEqual(
+            self.parse("a | b"),
+            Pipeline([Command(["a"], []), Command(["b"], [])], False),
+        )
+
+        self.assertEqual(
+            self.parse("a | b | c"),
+            Pipeline(
+                [Command(["a"], []), Command(["b"], []), Command(["c"], [])], False
+            ),
+        )
 
     def test_list(self):
-        self.assertEqual(self.parse('a ; b'),
-                         Seq(Pipeline([Command(['a'], [])], False),
-                             ';',
-                             Pipeline([Command(['b'], [])], False)))
-
-        self.assertEqual(self.parse('a & b'),
-                         Seq(Pipeline([Command(['a'], [])], False),
-                             '&',
-                             Pipeline([Command(['b'], [])], False)))
-
-        self.assertEqual(self.parse('a && b'),
-                         Seq(Pipeline([Command(['a'], [])], False),
-                             '&&',
-                             Pipeline([Command(['b'], [])], False)))
-
-        self.assertEqual(self.parse('a || b'),
-                         Seq(Pipeline([Command(['a'], [])], False),
-                             '||',
-                             Pipeline([Command(['b'], [])], False)))
-
-        self.assertEqual(self.parse('a && b || c'),
-                         Seq(Seq(Pipeline([Command(['a'], [])], False),
-                                 '&&',
-                                 Pipeline([Command(['b'], [])], False)),
-                             '||',
-                             Pipeline([Command(['c'], [])], False)))
-
-        self.assertEqual(self.parse('a; b'),
-                         Seq(Pipeline([Command(['a'], [])], False),
-                             ';',
-                             Pipeline([Command(['b'], [])], False)))
-
-if __name__ == '__main__':
+        self.assertEqual(
+            self.parse("a ; b"),
+            Seq(
+                Pipeline([Command(["a"], [])], False),
+                ";",
+                Pipeline([Command(["b"], [])], False),
+            ),
+        )
+
+        self.assertEqual(
+            self.parse("a & b"),
+            Seq(
+                Pipeline([Command(["a"], [])], False),
+                "&",
+                Pipeline([Command(["b"], [])], False),
+            ),
+        )
+
+        self.assertEqual(
+            self.parse("a && b"),
+            Seq(
+                Pipeline([Command(["a"], [])], False),
+                "&&",
+                Pipeline([Command(["b"], [])], False),
+            ),
+        )
+
+        self.assertEqual(
+            self.parse("a || b"),
+            Seq(
+                Pipeline([Command(["a"], [])], False),
+                "||",
+                Pipeline([Command(["b"], [])], False),
+            ),
+        )
+
+        self.assertEqual(
+            self.parse("a && b || c"),
+            Seq(
+                Seq(
+                    Pipeline([Command(["a"], [])], False),
+                    "&&",
+                    Pipeline([Command(["b"], [])], False),
+                ),
+                "||",
+                Pipeline([Command(["c"], [])], False),
+            ),
+        )
+
+        self.assertEqual(
+            self.parse("a; b"),
+            Seq(
+                Pipeline([Command(["a"], [])], False),
+                ";",
+                Pipeline([Command(["b"], [])], False),
+            ),
+        )
+
+
+if __name__ == "__main__":
     unittest.main()
-

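For context on the module under test: lit.ShUtil provides the lexer and parser
these cases exercise. A small usage sketch based on the expectations above
(the import path is assumed; the printed shapes mirror the asserts):

    from lit.ShUtil import ShLexer, ShParser

    # Operators are lexed as tuples, bare words as strings.
    print(list(ShLexer("a | b > c").lex()))
    # -> ['a', ('|',), 'b', ('>',), 'c']

    # Parsing yields Pipeline/Command trees, with Seq for && / || / ; / &.
    print(ShParser("a && b").parse())
    # -> Seq(Pipeline([Command(['a'], [])], False), '&&',
    #        Pipeline([Command(['b'], [])], False))
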
diff  --git a/llvm/utils/lit/tests/unit/TestRunner.py b/llvm/utils/lit/tests/unit/TestRunner.py
index e8780502f4e72..01e771b1f0ce7 100644
--- a/llvm/utils/lit/tests/unit/TestRunner.py
+++ b/llvm/utils/lit/tests/unit/TestRunner.py
@@ -10,8 +10,11 @@
 import lit.discovery
 import lit.LitConfig
 import lit.Test as Test
-from lit.TestRunner import ParserKind, IntegratedTestKeywordParser, \
-                           parseIntegratedTestScript
+from lit.TestRunner import (
+    ParserKind,
+    IntegratedTestKeywordParser,
+    parseIntegratedTestScript,
+)
 
 
 class TestIntegratedTestKeywordParser(unittest.TestCase):
@@ -24,22 +27,23 @@ def load_keyword_parser_lit_tests():
         TestIntegratedTestKeywordParser
         """
         # Create the global config object.
-        lit_config = lit.LitConfig.LitConfig(progname='lit',
-                                             path=[],
-                                             quiet=False,
-                                             useValgrind=False,
-                                             valgrindLeakCheck=False,
-                                             valgrindArgs=[],
-                                             noExecute=False,
-                                             debug=False,
-                                             isWindows=(
-                                               platform.system() == 'Windows'),
-                                             order='smart',
-                                             params={})
+        lit_config = lit.LitConfig.LitConfig(
+            progname="lit",
+            path=[],
+            quiet=False,
+            useValgrind=False,
+            valgrindLeakCheck=False,
+            valgrindArgs=[],
+            noExecute=False,
+            debug=False,
+            isWindows=(platform.system() == "Windows"),
+            order="smart",
+            params={},
+        )
         TestIntegratedTestKeywordParser.litConfig = lit_config
         # Perform test discovery.
         test_path = os.path.dirname(os.path.dirname(__file__))
-        inputs = [os.path.join(test_path, 'Inputs/testrunner-custom-parsers/')]
+        inputs = [os.path.join(test_path, "Inputs/testrunner-custom-parsers/")]
         assert os.path.isdir(inputs[0])
         tests = lit.discovery.find_tests_for_inputs(lit_config, inputs, False)
         assert len(tests) == 1 and "there should only be one test"
@@ -50,7 +54,7 @@ def make_parsers():
         def custom_parse(line_number, line, output):
             if output is None:
                 output = []
-            output += [part for part in line.split(' ') if part.strip()]
+            output += [part for part in line.split(" ") if part.strip()]
             return output
 
         return [
@@ -60,8 +64,7 @@ def custom_parse(line_number, line, output):
             IntegratedTestKeywordParser("MY_BOOL:", ParserKind.BOOLEAN_EXPR),
             IntegratedTestKeywordParser("MY_INT:", ParserKind.INTEGER),
             IntegratedTestKeywordParser("MY_RUN:", ParserKind.COMMAND),
-            IntegratedTestKeywordParser("MY_CUSTOM:", ParserKind.CUSTOM,
-                                        custom_parse),
+            IntegratedTestKeywordParser("MY_CUSTOM:", ParserKind.CUSTOM, custom_parse),
             IntegratedTestKeywordParser("MY_DEFINE:", ParserKind.DEFINE),
             IntegratedTestKeywordParser("MY_REDEFINE:", ParserKind.REDEFINE),
         ]
@@ -77,7 +80,9 @@ def get_parser(parser_list, keyword):
     def parse_test(parser_list, allow_result=False):
         script = parseIntegratedTestScript(
             TestIntegratedTestKeywordParser.inputTestCase,
-            additional_parsers=parser_list, require_script=False)
+            additional_parsers=parser_list,
+            require_script=False,
+        )
         if isinstance(script, lit.Test.Result):
             assert allow_result
         else:
@@ -88,33 +93,30 @@ def parse_test(parser_list, allow_result=False):
     def test_tags(self):
         parsers = self.make_parsers()
         self.parse_test(parsers)
-        tag_parser = self.get_parser(parsers, 'MY_TAG.')
-        dne_tag_parser = self.get_parser(parsers, 'MY_DNE_TAG.')
+        tag_parser = self.get_parser(parsers, "MY_TAG.")
+        dne_tag_parser = self.get_parser(parsers, "MY_DNE_TAG.")
         self.assertTrue(tag_parser.getValue())
         self.assertFalse(dne_tag_parser.getValue())
 
     def test_lists(self):
         parsers = self.make_parsers()
         self.parse_test(parsers)
-        list_parser = self.get_parser(parsers, 'MY_LIST:')
-        self.assertEqual(list_parser.getValue(),
-                              ['one', 'two', 'three', 'four'])
+        list_parser = self.get_parser(parsers, "MY_LIST:")
+        self.assertEqual(list_parser.getValue(), ["one", "two", "three", "four"])
 
     def test_commands(self):
         parsers = self.make_parsers()
         self.parse_test(parsers)
-        cmd_parser = self.get_parser(parsers, 'MY_RUN:')
+        cmd_parser = self.get_parser(parsers, "MY_RUN:")
         value = cmd_parser.getValue()
         self.assertEqual(len(value), 2)  # there are only two run lines
-        self.assertEqual(value[0].command.strip(),
-                         "%dbg(MY_RUN: at line 4)  baz")
-        self.assertEqual(value[1].command.strip(),
-                         "%dbg(MY_RUN: at line 7)  foo  bar")
+        self.assertEqual(value[0].command.strip(), "%dbg(MY_RUN: at line 4)  baz")
+        self.assertEqual(value[1].command.strip(), "%dbg(MY_RUN: at line 7)  foo  bar")
 
     def test_boolean(self):
         parsers = self.make_parsers()
         self.parse_test(parsers)
-        bool_parser = self.get_parser(parsers, 'MY_BOOL:')
+        bool_parser = self.get_parser(parsers, "MY_BOOL:")
         value = bool_parser.getValue()
         self.assertEqual(len(value), 2)  # there are only two run lines
         self.assertEqual(value[0].strip(), "a && (b)")
@@ -123,7 +125,7 @@ def test_boolean(self):
     def test_integer(self):
         parsers = self.make_parsers()
         self.parse_test(parsers)
-        int_parser = self.get_parser(parsers, 'MY_INT:')
+        int_parser = self.get_parser(parsers, "MY_INT:")
         value = int_parser.getValue()
         self.assertEqual(len(value), 2)  # there are only two MY_INT: lines
         self.assertEqual(type(value[0]), int)
@@ -136,61 +138,66 @@ def test_bad_parser_type(self):
         script = self.parse_test(parsers, allow_result=True)
         self.assertTrue(isinstance(script, lit.Test.Result))
         self.assertEqual(script.code, lit.Test.UNRESOLVED)
-        self.assertEqual('Additional parser must be an instance of '
-                         'IntegratedTestKeywordParser',
-                         script.output)
+        self.assertEqual(
+            "Additional parser must be an instance of " "IntegratedTestKeywordParser",
+            script.output,
+        )
 
     def test_duplicate_keyword(self):
-        parsers = self.make_parsers() + \
-            [IntegratedTestKeywordParser("KEY:", ParserKind.BOOLEAN_EXPR),
-             IntegratedTestKeywordParser("KEY:", ParserKind.BOOLEAN_EXPR)]
+        parsers = self.make_parsers() + [
+            IntegratedTestKeywordParser("KEY:", ParserKind.BOOLEAN_EXPR),
+            IntegratedTestKeywordParser("KEY:", ParserKind.BOOLEAN_EXPR),
+        ]
         script = self.parse_test(parsers, allow_result=True)
         self.assertTrue(isinstance(script, lit.Test.Result))
         self.assertEqual(script.code, lit.Test.UNRESOLVED)
-        self.assertEqual("Parser for keyword 'KEY:' already exists",
-                         script.output)
+        self.assertEqual("Parser for keyword 'KEY:' already exists", script.output)
 
     def test_boolean_unterminated(self):
-        parsers = self.make_parsers() + \
-            [IntegratedTestKeywordParser("MY_BOOL_UNTERMINATED:", ParserKind.BOOLEAN_EXPR)]
+        parsers = self.make_parsers() + [
+            IntegratedTestKeywordParser(
+                "MY_BOOL_UNTERMINATED:", ParserKind.BOOLEAN_EXPR
+            )
+        ]
         script = self.parse_test(parsers, allow_result=True)
         self.assertTrue(isinstance(script, lit.Test.Result))
         self.assertEqual(script.code, lit.Test.UNRESOLVED)
-        self.assertEqual("Test has unterminated 'MY_BOOL_UNTERMINATED:' lines "
-                         "(with '\\')",
-                         script.output)
+        self.assertEqual(
+            "Test has unterminated 'MY_BOOL_UNTERMINATED:' lines " "(with '\\')",
+            script.output,
+        )
 
     def test_custom(self):
         parsers = self.make_parsers()
         self.parse_test(parsers)
-        custom_parser = self.get_parser(parsers, 'MY_CUSTOM:')
+        custom_parser = self.get_parser(parsers, "MY_CUSTOM:")
         value = custom_parser.getValue()
-        self.assertEqual(value, ['a', 'b', 'c'])
+        self.assertEqual(value, ["a", "b", "c"])
 
     def test_defines(self):
         parsers = self.make_parsers()
         self.parse_test(parsers)
-        cmd_parser = self.get_parser(parsers, 'MY_DEFINE:')
+        cmd_parser = self.get_parser(parsers, "MY_DEFINE:")
         value = cmd_parser.getValue()
-        self.assertEqual(len(value), 1) # there's only one MY_DEFINE directive
+        self.assertEqual(len(value), 1)  # there's only one MY_DEFINE directive
         self.assertEqual(value[0].new_subst, True)
-        self.assertEqual(value[0].name, '%{name}')
-        self.assertEqual(value[0].value, 'value one')
+        self.assertEqual(value[0].name, "%{name}")
+        self.assertEqual(value[0].value, "value one")
 
     def test_redefines(self):
         parsers = self.make_parsers()
         self.parse_test(parsers)
-        cmd_parser = self.get_parser(parsers, 'MY_REDEFINE:')
+        cmd_parser = self.get_parser(parsers, "MY_REDEFINE:")
         value = cmd_parser.getValue()
-        self.assertEqual(len(value), 1) # there's only one MY_REDEFINE directive
+        self.assertEqual(len(value), 1)  # there's only one MY_REDEFINE directive
         self.assertEqual(value[0].new_subst, False)
-        self.assertEqual(value[0].name, '%{name}')
-        self.assertEqual(value[0].value, 'value two')
+        self.assertEqual(value[0].name, "%{name}")
+        self.assertEqual(value[0].value, "value two")
 
     def test_bad_keywords(self):
         def custom_parse(line_number, line, output):
             return output
-        
+
         try:
             IntegratedTestKeywordParser("TAG_NO_SUFFIX", ParserKind.TAG),
             self.fail("TAG_NO_SUFFIX failed to raise an exception")
@@ -216,8 +223,9 @@ def custom_parse(line_number, line, output):
             self.fail("LIST_WITH_DOT. raised the wrong exception: %r" % e)
 
         try:
-            IntegratedTestKeywordParser("CUSTOM_NO_SUFFIX",
-                                        ParserKind.CUSTOM, custom_parse),
+            IntegratedTestKeywordParser(
+                "CUSTOM_NO_SUFFIX", ParserKind.CUSTOM, custom_parse
+            ),
             self.fail("CUSTOM_NO_SUFFIX failed to raise an exception")
         except ValueError as e:
             pass
@@ -226,25 +234,27 @@ def custom_parse(line_number, line, output):
 
         # Both '.' and ':' are allowed for CUSTOM keywords.
         try:
-            IntegratedTestKeywordParser("CUSTOM_WITH_DOT.",
-                                        ParserKind.CUSTOM, custom_parse),
+            IntegratedTestKeywordParser(
+                "CUSTOM_WITH_DOT.", ParserKind.CUSTOM, custom_parse
+            ),
         except BaseException as e:
             self.fail("CUSTOM_WITH_DOT. raised an exception: %r" % e)
         try:
-            IntegratedTestKeywordParser("CUSTOM_WITH_COLON:",
-                                        ParserKind.CUSTOM, custom_parse),
+            IntegratedTestKeywordParser(
+                "CUSTOM_WITH_COLON:", ParserKind.CUSTOM, custom_parse
+            ),
         except BaseException as e:
             self.fail("CUSTOM_WITH_COLON: raised an exception: %r" % e)
 
         try:
-            IntegratedTestKeywordParser("CUSTOM_NO_PARSER:",
-                                        ParserKind.CUSTOM),
+            IntegratedTestKeywordParser("CUSTOM_NO_PARSER:", ParserKind.CUSTOM),
             self.fail("CUSTOM_NO_PARSER: failed to raise an exception")
         except ValueError as e:
             pass
         except BaseException as e:
             self.fail("CUSTOM_NO_PARSER: raised the wrong exception: %r" % e)
 
+
 class TestApplySubtitutions(unittest.TestCase):
     def test_simple(self):
         script = ["echo %bar"]
@@ -254,29 +264,38 @@ def test_simple(self):
 
     def test_multiple_substitutions(self):
         script = ["echo %bar %baz"]
-        substitutions = [("%bar", "hello"),
-                         ("%baz", "world"),
-                         ("%useless", "shouldnt expand")]
+        substitutions = [
+            ("%bar", "hello"),
+            ("%baz", "world"),
+            ("%useless", "shouldnt expand"),
+        ]
         result = lit.TestRunner.applySubstitutions(script, substitutions)
         self.assertEqual(result, ["echo hello world"])
 
     def test_multiple_script_lines(self):
-        script = ["%cxx %compile_flags -c -o %t.o",
-                  "%cxx %link_flags %t.o -o %t.exe"]
-        substitutions = [("%cxx", "clang++"),
-                         ("%compile_flags", "-std=c++11 -O3"),
-                         ("%link_flags", "-lc++")]
+        script = ["%cxx %compile_flags -c -o %t.o", "%cxx %link_flags %t.o -o %t.exe"]
+        substitutions = [
+            ("%cxx", "clang++"),
+            ("%compile_flags", "-std=c++11 -O3"),
+            ("%link_flags", "-lc++"),
+        ]
         result = lit.TestRunner.applySubstitutions(script, substitutions)
-        self.assertEqual(result, ["clang++ -std=c++11 -O3 -c -o %t.o",
-                                  "clang++ -lc++ %t.o -o %t.exe"])
+        self.assertEqual(
+            result,
+            ["clang++ -std=c++11 -O3 -c -o %t.o", "clang++ -lc++ %t.o -o %t.exe"],
+        )
 
     def test_recursive_substitution_real(self):
         script = ["%build %s"]
-        substitutions = [("%cxx", "clang++"),
-                         ("%compile_flags", "-std=c++11 -O3"),
-                         ("%link_flags", "-lc++"),
-                         ("%build", "%cxx %compile_flags %link_flags %s -o %t.exe")]
-        result = lit.TestRunner.applySubstitutions(script, substitutions, recursion_limit=3)
+        substitutions = [
+            ("%cxx", "clang++"),
+            ("%compile_flags", "-std=c++11 -O3"),
+            ("%link_flags", "-lc++"),
+            ("%build", "%cxx %compile_flags %link_flags %s -o %t.exe"),
+        ]
+        result = lit.TestRunner.applySubstitutions(
+            script, substitutions, recursion_limit=3
+        )
         self.assertEqual(result, ["clang++ -std=c++11 -O3 -lc++ %s -o %t.exe %s"])
 
     def test_recursive_substitution_limit(self):
@@ -284,35 +303,56 @@ def test_recursive_substitution_limit(self):
         # Make sure the substitutions are not in an order where the global
         # substitution would appear to be recursive just because they are
         # processed in the right order.
-        substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
-                         ("%rec3", "%rec2"), ("%rec4", "%rec3"), ("%rec5", "%rec4")]
+        substitutions = [
+            ("%rec1", "STOP"),
+            ("%rec2", "%rec1"),
+            ("%rec3", "%rec2"),
+            ("%rec4", "%rec3"),
+            ("%rec5", "%rec4"),
+        ]
         for limit in [5, 6, 7]:
-            result = lit.TestRunner.applySubstitutions(script, substitutions, recursion_limit=limit)
+            result = lit.TestRunner.applySubstitutions(
+                script, substitutions, recursion_limit=limit
+            )
             self.assertEqual(result, ["STOP"])
 
     def test_recursive_substitution_limit_exceeded(self):
         script = ["%rec5"]
-        substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
-                         ("%rec3", "%rec2"), ("%rec4", "%rec3"), ("%rec5", "%rec4")]
+        substitutions = [
+            ("%rec1", "STOP"),
+            ("%rec2", "%rec1"),
+            ("%rec3", "%rec2"),
+            ("%rec4", "%rec3"),
+            ("%rec5", "%rec4"),
+        ]
         for limit in [0, 1, 2, 3, 4]:
             try:
-                lit.TestRunner.applySubstitutions(script, substitutions, recursion_limit=limit)
+                lit.TestRunner.applySubstitutions(
+                    script, substitutions, recursion_limit=limit
+                )
                 self.fail("applySubstitutions should have raised an exception")
             except ValueError:
                 pass
 
     def test_recursive_substitution_invalid_value(self):
         script = ["%rec5"]
-        substitutions = [("%rec1", "STOP"), ("%rec2", "%rec1"),
-                         ("%rec3", "%rec2"), ("%rec4", "%rec3"), ("%rec5", "%rec4")]
+        substitutions = [
+            ("%rec1", "STOP"),
+            ("%rec2", "%rec1"),
+            ("%rec3", "%rec2"),
+            ("%rec4", "%rec3"),
+            ("%rec5", "%rec4"),
+        ]
         for limit in [-1, -2, -3, "foo"]:
             try:
-                lit.TestRunner.applySubstitutions(script, substitutions, recursion_limit=limit)
+                lit.TestRunner.applySubstitutions(
+                    script, substitutions, recursion_limit=limit
+                )
                 self.fail("applySubstitutions should have raised an exception")
             except AssertionError:
                 pass
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     TestIntegratedTestKeywordParser.load_keyword_parser_lit_tests()
     unittest.main(verbosity=2)
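
For context: the tests above exercise lit's additional-parser hook. Each
IntegratedTestKeywordParser binds a keyword to a ParserKind and, for
ParserKind.CUSTOM, to a callback taking (line_number, line, output). A
minimal sketch of registering such a parser, assuming only what the test
code shows; the MY_NOTE: keyword and parse_note helper are hypothetical:

    from lit.TestRunner import IntegratedTestKeywordParser, ParserKind

    def parse_note(line_number, line, output):
        # 'output' carries the value accumulated across MY_NOTE: lines.
        output = output or []
        output.append(line.strip())
        return output

    extra_parsers = [
        IntegratedTestKeywordParser("MY_NOTE:", ParserKind.CUSTOM, parse_note),
    ]

A list like this is what parse_test() above forwards to
parseIntegratedTestScript via its additional_parsers argument.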

diff --git a/llvm/utils/lit/tests/unparsed-requirements.py b/llvm/utils/lit/tests/unparsed-requirements.py
index 6f0985841181f..48cd37f8e65df 100644
--- a/llvm/utils/lit/tests/unparsed-requirements.py
+++ b/llvm/utils/lit/tests/unparsed-requirements.py
@@ -5,7 +5,20 @@
 from lit.TestRunner import parseIntegratedTestScript
 from lit.TestingConfig import TestingConfig
 
-config = TestingConfig(None, "config", [".txt"], None, [], [], False, sys.argv[1], sys.argv[1], [], [], True)
+config = TestingConfig(
+    None,
+    "config",
+    [".txt"],
+    None,
+    [],
+    [],
+    False,
+    sys.argv[1],
+    sys.argv[1],
+    [],
+    [],
+    True,
+)
 suite = TestSuite("suite", sys.argv[1], sys.argv[1], config)
 
 test = Test(suite, ["test.py"], config)
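
The hunk above is typical of the whole commit: once a call no longer fits
in black's default 88-column line, black explodes it to one argument per
line with a trailing "magic" comma. An illustrative before/after; the
configure() call and its arguments are made up, not from this patch:

    # before
    result = configure(name, suffixes, environment, substitutions, exec_root, source_root)

    # after black
    result = configure(
        name,
        suffixes,
        environment,
        substitutions,
        exec_root,
        source_root,
    )

The exploded form also yields cleaner diffs when a single argument changes
later.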

diff --git a/llvm/utils/lldbDataFormatters.py b/llvm/utils/lldbDataFormatters.py
index bdcd1c931ba82..df62ed0884ae4 100644
--- a/llvm/utils/lldbDataFormatters.py
+++ b/llvm/utils/lldbDataFormatters.py
@@ -7,78 +7,106 @@
 import lldb
 import json
 
+
 def __lldb_init_module(debugger, internal_dict):
-    debugger.HandleCommand('type category define -e llvm -l c++')
-    debugger.HandleCommand('type synthetic add -w llvm '
-                           '-l lldbDataFormatters.SmallVectorSynthProvider '
-                           '-x "^llvm::SmallVectorImpl<.+>$"')
-    debugger.HandleCommand('type summary add -w llvm '
-                           '-e -s "size=${svar%#}" '
-                           '-x "^llvm::SmallVectorImpl<.+>$"')
-    debugger.HandleCommand('type synthetic add -w llvm '
-                           '-l lldbDataFormatters.SmallVectorSynthProvider '
-                           '-x "^llvm::SmallVector<.+,.+>$"')
-    debugger.HandleCommand('type summary add -w llvm '
-                           '-e -s "size=${svar%#}" '
-                           '-x "^llvm::SmallVector<.+,.+>$"')
-    debugger.HandleCommand('type synthetic add -w llvm '
-                           '-l lldbDataFormatters.ArrayRefSynthProvider '
-                           '-x "^llvm::ArrayRef<.+>$"')
-    debugger.HandleCommand('type summary add -w llvm '
-                           '-e -s "size=${svar%#}" '
-                           '-x "^llvm::ArrayRef<.+>$"')
-    debugger.HandleCommand('type synthetic add -w llvm '
-                           '-l lldbDataFormatters.OptionalSynthProvider '
-                           '-x "^llvm::Optional<.+>$"')
-    debugger.HandleCommand('type summary add -w llvm '
-                           '-e -F lldbDataFormatters.OptionalSummaryProvider '
-                           '-x "^llvm::Optional<.+>$"')
-    debugger.HandleCommand('type summary add -w llvm '
-                           '-F lldbDataFormatters.SmallStringSummaryProvider '
-                           '-x "^llvm::SmallString<.+>$"')
-    debugger.HandleCommand('type summary add -w llvm '
-                           '-F lldbDataFormatters.StringRefSummaryProvider '
-                           '-x "^llvm::StringRef$"')
-    debugger.HandleCommand('type summary add -w llvm '
-                           '-F lldbDataFormatters.ConstStringSummaryProvider '
-                           '-x "^lldb_private::ConstString$"')
-    debugger.HandleCommand('type synthetic add -w llvm '
-                           '-l lldbDataFormatters.PointerIntPairSynthProvider '
-                           '-x "^llvm::PointerIntPair<.+>$"')
-    debugger.HandleCommand('type synthetic add -w llvm '
-                           '-l lldbDataFormatters.PointerUnionSynthProvider '
-                           '-x "^llvm::PointerUnion<.+>$"')
+    debugger.HandleCommand("type category define -e llvm -l c++")
+    debugger.HandleCommand(
+        "type synthetic add -w llvm "
+        "-l lldbDataFormatters.SmallVectorSynthProvider "
+        '-x "^llvm::SmallVectorImpl<.+>$"'
+    )
+    debugger.HandleCommand(
+        "type summary add -w llvm "
+        '-e -s "size=${svar%#}" '
+        '-x "^llvm::SmallVectorImpl<.+>$"'
+    )
+    debugger.HandleCommand(
+        "type synthetic add -w llvm "
+        "-l lldbDataFormatters.SmallVectorSynthProvider "
+        '-x "^llvm::SmallVector<.+,.+>$"'
+    )
+    debugger.HandleCommand(
+        "type summary add -w llvm "
+        '-e -s "size=${svar%#}" '
+        '-x "^llvm::SmallVector<.+,.+>$"'
+    )
+    debugger.HandleCommand(
+        "type synthetic add -w llvm "
+        "-l lldbDataFormatters.ArrayRefSynthProvider "
+        '-x "^llvm::ArrayRef<.+>$"'
+    )
+    debugger.HandleCommand(
+        "type summary add -w llvm "
+        '-e -s "size=${svar%#}" '
+        '-x "^llvm::ArrayRef<.+>$"'
+    )
+    debugger.HandleCommand(
+        "type synthetic add -w llvm "
+        "-l lldbDataFormatters.OptionalSynthProvider "
+        '-x "^llvm::Optional<.+>$"'
+    )
+    debugger.HandleCommand(
+        "type summary add -w llvm "
+        "-e -F lldbDataFormatters.OptionalSummaryProvider "
+        '-x "^llvm::Optional<.+>$"'
+    )
+    debugger.HandleCommand(
+        "type summary add -w llvm "
+        "-F lldbDataFormatters.SmallStringSummaryProvider "
+        '-x "^llvm::SmallString<.+>$"'
+    )
+    debugger.HandleCommand(
+        "type summary add -w llvm "
+        "-F lldbDataFormatters.StringRefSummaryProvider "
+        '-x "^llvm::StringRef$"'
+    )
+    debugger.HandleCommand(
+        "type summary add -w llvm "
+        "-F lldbDataFormatters.ConstStringSummaryProvider "
+        '-x "^lldb_private::ConstString$"'
+    )
+    debugger.HandleCommand(
+        "type synthetic add -w llvm "
+        "-l lldbDataFormatters.PointerIntPairSynthProvider "
+        '-x "^llvm::PointerIntPair<.+>$"'
+    )
+    debugger.HandleCommand(
+        "type synthetic add -w llvm "
+        "-l lldbDataFormatters.PointerUnionSynthProvider "
+        '-x "^llvm::PointerUnion<.+>$"'
+    )
 
 
 # Pretty printer for llvm::SmallVector/llvm::SmallVectorImpl
 class SmallVectorSynthProvider:
     def __init__(self, valobj, internal_dict):
-        self.valobj = valobj;
-        self.update() # initialize this provider
+        self.valobj = valobj
+        self.update()  # initialize this provider
 
     def num_children(self):
         return self.size.GetValueAsUnsigned(0)
 
     def get_child_index(self, name):
         try:
-            return int(name.lstrip('[').rstrip(']'))
+            return int(name.lstrip("[").rstrip("]"))
         except:
-            return -1;
+            return -1
 
     def get_child_at_index(self, index):
         # Do bounds checking.
         if index < 0:
             return None
         if index >= self.num_children():
-            return None;
+            return None
 
         offset = index * self.type_size
-        return self.begin.CreateChildAtOffset('['+str(index)+']',
-                                              offset, self.data_type)
+        return self.begin.CreateChildAtOffset(
+            "[" + str(index) + "]", offset, self.data_type
+        )
 
     def update(self):
-        self.begin = self.valobj.GetChildMemberWithName('BeginX')
-        self.size = self.valobj.GetChildMemberWithName('Size')
+        self.begin = self.valobj.GetChildMemberWithName("BeginX")
+        self.size = self.valobj.GetChildMemberWithName("Size")
         the_type = self.valobj.GetType()
         # If this is a reference type we have to dereference it to get to the
         # template parameter.
@@ -89,63 +117,70 @@ def update(self):
         self.type_size = self.data_type.GetByteSize()
         assert self.type_size != 0
 
+
 class ArrayRefSynthProvider:
-    """ Provider for llvm::ArrayRef """
+    """Provider for llvm::ArrayRef"""
+
     def __init__(self, valobj, internal_dict):
-        self.valobj = valobj;
-        self.update() # initialize this provider
+        self.valobj = valobj
+        self.update()  # initialize this provider
 
     def num_children(self):
         return self.length
 
     def get_child_index(self, name):
         try:
-            return int(name.lstrip('[').rstrip(']'))
+            return int(name.lstrip("[").rstrip("]"))
         except:
-            return -1;
+            return -1
 
     def get_child_at_index(self, index):
         if index < 0 or index >= self.num_children():
-            return None;
+            return None
         offset = index * self.type_size
-        return self.data.CreateChildAtOffset('[' + str(index) + ']',
-                                             offset, self.data_type)
+        return self.data.CreateChildAtOffset(
+            "[" + str(index) + "]", offset, self.data_type
+        )
 
     def update(self):
-        self.data = self.valobj.GetChildMemberWithName('Data')
-        length_obj = self.valobj.GetChildMemberWithName('Length')
+        self.data = self.valobj.GetChildMemberWithName("Data")
+        length_obj = self.valobj.GetChildMemberWithName("Length")
         self.length = length_obj.GetValueAsUnsigned(0)
         self.data_type = self.data.GetType().GetPointeeType()
         self.type_size = self.data_type.GetByteSize()
         assert self.type_size != 0
 
+
 def GetOptionalValue(valobj):
-    storage = valobj.GetChildMemberWithName('Storage')
+    storage = valobj.GetChildMemberWithName("Storage")
     if not storage:
         storage = valobj
 
     failure = 2
-    hasVal = storage.GetChildMemberWithName('hasVal').GetValueAsUnsigned(failure)
+    hasVal = storage.GetChildMemberWithName("hasVal").GetValueAsUnsigned(failure)
     if hasVal == failure:
-        return '<could not read llvm::Optional>'
+        return "<could not read llvm::Optional>"
 
     if hasVal == 0:
         return None
 
     underlying_type = storage.GetType().GetTemplateArgumentType(0)
-    storage = storage.GetChildMemberWithName('value')
+    storage = storage.GetChildMemberWithName("value")
     return storage.Cast(underlying_type)
 
+
 def OptionalSummaryProvider(valobj, internal_dict):
     val = GetOptionalValue(valobj)
     if val is None:
-        return 'None'
+        return "None"
     if val.summary:
         return val.summary
-    return ''
+    return ""
+
 
 class OptionalSynthProvider:
     """Provides deref support to llvm::Optional<T>"""
+
     def __init__(self, valobj, internal_dict):
         self.valobj = valobj
 
@@ -153,7 +188,7 @@ def num_children(self):
         return self.valobj.num_children
 
     def get_child_index(self, name):
-        if name == '$$dereference$$':
+        if name == "$$dereference$$":
             return self.valobj.num_children
         return self.valobj.GetIndexOfChildWithName(name)
 
@@ -162,14 +197,15 @@ def get_child_at_index(self, index):
             return self.valobj.GetChildAtIndex(index)
         return GetOptionalValue(self.valobj) or lldb.SBValue()
 
+
 def SmallStringSummaryProvider(valobj, internal_dict):
     num_elements = valobj.GetNumChildren()
-    res = "\""
+    res = '"'
     for i in range(0, num_elements):
         c = valobj.GetChildAtIndex(i).GetValue()
         if c:
             res += c.strip("'")
-    res += "\""
+    res += '"'
     return res
 
 
@@ -220,18 +256,22 @@ def num_children(self):
         return 2
 
     def get_child_index(self, name):
-        if name == 'Pointer':
+        if name == "Pointer":
             return 0
-        if name == 'Int':
+        if name == "Int":
             return 1
         return None
 
     def get_child_at_index(self, index):
         expr_path = get_expression_path(self.valobj)
         if index == 0:
-            return self.valobj.CreateValueFromExpression('Pointer', f'({self.pointer_ty.name}){expr_path}.getPointer()')
+            return self.valobj.CreateValueFromExpression(
+                "Pointer", f"({self.pointer_ty.name}){expr_path}.getPointer()"
+            )
         if index == 1:
-            return self.valobj.CreateValueFromExpression('Int', f'({self.int_ty.name}){expr_path}.getInt()')
+            return self.valobj.CreateValueFromExpression(
+                "Int", f"({self.int_ty.name}){expr_path}.getInt()"
+            )
         return None
 
     def update(self):
@@ -244,8 +284,8 @@ def parse_template_parameters(typename):
     LLDB doesn't support template parameter packs, so let's parse them manually.
     """
     result = []
-    start = typename.find('<')
-    end = typename.rfind('>')
+    start = typename.find("<")
+    end = typename.rfind(">")
     if start < 1 or end < 2 or end - start < 2:
         return result
 
@@ -254,11 +294,11 @@ def parse_template_parameters(typename):
 
     for i in range(start + 1, end + 1):
         c = typename[i]
-        if c == '<':
+        if c == "<":
             nesting_level += 1
-        elif c == '>':
+        elif c == ">":
             nesting_level -= 1
-        elif c == ',' and nesting_level == 0:
+        elif c == "," and nesting_level == 0:
             result.append(typename[current_parameter_start:i].strip())
             current_parameter_start = i + 1
 
@@ -276,7 +316,7 @@ def num_children(self):
         return 1
 
     def get_child_index(self, name):
-        if name == 'Ptr':
+        if name == "Ptr":
             return 0
         return None
 
@@ -284,10 +324,16 @@ def get_child_at_index(self, index):
         if index != 0:
             return None
         ptr_type_name = self.template_args[self.active_type_tag]
-        return self.valobj.CreateValueFromExpression('Ptr', f'({ptr_type_name}){self.val_expr_path}.getPointer()')
+        return self.valobj.CreateValueFromExpression(
+            "Ptr", f"({ptr_type_name}){self.val_expr_path}.getPointer()"
+        )
 
     def update(self):
-        self.pointer_int_pair = self.valobj.GetChildMemberWithName('Val')
-        self.val_expr_path = get_expression_path(self.valobj.GetChildMemberWithName('Val'))
-        self.active_type_tag = self.valobj.CreateValueFromExpression('', f'(int){self.val_expr_path}.getInt()').GetValueAsSigned()
+        self.pointer_int_pair = self.valobj.GetChildMemberWithName("Val")
+        self.val_expr_path = get_expression_path(
+            self.valobj.GetChildMemberWithName("Val")
+        )
+        self.active_type_tag = self.valobj.CreateValueFromExpression(
+            "", f"(int){self.val_expr_path}.getInt()"
+        ).GetValueAsSigned()
         self.template_args = parse_template_parameters(self.valobj.GetType().name)
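
For readers new to this file: each class above implements lldb's
synthetic-child-provider protocol (num_children, get_child_index,
get_child_at_index, update) and is attached to a type with
'type synthetic add -l <module>.<Class> -x <regex>' in __lldb_init_module.
A minimal sketch of the same pattern, assuming a hypothetical my::Pair
type with First/Second members:

    import lldb

    class PairSynthProvider:
        def __init__(self, valobj, internal_dict):
            self.valobj = valobj
            self.update()  # initialize this provider

        def num_children(self):
            return 2

        def get_child_index(self, name):
            return {"First": 0, "Second": 1}.get(name, -1)

        def get_child_at_index(self, index):
            if index == 0:
                return self.first
            if index == 1:
                return self.second
            return None

        def update(self):
            self.first = self.valobj.GetChildMemberWithName("First")
            self.second = self.valobj.GetChildMemberWithName("Second")

Registration would mirror the HandleCommand calls above, e.g.
'type synthetic add -w llvm -l myFormatters.PairSynthProvider -x "^my::Pair$"'.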

diff --git a/llvm/utils/llvm-gisel-cov.py b/llvm/utils/llvm-gisel-cov.py
index 88b19455ac393..9d9526e0558a5 100755
--- a/llvm/utils/llvm-gisel-cov.py
+++ b/llvm/utils/llvm-gisel-cov.py
@@ -10,59 +10,73 @@
 import argparse
 import struct
 
+
 class FileFormatError(Exception):
-  pass
+    pass
+
 
 def backend_int_pair(s):
-  backend, sep, value = s.partition('=')
-  if sep is None:
-    raise argparse.ArgumentTypeError("'=' missing, expected name=value")
-  if not backend:
-    raise argparse.ArgumentTypeError("Expected name=value")
-  if not value:
-    raise argparse.ArgumentTypeError("Expected name=value")
-  return backend, int(value)
+    backend, sep, value = s.partition("=")
+    if sep is None:
+        raise argparse.ArgumentTypeError("'=' missing, expected name=value")
+    if not backend:
+        raise argparse.ArgumentTypeError("Expected name=value")
+    if not value:
+        raise argparse.ArgumentTypeError("Expected name=value")
+    return backend, int(value)
+
 
 def main():
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('input', nargs='+')
-  parser.add_argument('--num-rules', type=backend_int_pair, action='append',
-                      metavar='BACKEND=NUM',
-                      help='Specify the number of rules for a backend')
-  args = parser.parse_args()
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument("input", nargs="+")
+    parser.add_argument(
+        "--num-rules",
+        type=backend_int_pair,
+        action="append",
+        metavar="BACKEND=NUM",
+        help="Specify the number of rules for a backend",
+    )
+    args = parser.parse_args()
+
+    covered_rules = {}
 
-  covered_rules = {}
+    for input_filename in args.input:
+        with open(input_filename, "rb") as input_fh:
+            data = input_fh.read()
+            pos = 0
+            while data:
+                backend, _, data = data.partition(b"\0")
+                pos += len(backend)
+                pos += 1
 
-  for input_filename in args.input:
-    with open(input_filename, 'rb') as input_fh:
-      data = input_fh.read()
-      pos = 0
-      while data:
-        backend, _, data = data.partition('\0')
-        pos += len(backend)
-        pos += 1
+                if len(backend) == 0:
+                    raise FileFormatError()
+                backend = backend.decode()
 
-        if len(backend) == 0:
-          raise FileFormatError()
-        backend, = struct.unpack("%ds" % len(backend), backend)
+                while data:
+                    if len(data) < 8:
+                        raise FileFormatError()
+                    (rule_id,) = struct.unpack("Q", data[:8])
+                    pos += 8
+                    data = data[8:]
+                    if rule_id == (2**64) - 1:
+                        break
+                    covered_rules[backend] = covered_rules.get(backend, {})
+                    covered_rules[backend][rule_id] = (
+                        covered_rules[backend].get(rule_id, 0) + 1
+                    )
 
-        while data:
-          if len(data) < 8:
-            raise FileFormatError()
-          rule_id, = struct.unpack("Q", data[:8])
-          pos += 8
-          data = data[8:]
-          if rule_id == (2 ** 64) - 1:
-            break
-          covered_rules[backend] = covered_rules.get(backend, {})
-          covered_rules[backend][rule_id] = covered_rules[backend].get(rule_id, 0) + 1
+    num_rules = dict(args.num_rules)
+    for backend, rules_for_backend in covered_rules.items():
+        if backend in num_rules:
+            print(
+                "%s: %3.2f%% of rules covered"
+                % (backend, float(len(rules_for_backend)) / num_rules[backend])
+                * 100
+            )
+        else:
+            print("%s: %d rules covered" % (backend, len(rules_for_backend)))
 
-  num_rules = dict(args.num_rules)
-  for backend, rules_for_backend in covered_rules.items():
-    if backend in num_rules:
-      print("%s: %3.2f%% of rules covered" % (backend, float(len(rules_for_backend)) / num_rules[backend]) * 100)
-    else:
-      print("%s: %d rules covered" % (backend, len(rules_for_backend)))
 
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()
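
The reader above decodes a simple stream: a NUL-terminated backend name
followed by native-endian uint64 rule ids, with the all-ones value
(2**64 - 1) as the end-of-record sentinel. A sketch of a writer producing
that layout for testing; the file name and rule ids here are made up:

    import struct

    def write_coverage(path, records):
        # records maps a backend name to an iterable of covered rule ids.
        sentinel = (2**64) - 1
        with open(path, "wb") as fh:
            for backend, rule_ids in records.items():
                fh.write(backend.encode() + b"\0")
                for rule_id in rule_ids:
                    fh.write(struct.pack("Q", rule_id))
                fh.write(struct.pack("Q", sentinel))

    write_coverage("gisel.cov", {"AArch64": [1, 2, 2, 7]})

Duplicate ids are counted as repeat hits by the reader, while the coverage
percentage uses only the number of distinct rules covered.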

diff --git a/llvm/utils/llvm-locstats/llvm-locstats.py b/llvm/utils/llvm-locstats/llvm-locstats.py
index e86cf13df8c1d..454488064106e 100755
--- a/llvm/utils/llvm-locstats/llvm-locstats.py
+++ b/llvm/utils/llvm-locstats/llvm-locstats.py
@@ -19,372 +19,476 @@
 
 # Initialize the plot.
 def init_plot(plt):
-  plt.title('Debug Location Statistics', fontweight='bold')
-  plt.xlabel('location buckets')
-  plt.ylabel('number of variables in the location buckets')
-  plt.xticks(rotation=45, fontsize='x-small')
-  plt.yticks()
+    plt.title("Debug Location Statistics", fontweight="bold")
+    plt.xlabel("location buckets")
+    plt.ylabel("number of variables in the location buckets")
+    plt.xticks(rotation=45, fontsize="x-small")
+    plt.yticks()
+
 
 # Finalize the plot.
 def finish_plot(plt):
-  plt.legend()
-  plt.grid(color='grey', which='major', axis='y', linestyle='-', linewidth=0.3)
-  plt.savefig('locstats.png')
-  print('The plot was saved within "locstats.png".')
+    plt.legend()
+    plt.grid(color="grey", which="major", axis="y", linestyle="-", linewidth=0.3)
+    plt.savefig("locstats.png")
+    print('The plot was saved within "locstats.png".')
+
 
 # Holds the debug location statistics.
 class LocationStats:
-  def __init__(self, file_name, variables_total, variables_total_locstats,
-    variables_with_loc, variables_scope_bytes_covered, variables_scope_bytes,
-    variables_coverage_map):
-    self.file_name = file_name
-    self.variables_total = variables_total
-    self.variables_total_locstats = variables_total_locstats
-    self.variables_with_loc = variables_with_loc
-    self.scope_bytes_covered = variables_scope_bytes_covered
-    self.scope_bytes = variables_scope_bytes
-    self.variables_coverage_map = variables_coverage_map
-
-  # Get the PC ranges coverage.
-  def get_pc_coverage(self):
-    if self.scope_bytes_covered == TAINT_VALUE or \
-       self.scope_bytes == TAINT_VALUE:
-      return TAINT_VALUE
-    pc_ranges_covered = int(ceil(self.scope_bytes_covered * 100.0) \
-                / self.scope_bytes)
-    return pc_ranges_covered
-
-  # Pretty print the debug location buckets.
-  def pretty_print(self):
-    if self.scope_bytes == 0:
-      print ('No scope bytes found.')
-      return -1
-
-    pc_ranges_covered = self.get_pc_coverage()
-    variables_coverage_per_map = {}
-    for cov_bucket in coverage_buckets():
-      variables_coverage_per_map[cov_bucket] = None
-      if self.variables_coverage_map[cov_bucket] == TAINT_VALUE or \
-         self.variables_total_locstats == TAINT_VALUE:
-        variables_coverage_per_map[cov_bucket] = TAINT_VALUE
-      else:
-        variables_coverage_per_map[cov_bucket] = \
-          int(ceil(self.variables_coverage_map[cov_bucket] * 100.0) \
-                   / self.variables_total_locstats)
-
-    print (' =================================================')
-    print ('            Debug Location Statistics       ')
-    print (' =================================================')
-    print ('     cov%           samples         percentage(~)  ')
-    print (' -------------------------------------------------')
-    for cov_bucket in coverage_buckets():
-      if self.variables_coverage_map[cov_bucket] or \
-         self.variables_total_locstats == TAINT_VALUE:
-        print ('   {0:10}     {1:8}              {2:3}%'. \
-          format(cov_bucket, self.variables_coverage_map[cov_bucket], \
-                 variables_coverage_per_map[cov_bucket]))
-      else:
-        print ('   {0:10}     {1:8d}              {2:3d}%'. \
-          format(cov_bucket, self.variables_coverage_map[cov_bucket], \
-                 variables_coverage_per_map[cov_bucket]))
-    print (' =================================================')
-    print (' -the number of debug variables processed: ' \
-      + str(self.variables_total_locstats))
-    print (' -PC ranges covered: ' + str(pc_ranges_covered) + '%')
-
-    # Only if we are processing all the variables output the total
-    # availability.
-    if self.variables_total and self.variables_with_loc:
-      total_availability = None
-      if self.variables_total == TAINT_VALUE or \
-         self.variables_with_loc == TAINT_VALUE:
-        total_availability = TAINT_VALUE
-      else:
-        total_availability = int(ceil(self.variables_with_loc * 100.0) \
-                                      / self.variables_total)
-      print (' -------------------------------------------------')
-      print (' -total availability: ' + str(total_availability) + '%')
-    print (' =================================================')
-
-    return 0
-
-  # Draw a plot representing the location buckets.
-  def draw_plot(self):
-    from matplotlib import pyplot as plt
-
-    buckets = range(len(self.variables_coverage_map))
-    plt.figure(figsize=(12, 8))
-    init_plot(plt)
-    plt.bar(buckets, self.variables_coverage_map.values(), align='center',
+    def __init__(
+        self,
+        file_name,
+        variables_total,
+        variables_total_locstats,
+        variables_with_loc,
+        variables_scope_bytes_covered,
+        variables_scope_bytes,
+        variables_coverage_map,
+    ):
+        self.file_name = file_name
+        self.variables_total = variables_total
+        self.variables_total_locstats = variables_total_locstats
+        self.variables_with_loc = variables_with_loc
+        self.scope_bytes_covered = variables_scope_bytes_covered
+        self.scope_bytes = variables_scope_bytes
+        self.variables_coverage_map = variables_coverage_map
+
+    # Get the PC ranges coverage.
+    def get_pc_coverage(self):
+        if self.scope_bytes_covered == TAINT_VALUE or self.scope_bytes == TAINT_VALUE:
+            return TAINT_VALUE
+        pc_ranges_covered = int(
+            ceil(self.scope_bytes_covered * 100.0) / self.scope_bytes
+        )
+        return pc_ranges_covered
+
+    # Pretty print the debug location buckets.
+    def pretty_print(self):
+        if self.scope_bytes == 0:
+            print("No scope bytes found.")
+            return -1
+
+        pc_ranges_covered = self.get_pc_coverage()
+        variables_coverage_per_map = {}
+        for cov_bucket in coverage_buckets():
+            variables_coverage_per_map[cov_bucket] = None
+            if (
+                self.variables_coverage_map[cov_bucket] == TAINT_VALUE
+                or self.variables_total_locstats == TAINT_VALUE
+            ):
+                variables_coverage_per_map[cov_bucket] = TAINT_VALUE
+            else:
+                variables_coverage_per_map[cov_bucket] = int(
+                    ceil(self.variables_coverage_map[cov_bucket] * 100.0)
+                    / self.variables_total_locstats
+                )
+
+        print(" =================================================")
+        print("            Debug Location Statistics       ")
+        print(" =================================================")
+        print("     cov%           samples         percentage(~)  ")
+        print(" -------------------------------------------------")
+        for cov_bucket in coverage_buckets():
+            if (
+                self.variables_coverage_map[cov_bucket]
+                or self.variables_total_locstats == TAINT_VALUE
+            ):
+                print(
+                    "   {0:10}     {1:8}              {2:3}%".format(
+                        cov_bucket,
+                        self.variables_coverage_map[cov_bucket],
+                        variables_coverage_per_map[cov_bucket],
+                    )
+                )
+            else:
+                print(
+                    "   {0:10}     {1:8d}              {2:3d}%".format(
+                        cov_bucket,
+                        self.variables_coverage_map[cov_bucket],
+                        variables_coverage_per_map[cov_bucket],
+                    )
+                )
+        print(" =================================================")
+        print(
+            " -the number of debug variables processed: "
+            + str(self.variables_total_locstats)
+        )
+        print(" -PC ranges covered: " + str(pc_ranges_covered) + "%")
+
+        # Only if we are processing all the variables output the total
+        # availability.
+        if self.variables_total and self.variables_with_loc:
+            total_availability = None
+            if (
+                self.variables_total == TAINT_VALUE
+                or self.variables_with_loc == TAINT_VALUE
+            ):
+                total_availability = TAINT_VALUE
+            else:
+                total_availability = int(
+                    ceil(self.variables_with_loc * 100.0) / self.variables_total
+                )
+            print(" -------------------------------------------------")
+            print(" -total availability: " + str(total_availability) + "%")
+        print(" =================================================")
+
+        return 0
+
+    # Draw a plot representing the location buckets.
+    def draw_plot(self):
+        from matplotlib import pyplot as plt
+
+        buckets = range(len(self.variables_coverage_map))
+        plt.figure(figsize=(12, 8))
+        init_plot(plt)
+        plt.bar(
+            buckets,
+            self.variables_coverage_map.values(),
+            align="center",
             tick_label=self.variables_coverage_map.keys(),
-            label='variables of {}'.format(self.file_name))
-
-    # Place the text box with the coverage info.
-    pc_ranges_covered = self.get_pc_coverage()
-    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
-    plt.text(0.02, 0.90, 'PC ranges covered: {}%'.format(pc_ranges_covered),
-             transform=plt.gca().transAxes, fontsize=12,
-             verticalalignment='top', bbox=props)
-
-    finish_plot(plt)
-
-  # Compare the two LocationStats objects and draw a plot showing
-  # the difference.
-  def draw_location_diff(self, locstats_to_compare):
-    from matplotlib import pyplot as plt
-
-    pc_ranges_covered = self.get_pc_coverage()
-    pc_ranges_covered_to_compare = locstats_to_compare.get_pc_coverage()
-
-    buckets = range(len(self.variables_coverage_map))
-    buckets_to_compare = range(len(locstats_to_compare.variables_coverage_map))
-
-    fig = plt.figure(figsize=(12, 8))
-    ax = fig.add_subplot(111)
-    init_plot(plt)
-
-    comparison_keys = list(coverage_buckets())
-    ax.bar(buckets, self.variables_coverage_map.values(), align='edge',
-           width=0.4,
-           label='variables of {}'.format(self.file_name))
-    ax.bar(buckets_to_compare,
-           locstats_to_compare.variables_coverage_map.values(),
-           color='r', align='edge', width=-0.4,
-           label='variables of {}'.format(locstats_to_compare.file_name))
-    ax.set_xticks(range(len(comparison_keys)))
-    ax.set_xticklabels(comparison_keys)
-
-    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
-    plt.text(0.02, 0.88,
-             '{} PC ranges covered: {}%'. \
-             format(self.file_name, pc_ranges_covered),
-             transform=plt.gca().transAxes, fontsize=12,
-             verticalalignment='top', bbox=props)
-    plt.text(0.02, 0.83,
-             '{} PC ranges covered: {}%'. \
-             format(locstats_to_compare.file_name,
-                    pc_ranges_covered_to_compare),
-             transform=plt.gca().transAxes, fontsize=12,
-             verticalalignment='top', bbox=props)
-
-    finish_plot(plt)
+            label="variables of {}".format(self.file_name),
+        )
+
+        # Place the text box with the coverage info.
+        pc_ranges_covered = self.get_pc_coverage()
+        props = dict(boxstyle="round", facecolor="wheat", alpha=0.5)
+        plt.text(
+            0.02,
+            0.90,
+            "PC ranges covered: {}%".format(pc_ranges_covered),
+            transform=plt.gca().transAxes,
+            fontsize=12,
+            verticalalignment="top",
+            bbox=props,
+        )
+
+        finish_plot(plt)
+
+    # Compare the two LocationStats objects and draw a plot showing
+    # the difference.
+    def draw_location_diff(self, locstats_to_compare):
+        from matplotlib import pyplot as plt
+
+        pc_ranges_covered = self.get_pc_coverage()
+        pc_ranges_covered_to_compare = locstats_to_compare.get_pc_coverage()
+
+        buckets = range(len(self.variables_coverage_map))
+        buckets_to_compare = range(len(locstats_to_compare.variables_coverage_map))
+
+        fig = plt.figure(figsize=(12, 8))
+        ax = fig.add_subplot(111)
+        init_plot(plt)
+
+        comparison_keys = list(coverage_buckets())
+        ax.bar(
+            buckets,
+            self.variables_coverage_map.values(),
+            align="edge",
+            width=0.4,
+            label="variables of {}".format(self.file_name),
+        )
+        ax.bar(
+            buckets_to_compare,
+            locstats_to_compare.variables_coverage_map.values(),
+            color="r",
+            align="edge",
+            width=-0.4,
+            label="variables of {}".format(locstats_to_compare.file_name),
+        )
+        ax.set_xticks(range(len(comparison_keys)))
+        ax.set_xticklabels(comparison_keys)
+
+        props = dict(boxstyle="round", facecolor="wheat", alpha=0.5)
+        plt.text(
+            0.02,
+            0.88,
+            "{} PC ranges covered: {}%".format(self.file_name, pc_ranges_covered),
+            transform=plt.gca().transAxes,
+            fontsize=12,
+            verticalalignment="top",
+            bbox=props,
+        )
+        plt.text(
+            0.02,
+            0.83,
+            "{} PC ranges covered: {}%".format(
+                locstats_to_compare.file_name, pc_ranges_covered_to_compare
+            ),
+            transform=plt.gca().transAxes,
+            fontsize=12,
+            verticalalignment="top",
+            bbox=props,
+        )
+
+        finish_plot(plt)
+
 
 # Define the location buckets.
 def coverage_buckets():
-  yield '0%'
-  yield '(0%,10%)'
-  for start in range(10, 91, 10):
-    yield '[{0}%,{1}%)'.format(start, start + 10)
-  yield '100%'
+    yield "0%"
+    yield "(0%,10%)"
+    for start in range(10, 91, 10):
+        yield "[{0}%,{1}%)".format(start, start + 10)
+    yield "100%"
+
 
 # Parse the JSON representing the debug statistics, and create a
 # LocationStats object.
 def parse_locstats(opts, binary):
-  # These will be different due to different options enabled.
-  variables_total = None
-  variables_total_locstats = None
-  variables_with_loc = None
-  variables_scope_bytes_covered = None
-  variables_scope_bytes = None
-  variables_scope_bytes_entry_values = None
-  variables_coverage_map = OrderedDict()
-
-  # Get the directory of the LLVM tools.
-  llvm_dwarfdump_cmd = os.path.join(os.path.dirname(__file__), \
-                                    "llvm-dwarfdump")
-  # The statistics llvm-dwarfdump option.
-  llvm_dwarfdump_stats_opt = "--statistics"
-
-  # Generate the stats with the llvm-dwarfdump.
-  subproc = Popen([llvm_dwarfdump_cmd, llvm_dwarfdump_stats_opt, binary], \
-                  stdin=PIPE, stdout=PIPE, stderr=PIPE, \
-                  universal_newlines = True)
-  cmd_stdout, cmd_stderr = subproc.communicate()
-
-  # TODO: Handle errors that are coming from llvm-dwarfdump.
-
-  # Get the JSON and parse it.
-  json_parsed = None
-
-  try:
-    json_parsed = loads(cmd_stdout)
-  except:
-    print ('error: No valid llvm-dwarfdump statistics found.')
-    sys.exit(1)
-
-  # TODO: Parse the statistics Version from JSON.
-
-  def init_field(name):
-    if json_parsed[name] == 'overflowed':
-      print ('warning: "' + name + '" field overflowed.')
-      return TAINT_VALUE
-    return json_parsed[name]
-
-  if opts.only_variables:
-    # Read the JSON only for local variables.
-    variables_total_locstats = \
-      init_field('#local vars processed by location statistics')
-    variables_scope_bytes_covered = \
-      init_field('sum_all_local_vars(#bytes in parent scope covered' \
-                  ' by DW_AT_location)')
-    variables_scope_bytes = \
-      init_field('sum_all_local_vars(#bytes in parent scope)')
-    if not opts.ignore_debug_entry_values:
-      for cov_bucket in coverage_buckets():
-        cov_category = "#local vars with {} of parent scope covered " \
-                       "by DW_AT_location".format(cov_bucket)
-        variables_coverage_map[cov_bucket] = init_field(cov_category)
-    else:
-      variables_scope_bytes_entry_values = \
-        init_field('sum_all_local_vars(#bytes in parent scope ' \
-                    'covered by DW_OP_entry_value)')
-      if variables_scope_bytes_covered != TAINT_VALUE and \
-         variables_scope_bytes_entry_values != TAINT_VALUE:
-        variables_scope_bytes_covered = variables_scope_bytes_covered \
-           - variables_scope_bytes_entry_values
-      for cov_bucket in coverage_buckets():
-        cov_category = \
-          "#local vars - entry values with {} of parent scope " \
-          "covered by DW_AT_location".format(cov_bucket)
-        variables_coverage_map[cov_bucket] = init_field(cov_category)
-  elif opts.only_formal_parameters:
-    # Read the JSON only for formal parameters.
-    variables_total_locstats = \
-      init_field('#params processed by location statistics')
-    variables_scope_bytes_covered = \
-      init_field('sum_all_params(#bytes in parent scope covered ' \
-                  'by DW_AT_location)')
-    variables_scope_bytes = \
-      init_field('sum_all_params(#bytes in parent scope)')
-    if not opts.ignore_debug_entry_values:
-      for cov_bucket in coverage_buckets():
-        cov_category = "#params with {} of parent scope covered " \
-                       "by DW_AT_location".format(cov_bucket)
-        variables_coverage_map[cov_bucket] = init_field(cov_category)
-    else:
-      variables_scope_bytes_entry_values = \
-        init_field('sum_all_params(#bytes in parent scope covered ' \
-                    'by DW_OP_entry_value)')
-      if variables_scope_bytes_covered != TAINT_VALUE and \
-         variables_scope_bytes_entry_values != TAINT_VALUE:
-        variables_scope_bytes_covered = variables_scope_bytes_covered \
-          - variables_scope_bytes_entry_values
-      for cov_bucket in coverage_buckets():
-        cov_category = \
-          "#params - entry values with {} of parent scope covered" \
-          " by DW_AT_location".format(cov_bucket)
-        variables_coverage_map[cov_bucket] = init_field(cov_category)
-  else:
-    # Read the JSON for both local variables and formal parameters.
-    variables_total = \
-      init_field('#source variables')
-    variables_with_loc = init_field('#source variables with location')
-    variables_total_locstats = \
-      init_field('#variables processed by location statistics')
-    variables_scope_bytes_covered = \
-      init_field('sum_all_variables(#bytes in parent scope covered ' \
-                  'by DW_AT_location)')
-    variables_scope_bytes = \
-      init_field('sum_all_variables(#bytes in parent scope)')
-
-    if not opts.ignore_debug_entry_values:
-      for cov_bucket in coverage_buckets():
-        cov_category = "#variables with {} of parent scope covered " \
-                       "by DW_AT_location".format(cov_bucket)
-        variables_coverage_map[cov_bucket] = init_field(cov_category)
+    # These will be different due to different options enabled.
+    variables_total = None
+    variables_total_locstats = None
+    variables_with_loc = None
+    variables_scope_bytes_covered = None
+    variables_scope_bytes = None
+    variables_scope_bytes_entry_values = None
+    variables_coverage_map = OrderedDict()
+
+    # Get the directory of the LLVM tools.
+    llvm_dwarfdump_cmd = os.path.join(os.path.dirname(__file__), "llvm-dwarfdump")
+    # The statistics llvm-dwarfdump option.
+    llvm_dwarfdump_stats_opt = "--statistics"
+
+    # Generate the stats with the llvm-dwarfdump.
+    subproc = Popen(
+        [llvm_dwarfdump_cmd, llvm_dwarfdump_stats_opt, binary],
+        stdin=PIPE,
+        stdout=PIPE,
+        stderr=PIPE,
+        universal_newlines=True,
+    )
+    cmd_stdout, cmd_stderr = subproc.communicate()
+
+    # TODO: Handle errors that are coming from llvm-dwarfdump.
+
+    # Get the JSON and parse it.
+    json_parsed = None
+
+    try:
+        json_parsed = loads(cmd_stdout)
+    except:
+        print("error: No valid llvm-dwarfdump statistics found.")
+        sys.exit(1)
+
+    # TODO: Parse the statistics Version from JSON.
+
+    def init_field(name):
+        if json_parsed[name] == "overflowed":
+            print('warning: "' + name + '" field overflowed.')
+            return TAINT_VALUE
+        return json_parsed[name]
+
+    if opts.only_variables:
+        # Read the JSON only for local variables.
+        variables_total_locstats = init_field(
+            "#local vars processed by location statistics"
+        )
+        variables_scope_bytes_covered = init_field(
+            "sum_all_local_vars(#bytes in parent scope covered" " by DW_AT_location)"
+        )
+        variables_scope_bytes = init_field("sum_all_local_vars(#bytes in parent scope)")
+        if not opts.ignore_debug_entry_values:
+            for cov_bucket in coverage_buckets():
+                cov_category = (
+                    "#local vars with {} of parent scope covered "
+                    "by DW_AT_location".format(cov_bucket)
+                )
+                variables_coverage_map[cov_bucket] = init_field(cov_category)
+        else:
+            variables_scope_bytes_entry_values = init_field(
+                "sum_all_local_vars(#bytes in parent scope "
+                "covered by DW_OP_entry_value)"
+            )
+            if (
+                variables_scope_bytes_covered != TAINT_VALUE
+                and variables_scope_bytes_entry_values != TAINT_VALUE
+            ):
+                variables_scope_bytes_covered = (
+                    variables_scope_bytes_covered - variables_scope_bytes_entry_values
+                )
+            for cov_bucket in coverage_buckets():
+                cov_category = (
+                    "#local vars - entry values with {} of parent scope "
+                    "covered by DW_AT_location".format(cov_bucket)
+                )
+                variables_coverage_map[cov_bucket] = init_field(cov_category)
+    elif opts.only_formal_parameters:
+        # Read the JSON only for formal parameters.
+        variables_total_locstats = init_field(
+            "#params processed by location statistics"
+        )
+        variables_scope_bytes_covered = init_field(
+            "sum_all_params(#bytes in parent scope covered " "by DW_AT_location)"
+        )
+        variables_scope_bytes = init_field("sum_all_params(#bytes in parent scope)")
+        if not opts.ignore_debug_entry_values:
+            for cov_bucket in coverage_buckets():
+                cov_category = (
+                    "#params with {} of parent scope covered "
+                    "by DW_AT_location".format(cov_bucket)
+                )
+                variables_coverage_map[cov_bucket] = init_field(cov_category)
+        else:
+            variables_scope_bytes_entry_values = init_field(
+                "sum_all_params(#bytes in parent scope covered " "by DW_OP_entry_value)"
+            )
+            if (
+                variables_scope_bytes_covered != TAINT_VALUE
+                and variables_scope_bytes_entry_values != TAINT_VALUE
+            ):
+                variables_scope_bytes_covered = (
+                    variables_scope_bytes_covered - variables_scope_bytes_entry_values
+                )
+            for cov_bucket in coverage_buckets():
+                cov_category = (
+                    "#params - entry values with {} of parent scope covered"
+                    " by DW_AT_location".format(cov_bucket)
+                )
+                variables_coverage_map[cov_bucket] = init_field(cov_category)
     else:
-      variables_scope_bytes_entry_values = \
-        init_field('sum_all_variables(#bytes in parent scope covered ' \
-                    'by DW_OP_entry_value)')
-      if variables_scope_bytes_covered != TAINT_VALUE and \
-         variables_scope_bytes_entry_values != TAINT_VALUE:
-        variables_scope_bytes_covered = variables_scope_bytes_covered \
-          - variables_scope_bytes_entry_values
-      for cov_bucket in coverage_buckets():
-        cov_category = \
-          "#variables - entry values with {} of parent scope covered " \
-          "by DW_AT_location".format(cov_bucket)
-        variables_coverage_map[cov_bucket] = init_field(cov_category)
-
-  return LocationStats(binary, variables_total, variables_total_locstats,
-                       variables_with_loc, variables_scope_bytes_covered,
-                       variables_scope_bytes, variables_coverage_map)
+        # Read the JSON for both local variables and formal parameters.
+        variables_total = init_field("#source variables")
+        variables_with_loc = init_field("#source variables with location")
+        variables_total_locstats = init_field(
+            "#variables processed by location statistics"
+        )
+        variables_scope_bytes_covered = init_field(
+            "sum_all_variables(#bytes in parent scope covered " "by DW_AT_location)"
+        )
+        variables_scope_bytes = init_field("sum_all_variables(#bytes in parent scope)")
+
+        if not opts.ignore_debug_entry_values:
+            for cov_bucket in coverage_buckets():
+                cov_category = (
+                    "#variables with {} of parent scope covered "
+                    "by DW_AT_location".format(cov_bucket)
+                )
+                variables_coverage_map[cov_bucket] = init_field(cov_category)
+        else:
+            variables_scope_bytes_entry_values = init_field(
+                "sum_all_variables(#bytes in parent scope covered "
+                "by DW_OP_entry_value)"
+            )
+            if (
+                variables_scope_bytes_covered != TAINT_VALUE
+                and variables_scope_bytes_entry_values != TAINT_VALUE
+            ):
+                variables_scope_bytes_covered = (
+                    variables_scope_bytes_covered - variables_scope_bytes_entry_values
+                )
+            for cov_bucket in coverage_buckets():
+                cov_category = (
+                    "#variables - entry values with {} of parent scope covered "
+                    "by DW_AT_location".format(cov_bucket)
+                )
+                variables_coverage_map[cov_bucket] = init_field(cov_category)
+
+    return LocationStats(
+        binary,
+        variables_total,
+        variables_total_locstats,
+        variables_with_loc,
+        variables_scope_bytes_covered,
+        variables_scope_bytes,
+        variables_coverage_map,
+    )
+
 
 # Parse the program arguments.
 def parse_program_args(parser):
-  parser.add_argument('--only-variables', action='store_true', default=False,
-            help='calculate the location statistics only for local variables')
-  parser.add_argument('--only-formal-parameters', action='store_true',
-            default=False,
-            help='calculate the location statistics only for formal parameters')
-  parser.add_argument('--ignore-debug-entry-values', action='store_true',
-            default=False,
-            help='ignore the location statistics on locations with '
-                 'entry values')
-  parser.add_argument('--draw-plot', action='store_true', default=False,
-            help='show histogram of location buckets generated (requires '
-                 'matplotlib)')
-  parser.add_argument('--compare', action='store_true', default=False,
-            help='compare the debug location coverage on two files provided, '
-                 'and draw a plot showing the difference (requires '
-                 'matplotlib)')
-  parser.add_argument('file_names', nargs='+', type=str, help='file to process')
-
-  return parser.parse_args()
+    parser.add_argument(
+        "--only-variables",
+        action="store_true",
+        default=False,
+        help="calculate the location statistics only for local variables",
+    )
+    parser.add_argument(
+        "--only-formal-parameters",
+        action="store_true",
+        default=False,
+        help="calculate the location statistics only for formal parameters",
+    )
+    parser.add_argument(
+        "--ignore-debug-entry-values",
+        action="store_true",
+        default=False,
+        help="ignore the location statistics on locations with " "entry values",
+    )
+    parser.add_argument(
+        "--draw-plot",
+        action="store_true",
+        default=False,
+        help="show histogram of location buckets generated (requires " "matplotlib)",
+    )
+    parser.add_argument(
+        "--compare",
+        action="store_true",
+        default=False,
+        help="compare the debug location coverage on two files provided, "
+        "and draw a plot showing the 
diff erence  (requires "
+        "matplotlib)",
+    )
+    parser.add_argument("file_names", nargs="+", type=str, help="file to process")
+
+    return parser.parse_args()
+
 
 # Verify that the program inputs meet the requirements.
 def verify_program_inputs(opts):
-  if len(sys.argv) < 2:
-    print ('error: Too few arguments.')
-    return False
+    if len(sys.argv) < 2:
+        print("error: Too few arguments.")
+        return False
 
-  if opts.only_variables and opts.only_formal_parameters:
-    print ('error: Please use just one --only* option.')
-    return False
+    if opts.only_variables and opts.only_formal_parameters:
+        print("error: Please use just one --only* option.")
+        return False
 
-  if not opts.compare and len(opts.file_names) != 1:
-    print ('error: Please specify only one file to process.')
-    return False
+    if not opts.compare and len(opts.file_names) != 1:
+        print("error: Please specify only one file to process.")
+        return False
 
-  if opts.compare and len(opts.file_names) != 2:
-    print ('error: Please specify two files to process.')
-    return False
+    if opts.compare and len(opts.file_names) != 2:
+        print("error: Please specify two files to process.")
+        return False
 
-  if opts.draw_plot or opts.compare:
-    try:
-      import matplotlib
-    except ImportError:
-      print('error: matplotlib not found.')
-      return False
+    if opts.draw_plot or opts.compare:
+        try:
+            import matplotlib
+        except ImportError:
+            print("error: matplotlib not found.")
+            return False
 
-  return True
+    return True
 
-def Main():
-  parser = argparse.ArgumentParser()
-  opts = parse_program_args(parser)
 
-  if not verify_program_inputs(opts):
-    parser.print_help()
-    sys.exit(1)
+def Main():
+    parser = argparse.ArgumentParser()
+    opts = parse_program_args(parser)
+
+    if not verify_program_inputs(opts):
+        parser.print_help()
+        sys.exit(1)
+
+    binary_file = opts.file_names[0]
+    locstats = parse_locstats(opts, binary_file)
+
+    if not opts.compare:
+        if opts.draw_plot:
+            # Draw a histogram representing the location buckets.
+            locstats.draw_plot()
+        else:
+            # Pretty print collected info on the standard output.
+            if locstats.pretty_print() == -1:
+                sys.exit(0)
+    else:
+        binary_file_to_compare = opts.file_names[1]
+        locstats_to_compare = parse_locstats(opts, binary_file_to_compare)
+        # Draw a plot showing the difference in debug location coverage between
+        # two files.
+        locstats.draw_location_diff(locstats_to_compare)
 
-  binary_file = opts.file_names[0]
-  locstats = parse_locstats(opts, binary_file)
 
-  if not opts.compare:
-    if opts.draw_plot:
-      # Draw a histogram representing the location buckets.
-      locstats.draw_plot()
-    else:
-      # Pretty print collected info on the standard output.
-      if locstats.pretty_print() == -1:
-        sys.exit(0)
-  else:
-    binary_file_to_compare = opts.file_names[1]
-    locstats_to_compare = parse_locstats(opts, binary_file_to_compare)
-    # Draw a plot showing the difference in debug location coverage between
-    # two files.
-    locstats.draw_location_diff(locstats_to_compare)
-
-if __name__ == '__main__':
-  Main()
-  sys.exit(0)
+if __name__ == "__main__":
+    Main()
+    sys.exit(0)
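
Note: every change in this file is mechanical. The quote normalization,
4-space indents, and wrapped argument lists above are black's default output.
A minimal sketch of reproducing it through black's library API, assuming the
`black` package is installed (format_str and Mode are black's public API; the
snippet being formatted is purely illustrative):

    import black

    # A snippet in the old style: single quotes, compact spacing.
    src = "x = {'a':1,  'b':2}\n"

    # black.Mode() carries the defaults this commit relies on:
    # 88-column lines, double quotes, 4-space indentation.
    print(black.format_str(src, mode=black.Mode()))
    # -> x = {"a": 1, "b": 2}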

diff --git a/llvm/utils/llvm-original-di-preservation.py b/llvm/utils/llvm-original-di-preservation.py
index 5b53e6ad3d67f..dc1fa518ca8e6 100755
--- a/llvm/utils/llvm-original-di-preservation.py
+++ b/llvm/utils/llvm-original-di-preservation.py
@@ -11,37 +11,50 @@
 from collections import defaultdict
 from collections import OrderedDict
 
+
 class DILocBug:
-  def __init__(self, action, bb_name, fn_name, instr):
-    self.action = action
-    self.bb_name = bb_name
-    self.fn_name = fn_name
-    self.instr = instr
-  def __str__(self):
-    return self.action + self.bb_name + self.fn_name + self.instr
+    def __init__(self, action, bb_name, fn_name, instr):
+        self.action = action
+        self.bb_name = bb_name
+        self.fn_name = fn_name
+        self.instr = instr
+
+    def __str__(self):
+        return self.action + self.bb_name + self.fn_name + self.instr
+
 
 class DISPBug:
-  def __init__(self, action, fn_name):
-    self.action = action
-    self.fn_name = fn_name
-  def __str__(self):
-    return self.action + self.fn_name
+    def __init__(self, action, fn_name):
+        self.action = action
+        self.fn_name = fn_name
+
+    def __str__(self):
+        return self.action + self.fn_name
+
 
 class DIVarBug:
-  def __init__(self, action, name, fn_name):
-    self.action = action
-    self.name = name
-    self.fn_name = fn_name
-  def __str__(self):
-    return self.action + self.name + self.fn_name
+    def __init__(self, action, name, fn_name):
+        self.action = action
+        self.name = name
+        self.fn_name = fn_name
+
+    def __str__(self):
+        return self.action + self.name + self.fn_name
 
-# Report the bugs in the form of HTML.
-def generate_html_report(di_location_bugs, di_subprogram_bugs, di_var_bugs, \
-                         di_location_bugs_summary, di_sp_bugs_summary, \
-                         di_var_bugs_summary, html_file):
-  fileout = open(html_file, "w")
 
-  html_header = """ <html>
+# Report the bugs in the form of HTML.
+def generate_html_report(
+    di_location_bugs,
+    di_subprogram_bugs,
+    di_var_bugs,
+    di_location_bugs_summary,
+    di_sp_bugs_summary,
+    di_var_bugs_summary,
+    html_file,
+):
+    fileout = open(html_file, "w")
+
+    html_header = """ <html>
   <head>
   <style>
   table, th, td {
@@ -56,487 +69,519 @@ def generate_html_report(di_location_bugs, di_subprogram_bugs, di_var_bugs, \
   <body>
   """
 
-  # Create the table for Location bugs.
-  table_title_di_loc = "Location Bugs found by the Debugify"
+    # Create the table for Location bugs.
+    table_title_di_loc = "Location Bugs found by the Debugify"
 
-  table_di_loc = """<table>
+    table_di_loc = """<table>
   <caption><b>{}</b></caption>
   <tr>
-  """.format(table_title_di_loc)
-
-  header_di_loc = ["File", "LLVM Pass Name", "LLVM IR Instruction", \
-                   "Function Name", "Basic Block Name", "Action"]
-
-  for column in header_di_loc:
-    table_di_loc += "    <th>{0}</th>\n".format(column.strip())
-  table_di_loc += "  </tr>\n"
-
-  at_least_one_bug_found = False
-
-  # Handle location bugs.
-  for file, per_file_bugs in di_location_bugs.items():
-    for llvm_pass, per_pass_bugs in per_file_bugs.items():
-      # No location bugs for the pass.
-      if len(per_pass_bugs) == 0:
-        continue
-      at_least_one_bug_found = True
-      row = []
-      table_di_loc += "  </tr>\n"
-      # Get the bugs info.
-      for x in per_pass_bugs:
-        row.append("    <tr>\n")
-        row.append(file)
-        row.append(llvm_pass)
-        row.append(x.instr)
-        row.append(x.fn_name)
-        row.append(x.bb_name)
-        row.append(x.action)
-        row.append("    </tr>\n")
-      # Dump the bugs info into the table.
-      for column in row:
-        # The same file-pass pair can have multiple bugs.
-        if (column == "    <tr>\n" or column == "    </tr>\n"):
-          table_di_loc += column
-          continue
-        table_di_loc += "    <td>{0}</td>\n".format(column.strip())
-      table_di_loc += "  <tr>\n"
-
-  if not at_least_one_bug_found:
-    table_di_loc += """  <tr>
+  """.format(
+        table_title_di_loc
+    )
+
+    header_di_loc = [
+        "File",
+        "LLVM Pass Name",
+        "LLVM IR Instruction",
+        "Function Name",
+        "Basic Block Name",
+        "Action",
+    ]
+
+    for column in header_di_loc:
+        table_di_loc += "    <th>{0}</th>\n".format(column.strip())
+    table_di_loc += "  </tr>\n"
+
+    at_least_one_bug_found = False
+
+    # Handle location bugs.
+    for file, per_file_bugs in di_location_bugs.items():
+        for llvm_pass, per_pass_bugs in per_file_bugs.items():
+            # No location bugs for the pass.
+            if len(per_pass_bugs) == 0:
+                continue
+            at_least_one_bug_found = True
+            row = []
+            table_di_loc += "  </tr>\n"
+            # Get the bugs info.
+            for x in per_pass_bugs:
+                row.append("    <tr>\n")
+                row.append(file)
+                row.append(llvm_pass)
+                row.append(x.instr)
+                row.append(x.fn_name)
+                row.append(x.bb_name)
+                row.append(x.action)
+                row.append("    </tr>\n")
+            # Dump the bugs info into the table.
+            for column in row:
+                # The same file-pass pair can have multiple bugs.
+                if column == "    <tr>\n" or column == "    </tr>\n":
+                    table_di_loc += column
+                    continue
+                table_di_loc += "    <td>{0}</td>\n".format(column.strip())
+            table_di_loc += "  <tr>\n"
+
+    if not at_least_one_bug_found:
+        table_di_loc += """  <tr>
         <td colspan='7'> No bugs found </td>
       </tr>
     """
-  table_di_loc += "</table>\n"
+    table_di_loc += "</table>\n"
 
-  # Create the summary table for the loc bugs.
-  table_title_di_loc_sum = "Summary of Location Bugs"
-  table_di_loc_sum = """<table>
+    # Create the summary table for the loc bugs.
+    table_title_di_loc_sum = "Summary of Location Bugs"
+    table_di_loc_sum = """<table>
   <caption><b>{}</b></caption>
   <tr>
-  """.format(table_title_di_loc_sum)
-
-  header_di_loc_sum = ["LLVM Pass Name", "Number of bugs"]
-
-  for column in header_di_loc_sum:
-    table_di_loc_sum += "    <th>{0}</th>\n".format(column.strip())
-  table_di_loc_sum += "  </tr>\n"
-
-  # Print the summary.
-  row = []
-  for llvm_pass, num in sorted(di_location_bugs_summary.items()):
-    row.append("    <tr>\n")
-    row.append(llvm_pass)
-    row.append(str(num))
-    row.append("    </tr>\n")
-  for column in row:
-    if (column == "    <tr>\n" or column == "    </tr>\n"):
-      table_di_loc_sum += column
-      continue
-    table_di_loc_sum += "    <td>{0}</td>\n".format(column.strip())
-  table_di_loc_sum += "  <tr>\n"
-
-  if not at_least_one_bug_found:
-    table_di_loc_sum += """<tr>
+  """.format(
+        table_title_di_loc_sum
+    )
+
+    header_di_loc_sum = ["LLVM Pass Name", "Number of bugs"]
+
+    for column in header_di_loc_sum:
+        table_di_loc_sum += "    <th>{0}</th>\n".format(column.strip())
+    table_di_loc_sum += "  </tr>\n"
+
+    # Print the summary.
+    row = []
+    for llvm_pass, num in sorted(di_location_bugs_summary.items()):
+        row.append("    <tr>\n")
+        row.append(llvm_pass)
+        row.append(str(num))
+        row.append("    </tr>\n")
+    for column in row:
+        if column == "    <tr>\n" or column == "    </tr>\n":
+            table_di_loc_sum += column
+            continue
+        table_di_loc_sum += "    <td>{0}</td>\n".format(column.strip())
+    table_di_loc_sum += "  <tr>\n"
+
+    if not at_least_one_bug_found:
+        table_di_loc_sum += """<tr>
         <td colspan='2'> No bugs found </td>
       </tr>
     """
-  table_di_loc_sum += "</table>\n"
+    table_di_loc_sum += "</table>\n"
 
-  # Create the table for SP bugs.
-  table_title_di_sp = "SP Bugs found by the Debugify"
-  table_di_sp = """<table>
+    # Create the table for SP bugs.
+    table_title_di_sp = "SP Bugs found by the Debugify"
+    table_di_sp = """<table>
   <caption><b>{}</b></caption>
   <tr>
-  """.format(table_title_di_sp)
-
-  header_di_sp = ["File", "LLVM Pass Name", "Function Name", "Action"]
-
-  for column in header_di_sp:
-    table_di_sp += "    <th>{0}</th>\n".format(column.strip())
-  table_di_sp += "  </tr>\n"
-
-  at_least_one_bug_found = False
-
-  # Handle fn bugs.
-  for file, per_file_bugs in di_subprogram_bugs.items():
-    for llvm_pass, per_pass_bugs in per_file_bugs.items():
-      # No SP bugs for the pass.
-      if len(per_pass_bugs) == 0:
-        continue
-      at_least_one_bug_found = True
-      row = []
-      table_di_sp += "  </tr>\n"
-      # Get the bugs info.
-      for x in per_pass_bugs:
-        row.append("    <tr>\n")
-        row.append(file)
-        row.append(llvm_pass)
-        row.append(x.fn_name)
-        row.append(x.action)
-        row.append("    </tr>\n")
-      # Dump the bugs info into the table.
-      for column in row:
-        # The same file-pass pair can have multiple bugs.
-        if (column == "    <tr>\n" or column == "    </tr>\n"):
-          table_di_sp += column
-          continue
-        table_di_sp += "    <td>{0}</td>\n".format(column.strip())
-      table_di_sp += "  <tr>\n"
-
-  if not at_least_one_bug_found:
-    table_di_sp += """<tr>
+  """.format(
+        table_title_di_sp
+    )
+
+    header_di_sp = ["File", "LLVM Pass Name", "Function Name", "Action"]
+
+    for column in header_di_sp:
+        table_di_sp += "    <th>{0}</th>\n".format(column.strip())
+    table_di_sp += "  </tr>\n"
+
+    at_least_one_bug_found = False
+
+    # Handle fn bugs.
+    for file, per_file_bugs in di_subprogram_bugs.items():
+        for llvm_pass, per_pass_bugs in per_file_bugs.items():
+            # No SP bugs for the pass.
+            if len(per_pass_bugs) == 0:
+                continue
+            at_least_one_bug_found = True
+            row = []
+            table_di_sp += "  </tr>\n"
+            # Get the bugs info.
+            for x in per_pass_bugs:
+                row.append("    <tr>\n")
+                row.append(file)
+                row.append(llvm_pass)
+                row.append(x.fn_name)
+                row.append(x.action)
+                row.append("    </tr>\n")
+            # Dump the bugs info into the table.
+            for column in row:
+                # The same file-pass pair can have multiple bugs.
+                if column == "    <tr>\n" or column == "    </tr>\n":
+                    table_di_sp += column
+                    continue
+                table_di_sp += "    <td>{0}</td>\n".format(column.strip())
+            table_di_sp += "  <tr>\n"
+
+    if not at_least_one_bug_found:
+        table_di_sp += """<tr>
         <td colspan='4'> No bugs found </td>
       </tr>
     """
-  table_di_sp += "</table>\n"
+    table_di_sp += "</table>\n"
 
-  # Create the summary table for the sp bugs.
-  table_title_di_sp_sum = "Summary of SP Bugs"
-  table_di_sp_sum = """<table>
+    # Create the summary table for the sp bugs.
+    table_title_di_sp_sum = "Summary of SP Bugs"
+    table_di_sp_sum = """<table>
   <caption><b>{}</b></caption>
   <tr>
-  """.format(table_title_di_sp_sum)
-
-  header_di_sp_sum = ["LLVM Pass Name", "Number of bugs"]
-
-  for column in header_di_sp_sum:
-    table_di_sp_sum += "    <th>{0}</th>\n".format(column.strip())
-  table_di_sp_sum += "  </tr>\n"
-
-  # Print the summary.
-  row = []
-  for llvm_pass, num in sorted(di_sp_bugs_summary.items()):
-    row.append("    <tr>\n")
-    row.append(llvm_pass)
-    row.append(str(num))
-    row.append("    </tr>\n")
-  for column in row:
-    if (column == "    <tr>\n" or column == "    </tr>\n"):
-      table_di_sp_sum += column
-      continue
-    table_di_sp_sum += "    <td>{0}</td>\n".format(column.strip())
-  table_di_sp_sum += "  <tr>\n"
-
-  if not at_least_one_bug_found:
-    table_di_sp_sum += """<tr>
+  """.format(
+        table_title_di_sp_sum
+    )
+
+    header_di_sp_sum = ["LLVM Pass Name", "Number of bugs"]
+
+    for column in header_di_sp_sum:
+        table_di_sp_sum += "    <th>{0}</th>\n".format(column.strip())
+    table_di_sp_sum += "  </tr>\n"
+
+    # Print the summary.
+    row = []
+    for llvm_pass, num in sorted(di_sp_bugs_summary.items()):
+        row.append("    <tr>\n")
+        row.append(llvm_pass)
+        row.append(str(num))
+        row.append("    </tr>\n")
+    for column in row:
+        if column == "    <tr>\n" or column == "    </tr>\n":
+            table_di_sp_sum += column
+            continue
+        table_di_sp_sum += "    <td>{0}</td>\n".format(column.strip())
+    table_di_sp_sum += "  <tr>\n"
+
+    if not at_least_one_bug_found:
+        table_di_sp_sum += """<tr>
         <td colspan='2'> No bugs found </td>
       </tr>
     """
-  table_di_sp_sum += "</table>\n"
+    table_di_sp_sum += "</table>\n"
 
-  # Create the table for Variable bugs.
-  table_title_di_var = "Variable Location Bugs found by the Debugify"
-  table_di_var = """<table>
+    # Create the table for Variable bugs.
+    table_title_di_var = "Variable Location Bugs found by the Debugify"
+    table_di_var = """<table>
   <caption><b>{}</b></caption>
   <tr>
-  """.format(table_title_di_var)
-
-  header_di_var = ["File", "LLVM Pass Name", "Variable", "Function", "Action"]
-
-  for column in header_di_var:
-    table_di_var += "    <th>{0}</th>\n".format(column.strip())
-  table_di_var += "  </tr>\n"
-
-  at_least_one_bug_found = False
-
-  # Handle var bugs.
-  for file, per_file_bugs in di_var_bugs.items():
-    for llvm_pass, per_pass_bugs in per_file_bugs.items():
-      # No variable bugs for the pass.
-      if len(per_pass_bugs) == 0:
-        continue
-      at_least_one_bug_found = True
-      row = []
-      table_di_var += "  </tr>\n"
-      # Get the bugs info.
-      for x in per_pass_bugs:
-        row.append("    <tr>\n")
-        row.append(file)
-        row.append(llvm_pass)
-        row.append(x.name)
-        row.append(x.fn_name)
-        row.append(x.action)
-        row.append("    </tr>\n")
-      # Dump the bugs info into the table.
-      for column in row:
-        # The same file-pass pair can have multiple bugs.
-        if (column == "    <tr>\n" or column == "    </tr>\n"):
-          table_di_var += column
-          continue
-        table_di_var += "    <td>{0}</td>\n".format(column.strip())
-      table_di_var += "  <tr>\n"
-
-  if not at_least_one_bug_found:
-    table_di_var += """<tr>
+  """.format(
+        table_title_di_var
+    )
+
+    header_di_var = ["File", "LLVM Pass Name", "Variable", "Function", "Action"]
+
+    for column in header_di_var:
+        table_di_var += "    <th>{0}</th>\n".format(column.strip())
+    table_di_var += "  </tr>\n"
+
+    at_least_one_bug_found = False
+
+    # Handle var bugs.
+    for file, per_file_bugs in di_var_bugs.items():
+        for llvm_pass, per_pass_bugs in per_file_bugs.items():
+            # No variable bugs for the pass.
+            if len(per_pass_bugs) == 0:
+                continue
+            at_least_one_bug_found = True
+            row = []
+            table_di_var += "  </tr>\n"
+            # Get the bugs info.
+            for x in per_pass_bugs:
+                row.append("    <tr>\n")
+                row.append(file)
+                row.append(llvm_pass)
+                row.append(x.name)
+                row.append(x.fn_name)
+                row.append(x.action)
+                row.append("    </tr>\n")
+            # Dump the bugs info into the table.
+            for column in row:
+                # The same file-pass pair can have multiple bugs.
+                if column == "    <tr>\n" or column == "    </tr>\n":
+                    table_di_var += column
+                    continue
+                table_di_var += "    <td>{0}</td>\n".format(column.strip())
+            table_di_var += "  <tr>\n"
+
+    if not at_least_one_bug_found:
+        table_di_var += """<tr>
         <td colspan='4'> No bugs found </td>
       </tr>
     """
-  table_di_var += "</table>\n"
+    table_di_var += "</table>\n"
 
-  # Create the summary table for the variable bugs.
-  table_title_di_var_sum = "Summary of Variable Location Bugs"
-  table_di_var_sum = """<table>
+    # Create the summary table for the variable bugs.
+    table_title_di_var_sum = "Summary of Variable Location Bugs"
+    table_di_var_sum = """<table>
   <caption><b>{}</b></caption>
   <tr>
-  """.format(table_title_di_var_sum)
-
-  header_di_var_sum = ["LLVM Pass Name", "Number of bugs"]
-
-  for column in header_di_var_sum:
-    table_di_var_sum += "    <th>{0}</th>\n".format(column.strip())
-  table_di_var_sum += "  </tr>\n"
-
-  # Print the summary.
-  row = []
-  for llvm_pass, num in sorted(di_var_bugs_summary.items()):
-    row.append("    <tr>\n")
-    row.append(llvm_pass)
-    row.append(str(num))
-    row.append("    </tr>\n")
-  for column in row:
-    if (column == "    <tr>\n" or column == "    </tr>\n"):
-      table_di_var_sum += column
-      continue
-    table_di_var_sum += "    <td>{0}</td>\n".format(column.strip())
-  table_di_var_sum += "  <tr>\n"
-
-  if not at_least_one_bug_found:
-    table_di_var_sum += """<tr>
+  """.format(
+        table_title_di_var_sum
+    )
+
+    header_di_var_sum = ["LLVM Pass Name", "Number of bugs"]
+
+    for column in header_di_var_sum:
+        table_di_var_sum += "    <th>{0}</th>\n".format(column.strip())
+    table_di_var_sum += "  </tr>\n"
+
+    # Print the summary.
+    row = []
+    for llvm_pass, num in sorted(di_var_bugs_summary.items()):
+        row.append("    <tr>\n")
+        row.append(llvm_pass)
+        row.append(str(num))
+        row.append("    </tr>\n")
+    for column in row:
+        if column == "    <tr>\n" or column == "    </tr>\n":
+            table_di_var_sum += column
+            continue
+        table_di_var_sum += "    <td>{0}</td>\n".format(column.strip())
+    table_di_var_sum += "  <tr>\n"
+
+    if not at_least_one_bug_found:
+        table_di_var_sum += """<tr>
         <td colspan='2'> No bugs found </td>
       </tr>
     """
-  table_di_var_sum += "</table>\n"
+    table_di_var_sum += "</table>\n"
 
-  # Finish the html page.
-  html_footer = """</body>
+    # Finish the html page.
+    html_footer = """</body>
   </html>"""
 
-  new_line = "<br>\n"
-
-  fileout.writelines(html_header)
-  fileout.writelines(table_di_loc)
-  fileout.writelines(new_line)
-  fileout.writelines(table_di_loc_sum)
-  fileout.writelines(new_line)
-  fileout.writelines(new_line)
-  fileout.writelines(table_di_sp)
-  fileout.writelines(new_line)
-  fileout.writelines(table_di_sp_sum)
-  fileout.writelines(new_line)
-  fileout.writelines(new_line)
-  fileout.writelines(table_di_var)
-  fileout.writelines(new_line)
-  fileout.writelines(table_di_var_sum)
-  fileout.writelines(html_footer)
-  fileout.close()
-
-  print("The " + html_file + " generated.")
+    new_line = "<br>\n"
+
+    fileout.writelines(html_header)
+    fileout.writelines(table_di_loc)
+    fileout.writelines(new_line)
+    fileout.writelines(table_di_loc_sum)
+    fileout.writelines(new_line)
+    fileout.writelines(new_line)
+    fileout.writelines(table_di_sp)
+    fileout.writelines(new_line)
+    fileout.writelines(table_di_sp_sum)
+    fileout.writelines(new_line)
+    fileout.writelines(new_line)
+    fileout.writelines(table_di_var)
+    fileout.writelines(new_line)
+    fileout.writelines(table_di_var_sum)
+    fileout.writelines(html_footer)
+    fileout.close()
+
+    print("The " + html_file + " generated.")
+
 
 # Read the JSON file in chunks.
-def get_json_chunk(file,start,size):
-  json_parsed = None
-  di_checker_data = []
-  skipped_lines = 0
-  line = 0
-
-  # The file contains a JSON object per line.
-  # An example of the line (formatted json):
-  # {
-  #  "file": "simple.c",
-  #  "pass": "Deduce function attributes in RPO",
-  #  "bugs": [
-  #    [
-  #      {
-  #        "action": "drop",
-  #        "metadata": "DISubprogram",
-  #        "name": "fn2"
-  #      },
-  #      {
-  #        "action": "drop",
-  #        "metadata": "DISubprogram",
-  #        "name": "fn1"
-  #      }
-  #    ]
-  #  ]
-  #}
-  with open(file) as json_objects_file:
-    for json_object_line in json_objects_file:
-      line += 1
-      if line < start:
-        continue
-      if line >= start+size:
-        break
-      try:
-        json_object = loads(json_object_line)
-      except:
-        skipped_lines += 1
-      else:
-        di_checker_data.append(json_object)
-
-  return (di_checker_data, skipped_lines, line)
+def get_json_chunk(file, start, size):
+    json_parsed = None
+    di_checker_data = []
+    skipped_lines = 0
+    line = 0
+
+    # The file contains a JSON object per line.
+    # An example of the line (formatted json):
+    # {
+    #  "file": "simple.c",
+    #  "pass": "Deduce function attributes in RPO",
+    #  "bugs": [
+    #    [
+    #      {
+    #        "action": "drop",
+    #        "metadata": "DISubprogram",
+    #        "name": "fn2"
+    #      },
+    #      {
+    #        "action": "drop",
+    #        "metadata": "DISubprogram",
+    #        "name": "fn1"
+    #      }
+    #    ]
+    #  ]
+    # }
+    with open(file) as json_objects_file:
+        for json_object_line in json_objects_file:
+            line += 1
+            if line < start:
+                continue
+            if line >= start + size:
+                break
+            try:
+                json_object = loads(json_object_line)
+            except:
+                skipped_lines += 1
+            else:
+                di_checker_data.append(json_object)
+
+    return (di_checker_data, skipped_lines, line)
+
 
 # Parse the program arguments.
 def parse_program_args(parser):
-  parser.add_argument("file_name", type=str, help="json file to process")
-  parser.add_argument("html_file", type=str, help="html file to output data")
-  parser.add_argument("-compress", action="store_true", help="create reduced html report")
+    parser.add_argument("file_name", type=str, help="json file to process")
+    parser.add_argument("html_file", type=str, help="html file to output data")
+    parser.add_argument(
+        "-compress", action="store_true", help="create reduced html report"
+    )
+
+    return parser.parse_args()
 
-  return parser.parse_args()
 
 def Main():
-  parser = argparse.ArgumentParser()
-  opts = parse_program_args(parser)
-
-  if not opts.html_file.endswith('.html'):
-    print ("error: The output file must be '.html'.")
-    sys.exit(1)
-
-  # Use the defaultdict in order to make multidim dicts.
-  di_location_bugs = defaultdict(lambda: defaultdict(dict))
-  di_subprogram_bugs = defaultdict(lambda: defaultdict(dict))
-  di_variable_bugs = defaultdict(lambda: defaultdict(dict))
-
-  # Use the ordered dict to make a summary.
-  di_location_bugs_summary = OrderedDict()
-  di_sp_bugs_summary = OrderedDict()
-  di_var_bugs_summary = OrderedDict()
-
-  # Compress similar bugs.
-  # DILocBugs with same pass & instruction name.
-  di_loc_pass_instr_set = set()
-  # DISPBugs with same pass & function name.
-  di_sp_pass_fn_set = set()
-  # DIVarBugs with same pass & variable name.
-  di_var_pass_var_set = set()
-
-  start_line = 0
-  chunk_size = 1000000
-  end_line = chunk_size - 1
-  skipped_lines = 0
-  skipped_bugs = 0
-  # Process each chunk of 1 million JSON lines.
-  while True:
-    if start_line > end_line:
-      break
-    (debug_info_bugs, skipped, end_line) = get_json_chunk(opts.file_name,start_line,chunk_size)
-    start_line += chunk_size
-    skipped_lines += skipped
-
-    # Map the bugs into the file-pass pairs.
-    for bugs_per_pass in debug_info_bugs:
-      try:
-        bugs_file = bugs_per_pass["file"]
-        bugs_pass = bugs_per_pass["pass"]
-        bugs = bugs_per_pass["bugs"][0]
-      except:
-        skipped_lines += 1
-        continue
-
-      di_loc_bugs = []
-      di_sp_bugs = []
-      di_var_bugs = []
-
-      # Omit duplicated bugs.
-      di_loc_set = set()
-      di_sp_set = set()
-      di_var_set = set()
-      for bug in bugs:
-        try:
-          bugs_metadata = bug["metadata"]
-        except:
-          skipped_bugs += 1
-          continue
-
-        if bugs_metadata == "DILocation":
-          try:
-            action = bug["action"]
-            bb_name = bug["bb-name"]
-            fn_name = bug["fn-name"]
-            instr = bug["instr"]
-          except:
-            skipped_bugs += 1
-            continue
-          di_loc_bug = DILocBug(action, bb_name, fn_name, instr)
-          if not str(di_loc_bug) in di_loc_set:
-            di_loc_set.add(str(di_loc_bug))
-            if opts.compress:
-              pass_instr = bugs_pass + instr
-              if not pass_instr in di_loc_pass_instr_set:
-                di_loc_pass_instr_set.add(pass_instr)
-                di_loc_bugs.append(di_loc_bug)
-            else:
-              di_loc_bugs.append(di_loc_bug)
-
-          # Fill the summary dict.
-          if bugs_pass in di_location_bugs_summary:
-            di_location_bugs_summary[bugs_pass] += 1
-          else:
-            di_location_bugs_summary[bugs_pass] = 1
-        elif bugs_metadata == "DISubprogram":
-          try:
-            action = bug["action"]
-            name = bug["name"]
-          except:
-            skipped_bugs += 1
-            continue
-          di_sp_bug = DISPBug(action, name)
-          if not str(di_sp_bug) in di_sp_set:
-            di_sp_set.add(str(di_sp_bug))
-            if opts.compress:
-              pass_fn = bugs_pass + name
-              if not pass_fn in di_sp_pass_fn_set:
-                di_sp_pass_fn_set.add(pass_fn)
-                di_sp_bugs.append(di_sp_bug)
-            else:
-              di_sp_bugs.append(di_sp_bug)
-
-          # Fill the summary dict.
-          if bugs_pass in di_sp_bugs_summary:
-            di_sp_bugs_summary[bugs_pass] += 1
-          else:
-            di_sp_bugs_summary[bugs_pass] = 1
-        elif bugs_metadata == "dbg-var-intrinsic":
-          try:
-            action = bug["action"]
-            fn_name = bug["fn-name"]
-            name = bug["name"]
-          except:
-            skipped_bugs += 1
-            continue
-          di_var_bug = DIVarBug(action, name, fn_name)
-          if not str(di_var_bug) in di_var_set:
-            di_var_set.add(str(di_var_bug))
-            if opts.compress:
-              pass_var = bugs_pass + name
-              if not pass_var in di_var_pass_var_set:
-                di_var_pass_var_set.add(pass_var)
-                di_var_bugs.append(di_var_bug)
-            else:
-              di_var_bugs.append(di_var_bug)
-
-          # Fill the summary dict.
-          if bugs_pass in di_var_bugs_summary:
-            di_var_bugs_summary[bugs_pass] += 1
-          else:
-            di_var_bugs_summary[bugs_pass] = 1
-        else:
-          # Unsupported metadata.
-          skipped_bugs += 1
-          continue
-
-      di_location_bugs[bugs_file][bugs_pass] = di_loc_bugs
-      di_subprogram_bugs[bugs_file][bugs_pass] = di_sp_bugs
-      di_variable_bugs[bugs_file][bugs_pass] = di_var_bugs
-
-  generate_html_report(di_location_bugs, di_subprogram_bugs, di_variable_bugs, \
-                       di_location_bugs_summary, di_sp_bugs_summary, \
-                       di_var_bugs_summary, opts.html_file)
-
-  if skipped_lines > 0:
-    print ("Skipped lines: " + str(skipped_lines))
-  if skipped_bugs > 0:
-    print ("Skipped bugs: " + str(skipped_bugs))
+    parser = argparse.ArgumentParser()
+    opts = parse_program_args(parser)
+
+    if not opts.html_file.endswith(".html"):
+        print("error: The output file must be '.html'.")
+        sys.exit(1)
+
+    # Use the defaultdict in order to make multidim dicts.
+    di_location_bugs = defaultdict(lambda: defaultdict(dict))
+    di_subprogram_bugs = defaultdict(lambda: defaultdict(dict))
+    di_variable_bugs = defaultdict(lambda: defaultdict(dict))
+
+    # Use the ordered dict to make a summary.
+    di_location_bugs_summary = OrderedDict()
+    di_sp_bugs_summary = OrderedDict()
+    di_var_bugs_summary = OrderedDict()
+
+    # Compress similar bugs.
+    # DILocBugs with same pass & instruction name.
+    di_loc_pass_instr_set = set()
+    # DISPBugs with same pass & function name.
+    di_sp_pass_fn_set = set()
+    # DIVarBugs with same pass & variable name.
+    di_var_pass_var_set = set()
+
+    start_line = 0
+    chunk_size = 1000000
+    end_line = chunk_size - 1
+    skipped_lines = 0
+    skipped_bugs = 0
+    # Process each chunk of 1 million JSON lines.
+    while True:
+        if start_line > end_line:
+            break
+        (debug_info_bugs, skipped, end_line) = get_json_chunk(
+            opts.file_name, start_line, chunk_size
+        )
+        start_line += chunk_size
+        skipped_lines += skipped
+
+        # Map the bugs into the file-pass pairs.
+        for bugs_per_pass in debug_info_bugs:
+            try:
+                bugs_file = bugs_per_pass["file"]
+                bugs_pass = bugs_per_pass["pass"]
+                bugs = bugs_per_pass["bugs"][0]
+            except:
+                skipped_lines += 1
+                continue
+
+            di_loc_bugs = []
+            di_sp_bugs = []
+            di_var_bugs = []
+
+            # Omit duplicated bugs.
+            di_loc_set = set()
+            di_sp_set = set()
+            di_var_set = set()
+            for bug in bugs:
+                try:
+                    bugs_metadata = bug["metadata"]
+                except:
+                    skipped_bugs += 1
+                    continue
+
+                if bugs_metadata == "DILocation":
+                    try:
+                        action = bug["action"]
+                        bb_name = bug["bb-name"]
+                        fn_name = bug["fn-name"]
+                        instr = bug["instr"]
+                    except:
+                        skipped_bugs += 1
+                        continue
+                    di_loc_bug = DILocBug(action, bb_name, fn_name, instr)
+                    if not str(di_loc_bug) in di_loc_set:
+                        di_loc_set.add(str(di_loc_bug))
+                        if opts.compress:
+                            pass_instr = bugs_pass + instr
+                            if not pass_instr in di_loc_pass_instr_set:
+                                di_loc_pass_instr_set.add(pass_instr)
+                                di_loc_bugs.append(di_loc_bug)
+                        else:
+                            di_loc_bugs.append(di_loc_bug)
+
+                    # Fill the summary dict.
+                    if bugs_pass in di_location_bugs_summary:
+                        di_location_bugs_summary[bugs_pass] += 1
+                    else:
+                        di_location_bugs_summary[bugs_pass] = 1
+                elif bugs_metadata == "DISubprogram":
+                    try:
+                        action = bug["action"]
+                        name = bug["name"]
+                    except:
+                        skipped_bugs += 1
+                        continue
+                    di_sp_bug = DISPBug(action, name)
+                    if not str(di_sp_bug) in di_sp_set:
+                        di_sp_set.add(str(di_sp_bug))
+                        if opts.compress:
+                            pass_fn = bugs_pass + name
+                            if not pass_fn in di_sp_pass_fn_set:
+                                di_sp_pass_fn_set.add(pass_fn)
+                                di_sp_bugs.append(di_sp_bug)
+                        else:
+                            di_sp_bugs.append(di_sp_bug)
+
+                    # Fill the summary dict.
+                    if bugs_pass in di_sp_bugs_summary:
+                        di_sp_bugs_summary[bugs_pass] += 1
+                    else:
+                        di_sp_bugs_summary[bugs_pass] = 1
+                elif bugs_metadata == "dbg-var-intrinsic":
+                    try:
+                        action = bug["action"]
+                        fn_name = bug["fn-name"]
+                        name = bug["name"]
+                    except:
+                        skipped_bugs += 1
+                        continue
+                    di_var_bug = DIVarBug(action, name, fn_name)
+                    if not str(di_var_bug) in di_var_set:
+                        di_var_set.add(str(di_var_bug))
+                        if opts.compress:
+                            pass_var = bugs_pass + name
+                            if not pass_var in di_var_pass_var_set:
+                                di_var_pass_var_set.add(pass_var)
+                                di_var_bugs.append(di_var_bug)
+                        else:
+                            di_var_bugs.append(di_var_bug)
+
+                    # Fill the summary dict.
+                    if bugs_pass in di_var_bugs_summary:
+                        di_var_bugs_summary[bugs_pass] += 1
+                    else:
+                        di_var_bugs_summary[bugs_pass] = 1
+                else:
+                    # Unsupported metadata.
+                    skipped_bugs += 1
+                    continue
+
+            di_location_bugs[bugs_file][bugs_pass] = di_loc_bugs
+            di_subprogram_bugs[bugs_file][bugs_pass] = di_sp_bugs
+            di_variable_bugs[bugs_file][bugs_pass] = di_var_bugs
+
+    generate_html_report(
+        di_location_bugs,
+        di_subprogram_bugs,
+        di_variable_bugs,
+        di_location_bugs_summary,
+        di_sp_bugs_summary,
+        di_var_bugs_summary,
+        opts.html_file,
+    )
+
+    if skipped_lines > 0:
+        print("Skipped lines: " + str(skipped_lines))
+    if skipped_bugs > 0:
+        print("Skipped bugs: " + str(skipped_bugs))
+
 
 if __name__ == "__main__":
-  Main()
-  sys.exit(0)
+    Main()
+    sys.exit(0)
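
Note: get_json_chunk above streams a JSON-lines file, so malformed lines are
counted rather than fatal. A self-contained sketch of the same pattern, with a
hypothetical input path and a narrower except clause than the script's bare one:

    import json

    def read_jsonl_chunk(path, start, size):
        # Collect parsed objects from lines [start, start + size).
        objects, skipped, line_no = [], 0, 0
        with open(path) as f:
            for raw in f:
                line_no += 1
                if line_no < start:
                    continue
                if line_no >= start + size:
                    break
                try:
                    objects.append(json.loads(raw))
                except json.JSONDecodeError:
                    skipped += 1  # tolerate malformed lines, as the script does
        return objects, skipped, line_no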

diff --git a/llvm/utils/merge-stats.py b/llvm/utils/merge-stats.py
index 342be3c9002e7..e4de4d395292e 100755
--- a/llvm/utils/merge-stats.py
+++ b/llvm/utils/merge-stats.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-'''
+"""
 Merge .stats files generated by llvm tools
 
 merge-stats.py takes as argument a list of stats files to merge
@@ -7,7 +7,7 @@
 
 Usage:
   merge-stats.py $(find ./builddir/ -name "*.stats") > total.stats
-'''
+"""
 
 import json
 import sys
@@ -15,19 +15,18 @@
 result = {}
 
 for arg in range(1, len(sys.argv)):
-  with open(sys.argv[arg], "r", encoding='utf-8',
-            errors='ignore') as f:
-    text = f.read()
-    try:
-      data = json.loads(text)
-    except:
-      print('ignored %s: failed to parse' % sys.argv[arg], file= sys.stderr)
-      continue
-    for key in data:
-      if key in result:
-        result[key] += data[key]
-      else:
-        result[key] = data[key]
+    with open(sys.argv[arg], "r", encoding="utf-8", errors="ignore") as f:
+        text = f.read()
+        try:
+            data = json.loads(text)
+        except:
+            print("ignored %s: failed to parse" % sys.argv[arg], file=sys.stderr)
+            continue
+        for key in data:
+            if key in result:
+                result[key] += data[key]
+            else:
+                result[key] = data[key]
 
 out = json.dumps(result, indent=2)
 print(out)
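
Note: the merge is a keyed sum over all input files. A worked example of what
the loop above computes, using hypothetical counter names:

    a = {"isel.NumSelected": 10, "regalloc.NumSpills": 2}
    b = {"isel.NumSelected": 5, "gvn.NumGVNLoad": 7}

    merged = dict(a)
    for key, value in b.items():
        merged[key] = merged.get(key, 0) + value

    assert merged == {
        "isel.NumSelected": 15,
        "regalloc.NumSpills": 2,
        "gvn.NumGVNLoad": 7,
    }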

diff --git a/llvm/utils/pipeline.py b/llvm/utils/pipeline.py
index 7e49f711d3e72..4d49e8928539b 100644
--- a/llvm/utils/pipeline.py
+++ b/llvm/utils/pipeline.py
@@ -6,46 +6,46 @@ def fromStr(pipeStr):
     """Create pipeline object from string representation."""
     stack = []
     curr = []
-    tok = ''
-    kind = ''
+    tok = ""
+    kind = ""
     for c in pipeStr:
-        if c == ',':
-            if tok != '':
+        if c == ",":
+            if tok != "":
                 curr.append([None, tok])
-            tok = ''
-        elif c == '(':
+            tok = ""
+        elif c == "(":
             stack.append([kind, curr])
             kind = tok
             curr = []
-            tok = ''
-        elif c == ')':
-            if tok != '':
+            tok = ""
+        elif c == ")":
+            if tok != "":
                 curr.append([None, tok])
-            tok = ''
+            tok = ""
             oldKind = kind
             oldCurr = curr
             [kind, curr] = stack.pop()
             curr.append([oldKind, oldCurr])
         else:
             tok += c
-    if tok != '':
+    if tok != "":
         curr.append([None, tok])
     return curr
 
 
 def toStr(pipeObj):
     """Create string representation of pipeline object."""
-    res = ''
+    res = ""
     lastIdx = len(pipeObj) - 1
     for i, c in enumerate(pipeObj):
         if c[0]:
-            res += c[0] + '('
+            res += c[0] + "("
             res += toStr(c[1])
-            res += ')'
+            res += ")"
         else:
             res += c[1]
         if i != lastIdx:
-            res += ','
+            res += ","
     return res
 
 
@@ -62,6 +62,7 @@ def count(pipeObj):
 
 def split(pipeObj, splitIndex):
     """Create two new pipeline objects by splitting pipeObj in two directly after pass with index splitIndex."""
+
     def splitInt(src, splitIndex, dstA, dstB, idx):
         for s in src:
             if s[0]:
@@ -86,6 +87,7 @@ def splitInt(src, splitIndex, dstA, dstB, idx):
 
 def remove(pipeObj, removeIndex):
     """Create new pipeline object by removing pass with index removeIndex from pipeObj."""
+
     def removeInt(src, removeIndex, dst, idx):
         for s in src:
             if s[0]:
@@ -105,6 +107,7 @@ def removeInt(src, removeIndex, dst, idx):
 
 def copy(srcPipeObj):
     """Create copy of pipeline object srcPipeObj."""
+
     def copyInt(dst, src):
         for s in src:
             if s[0]:
@@ -121,6 +124,7 @@ def copyInt(dst, src):
 
 def prune(srcPipeObj):
     """Create new pipeline object by removing empty pass-managers (those with count = 0) from srcPipeObj."""
+
     def pruneInt(dst, src):
         for s in src:
             if s[0]:
@@ -141,7 +145,7 @@ def pruneInt(dst, src):
 
     class Test(unittest.TestCase):
         def test_0(self):
-            pipeStr = 'a,b,A(c,B(d,e),f),g'
+            pipeStr = "a,b,A(c,B(d,e),f),g"
             pipeObj = fromStr(pipeStr)
 
             self.assertEqual(7, count(pipeObj))
@@ -155,18 +159,18 @@ def test_0(self):
             self.assertEqual(pipeStr, toStr(copy(pipeObj)))
 
             [pipeObjA, pipeObjB] = split(pipeObj, 3)
-            self.assertEqual('a,b,A(c,B(d))', toStr(pipeObjA))
-            self.assertEqual('A(B(e),f),g', toStr(pipeObjB))
+            self.assertEqual("a,b,A(c,B(d))", toStr(pipeObjA))
+            self.assertEqual("A(B(e),f),g", toStr(pipeObjB))
 
-            self.assertEqual('b,A(c,B(d,e),f),g', toStr(remove(pipeObj, 0)))
-            self.assertEqual('a,b,A(c,B(d,e),f)', toStr(remove(pipeObj, 6)))
+            self.assertEqual("b,A(c,B(d,e),f),g", toStr(remove(pipeObj, 0)))
+            self.assertEqual("a,b,A(c,B(d,e),f)", toStr(remove(pipeObj, 6)))
 
             pipeObjC = remove(pipeObj, 4)
-            self.assertEqual('a,b,A(c,B(d),f),g', toStr(pipeObjC))
+            self.assertEqual("a,b,A(c,B(d),f),g", toStr(pipeObjC))
             pipeObjC = remove(pipeObjC, 3)
-            self.assertEqual('a,b,A(c,B(),f),g', toStr(pipeObjC))
+            self.assertEqual("a,b,A(c,B(),f),g", toStr(pipeObjC))
             pipeObjC = prune(pipeObjC)
-            self.assertEqual('a,b,A(c,f),g', toStr(pipeObjC))
+            self.assertEqual("a,b,A(c,f),g", toStr(pipeObjC))
 
     unittest.main()
     exit(0)
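
Note: the tests above double as documentation for the module. A short usage
sketch of the round-trip API, assuming llvm/utils is on sys.path:

    import pipeline

    obj = pipeline.fromStr("a,b,A(c,B(d,e),f),g")
    assert pipeline.count(obj) == 7                       # seven passes total
    assert pipeline.toStr(obj) == "a,b,A(c,B(d,e),f),g"   # lossless round trip

    # Split directly after the pass with index 3 (the 'd' inside B).
    left, right = pipeline.split(obj, 3)
    print(pipeline.toStr(left))    # a,b,A(c,B(d))
    print(pipeline.toStr(right))   # A(B(e),f),g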

diff --git a/llvm/utils/prepare-code-coverage-artifact.py b/llvm/utils/prepare-code-coverage-artifact.py
index 6d9674ebe5dcb..9a51729186970 100755
--- a/llvm/utils/prepare-code-coverage-artifact.py
+++ b/llvm/utils/prepare-code-coverage-artifact.py
@@ -2,14 +2,14 @@
 
 from __future__ import print_function
 
-'''Prepare a code coverage artifact.
+"""Prepare a code coverage artifact.
 
 - Collate raw profiles into one indexed profile.
 - Generate html reports for the given binaries.
 
 Caution: The positional arguments to this script must be specified before any 
 optional arguments, such as --restrict.
-'''
+"""
 
 import argparse
 import glob
@@ -17,101 +17,181 @@
 import subprocess
 import sys
 
+
 def merge_raw_profiles(host_llvm_profdata, profile_data_dir, preserve_profiles):
-    print(':: Merging raw profiles...', end='')
+    print(":: Merging raw profiles...", end="")
     sys.stdout.flush()
-    raw_profiles = glob.glob(os.path.join(profile_data_dir, '*.profraw'))
-    manifest_path = os.path.join(profile_data_dir, 'profiles.manifest')
-    profdata_path = os.path.join(profile_data_dir, 'Coverage.profdata')
-    with open(manifest_path, 'w') as manifest:
-        manifest.write('\n'.join(raw_profiles))
-    subprocess.check_call([host_llvm_profdata, 'merge', '-sparse', '-f',
-                           manifest_path, '-o', profdata_path])
+    raw_profiles = glob.glob(os.path.join(profile_data_dir, "*.profraw"))
+    manifest_path = os.path.join(profile_data_dir, "profiles.manifest")
+    profdata_path = os.path.join(profile_data_dir, "Coverage.profdata")
+    with open(manifest_path, "w") as manifest:
+        manifest.write("\n".join(raw_profiles))
+    subprocess.check_call(
+        [
+            host_llvm_profdata,
+            "merge",
+            "-sparse",
+            "-f",
+            manifest_path,
+            "-o",
+            profdata_path,
+        ]
+    )
     if not preserve_profiles:
         for raw_profile in raw_profiles:
             os.remove(raw_profile)
     os.remove(manifest_path)
-    print('Done!')
+    print("Done!")
     return profdata_path
 
-def prepare_html_report(host_llvm_cov, profile, report_dir, binaries,
-                        restricted_dirs, compilation_dir):
-    print(':: Preparing html report for {0}...'.format(binaries), end='')
+
+def prepare_html_report(
+    host_llvm_cov, profile, report_dir, binaries, restricted_dirs, compilation_dir
+):
+    print(":: Preparing html report for {0}...".format(binaries), end="")
     sys.stdout.flush()
     objects = []
     for i, binary in enumerate(binaries):
         if i == 0:
             objects.append(binary)
         else:
-            objects.extend(('-object', binary))
-    invocation = [host_llvm_cov, 'show'] + objects + ['-format', 'html',
-                  '-instr-profile', profile, '-o', report_dir,
-                  '-show-line-counts-or-regions', '-Xdemangler', 'c++filt',
-                  '-Xdemangler', '-n'] + restricted_dirs
+            objects.extend(("-object", binary))
+    invocation = (
+        [host_llvm_cov, "show"]
+        + objects
+        + [
+            "-format",
+            "html",
+            "-instr-profile",
+            profile,
+            "-o",
+            report_dir,
+            "-show-line-counts-or-regions",
+            "-Xdemangler",
+            "c++filt",
+            "-Xdemangler",
+            "-n",
+        ]
+        + restricted_dirs
+    )
     if compilation_dir:
-        invocation += ['-compilation-dir=' + compilation_dir]
+        invocation += ["-compilation-dir=" + compilation_dir]
     subprocess.check_call(invocation)
-    with open(os.path.join(report_dir, 'summary.txt'), 'wb') as Summary:
-        subprocess.check_call([host_llvm_cov, 'report'] + objects +
-                               ['-instr-profile', profile] + restricted_dirs,
-                               stdout=Summary)
-    print('Done!')
-
-def prepare_html_reports(host_llvm_cov, profdata_path, report_dir, binaries,
-                         unified_report, restricted_dirs, compilation_dir):
+    with open(os.path.join(report_dir, "summary.txt"), "wb") as Summary:
+        subprocess.check_call(
+            [host_llvm_cov, "report"]
+            + objects
+            + ["-instr-profile", profile]
+            + restricted_dirs,
+            stdout=Summary,
+        )
+    print("Done!")
+
+
+def prepare_html_reports(
+    host_llvm_cov,
+    profdata_path,
+    report_dir,
+    binaries,
+    unified_report,
+    restricted_dirs,
+    compilation_dir,
+):
     if unified_report:
-        prepare_html_report(host_llvm_cov, profdata_path, report_dir, binaries,
-                            restricted_dirs, compilation_dir)
+        prepare_html_report(
+            host_llvm_cov,
+            profdata_path,
+            report_dir,
+            binaries,
+            restricted_dirs,
+            compilation_dir,
+        )
     else:
         for binary in binaries:
-            binary_report_dir = os.path.join(report_dir,
-                                             os.path.basename(binary))
-            prepare_html_report(host_llvm_cov, profdata_path, binary_report_dir,
-                                [binary], restricted_dirs, compilation_dir)
+            binary_report_dir = os.path.join(report_dir, os.path.basename(binary))
+            prepare_html_report(
+                host_llvm_cov,
+                profdata_path,
+                binary_report_dir,
+                [binary],
+                restricted_dirs,
+                compilation_dir,
+            )
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('host_llvm_profdata', help='Path to llvm-profdata')
-    parser.add_argument('host_llvm_cov', help='Path to llvm-cov')
-    parser.add_argument('profile_data_dir',
-                       help='Path to the directory containing the raw profiles')
-    parser.add_argument('report_dir',
-                       help='Path to the output directory for html reports')
-    parser.add_argument('binaries', metavar='B', type=str, nargs='*',
-                       help='Path to an instrumented binary')
-    parser.add_argument('--only-merge', action='store_true',
-                        help='Only merge raw profiles together, skip report '
-                             'generation')
-    parser.add_argument('--preserve-profiles',
-                       help='Do not delete raw profiles', action='store_true')
-    parser.add_argument('--use-existing-profdata',
-                       help='Specify an existing indexed profile to use')
-    parser.add_argument('--unified-report', action='store_true',
-                       help='Emit a unified report for all binaries')
-    parser.add_argument('--restrict', metavar='R', type=str, nargs='*',
-                       default=[],
-                       help='Restrict the reporting to the given source paths'
-                   ' (must be specified after all other positional arguments)')
-    parser.add_argument('-C', '--compilation-dir', type=str, default="",
-                       help='The compilation directory of the binary')
+    parser.add_argument("host_llvm_profdata", help="Path to llvm-profdata")
+    parser.add_argument("host_llvm_cov", help="Path to llvm-cov")
+    parser.add_argument(
+        "profile_data_dir", help="Path to the directory containing the raw profiles"
+    )
+    parser.add_argument(
+        "report_dir", help="Path to the output directory for html reports"
+    )
+    parser.add_argument(
+        "binaries",
+        metavar="B",
+        type=str,
+        nargs="*",
+        help="Path to an instrumented binary",
+    )
+    parser.add_argument(
+        "--only-merge",
+        action="store_true",
+        help="Only merge raw profiles together, skip report " "generation",
+    )
+    parser.add_argument(
+        "--preserve-profiles", help="Do not delete raw profiles", action="store_true"
+    )
+    parser.add_argument(
+        "--use-existing-profdata", help="Specify an existing indexed profile to use"
+    )
+    parser.add_argument(
+        "--unified-report",
+        action="store_true",
+        help="Emit a unified report for all binaries",
+    )
+    parser.add_argument(
+        "--restrict",
+        metavar="R",
+        type=str,
+        nargs="*",
+        default=[],
+        help="Restrict the reporting to the given source paths"
+        " (must be specified after all other positional arguments)",
+    )
+    parser.add_argument(
+        "-C",
+        "--compilation-dir",
+        type=str,
+        default="",
+        help="The compilation directory of the binary",
+    )
     args = parser.parse_args()
 
     if args.use_existing_profdata and args.only_merge:
-        print('--use-existing-profdata and --only-merge are incompatible')
+        print("--use-existing-profdata and --only-merge are incompatible")
         exit(1)
 
     if args.use_existing_profdata:
         profdata_path = args.use_existing_profdata
     else:
-        profdata_path = merge_raw_profiles(args.host_llvm_profdata,
-                                           args.profile_data_dir,
-                                           args.preserve_profiles)
+        profdata_path = merge_raw_profiles(
+            args.host_llvm_profdata, args.profile_data_dir, args.preserve_profiles
+        )
 
     if not len(args.binaries):
-        print('No binaries specified, no work to do!')
+        print("No binaries specified, no work to do!")
         exit(1)
 
     if not args.only_merge:
-        prepare_html_reports(args.host_llvm_cov, profdata_path, args.report_dir,
-                             args.binaries, args.unified_report, args.restrict, 
-                             args.compilation_dir)
+        prepare_html_reports(
+            args.host_llvm_cov,
+            profdata_path,
+            args.report_dir,
+            args.binaries,
+            args.unified_report,
+            args.restrict,
+            args.compilation_dir,
+        )
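
Note: the positional/optional ordering caveat in the module docstring still
applies. A hedged sketch of driving the script, where every path below is a
hypothetical placeholder:

    import subprocess

    subprocess.check_call(
        [
            "python3",
            "llvm/utils/prepare-code-coverage-artifact.py",
            "/usr/bin/llvm-profdata",   # host_llvm_profdata (placeholder path)
            "/usr/bin/llvm-cov",        # host_llvm_cov (placeholder path)
            "profiles/",                # directory holding the *.profraw files
            "coverage-report/",         # output directory for the html reports
            "bin/opt",                  # an instrumented binary to report on
            "--unified-report",
            "--restrict",
            "llvm/lib",                 # --restrict must follow the positionals
        ]
    )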

diff --git a/llvm/utils/reduce_pipeline.py b/llvm/utils/reduce_pipeline.py
index baf6b2f6930d2..515fdd22b1617 100755
--- a/llvm/utils/reduce_pipeline.py
+++ b/llvm/utils/reduce_pipeline.py
@@ -31,30 +31,27 @@
 import tempfile
 
 parser = argparse.ArgumentParser(
-    description=
-    'Automatic opt pipeline reducer. Unrecognized arguments are forwarded to opt.'
+    description="Automatic opt pipeline reducer. Unrecognized arguments are forwarded to opt."
 )
-parser.add_argument('--opt-binary',
-                    action='store',
-                    dest='opt_binary',
-                    default='opt')
-parser.add_argument('--passes', action='store', dest='passes', required=True)
-parser.add_argument('--input', action='store', dest='input', required=True)
-parser.add_argument('--output', action='store', dest='output')
-parser.add_argument('--dont-expand-passes',
-                    action='store_true',
-                    dest='dont_expand_passes',
-                    help='Do not expand pipeline before starting reduction.')
+parser.add_argument("--opt-binary", action="store", dest="opt_binary", default="opt")
+parser.add_argument("--passes", action="store", dest="passes", required=True)
+parser.add_argument("--input", action="store", dest="input", required=True)
+parser.add_argument("--output", action="store", dest="output")
 parser.add_argument(
-    '--dont-remove-empty-pm',
-    action='store_true',
-    dest='dont_remove_empty_pm',
-    help='Do not remove empty pass-managers from the pipeline during reduction.'
+    "--dont-expand-passes",
+    action="store_true",
+    dest="dont_expand_passes",
+    help="Do not expand pipeline before starting reduction.",
+)
+parser.add_argument(
+    "--dont-remove-empty-pm",
+    action="store_true",
+    dest="dont_remove_empty_pm",
+    help="Do not remove empty pass-managers from the pipeline during reduction.",
 )
 [args, extra_opt_args] = parser.parse_known_args()
 
-print('The following extra args will be passed to opt: {}'.format(
-    extra_opt_args))
+print("The following extra args will be passed to opt: {}".format(extra_opt_args))
 
 lst = pipeline.fromStr(args.passes)
 ll_input = args.input
@@ -65,38 +62,42 @@
 # '-passes=default<O3>').
 if not args.dont_expand_passes:
     run_args = [
-        args.opt_binary, '-disable-symbolication', '-disable-output',
-        '-print-pipeline-passes', '-passes={}'.format(pipeline.toStr(lst)),
-        ll_input
+        args.opt_binary,
+        "-disable-symbolication",
+        "-disable-output",
+        "-print-pipeline-passes",
+        "-passes={}".format(pipeline.toStr(lst)),
+        ll_input,
     ]
     run_args.extend(extra_opt_args)
-    opt = subprocess.run(run_args,
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE)
+    opt = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     if opt.returncode != 0:
-        print('Failed to expand passes. Aborting.')
+        print("Failed to expand passes. Aborting.")
         print(run_args)
-        print('exitcode: {}'.format(opt.returncode))
+        print("exitcode: {}".format(opt.returncode))
         print(opt.stderr.decode())
         exit(1)
     stdout = opt.stdout.decode()
-    stdout = stdout[:stdout.rfind('\n')]
+    stdout = stdout[: stdout.rfind("\n")]
     lst = pipeline.fromStr(stdout)
-    print('Expanded pass sequence: {}'.format(pipeline.toStr(lst)))
+    print("Expanded pass sequence: {}".format(pipeline.toStr(lst)))
 
 # Step #0
 # Confirm that the given input, passes and options result in failure.
-print('---Starting step #0---')
+print("---Starting step #0---")
 run_args = [
-    args.opt_binary, '-disable-symbolication', '-disable-output',
-    '-passes={}'.format(pipeline.toStr(lst)), ll_input
+    args.opt_binary,
+    "-disable-symbolication",
+    "-disable-output",
+    "-passes={}".format(pipeline.toStr(lst)),
+    ll_input,
 ]
 run_args.extend(extra_opt_args)
 opt = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 if opt.returncode >= 0:
-    print('Input does not result in failure as expected. Aborting.')
+    print("Input does not result in failure as expected. Aborting.")
     print(run_args)
-    print('exitcode: {}'.format(opt.returncode))
+    print("exitcode: {}".format(opt.returncode))
     print(opt.stderr.decode())
     exit(1)
 
@@ -111,7 +112,7 @@
 # the expected error. This will accomplish two things: first, the input IR
 # will be further reduced, and second, with that IR, the reduced pipeline for
 # invocation B will be sufficient to reproduce.
-print('---Starting step #1---')
+print("---Starting step #1---")
 prevLstB = None
 prevIntermediate = None
 tmpd = tempfile.TemporaryDirectory()
@@ -122,26 +123,29 @@
         lstA = pipeline.prune(lstA)
         lstB = pipeline.prune(lstB)
 
-    intermediate = 'intermediate-0.ll' if idx % 2 else 'intermediate-1.ll'
-    intermediate = tmpd.name + '/' + intermediate
+    intermediate = "intermediate-0.ll" if idx % 2 else "intermediate-1.ll"
+    intermediate = tmpd.name + "/" + intermediate
     run_args = [
-        args.opt_binary, '-disable-symbolication', '-S', '-o', intermediate,
-        '-passes={}'.format(pipeline.toStr(lstA)), ll_input
+        args.opt_binary,
+        "-disable-symbolication",
+        "-S",
+        "-o",
+        intermediate,
+        "-passes={}".format(pipeline.toStr(lstA)),
+        ll_input,
     ]
     run_args.extend(extra_opt_args)
-    optA = subprocess.run(run_args,
-                          stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE)
+    optA = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     run_args = [
-        args.opt_binary, '-disable-symbolication', '-disable-output',
-        '-passes={}'.format(pipeline.toStr(lstB)), intermediate
+        args.opt_binary,
+        "-disable-symbolication",
+        "-disable-output",
+        "-passes={}".format(pipeline.toStr(lstB)),
+        intermediate,
     ]
     run_args.extend(extra_opt_args)
-    optB = subprocess.run(run_args,
-                          stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE)
-    if not (optA.returncode == 0
-            and optB.returncode == expected_error_returncode):
+    optB = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    if not (optA.returncode == 0 and optB.returncode == expected_error_returncode):
         break
     prevLstB = lstB
     prevIntermediate = intermediate
@@ -153,20 +157,21 @@
 # Step #2
 # Try removing passes from the end of the remaining pipeline while still
 # reproducing the error.
-print('---Starting step #2---')
+print("---Starting step #2---")
 prevLstA = None
 for idx in reversed(range(pipeline.count(lst))):
     [lstA, lstB] = pipeline.split(lst, idx)
     if not args.dont_remove_empty_pm:
         lstA = pipeline.prune(lstA)
     run_args = [
-        args.opt_binary, '-disable-symbolication', '-disable-output',
-        '-passes={}'.format(pipeline.toStr(lstA)), ll_input
+        args.opt_binary,
+        "-disable-symbolication",
+        "-disable-output",
+        "-passes={}".format(pipeline.toStr(lstA)),
+        ll_input,
     ]
     run_args.extend(extra_opt_args)
-    optA = subprocess.run(run_args,
-                          stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE)
+    optA = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     if optA.returncode != expected_error_returncode:
         break
     prevLstA = lstA
@@ -178,7 +183,7 @@
 # Now that we have a pipeline that is reduced both front and back we do
 # exhaustive sweeps over the remainder trying to remove one pass at a time.
 # Repeat as long as reduction is possible.
-print('---Starting step #3---')
+print("---Starting step #3---")
 while True:
     keepGoing = False
     for idx in range(pipeline.count(lst)):
@@ -186,13 +191,14 @@
         if not args.dont_remove_empty_pm:
             candLst = pipeline.prune(candLst)
         run_args = [
-            args.opt_binary, '-disable-symbolication', '-disable-output',
-            '-passes={}'.format(pipeline.toStr(candLst)), ll_input
+            args.opt_binary,
+            "-disable-symbolication",
+            "-disable-output",
+            "-passes={}".format(pipeline.toStr(candLst)),
+            ll_input,
         ]
         run_args.extend(extra_opt_args)
-        opt = subprocess.run(run_args,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
+        opt = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         if opt.returncode == expected_error_returncode:
             lst = candLst
             keepGoing = True
@@ -200,9 +206,9 @@
         break
 print('-passes="{}"'.format(pipeline.toStr(lst)))
 
-print('---FINISHED---')
+print("---FINISHED---")
 if args.output:
     shutil.copy(ll_input, args.output)
-    print('Wrote output to \'{}\'.'.format(args.output))
+    print("Wrote output to '{}'.".format(args.output))
 print('-passes="{}"'.format(pipeline.toStr(lst)))
 exit(0)
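
The three reduction steps above are easier to see on a flat list of passes.
Below is a minimal, self-contained sketch of the same front/back/sweep
strategy; reduce_passes and still_crashes are illustrative names standing in
for the two opt invocations, not part of reduce_pipeline.py:

    # Minimal sketch of the reduction strategy used above. still_crashes()
    # stands in for running opt (invocation A producing intermediate IR,
    # invocation B reproducing the failure).
    def reduce_passes(passes, still_crashes):
        # Step #1/#2 analogue: shrink from the front, then from the back.
        while len(passes) > 1 and still_crashes(passes[1:]):
            passes = passes[1:]
        while len(passes) > 1 and still_crashes(passes[:-1]):
            passes = passes[:-1]
        # Step #3: exhaustive single-pass removal, repeated until no further
        # reduction is possible.
        changed = True
        while changed:
            changed = False
            for i in range(len(passes)):
                candidate = passes[:i] + passes[i + 1 :]
                if candidate and still_crashes(candidate):
                    passes = candidate
                    changed = True
                    break
        return passes

    # Example: only 'b' and 'e' are needed to reproduce the crash.
    crash_set = {"b", "e"}
    assert reduce_passes(list("abcdef"), lambda p: crash_set <= set(p)) == ["b", "e"]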

diff --git a/llvm/utils/reduce_pipeline_test/fake_opt.py b/llvm/utils/reduce_pipeline_test/fake_opt.py
index 8027d04e1a162..2d3b9592b7e5c 100755
--- a/llvm/utils/reduce_pipeline_test/fake_opt.py
+++ b/llvm/utils/reduce_pipeline_test/fake_opt.py
@@ -11,53 +11,50 @@
 import signal
 
 parser = argparse.ArgumentParser()
-parser.add_argument('-passes', action='store', dest='passes', required=True)
-parser.add_argument('-print-pipeline-passes',
-                    dest='print_pipeline_passes',
-                    action='store_true')
-parser.add_argument('-crash-seq',
-                    action='store',
-                    dest='crash_seq',
-                    required=True)
-parser.add_argument('-o', action='store', dest='output')
-parser.add_argument('input')
+parser.add_argument("-passes", action="store", dest="passes", required=True)
+parser.add_argument(
+    "-print-pipeline-passes", dest="print_pipeline_passes", action="store_true"
+)
+parser.add_argument("-crash-seq", action="store", dest="crash_seq", required=True)
+parser.add_argument("-o", action="store", dest="output")
+parser.add_argument("input")
 [args, unknown_args] = parser.parse_known_args()
 
 # Expand pipeline if '-print-pipeline-passes'.
 if args.print_pipeline_passes:
-    if args.passes == 'EXPAND_a_to_f':
-        print('a,b,c,d,e,f')
+    if args.passes == "EXPAND_a_to_f":
+        print("a,b,c,d,e,f")
     else:
         print(args.passes)
     exit(0)
 
 # Parse '-crash-seq'.
 crash_seq = []
-tok = ''
+tok = ""
 for c in args.crash_seq:
-    if c == ',':
-        if tok != '':
+    if c == ",":
+        if tok != "":
             crash_seq.append(tok)
-        tok = ''
+        tok = ""
     else:
         tok += c
-if tok != '':
+if tok != "":
     crash_seq.append(tok)
 print(crash_seq)
 
 # Parse '-passes' and see if we need to crash.
-tok = ''
+tok = ""
 for c in args.passes:
-    if c == ',':
+    if c == ",":
         if len(crash_seq) > 0 and crash_seq[0] == tok:
             crash_seq.pop(0)
-        tok = ''
-    elif c == '(':
-        tok = ''
-    elif c == ')':
+        tok = ""
+    elif c == "(":
+        tok = ""
+    elif c == ")":
         if len(crash_seq) > 0 and crash_seq[0] == tok:
             crash_seq.pop(0)
-        tok = ''
+        tok = ""
     else:
         tok += c
 if len(crash_seq) > 0 and crash_seq[0] == tok:
@@ -69,8 +66,8 @@
 
 # Crash if all 'crash_seq' passes occurred in the right order.
 if len(crash_seq) == 0:
-    print('crash')
+    print("crash")
     os.kill(os.getpid(), signal.SIGKILL)
 else:
-    print('no crash')
+    print("no crash")
     exit(0)
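
The hand-rolled tokenizer above walks '-passes' character by character,
flushing a token on ',' and ')' (and discarding pass-manager names before
'('). The same matching rule, condensed into one hypothetical helper
(should_crash is not part of fake_opt.py):

    def should_crash(passes, crash_seq):
        # Pop expected crash tokens as the pass tokens are seen in order;
        # crash when the whole expected sequence has occurred.
        expected = [t for t in crash_seq.split(",") if t]
        tok = ""
        for c in passes + ",":  # the trailing ',' flushes the last token
            if c in ",)":
                if expected and expected[0] == tok:
                    expected.pop(0)
                tok = ""
            elif c == "(":
                tok = ""  # pass-manager names are never matched
            else:
                tok += c
        return not expected

    assert should_crash("a,b,c,A(d,B(e,f),g),h,i", "b,d,f")
    assert not should_crash("a,b,c", "b,d")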

diff --git a/llvm/utils/reduce_pipeline_test/test.py b/llvm/utils/reduce_pipeline_test/test.py
index 170b2d4e8c6a3..0ff1ede9f8c2d 100755
--- a/llvm/utils/reduce_pipeline_test/test.py
+++ b/llvm/utils/reduce_pipeline_test/test.py
@@ -8,95 +8,92 @@
 
 def getFinalPasses(run):
     stdout = run.stdout.decode()
-    stdout = stdout[:stdout.rfind('\n')]
-    stdout = stdout[stdout.rfind('\n') + 1:]
+    stdout = stdout[: stdout.rfind("\n")]
+    stdout = stdout[stdout.rfind("\n") + 1 :]
     return stdout
 
 
 class Test(unittest.TestCase):
     def test_0(self):
         """Test all passes are removed except those required to crash. Verify
-           that PM structure is intact."""
+        that PM structure is intact."""
         run_args = [
-            './utils/reduce_pipeline.py',
-            '--opt-binary=./utils/reduce_pipeline_test/fake_opt.py',
-            '--input=/dev/null', '--passes=a,b,c,A(d,B(e,f),g),h,i',
-            '-crash-seq=b,d,f'
+            "./utils/reduce_pipeline.py",
+            "--opt-binary=./utils/reduce_pipeline_test/fake_opt.py",
+            "--input=/dev/null",
+            "--passes=a,b,c,A(d,B(e,f),g),h,i",
+            "-crash-seq=b,d,f",
         ]
-        run = subprocess.run(run_args,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
+        run = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         self.assertEqual(run.returncode, 0)
         self.assertEqual(getFinalPasses(run), '-passes="b,A(d,B(f))"')
 
     def test_1(self):
         """Test all passes are removed except those required to crash. The
-           required passes in this case are the first and last in that order
-           (a bit of a corner-case for the reduction algorithm)."""
+        required passes in this case are the first and last in that order
+        (a bit of a corner-case for the reduction algorithm)."""
         run_args = [
-            './utils/reduce_pipeline.py',
-            '--opt-binary=./utils/reduce_pipeline_test/fake_opt.py',
-            '--input=/dev/null', '--passes=a,b,c,A(d,B(e,f),g),h,i',
-            '-crash-seq=a,i'
+            "./utils/reduce_pipeline.py",
+            "--opt-binary=./utils/reduce_pipeline_test/fake_opt.py",
+            "--input=/dev/null",
+            "--passes=a,b,c,A(d,B(e,f),g),h,i",
+            "-crash-seq=a,i",
         ]
-        run = subprocess.run(run_args,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
+        run = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         self.assertEqual(run.returncode, 0)
         self.assertEqual(getFinalPasses(run), '-passes="a,i"')
 
     def test_2_0(self):
         """Test expansion of EXPAND_a_to_f (expands into 'a,b,c,d,e,f')."""
         run_args = [
-            './utils/reduce_pipeline.py',
-            '--opt-binary=./utils/reduce_pipeline_test/fake_opt.py',
-            '--input=/dev/null', '--passes=EXPAND_a_to_f', '-crash-seq=b,e'
+            "./utils/reduce_pipeline.py",
+            "--opt-binary=./utils/reduce_pipeline_test/fake_opt.py",
+            "--input=/dev/null",
+            "--passes=EXPAND_a_to_f",
+            "-crash-seq=b,e",
         ]
-        run = subprocess.run(run_args,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
+        run = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         self.assertEqual(run.returncode, 0)
         self.assertEqual(getFinalPasses(run), '-passes="b,e"')
 
     def test_2_1(self):
         """Test EXPAND_a_to_f and the '--dont-expand-passes' option."""
         run_args = [
-            './utils/reduce_pipeline.py',
-            '--opt-binary=./utils/reduce_pipeline_test/fake_opt.py',
-            '--input=/dev/null', '--passes=EXPAND_a_to_f',
-            '-crash-seq=EXPAND_a_to_f', '--dont-expand-passes'
+            "./utils/reduce_pipeline.py",
+            "--opt-binary=./utils/reduce_pipeline_test/fake_opt.py",
+            "--input=/dev/null",
+            "--passes=EXPAND_a_to_f",
+            "-crash-seq=EXPAND_a_to_f",
+            "--dont-expand-passes",
         ]
-        run = subprocess.run(run_args,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
+        run = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         self.assertEqual(run.returncode, 0)
         self.assertEqual(getFinalPasses(run), '-passes="EXPAND_a_to_f"')
 
     def test_3(self):
         """Test that empty pass-managers get removed by default."""
         run_args = [
-            './utils/reduce_pipeline.py',
-            '--opt-binary=./utils/reduce_pipeline_test/fake_opt.py',
-            '--input=/dev/null', '--passes=a,b,c,A(d,B(e,f),g),h,i',
-            '-crash-seq=b,d,h'
+            "./utils/reduce_pipeline.py",
+            "--opt-binary=./utils/reduce_pipeline_test/fake_opt.py",
+            "--input=/dev/null",
+            "--passes=a,b,c,A(d,B(e,f),g),h,i",
+            "-crash-seq=b,d,h",
         ]
-        run = subprocess.run(run_args,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
+        run = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         self.assertEqual(run.returncode, 0)
         self.assertEqual(getFinalPasses(run), '-passes="b,A(d),h"')
 
     def test_4(self):
         """Test the '--dont-remove-empty-pm' option."""
         run_args = [
-            './utils/reduce_pipeline.py',
-            '--opt-binary=./utils/reduce_pipeline_test/fake_opt.py',
-            '--input=/dev/null', '--passes=a,b,c,A(d,B(e,f),g),h,i',
-            '-crash-seq=b,d,h', '--dont-remove-empty-pm'
+            "./utils/reduce_pipeline.py",
+            "--opt-binary=./utils/reduce_pipeline_test/fake_opt.py",
+            "--input=/dev/null",
+            "--passes=a,b,c,A(d,B(e,f),g),h,i",
+            "-crash-seq=b,d,h",
+            "--dont-remove-empty-pm",
         ]
-        run = subprocess.run(run_args,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
+        run = subprocess.run(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         self.assertEqual(run.returncode, 0)
         self.assertEqual(getFinalPasses(run), '-passes="b,A(d,B()),h"')
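
As a side note, the two slices in getFinalPasses() drop the trailing newline
and then keep everything after the last remaining one, i.e. the final
'-passes="..."' line that reduce_pipeline.py prints. A worked example (the
stdout value is fabricated):

    stdout = '---FINISHED---\n-passes="b,e"\n'
    stdout = stdout[: stdout.rfind("\n")]      # '---FINISHED---\n-passes="b,e"'
    stdout = stdout[stdout.rfind("\n") + 1 :]  # '-passes="b,e"'
    assert stdout == '-passes="b,e"'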
 

diff --git a/llvm/utils/relative_lines.py b/llvm/utils/relative_lines.py
index 9c3706b77751d..86b190d5ea182 100755
--- a/llvm/utils/relative_lines.py
+++ b/llvm/utils/relative_lines.py
@@ -25,36 +25,54 @@
 import re
 import sys
 
+
 def b(x):
-    return bytes(x, encoding='utf-8')
-
-parser = argparse.ArgumentParser(prog = 'relative_lines',
-                                 description = __doc__,
-                                 epilog = USAGE,
-                                 formatter_class=argparse.RawTextHelpFormatter)
-parser.add_argument('--near', type=int, default=20,
-                    help = "maximum line distance to make relative")
-parser.add_argument('--partial', action='store_true', default=False,
-                    help = "apply replacements to files even if others failed")
-parser.add_argument('--pattern', default=[], action='append',
-                    type=lambda x: re.compile(b(x)),
-                    help = "regex to match, with line numbers captured in ().")
-parser.add_argument('--verbose', action='store_true', default=False,
-                    help = "print matches applied")
-parser.add_argument('--dry-run', action='store_true', default=False,
-                    help = "don't apply replacements. Best with --verbose.")
-parser.add_argument('files', nargs = '+')
+    return bytes(x, encoding="utf-8")
+
+
+parser = argparse.ArgumentParser(
+    prog="relative_lines",
+    description=__doc__,
+    epilog=USAGE,
+    formatter_class=argparse.RawTextHelpFormatter,
+)
+parser.add_argument(
+    "--near", type=int, default=20, help="maximum line distance to make relative"
+)
+parser.add_argument(
+    "--partial",
+    action="store_true",
+    default=False,
+    help="apply replacements to files even if others failed",
+)
+parser.add_argument(
+    "--pattern",
+    default=[],
+    action="append",
+    type=lambda x: re.compile(b(x)),
+    help="regex to match, with line numbers captured in ().",
+)
+parser.add_argument(
+    "--verbose", action="store_true", default=False, help="print matches applied"
+)
+parser.add_argument(
+    "--dry-run",
+    action="store_true",
+    default=False,
+    help="don't apply replacements. Best with --verbose.",
+)
+parser.add_argument("files", nargs="+")
 args = parser.parse_args()
 
 for file in args.files:
     try:
-        contents = open(file, 'rb').read()
+        contents = open(file, "rb").read()
     except UnicodeDecodeError as e:
         print(f"{file}: not valid UTF-8 - {e}", file=sys.stderr)
     failures = 0
 
     def line_number(offset):
-        return 1 + contents[:offset].count(b'\n')
+        return 1 + contents[:offset].count(b"\n")
 
     def replace_one(capture, line, offset):
         """Text to replace a capture group, e.g. 42 => %(line+1)"""
@@ -65,37 +83,40 @@ def replace_one(capture, line, offset):
             return capture
 
         if args.near > 0 and abs(target - line) > args.near:
-            print(f"{file}:{line}: target line {target} is farther than {args.near}", file=sys.stderr)
+            print(
+                f"{file}:{line}: target line {target} is farther than {args.near}",
+                file=sys.stderr,
+            )
             return capture
         if target > line:
-            delta = '+' + str(target - line)
+            delta = "+" + str(target - line)
         elif target < line:
-            delta = '-' + str(line - target)
+            delta = "-" + str(line - target)
         else:
-            delta = ''
+            delta = ""
 
-        prefix = contents[:offset].rsplit(b'\n')[-1]
-        is_lit = b'RUN' in prefix or b'DEFINE' in prefix
-        text = ('%(line{0})' if is_lit else '[[@LINE{0}]]').format(delta)
+        prefix = contents[:offset].rsplit(b"\n")[-1]
+        is_lit = b"RUN" in prefix or b"DEFINE" in prefix
+        text = ("%(line{0})" if is_lit else "[[@LINE{0}]]").format(delta)
         if args.verbose:
             print(f"{file}:{line}: {0} ==> {text}")
         return b(text)
 
     def replace_match(m):
         """Text to replace a whole match, e.g. --at=42:3 => --at=%(line+2):3"""
-        line = 1 + contents[:m.start()].count(b'\n')
-        result = b''
+        line = 1 + contents[: m.start()].count(b"\n")
+        result = b""
         pos = m.start()
         for index, capture in enumerate(m.groups()):
-            index += 1 # re groups are conventionally 1-indexed
-            result += contents[pos:m.start(index)]
+            index += 1  # re groups are conventionally 1-indexed
+            result += contents[pos : m.start(index)]
             replacement = replace_one(capture, line, m.start(index))
             result += replacement
             if replacement == capture:
                 global failures
                 failures += 1
             pos = m.end(index)
-        result += contents[pos:m.end()]
+        result += contents[pos : m.end()]
         return result
 
     for pattern in args.pattern:
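
The rewrite performed by replace_one() above is easiest to see in isolation.
A sketch with hypothetical names (relative, on_run_line); it mirrors the delta
computation and the lit-vs-FileCheck choice but is not part of
relative_lines.py:

    def relative(target, line, on_run_line):
        # Absolute line `target`, seen on line `line`, becomes a relative
        # reference: lit's %(line+N) on RUN/DEFINE lines, FileCheck's
        # [[@LINE+N]] everywhere else.
        delta = target - line
        suffix = "" if delta == 0 else "{:+d}".format(delta)
        return ("%(line{})" if on_run_line else "[[@LINE{}]]").format(suffix)

    assert relative(44, 42, True) == "%(line+2)"
    assert relative(41, 42, False) == "[[@LINE-1]]"
    assert relative(42, 42, True) == "%(line)"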

diff --git a/llvm/utils/release/bump-version.py b/llvm/utils/release/bump-version.py
index f064e4e93be01..abff67ae926ac 100755
--- a/llvm/utils/release/bump-version.py
+++ b/llvm/utils/release/bump-version.py
@@ -68,7 +68,9 @@ def process_line(self, line: str) -> str:
                     line,
                 )
             else:
-                nline = re.sub(r"set\(LLVM_VERSION_SUFFIX(.*)\)", f"set(LLVM_VERSION_SUFFIX)", line)
+                nline = re.sub(
+                    r"set\(LLVM_VERSION_SUFFIX(.*)\)", f"set(LLVM_VERSION_SUFFIX)", line
+                )
 
         # Check the rest of the LLVM_VERSION_ lines.
         elif "set(LLVM_VERSION_" in line:
@@ -78,8 +80,8 @@ def process_line(self, line: str) -> str:
                 ("PATCH", self.patch),
             ):
                 nline = re.sub(
-                    fr"set\(LLVM_VERSION_{c} (\d+)",
-                    fr"set(LLVM_VERSION_{c} {cver}",
+                    rf"set\(LLVM_VERSION_{c} (\d+)",
+                    rf"set(LLVM_VERSION_{c} {cver}",
                     line,
                 )
                 if nline != line:
@@ -97,7 +99,9 @@ def process_line(self, line: str) -> str:
                 ("minor", self.minor),
                 ("patch", self.patch),
             ):
-                nline = re.sub(fr"llvm_version_{c} = \d+", f"llvm_version_{c} = {cver}", line)
+                nline = re.sub(
+                    rf"llvm_version_{c} = \d+", f"llvm_version_{c} = {cver}", line
+                )
                 if nline != line:
                     return nline
 
@@ -109,7 +113,7 @@ class LitProcessor(Processor):
     def process_line(self, line: str) -> str:
         if "__versioninfo__" in line:
             nline = re.sub(
-                fr"__versioninfo__(.*)\((\d+), (\d+), (\d+)\)",
+                rf"__versioninfo__(.*)\((\d+), (\d+), (\d+)\)",
                 f"__versioninfo__\\1({self.major}, {self.minor}, {self.patch})",
                 line,
             )
@@ -126,7 +130,7 @@ def process_line(self, line: str) -> str:
             verstr = f"{str(self.major).zfill(2)}{str(self.minor).zfill(2)}{str(self.patch).zfill(2)}"
 
             nline = re.sub(
-                fr"_LIBCPP_VERSION (\d+)",
+                rf"_LIBCPP_VERSION (\d+)",
                 f"_LIBCPP_VERSION {verstr}",
                 line,
             )
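
Each processor above is a family of re.sub() rewrites keyed on a version
component. One of the CMake substitutions in isolation (bump is a hypothetical
wrapper, not part of bump-version.py):

    import re

    def bump(line, part, value):
        # Rewrite one set(LLVM_VERSION_<PART> N) component in a CMake line,
        # leaving everything else untouched.
        return re.sub(
            rf"set\(LLVM_VERSION_{part} (\d+)",
            rf"set(LLVM_VERSION_{part} {value}",
            line,
        )

    assert bump("  set(LLVM_VERSION_MAJOR 16)", "MAJOR", 17) == "  set(LLVM_VERSION_MAJOR 17)"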

diff --git a/llvm/utils/release/findRegressions-nightly.py b/llvm/utils/release/findRegressions-nightly.py
index e7e13b0838124..1fcc54d33deb6 100755
--- a/llvm/utils/release/findRegressions-nightly.py
+++ b/llvm/utils/release/findRegressions-nightly.py
@@ -4,123 +4,127 @@
 import re, string, sys, os, time
 
 DEBUG = 0
-testDirName = 'llvm-test'
-test      = ['compile', 'llc', 'jit', 'cbe']
-exectime     = ['llc-time', 'jit-time', 'cbe-time',]
-comptime     = ['llc', 'jit-comptime', 'compile']
+testDirName = "llvm-test"
+test = ["compile", "llc", "jit", "cbe"]
+exectime = [
+    "llc-time",
+    "jit-time",
+    "cbe-time",
+]
+comptime = ["llc", "jit-comptime", "compile"]
+
+(tp, exp) = ("compileTime_", "executeTime_")
 
-(tp, exp) = ('compileTime_', 'executeTime_')
 
 def parse(file):
-  f=open(file, 'r')
-  d = f.read()
-  
-  #Cleanup weird stuff
-  d = re.sub(r',\d+:\d','', d)
-   
-  r = re.findall(r'TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n', d)
-   
-  test = {}
-  fname = ''
-  for t in r:
-    if DEBUG:
-      print(t)
-    if t[0] == 'PASS' or t[0] == 'FAIL' :
-      tmp = t[2].split(testDirName)
-      
-      if DEBUG:
-        print(tmp)
-      
-      if len(tmp) == 2:
-        fname = tmp[1].strip('\r\n')
-      else:
-        fname = tmp[0].strip('\r\n')
-      
-      if fname not in test :
-        test[fname] = {}
-      
-      for k in test:
-        test[fname][k] = 'NA'
-        test[fname][t[1]] = t[0]
-        if DEBUG:
-          print(test[fname][t[1]])
-    else :
-      try:
-        n = t[0].split('RESULT-')[1]
-        
+    f = open(file, "r")
+    d = f.read()
+
+    # Cleanup weird stuff
+    d = re.sub(r",\d+:\d", "", d)
+
+    r = re.findall(r"TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n", d)
+
+    test = {}
+    fname = ""
+    for t in r:
         if DEBUG:
-          print(n);
-        
-        if n == 'llc' or n == 'jit-comptime' or n == 'compile':
-          test[fname][tp + n] = float(t[2].split(' ')[2])
-          if DEBUG:
-            print(test[fname][tp + n])
-        
-        elif n.endswith('-time') :
-            test[fname][exp + n] = float(t[2].strip('\r\n'))
+            print(t)
+        if t[0] == "PASS" or t[0] == "FAIL":
+            tmp = t[2].split(testDirName)
+
             if DEBUG:
-              print(test[fname][exp + n])
-        
-        else :
-          print("ERROR!")
-          sys.exit(1)
-      
-      except:
-          continue
+                print(tmp)
+
+            if len(tmp) == 2:
+                fname = tmp[1].strip("\r\n")
+            else:
+                fname = tmp[0].strip("\r\n")
+
+            if fname not in test:
+                test[fname] = {}
+
+            for k in test:
+                test[fname][k] = "NA"
+                test[fname][t[1]] = t[0]
+                if DEBUG:
+                    print(test[fname][t[1]])
+        else:
+            try:
+                n = t[0].split("RESULT-")[1]
+
+                if DEBUG:
+                    print(n)
+
+                if n == "llc" or n == "jit-comptime" or n == "compile":
+                    test[fname][tp + n] = float(t[2].split(" ")[2])
+                    if DEBUG:
+                        print(test[fname][tp + n])
+
+                elif n.endswith("-time"):
+                    test[fname][exp + n] = float(t[2].strip("\r\n"))
+                    if DEBUG:
+                        print(test[fname][exp + n])
+
+                else:
+                    print("ERROR!")
+                    sys.exit(1)
+
+            except:
+                continue
+
+    return test
 
-  return test
 
 # Diff results and look for regressions.
 def diffResults(d_old, d_new):
 
-  for t in sorted(d_old.keys()) :
-    if DEBUG:
-      print(t)
-        
-    if t in d_new :
-    
-      # Check if the test passed or failed.
-      for x in test:
-        if x in d_old[t]:
-          if x in d_new[t]:
-            if d_old[t][x] == 'PASS':
-              if d_new[t][x] != 'PASS':
-                print(t + " *** REGRESSION (" + x + ")\n")
-            else:
-              if d_new[t][x] == 'PASS':
-                print(t + " * NEW PASS (" + x + ")\n")
-                
-          else :
-            print(t + "*** REGRESSION (" + x + ")\n")
-        
-        # For execution time, if there is no result, it's a fail.
-        for x in exectime:
-          if tp + x in d_old[t]:
-            if tp + x not in d_new[t]:
-              print(t + " *** REGRESSION (" + tp + x + ")\n")
-                
-          else :
-            if tp + x in d_new[t]:
-              print(t + " * NEW PASS (" + tp + x + ")\n")
-
-       
-        for x in comptime:
-          if exp + x in d_old[t]:
-            if exp + x not in d_new[t]:
-              print(t + " *** REGRESSION (" + exp + x + ")\n")
-                
-          else :
-            if exp + x in d_new[t]:
-              print(t + " * NEW PASS (" + exp + x + ")\n")
-              
-    else :
-      print(t + ": Removed from test-suite.\n")
-    
-
-#Main
-if len(sys.argv) < 3 :
-    print('Usage:', sys.argv[0], \
-          '<old log> <new log>')
+    for t in sorted(d_old.keys()):
+        if DEBUG:
+            print(t)
+
+        if t in d_new:
+
+            # Check if the test passed or failed.
+            for x in test:
+                if x in d_old[t]:
+                    if x in d_new[t]:
+                        if d_old[t][x] == "PASS":
+                            if d_new[t][x] != "PASS":
+                                print(t + " *** REGRESSION (" + x + ")\n")
+                        else:
+                            if d_new[t][x] == "PASS":
+                                print(t + " * NEW PASS (" + x + ")\n")
+
+                    else:
+                        print(t + "*** REGRESSION (" + x + ")\n")
+
+                # For execution time, if there is no result, it's a fail.
+                for x in exectime:
+                    if tp + x in d_old[t]:
+                        if tp + x not in d_new[t]:
+                            print(t + " *** REGRESSION (" + tp + x + ")\n")
+
+                    else:
+                        if tp + x in d_new[t]:
+                            print(t + " * NEW PASS (" + tp + x + ")\n")
+
+                for x in comptime:
+                    if exp + x in d_old[t]:
+                        if exp + x not in d_new[t]:
+                            print(t + " *** REGRESSION (" + exp + x + ")\n")
+
+                    else:
+                        if exp + x in d_new[t]:
+                            print(t + " * NEW PASS (" + exp + x + ")\n")
+
+        else:
+            print(t + ": Removed from test-suite.\n")
+
+
+# Main
+if len(sys.argv) < 3:
+    print("Usage:", sys.argv[0], "<old log> <new log>")
     sys.exit(-1)
 
 d_old = parse(sys.argv[1])
@@ -128,5 +132,3 @@ def diffResults(d_old, d_new):
 
 
 
 diffResults(d_old, d_new)
-
-
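
The TEST- line regex used by parse() is clearer against concrete input (the
two log lines below are fabricated):

    import re

    pat = r"TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n"
    log = (
        "TEST-PASS: llc /llvm-test/SingleSource/foo\n"
        "TEST-RESULT-llc-time: program 1.23\n"
    )
    print(re.findall(pat, log))
    # [('PASS', 'llc', '/llvm-test/SingleSource/foo'),
    #  ('RESULT-llc-time', 'program', '1.23')]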

diff --git a/llvm/utils/release/findRegressions-simple.py b/llvm/utils/release/findRegressions-simple.py
index 7bd1523b58faa..81fbfa980a3a6 100755
--- a/llvm/utils/release/findRegressions-simple.py
+++ b/llvm/utils/release/findRegressions-simple.py
@@ -5,154 +5,171 @@
 
 DEBUG = 0
 
-(tp, exp) = ('compile', 'exec')
+(tp, exp) = ("compile", "exec")
 
-def parse(file):
-  f = open(file, 'r')
-  d = f.read()
-  
-  # Cleanup weird stuff
-  d = re.sub(r',\d+:\d', '', d)
-
-  r = re.findall(r'TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n', d)
-
-  test = {}
-  fname = ''
-  for t in r:
-    if DEBUG:
-      print(t)
-
-    if t[0] == 'PASS' or t[0] == 'FAIL' :
-      tmp = t[2].split('llvm-test/')
-      
-      if DEBUG:
-        print(tmp)
-
-      if len(tmp) == 2:
-        fname = tmp[1].strip('\r\n')
-      else:
-        fname = tmp[0].strip('\r\n')
-
-      if fname not in test:
-        test[fname] = {}
-
-      test[fname][t[1] + ' state'] = t[0]
-      test[fname][t[1] + ' time'] = float('nan')
-    else :
-      try:
-        n = t[0].split('RESULT-')[1]
-
-        if DEBUG:
-          print("n == ", n);
-        
-        if n == 'compile-success':
-          test[fname]['compile time'] = float(t[2].split('program')[1].strip('\r\n'))
-
-        elif n == 'exec-success':
-          test[fname]['exec time'] = float(t[2].split('program')[1].strip('\r\n'))
-          if DEBUG:
-            print(test[fname][n.replace('-success', '')])
-
-        else :
-          # print "ERROR!"
-          sys.exit(1)
 
-      except:
-          continue
-
-  return test
-
-# Diff results and look for regressions.
-def diffResults(d_old, d_new):
-  regressions = {}
-  passes = {}
-  removed = ''
+def parse(file):
+    f = open(file, "r")
+    d = f.read()
 
-  for x in ['compile state', 'compile time', 'exec state', 'exec time']:
-    regressions[x] = ''
-    passes[x] = ''
+    # Cleanup weird stuff
+    d = re.sub(r",\d+:\d", "", d)
 
-  for t in sorted(d_old.keys()) :
-    if t in d_new:
+    r = re.findall(r"TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n", d)
 
-      # Check if the test passed or failed.
-      for x in ['compile state', 'compile time', 'exec state', 'exec time']:
+    test = {}
+    fname = ""
+    for t in r:
+        if DEBUG:
+            print(t)
 
-        if x not in d_old[t] and x not in d_new[t]:
-          continue
+        if t[0] == "PASS" or t[0] == "FAIL":
+            tmp = t[2].split("llvm-test/")
 
-        if x in d_old[t]:
-          if x in d_new[t]:
+            if DEBUG:
+                print(tmp)
 
-            if d_old[t][x] == 'PASS':
-              if d_new[t][x] != 'PASS':
-                regressions[x] += t + "\n"
+            if len(tmp) == 2:
+                fname = tmp[1].strip("\r\n")
             else:
-              if d_new[t][x] == 'PASS':
-                passes[x] += t + "\n"
-
-          else :
-            regressions[x] += t + "\n"
-
-        if x == 'compile state' or x == 'exec state':
-          continue
+                fname = tmp[0].strip("\r\n")
 
-        # For execution time, if there is no result it's a fail.
-        if x not in d_old[t] and x not in d_new[t]:
-          continue
-        elif x not in d_new[t]:
-          regressions[x] += t + "\n"
-        elif x not in d_old[t]:
-          passes[x] += t + "\n"
+            if fname not in test:
+                test[fname] = {}
 
-        if math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
-          continue
+            test[fname][t[1] + " state"] = t[0]
+            test[fname][t[1] + " time"] = float("nan")
+        else:
+            try:
+                n = t[0].split("RESULT-")[1]
 
-        elif math.isnan(d_old[t][x]) and not math.isnan(d_new[t][x]):
-          passes[x] += t + "\n"
+                if DEBUG:
+                    print("n == ", n)
 
-        elif not math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
-          regressions[x] += t + ": NaN%\n"
+                if n == "compile-success":
+                    test[fname]["compile time"] = float(
+                        t[2].split("program")[1].strip("\r\n")
+                    )
 
-        if d_new[t][x] > d_old[t][x] and d_old[t][x] > 0.0 and \
-              (d_new[t][x] - d_old[t][x]) / d_old[t][x] > .05:
-          regressions[x] += t + ": " + "{0:.1f}".format(100 * (d_new[t][x] - d_old[t][x]) / d_old[t][x]) + "%\n"
+                elif n == "exec-success":
+                    test[fname]["exec time"] = float(
+                        t[2].split("program")[1].strip("\r\n")
+                    )
+                    if DEBUG:
+                        print(test[fname][n.replace("-success", "")])
 
-    else :
-      removed += t + "\n"
+                else:
+                    # print "ERROR!"
+                    sys.exit(1)
 
-  if len(regressions['compile state']) != 0:
-    print('REGRESSION: Compilation Failed')
-    print(regressions['compile state'])
+            except:
+                continue
 
-  if len(regressions['exec state']) != 0:
-    print('REGRESSION: Execution Failed')
-    print(regressions['exec state'])
+    return test
 
-  if len(regressions['compile time']) != 0:
-    print('REGRESSION: Compilation Time')
-    print(regressions['compile time'])
 
-  if len(regressions['exec time']) != 0:
-    print('REGRESSION: Execution Time')
-    print(regressions['exec time'])
-
-  if len(passes['compile state']) != 0:
-    print('NEW PASSES: Compilation')
-    print(passes['compile state'])
-
-  if len(passes['exec state']) != 0:
-    print('NEW PASSES: Execution')
-    print(passes['exec state'])
+# Diff results and look for regressions.
+def diffResults(d_old, d_new):
+    regressions = {}
+    passes = {}
+    removed = ""
+
+    for x in ["compile state", "compile time", "exec state", "exec time"]:
+        regressions[x] = ""
+        passes[x] = ""
+
+    for t in sorted(d_old.keys()):
+        if t in d_new:
+
+            # Check if the test passed or failed.
+            for x in ["compile state", "compile time", "exec state", "exec time"]:
+
+                if x not in d_old[t] and x not in d_new[t]:
+                    continue
+
+                if x in d_old[t]:
+                    if x in d_new[t]:
+
+                        if d_old[t][x] == "PASS":
+                            if d_new[t][x] != "PASS":
+                                regressions[x] += t + "\n"
+                        else:
+                            if d_new[t][x] == "PASS":
+                                passes[x] += t + "\n"
+
+                    else:
+                        regressions[x] += t + "\n"
+
+                if x == "compile state" or x == "exec state":
+                    continue
+
+                # For execution time, if there is no result it's a fail.
+                if x not in d_old[t] and x not in d_new[t]:
+                    continue
+                elif x not in d_new[t]:
+                    regressions[x] += t + "\n"
+                elif x not in d_old[t]:
+                    passes[x] += t + "\n"
+
+                if math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
+                    continue
+
+                elif math.isnan(d_old[t][x]) and not math.isnan(d_new[t][x]):
+                    passes[x] += t + "\n"
+
+                elif not math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
+                    regressions[x] += t + ": NaN%\n"
+
+                if (
+                    d_new[t][x] > d_old[t][x]
+                    and d_old[t][x] > 0.0
+                    and (d_new[t][x] - d_old[t][x]) / d_old[t][x] > 0.05
+                ):
+                    regressions[x] += (
+                        t
+                        + ": "
+                        + "{0:.1f}".format(
+                            100 * (d_new[t][x] - d_old[t][x]) / d_old[t][x]
+                        )
+                        + "%\n"
+                    )
+
+        else:
+            removed += t + "\n"
+
+    if len(regressions["compile state"]) != 0:
+        print("REGRESSION: Compilation Failed")
+        print(regressions["compile state"])
+
+    if len(regressions["exec state"]) != 0:
+        print("REGRESSION: Execution Failed")
+        print(regressions["exec state"])
+
+    if len(regressions["compile time"]) != 0:
+        print("REGRESSION: Compilation Time")
+        print(regressions["compile time"])
+
+    if len(regressions["exec time"]) != 0:
+        print("REGRESSION: Execution Time")
+        print(regressions["exec time"])
+
+    if len(passes["compile state"]) != 0:
+        print("NEW PASSES: Compilation")
+        print(passes["compile state"])
+
+    if len(passes["exec state"]) != 0:
+        print("NEW PASSES: Execution")
+        print(passes["exec state"])
+
+    if len(removed) != 0:
+        print("REMOVED TESTS")
+        print(removed)
 
-  if len(removed) != 0:
-    print('REMOVED TESTS')
-    print(removed)
 
 # Main
-if len(sys.argv) < 3 :
-  print('Usage:', sys.argv[0], '<old log> <new log>')
-  sys.exit(-1)
+if len(sys.argv) < 3:
+    print("Usage:", sys.argv[0], "<old log> <new log>")
+    sys.exit(-1)
 
 d_old = parse(sys.argv[1])
 d_new = parse(sys.argv[2])
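
The slowdown check in diffResults() flags a regression only when the old time
is positive and the new time exceeds it by more than 5%. A worked instance of
the arithmetic:

    d_old, d_new = 2.00, 2.15
    ratio = (d_new - d_old) / d_old        # 0.075 -> regression
    assert ratio > 0.05
    print("{0:.1f}%".format(100 * ratio))  # 7.5%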

diff --git a/llvm/utils/release/github-upload-release.py b/llvm/utils/release/github-upload-release.py
index f0570a9e99ce6..86a71368dd843 100755
--- a/llvm/utils/release/github-upload-release.py
+++ b/llvm/utils/release/github-upload-release.py
@@ -5,10 +5,10 @@
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 #
 # Create and manage releases in the llvm github project.
-# 
+#
 # This script requires python3 and the PyGithub module.
 #
 # Example Usage:
@@ -25,53 +25,53 @@
 #
 # You can upload as many files as you want at a time and use wildcards e.g.
 # ./github-upload-release.py --token $github_token --release 8.0.1-rc4 upload --files *.src.*
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 
 import argparse
 import github
 
-def create_release(repo, release, tag = None, name = None, message = None):
+
+def create_release(repo, release, tag=None, name=None, message=None):
     if not tag:
-        tag = 'llvmorg-{}'.format(release)
+        tag = "llvmorg-{}".format(release)
 
     if not name:
-        name = 'LLVM {}'.format(release)
+        name = "LLVM {}".format(release)
 
     if not message:
-        message = 'LLVM {} Release'.format(release)
+        message = "LLVM {} Release".format(release)
 
     prerelease = True if "rc" in release else False
 
-    repo.create_git_release(tag = tag, name = name, message = message,
-                            prerelease = prerelease)
+    repo.create_git_release(tag=tag, name=name, message=message, prerelease=prerelease)
+
 
 def upload_files(repo, release, files):
-    release = repo.get_release('llvmorg-{}'.format(release))
+    release = repo.get_release("llvmorg-{}".format(release))
     for f in files:
-        print('Uploading {}'.format(f))
+        print("Uploading {}".format(f))
         release.upload_asset(f)
         print("Done")
-    
 
 
 parser = argparse.ArgumentParser()
-parser.add_argument('command', type=str, choices=['create', 'upload'])
+parser.add_argument("command", type=str, choices=["create", "upload"])
 
 # All args
-parser.add_argument('--token', type=str)
-parser.add_argument('--release', type=str)
+parser.add_argument("--token", type=str)
+parser.add_argument("--release", type=str)
 
 # Upload args
-parser.add_argument('--files', nargs='+', type=str)
+parser.add_argument("--files", nargs="+", type=str)
 
 
 args = parser.parse_args()
 
 github = github.Github(args.token)
-llvm_repo = github.get_organization('llvm').get_repo('llvm-project')
+llvm_repo = github.get_organization("llvm").get_repo("llvm-project")
 
-if args.command == 'create':
+if args.command == "create":
     create_release(llvm_repo, args.release)
-if args.command == 'upload':
+if args.command == "upload":
     upload_files(llvm_repo, args.release, args.files)

diff --git a/llvm/utils/remote-exec.py b/llvm/utils/remote-exec.py
index 37b261cca7ee4..636ed9e9548a9 100644
--- a/llvm/utils/remote-exec.py
+++ b/llvm/utils/remote-exec.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 
 """
 Runs an executable on a remote host.
@@ -24,28 +24,29 @@
 import tempfile
 import re
 
+
 def ssh(args, command):
-    cmd = ['ssh', '-oBatchMode=yes']
+    cmd = ["ssh", "-oBatchMode=yes"]
     if args.extra_ssh_args is not None:
         cmd.extend(shlex.split(args.extra_ssh_args))
     return cmd + [args.host, command]
 
 
 def scp(args, src, dst):
-    cmd = ['scp', '-q', '-oBatchMode=yes']
+    cmd = ["scp", "-q", "-oBatchMode=yes"]
     if args.extra_scp_args is not None:
         cmd.extend(shlex.split(args.extra_scp_args))
-    return cmd + [src, '{}:{}'.format(args.host, dst)]
+    return cmd + [src, "{}:{}".format(args.host, dst)]
 
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--host', type=str, required=True)
-    parser.add_argument('--execdir', type=str, required=False)
-    parser.add_argument('--extra-ssh-args', type=str, required=False)
-    parser.add_argument('--extra-scp-args', type=str, required=False)
-    parser.add_argument('--codesign_identity', type=str, required=False, default=None)
-    parser.add_argument('--env', type=str, nargs='*', required=False, default=dict())
+    parser.add_argument("--host", type=str, required=True)
+    parser.add_argument("--execdir", type=str, required=False)
+    parser.add_argument("--extra-ssh-args", type=str, required=False)
+    parser.add_argument("--extra-scp-args", type=str, required=False)
+    parser.add_argument("--codesign_identity", type=str, required=False, default=None)
+    parser.add_argument("--env", type=str, nargs="*", required=False, default=dict())
 
     # Note: The default value is for backward compatibility with a hack in
     # the libcxx test suite.
@@ -55,24 +56,29 @@ def main():
     # and changing their path when running on the remote host. It's also possible
     # for there to be no such executable, for example in the case of a .sh.cpp
     # test.
-    parser.add_argument('--exec-pattern', type=str, required=False, default='.*',
-                        help='The name regex pattern of the executables generated by \
+    parser.add_argument(
+        "--exec-pattern",
+        type=str,
+        required=False,
+        default=".*",
+        help="The name regex pattern of the executables generated by \
                               a test file. Specifying it allows us to do custom \
                               processing like codesigning test-executables \
                               and changing their path when running on \
-                              the remote host. It\'s also possible for there \
+                              the remote host. It's also possible for there \
                               to be no such executable, for example in \
-                              the case of a .sh.cpp test.')
+                              the case of a .sh.cpp test.",
+    )
 
     parser.add_argument("command", nargs=argparse.ONE_OR_MORE)
     args = parser.parse_args()
     commandLine = args.command
 
     execdir = args.execdir
-    if execdir == '.':
+    if execdir == ".":
         # Retrieve the exec directory from the command line.
         execdir, _ = os.path.split(commandLine[0])
-        if execdir == '':
+        if execdir == "":
             # Get the current directory in that case.
             execdir = os.getcwd()
     arcname = os.path.basename(execdir) if execdir else None
@@ -80,9 +86,8 @@ def main():
     # Create a temporary directory where the test will be run.
     # That is effectively the value of %T on the remote host.
     tmp = subprocess.check_output(
-                  ssh(args, 'mktemp -d /tmp/llvm.XXXXXXXXXX'),
-                  universal_newlines=True
-              ).strip()
+        ssh(args, "mktemp -d /tmp/llvm.XXXXXXXXXX"), universal_newlines=True
+    ).strip()
 
     isExecutable = lambda exe: re.match(args.exec_pattern, exe) and os.path.exists(exe)
     pathOnRemote = lambda file: posixpath.join(tmp, os.path.basename(file))
@@ -94,15 +99,16 @@ def main():
         if args.codesign_identity:
             for exe in filter(isExecutable, commandLine):
                 subprocess.check_call(
-                    ['xcrun', 'codesign', '-f', '-s', args.codesign_identity, exe],
-                    env={})
+                    ["xcrun", "codesign", "-f", "-s", args.codesign_identity, exe],
+                    env={},
+                )
 
         # tar up the execution directory (which contains everything that's needed
         # to run the test), and copy the tarball over to the remote host.
         if execdir:
             try:
-                tmpTar = tempfile.NamedTemporaryFile(suffix='.tar', delete=False)
-                with tarfile.open(fileobj=tmpTar, mode='w') as tarball:
+                tmpTar = tempfile.NamedTemporaryFile(suffix=".tar", delete=False)
+                with tarfile.open(fileobj=tmpTar, mode="w") as tarball:
                     tarball.add(execdir, arcname=arcname)
 
                 # Make sure we close the file before we scp it, because accessing
@@ -117,10 +123,12 @@ def main():
                 os.remove(tmpTar.name)
 
             # Untar the dependencies in the temporary directory and remove the tarball.
-            remoteCommands.extend([
-                'tar -xf {} -C {} --strip-components 1'.format(remoteTarball, tmp),
-                'rm {}'.format(remoteTarball)
-            ])
+            remoteCommands.extend(
+                [
+                    "tar -xf {} -C {} --strip-components 1".format(remoteTarball, tmp),
+                    "rm {}".format(remoteTarball),
+                ]
+            )
         else:
             # Copy only the files, which are specified in the command line.
             # Copy them to remote host one by one.
@@ -132,7 +140,7 @@ def main():
         # permissions on the remote host. The host that compiled the test-executable
         # might not have a notion of 'executable' permissions.
         for exe in filter(isExecutable, commandLine):
-            remoteCommands.append('chmod +x {}'.format(pathOnRemote(exe)))
+            remoteCommands.append("chmod +x {}".format(pathOnRemote(exe)))
 
         # Execute the command through SSH in the temporary directory, with the
         # correct environment. We tweak the command line to run it on the remote
@@ -141,19 +149,19 @@ def main():
         for i, x in enumerate(commandLine):
             if isExecutable(x):
                 commandLine[i] = pathOnRemote(x)
-        remoteCommands.append('cd {}'.format(tmp))
+        remoteCommands.append("cd {}".format(tmp))
         if args.env:
-            remoteCommands.append('export {}'.format(' '.join(args.env)))
+            remoteCommands.append("export {}".format(" ".join(args.env)))
         remoteCommands.append(subprocess.list2cmdline(commandLine))
 
         # Finally, SSH to the remote host and execute all the commands.
-        rc = subprocess.call(ssh(args, ' && '.join(remoteCommands)))
+        rc = subprocess.call(ssh(args, " && ".join(remoteCommands)))
         return rc
 
     finally:
         # Make sure the temporary directory is removed when we're done.
-        subprocess.check_call(ssh(args, 'rm -r {}'.format(tmp)))
+        subprocess.check_call(ssh(args, "rm -r {}".format(tmp)))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     exit(main())
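
What the two helpers at the top of this file produce, for made-up argument
values (assumes the ssh() and scp() definitions above):

    import argparse

    args = argparse.Namespace(
        host="buildbot", extra_ssh_args=None, extra_scp_args="-P 2222"
    )
    print(ssh(args, "mktemp -d /tmp/llvm.XXXXXXXXXX"))
    # ['ssh', '-oBatchMode=yes', 'buildbot', 'mktemp -d /tmp/llvm.XXXXXXXXXX']
    print(scp(args, "test.tar", "/tmp/llvm.abc"))
    # ['scp', '-q', '-oBatchMode=yes', '-P', '2222', 'test.tar', 'buildbot:/tmp/llvm.abc']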

diff --git a/llvm/utils/revert_checker.py b/llvm/utils/revert_checker.py
index ef0c06bd1b5a4..34395a6fe5057 100755
--- a/llvm/utils/revert_checker.py
+++ b/llvm/utils/revert_checker.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 """Checks for reverts of commits across a given git commit.
 
 To clarify the meaning of 'across' with an example, if we had the following
@@ -47,7 +47,7 @@
 import sys
 from typing import Generator, List, NamedTuple, Iterable
 
-assert sys.version_info >= (3, 6), 'Only Python 3.6+ is supported.'
+assert sys.version_info >= (3, 6), "Only Python 3.6+ is supported."
 
 # People are creative with their reverts, and heuristics are a bit difficult.
 # Like 90% of reverts have "This reverts commit ${full_sha}".
@@ -59,213 +59,243 @@
 
 
 def _try_parse_reverts_from_commit_message(commit_message: str) -> List[str]:
-  if not commit_message:
-    return []
+    if not commit_message:
+        return []
 
-  results = re.findall(r'This reverts commit ([a-f0-9]{40})\b', commit_message)
+    results = re.findall(r"This reverts commit ([a-f0-9]{40})\b", commit_message)
 
-  first_line = commit_message.splitlines()[0]
-  initial_revert = re.match(r'Revert ([a-f0-9]{6,}) "', first_line)
-  if initial_revert:
-    results.append(initial_revert.group(1))
-  return results
+    first_line = commit_message.splitlines()[0]
+    initial_revert = re.match(r'Revert ([a-f0-9]{6,}) "', first_line)
+    if initial_revert:
+        results.append(initial_revert.group(1))
+    return results
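# (Illustrative only: the two patterns above, applied to a fabricated commit
# message with fake SHAs.)
#
#   msg = 'Revert abc123 "Change"\n\nThis reverts commit ' + 40 * "a" + "."
#   _try_parse_reverts_from_commit_message(msg)
#   # -> ['aaaa...a' (the 40-char fake SHA), 'abc123']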
 
 
 def _stream_stdout(command: List[str]) -> Generator[str, None, None]:
-  with subprocess.Popen(
-      command, stdout=subprocess.PIPE, encoding='utf-8', errors='replace') as p:
-    assert p.stdout is not None  # for mypy's happiness.
-    yield from p.stdout
+    with subprocess.Popen(
+        command, stdout=subprocess.PIPE, encoding="utf-8", errors="replace"
+    ) as p:
+        assert p.stdout is not None  # for mypy's happiness.
+        yield from p.stdout
 
 
 def _resolve_sha(git_dir: str, sha: str) -> str:
-  if len(sha) == 40:
-    return sha
-
-  return subprocess.check_output(
-      ['git', '-C', git_dir, 'rev-parse', sha],
-      encoding='utf-8',
-      stderr=subprocess.DEVNULL,
-  ).strip()
-
-
-_LogEntry = NamedTuple('_LogEntry', [
-    ('sha', str),
-    ('commit_message', str),
-])
-
-
-def _log_stream(git_dir: str, root_sha: str,
-                end_at_sha: str) -> Iterable[_LogEntry]:
-  sep = 50 * '<>'
-  log_command = [
-      'git',
-      '-C',
-      git_dir,
-      'log',
-      '^' + end_at_sha,
-      root_sha,
-      '--format=' + sep + '%n%H%n%B%n',
-  ]
-
-  stdout_stream = iter(_stream_stdout(log_command))
-
-  # Find the next separator line. If there's nothing to log, it may not exist.
-  # It might not be the first line if git feels complainy.
-  found_commit_header = False
-  for line in stdout_stream:
-    if line.rstrip() == sep:
-      found_commit_header = True
-      break
-
-  while found_commit_header:
-    sha = next(stdout_stream, None)
-    assert sha is not None, 'git died?'
-    sha = sha.rstrip()
-
-    commit_message = []
-
+    if len(sha) == 40:
+        return sha
+
+    return subprocess.check_output(
+        ["git", "-C", git_dir, "rev-parse", sha],
+        encoding="utf-8",
+        stderr=subprocess.DEVNULL,
+    ).strip()
+
+
+_LogEntry = NamedTuple(
+    "_LogEntry",
+    [
+        ("sha", str),
+        ("commit_message", str),
+    ],
+)
+
+
+def _log_stream(git_dir: str, root_sha: str, end_at_sha: str) -> Iterable[_LogEntry]:
+    sep = 50 * "<>"
+    log_command = [
+        "git",
+        "-C",
+        git_dir,
+        "log",
+        "^" + end_at_sha,
+        root_sha,
+        "--format=" + sep + "%n%H%n%B%n",
+    ]
+
+    stdout_stream = iter(_stream_stdout(log_command))
+
+    # Find the next separator line. If there's nothing to log, it may not exist.
+    # It might not be the first line if git feels complainy.
     found_commit_header = False
     for line in stdout_stream:
-      line = line.rstrip()
-      if line.rstrip() == sep:
-        found_commit_header = True
-        break
-      commit_message.append(line)
+        if line.rstrip() == sep:
+            found_commit_header = True
+            break
+
+    while found_commit_header:
+        sha = next(stdout_stream, None)
+        assert sha is not None, "git died?"
+        sha = sha.rstrip()
+
+        commit_message = []
+
+        found_commit_header = False
+        for line in stdout_stream:
+            line = line.rstrip()
+            if line.rstrip() == sep:
+                found_commit_header = True
+                break
+            commit_message.append(line)
 
-    yield _LogEntry(sha, '\n'.join(commit_message).rstrip())
+        yield _LogEntry(sha, "\n".join(commit_message).rstrip())
 
 
 def _shas_between(git_dir: str, base_ref: str, head_ref: str) -> Iterable[str]:
-  rev_list = [
-      'git',
-      '-C',
-      git_dir,
-      'rev-list',
-      '--first-parent',
-      f'{base_ref}..{head_ref}',
-  ]
-  return (x.strip() for x in _stream_stdout(rev_list))
+    rev_list = [
+        "git",
+        "-C",
+        git_dir,
+        "rev-list",
+        "--first-parent",
+        f"{base_ref}..{head_ref}",
+    ]
+    return (x.strip() for x in _stream_stdout(rev_list))
 
 
 def _rev_parse(git_dir: str, ref: str) -> str:
-  return subprocess.check_output(
-      ['git', '-C', git_dir, 'rev-parse', ref],
-      encoding='utf-8',
-  ).strip()
+    return subprocess.check_output(
+        ["git", "-C", git_dir, "rev-parse", ref],
+        encoding="utf-8",
+    ).strip()
 
 
-Revert = NamedTuple('Revert', [
-    ('sha', str),
-    ('reverted_sha', str),
-])
+Revert = NamedTuple(
+    "Revert",
+    [
+        ("sha", str),
+        ("reverted_sha", str),
+    ],
+)
 
 
 def _find_common_parent_commit(git_dir: str, ref_a: str, ref_b: str) -> str:
-  """Finds the closest common parent commit between `ref_a` and `ref_b`."""
-  return subprocess.check_output(
-      ['git', '-C', git_dir, 'merge-base', ref_a, ref_b],
-      encoding='utf-8',
-  ).strip()
+    """Finds the closest common parent commit between `ref_a` and `ref_b`."""
+    return subprocess.check_output(
+        ["git", "-C", git_dir, "merge-base", ref_a, ref_b],
+        encoding="utf-8",
+    ).strip()
 
 
 def find_reverts(git_dir: str, across_ref: str, root: str) -> List[Revert]:
-  """Finds reverts across `across_ref` in `git_dir`, starting from `root`.
-
-  These reverts are returned in order of oldest reverts first.
-  """
-  across_sha = _rev_parse(git_dir, across_ref)
-  root_sha = _rev_parse(git_dir, root)
-
-  common_ancestor = _find_common_parent_commit(git_dir, across_sha, root_sha)
-  if common_ancestor != across_sha:
-    raise ValueError(f"{across_sha} isn't an ancestor of {root_sha} "
-                     '(common ancestor: {common_ancestor})')
-
-  intermediate_commits = set(_shas_between(git_dir, across_sha, root_sha))
-  assert across_sha not in intermediate_commits
-
-  logging.debug('%d commits appear between %s and %s',
-                len(intermediate_commits), across_sha, root_sha)
-
-  all_reverts = []
-  for sha, commit_message in _log_stream(git_dir, root_sha, across_sha):
-    reverts = _try_parse_reverts_from_commit_message(commit_message)
-    if not reverts:
-      continue
-
-    resolved_reverts = sorted(set(_resolve_sha(git_dir, x) for x in reverts))
-    for reverted_sha in resolved_reverts:
-      if reverted_sha in intermediate_commits:
-        logging.debug('Commit %s reverts %s, which happened after %s', sha,
-                      reverted_sha, across_sha)
-        continue
-
-      try:
-        object_type = subprocess.check_output(
-            ['git', '-C', git_dir, 'cat-file', '-t', reverted_sha],
-            encoding='utf-8',
-            stderr=subprocess.DEVNULL,
-        ).strip()
-      except subprocess.CalledProcessError:
-        logging.warning(
-            'Failed to resolve reverted object %s (claimed to be reverted '
-            'by sha %s)', reverted_sha, sha)
-        continue
-
-      if object_type == 'commit':
-        all_reverts.append(Revert(sha, reverted_sha))
-        continue
-
-      logging.error("%s claims to revert %s -- which isn't a commit -- %s", sha,
-                    object_type, reverted_sha)
-
-  # Since `all_reverts` contains reverts in log order (e.g., newer comes before
-  # older), we need to reverse this to keep with our guarantee of older =
-  # earlier in the result.
-  all_reverts.reverse()
-  return all_reverts
+    """Finds reverts across `across_ref` in `git_dir`, starting from `root`.
+
+    These reverts are returned in order of oldest reverts first.
+    """
+    across_sha = _rev_parse(git_dir, across_ref)
+    root_sha = _rev_parse(git_dir, root)
+
+    common_ancestor = _find_common_parent_commit(git_dir, across_sha, root_sha)
+    if common_ancestor != across_sha:
+        raise ValueError(
+            f"{across_sha} isn't an ancestor of {root_sha} "
+            "(common ancestor: {common_ancestor})"
+        )
+
+    intermediate_commits = set(_shas_between(git_dir, across_sha, root_sha))
+    assert across_sha not in intermediate_commits
+
+    logging.debug(
+        "%d commits appear between %s and %s",
+        len(intermediate_commits),
+        across_sha,
+        root_sha,
+    )
+
+    all_reverts = []
+    for sha, commit_message in _log_stream(git_dir, root_sha, across_sha):
+        reverts = _try_parse_reverts_from_commit_message(commit_message)
+        if not reverts:
+            continue
+
+        resolved_reverts = sorted(set(_resolve_sha(git_dir, x) for x in reverts))
+        for reverted_sha in resolved_reverts:
+            if reverted_sha in intermediate_commits:
+                logging.debug(
+                    "Commit %s reverts %s, which happened after %s",
+                    sha,
+                    reverted_sha,
+                    across_sha,
+                )
+                continue
+
+            try:
+                object_type = subprocess.check_output(
+                    ["git", "-C", git_dir, "cat-file", "-t", reverted_sha],
+                    encoding="utf-8",
+                    stderr=subprocess.DEVNULL,
+                ).strip()
+            except subprocess.CalledProcessError:
+                logging.warning(
+                    "Failed to resolve reverted object %s (claimed to be reverted "
+                    "by sha %s)",
+                    reverted_sha,
+                    sha,
+                )
+                continue
+
+            if object_type == "commit":
+                all_reverts.append(Revert(sha, reverted_sha))
+                continue
+
+            logging.error(
+                "%s claims to revert %s -- which isn't a commit, but a %s",
+                sha,
+                reverted_sha,
+                object_type,
+            )
+
+    # Since `all_reverts` contains reverts in log order (e.g., newer comes before
+    # older), we need to reverse this to keep with our guarantee of older =
+    # earlier in the result.
+    all_reverts.reverse()
+    return all_reverts
 
 
 def _main() -> None:
-  parser = argparse.ArgumentParser(
-      description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
-  parser.add_argument(
-      'base_ref', help='Git ref or sha to check for reverts around.')
-  parser.add_argument(
-      '-C', '--git_dir', default='.', help='Git directory to use.')
-  parser.add_argument(
-      'root', nargs='+', help='Root(s) to search for commits from.')
-  parser.add_argument('--debug', action='store_true')
-  parser.add_argument(
-      '-u', '--review_url', action='store_true',
-      help='Format SHAs as llvm review URLs')
-  opts = parser.parse_args()
-
-  logging.basicConfig(
-      format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s',
-      level=logging.DEBUG if opts.debug else logging.INFO,
-  )
-
-  # `root`s can have related history, so we want to filter duplicate commits
-  # out. The overwhelmingly common case is also to have one root, and it's way
-  # easier to reason about output that comes in an order that's meaningful to
-  # git.
-  seen_reverts = set()
-  all_reverts = []
-  for root in opts.root:
-    for revert in find_reverts(opts.git_dir, opts.base_ref, root):
-      if revert not in seen_reverts:
-        seen_reverts.add(revert)
-        all_reverts.append(revert)
-
-  for revert in all_reverts:
-    sha_fmt = (f'https://reviews.llvm.org/rG{revert.sha}'
-               if opts.review_url else revert.sha)
-    reverted_sha_fmt = (f'https://reviews.llvm.org/rG{revert.reverted_sha}'
-                        if opts.review_url else revert.reverted_sha)
-    print(f'{sha_fmt} claims to revert {reverted_sha_fmt}')
-
-
-if __name__ == '__main__':
-  _main()
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("base_ref", help="Git ref or sha to check for reverts around.")
+    parser.add_argument("-C", "--git_dir", default=".", help="Git directory to use.")
+    parser.add_argument("root", nargs="+", help="Root(s) to search for commits from.")
+    parser.add_argument("--debug", action="store_true")
+    parser.add_argument(
+        "-u",
+        "--review_url",
+        action="store_true",
+        help="Format SHAs as llvm review URLs",
+    )
+    opts = parser.parse_args()
+
+    logging.basicConfig(
+        format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s",
+        level=logging.DEBUG if opts.debug else logging.INFO,
+    )
+
+    # `root`s can have related history, so we want to filter duplicate commits
+    # out. The overwhelmingly common case is also to have one root, and it's way
+    # easier to reason about output that comes in an order that's meaningful to
+    # git.
+    seen_reverts = set()
+    all_reverts = []
+    for root in opts.root:
+        for revert in find_reverts(opts.git_dir, opts.base_ref, root):
+            if revert not in seen_reverts:
+                seen_reverts.add(revert)
+                all_reverts.append(revert)
+
+    for revert in all_reverts:
+        sha_fmt = (
+            f"https://reviews.llvm.org/rG{revert.sha}"
+            if opts.review_url
+            else revert.sha
+        )
+        reverted_sha_fmt = (
+            f"https://reviews.llvm.org/rG{revert.reverted_sha}"
+            if opts.review_url
+            else revert.reverted_sha
+        )
+        print(f"{sha_fmt} claims to revert {reverted_sha_fmt}")
+
+
+if __name__ == "__main__":
+    _main()

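For readers skimming the diff: the revert-detection heuristics above boil down to the two regular expressions in _try_parse_reverts_from_commit_message(). A minimal, self-contained sketch of what they match, using a hypothetical commit message (the regexes themselves are copied verbatim from the script):

import re
from typing import List


def parse_reverts(commit_message: str) -> List[str]:
    if not commit_message:
        return []
    # Full-SHA form anywhere in the body: "This reverts commit <40 hex chars>".
    results = re.findall(r"This reverts commit ([a-f0-9]{40})\b", commit_message)
    # Short-SHA form in the subject line: 'Revert <sha> "<original subject>"'.
    first_line = commit_message.splitlines()[0]
    initial_revert = re.match(r'Revert ([a-f0-9]{6,}) "', first_line)
    if initial_revert:
        results.append(initial_revert.group(1))
    return results


# Hypothetical message exercising both patterns:
message = (
    'Revert deadbeef12 "[Example] Teach the pass a new trick"\n'
    "\n"
    "This reverts commit " + 40 * "a" + "."
)
print(parse_reverts(message) == [40 * "a", "deadbeef12"])  # True
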
diff --git a/llvm/utils/revert_checker_test.py b/llvm/utils/revert_checker_test.py
index 6573c25c1a0b9..9d992663c5be8 100755
--- a/llvm/utils/revert_checker_test.py
+++ b/llvm/utils/revert_checker_test.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 """Tests for revert_checker.
 
 Note that these tests require having LLVM's git history available, since our
@@ -24,95 +24,110 @@
 
 
 def get_llvm_project_path() -> str:
-  """Returns the path to llvm-project's root."""
-  my_dir = os.path.dirname(__file__)
-  return os.path.realpath(os.path.join(my_dir, '..', '..'))
+    """Returns the path to llvm-project's root."""
+    my_dir = os.path.dirname(__file__)
+    return os.path.realpath(os.path.join(my_dir, "..", ".."))
 
 
 class _SilencingFilter(logging.Filter):
-  """Silences all log messages.
+    """Silences all log messages.
 
-  Also collects info about log messages that would've been emitted.
-  """
+    Also collects info about log messages that would've been emitted.
+    """
 
-  def __init__(self) -> None:
-    self.messages: List[str] = []
+    def __init__(self) -> None:
+        self.messages: List[str] = []
 
-  def filter(self, record: logging.LogRecord) -> bool:
-    self.messages.append(record.getMessage())
-    return False
+    def filter(self, record: logging.LogRecord) -> bool:
+        self.messages.append(record.getMessage())
+        return False
 
 
 class Test(unittest.TestCase):
-  """Tests for revert_checker."""
-
-  def silence_logging(self) -> _SilencingFilter:
-    root = logging.getLogger()
-    filt = _SilencingFilter()
-    root.addFilter(filt)
-    self.addCleanup(root.removeFilter, filt)
-    return filt
-
-  def test_log_stream_with_known_sha_range(self) -> None:
-    start_sha = 'e241573d5972d34a323fa5c64774c4207340beb3'
-    end_sha = 'a7a37517751ffb0f5529011b4ba96e67fcb27510'
-    commits = [
-        revert_checker._LogEntry(
-            'e241573d5972d34a323fa5c64774c4207340beb3', '\n'.join((
-                '[mlir] NFC: remove IntegerValueSet / MutableIntegerSet',
-                '',
-                'Summary:',
-                '- these are unused and really not needed now given flat '
-                'affine',
-                '  constraints',
-                '',
-                'Differential Revision: https://reviews.llvm.org/D75792',
-            ))),
-        revert_checker._LogEntry(
-            '97572fa6e9daecd648873496fd11f7d1e25a55f0',
-            '[NFC] use hasAnyOperatorName and hasAnyOverloadedOperatorName '
-            'functions in clang-tidy matchers',
-        ),
-    ]
-
-    logs = list(
-        revert_checker._log_stream(
-            get_llvm_project_path(),
-            root_sha=start_sha,
-            end_at_sha=end_sha,
-        ))
-    self.assertEqual(commits, logs)
-
-  def test_reverted_noncommit_object_is_a_nop(self) -> None:
-    log_filter = self.silence_logging()
-    # c9944df916e41b1014dff5f6f75d52297b48ecdc mentions reverting a non-commit
-    # object. It sits between the given base_ref and root.
-    reverts = revert_checker.find_reverts(
-        git_dir=get_llvm_project_path(),
-        across_ref='c9944df916e41b1014dff5f6f75d52297b48ecdc~',
-        root='c9944df916e41b1014dff5f6f75d52297b48ecdc')
-    self.assertEqual(reverts, [])
-
-    complaint = ('Failed to resolve reverted object '
-                 'edd18355be574122aaa9abf58c15d8c50fb085a1')
-    self.assertTrue(
-        any(x.startswith(complaint) for x in log_filter.messages),
-        log_filter.messages)
-
-  def test_known_reverts_across_arbitrary_llvm_rev(self) -> None:
-    reverts = revert_checker.find_reverts(
-        git_dir=get_llvm_project_path(),
-        across_ref='c47f971694be0159ffddfee8a75ae515eba91439',
-        root='9f981e9adf9c8d29bb80306daf08d2770263ade6')
-    self.assertEqual(reverts, [
-        revert_checker.Revert(
-            sha='4e0fe038f438ae1679eae9e156e1f248595b2373',
-            reverted_sha='65b21282c710afe9c275778820c6e3c1cf46734b'),
-        revert_checker.Revert(
-            sha='9f981e9adf9c8d29bb80306daf08d2770263ade6',
-            reverted_sha='4060016fce3e6a0b926ee9fc59e440a612d3a2ec'),
-    ])
-
-
-if __name__ == '__main__':
-  unittest.main()
+    """Tests for revert_checker."""
+
+    def silence_logging(self) -> _SilencingFilter:
+        root = logging.getLogger()
+        filt = _SilencingFilter()
+        root.addFilter(filt)
+        self.addCleanup(root.removeFilter, filt)
+        return filt
+
+    def test_log_stream_with_known_sha_range(self) -> None:
+        start_sha = "e241573d5972d34a323fa5c64774c4207340beb3"
+        end_sha = "a7a37517751ffb0f5529011b4ba96e67fcb27510"
+        commits = [
+            revert_checker._LogEntry(
+                "e241573d5972d34a323fa5c64774c4207340beb3",
+                "\n".join(
+                    (
+                        "[mlir] NFC: remove IntegerValueSet / MutableIntegerSet",
+                        "",
+                        "Summary:",
+                        "- these are unused and really not needed now given flat "
+                        "affine",
+                        "  constraints",
+                        "",
+                        "Differential Revision: https://reviews.llvm.org/D75792",
+                    )
+                ),
+            ),
+            revert_checker._LogEntry(
+                "97572fa6e9daecd648873496fd11f7d1e25a55f0",
+                "[NFC] use hasAnyOperatorName and hasAnyOverloadedOperatorName "
+                "functions in clang-tidy matchers",
+            ),
+        ]
+
+        logs = list(
+            revert_checker._log_stream(
+                get_llvm_project_path(),
+                root_sha=start_sha,
+                end_at_sha=end_sha,
+            )
+        )
+        self.assertEqual(commits, logs)
+
+    def test_reverted_noncommit_object_is_a_nop(self) -> None:
+        log_filter = self.silence_logging()
+        # c9944df916e41b1014dff5f6f75d52297b48ecdc mentions reverting a non-commit
+        # object. It sits between the given base_ref and root.
+        reverts = revert_checker.find_reverts(
+            git_dir=get_llvm_project_path(),
+            across_ref="c9944df916e41b1014dff5f6f75d52297b48ecdc~",
+            root="c9944df916e41b1014dff5f6f75d52297b48ecdc",
+        )
+        self.assertEqual(reverts, [])
+
+        complaint = (
+            "Failed to resolve reverted object "
+            "edd18355be574122aaa9abf58c15d8c50fb085a1"
+        )
+        self.assertTrue(
+            any(x.startswith(complaint) for x in log_filter.messages),
+            log_filter.messages,
+        )
+
+    def test_known_reverts_across_arbitrary_llvm_rev(self) -> None:
+        reverts = revert_checker.find_reverts(
+            git_dir=get_llvm_project_path(),
+            across_ref="c47f971694be0159ffddfee8a75ae515eba91439",
+            root="9f981e9adf9c8d29bb80306daf08d2770263ade6",
+        )
+        self.assertEqual(
+            reverts,
+            [
+                revert_checker.Revert(
+                    sha="4e0fe038f438ae1679eae9e156e1f248595b2373",
+                    reverted_sha="65b21282c710afe9c275778820c6e3c1cf46734b",
+                ),
+                revert_checker.Revert(
+                    sha="9f981e9adf9c8d29bb80306daf08d2770263ade6",
+                    reverted_sha="4060016fce3e6a0b926ee9fc59e440a612d3a2ec",
+                ),
+            ],
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()

diff --git a/llvm/utils/rsp_bisect.py b/llvm/utils/rsp_bisect.py
index 8c22974f4d3dc..7efcf46b1a64c 100755
--- a/llvm/utils/rsp_bisect.py
+++ b/llvm/utils/rsp_bisect.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 """Script to bisect over files in an rsp file.
 
 This is mostly used for detecting which file contains a miscompile between two
@@ -78,120 +78,137 @@
 
 
 def is_path(s):
-  return '/' in s
+    return "/" in s
 
 
 def run_test(test):
-  """Runs the test and returns whether it was successful or not."""
-  return subprocess.run([test], capture_output=True).returncode == 0
+    """Runs the test and returns whether it was successful or not."""
+    return subprocess.run([test], capture_output=True).returncode == 0
 
 
 def modify_rsp(rsp_entries, other_rel_path, modify_after_num):
-  """Create a modified rsp file for use in bisection.
-
-  Returns a new list from rsp.
-  For each file in rsp after the first modify_after_num files, prepend
-  other_rel_path.
-  """
-  ret = []
-  for r in rsp_entries:
-    if is_path(r):
-      if modify_after_num == 0:
-        r = os.path.join(other_rel_path, r)
-      else:
-        modify_after_num -= 1
-    ret.append(r)
-  assert modify_after_num == 0
-  return ret
+    """Create a modified rsp file for use in bisection.
+
+    Returns a new list from rsp.
+    For each file in rsp after the first modify_after_num files, prepend
+    other_rel_path.
+    """
+    ret = []
+    for r in rsp_entries:
+        if is_path(r):
+            if modify_after_num == 0:
+                r = os.path.join(other_rel_path, r)
+            else:
+                modify_after_num -= 1
+        ret.append(r)
+    assert modify_after_num == 0
+    return ret
 
 
 def test_modified_rsp(test, modified_rsp_entries, rsp_path):
-  """Write the rsp file to disk and run the test."""
-  with open(rsp_path, 'w') as f:
-    f.write(' '.join(modified_rsp_entries))
-  return run_test(test)
+    """Write the rsp file to disk and run the test."""
+    with open(rsp_path, "w") as f:
+        f.write(" ".join(modified_rsp_entries))
+    return run_test(test)
 
 
 def bisect(test, zero_result, rsp_entries, num_files_in_rsp, other_rel_path, rsp_path):
-  """Bisect over rsp entries.
-
-  Args:
-      zero_result: the test result when modify_after_num is 0.
-
-  Returns:
-      The index of the file in the rsp file where the test result changes.
-  """
-  lower = 0
-  upper = num_files_in_rsp
-  while lower != upper - 1:
-    assert lower < upper - 1
-    mid = int((lower + upper) / 2)
-    assert lower != mid and mid != upper
-    print('Trying {} ({}-{})'.format(mid, lower, upper))
-    result = test_modified_rsp(test, modify_rsp(rsp_entries, other_rel_path, mid),
-                               rsp_path)
-    if zero_result == result:
-      lower = mid
-    else:
-      upper = mid
-  return upper
+    """Bisect over rsp entries.
+
+    Args:
+        zero_result: the test result when modify_after_num is 0.
+
+    Returns:
+        The index of the file in the rsp file where the test result changes.
+    """
+    lower = 0
+    upper = num_files_in_rsp
+    while lower != upper - 1:
+        assert lower < upper - 1
+        mid = int((lower + upper) / 2)
+        assert lower != mid and mid != upper
+        print("Trying {} ({}-{})".format(mid, lower, upper))
+        result = test_modified_rsp(
+            test, modify_rsp(rsp_entries, other_rel_path, mid), rsp_path
+        )
+        if zero_result == result:
+            lower = mid
+        else:
+            upper = mid
+    return upper
 
 
 def main():
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--test',
-                      help='Binary to test if current setup is good or bad',
-                      required=True)
-  parser.add_argument('--rsp', help='rsp file', required=True)
-  parser.add_argument(
-      '--other-rel-path',
-      help='Relative path from current build directory to other build ' +
-      'directory, e.g. from "out/Default" to "out/Other" specify "../Other"',
-      required=True)
-  args = parser.parse_args()
-
-  with open(args.rsp, 'r') as f:
-    rsp_entries = f.read()
-  rsp_entries = rsp_entries.split()
-  num_files_in_rsp = sum(1 for a in rsp_entries if is_path(a))
-  if num_files_in_rsp == 0:
-    print('No files in rsp?')
-    return 1
-  print('{} files in rsp'.format(num_files_in_rsp))
-
-  try:
-    print('Initial testing')
-    test0 = test_modified_rsp(args.test, modify_rsp(rsp_entries, args.other_rel_path,
-                                                    0), args.rsp)
-    test_all = test_modified_rsp(
-        args.test, modify_rsp(rsp_entries, args.other_rel_path, num_files_in_rsp),
-        args.rsp)
-
-    if test0 == test_all:
-      print('Test returned same exit code for both build directories')
-      return 1
-
-    print('First build directory returned ' + ('0' if test_all else '1'))
-
-    result = bisect(args.test, test0, rsp_entries, num_files_in_rsp,
-                    args.other_rel_path, args.rsp)
-    print('First file change: {} ({})'.format(
-        list(filter(is_path, rsp_entries))[result - 1], result))
-
-    rsp_out_0 = args.rsp + '.0'
-    rsp_out_1 = args.rsp + '.1'
-    with open(rsp_out_0, 'w') as f:
-      f.write(' '.join(modify_rsp(rsp_entries, args.other_rel_path, result - 1)))
-    with open(rsp_out_1, 'w') as f:
-      f.write(' '.join(modify_rsp(rsp_entries, args.other_rel_path, result)))
-    print('Bisection point rsp files written to {} and {}'.format(
-        rsp_out_0, rsp_out_1))
-  finally:
-    # Always make sure to write the original rsp file contents back so it's
-    # less of a pain to rerun this script.
-    with open(args.rsp, 'w') as f:
-      f.write(' '.join(rsp_entries))
-
-
-if __name__ == '__main__':
-  sys.exit(main())
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--test", help="Binary to test if current setup is good or bad", required=True
+    )
+    parser.add_argument("--rsp", help="rsp file", required=True)
+    parser.add_argument(
+        "--other-rel-path",
+        help="Relative path from current build directory to other build "
+        + 'directory, e.g. from "out/Default" to "out/Other" specify "../Other"',
+        required=True,
+    )
+    args = parser.parse_args()
+
+    with open(args.rsp, "r") as f:
+        rsp_entries = f.read()
+    rsp_entries = rsp_entries.split()
+    num_files_in_rsp = sum(1 for a in rsp_entries if is_path(a))
+    if num_files_in_rsp == 0:
+        print("No files in rsp?")
+        return 1
+    print("{} files in rsp".format(num_files_in_rsp))
+
+    try:
+        print("Initial testing")
+        test0 = test_modified_rsp(
+            args.test, modify_rsp(rsp_entries, args.other_rel_path, 0), args.rsp
+        )
+        test_all = test_modified_rsp(
+            args.test,
+            modify_rsp(rsp_entries, args.other_rel_path, num_files_in_rsp),
+            args.rsp,
+        )
+
+        if test0 == test_all:
+            print("Test returned same exit code for both build directories")
+            return 1
+
+        print("First build directory returned " + ("0" if test_all else "1"))
+
+        result = bisect(
+            args.test,
+            test0,
+            rsp_entries,
+            num_files_in_rsp,
+            args.other_rel_path,
+            args.rsp,
+        )
+        print(
+            "First file change: {} ({})".format(
+                list(filter(is_path, rsp_entries))[result - 1], result
+            )
+        )
+
+        rsp_out_0 = args.rsp + ".0"
+        rsp_out_1 = args.rsp + ".1"
+        with open(rsp_out_0, "w") as f:
+            f.write(" ".join(modify_rsp(rsp_entries, args.other_rel_path, result - 1)))
+        with open(rsp_out_1, "w") as f:
+            f.write(" ".join(modify_rsp(rsp_entries, args.other_rel_path, result)))
+        print(
+            "Bisection point rsp files written to {} and {}".format(
+                rsp_out_0, rsp_out_1
+            )
+        )
+    finally:
+        # Always make sure to write the original rsp file contents back so it's
+        # less of a pain to rerun this script.
+        with open(args.rsp, "w") as f:
+            f.write(" ".join(rsp_entries))
+
+
+if __name__ == "__main__":
+    sys.exit(main())

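The bisection loop in rsp_bisect.py is a plain binary search for the first file index at which the test outcome flips. A toy, self-contained model of that loop; outcome() is a hypothetical stand-in for test_modified_rsp(), where outcome(k) is the test result when the first k files are left unmodified:

def bisect_first_change(num_files, outcome):
    # Assumes at least two files and that the outcome changes somewhere in
    # the range, mirroring the invariants asserted by bisect() above.
    zero_result = outcome(0)
    lower, upper = 0, num_files
    while lower != upper - 1:
        mid = (lower + upper) // 2
        if outcome(mid) == zero_result:
            lower = mid
        else:
            upper = mid
    return upper


# With 10 files and a result that flips once the first 7 files are unmodified:
print(bisect_first_change(10, lambda k: k >= 7))  # 7
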
diff --git a/llvm/utils/rsp_bisect_test/test.py b/llvm/utils/rsp_bisect_test/test.py
index 1cf0df589023d..2433d8653e316 100755
--- a/llvm/utils/rsp_bisect_test/test.py
+++ b/llvm/utils/rsp_bisect_test/test.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 
 import os
 import subprocess
@@ -20,82 +20,87 @@
 
 
 def run_bisect(success, test_script):
-  args = [
-      bisect_script, '--test', test_script, '--rsp', rsp, '--other-rel-path',
-      '../Other'
-  ]
-  res = subprocess.run(args, capture_output=True, encoding='UTF-8')
-  if len(sys.argv) > 1 and sys.argv[1] == '-v':
-    print('Ran {} with return code {}'.format(args, res.returncode))
-    print('Stdout:')
-    print(res.stdout)
-    print('Stderr:')
-    print(res.stderr)
-  if res.returncode != (0 if success else 1):
-    print(res.stdout)
-    print(res.stderr)
-    raise AssertionError('unexpected bisection return code for ' + str(args))
-  return res.stdout
+    args = [
+        bisect_script,
+        "--test",
+        test_script,
+        "--rsp",
+        rsp,
+        "--other-rel-path",
+        "../Other",
+    ]
+    res = subprocess.run(args, capture_output=True, encoding="UTF-8")
+    if len(sys.argv) > 1 and sys.argv[1] == "-v":
+        print("Ran {} with return code {}".format(args, res.returncode))
+        print("Stdout:")
+        print(res.stdout)
+        print("Stderr:")
+        print(res.stderr)
+    if res.returncode != (0 if success else 1):
+        print(res.stdout)
+        print(res.stderr)
+        raise AssertionError("unexpected bisection return code for " + str(args))
+    return res.stdout
 
 
 # Test that an empty rsp file fails.
-with open(rsp, 'w') as f:
-  pass
+with open(rsp, "w") as f:
+    pass
 
 run_bisect(False, test1)
 
 # Test that an rsp file without any paths fails.
-with open(rsp, 'w') as f:
-  f.write('hello\nfoo\n')
+with open(rsp, "w") as f:
+    f.write("hello\nfoo\n")
 
 run_bisect(False, test1)
 
 # Test that an rsp file with one path succeeds.
-with open(rsp, 'w') as f:
-  f.write('./foo\n')
+with open(rsp, "w") as f:
+    f.write("./foo\n")
 
 output = run_bisect(True, test1)
-assert './foo' in output
+assert "./foo" in output
 
 # Test that an rsp file with one path and one extra arg succeeds.
-with open(rsp, 'w') as f:
-  f.write('hello\n./foo\n')
+with open(rsp, "w") as f:
+    f.write("hello\n./foo\n")
 
 output = run_bisect(True, test1)
-assert './foo' in output
+assert "./foo" in output
 
 # Test that an rsp file with three paths and one extra arg succeeds.
-with open(rsp, 'w') as f:
-  f.write('hello\n./foo\n./bar\n./baz\n')
+with open(rsp, "w") as f:
+    f.write("hello\n./foo\n./bar\n./baz\n")
 
 output = run_bisect(True, test1)
-assert './foo' in output
+assert "./foo" in output
 
-with open(rsp, 'w') as f:
-  f.write('hello\n./bar\n./foo\n./baz\n')
+with open(rsp, "w") as f:
+    f.write("hello\n./bar\n./foo\n./baz\n")
 
 output = run_bisect(True, test1)
-assert './foo' in output
+assert "./foo" in output
 
-with open(rsp, 'w') as f:
-  f.write('hello\n./bar\n./baz\n./foo\n')
+with open(rsp, "w") as f:
+    f.write("hello\n./bar\n./baz\n./foo\n")
 
 output = run_bisect(True, test1)
-assert './foo' in output
+assert "./foo" in output
 
 output = run_bisect(True, test2)
-assert './foo' in output
+assert "./foo" in output
 
-with open(rsp + '.0', 'r') as f:
-  contents = f.read()
-  assert ' ../Other/./foo' in contents
+with open(rsp + ".0", "r") as f:
+    contents = f.read()
+    assert " ../Other/./foo" in contents
 
-with open(rsp + '.1', 'r') as f:
-  contents = f.read()
-  assert ' ./foo' in contents
+with open(rsp + ".1", "r") as f:
+    contents = f.read()
+    assert " ./foo" in contents
 
 os.remove(rsp)
-os.remove(rsp + '.0')
-os.remove(rsp + '.1')
+os.remove(rsp + ".0")
+os.remove(rsp + ".1")
 
-print('Success!')
+print("Success!")

diff --git a/llvm/utils/rsp_bisect_test/test_script.py b/llvm/utils/rsp_bisect_test/test_script.py
index 34cfc399fe1b4..be5555a0b79a1 100755
--- a/llvm/utils/rsp_bisect_test/test_script.py
+++ b/llvm/utils/rsp_bisect_test/test_script.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 
 import os
 import sys
@@ -13,8 +13,8 @@
 rsp_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "rsp")
 
 with open(rsp_path) as f:
-  contents = f.read()
-  print(contents)
-  success = '../Other/./foo' in contents
+    contents = f.read()
+    print(contents)
+    success = "../Other/./foo" in contents
 
 sys.exit(0 if success else 1)

diff --git a/llvm/utils/rsp_bisect_test/test_script_inv.py b/llvm/utils/rsp_bisect_test/test_script_inv.py
index c571b0b93f0fa..31055c91ec966 100755
--- a/llvm/utils/rsp_bisect_test/test_script_inv.py
+++ b/llvm/utils/rsp_bisect_test/test_script_inv.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 
 import os
 import sys
@@ -13,6 +13,6 @@
 rsp_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "rsp")
 
 with open(rsp_path) as f:
-  success = '../Other/./foo' in f.read()
+    success = "../Other/./foo" in f.read()
 
 sys.exit(1 if success else 0)

diff --git a/llvm/utils/schedcover.py b/llvm/utils/schedcover.py
index 7cd2fcf2563fa..41d0359462d19 100755
--- a/llvm/utils/schedcover.py
+++ b/llvm/utils/schedcover.py
@@ -4,15 +4,16 @@
 #   llvm-tblgen --gen-subtarget --debug-only=subtarget-emitter
 # With thanks to Dave Estes for mentioning the idea at 2014 LLVM Developers' Meeting
 
-import os;
-import sys;
-import re;
-import operator;
+import os
+import sys
+import re
+import operator
 
 table = {}
 models = set()
 filt = None
 
+
 def add(instr, model, resource=None):
     global table, models
 
@@ -20,6 +21,7 @@ def add(instr, model, resource=None):
     entry[model] = resource
     models.add(model)
 
+
 def filter_model(m):
     global filt
     if m and filt:
@@ -36,7 +38,7 @@ def display():
     models.discard("default")
     models.discard("itinerary")
 
-    ordered_table  = sorted(table.items(), key=operator.itemgetter(0))
+    ordered_table = sorted(table.items(), key=operator.itemgetter(0))
     ordered_models = ["itinerary", "default"]
     ordered_models.extend(sorted(models))
     ordered_models = [m for m in ordered_models if filter_model(m)]
@@ -59,28 +61,34 @@ def display():
 
 def machineModelCover(path):
     # The interesting bits
-    re_sched_default  = re.compile("SchedRW machine model for ([^ ]*) (.*)\n");
-    re_sched_no_default = re.compile("No machine model for ([^ ]*)\n");
-    re_sched_spec = re.compile("InstRW on ([^ ]*) for ([^ ]*) (.*)\n");
-    re_sched_no_spec = re.compile("No machine model for ([^ ]*) on processor (.*)\n");
+    re_sched_default = re.compile("SchedRW machine model for ([^ ]*) (.*)\n")
+    re_sched_no_default = re.compile("No machine model for ([^ ]*)\n")
+    re_sched_spec = re.compile("InstRW on ([^ ]*) for ([^ ]*) (.*)\n")
+    re_sched_no_spec = re.compile("No machine model for ([^ ]*) on processor (.*)\n")
     re_sched_itin = re.compile("Itinerary for ([^ ]*): ([^ ]*)\n")
 
     # scan the file
-    with open(path, 'r') as f:
+    with open(path, "r") as f:
         for line in f.readlines():
             match = re_sched_default.match(line)
-            if match: add(match.group(1), "default", match.group(2))
+            if match:
+                add(match.group(1), "default", match.group(2))
             match = re_sched_no_default.match(line)
-            if match: add(match.group(1), "default")
+            if match:
+                add(match.group(1), "default")
             match = re_sched_spec.match(line)
-            if match: add(match.group(2), match.group(1), match.group(3))
+            if match:
+                add(match.group(2), match.group(1), match.group(3))
             match = re_sched_no_spec.match(line)
-            if match: add(match.group(1), match.group(2))
+            if match:
+                add(match.group(1), match.group(2))
             match = re_sched_itin.match(line)
-            if match: add(match.group(1), "itinerary", match.group(2))
+            if match:
+                add(match.group(1), "itinerary", match.group(2))
 
     display()
 
+
 if len(sys.argv) > 2:
     filt = re.compile(sys.argv[2], re.IGNORECASE)
 machineModelCover(sys.argv[1])

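schedcover.py is driven entirely by the debug-line formats matched in machineModelCover(). A small sketch of how one of those regexes decomposes a line; the processor model, instruction, and resource names are hypothetical, but the pattern is taken verbatim from the script:

import re

re_sched_spec = re.compile("InstRW on ([^ ]*) for ([^ ]*) (.*)\n")

line = "InstRW on MyCPUModel for LDRi12 [MyWrite_4cyc_1L]\n"
m = re_sched_spec.match(line)
# The script records these as add(instr, model, resource):
print(m.group(2), m.group(1), m.group(3))
# LDRi12 MyCPUModel [MyWrite_4cyc_1L]
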
diff --git a/llvm/utils/shuffle_fuzz.py b/llvm/utils/shuffle_fuzz.py
index 2d86cc0a28fa3..c03c77ce092a6 100755
--- a/llvm/utils/shuffle_fuzz.py
+++ b/llvm/utils/shuffle_fuzz.py
@@ -21,181 +21,264 @@
 import sys
 import uuid
 
+
 def main():
-  element_types=['i8', 'i16', 'i32', 'i64', 'f32', 'f64']
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('-v', '--verbose', action='store_true',
-                      help='Show verbose output')
-  parser.add_argument('--seed', default=str(uuid.uuid4()),
-                      help='A string used to seed the RNG')
-  parser.add_argument('--max-shuffle-height', type=int, default=16,
-                      help='Specify a fixed height of shuffle tree to test')
-  parser.add_argument('--no-blends', dest='blends', action='store_false',
-                      help='Include blends of two input vectors')
-  parser.add_argument('--fixed-bit-width', type=int, choices=[128, 256],
-                      help='Specify a fixed bit width of vector to test')
-  parser.add_argument('--fixed-element-type', choices=element_types,
-                      help='Specify a fixed element type to test')
-  parser.add_argument('--triple',
-                      help='Specify a triple string to include in the IR')
-  args = parser.parse_args()
+    element_types = ["i8", "i16", "i32", "i64", "f32", "f64"]
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "-v", "--verbose", action="store_true", help="Show verbose output"
+    )
+    parser.add_argument(
+        "--seed", default=str(uuid.uuid4()), help="A string used to seed the RNG"
+    )
+    parser.add_argument(
+        "--max-shuffle-height",
+        type=int,
+        default=16,
+        help="Specify a fixed height of shuffle tree to test",
+    )
+    parser.add_argument(
+        "--no-blends",
+        dest="blends",
+        action="store_false",
+        help="Include blends of two input vectors",
+    )
+    parser.add_argument(
+        "--fixed-bit-width",
+        type=int,
+        choices=[128, 256],
+        help="Specify a fixed bit width of vector to test",
+    )
+    parser.add_argument(
+        "--fixed-element-type",
+        choices=element_types,
+        help="Specify a fixed element type to test",
+    )
+    parser.add_argument("--triple", help="Specify a triple string to include in the IR")
+    args = parser.parse_args()
 
-  random.seed(args.seed)
+    random.seed(args.seed)
 
-  if args.fixed_element_type is not None:
-    element_types=[args.fixed_element_type]
+    if args.fixed_element_type is not None:
+        element_types = [args.fixed_element_type]
 
-  if args.fixed_bit_width is not None:
-    if args.fixed_bit_width == 128:
-      width_map={'i64': 2, 'i32': 4, 'i16': 8, 'i8': 16, 'f64': 2, 'f32': 4}
-      (width, element_type) = random.choice(
-          [(width_map[t], t) for t in element_types])
-    elif args.fixed_bit_width == 256:
-      width_map={'i64': 4, 'i32': 8, 'i16': 16, 'i8': 32, 'f64': 4, 'f32': 8}
-      (width, element_type) = random.choice(
-          [(width_map[t], t) for t in element_types])
+    if args.fixed_bit_width is not None:
+        if args.fixed_bit_width == 128:
+            width_map = {"i64": 2, "i32": 4, "i16": 8, "i8": 16, "f64": 2, "f32": 4}
+            (width, element_type) = random.choice(
+                [(width_map[t], t) for t in element_types]
+            )
+        elif args.fixed_bit_width == 256:
+            width_map = {"i64": 4, "i32": 8, "i16": 16, "i8": 32, "f64": 4, "f32": 8}
+            (width, element_type) = random.choice(
+                [(width_map[t], t) for t in element_types]
+            )
+        else:
+            sys.exit(1)  # Checked above by argument parsing.
     else:
-      sys.exit(1) # Checked above by argument parsing.
-  else:
-    width = random.choice([2, 4, 8, 16, 32, 64])
-    element_type = random.choice(element_types)
+        width = random.choice([2, 4, 8, 16, 32, 64])
+        element_type = random.choice(element_types)
 
-  element_modulus = {
-      'i8': 1 << 8, 'i16': 1 << 16, 'i32': 1 << 32, 'i64': 1 << 64,
-      'f32': 1 << 32, 'f64': 1 << 64}[element_type]
+    element_modulus = {
+        "i8": 1 << 8,
+        "i16": 1 << 16,
+        "i32": 1 << 32,
+        "i64": 1 << 64,
+        "f32": 1 << 32,
+        "f64": 1 << 64,
+    }[element_type]
 
-  shuffle_range = (2 * width) if args.blends else width
+    shuffle_range = (2 * width) if args.blends else width
 
-  # Because undef (-1) saturates and is indistinguishable when testing the
-  # correctness of a shuffle, we want to bias our fuzz toward having a decent
-  # mixture of non-undef lanes in the end. With a deep shuffle tree, the
-  # probabilies aren't good so we need to bias things. The math here is that if
-  # we uniformly select between -1 and the other inputs, each element of the
-  # result will have the following probability of being undef:
-  #
-  #   1 - (shuffle_range/(shuffle_range+1))^max_shuffle_height
-  #
-  # More generally, for any probability P of selecting a defined element in
-  # a single shuffle, the end result is:
-  #
-  #   1 - P^max_shuffle_height
-  #
-  # The power of the shuffle height is the real problem, as we want:
-  #
-  #   1 - shuffle_range/(shuffle_range+1)
-  #
-  # So we bias the selection of undef at any given node based on the tree
-  # height. Below, let 'A' be 'len(shuffle_range)', 'C' be 'max_shuffle_height',
-  # and 'B' be the bias we use to compensate for
-  # C '((A+1)*A^(1/C))/(A*(A+1)^(1/C))':
-  #
-  #   1 - (B * A)/(A + 1)^C = 1 - A/(A + 1)
-  #
-  # So at each node we use:
-  #
-  #   1 - (B * A)/(A + 1)
-  # = 1 - ((A + 1) * A * A^(1/C))/(A * (A + 1) * (A + 1)^(1/C))
-  # = 1 - ((A + 1) * A^((C + 1)/C))/(A * (A + 1)^((C + 1)/C))
-  #
-  # This is the formula we use to select undef lanes in the shuffle.
-  A = float(shuffle_range)
-  C = float(args.max_shuffle_height)
-  undef_prob = 1.0 - (((A + 1.0) * pow(A, (C + 1.0)/C)) /
-                      (A * pow(A + 1.0, (C + 1.0)/C)))
+    # Because undef (-1) saturates and is indistinguishable when testing the
+    # correctness of a shuffle, we want to bias our fuzz toward having a decent
+    # mixture of non-undef lanes in the end. With a deep shuffle tree, the
+    # probabilities aren't good so we need to bias things. The math here is that if
+    # we uniformly select between -1 and the other inputs, each element of the
+    # result will have the following probability of being undef:
+    #
+    #   1 - (shuffle_range/(shuffle_range+1))^max_shuffle_height
+    #
+    # More generally, for any probability P of selecting a defined element in
+    # a single shuffle, the end result is:
+    #
+    #   1 - P^max_shuffle_height
+    #
+    # The power of the shuffle height is the real problem, as we want:
+    #
+    #   1 - shuffle_range/(shuffle_range+1)
+    #
+    # So we bias the selection of undef at any given node based on the tree
+    # height. Below, let 'A' be 'len(shuffle_range)', 'C' be 'max_shuffle_height',
+    # and 'B' be the bias we use to compensate for
+    # C '((A+1)*A^(1/C))/(A*(A+1)^(1/C))':
+    #
+    #   1 - (B * A)/(A + 1)^C = 1 - A/(A + 1)
+    #
+    # So at each node we use:
+    #
+    #   1 - (B * A)/(A + 1)
+    # = 1 - ((A + 1) * A * A^(1/C))/(A * (A + 1) * (A + 1)^(1/C))
+    # = 1 - ((A + 1) * A^((C + 1)/C))/(A * (A + 1)^((C + 1)/C))
+    #
+    # This is the formula we use to select undef lanes in the shuffle.
+    A = float(shuffle_range)
+    C = float(args.max_shuffle_height)
+    undef_prob = 1.0 - (
+        ((A + 1.0) * pow(A, (C + 1.0) / C)) / (A * pow(A + 1.0, (C + 1.0) / C))
+    )
 
-  shuffle_tree = [[[-1 if random.random() <= undef_prob
-                       else random.choice(range(shuffle_range))
-                    for _ in itertools.repeat(None, width)]
-                   for _ in itertools.repeat(None, args.max_shuffle_height - i)]
-                  for i in range(args.max_shuffle_height)]
+    shuffle_tree = [
+        [
+            [
+                -1
+                if random.random() <= undef_prob
+                else random.choice(range(shuffle_range))
+                for _ in itertools.repeat(None, width)
+            ]
+            for _ in itertools.repeat(None, args.max_shuffle_height - i)
+        ]
+        for i in range(args.max_shuffle_height)
+    ]
 
-  if args.verbose:
-    # Print out the shuffle sequence in a compact form.
-    print(('Testing shuffle sequence "%s" (v%d%s):' %
-                         (args.seed, width, element_type)), file=sys.stderr)
-    for i, shuffles in enumerate(shuffle_tree):
-      print('  tree level %d:' % (i,), file=sys.stderr)
-      for j, s in enumerate(shuffles):
-        print('    shuffle %d: %s' % (j, s), file=sys.stderr)
-    print('', file=sys.stderr)
+    if args.verbose:
+        # Print out the shuffle sequence in a compact form.
+        print(
+            (
+                'Testing shuffle sequence "%s" (v%d%s):'
+                % (args.seed, width, element_type)
+            ),
+            file=sys.stderr,
+        )
+        for i, shuffles in enumerate(shuffle_tree):
+            print("  tree level %d:" % (i,), file=sys.stderr)
+            for j, s in enumerate(shuffles):
+                print("    shuffle %d: %s" % (j, s), file=sys.stderr)
+        print("", file=sys.stderr)
 
-  # Symbolically evaluate the shuffle tree.
-  inputs = [[int(j % element_modulus)
-             for j in range(i * width + 1, (i + 1) * width + 1)]
-            for i in range(args.max_shuffle_height + 1)]
-  results = inputs
-  for shuffles in shuffle_tree:
-    results = [[((results[i] if j < width else results[i + 1])[j % width]
-                 if j != -1 else -1)
-                for j in s]
-               for i, s in enumerate(shuffles)]
-  if len(results) != 1:
-    print('ERROR: Bad results: %s' % (results,), file=sys.stderr)
-    sys.exit(1)
-  result = results[0]
+    # Symbolically evaluate the shuffle tree.
+    inputs = [
+        [int(j % element_modulus) for j in range(i * width + 1, (i + 1) * width + 1)]
+        for i in range(args.max_shuffle_height + 1)
+    ]
+    results = inputs
+    for shuffles in shuffle_tree:
+        results = [
+            [
+                (
+                    (results[i] if j < width else results[i + 1])[j % width]
+                    if j != -1
+                    else -1
+                )
+                for j in s
+            ]
+            for i, s in enumerate(shuffles)
+        ]
+    if len(results) != 1:
+        print("ERROR: Bad results: %s" % (results,), file=sys.stderr)
+        sys.exit(1)
+    result = results[0]
 
-  if args.verbose:
-    print('Which transforms:', file=sys.stderr)
-    print('  from: %s' % (inputs,), file=sys.stderr)
-    print('  into: %s' % (result,), file=sys.stderr)
-    print('', file=sys.stderr)
+    if args.verbose:
+        print("Which transforms:", file=sys.stderr)
+        print("  from: %s" % (inputs,), file=sys.stderr)
+        print("  into: %s" % (result,), file=sys.stderr)
+        print("", file=sys.stderr)
 
-  # The IR uses silly names for floating point types. We also need a same-size
-  # integer type.
-  integral_element_type = element_type
-  if element_type == 'f32':
-    integral_element_type = 'i32'
-    element_type = 'float'
-  elif element_type == 'f64':
-    integral_element_type = 'i64'
-    element_type = 'double'
+    # The IR uses silly names for floating point types. We also need a same-size
+    # integer type.
+    integral_element_type = element_type
+    if element_type == "f32":
+        integral_element_type = "i32"
+        element_type = "float"
+    elif element_type == "f64":
+        integral_element_type = "i64"
+        element_type = "double"
 
-  # Now we need to generate IR for the shuffle function.
-  subst = {'N': width, 'T': element_type, 'IT': integral_element_type}
-  print("""
+    # Now we need to generate IR for the shuffle function.
+    subst = {"N": width, "T": element_type, "IT": integral_element_type}
+    print(
+        """
 define internal fastcc <%(N)d x %(T)s> @test(%(arguments)s) noinline nounwind {
-entry:""" % dict(subst,
-                 arguments=', '.join(
-                     ['<%(N)d x %(T)s> %%s.0.%(i)d' % dict(subst, i=i)
-                      for i in range(args.max_shuffle_height + 1)])))
+entry:"""
+        % dict(
+            subst,
+            arguments=", ".join(
+                [
+                    "<%(N)d x %(T)s> %%s.0.%(i)d" % dict(subst, i=i)
+                    for i in range(args.max_shuffle_height + 1)
+                ]
+            ),
+        )
+    )
 
-  for i, shuffles in enumerate(shuffle_tree):
-   for j, s in enumerate(shuffles):
-    print("""
+    for i, shuffles in enumerate(shuffle_tree):
+        for j, s in enumerate(shuffles):
+            print(
+                """
   %%s.%(next_i)d.%(j)d = shufflevector <%(N)d x %(T)s> %%s.%(i)d.%(j)d, <%(N)d x %(T)s> %%s.%(i)d.%(next_j)d, <%(N)d x i32> <%(S)s>
-""".strip('\n') % dict(subst, i=i, next_i=i + 1, j=j, next_j=j + 1,
-                       S=', '.join(['i32 ' + (str(si) if si != -1 else 'undef')
-                                    for si in s])))
+""".strip(
+                    "\n"
+                )
+                % dict(
+                    subst,
+                    i=i,
+                    next_i=i + 1,
+                    j=j,
+                    next_j=j + 1,
+                    S=", ".join(
+                        ["i32 " + (str(si) if si != -1 else "undef") for si in s]
+                    ),
+                )
+            )
 
-  print("""
+    print(
+        """
   ret <%(N)d x %(T)s> %%s.%(i)d.0
 }
-""" % dict(subst, i=len(shuffle_tree)))
+"""
+        % dict(subst, i=len(shuffle_tree))
+    )
 
-  # Generate some string constants that we can use to report errors.
-  for i, r in enumerate(result):
-    if r != -1:
-      s = ('FAIL(%(seed)s): lane %(lane)d, expected %(result)d, found %%d\n\\0A' %
-           {'seed': args.seed, 'lane': i, 'result': r})
-      s += ''.join(['\\00' for _ in itertools.repeat(None, 128 - len(s) + 2)])
-      print("""
+    # Generate some string constants that we can use to report errors.
+    for i, r in enumerate(result):
+        if r != -1:
+            s = (
+                "FAIL(%(seed)s): lane %(lane)d, expected %(result)d, found %%d\n\\0A"
+                % {"seed": args.seed, "lane": i, "result": r}
+            )
+            s += "".join(["\\00" for _ in itertools.repeat(None, 128 - len(s) + 2)])
+            print(
+                """
 @error.%(i)d = private unnamed_addr global [128 x i8] c"%(s)s"
-""".strip() % {'i': i, 's': s})
+""".strip()
+                % {"i": i, "s": s}
+            )
 
-  # Define a wrapper function which is marked 'optnone' to prevent
-  # interprocedural optimizations from deleting the test.
-  print("""
+    # Define a wrapper function which is marked 'optnone' to prevent
+    # interprocedural optimizations from deleting the test.
+    print(
+        """
 define internal fastcc <%(N)d x %(T)s> @test_wrapper(%(arguments)s) optnone noinline {
   %%result = call fastcc <%(N)d x %(T)s> @test(%(arguments)s)
   ret <%(N)d x %(T)s> %%result
 }
-""" % dict(subst,
-           arguments=', '.join(['<%(N)d x %(T)s> %%s.%(i)d' % dict(subst, i=i)
-                                for i in range(args.max_shuffle_height + 1)])))
+"""
+        % dict(
+            subst,
+            arguments=", ".join(
+                [
+                    "<%(N)d x %(T)s> %%s.%(i)d" % dict(subst, i=i)
+                    for i in range(args.max_shuffle_height + 1)
+                ]
+            ),
+        )
+    )
 
-  # Finally, generate a main function which will trap if any lanes are mapped
-  # incorrectly (in an observable way).
-  print("""
+    # Finally, generate a main function which will trap if any lanes are mapped
+    # incorrectly (in an observable way).
+    print(
+        """
 define i32 @main() {
 entry:
   ; Create a scratch space to print error messages.
@@ -208,24 +291,41 @@ def main():
   ; result.
   %%v.cast = bitcast <%(N)d x %(T)s> %%v to <%(N)d x %(IT)s>
   br label %%test.0
-""" % dict(subst,
-           inputs=', '.join(
-               [('<%(N)d x %(T)s> bitcast '
-                 '(<%(N)d x %(IT)s> <%(input)s> to <%(N)d x %(T)s>)' %
-                 dict(subst, input=', '.join(['%(IT)s %(i)d' % dict(subst, i=i)
-                                              for i in input])))
-                for input in inputs])))
+"""
+        % dict(
+            subst,
+            inputs=", ".join(
+                [
+                    (
+                        "<%(N)d x %(T)s> bitcast "
+                        "(<%(N)d x %(IT)s> <%(input)s> to <%(N)d x %(T)s>)"
+                        % dict(
+                            subst,
+                            input=", ".join(
+                                ["%(IT)s %(i)d" % dict(subst, i=i) for i in input]
+                            ),
+                        )
+                    )
+                    for input in inputs
+                ]
+            ),
+        )
+    )
 
-  # Test that each non-undef result lane contains the expected value.
-  for i, r in enumerate(result):
-    if r == -1:
-      print("""
+    # Test that each non-undef result lane contains the expected value.
+    for i, r in enumerate(result):
+        if r == -1:
+            print(
+                """
 test.%(i)d:
   ; Skip this lane, its value is undef.
   br label %%test.%(next_i)d
-""" % dict(subst, i=i, next_i=i + 1))
-    else:
-      print("""
+"""
+                % dict(subst, i=i, next_i=i + 1)
+            )
+        else:
+            print(
+                """
 test.%(i)d:
   %%v.%(i)d = extractelement <%(N)d x %(IT)s> %%v.cast, i32 %(i)d
   %%cmp.%(i)d = icmp ne %(IT)s %%v.%(i)d, %(r)d
@@ -240,9 +340,12 @@ def main():
   call i32 @write(i32 2, i8* %%str.ptr, i32 %%length.%(i)d)
   call void @llvm.trap()
   unreachable
-""" % dict(subst, i=i, next_i=i + 1, r=r))
+"""
+                % dict(subst, i=i, next_i=i + 1, r=r)
+            )
 
-  print("""
+    print(
+        """
 test.%d:
   ret i32 0
 }
@@ -251,7 +354,10 @@ def main():
 declare i32 @write(i32, i8*, i32)
 declare i32 @sprintf(i8*, i8*, ...)
 declare void @llvm.trap() noreturn nounwind
-""" % (len(result),))
+"""
+        % (len(result),)
+    )
+
 
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()

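The undef-bias derivation in shuffle_fuzz.py's comment block can also be checked numerically: with the per-node undef probability chosen there, the end-to-end chance of a lane being undef after C shuffle levels collapses to 1 - A/(A + 1), independent of the tree height. A quick sanity check (the A and C values below are arbitrary):

A, C = 16.0, 16.0  # A = shuffle_range, C = max_shuffle_height
undef_prob = 1.0 - (
    ((A + 1.0) * pow(A, (C + 1.0) / C)) / (A * pow(A + 1.0, (C + 1.0) / C))
)
per_node_defined = 1.0 - undef_prob    # algebraically (A/(A+1)) ** (1/C)
end_undef = 1.0 - per_node_defined**C  # compounded over C shuffle levels
print(round(end_undef, 9) == round(1.0 - A / (A + 1.0), 9))  # True
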
diff --git a/llvm/utils/shuffle_select_fuzz_tester.py b/llvm/utils/shuffle_select_fuzz_tester.py
index 6b2f94a693533..73bac3c18db14 100755
--- a/llvm/utils/shuffle_select_fuzz_tester.py
+++ b/llvm/utils/shuffle_select_fuzz_tester.py
@@ -34,17 +34,17 @@
 MERGE_SEL_POS = 0.5
 
 
-test_template = r'''
+test_template = r"""
 define internal fastcc {ty} @test({inputs}) noinline nounwind {{
 entry:
 {instructions}
   ret {ty} {last_name}
 }}
-'''
+"""
 
 error_template = r'''@error.{lane} = private unnamed_addr global [64 x i8] c"FAIL: lane {lane}, expected {exp}, found %d\0A{padding}"'''
 
-main_template = r'''
+main_template = r"""
 define i32 @main() {{
 entry:
   ; Create a scratch space to print error messages.
@@ -62,22 +62,22 @@
 declare i32 @write(i32, i8*, i32)
 declare i32 @sprintf(i8*, i8*, ...)
 declare void @llvm.trap() noreturn nounwind
-'''
+"""
 
-check_template = r'''
+check_template = r"""
 test.{lane}:
   %v.{lane} = extractelement {ty} %v, i32 {lane}
   %cmp.{lane} = {i_f}cmp {ordered}ne {scalar_ty} %v.{lane}, {exp}
   br i1 %cmp.{lane}, label %die.{lane}, label %test.{n_lane}
-'''
+"""
 
-undef_check_template = r'''
+undef_check_template = r"""
 test.{lane}:
 ; Skip this lane, its value is undef.
   br label %test.{n_lane}
-'''
+"""
 
-die_template = r'''
+die_template = r"""
 die.{lane}:
 ; Capture the actual value and print an error message.
   call i32 (i8*, i8*, ...) @sprintf(i8* %str.ptr, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @error.{lane}, i32 0, i32 0), {scalar_ty} %v.{lane})
@@ -85,321 +85,371 @@
   call i32 @write(i32 2, i8* %str.ptr, i32 %length.{lane})
   call void @llvm.trap()
   unreachable
-'''
+"""
 
-class Type:
-  def __init__(self, is_float, elt_width, elt_num):
-    self.is_float = is_float        # Boolean
-    self.elt_width = elt_width      # Integer
-    self.elt_num = elt_num          # Integer
 
-  def dump(self):
-    if self.is_float:
-      str_elt = 'float' if self.elt_width == 32 else 'double'
-    else:
-      str_elt = 'i' + str(self.elt_width)
+class Type:
+    def __init__(self, is_float, elt_width, elt_num):
+        self.is_float = is_float  # Boolean
+        self.elt_width = elt_width  # Integer
+        self.elt_num = elt_num  # Integer
 
-    if self.elt_num == 1:
-      return str_elt
-    else:
-      return '<' + str(self.elt_num) + ' x ' + str_elt + '>'
+    def dump(self):
+        if self.is_float:
+            str_elt = "float" if self.elt_width == 32 else "double"
+        else:
+            str_elt = "i" + str(self.elt_width)
 
-  def get_scalar_type(self):
-    return Type(self.is_float, self.elt_width, 1)
+        if self.elt_num == 1:
+            return str_elt
+        else:
+            return "<" + str(self.elt_num) + " x " + str_elt + ">"
 
+    def get_scalar_type(self):
+        return Type(self.is_float, self.elt_width, 1)
 
 
 # Class to represent any value (variable) that can be used.
 class Value:
-  def __init__(self, name, ty, value = None):
-    self.ty = ty                  # Type
-    self.name = name              # String
-    self.value = value            # list of integers or floating points
+    def __init__(self, name, ty, value=None):
+        self.ty = ty  # Type
+        self.name = name  # String
+        self.value = value  # list of integers or floating points
 
 
 # Class to represent an IR instruction (shuffle/select).
 class Instruction(Value):
-  def __init__(self, name, ty, op0, op1, mask):
-    Value.__init__(self, name, ty)
-    self.op0 = op0                # Value
-    self.op1 = op1                # Value
-    self.mask = mask              # list of integers
+    def __init__(self, name, ty, op0, op1, mask):
+        Value.__init__(self, name, ty)
+        self.op0 = op0  # Value
+        self.op1 = op1  # Value
+        self.mask = mask  # list of integers
 
-  def dump(self): pass
+    def dump(self):
+        pass
 
-  def calc_value(self): pass
+    def calc_value(self):
+        pass
 
 
 # Class to represent an IR shuffle instruction
 class ShufInstr(Instruction):
 
-  shuf_template = '  {name} = shufflevector {ty} {op0}, {ty} {op1}, <{num} x i32> {mask}\n'
-
-  def __init__(self, name, ty, op0, op1, mask):
-    Instruction.__init__(self, '%shuf' + name, ty, op0, op1, mask)
-
-  def dump(self):
-    str_mask = [('i32 ' + str(idx)) if idx != -1 else 'i32 undef' for idx in self.mask]
-    str_mask = '<' + (', ').join(str_mask) + '>'
-    return self.shuf_template.format(name = self.name, ty = self.ty.dump(), op0 = self.op0.name,
-                               op1 = self.op1.name, num = self.ty.elt_num, mask = str_mask)
-
-  def calc_value(self):
-    if self.value != None:
-      print('Trying to calculate the value of a shuffle instruction twice')
-      exit(1)
-
-    result = []
-    for i in range(len(self.mask)):
-      index = self.mask[i]
-
-      if index < self.ty.elt_num and index >= 0:
-        result.append(self.op0.value[index])
-      elif index >= self.ty.elt_num:
-        index = index % self.ty.elt_num
-        result.append(self.op1.value[index])
-      else: # -1 => undef
-        result.append(-1)
-
-    self.value = result
+    shuf_template = (
+        "  {name} = shufflevector {ty} {op0}, {ty} {op1}, <{num} x i32> {mask}\n"
+    )
+
+    def __init__(self, name, ty, op0, op1, mask):
+        Instruction.__init__(self, "%shuf" + name, ty, op0, op1, mask)
+
+    def dump(self):
+        str_mask = [
+            ("i32 " + str(idx)) if idx != -1 else "i32 undef" for idx in self.mask
+        ]
+        str_mask = "<" + (", ").join(str_mask) + ">"
+        return self.shuf_template.format(
+            name=self.name,
+            ty=self.ty.dump(),
+            op0=self.op0.name,
+            op1=self.op1.name,
+            num=self.ty.elt_num,
+            mask=str_mask,
+        )
+
+    def calc_value(self):
+        if self.value != None:
+            print("Trying to calculate the value of a shuffle instruction twice")
+            exit(1)
+
+        result = []
+        for i in range(len(self.mask)):
+            index = self.mask[i]
+
+            if index < self.ty.elt_num and index >= 0:
+                result.append(self.op0.value[index])
+            elif index >= self.ty.elt_num:
+                index = index % self.ty.elt_num
+                result.append(self.op1.value[index])
+            else:  # -1 => undef
+                result.append(-1)
+
+        self.value = result
 
 
 # Class to represent an IR select instruction
 class SelectInstr(Instruction):
 
-  sel_template = '  {name} = select <{num} x i1> {mask}, {ty} {op0}, {ty} {op1}\n'
+    sel_template = "  {name} = select <{num} x i1> {mask}, {ty} {op0}, {ty} {op1}\n"
 
-  def __init__(self, name, ty, op0, op1, mask):
-    Instruction.__init__(self, '%sel' + name, ty, op0, op1, mask)
+    def __init__(self, name, ty, op0, op1, mask):
+        Instruction.__init__(self, "%sel" + name, ty, op0, op1, mask)
 
-  def dump(self):
-    str_mask = [('i1 ' + str(idx)) if idx != -1 else 'i1 undef' for idx in self.mask]
-    str_mask = '<' + (', ').join(str_mask) + '>'
-    return self.sel_template.format(name = self.name, ty = self.ty.dump(), op0 = self.op0.name,
-                               op1 = self.op1.name, num = self.ty.elt_num, mask = str_mask)
+    def dump(self):
+        str_mask = [
+            ("i1 " + str(idx)) if idx != -1 else "i1 undef" for idx in self.mask
+        ]
+        str_mask = "<" + (", ").join(str_mask) + ">"
+        return self.sel_template.format(
+            name=self.name,
+            ty=self.ty.dump(),
+            op0=self.op0.name,
+            op1=self.op1.name,
+            num=self.ty.elt_num,
+            mask=str_mask,
+        )
 
-  def calc_value(self):
-    if self.value != None:
-      print('Trying to calculate the value of a select instruction twice')
-      exit(1)
+    def calc_value(self):
+        if self.value != None:
+            print("Trying to calculate the value of a select instruction twice")
+            exit(1)
 
-    result = []
-    for i in range(len(self.mask)):
-      index = self.mask[i]
+        result = []
+        for i in range(len(self.mask)):
+            index = self.mask[i]
 
-      if index == 1:
-        result.append(self.op0.value[i])
-      elif index == 0:
-        result.append(self.op1.value[i])
-      else: # -1 => undef
-        result.append(-1)
+            if index == 1:
+                result.append(self.op0.value[i])
+            elif index == 0:
+                result.append(self.op1.value[i])
+            else:  # -1 => undef
+                result.append(-1)
 
-    self.value = result
+        self.value = result
 
 
 # Returns a list of Values initialized with actual numbers according to the
 # provided type
 def gen_inputs(ty, num):
-  inputs = []
-  for i in range(num):
-    inp = []
-    for j in range(ty.elt_num):
-      if ty.is_float:
-        inp.append(float(i*ty.elt_num + j))
-      else:
-        inp.append((i*ty.elt_num + j) % (1 << ty.elt_width))
-    inputs.append(Value('%inp' + str(i), ty, inp))
+    inputs = []
+    for i in range(num):
+        inp = []
+        for j in range(ty.elt_num):
+            if ty.is_float:
+                inp.append(float(i * ty.elt_num + j))
+            else:
+                inp.append((i * ty.elt_num + j) % (1 << ty.elt_width))
+        inputs.append(Value("%inp" + str(i), ty, inp))
 
-  return inputs
+    return inputs
 
 
 # Returns a random vector type to be tested
 # In case one of the dimensions (scalar type/number of elements) is provided,
 # fill the blank dimension and return appropriate Type object.
 def get_random_type(ty, num_elts):
-  if ty != None:
-    if ty == 'i8':
-      is_float = False
-      width = 8
-    elif ty == 'i16':
-      is_float = False
-      width = 16
-    elif ty == 'i32':
-      is_float = False
-      width = 32
-    elif ty == 'i64':
-      is_float = False
-      width = 64
-    elif ty == 'f32':
-      is_float = True
-      width = 32
-    elif ty == 'f64':
-      is_float = True
-      width = 64
-
-  int_elt_widths = [8, 16, 32, 64]
-  float_elt_widths = [32, 64]
-
-  if num_elts == None:
-    num_elts = random.choice(range(2, 65))
-
-  if ty == None:
-    # 1 for integer type, 0 for floating-point
-    if random.randint(0,1):
-      is_float = False
-      width = random.choice(int_elt_widths)
-    else:
-      is_float = True
-      width = random.choice(float_elt_widths)
-
-  return Type(is_float, width, num_elts)
+    if ty != None:
+        if ty == "i8":
+            is_float = False
+            width = 8
+        elif ty == "i16":
+            is_float = False
+            width = 16
+        elif ty == "i32":
+            is_float = False
+            width = 32
+        elif ty == "i64":
+            is_float = False
+            width = 64
+        elif ty == "f32":
+            is_float = True
+            width = 32
+        elif ty == "f64":
+            is_float = True
+            width = 64
+
+    int_elt_widths = [8, 16, 32, 64]
+    float_elt_widths = [32, 64]
+
+    if num_elts == None:
+        num_elts = random.choice(range(2, 65))
+
+    if ty == None:
+        # 1 for integer type, 0 for floating-point
+        if random.randint(0, 1):
+            is_float = False
+            width = random.choice(int_elt_widths)
+        else:
+            is_float = True
+            width = random.choice(float_elt_widths)
+
+    return Type(is_float, width, num_elts)
 
 
 # Generate mask for shufflevector IR instruction, with SHUF_UNDEF_POS possibility
 # of one undef index.
 def gen_shuf_mask(ty):
-  mask = []
-  for i in range(ty.elt_num):
-    if SHUF_UNDEF_POS/ty.elt_num > random.random():
-      mask.append(-1)
-    else:
-      mask.append(random.randint(0, ty.elt_num*2 - 1))
+    mask = []
+    for i in range(ty.elt_num):
+        if SHUF_UNDEF_POS / ty.elt_num > random.random():
+            mask.append(-1)
+        else:
+            mask.append(random.randint(0, ty.elt_num * 2 - 1))
 
-  return mask
+    return mask
 
 
 # Generate mask for select IR instruction, with SEL_UNDEF_POS possibility
 # of one undef index.
 def gen_sel_mask(ty):
-  mask = []
-  for i in range(ty.elt_num):
-    if SEL_UNDEF_POS/ty.elt_num > random.random():
-      mask.append(-1)
-    else:
-      mask.append(random.randint(0, 1))
+    mask = []
+    for i in range(ty.elt_num):
+        if SEL_UNDEF_POS / ty.elt_num > random.random():
+            mask.append(-1)
+        else:
+            mask.append(random.randint(0, 1))
+
+    return mask
 
-  return mask
 
 # Generate shuffle instructions with optional select instruction after.
 def gen_insts(inputs, ty):
-  int_zero_init = Value('zeroinitializer', ty, [0]*ty.elt_num)
-  float_zero_init = Value('zeroinitializer', ty, [0.0]*ty.elt_num)
-
-  insts = []
-  name_idx = 0
-  while len(inputs) > 1:
-    # Choose 2 available Values - remove them from inputs list.
-    [idx0, idx1] = sorted(random.sample(range(len(inputs)), 2))
-    op0 = inputs[idx0]
-    op1 = inputs[idx1]
-
-    # Create the shuffle instruction.
-    shuf_mask = gen_shuf_mask(ty)
-    shuf_inst = ShufInstr(str(name_idx), ty, op0, op1, shuf_mask)
-    shuf_inst.calc_value()
-
-    # Add the new shuffle instruction to the list of instructions.
-    insts.append(shuf_inst)
-
-    # Optionally, add select instruction with the result of the previous shuffle.
-    if random.random() < ADD_SEL_POS:
-      #  Either blending with a random Value or with an all-zero vector.
-      if random.random() < MERGE_SEL_POS:
-        op2 = random.choice(inputs)
-      else:
-        op2 = float_zero_init if ty.is_float else int_zero_init
-
-      select_mask = gen_sel_mask(ty)
-      select_inst = SelectInstr(str(name_idx), ty, shuf_inst, op2, select_mask)
-      select_inst.calc_value()
-
-      # Add the select instructions to the list of instructions and to the available Values.
-      insts.append(select_inst)
-      inputs.append(select_inst)
-    else:
-      # If the shuffle instruction is not followed by select, add it to the available Values.
-      inputs.append(shuf_inst)
-
-    del inputs[idx1]
-    del inputs[idx0]
-    name_idx += 1
-
-  return insts
+    int_zero_init = Value("zeroinitializer", ty, [0] * ty.elt_num)
+    float_zero_init = Value("zeroinitializer", ty, [0.0] * ty.elt_num)
+
+    insts = []
+    name_idx = 0
+    while len(inputs) > 1:
+        # Choose 2 available Values - remove them from inputs list.
+        [idx0, idx1] = sorted(random.sample(range(len(inputs)), 2))
+        op0 = inputs[idx0]
+        op1 = inputs[idx1]
+
+        # Create the shuffle instruction.
+        shuf_mask = gen_shuf_mask(ty)
+        shuf_inst = ShufInstr(str(name_idx), ty, op0, op1, shuf_mask)
+        shuf_inst.calc_value()
+
+        # Add the new shuffle instruction to the list of instructions.
+        insts.append(shuf_inst)
+
+        # Optionally, add select instruction with the result of the previous shuffle.
+        if random.random() < ADD_SEL_POS:
+            #  Either blending with a random Value or with an all-zero vector.
+            if random.random() < MERGE_SEL_POS:
+                op2 = random.choice(inputs)
+            else:
+                op2 = float_zero_init if ty.is_float else int_zero_init
+
+            select_mask = gen_sel_mask(ty)
+            select_inst = SelectInstr(str(name_idx), ty, shuf_inst, op2, select_mask)
+            select_inst.calc_value()
+
+            # Add the select instructions to the list of instructions and to the available Values.
+            insts.append(select_inst)
+            inputs.append(select_inst)
+        else:
+            # If the shuffle instruction is not followed by select, add it to the available Values.
+            inputs.append(shuf_inst)
+
+        del inputs[idx1]
+        del inputs[idx0]
+        name_idx += 1
+
+    return insts
 
 
 def main():
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('--seed', default=str(uuid.uuid4()),
-                      help='A string used to seed the RNG')
-  parser.add_argument('--max-num-inputs', type=int, default=20,
-          help='Specify the maximum number of vector inputs for the test. (default: 20)')
-  parser.add_argument('--min-num-inputs', type=int, default=10,
-          help='Specify the minimum number of vector inputs for the test. (default: 10)')
-  parser.add_argument('--type', default=None,
-                      help='''
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "--seed", default=str(uuid.uuid4()), help="A string used to seed the RNG"
+    )
+    parser.add_argument(
+        "--max-num-inputs",
+        type=int,
+        default=20,
+        help="Specify the maximum number of vector inputs for the test. (default: 20)",
+    )
+    parser.add_argument(
+        "--min-num-inputs",
+        type=int,
+        default=10,
+        help="Specify the minimum number of vector inputs for the test. (default: 10)",
+    )
+    parser.add_argument(
+        "--type",
+        default=None,
+        help="""
                           Choose specific type to be tested.
                           i8, i16, i32, i64, f32 or f64.
-                          (default: random)''')
-  parser.add_argument('--num-elts', default=None, type=int,
-                      help='Choose specific number of vector elements to be tested. (default: random)')
-  args = parser.parse_args()
-
-  print('; The seed used for this test is ' + args.seed)
-
-  assert args.min_num_inputs < args.max_num_inputs , "Minimum value greater than maximum."
-  assert args.type in [None, 'i8', 'i16', 'i32', 'i64', 'f32', 'f64'], "Illegal type."
-  assert args.num_elts == None or args.num_elts > 0, "num_elts must be a positive integer."
-
-  random.seed(args.seed)
-  ty = get_random_type(args.type, args.num_elts)
-  inputs = gen_inputs(ty, random.randint(args.min_num_inputs, args.max_num_inputs))
-  inputs_str = (', ').join([inp.ty.dump() + ' ' + inp.name for inp in inputs])
-  inputs_values = [inp.value for inp in inputs]
-
-  insts = gen_insts(inputs, ty)
-
-  assert len(inputs) == 1, "Only one value should be left after generating phase"
-  res = inputs[0]
-
-  # print the actual test function by dumping the generated instructions.
-  insts_str = ''.join([inst.dump() for inst in insts])
-  print(test_template.format(ty = ty.dump(), inputs = inputs_str,
-                             instructions = insts_str, last_name = res.name))
-
-  # Print the error message templates as global strings
-  for i in range(len(res.value)):
-    pad = ''.join(['\\00']*(31 - len(str(i)) - len(str(res.value[i]))))
-    print(error_template.format(lane = str(i), exp = str(res.value[i]),
-                                padding = pad))
-
-  # Prepare the runtime checks and failure handlers.
-  scalar_ty = ty.get_scalar_type()
-  check_die = ''
-  i_f = 'f' if ty.is_float else 'i'
-  ordered = 'o' if ty.is_float else ''
-  for i in range(len(res.value)):
-    if res.value[i] != -1:
-      # Emit runtime check for each non-undef expected value.
-      check_die += check_template.format(lane = str(i), n_lane = str(i+1),
-                             ty = ty.dump(), i_f = i_f, scalar_ty = scalar_ty.dump(),
-                             exp = str(res.value[i]), ordered = ordered)
-      # Emit failure handler for each runtime check with proper error message
-      check_die += die_template.format(lane = str(i), scalar_ty = scalar_ty.dump())
-    else:
-      # Ignore lanes with undef result
-      check_die += undef_check_template.format(lane = str(i), n_lane = str(i+1))
-
-  check_die += '\ntest.' + str(len(res.value)) + ':\n'
-  check_die += '  ret i32 0'
-
-  # Prepare the input values passed to the test function.
-  inputs_values = [', '.join([scalar_ty.dump() + ' ' + str(i) for i in inp]) for inp in inputs_values]
-  inputs = ', '.join([ty.dump() + ' <' + inp + '>' for inp in inputs_values])
-
-  print(main_template.format(ty = ty.dump(), inputs = inputs, check_die = check_die))
-
-
-if __name__ == '__main__':
-  main()
-
-
+                          (default: random)""",
+    )
+    parser.add_argument(
+        "--num-elts",
+        default=None,
+        type=int,
+        help="Choose specific number of vector elements to be tested. (default: random)",
+    )
+    args = parser.parse_args()
+
+    print("; The seed used for this test is " + args.seed)
+
+    assert (
+        args.min_num_inputs < args.max_num_inputs
+    ), "Minimum value greater than maximum."
+    assert args.type in [None, "i8", "i16", "i32", "i64", "f32", "f64"], "Illegal type."
+    assert (
+        args.num_elts == None or args.num_elts > 0
+    ), "num_elts must be a positive integer."
+
+    random.seed(args.seed)
+    ty = get_random_type(args.type, args.num_elts)
+    inputs = gen_inputs(ty, random.randint(args.min_num_inputs, args.max_num_inputs))
+    inputs_str = (", ").join([inp.ty.dump() + " " + inp.name for inp in inputs])
+    inputs_values = [inp.value for inp in inputs]
+
+    insts = gen_insts(inputs, ty)
+
+    assert len(inputs) == 1, "Only one value should be left after generating phase"
+    res = inputs[0]
+
+    # print the actual test function by dumping the generated instructions.
+    insts_str = "".join([inst.dump() for inst in insts])
+    print(
+        test_template.format(
+            ty=ty.dump(), inputs=inputs_str, instructions=insts_str, last_name=res.name
+        )
+    )
+
+    # Print the error message templates as global strings
+    for i in range(len(res.value)):
+        pad = "".join(["\\00"] * (31 - len(str(i)) - len(str(res.value[i]))))
+        print(error_template.format(lane=str(i), exp=str(res.value[i]), padding=pad))
+
+    # Prepare the runtime checks and failure handlers.
+    scalar_ty = ty.get_scalar_type()
+    check_die = ""
+    i_f = "f" if ty.is_float else "i"
+    ordered = "o" if ty.is_float else ""
+    for i in range(len(res.value)):
+        if res.value[i] != -1:
+            # Emit runtime check for each non-undef expected value.
+            check_die += check_template.format(
+                lane=str(i),
+                n_lane=str(i + 1),
+                ty=ty.dump(),
+                i_f=i_f,
+                scalar_ty=scalar_ty.dump(),
+                exp=str(res.value[i]),
+                ordered=ordered,
+            )
+            # Emit failure handler for each runtime check with proper error message
+            check_die += die_template.format(lane=str(i), scalar_ty=scalar_ty.dump())
+        else:
+            # Ignore lanes with undef result
+            check_die += undef_check_template.format(lane=str(i), n_lane=str(i + 1))
+
+    check_die += "\ntest." + str(len(res.value)) + ":\n"
+    check_die += "  ret i32 0"
+
+    # Prepare the input values passed to the test function.
+    inputs_values = [
+        ", ".join([scalar_ty.dump() + " " + str(i) for i in inp])
+        for inp in inputs_values
+    ]
+    inputs = ", ".join([ty.dump() + " <" + inp + ">" for inp in inputs_values])
+
+    print(main_template.format(ty=ty.dump(), inputs=inputs, check_die=check_die))
+
+
+if __name__ == "__main__":
+    main()

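For readers cross-checking the two calc_value bodies above: the fuzzer models shufflevector semantics in plain Python so the generated IR can be verified lane by lane. A condensed sketch of that mapping, derived from ShufInstr.calc_value above rather than from any separate API:

# Indices below elt_num pick from op0, larger ones wrap into op1, -1 is undef.
def apply_shuffle(op0, op1, mask):
    n = len(op0)
    return [op0[i] if 0 <= i < n else (op1[i % n] if i >= n else -1)
            for i in mask]

assert apply_shuffle([10, 11], [20, 21], [0, 3, -1]) == [10, 21, -1]
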
diff --git a/llvm/utils/sort_includes.py b/llvm/utils/sort_includes.py
index 70bfdedfc6d32..d30efd0b438d1 100755
--- a/llvm/utils/sort_includes.py
+++ b/llvm/utils/sort_includes.py
@@ -11,83 +11,99 @@
 import argparse
 import os
 
+
 def sort_includes(f):
-  """Sort the #include lines of a specific file."""
-
-  # Skip files which are under INPUTS trees or test trees.
-  if 'INPUTS/' in f.name or 'test/' in f.name:
-    return
-
-  ext = os.path.splitext(f.name)[1]
-  if ext not in ['.cpp', '.c', '.h', '.inc', '.def']:
-    return
-
-  lines = f.readlines()
-  look_for_api_header = ext in ['.cpp', '.c']
-  found_headers = False
-  headers_begin = 0
-  headers_end = 0
-  api_headers = []
-  local_headers = []
-  subproject_headers = []
-  llvm_headers = []
-  system_headers = []
-  for (i, l) in enumerate(lines):
-    if l.strip() == '':
-      continue
-    if l.startswith('#include'):
-      if not found_headers:
-        headers_begin = i
-        found_headers = True
-      headers_end = i
-      header = l[len('#include'):].lstrip()
-      if look_for_api_header and header.startswith('"'):
-        api_headers.append(header)
-        look_for_api_header = False
-        continue
-      if (header.startswith('<') or header.startswith('"gtest/') or
-          header.startswith('"isl/') or header.startswith('"json/')):
-        system_headers.append(header)
-        continue
-      if (header.startswith('"clang/') or header.startswith('"clang-c/') or
-          header.startswith('"polly/')):
-        subproject_headers.append(header)
-        continue
-      if (header.startswith('"llvm/') or header.startswith('"llvm-c/')):
-        llvm_headers.append(header)
-        continue
-      local_headers.append(header)
-      continue
-
-    # Only allow comments and #defines prior to any includes. If either are
-    # mixed with includes, the order might be sensitive.
-    if found_headers:
-      break
-    if l.startswith('//') or l.startswith('#define') or l.startswith('#ifndef'):
-      continue
-    break
-  if not found_headers:
-    return
-
-  local_headers = sorted(set(local_headers))
-  subproject_headers = sorted(set(subproject_headers))
-  llvm_headers = sorted(set(llvm_headers))
-  system_headers = sorted(set(system_headers))
-  headers = api_headers + local_headers + subproject_headers + llvm_headers + system_headers
-  header_lines = ['#include ' + h for h in headers]
-  lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:]
-
-  f.seek(0)
-  f.truncate()
-  f.writelines(lines)
+    """Sort the #include lines of a specific file."""
+
+    # Skip files which are under INPUTS trees or test trees.
+    if "INPUTS/" in f.name or "test/" in f.name:
+        return
+
+    ext = os.path.splitext(f.name)[1]
+    if ext not in [".cpp", ".c", ".h", ".inc", ".def"]:
+        return
+
+    lines = f.readlines()
+    look_for_api_header = ext in [".cpp", ".c"]
+    found_headers = False
+    headers_begin = 0
+    headers_end = 0
+    api_headers = []
+    local_headers = []
+    subproject_headers = []
+    llvm_headers = []
+    system_headers = []
+    for (i, l) in enumerate(lines):
+        if l.strip() == "":
+            continue
+        if l.startswith("#include"):
+            if not found_headers:
+                headers_begin = i
+                found_headers = True
+            headers_end = i
+            header = l[len("#include") :].lstrip()
+            if look_for_api_header and header.startswith('"'):
+                api_headers.append(header)
+                look_for_api_header = False
+                continue
+            if (
+                header.startswith("<")
+                or header.startswith('"gtest/')
+                or header.startswith('"isl/')
+                or header.startswith('"json/')
+            ):
+                system_headers.append(header)
+                continue
+            if (
+                header.startswith('"clang/')
+                or header.startswith('"clang-c/')
+                or header.startswith('"polly/')
+            ):
+                subproject_headers.append(header)
+                continue
+            if header.startswith('"llvm/') or header.startswith('"llvm-c/'):
+                llvm_headers.append(header)
+                continue
+            local_headers.append(header)
+            continue
+
+        # Only allow comments and #defines prior to any includes. If either are
+        # mixed with includes, the order might be sensitive.
+        if found_headers:
+            break
+        if l.startswith("//") or l.startswith("#define") or l.startswith("#ifndef"):
+            continue
+        break
+    if not found_headers:
+        return
+
+    local_headers = sorted(set(local_headers))
+    subproject_headers = sorted(set(subproject_headers))
+    llvm_headers = sorted(set(llvm_headers))
+    system_headers = sorted(set(system_headers))
+    headers = (
+        api_headers + local_headers + subproject_headers + llvm_headers + system_headers
+    )
+    header_lines = ["#include " + h for h in headers]
+    lines = lines[:headers_begin] + header_lines + lines[headers_end + 1 :]
+
+    f.seek(0)
+    f.truncate()
+    f.writelines(lines)
+
 
 def main():
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('files', nargs='+', type=argparse.FileType('r+'),
-                      help='the source files to sort includes within')
-  args = parser.parse_args()
-  for f in args.files:
-    sort_includes(f)
-
-if __name__ == '__main__':
-  main()
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "files",
+        nargs="+",
+        type=argparse.FileType("r+"),
+        help="the source files to sort includes within",
+    )
+    args = parser.parse_args()
+    for f in args.files:
+        sort_includes(f)
+
+
+if __name__ == "__main__":
+    main()

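The conditionals in sort_includes above encode the whole grouping policy: the first quoted include of a .c/.cpp file is kept as the API header, and everything else is bucketed, deduplicated, and sorted. A reduced sketch of the classifier, restating the startswith checks above:

def bucket(header):
    # header is the raw token after '#include', e.g. '"llvm/ADT/APInt.h"'.
    if header.startswith(("<", '"gtest/', '"isl/', '"json/')):
        return "system"
    if header.startswith(('"clang/', '"clang-c/', '"polly/')):
        return "subproject"
    if header.startswith(('"llvm/', '"llvm-c/')):
        return "llvm"
    return "local"

assert bucket('"llvm/ADT/APInt.h"') == "llvm"
assert bucket("<vector>") == "system"
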
diff --git a/llvm/utils/sysroot.py b/llvm/utils/sysroot.py
index 599197dbd0a9e..b9c7d9db4dda8 100755
--- a/llvm/utils/sysroot.py
+++ b/llvm/utils/sysroot.py
@@ -12,92 +12,100 @@ def make_fake_sysroot(out_dir):
     def cmdout(cmd):
         return subprocess.check_output(cmd).decode(sys.stdout.encoding).strip()
 
-    if sys.platform == 'win32':
+    if sys.platform == "win32":
+
         def mkjunction(dst, src):
-            subprocess.check_call(['mklink', '/j', dst, src], shell=True)
+            subprocess.check_call(["mklink", "/j", dst, src], shell=True)
 
         os.mkdir(out_dir)
-        p = os.getenv('ProgramFiles(x86)', 'C:\\Program Files (x86)')
+        p = os.getenv("ProgramFiles(x86)", "C:\\Program Files (x86)")
 
-        winsdk = os.getenv('WindowsSdkDir')
+        winsdk = os.getenv("WindowsSdkDir")
         if not winsdk:
-            winsdk = os.path.join(p, 'Windows Kits', '10')
-            print('%WindowsSdkDir% not set. You might want to run this from')
-            print('a Visual Studio cmd prompt. Defaulting to', winsdk)
-        os.mkdir(os.path.join(out_dir, 'Windows Kits'))
-        mkjunction(os.path.join(out_dir, 'Windows Kits', '10'), winsdk)
-
-        vswhere = os.path.join(
-                p, 'Microsoft Visual Studio', 'Installer', 'vswhere')
-        vcid = 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64'
+            winsdk = os.path.join(p, "Windows Kits", "10")
+            print("%WindowsSdkDir% not set. You might want to run this from")
+            print("a Visual Studio cmd prompt. Defaulting to", winsdk)
+        os.mkdir(os.path.join(out_dir, "Windows Kits"))
+        mkjunction(os.path.join(out_dir, "Windows Kits", "10"), winsdk)
+
+        vswhere = os.path.join(p, "Microsoft Visual Studio", "Installer", "vswhere")
+        vcid = "Microsoft.VisualStudio.Component.VC.Tools.x86.x64"
         vsinstalldir = cmdout(
-                [vswhere, '-latest', '-products', '*', '-requires', vcid,
-                    '-property', 'installationPath'])
-
-        mkjunction(os.path.join(out_dir, 'VC'),
-                   os.path.join(vsinstalldir, 'VC'))
+            [
+                vswhere,
+                "-latest",
+                "-products",
+                "*",
+                "-requires",
+                vcid,
+                "-property",
+                "installationPath",
+            ]
+        )
+
+        mkjunction(os.path.join(out_dir, "VC"), os.path.join(vsinstalldir, "VC"))
         # Not all MSVC versions ship the DIA SDK, so the junction destination
         # might not exist. That's fine.
-        mkjunction(os.path.join(out_dir, 'DIA SDK'),
-                   os.path.join(vsinstalldir, 'DIA SDK'))
-    elif sys.platform == 'darwin':
+        mkjunction(
+            os.path.join(out_dir, "DIA SDK"), os.path.join(vsinstalldir, "DIA SDK")
+        )
+    elif sys.platform == "darwin":
         # The SDKs used by default in compiler-rt/cmake/base-config-ix.cmake.
         # COMPILER_RT_ENABLE_IOS defaults to on.
         # COMPILER_RT_ENABLE_WATCHOS and COMPILER_RT_ENABLE_TV default to off.
         # compiler-rt/cmake/config-ix.cmake sets DARWIN_EMBEDDED_PLATFORMS
         # depending on these.
-        sdks = ['macosx', 'iphoneos', 'iphonesimulator']
+        sdks = ["macosx", "iphoneos", "iphonesimulator"]
         os.mkdir(out_dir)
         for sdk in sdks:
-          sdkpath = cmdout(['xcrun', '-sdk', sdk, '-show-sdk-path'])
-          # sdkpath is something like /.../SDKs/MacOSX11.1.sdk, which is a
-          # symlink to MacOSX.sdk in the same directory. Resolve the symlink,
-          # to make the symlink in out_dir less likely to break when the SDK
-          # is updated (which will bump the number on xcrun's output, but not
-          # on the symlink destination).
-          sdkpath = os.path.realpath(sdkpath)
-          os.symlink(sdkpath, os.path.join(out_dir, os.path.basename(sdkpath)))
+            sdkpath = cmdout(["xcrun", "-sdk", sdk, "-show-sdk-path"])
+            # sdkpath is something like /.../SDKs/MacOSX11.1.sdk, which is a
+            # symlink to MacOSX.sdk in the same directory. Resolve the symlink,
+            # to make the symlink in out_dir less likely to break when the SDK
+            # is updated (which will bump the number on xcrun's output, but not
+            # on the symlink destination).
+            sdkpath = os.path.realpath(sdkpath)
+            os.symlink(sdkpath, os.path.join(out_dir, os.path.basename(sdkpath)))
     else:
-        os.symlink('/', out_dir)
+        os.symlink("/", out_dir)
 
-    print('Done. Pass these flags to cmake:')
+    print("Done. Pass these flags to cmake:")
     abs_out_dir = os.path.abspath(out_dir)
-    if sys.platform == 'win32':
+    if sys.platform == "win32":
         # CMake doesn't like backslashes in commandline args.
-        abs_out_dir = abs_out_dir.replace(os.path.sep, '/')
-        print('  -DLLVM_WINSYSROOT=' + abs_out_dir)
-    elif sys.platform == 'darwin':
+        abs_out_dir = abs_out_dir.replace(os.path.sep, "/")
+        print("  -DLLVM_WINSYSROOT=" + abs_out_dir)
+    elif sys.platform == "darwin":
         flags = [
-          '-DCMAKE_OSX_SYSROOT=' + os.path.join(abs_out_dir, 'MacOSX.sdk'),
-
-          # For find_darwin_sdk_dir() in
-          # compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake
-          '-DDARWIN_macosx_CACHED_SYSROOT=' +
-              os.path.join(abs_out_dir, 'MacOSX.sdk'),
-          '-DDARWIN_iphoneos_CACHED_SYSROOT=' +
-              os.path.join(abs_out_dir, 'iPhoneOS.sdk'),
-          '-DDARWIN_iphonesimulator_CACHED_SYSROOT=' +
-              os.path.join(abs_out_dir, 'iPhoneSimulator.sdk'),
+            "-DCMAKE_OSX_SYSROOT=" + os.path.join(abs_out_dir, "MacOSX.sdk"),
+            # For find_darwin_sdk_dir() in
+            # compiler-rt/cmake/Modules/CompilerRTDarwinUtils.cmake
+            "-DDARWIN_macosx_CACHED_SYSROOT=" + os.path.join(abs_out_dir, "MacOSX.sdk"),
+            "-DDARWIN_iphoneos_CACHED_SYSROOT="
+            + os.path.join(abs_out_dir, "iPhoneOS.sdk"),
+            "-DDARWIN_iphonesimulator_CACHED_SYSROOT="
+            + os.path.join(abs_out_dir, "iPhoneSimulator.sdk"),
         ]
-        print('  ' + ' '.join(flags))
+        print("  " + " ".join(flags))
     else:
-        print('  -DCMAKE_SYSROOT=' + abs_out_dir + ' to cmake.')
+        print("  -DCMAKE_SYSROOT=" + abs_out_dir + " to cmake.")
 
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
 
-    subparsers = parser.add_subparsers(dest='command', required=True)
+    subparsers = parser.add_subparsers(dest="command", required=True)
 
-    makefake = subparsers.add_parser('make-fake',
-            help='Create a sysroot that symlinks to local directories.')
-    makefake.add_argument('--out-dir', required=True)
+    makefake = subparsers.add_parser(
+        "make-fake", help="Create a sysroot that symlinks to local directories."
+    )
+    makefake.add_argument("--out-dir", required=True)
 
     args = parser.parse_args()
 
-    assert args.command == 'make-fake'
+    assert args.command == "make-fake"
     make_fake_sysroot(args.out_dir)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

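On hosts that are neither Windows nor Darwin, make_fake_sysroot above reduces to a single symlink. An equivalent standalone sketch of that branch, with a hypothetical output directory name:

import os

out_dir = "fake-sysroot"  # hypothetical name, passed as --out-dir above
os.symlink("/", out_dir)  # the fake sysroot is just the real root
print("-DCMAKE_SYSROOT=" + os.path.abspath(out_dir))
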
diff --git a/llvm/utils/testgen/mc-bundling-x86-gen.py b/llvm/utils/testgen/mc-bundling-x86-gen.py
index 1032c9c5a1f3f..a75f94bec814f 100644
--- a/llvm/utils/testgen/mc-bundling-x86-gen.py
+++ b/llvm/utils/testgen/mc-bundling-x86-gen.py
@@ -1,4 +1,3 @@
-
 #!/usr/bin/env python
 
 # Auto-generates an exhaustive and repetitive test for correct bundle-locked
@@ -15,9 +14,9 @@
 import argparse
 
 BUNDLE_SIZE_POW2 = 4
-BUNDLE_SIZE = 2 ** BUNDLE_SIZE_POW2
+BUNDLE_SIZE = 2**BUNDLE_SIZE_POW2
 
-PREAMBLE = '''
+PREAMBLE = """
 # RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \\
 # RUN:   | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s
 
@@ -27,77 +26,84 @@
 
   .text
   .bundle_align_mode {0}
-'''.format(BUNDLE_SIZE_POW2).lstrip()
+""".format(
+    BUNDLE_SIZE_POW2
+).lstrip()
+
+ALIGNTO = "  .align {0}, 0x90"
+NOPFILL = "  .fill {0}, 1, 0x90"
 
-ALIGNTO = '  .align {0}, 0x90'
-NOPFILL = '  .fill {0}, 1, 0x90'
 
 def print_bundle_locked_sequence(len, align_to_end=False):
-  print('  .bundle_lock{0}'.format(' align_to_end' if align_to_end else ''))
-  print('  .rept {0}'.format(len))
-  print('  inc %eax')
-  print('  .endr')
-  print('  .bundle_unlock')
+    print("  .bundle_lock{0}".format(" align_to_end" if align_to_end else ""))
+    print("  .rept {0}".format(len))
+    print("  inc %eax")
+    print("  .endr")
+    print("  .bundle_unlock")
+
 
 def generate(align_to_end=False):
-  print(PREAMBLE)
-
-  ntest = 0
-  for instlen in range(1, BUNDLE_SIZE + 1):
-    for offset in range(0, BUNDLE_SIZE):
-      # Spread out all the instructions to not worry about cross-bundle
-      # interference.
-      print(ALIGNTO.format(2 * BUNDLE_SIZE))
-      print('INSTRLEN_{0}_OFFSET_{1}:'.format(instlen, offset))
-      if offset > 0:
-        print(NOPFILL.format(offset))
-      print_bundle_locked_sequence(instlen, align_to_end)
-
-      # Now generate an appropriate CHECK line
-      base_offset = ntest * 2 * BUNDLE_SIZE
-      inst_orig_offset = base_offset + offset  # had it not been padded...
-
-      def print_check(adjusted_offset=None, nop_split_offset=None):
-        if adjusted_offset is not None:
-          print('# CHECK: {0:x}: nop'.format(inst_orig_offset))
-          if nop_split_offset is not None:
-            print('# CHECK: {0:x}: nop'.format(nop_split_offset))
-          print('# CHECK: {0:x}: incl'.format(adjusted_offset))
-        else:
-          print('# CHECK: {0:x}: incl'.format(inst_orig_offset))
-
-      if align_to_end:
-        if offset + instlen == BUNDLE_SIZE:
-          # No padding needed
-          print_check()
-        elif offset + instlen < BUNDLE_SIZE:
-          # Pad to end at nearest bundle boundary
-          offset_to_end = base_offset + (BUNDLE_SIZE - instlen)
-          print_check(offset_to_end)
-        else: # offset + instlen > BUNDLE_SIZE
-          # Pad to end at next bundle boundary, splitting the nop sequence
-          # at the nearest bundle boundary
-          offset_to_nearest_bundle = base_offset + BUNDLE_SIZE
-          offset_to_end = base_offset + (BUNDLE_SIZE * 2 - instlen)
-          if offset_to_nearest_bundle == offset_to_end:
-            offset_to_nearest_bundle = None
-          print_check(offset_to_end, offset_to_nearest_bundle)
-      else:
-        if offset + instlen > BUNDLE_SIZE:
-          # Padding needed
-          aligned_offset = (inst_orig_offset + instlen) & ~(BUNDLE_SIZE - 1)
-          print_check(aligned_offset)
-        else:
-          # No padding needed
-          print_check()
-
-      print()
-      ntest += 1
-
-if __name__ == '__main__':
-  argparser = argparse.ArgumentParser()
-  argparser.add_argument('--align-to-end',
-                         action='store_true',
-                         help='generate .bundle_lock with align_to_end option')
-  args = argparser.parse_args()
-  generate(align_to_end=args.align_to_end)
+    print(PREAMBLE)
+
+    ntest = 0
+    for instlen in range(1, BUNDLE_SIZE + 1):
+        for offset in range(0, BUNDLE_SIZE):
+            # Spread out all the instructions to not worry about cross-bundle
+            # interference.
+            print(ALIGNTO.format(2 * BUNDLE_SIZE))
+            print("INSTRLEN_{0}_OFFSET_{1}:".format(instlen, offset))
+            if offset > 0:
+                print(NOPFILL.format(offset))
+            print_bundle_locked_sequence(instlen, align_to_end)
+
+            # Now generate an appropriate CHECK line
+            base_offset = ntest * 2 * BUNDLE_SIZE
+            inst_orig_offset = base_offset + offset  # had it not been padded...
+
+            def print_check(adjusted_offset=None, nop_split_offset=None):
+                if adjusted_offset is not None:
+                    print("# CHECK: {0:x}: nop".format(inst_orig_offset))
+                    if nop_split_offset is not None:
+                        print("# CHECK: {0:x}: nop".format(nop_split_offset))
+                    print("# CHECK: {0:x}: incl".format(adjusted_offset))
+                else:
+                    print("# CHECK: {0:x}: incl".format(inst_orig_offset))
+
+            if align_to_end:
+                if offset + instlen == BUNDLE_SIZE:
+                    # No padding needed
+                    print_check()
+                elif offset + instlen < BUNDLE_SIZE:
+                    # Pad to end at nearest bundle boundary
+                    offset_to_end = base_offset + (BUNDLE_SIZE - instlen)
+                    print_check(offset_to_end)
+                else:  # offset + instlen > BUNDLE_SIZE
+                    # Pad to end at next bundle boundary, splitting the nop sequence
+                    # at the nearest bundle boundary
+                    offset_to_nearest_bundle = base_offset + BUNDLE_SIZE
+                    offset_to_end = base_offset + (BUNDLE_SIZE * 2 - instlen)
+                    if offset_to_nearest_bundle == offset_to_end:
+                        offset_to_nearest_bundle = None
+                    print_check(offset_to_end, offset_to_nearest_bundle)
+            else:
+                if offset + instlen > BUNDLE_SIZE:
+                    # Padding needed
+                    aligned_offset = (inst_orig_offset + instlen) & ~(BUNDLE_SIZE - 1)
+                    print_check(aligned_offset)
+                else:
+                    # No padding needed
+                    print_check()
+
+            print()
+            ntest += 1
+
+
+if __name__ == "__main__":
+    argparser = argparse.ArgumentParser()
+    argparser.add_argument(
+        "--align-to-end",
+        action="store_true",
+        help="generate .bundle_lock with align_to_end option",
+    )
+    args = argparser.parse_args()
+    generate(align_to_end=args.align_to_end)

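The one non-obvious computation in the generator above is the padded-offset arithmetic for the non-align_to_end case. A worked example using the script's BUNDLE_SIZE of 16:

BUNDLE_SIZE = 16  # 2**BUNDLE_SIZE_POW2, as defined above
inst_orig_offset, instlen = 13, 5  # 13 + 5 = 18 crosses the 16-byte boundary
aligned_offset = (inst_orig_offset + instlen) & ~(BUNDLE_SIZE - 1)
assert aligned_offset == 16  # the incl lands at the next bundle start
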
diff --git a/llvm/utils/unicode-case-fold.py b/llvm/utils/unicode-case-fold.py
index ad8265ba7bc1b..9639aa0dc44b4 100755
--- a/llvm/utils/unicode-case-fold.py
+++ b/llvm/utils/unicode-case-fold.py
@@ -21,6 +21,7 @@
 
 import sys
 import re
+
 try:
     from urllib.request import urlopen
 except ImportError:
@@ -34,10 +35,11 @@
 # returns a (from_char, to_char, from_name) tuple.
 def mappings(f):
     previous_from = -1
-    expr = re.compile(r'^(.*); [CS]; (.*); # (.*)')
+    expr = re.compile(r"^(.*); [CS]; (.*); # (.*)")
     for line in f:
         m = expr.match(line)
-        if not m: continue
+        if not m:
+            continue
         from_char = int(m.group(1), 16)
         to_char = int(m.group(2), 16)
         from_name = m.group(3)
@@ -47,14 +49,17 @@ def mappings(f):
         yield from_char, to_char, from_name
         previous_from = from_char
 
+
 # Computes the shift (to_char - from_char) in a mapping.
 def shift(mapping):
     return mapping[1] - mapping[0]
 
+
 # Computes the stride (from_char2 - from_char1) of two mappings.
 def stride2(mapping1, mapping2):
     return mapping2[0] - mapping1[0]
 
+
 # Computes the stride of a list of mappings. The list should have at least two
 # mappings. All mappings in the list are assumed to have the same stride.
 def stride(block):
@@ -71,11 +76,11 @@ def dump_block(b):
         # emit the "if (C < X) return C" check below as all characters in this
         # range will be caught by the "C < X" check emitted by the first
         # non-trivial block.
-        body  += "  // {2}\n  if (C == {0:#06x})\n    return {1:#06x};\n".format(*b[0])
+        body += "  // {2}\n  if (C == {0:#06x})\n    return {1:#06x};\n".format(*b[0])
         return
 
     first = b[0][0]
-    last = first + stride(b) * (len(b)-1)
+    last = first + stride(b) * (len(b) - 1)
     modulo = first % stride(b)
 
     # All characters before this block map to themselves.
@@ -98,6 +103,7 @@ def dump_block(b):
 
     body += pattern.format(last, stride(b), modulo, shift(b[0]))
 
+
 current_block = []
 f = urlopen(sys.argv[1])
 for m in mappings(f):
@@ -111,7 +117,9 @@ def dump_block(b):
         current_block = [m]
         continue
 
-    if len(current_block) == 1 or stride(current_block) == stride2(current_block[-1], m):
+    if len(current_block) == 1 or stride(current_block) == stride2(
+        current_block[-1], m
+    ):
         current_block.append(m)
         continue
 
@@ -122,21 +130,25 @@ def dump_block(b):
 
 dump_block(current_block)
 
-print('//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//')
-print('//')
-print('// This file was generated by utils/unicode-case-fold.py from the Unicode')
-print('// case folding database at')
-print('//   ', sys.argv[1])
-print('//')
-print('// To regenerate this file, run:')
-print('//   utils/unicode-case-fold.py \\')
+print(
+    "//===---------- Support/UnicodeCaseFold.cpp -------------------------------===//"
+)
+print("//")
+print("// This file was generated by utils/unicode-case-fold.py from the Unicode")
+print("// case folding database at")
+print("//   ", sys.argv[1])
+print("//")
+print("// To regenerate this file, run:")
+print("//   utils/unicode-case-fold.py \\")
 print('//     "{}" \\'.format(sys.argv[1]))
-print('//     > lib/Support/UnicodeCaseFold.cpp')
-print('//')
-print('//===----------------------------------------------------------------------===//')
-print('')
+print("//     > lib/Support/UnicodeCaseFold.cpp")
+print("//")
+print(
+    "//===----------------------------------------------------------------------===//"
+)
+print("")
 print('#include "llvm/Support/Unicode.h"')
-print('')
+print("")
 print("int llvm::sys::unicode::foldCharSimple(int C) {")
 print(body)
 print("  return C;")

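The shift/stride compression above is easiest to see on the ASCII range, where the uppercase letters fold with a single constant shift and the generated C++ can collapse the whole run into one range check. A small numeric sketch in terms of the helpers above:

# 'A'..'Z' -> 'a'..'z' as (from_char, to_char) mappings.
block = [(0x41 + i, 0x61 + i) for i in range(26)]
shift = block[0][1] - block[0][0]   # 32, cf. shift() above
stride = block[1][0] - block[0][0]  # 1, cf. stride2() above
assert all(t - f == shift for f, t in block)
# Emitted check is roughly: if (C <= 0x5a && C % 1 == 0) return C + 32;
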
diff --git a/llvm/utils/update_analyze_test_checks.py b/llvm/utils/update_analyze_test_checks.py
index 0e19bd22d926b..ac0df3c51176d 100755
--- a/llvm/utils/update_analyze_test_checks.py
+++ b/llvm/utils/update_analyze_test_checks.py
@@ -32,160 +32,194 @@
 from __future__ import print_function
 
 import argparse
-import os         # Used to advertise this file's name ("autogenerated_note").
+import os  # Used to advertise this file's name ("autogenerated_note").
 import sys
 import re
 
 from UpdateTestChecks import common
 
+
 def main():
-  from argparse import RawTextHelpFormatter
-  parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
-  parser.add_argument('--opt-binary', default='opt',
-                      help='The opt binary used to generate the test case')
-  parser.add_argument(
-      '--function', help='The function in the test file to update')
-  parser.add_argument('tests', nargs='+')
-  initial_args = common.parse_commandline_args(parser)
-
-  script_name = os.path.basename(__file__)
-
-  opt_basename = os.path.basename(initial_args.opt_binary)
-  if (opt_basename != "opt"):
-    common.error('Unexpected opt name: ' + opt_basename)
-    sys.exit(1)
-
-  for ti in common.itertests(initial_args.tests, parser,
-                             script_name='utils/' + script_name):
-    triple_in_ir = None
-    for l in ti.input_lines:
-      m = common.TRIPLE_IR_RE.match(l)
-      if m:
-        triple_in_ir = m.groups()[0]
-        break
-
-    prefix_list = []
-    for l in ti.run_lines:
-      if '|' not in l:
-        common.warn('Skipping unparsable RUN line: ' + l)
-        continue
-
-      (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)])
-      common.verify_filecheck_prefixes(filecheck_cmd)
-
-      if not tool_cmd.startswith(opt_basename + ' '):
-        common.warn('WSkipping non-%s RUN line: %s' % (opt_basename, l))
-        continue
-
-      if not filecheck_cmd.startswith('FileCheck '):
-        common.warn('Skipping non-FileChecked RUN line: ' + l)
-        continue
-
-      tool_cmd_args = tool_cmd[len(opt_basename):].strip()
-      tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
-      check_prefixes = common.get_check_prefixes(filecheck_cmd)
-
-      # FIXME: We should use multiple check prefixes to common check lines. For
-      # now, we just ignore all but the last.
-      prefix_list.append((check_prefixes, tool_cmd_args))
-
-    builder = common.FunctionTestBuilder(
-      run_list = prefix_list,
-      flags = type('', (object,), {
-            'verbose': ti.args.verbose,
-            'filters': ti.args.filters,
-            'function_signature': False,
-            'check_attributes': False,
-            'replace_value_regex': []}),
-      scrubber_args = [],
-      path=ti.path)
-
-    for prefixes, opt_args in prefix_list:
-      common.debug('Extracted opt cmd:', opt_basename, opt_args, file=sys.stderr)
-      common.debug('Extracted FileCheck prefixes:', str(prefixes), file=sys.stderr)
-
-      raw_tool_outputs = common.invoke_tool(ti.args.opt_binary, opt_args, ti.path)
-
-      if re.search(r'Printing analysis ', raw_tool_outputs) is not None:
-        # Split analysis outputs by "Printing analysis " declarations.
-        for raw_tool_output in re.split(r'Printing analysis ', raw_tool_outputs):
-          builder.process_run_line(common.ANALYZE_FUNCTION_RE, common.scrub_body,
-                                  raw_tool_output, prefixes, False)
-      elif re.search(r'LV: Checking a loop in ', raw_tool_outputs) is not None:
-        # Split analysis outputs by "Printing analysis " declarations.
-        for raw_tool_output in re.split(r'LV: Checking a loop in ', raw_tool_outputs):
-          builder.process_run_line(common.LV_DEBUG_RE, common.scrub_body,
-                                  raw_tool_output, prefixes, False)
-      else:
-        common.warn('Don\'t know how to deal with this output')
-        continue
-
-      builder.processed_prefixes(prefixes)
-
-    func_dict = builder.finish_and_get_func_dict()
-    is_in_function = False
-    is_in_function_start = False
-    prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
-    common.debug('Rewriting FileCheck prefixes:', str(prefix_set), file=sys.stderr)
-    output_lines = []
-
-    generated_prefixes = []
-    for input_info in ti.iterlines(output_lines):
-      input_line = input_info.line
-      args = input_info.args
-      if is_in_function_start:
-        if input_line == '':
-          continue
-        if input_line.lstrip().startswith(';'):
-          m = common.CHECK_RE.match(input_line)
-          if not m or m.group(1) not in prefix_set:
-            output_lines.append(input_line)
-            continue
-
-        # Print out the various check lines here.
-        generated_prefixes.extend(
-            common.add_analyze_checks(
-                output_lines,
-                ';',
-                prefix_list,
-                func_dict,
-                func_name,
-                is_filtered=builder.is_filtered()))
+    from argparse import RawTextHelpFormatter
+
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=RawTextHelpFormatter
+    )
+    parser.add_argument(
+        "--opt-binary",
+        default="opt",
+        help="The opt binary used to generate the test case",
+    )
+    parser.add_argument("--function", help="The function in the test file to update")
+    parser.add_argument("tests", nargs="+")
+    initial_args = common.parse_commandline_args(parser)
+
+    script_name = os.path.basename(__file__)
+
+    opt_basename = os.path.basename(initial_args.opt_binary)
+    if opt_basename != "opt":
+        common.error("Unexpected opt name: " + opt_basename)
+        sys.exit(1)
+
+    for ti in common.itertests(
+        initial_args.tests, parser, script_name="utils/" + script_name
+    ):
+        triple_in_ir = None
+        for l in ti.input_lines:
+            m = common.TRIPLE_IR_RE.match(l)
+            if m:
+                triple_in_ir = m.groups()[0]
+                break
+
+        prefix_list = []
+        for l in ti.run_lines:
+            if "|" not in l:
+                common.warn("Skipping unparsable RUN line: " + l)
+                continue
+
+            (tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split("|", 1)])
+            common.verify_filecheck_prefixes(filecheck_cmd)
+
+            if not tool_cmd.startswith(opt_basename + " "):
+                common.warn("WSkipping non-%s RUN line: %s" % (opt_basename, l))
+                continue
+
+            if not filecheck_cmd.startswith("FileCheck "):
+                common.warn("Skipping non-FileChecked RUN line: " + l)
+                continue
+
+            tool_cmd_args = tool_cmd[len(opt_basename) :].strip()
+            tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
+            check_prefixes = common.get_check_prefixes(filecheck_cmd)
+
+            # FIXME: We should use multiple check prefixes to common check lines. For
+            # now, we just ignore all but the last.
+            prefix_list.append((check_prefixes, tool_cmd_args))
+
+        builder = common.FunctionTestBuilder(
+            run_list=prefix_list,
+            flags=type(
+                "",
+                (object,),
+                {
+                    "verbose": ti.args.verbose,
+                    "filters": ti.args.filters,
+                    "function_signature": False,
+                    "check_attributes": False,
+                    "replace_value_regex": [],
+                },
+            ),
+            scrubber_args=[],
+            path=ti.path,
+        )
+
+        for prefixes, opt_args in prefix_list:
+            common.debug("Extracted opt cmd:", opt_basename, opt_args, file=sys.stderr)
+            common.debug(
+                "Extracted FileCheck prefixes:", str(prefixes), file=sys.stderr
+            )
+
+            raw_tool_outputs = common.invoke_tool(ti.args.opt_binary, opt_args, ti.path)
+
+            if re.search(r"Printing analysis ", raw_tool_outputs) is not None:
+                # Split analysis outputs by "Printing analysis " declarations.
+                for raw_tool_output in re.split(
+                    r"Printing analysis ", raw_tool_outputs
+                ):
+                    builder.process_run_line(
+                        common.ANALYZE_FUNCTION_RE,
+                        common.scrub_body,
+                        raw_tool_output,
+                        prefixes,
+                        False,
+                    )
+            elif re.search(r"LV: Checking a loop in ", raw_tool_outputs) is not None:
+                # Split analysis outputs by "LV: Checking a loop in " declarations.
+                for raw_tool_output in re.split(
+                    r"LV: Checking a loop in ", raw_tool_outputs
+                ):
+                    builder.process_run_line(
+                        common.LV_DEBUG_RE,
+                        common.scrub_body,
+                        raw_tool_output,
+                        prefixes,
+                        False,
+                    )
+            else:
+                common.warn("Don't know how to deal with this output")
+                continue
+
+            builder.processed_prefixes(prefixes)
+
+        func_dict = builder.finish_and_get_func_dict()
+        is_in_function = False
         is_in_function_start = False
+        prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
+        common.debug("Rewriting FileCheck prefixes:", str(prefix_set), file=sys.stderr)
+        output_lines = []
+
+        generated_prefixes = []
+        for input_info in ti.iterlines(output_lines):
+            input_line = input_info.line
+            args = input_info.args
+            if is_in_function_start:
+                if input_line == "":
+                    continue
+                if input_line.lstrip().startswith(";"):
+                    m = common.CHECK_RE.match(input_line)
+                    if not m or m.group(1) not in prefix_set:
+                        output_lines.append(input_line)
+                        continue
+
+                # Print out the various check lines here.
+                generated_prefixes.extend(
+                    common.add_analyze_checks(
+                        output_lines,
+                        ";",
+                        prefix_list,
+                        func_dict,
+                        func_name,
+                        is_filtered=builder.is_filtered(),
+                    )
+                )
+                is_in_function_start = False
+
+            if is_in_function:
+                if common.should_add_line_to_output(input_line, prefix_set):
+                    # This input line of the function body will go as-is into the output.
+                    # Except make leading whitespace uniform: 2 spaces.
+                    input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(
+                        r"  ", input_line
+                    )
+                    output_lines.append(input_line)
+                else:
+                    continue
+                if input_line.strip() == "}":
+                    is_in_function = False
+                continue
+
+            # If it's outside a function, it just gets copied to the output.
+            output_lines.append(input_line)
+
+            m = common.IR_FUNCTION_RE.match(input_line)
+            if not m:
+                continue
+            func_name = m.group(1)
+            if ti.args.function is not None and func_name != ti.args.function:
+                # When filtering on a specific function, skip all others.
+                continue
+            is_in_function = is_in_function_start = True
+
+        if ti.args.gen_unused_prefix_body:
+            output_lines.extend(
+                ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
+            )
+
+        common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+
+        with open(ti.path, "wb") as f:
+            f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
 
-      if is_in_function:
-        if common.should_add_line_to_output(input_line, prefix_set):
-          # This input line of the function body will go as-is into the output.
-          # Except make leading whitespace uniform: 2 spaces.
-          input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r'  ', input_line)
-          output_lines.append(input_line)
-        else:
-          continue
-        if input_line.strip() == '}':
-          is_in_function = False
-        continue
-
-      # If it's outside a function, it just gets copied to the output.
-      output_lines.append(input_line)
-
-      m = common.IR_FUNCTION_RE.match(input_line)
-      if not m:
-        continue
-      func_name = m.group(1)
-      if ti.args.function is not None and func_name != ti.args.function:
-        # When filtering on a specific function, skip all others.
-        continue
-      is_in_function = is_in_function_start = True
-
-    if ti.args.gen_unused_prefix_body:
-      output_lines.extend(
-          ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes))
-
-    common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))
-
-    with open(ti.path, 'wb') as f:
-      f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
-
-
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()
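
A side note on the builder construction above: type("", (object,), {...}) manufactures an
anonymous class whose class attributes stand in for argparse flags, which is all
FunctionTestBuilder needs. A minimal sketch of the same trick, with illustrative flag names
only (types.SimpleNamespace would be the more idiomatic modern spelling):

    # Anonymous class; attribute lookup on the class itself serves as a
    # lightweight, read-only flags namespace.
    flags = type("", (object,), {"verbose": True, "filters": []})
    assert flags.verbose is True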

diff --git a/llvm/utils/update_any_test_checks.py b/llvm/utils/update_any_test_checks.py
index 33b6689b36fd6..2ad852d710c07 100755
--- a/llvm/utils/update_any_test_checks.py
+++ b/llvm/utils/update_any_test_checks.py
@@ -17,99 +17,115 @@
 from concurrent.futures import ThreadPoolExecutor
 
 RE_ASSERTIONS = re.compile(
-    r'NOTE: Assertions have been autogenerated by ([^\s]+)( UTC_ARGS:.*)?$')
+    r"NOTE: Assertions have been autogenerated by ([^\s]+)( UTC_ARGS:.*)?$"
+)
+
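For orientation, RE_ASSERTIONS is matched against a test's first line to discover which
update script generated it. A sketch reusing the regex above; the header text is a typical
example, not taken from a specific test:

    header = "; NOTE: Assertions have been autogenerated by utils/update_test_checks.py"
    m = RE_ASSERTIONS.search(header)
    assert m is not None and m.group(1) == "utils/update_test_checks.py"
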
 
 def find_utc_tool(search_path, utc_name):
-  """
-  Return the path to the given UTC tool in the search path, or None if not
-  found.
-  """
-  for path in search_path:
-    candidate = os.path.join(path, utc_name)
-    if os.path.isfile(candidate):
-      return candidate
-  return None
+    """
+    Return the path to the given UTC tool in the search path, or None if not
+    found.
+    """
+    for path in search_path:
+        candidate = os.path.join(path, utc_name)
+        if os.path.isfile(candidate):
+            return candidate
+    return None
+
 
 def run_utc_tool(utc_name, utc_tool, testname):
-  result = subprocess.run([utc_tool, testname], stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE)
-  return (result.returncode, result.stdout, result.stderr)
+    result = subprocess.run(
+        [utc_tool, testname], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+    )
+    return (result.returncode, result.stdout, result.stderr)
+
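As an aside, on Python 3.7+ the stdout/stderr PIPE pair in run_utc_tool can be spelled more
compactly with capture_output=True; a sketch with placeholder values:

    import subprocess

    utc_tool, testname = "./update_test_checks.py", "test.ll"  # placeholders
    result = subprocess.run([utc_tool, testname], capture_output=True)
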
 
 def main():
-  from argparse import RawTextHelpFormatter
-  parser = argparse.ArgumentParser(description=__doc__,
-                                   formatter_class=RawTextHelpFormatter)
-  parser.add_argument(
-      '--jobs', '-j', default=1, type=int,
-      help='Run the given number of jobs in parallel')
-  parser.add_argument(
-      '--utc-dir', nargs='*',
-      help='Additional directories to scan for update_*_test_checks scripts')
-  parser.add_argument('tests', nargs='+')
-  config = parser.parse_args()
-
-  if config.utc_dir:
-    utc_search_path = config.utc_dir[:]
-  else:
-    utc_search_path = []
-  script_name = os.path.abspath(__file__)
-  utc_search_path.append(os.path.join(os.path.dirname(script_name),
-                                      os.path.pardir))
-
-  not_autogenerated = []
-  utc_tools = {}
-  have_error = False
-
-  with ThreadPoolExecutor(max_workers=config.jobs) as executor:
-    jobs = []
-
-    for testname in config.tests:
-      with open(testname, 'r') as f:
-        header = f.readline().strip()
-        m = RE_ASSERTIONS.search(header)
-        if m is None:
-          not_autogenerated.append(testname)
-          continue
-
-        utc_name = m.group(1)
-        if utc_name not in utc_tools:
-          utc_tools[utc_name] = find_utc_tool(utc_search_path, utc_name)
-          if not utc_tools[utc_name]:
-            print(f"{utc_name}: not found (used in {testname})",
-                  file=sys.stderr)
-            have_error = True
-            continue
-
-        future = executor.submit(run_utc_tool, utc_name, utc_tools[utc_name],
-                                 testname)
-        jobs.append((testname, future))
-
-    for testname, future in jobs:
-      return_code, stdout, stderr = future.result()
-
-      print(f"Update {testname}")
-      stdout = stdout.decode(errors='replace')
-      if stdout:
-        print(stdout, end='')
-        if not stdout.endswith('\n'):
-          print()
-
-      stderr = stderr.decode(errors='replace')
-      if stderr:
-        print(stderr, end='')
-        if not stderr.endswith('\n'):
-          print()
-      if return_code != 0:
-        print(f"Return code: {return_code}")
-        have_error = True
-
-  if have_error:
-    sys.exit(1)
-
-  if not_autogenerated:
-    print("Tests without autogenerated assertions:")
-    for testname in not_autogenerated:
-      print(f"  {testname}")
-
-if __name__ == '__main__':
-  main()
+    from argparse import RawTextHelpFormatter
+
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=RawTextHelpFormatter
+    )
+    parser.add_argument(
+        "--jobs",
+        "-j",
+        default=1,
+        type=int,
+        help="Run the given number of jobs in parallel",
+    )
+    parser.add_argument(
+        "--utc-dir",
+        nargs="*",
+        help="Additional directories to scan for update_*_test_checks scripts",
+    )
+    parser.add_argument("tests", nargs="+")
+    config = parser.parse_args()
+
+    if config.utc_dir:
+        utc_search_path = config.utc_dir[:]
+    else:
+        utc_search_path = []
+    script_name = os.path.abspath(__file__)
+    utc_search_path.append(os.path.join(os.path.dirname(script_name), os.path.pardir))
+
+    not_autogenerated = []
+    utc_tools = {}
+    have_error = False
+
+    with ThreadPoolExecutor(max_workers=config.jobs) as executor:
+        jobs = []
+
+        for testname in config.tests:
+            with open(testname, "r") as f:
+                header = f.readline().strip()
+                m = RE_ASSERTIONS.search(header)
+                if m is None:
+                    not_autogenerated.append(testname)
+                    continue
+
+                utc_name = m.group(1)
+                if utc_name not in utc_tools:
+                    utc_tools[utc_name] = find_utc_tool(utc_search_path, utc_name)
+                    if not utc_tools[utc_name]:
+                        print(
+                            f"{utc_name}: not found (used in {testname})",
+                            file=sys.stderr,
+                        )
+                        have_error = True
+                        continue
+
+                future = executor.submit(
+                    run_utc_tool, utc_name, utc_tools[utc_name], testname
+                )
+                jobs.append((testname, future))
+
+        for testname, future in jobs:
+            return_code, stdout, stderr = future.result()
+
+            print(f"Update {testname}")
+            stdout = stdout.decode(errors="replace")
+            if stdout:
+                print(stdout, end="")
+                if not stdout.endswith("\n"):
+                    print()
+
+            stderr = stderr.decode(errors="replace")
+            if stderr:
+                print(stderr, end="")
+                if not stderr.endswith("\n"):
+                    print()
+            if return_code != 0:
+                print(f"Return code: {return_code}")
+                have_error = True
+
+    if have_error:
+        sys.exit(1)
+
+    if not_autogenerated:
+        print("Tests without autogenerated assertions:")
+        for testname in not_autogenerated:
+            print(f"  {testname}")
+
+
+if __name__ == "__main__":
+    main()
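
A typical invocation of the script above, with a hypothetical test path, would be:

    % llvm/utils/update_any_test_checks.py -j8 llvm/test/Transforms/InstSimplify/foo.ll

Each listed test is dispatched to whichever update_*_test_checks script its first-line
NOTE header names, with up to eight updates running in parallel here.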

diff --git a/llvm/utils/update_cc_test_checks.py b/llvm/utils/update_cc_test_checks.py
index 0861fe8c18948..e96d4167e6567 100755
--- a/llvm/utils/update_cc_test_checks.py
+++ b/llvm/utils/update_cc_test_checks.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-'''A utility to update LLVM IR CHECK lines in C/C++ FileCheck test files.
+"""A utility to update LLVM IR CHECK lines in C/C++ FileCheck test files.
 
 Example RUN lines in .c/.cc test files:
 
@@ -10,7 +10,7 @@
 
 % utils/update_cc_test_checks.py --llvm-bin=release/bin test/a.cc
 % utils/update_cc_test_checks.py --clang=release/bin/clang /tmp/c/a.cc
-'''
+"""
 
 from __future__ import print_function
 
@@ -28,419 +28,531 @@
 from UpdateTestChecks import common
 
 SUBST = {
-    '%clang': [],
-    '%clang_cc1': ['-cc1'],
-    '%clangxx': ['--driver-mode=g++'],
+    "%clang": [],
+    "%clang_cc1": ["-cc1"],
+    "%clangxx": ["--driver-mode=g++"],
 }
 
+
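Concretely, SUBST rewrites the leading tool token of a clang RUN line into extra clang
arguments; main() below does this via clang_args[0:1] = SUBST[clang_args[0]]. A sketch with
a made-up RUN command, reusing SUBST from above:

    import shlex

    exec_args = shlex.split("%clang_cc1 -triple x86_64 -emit-llvm %s -o -")
    exec_args[0:1] = SUBST[exec_args[0]]
    # exec_args now begins with "-cc1"; config() later extends this SUBST entry
    # with -internal-isystem <builtin include dir> -nostdsysteminc.
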
 def get_line2func_list(args, clang_args):
-  ret = collections.defaultdict(list)
-  # Use clang's JSON AST dump to get the mangled name
-  json_dump_args = [args.clang] + clang_args + ['-fsyntax-only', '-o', '-']
-  if '-cc1' not in json_dump_args:
-  # For tests that invoke %clang instead of %clang_cc1 we have to use
-    # -Xclang -ast-dump=json instead:
-    json_dump_args.append('-Xclang')
-  json_dump_args.append('-ast-dump=json')
-  common.debug('Running', ' '.join(json_dump_args))
-
-  popen = subprocess.Popen(json_dump_args, stdout=subprocess.PIPE,
-                           stderr=subprocess.PIPE, universal_newlines=True)
-  stdout, stderr = popen.communicate()
-  if popen.returncode != 0:
-    sys.stderr.write('Failed to run ' + ' '.join(json_dump_args) + '\n')
-    sys.stderr.write(stderr)
-    sys.stderr.write(stdout)
-    sys.exit(2)
-
-  # Parse the clang JSON and add all children of type FunctionDecl.
-  # TODO: Should we add checks for global variables being emitted?
-  def parse_clang_ast_json(node, loc, search):
-    node_kind = node['kind']
-    # Recurse for the following nodes that can contain nested function decls:
-    if node_kind in ('NamespaceDecl', 'LinkageSpecDecl', 'TranslationUnitDecl',
-                     'CXXRecordDecl', 'ClassTemplateSpecializationDecl'):
-      # Specializations must use the loc from the specialization, not the
-      # template, and search for the class's spelling as the specialization
-      # does not mention the method names in the source.
-      if node_kind == 'ClassTemplateSpecializationDecl':
-        inner_loc = node['loc']
-        inner_search = node['name']
-      else:
-        inner_loc = None
-        inner_search = None
-      if 'inner' in node:
-        for inner in node['inner']:
-          parse_clang_ast_json(inner, inner_loc, inner_search)
-    # Otherwise we ignore everything except functions:
-    if node_kind not in ('FunctionDecl', 'CXXMethodDecl', 'CXXConstructorDecl',
-                         'CXXDestructorDecl', 'CXXConversionDecl'):
-      return
-    if loc is None:
-      loc = node['loc']
-    if node.get('isImplicit') is True and node.get('storageClass') == 'extern':
-      common.debug('Skipping builtin function:', node['name'], '@', loc)
-      return
-    common.debug('Found function:', node['kind'], node['name'], '@', loc)
-    line = loc.get('line')
-    # If there is no line it is probably a builtin function -> skip
-    if line is None:
-      common.debug('Skipping function without line number:', node['name'], '@', loc)
-      return
-
-    # If there is no 'inner' object, it is a function declaration and we can
-    # skip it. However, function declarations may also contain an 'inner' list,
-  # but in that case it will only contain ParmVarDecls. If we find an entry
-    # that is not a ParmVarDecl, we know that this is a function definition.
-    has_body = False
-    if 'inner' in node:
-      for i in node['inner']:
-        if i.get('kind', 'ParmVarDecl') != 'ParmVarDecl':
-          has_body = True
-          break
-    if not has_body:
-      common.debug('Skipping function without body:', node['name'], '@', loc)
-      return
-    spell = node['name']
-    if search is None:
-      search = spell
-    mangled = node.get('mangledName', spell)
-    ret[int(line)-1].append((spell, mangled, search))
-
-  ast = json.loads(stdout)
-  if ast['kind'] != 'TranslationUnitDecl':
-    common.error('Clang AST dump JSON format changed?')
-    sys.exit(2)
-  parse_clang_ast_json(ast, None, None)
-
-  for line, funcs in sorted(ret.items()):
-    for func in funcs:
-      common.debug('line {}: found function {}'.format(line+1, func), file=sys.stderr)
-  if not ret:
-    common.warn('Did not find any functions using', ' '.join(json_dump_args))
-  return ret
+    ret = collections.defaultdict(list)
+    # Use clang's JSON AST dump to get the mangled name
+    json_dump_args = [args.clang] + clang_args + ["-fsyntax-only", "-o", "-"]
+    if "-cc1" not in json_dump_args:
+        # For tests that invoke %clang instead of %clang_cc1 we have to use
+        # -Xclang -ast-dump=json instead:
+        json_dump_args.append("-Xclang")
+    json_dump_args.append("-ast-dump=json")
+    common.debug("Running", " ".join(json_dump_args))
+
+    popen = subprocess.Popen(
+        json_dump_args,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        universal_newlines=True,
+    )
+    stdout, stderr = popen.communicate()
+    if popen.returncode != 0:
+        sys.stderr.write("Failed to run " + " ".join(json_dump_args) + "\n")
+        sys.stderr.write(stderr)
+        sys.stderr.write(stdout)
+        sys.exit(2)
+
+    # Parse the clang JSON and add all children of type FunctionDecl.
+    # TODO: Should we add checks for global variables being emitted?
+    def parse_clang_ast_json(node, loc, search):
+        node_kind = node["kind"]
+        # Recurse for the following nodes that can contain nested function decls:
+        if node_kind in (
+            "NamespaceDecl",
+            "LinkageSpecDecl",
+            "TranslationUnitDecl",
+            "CXXRecordDecl",
+            "ClassTemplateSpecializationDecl",
+        ):
+            # Specializations must use the loc from the specialization, not the
+            # template, and search for the class's spelling as the specialization
+            # does not mention the method names in the source.
+            if node_kind == "ClassTemplateSpecializationDecl":
+                inner_loc = node["loc"]
+                inner_search = node["name"]
+            else:
+                inner_loc = None
+                inner_search = None
+            if "inner" in node:
+                for inner in node["inner"]:
+                    parse_clang_ast_json(inner, inner_loc, inner_search)
+        # Otherwise we ignore everything except functions:
+        if node_kind not in (
+            "FunctionDecl",
+            "CXXMethodDecl",
+            "CXXConstructorDecl",
+            "CXXDestructorDecl",
+            "CXXConversionDecl",
+        ):
+            return
+        if loc is None:
+            loc = node["loc"]
+        if node.get("isImplicit") is True and node.get("storageClass") == "extern":
+            common.debug("Skipping builtin function:", node["name"], "@", loc)
+            return
+        common.debug("Found function:", node["kind"], node["name"], "@", loc)
+        line = loc.get("line")
+        # If there is no line it is probably a builtin function -> skip
+        if line is None:
+            common.debug(
+                "Skipping function without line number:", node["name"], "@", loc
+            )
+            return
+
+        # If there is no 'inner' object, it is a function declaration and we can
+        # skip it. However, function declarations may also contain an 'inner' list,
+        # but in that case it will only contain ParmVarDecls. If we find an entry
+        # that is not a ParmVarDecl, we know that this is a function definition.
+        has_body = False
+        if "inner" in node:
+            for i in node["inner"]:
+                if i.get("kind", "ParmVarDecl") != "ParmVarDecl":
+                    has_body = True
+                    break
+        if not has_body:
+            common.debug("Skipping function without body:", node["name"], "@", loc)
+            return
+        spell = node["name"]
+        if search is None:
+            search = spell
+        mangled = node.get("mangledName", spell)
+        ret[int(line) - 1].append((spell, mangled, search))
+
+    ast = json.loads(stdout)
+    if ast["kind"] != "TranslationUnitDecl":
+        common.error("Clang AST dump JSON format changed?")
+        sys.exit(2)
+    parse_clang_ast_json(ast, None, None)
+
+    for line, funcs in sorted(ret.items()):
+        for func in funcs:
+            common.debug(
+                "line {}: found function {}".format(line + 1, func), file=sys.stderr
+            )
+    if not ret:
+        common.warn("Did not find any functions using", " ".join(json_dump_args))
+    return ret
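
The mapping returned above is keyed by zero-based source line, with one tuple per function
found on that line. A sketch of its shape; the function name and mangling are hypothetical:

    import collections

    ret = collections.defaultdict(list)
    # "void foo()" defined on source line 12, stored 0-based:
    ret[11].append(("foo", "_Z3foov", "foo"))  # (spelling, mangled, search)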
 
 
 def str_to_commandline(value):
-  if not value:
-    return []
-  return shlex.split(value)
+    if not value:
+        return []
+    return shlex.split(value)
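
For example, shlex.split gives shell-style tokenization, so quoted arguments survive as
single tokens and empty input yields an empty list:

    assert str_to_commandline('-mllvm "-debug-only=inline"') == ["-mllvm", "-debug-only=inline"]
    assert str_to_commandline("") == []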
 
 
 def infer_dependent_args(args):
-  if not args.clang:
-    if not args.llvm_bin:
-      args.clang = 'clang'
-    else:
-      args.clang = os.path.join(args.llvm_bin, 'clang')
-  if not args.opt:
-    if not args.llvm_bin:
-      args.opt = 'opt'
-    else:
-      args.opt = os.path.join(args.llvm_bin, 'opt')
+    if not args.clang:
+        if not args.llvm_bin:
+            args.clang = "clang"
+        else:
+            args.clang = os.path.join(args.llvm_bin, "clang")
+    if not args.opt:
+        if not args.llvm_bin:
+            args.opt = "opt"
+        else:
+            args.opt = os.path.join(args.llvm_bin, "opt")
 
 
 def find_executable(executable):
-  _, ext = os.path.splitext(executable)
-  if sys.platform == 'win32' and ext != '.exe':
-    executable = executable + '.exe'
+    _, ext = os.path.splitext(executable)
+    if sys.platform == "win32" and ext != ".exe":
+        executable = executable + ".exe"
 
-  return shutil.which(executable)
+    return shutil.which(executable)
 
 
 def config():
-  parser = argparse.ArgumentParser(
-      description=__doc__,
-      formatter_class=argparse.RawTextHelpFormatter)
-  parser.add_argument('--llvm-bin', help='llvm $prefix/bin path')
-  parser.add_argument('--clang',
-                      help='"clang" executable, defaults to $llvm_bin/clang')
-  parser.add_argument('--clang-args', default=[], type=str_to_commandline,
-                      help='Space-separated extra args to clang, e.g. --clang-args=-v')
-  parser.add_argument('--opt',
-                      help='"opt" executable, defaults to $llvm_bin/opt')
-  parser.add_argument(
-      '--functions', nargs='+', help='A list of function name regexes. '
-      'If specified, update CHECK lines for functions matching at least one regex')
-  parser.add_argument(
-      '--x86_extra_scrub', action='store_true',
-      help='Use more regex for x86 matching to reduce diffs between various subtargets')
-  parser.add_argument('--function-signature', action='store_true',
-                      help='Keep function signature information around for the check line')
-  parser.add_argument('--check-attributes', action='store_true',
-                      help='Check "Function Attributes" for functions')
-  parser.add_argument('--check-globals', action='store_true',
-                      help='Check global entries (global variables, metadata, attribute sets, ...) for functions')
-  parser.add_argument('tests', nargs='+')
-  args = common.parse_commandline_args(parser)
-  infer_dependent_args(args)
-
-  if not find_executable(args.clang):
-    print('Please specify --llvm-bin or --clang', file=sys.stderr)
-    sys.exit(1)
-
-  # Determine the builtin includes directory so that we can update tests that
-  # depend on the builtin headers. See get_clang_builtin_include_dir() and
-  # use_clang() in llvm/utils/lit/lit/llvm/config.py.
-  try:
-    builtin_include_dir = subprocess.check_output(
-      [args.clang, '-print-file-name=include']).decode().strip()
-    SUBST['%clang_cc1'] = ['-cc1', '-internal-isystem', builtin_include_dir,
-                           '-nostdsysteminc']
-  except subprocess.CalledProcessError:
-    common.warn('Could not determine clang builtins directory; some tests '
-                'might not update correctly.')
-
-  if not find_executable(args.opt):
-    # Many uses of this tool will not need an opt binary, because it's only
-    # needed for updating a test that runs clang | opt | FileCheck. So we
-    # defer this error message until we find that opt is actually needed.
-    args.opt = None
-
-  return args, parser
-
-
-def get_function_body(builder, args, filename, clang_args, extra_commands,
-                      prefixes):
-  # TODO Clean up duplication of asm/common build_function_body_dictionary
-  # Invoke external tool and extract function bodies.
-  raw_tool_output = common.invoke_tool(args.clang, clang_args, filename)
-  for extra_command in extra_commands:
-    extra_args = shlex.split(extra_command)
-    with tempfile.NamedTemporaryFile() as f:
-      f.write(raw_tool_output.encode())
-      f.flush()
-      if extra_args[0] == 'opt':
-        if args.opt is None:
-          print(filename, 'needs to run opt. '
-                'Please specify --llvm-bin or --opt', file=sys.stderr)
-          sys.exit(1)
-        extra_args[0] = args.opt
-      raw_tool_output = common.invoke_tool(extra_args[0],
-                                           extra_args[1:], f.name)
-  if '-emit-llvm' in clang_args:
-    builder.process_run_line(
-            common.OPT_FUNCTION_RE, common.scrub_body, raw_tool_output,
-            prefixes, False)
-    builder.processed_prefixes(prefixes)
-  else:
-    print('The clang command line should include -emit-llvm as asm tests '
-          'are discouraged in the Clang testsuite.', file=sys.stderr)
-    sys.exit(1)
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument("--llvm-bin", help="llvm $prefix/bin path")
+    parser.add_argument(
+        "--clang", help='"clang" executable, defaults to $llvm_bin/clang'
+    )
+    parser.add_argument(
+        "--clang-args",
+        default=[],
+        type=str_to_commandline,
+        help="Space-separated extra args to clang, e.g. --clang-args=-v",
+    )
+    parser.add_argument("--opt", help='"opt" executable, defaults to $llvm_bin/opt')
+    parser.add_argument(
+        "--functions",
+        nargs="+",
+        help="A list of function name regexes. "
+        "If specified, update CHECK lines for functions matching at least one regex",
+    )
+    parser.add_argument(
+        "--x86_extra_scrub",
+        action="store_true",
+        help="Use more regex for x86 matching to reduce 
diff s between various subtargets",
+    )
+    parser.add_argument(
+        "--function-signature",
+        action="store_true",
+        help="Keep function signature information around for the check line",
+    )
+    parser.add_argument(
+        "--check-attributes",
+        action="store_true",
+        help='Check "Function Attributes" for functions',
+    )
+    parser.add_argument(
+        "--check-globals",
+        action="store_true",
+        help="Check global entries (global variables, metadata, attribute sets, ...) for functions",
+    )
+    parser.add_argument("tests", nargs="+")
+    args = common.parse_commandline_args(parser)
+    infer_dependent_args(args)
+
+    if not find_executable(args.clang):
+        print("Please specify --llvm-bin or --clang", file=sys.stderr)
+        sys.exit(1)
+
+    # Determine the builtin includes directory so that we can update tests that
+    # depend on the builtin headers. See get_clang_builtin_include_dir() and
+    # use_clang() in llvm/utils/lit/lit/llvm/config.py.
+    try:
+        builtin_include_dir = (
+            subprocess.check_output([args.clang, "-print-file-name=include"])
+            .decode()
+            .strip()
+        )
+        SUBST["%clang_cc1"] = [
+            "-cc1",
+            "-internal-isystem",
+            builtin_include_dir,
+            "-nostdsysteminc",
+        ]
+    except subprocess.CalledProcessError:
+        common.warn(
+            "Could not determine clang builtins directory, some tests "
+            "might not update correctly."
+        )
+
+    if not find_executable(args.opt):
+        # Many uses of this tool will not need an opt binary, because it's only
+        # needed for updating a test that runs clang | opt | FileCheck. So we
+        # defer this error message until we find that opt is actually needed.
+        args.opt = None
+
+    return args, parser
+
+
+def get_function_body(builder, args, filename, clang_args, extra_commands, prefixes):
+    # TODO Clean up duplication of asm/common build_function_body_dictionary
+    # Invoke external tool and extract function bodies.
+    raw_tool_output = common.invoke_tool(args.clang, clang_args, filename)
+    for extra_command in extra_commands:
+        extra_args = shlex.split(extra_command)
+        with tempfile.NamedTemporaryFile() as f:
+            f.write(raw_tool_output.encode())
+            f.flush()
+            if extra_args[0] == "opt":
+                if args.opt is None:
+                    print(
+                        filename,
+                        "needs to run opt. " "Please specify --llvm-bin or --opt",
+                        file=sys.stderr,
+                    )
+                    sys.exit(1)
+                extra_args[0] = args.opt
+            raw_tool_output = common.invoke_tool(extra_args[0], extra_args[1:], f.name)
+    if "-emit-llvm" in clang_args:
+        builder.process_run_line(
+            common.OPT_FUNCTION_RE, common.scrub_body, raw_tool_output, prefixes, False
+        )
+        builder.processed_prefixes(prefixes)
+    else:
+        print(
+            "The clang command line should include -emit-llvm as asm tests "
+            "are discouraged in Clang testsuite.",
+            file=sys.stderr,
+        )
+        sys.exit(1)
+
 
 def exec_run_line(exe):
-  popen = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
-  stdout, stderr = popen.communicate()
-  if popen.returncode != 0:
-    sys.stderr.write('Failed to run ' + ' '.join(exe) + '\n')
-    sys.stderr.write(stderr)
-    sys.stderr.write(stdout)
-    sys.exit(3)
+    popen = subprocess.Popen(
+        exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
+    )
+    stdout, stderr = popen.communicate()
+    if popen.returncode != 0:
+        sys.stderr.write("Failed to run " + " ".join(exe) + "\n")
+        sys.stderr.write(stderr)
+        sys.stderr.write(stdout)
+        sys.exit(3)
+
 
 def main():
-  initial_args, parser = config()
-  script_name = os.path.basename(__file__)
-
-  for ti in common.itertests(initial_args.tests, parser, 'utils/' + script_name,
-                             comment_prefix='//', argparse_callback=infer_dependent_args):
-    # Build a list of filechecked and non-filechecked RUN lines.
-    run_list = []
-    line2func_list = collections.defaultdict(list)
-
-    subs = {
-      '%s' : ti.path,
-      '%t' : tempfile.NamedTemporaryFile().name,
-      '%S' : os.path.dirname(ti.path),
-    }
-
-    for l in ti.run_lines:
-      commands = [cmd.strip() for cmd in l.split('|')]
-
-      triple_in_cmd = None
-      m = common.TRIPLE_ARG_RE.search(commands[0])
-      if m:
-        triple_in_cmd = m.groups()[0]
-
-      # Parse executable args.
-      exec_args = shlex.split(commands[0])
-      # Execute non-clang runline.
-      if exec_args[0] not in SUBST:
-        # Do lit-like substitutions.
-        for s in subs:
-          exec_args = [i.replace(s, subs[s]) if s in i else i for i in exec_args]
-        run_list.append((None, exec_args, None, None))
-        continue
-      # This is a clang runline: apply the %clang substitution rule, do lit-like substitutions,
-      # and append args.clang_args
-      clang_args = exec_args
-      clang_args[0:1] = SUBST[clang_args[0]]
-      for s in subs:
-        clang_args = [i.replace(s, subs[s]) if s in i else i for i in clang_args]
-      clang_args += ti.args.clang_args
-
-      # Extract -check-prefix in FileCheck args
-      filecheck_cmd = commands[-1]
-      common.verify_filecheck_prefixes(filecheck_cmd)
-      if not filecheck_cmd.startswith('FileCheck '):
-        # Execute non-filechecked clang runline.
-        exe = [ti.args.clang] + clang_args
-        run_list.append((None, exe, None, None))
-        continue
-
-      check_prefixes = common.get_check_prefixes(filecheck_cmd)
-      run_list.append((check_prefixes, clang_args, commands[1:-1], triple_in_cmd))
-
-    # Execute clang, generate LLVM IR, and extract functions.
-
-    # Store only filechecked runlines.
-    filecheck_run_list = [i for i in run_list if i[0]]
-    builder = common.FunctionTestBuilder(
-      run_list=filecheck_run_list,
-      flags=ti.args,
-      scrubber_args=[],
-      path=ti.path)
-
-    for prefixes, args, extra_commands, triple_in_cmd in run_list:
-      # Execute non-filechecked runline.
-      if not prefixes:
-        print('NOTE: Executing non-FileChecked RUN line: ' + ' '.join(args), file=sys.stderr)
-        exec_run_line(args)
-        continue
-
-      clang_args = args
-      common.debug('Extracted clang cmd: clang {}'.format(clang_args))
-      common.debug('Extracted FileCheck prefixes: {}'.format(prefixes))
-
-      get_function_body(builder, ti.args, ti.path, clang_args, extra_commands,
-                        prefixes)
-
-      # Invoke clang -Xclang -ast-dump=json to get mapping from start lines to
-      # mangled names. Forward all clang args for now.
-      for k, v in get_line2func_list(ti.args, clang_args).items():
-        line2func_list[k].extend(v)
-
-    func_dict = builder.finish_and_get_func_dict()
-    global_vars_seen_dict = {}
-    prefix_set = set([prefix for p in filecheck_run_list for prefix in p[0]])
-    output_lines = []
-    has_checked_pre_function_globals = False
-
-    include_generated_funcs = common.find_arg_in_test(ti,
-                                                      lambda args: ti.args.include_generated_funcs,
-                                                      '--include-generated-funcs',
-                                                      True)
-    generated_prefixes = []
-    if include_generated_funcs:
-      # Generate the appropriate checks for each function.  We need to emit
-      # these in the order of the generated output so that CHECK-LABEL
-      # works properly.  func_order provides that.
-
-      # It turns out that when clang generates functions (for example, with
-      # -fopenmp), it can sometimes cause functions to be re-ordered in the
-      # output, even functions that exist in the source file.  Therefore we
-      # can't insert check lines before each source function and instead have to
-      # put them at the end.  So the first thing to do is dump out the source
-      # lines.
-      common.dump_input_lines(output_lines, ti, prefix_set, '//')
-
-      # Now generate all the checks.
-      def check_generator(my_output_lines, prefixes, func):
-        if '-emit-llvm' in clang_args:
-          return common.add_ir_checks(my_output_lines, '//',
-                                      prefixes,
-                                      func_dict, func, False,
-                                      ti.args.function_signature,
-                                      ti.args.version,
-                                      global_vars_seen_dict,
-                                      is_filtered=builder.is_filtered())
-        else:
-          return asm.add_checks(my_output_lines, '//',
-                                prefixes,
-                                func_dict, func, global_vars_seen_dict,
-                                is_filtered=builder.is_filtered())
-
-      if ti.args.check_globals:
-        generated_prefixes.extend(
-            common.add_global_checks(builder.global_var_dict(), '//', run_list,
-                                     output_lines, global_vars_seen_dict, True,
-                                     True))
-      generated_prefixes.extend(
-          common.add_checks_at_end(
-              output_lines, filecheck_run_list, builder.func_order(), '//',
-              lambda my_output_lines, prefixes, func: check_generator(
-                  my_output_lines, prefixes, func)))
-    else:
-      # Normal mode.  Put checks before each source function.
-      for line_info in ti.iterlines(output_lines):
-        idx = line_info.line_number
-        line = line_info.line
-        args = line_info.args
-        include_line = True
-        m = common.CHECK_RE.match(line)
-        if m and m.group(1) in prefix_set:
-          continue  # Don't append the existing CHECK lines
-        # Skip special separator comments added by common.add_global_checks.
-        if line.strip() == '//' + common.SEPARATOR:
-          continue
-        if idx in line2func_list:
-          added = set()
-          for spell, mangled, search in line2func_list[idx]:
-            # One line may contain multiple function declarations.
-            # Skip if the mangled name has been added before.
-            # The line number may come from an included file; we simply require
-            # the search string (normally the function's spelling name, but the
-            # the class's spelling name for class specializations) to appear on
-            # the line to exclude functions from other files.
-            if mangled in added or search not in line:
-              continue
-            if args.functions is None or any(re.search(regex, spell) for regex in args.functions):
-              last_line = output_lines[-1].strip()
-              while last_line == '//':
-                # Remove the comment line since we will generate a new comment
-                # line as part of common.add_ir_checks()
-                output_lines.pop()
-                last_line = output_lines[-1].strip()
-              if ti.args.check_globals and not has_checked_pre_function_globals:
+    initial_args, parser = config()
+    script_name = os.path.basename(__file__)
+
+    for ti in common.itertests(
+        initial_args.tests,
+        parser,
+        "utils/" + script_name,
+        comment_prefix="//",
+        argparse_callback=infer_dependent_args,
+    ):
+        # Build a list of filechecked and non-filechecked RUN lines.
+        run_list = []
+        line2func_list = collections.defaultdict(list)
+
+        subs = {
+            "%s": ti.path,
+            "%t": tempfile.NamedTemporaryFile().name,
+            "%S": os.path.dirname(ti.path),
+        }
+
+        for l in ti.run_lines:
+            commands = [cmd.strip() for cmd in l.split("|")]
+
+            triple_in_cmd = None
+            m = common.TRIPLE_ARG_RE.search(commands[0])
+            if m:
+                triple_in_cmd = m.groups()[0]
+
+            # Parse executable args.
+            exec_args = shlex.split(commands[0])
+            # Execute non-clang runline.
+            if exec_args[0] not in SUBST:
+                # Do lit-like substitutions.
+                for s in subs:
+                    exec_args = [
+                        i.replace(s, subs[s]) if s in i else i for i in exec_args
+                    ]
+                run_list.append((None, exec_args, None, None))
+                continue
+            # This is a clang runline: apply the %clang substitution rule, do lit-like substitutions,
+            # and append args.clang_args
+            clang_args = exec_args
+            clang_args[0:1] = SUBST[clang_args[0]]
+            for s in subs:
+                clang_args = [
+                    i.replace(s, subs[s]) if s in i else i for i in clang_args
+                ]
+            clang_args += ti.args.clang_args
+
+            # Extract -check-prefix in FileCheck args
+            filecheck_cmd = commands[-1]
+            common.verify_filecheck_prefixes(filecheck_cmd)
+            if not filecheck_cmd.startswith("FileCheck "):
+                # Execute non-filechecked clang runline.
+                exe = [ti.args.clang] + clang_args
+                run_list.append((None, exe, None, None))
+                continue
+
+            check_prefixes = common.get_check_prefixes(filecheck_cmd)
+            run_list.append((check_prefixes, clang_args, commands[1:-1], triple_in_cmd))
+
+        # Execute clang, generate LLVM IR, and extract functions.
+
+        # Store only filechecked runlines.
+        filecheck_run_list = [i for i in run_list if i[0]]
+        builder = common.FunctionTestBuilder(
+            run_list=filecheck_run_list, flags=ti.args, scrubber_args=[], path=ti.path
+        )
+
+        for prefixes, args, extra_commands, triple_in_cmd in run_list:
+            # Execute non-filechecked runline.
+            if not prefixes:
+                print(
+                    "NOTE: Executing non-FileChecked RUN line: " + " ".join(args),
+                    file=sys.stderr,
+                )
+                exec_run_line(args)
+                continue
+
+            clang_args = args
+            common.debug("Extracted clang cmd: clang {}".format(clang_args))
+            common.debug("Extracted FileCheck prefixes: {}".format(prefixes))
+
+            get_function_body(
+                builder, ti.args, ti.path, clang_args, extra_commands, prefixes
+            )
+
+            # Invoke clang -Xclang -ast-dump=json to get mapping from start lines to
+            # mangled names. Forward all clang args for now.
+            for k, v in get_line2func_list(ti.args, clang_args).items():
+                line2func_list[k].extend(v)
+
+        func_dict = builder.finish_and_get_func_dict()
+        global_vars_seen_dict = {}
+        prefix_set = set([prefix for p in filecheck_run_list for prefix in p[0]])
+        output_lines = []
+        has_checked_pre_function_globals = False
+
+        include_generated_funcs = common.find_arg_in_test(
+            ti,
+            lambda args: ti.args.include_generated_funcs,
+            "--include-generated-funcs",
+            True,
+        )
+        generated_prefixes = []
+        if include_generated_funcs:
+            # Generate the appropriate checks for each function.  We need to emit
+            # these in the order of the generated output so that CHECK-LABEL
+            # works properly.  func_order provides that.
+
+            # It turns out that when clang generates functions (for example, with
+            # -fopenmp), it can sometimes cause functions to be re-ordered in the
+            # output, even functions that exist in the source file.  Therefore we
+            # can't insert check lines before each source function and instead have to
+            # put them at the end.  So the first thing to do is dump out the source
+            # lines.
+            common.dump_input_lines(output_lines, ti, prefix_set, "//")
+
+            # Now generate all the checks.
+            def check_generator(my_output_lines, prefixes, func):
+                if "-emit-llvm" in clang_args:
+                    return common.add_ir_checks(
+                        my_output_lines,
+                        "//",
+                        prefixes,
+                        func_dict,
+                        func,
+                        False,
+                        ti.args.function_signature,
+                        ti.args.version,
+                        global_vars_seen_dict,
+                        is_filtered=builder.is_filtered(),
+                    )
+                else:
+                    return asm.add_checks(
+                        my_output_lines,
+                        "//",
+                        prefixes,
+                        func_dict,
+                        func,
+                        global_vars_seen_dict,
+                        is_filtered=builder.is_filtered(),
+                    )
+
+            if ti.args.check_globals:
                 generated_prefixes.extend(
-                    common.add_global_checks(builder.global_var_dict(), '//',
-                                             run_list, output_lines,
-                                             global_vars_seen_dict, True, True))
-                has_checked_pre_function_globals = True
-              if added:
-                output_lines.append('//')
-              added.add(mangled)
-              generated_prefixes.extend(
-                  common.add_ir_checks(
-                      output_lines,
-                      '//',
-                      filecheck_run_list,
-                      func_dict,
-                      mangled,
-                      False,
-                      args.function_signature,
-                      args.version,
-                      global_vars_seen_dict,
-                      is_filtered=builder.is_filtered()))
-              if line.rstrip('\n') == '//':
-                include_line = False
-
-        if include_line:
-          output_lines.append(line.rstrip('\n'))
-
-    if ti.args.check_globals:
-      generated_prefixes.extend(
-          common.add_global_checks(builder.global_var_dict(), '//', run_list,
-                                   output_lines, global_vars_seen_dict, True,
-                                   False))
-    if ti.args.gen_unused_prefix_body:
-      output_lines.extend(
-          ti.get_checks_for_unused_prefixes(run_list, generated_prefixes))
-    common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))
-    with open(ti.path, 'wb') as f:
-      f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
-
-  return 0
-
-
-if __name__ == '__main__':
-  sys.exit(main())
+                    common.add_global_checks(
+                        builder.global_var_dict(),
+                        "//",
+                        run_list,
+                        output_lines,
+                        global_vars_seen_dict,
+                        True,
+                        True,
+                    )
+                )
+            generated_prefixes.extend(
+                common.add_checks_at_end(
+                    output_lines,
+                    filecheck_run_list,
+                    builder.func_order(),
+                    "//",
+                    lambda my_output_lines, prefixes, func: check_generator(
+                        my_output_lines, prefixes, func
+                    ),
+                )
+            )
+        else:
+            # Normal mode.  Put checks before each source function.
+            for line_info in ti.iterlines(output_lines):
+                idx = line_info.line_number
+                line = line_info.line
+                args = line_info.args
+                include_line = True
+                m = common.CHECK_RE.match(line)
+                if m and m.group(1) in prefix_set:
+                    continue  # Don't append the existing CHECK lines
+                # Skip special separator comments added by common.add_global_checks.
+                if line.strip() == "//" + common.SEPARATOR:
+                    continue
+                if idx in line2func_list:
+                    added = set()
+                    for spell, mangled, search in line2func_list[idx]:
+                        # One line may contain multiple function declarations.
+                        # Skip if the mangled name has been added before.
+                        # The line number may come from an included file; we simply require
+                        # the search string (normally the function's spelling name, but the
+                        # the class's spelling name for class specializations) to appear on
+                        # the line to exclude functions from other files.
+                        if mangled in added or search not in line:
+                            continue
+                        if args.functions is None or any(
+                            re.search(regex, spell) for regex in args.functions
+                        ):
+                            last_line = output_lines[-1].strip()
+                            while last_line == "//":
+                                # Remove the comment line since we will generate a new comment
+                                # line as part of common.add_ir_checks()
+                                output_lines.pop()
+                                last_line = output_lines[-1].strip()
+                            if (
+                                ti.args.check_globals
+                                and not has_checked_pre_function_globals
+                            ):
+                                generated_prefixes.extend(
+                                    common.add_global_checks(
+                                        builder.global_var_dict(),
+                                        "//",
+                                        run_list,
+                                        output_lines,
+                                        global_vars_seen_dict,
+                                        True,
+                                        True,
+                                    )
+                                )
+                                has_checked_pre_function_globals = True
+                            if added:
+                                output_lines.append("//")
+                            added.add(mangled)
+                            generated_prefixes.extend(
+                                common.add_ir_checks(
+                                    output_lines,
+                                    "//",
+                                    filecheck_run_list,
+                                    func_dict,
+                                    mangled,
+                                    False,
+                                    args.function_signature,
+                                    args.version,
+                                    global_vars_seen_dict,
+                                    is_filtered=builder.is_filtered(),
+                                )
+                            )
+                            if line.rstrip("\n") == "//":
+                                include_line = False
+
+                if include_line:
+                    output_lines.append(line.rstrip("\n"))
+
+        if ti.args.check_globals:
+            generated_prefixes.extend(
+                common.add_global_checks(
+                    builder.global_var_dict(),
+                    "//",
+                    run_list,
+                    output_lines,
+                    global_vars_seen_dict,
+                    True,
+                    False,
+                )
+            )
+        if ti.args.gen_unused_prefix_body:
+            output_lines.extend(
+                ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
+            )
+        common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+        with open(ti.path, "wb") as f:
+            f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
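
One detail worth noting from main() above: the subs table implements a small subset of
lit's substitutions before RUN lines are executed. A sketch with a hypothetical test path:

    import os
    import tempfile

    path = "clang/test/CodeGen/example.c"  # hypothetical test path
    subs = {
        "%s": path,                                # the test file itself
        "%t": tempfile.NamedTemporaryFile().name,  # a scratch temporary
        "%S": os.path.dirname(path),               # the test's directory
    }
    cmd = ["clang", "-cc1", "-emit-llvm", "%s", "-o", "-"]
    for s in subs:
        cmd = [tok.replace(s, subs[s]) for tok in cmd]
    # cmd[3] is now the concrete test path.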

diff --git a/llvm/utils/update_llc_test_checks.py b/llvm/utils/update_llc_test_checks.py
index 3744788b927d7..1ed0132781e2b 100755
--- a/llvm/utils/update_llc_test_checks.py
+++ b/llvm/utils/update_llc_test_checks.py
@@ -16,212 +16,270 @@
 
 # llc is the only llc-like in the LLVM tree but downstream forks can add
 # additional ones here if they have them.
-LLC_LIKE_TOOLS = ('llc',)
+LLC_LIKE_TOOLS = ("llc",)
+
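As the comment above says, downstream forks can register additional llc-like drivers here;
a sketch with a made-up tool name ("llc" itself must remain for upstream tests):

    LLC_LIKE_TOOLS = ("llc", "my-target-llc")
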
 
 def main():
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('--llc-binary', default=None,
-                      help='The "llc" binary to use to generate the test case')
-  parser.add_argument(
-      '--function', help='The function in the test file to update')
-  parser.add_argument(
-      '--extra_scrub', action='store_true',
-      help='Always use additional regex to further reduce diffs between various subtargets')
-  parser.add_argument(
-      '--x86_scrub_sp', action='store_true', default=True,
-      help='Use regex for x86 sp matching to reduce diffs between various subtargets')
-  parser.add_argument(
-      '--no_x86_scrub_sp', action='store_false', dest='x86_scrub_sp')
-  parser.add_argument(
-      '--x86_scrub_rip', action='store_true', default=False,
-      help='Use more regex for x86 rip matching to reduce diffs between various subtargets')
-  parser.add_argument(
-      '--no_x86_scrub_rip', action='store_false', dest='x86_scrub_rip')
-  parser.add_argument(
-      '--no_x86_scrub_mem_shuffle', action='store_true', default=False,
-      help='Reduce scrubbing shuffles with memory operands')
-  parser.add_argument('tests', nargs='+')
-  initial_args = common.parse_commandline_args(parser)
-
-  script_name = os.path.basename(__file__)
-
-  for ti in common.itertests(initial_args.tests, parser,
-                             script_name='utils/' + script_name):
-    triple_in_ir = None
-    for l in ti.input_lines:
-      m = common.TRIPLE_IR_RE.match(l)
-      if m:
-        triple_in_ir = m.groups()[0]
-        break
-
-    run_list = []
-    for l in ti.run_lines:
-      if '|' not in l:
-        common.warn('Skipping unparsable RUN line: ' + l)
-        continue
-
-      commands = [cmd.strip() for cmd in l.split('|')]
-      assert len(commands) >= 2
-      preprocess_cmd = None
-      if len(commands) > 2:
-        preprocess_cmd = " | ".join(commands[:-2])
-      llc_cmd = commands[-2]
-      filecheck_cmd = commands[-1]
-      llc_tool = llc_cmd.split(' ')[0]
-
-      triple_in_cmd = None
-      m = common.TRIPLE_ARG_RE.search(llc_cmd)
-      if m:
-        triple_in_cmd = m.groups()[0]
-
-      march_in_cmd = None
-      m = common.MARCH_ARG_RE.search(llc_cmd)
-      if m:
-        march_in_cmd = m.groups()[0]
-
-      m = common.DEBUG_ONLY_ARG_RE.search(llc_cmd)
-      if m and m.groups()[0] == 'isel':
-        from UpdateTestChecks import isel as output_type
-      else:
-        from UpdateTestChecks import asm as output_type
-
-      common.verify_filecheck_prefixes(filecheck_cmd)
-      if llc_tool not in LLC_LIKE_TOOLS:
-        common.warn('Skipping non-llc RUN line: ' + l)
-        continue
-
-      if not filecheck_cmd.startswith('FileCheck '):
-        common.warn('Skipping non-FileChecked RUN line: ' + l)
-        continue
-
-      llc_cmd_args = llc_cmd[len(llc_tool):].strip()
-      llc_cmd_args = llc_cmd_args.replace('< %s', '').replace('%s', '').strip()
-      if ti.path.endswith('.mir'):
-        llc_cmd_args += ' -x mir'
-      check_prefixes = common.get_check_prefixes(filecheck_cmd)
-
-      # FIXME: We should use multiple check prefixes to common check lines. For
-      # now, we just ignore all but the last.
-      run_list.append((check_prefixes, llc_tool, llc_cmd_args, preprocess_cmd,
-                       triple_in_cmd, march_in_cmd))
-
-    if ti.path.endswith('.mir'):
-      check_indent = '  '
-    else:
-      check_indent = ''
-
-    builder = common.FunctionTestBuilder(
-        run_list=run_list,
-        flags=type('', (object,), {
-            'verbose': ti.args.verbose,
-            'filters': ti.args.filters,
-            'function_signature': False,
-            'check_attributes': False,
-            'replace_value_regex': []}),
-        scrubber_args=[ti.args],
-        path=ti.path)
-
-    for prefixes, llc_tool, llc_args, preprocess_cmd, triple_in_cmd, march_in_cmd in run_list:
-      common.debug('Extracted LLC cmd:', llc_tool, llc_args)
-      common.debug('Extracted FileCheck prefixes:', str(prefixes))
-
-      raw_tool_output = common.invoke_tool(ti.args.llc_binary or llc_tool,
-                                           llc_args, ti.path, preprocess_cmd,
-                                           verbose=ti.args.verbose)
-      triple = triple_in_cmd or triple_in_ir
-      if not triple:
-        triple = common.get_triple_from_march(march_in_cmd)
-
-      scrubber, function_re = output_type.get_run_handler(triple)
-      builder.process_run_line(function_re, scrubber, raw_tool_output, prefixes, True)
-      builder.processed_prefixes(prefixes)
-
-    func_dict = builder.finish_and_get_func_dict()
-    global_vars_seen_dict = {}
-
-    is_in_function = False
-    is_in_function_start = False
-    func_name = None
-    prefix_set = set([prefix for p in run_list for prefix in p[0]])
-    common.debug('Rewriting FileCheck prefixes:', str(prefix_set))
-    output_lines = []
-
-    include_generated_funcs = common.find_arg_in_test(ti,
-                                                      lambda args: ti.args.include_generated_funcs,
-                                                      '--include-generated-funcs',
-                                                      True)
-
-    generated_prefixes = []
-    if include_generated_funcs:
-      # Generate the appropriate checks for each function.  We need to emit
-      # these in the order of the generated output so that CHECK-LABEL
-      # works properly.  func_order provides that.
-
-      # We can't predict where various passes might insert functions so we can't
-      # be sure the input function order is maintained.  Therefore, first spit
-      # out all the source lines.
-      common.dump_input_lines(output_lines, ti, prefix_set, ';')
-
-      # Now generate all the checks.
-      generated_prefixes = common.add_checks_at_end(
-          output_lines, run_list, builder.func_order(),
-          check_indent + ';',
-          lambda my_output_lines, prefixes, func:
-          output_type.add_checks(my_output_lines,
-                                 check_indent + ';',
-                                 prefixes, func_dict, func,
-                                 global_vars_seen_dict,
-                                 is_filtered=builder.is_filtered()))
-    else:
-      for input_info in ti.iterlines(output_lines):
-        input_line = input_info.line
-        args = input_info.args
-        if is_in_function_start:
-          if input_line == '':
-            continue
-          if input_line.lstrip().startswith(';'):
-            m = common.CHECK_RE.match(input_line)
-            if not m or m.group(1) not in prefix_set:
-              output_lines.append(input_line)
-              continue
-
-          # Print out the various check lines here.
-          generated_prefixes.extend(
-              output_type.add_checks(output_lines, check_indent + ';', run_list,
-                                     func_dict, func_name, global_vars_seen_dict,
-                                     is_filtered=builder.is_filtered()))
-          is_in_function_start = False
-
-        if is_in_function:
-          if common.should_add_line_to_output(input_line, prefix_set):
-            # This input line of the function body will go as-is into the output.
-            output_lines.append(input_line)
-          else:
-            continue
-          if input_line.strip() == '}':
-            is_in_function = False
-          continue
-
-        # If it's outside a function, it just gets copied to the output.
-        output_lines.append(input_line)
-
-        m = common.IR_FUNCTION_RE.match(input_line)
-        if not m:
-          continue
-        func_name = m.group(1)
-        if args.function is not None and func_name != args.function:
-          # When filtering on a specific function, skip all others.
-          continue
-        is_in_function = is_in_function_start = True
-
-    if ti.args.gen_unused_prefix_body:
-      output_lines.extend(ti.get_checks_for_unused_prefixes(
-          run_list, generated_prefixes))
-    
-    common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))
-    with open(ti.path, 'wb') as f:
-      f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
-
-
-if __name__ == '__main__':
-  main()
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "--llc-binary",
+        default=None,
+        help='The "llc" binary to use to generate the test case',
+    )
+    parser.add_argument("--function", help="The function in the test file to update")
+    parser.add_argument(
+        "--extra_scrub",
+        action="store_true",
+        help="Always use additional regex to further reduce 
diff s between various subtargets",
+    )
+    parser.add_argument(
+        "--x86_scrub_sp",
+        action="store_true",
+        default=True,
+        help="Use regex for x86 sp matching to reduce 
diff s between various subtargets",
+    )
+    parser.add_argument("--no_x86_scrub_sp", action="store_false", dest="x86_scrub_sp")
+    parser.add_argument(
+        "--x86_scrub_rip",
+        action="store_true",
+        default=False,
+        help="Use more regex for x86 rip matching to reduce 
diff s between various subtargets",
+    )
+    parser.add_argument(
+        "--no_x86_scrub_rip", action="store_false", dest="x86_scrub_rip"
+    )
+    parser.add_argument(
+        "--no_x86_scrub_mem_shuffle",
+        action="store_true",
+        default=False,
+        help="Reduce scrubbing shuffles with memory operands",
+    )
+    parser.add_argument("tests", nargs="+")
+    initial_args = common.parse_commandline_args(parser)
+
+    script_name = os.path.basename(__file__)
+
+    for ti in common.itertests(
+        initial_args.tests, parser, script_name="utils/" + script_name
+    ):
+        triple_in_ir = None
+        for l in ti.input_lines:
+            m = common.TRIPLE_IR_RE.match(l)
+            if m:
+                triple_in_ir = m.groups()[0]
+                break
+
+        run_list = []
+        for l in ti.run_lines:
+            if "|" not in l:
+                common.warn("Skipping unparsable RUN line: " + l)
+                continue
+
+            commands = [cmd.strip() for cmd in l.split("|")]
+            assert len(commands) >= 2
+            preprocess_cmd = None
+            if len(commands) > 2:
+                preprocess_cmd = " | ".join(commands[:-2])
+            llc_cmd = commands[-2]
+            filecheck_cmd = commands[-1]
+            llc_tool = llc_cmd.split(" ")[0]
+
+            triple_in_cmd = None
+            m = common.TRIPLE_ARG_RE.search(llc_cmd)
+            if m:
+                triple_in_cmd = m.groups()[0]
+
+            march_in_cmd = None
+            m = common.MARCH_ARG_RE.search(llc_cmd)
+            if m:
+                march_in_cmd = m.groups()[0]
+
+            m = common.DEBUG_ONLY_ARG_RE.search(llc_cmd)
+            if m and m.groups()[0] == "isel":
+                from UpdateTestChecks import isel as output_type
+            else:
+                from UpdateTestChecks import asm as output_type
+
+            common.verify_filecheck_prefixes(filecheck_cmd)
+            if llc_tool not in LLC_LIKE_TOOLS:
+                common.warn("Skipping non-llc RUN line: " + l)
+                continue
+
+            if not filecheck_cmd.startswith("FileCheck "):
+                common.warn("Skipping non-FileChecked RUN line: " + l)
+                continue
+
+            llc_cmd_args = llc_cmd[len(llc_tool) :].strip()
+            llc_cmd_args = llc_cmd_args.replace("< %s", "").replace("%s", "").strip()
+            if ti.path.endswith(".mir"):
+                llc_cmd_args += " -x mir"
+            check_prefixes = common.get_check_prefixes(filecheck_cmd)
+
+            # FIXME: We should use multiple check prefixes to common check lines. For
+            # now, we just ignore all but the last.
+            run_list.append(
+                (
+                    check_prefixes,
+                    llc_tool,
+                    llc_cmd_args,
+                    preprocess_cmd,
+                    triple_in_cmd,
+                    march_in_cmd,
+                )
+            )
+
+        if ti.path.endswith(".mir"):
+            check_indent = "  "
+        else:
+            check_indent = ""
+
+        builder = common.FunctionTestBuilder(
+            run_list=run_list,
+            flags=type(
+                "",
+                (object,),
+                {
+                    "verbose": ti.args.verbose,
+                    "filters": ti.args.filters,
+                    "function_signature": False,
+                    "check_attributes": False,
+                    "replace_value_regex": [],
+                },
+            ),
+            scrubber_args=[ti.args],
+            path=ti.path,
+        )
+
+        for (
+            prefixes,
+            llc_tool,
+            llc_args,
+            preprocess_cmd,
+            triple_in_cmd,
+            march_in_cmd,
+        ) in run_list:
+            common.debug("Extracted LLC cmd:", llc_tool, llc_args)
+            common.debug("Extracted FileCheck prefixes:", str(prefixes))
+
+            raw_tool_output = common.invoke_tool(
+                ti.args.llc_binary or llc_tool,
+                llc_args,
+                ti.path,
+                preprocess_cmd,
+                verbose=ti.args.verbose,
+            )
+            triple = triple_in_cmd or triple_in_ir
+            if not triple:
+                triple = common.get_triple_from_march(march_in_cmd)
+
+            scrubber, function_re = output_type.get_run_handler(triple)
+            builder.process_run_line(
+                function_re, scrubber, raw_tool_output, prefixes, True
+            )
+            builder.processed_prefixes(prefixes)
+
+        func_dict = builder.finish_and_get_func_dict()
+        global_vars_seen_dict = {}
+
+        is_in_function = False
+        is_in_function_start = False
+        func_name = None
+        prefix_set = set([prefix for p in run_list for prefix in p[0]])
+        common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
+        output_lines = []
+
+        include_generated_funcs = common.find_arg_in_test(
+            ti,
+            lambda args: ti.args.include_generated_funcs,
+            "--include-generated-funcs",
+            True,
+        )
+
+        generated_prefixes = []
+        if include_generated_funcs:
+            # Generate the appropriate checks for each function.  We need to emit
+            # these in the order according to the generated output so that CHECK-LABEL
+            # works properly.  func_order provides that.
+
+            # We can't predict where various passes might insert functions so we can't
+            # be sure the input function order is maintained.  Therefore, first spit
+            # out all the source lines.
+            common.dump_input_lines(output_lines, ti, prefix_set, ";")
+
+            # Now generate all the checks.
+            generated_prefixes = common.add_checks_at_end(
+                output_lines,
+                run_list,
+                builder.func_order(),
+                check_indent + ";",
+                lambda my_output_lines, prefixes, func: output_type.add_checks(
+                    my_output_lines,
+                    check_indent + ";",
+                    prefixes,
+                    func_dict,
+                    func,
+                    global_vars_seen_dict,
+                    is_filtered=builder.is_filtered(),
+                ),
+            )
+        else:
+            for input_info in ti.iterlines(output_lines):
+                input_line = input_info.line
+                args = input_info.args
+                if is_in_function_start:
+                    if input_line == "":
+                        continue
+                    if input_line.lstrip().startswith(";"):
+                        m = common.CHECK_RE.match(input_line)
+                        if not m or m.group(1) not in prefix_set:
+                            output_lines.append(input_line)
+                            continue
+
+                    # Print out the various check lines here.
+                    generated_prefixes.extend(
+                        output_type.add_checks(
+                            output_lines,
+                            check_indent + ";",
+                            run_list,
+                            func_dict,
+                            func_name,
+                            global_vars_seen_dict,
+                            is_filtered=builder.is_filtered(),
+                        )
+                    )
+                    is_in_function_start = False
+
+                if is_in_function:
+                    if common.should_add_line_to_output(input_line, prefix_set):
+                        # This input line of the function body will go as-is into the output.
+                        output_lines.append(input_line)
+                    else:
+                        continue
+                    if input_line.strip() == "}":
+                        is_in_function = False
+                    continue
+
+                # If it's outside a function, it just gets copied to the output.
+                output_lines.append(input_line)
+
+                m = common.IR_FUNCTION_RE.match(input_line)
+                if not m:
+                    continue
+                func_name = m.group(1)
+                if args.function is not None and func_name != args.function:
+                    # When filtering on a specific function, skip all others.
+                    continue
+                is_in_function = is_in_function_start = True
+
+        if ti.args.gen_unused_prefix_body:
+            output_lines.extend(
+                ti.get_checks_for_unused_prefixes(run_list, generated_prefixes)
+            )
+
+        common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+        with open(ti.path, "wb") as f:
+            f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
+
+if __name__ == "__main__":
+    main()

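Two idioms in the reformatted update_llc_test_checks.py above are worth a
closer look. The flags=type("", (object,), {...}) argument to
FunctionTestBuilder builds an anonymous class whose attributes act as a
lightweight namespace; black only spreads the call across lines. The snippet
below is a minimal sketch of the idiom, not code from the patch; the attribute
names mirror two of the script's, but the values are placeholders:

    # Sketch only: an anonymous class used as an attribute namespace.
    # Placeholder values; the script fills these from its parsed arguments.
    flags = type(
        "",
        (object,),
        {
            "verbose": True,
            "filters": [],
        },
    )
    print(flags.verbose)  # True; class attributes read like plain fields

    # The standard library expresses the same idea more directly:
    import types

    flags = types.SimpleNamespace(verbose=True, filters=[])
    print(flags.filters)  # []

Separately, slices such as llc_cmd[len(llc_tool) :] are black applying PEP 8's
slice guidance: the colon is treated as a binary operator, so a non-trivial
bound gets a space beside it while an omitted bound gets none.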
diff --git a/llvm/utils/update_mca_test_checks.py b/llvm/utils/update_mca_test_checks.py
index db4511dac7c02..f7b29fec4efec 100755
--- a/llvm/utils/update_mca_test_checks.py
+++ b/llvm/utils/update_mca_test_checks.py
@@ -16,551 +16,574 @@
 from UpdateTestChecks import common
 
 
-COMMENT_CHAR = '#'
-ADVERT_PREFIX = '{} NOTE: Assertions have been autogenerated by '.format(
-    COMMENT_CHAR)
-ADVERT = '{}utils/{}'.format(ADVERT_PREFIX, os.path.basename(__file__))
+COMMENT_CHAR = "#"
+ADVERT_PREFIX = "{} NOTE: Assertions have been autogenerated by ".format(COMMENT_CHAR)
+ADVERT = "{}utils/{}".format(ADVERT_PREFIX, os.path.basename(__file__))
 
 
 class Error(Exception):
-  """ Generic Error that can be raised without printing a traceback.
-  """
-  pass
+    """Generic Error that can be raised without printing a traceback."""
+
+    pass
 
 
 def _warn(msg):
-  """ Log a user warning to stderr.
-  """
-  warnings.warn(msg, Warning, stacklevel=2)
+    """Log a user warning to stderr."""
+    warnings.warn(msg, Warning, stacklevel=2)
 
 
 def _configure_warnings(args):
-  warnings.resetwarnings()
-  if args.w:
-    warnings.simplefilter('ignore')
-  if args.Werror:
-    warnings.simplefilter('error')
+    warnings.resetwarnings()
+    if args.w:
+        warnings.simplefilter("ignore")
+    if args.Werror:
+        warnings.simplefilter("error")
 
 
 def _showwarning(message, category, filename, lineno, file=None, line=None):
-  """ Version of warnings.showwarning that won't attempt to print out the
-      line at the location of the warning if the line text is not explicitly
-      specified.
-  """
-  if file is None:
-    file = sys.stderr
-  if line is None:
-    line = ''
-  file.write(warnings.formatwarning(message, category, filename, lineno, line))
+    """Version of warnings.showwarning that won't attempt to print out the
+    line at the location of the warning if the line text is not explicitly
+    specified.
+    """
+    if file is None:
+        file = sys.stderr
+    if line is None:
+        line = ""
+    file.write(warnings.formatwarning(message, category, filename, lineno, line))
 
 
 def _parse_args():
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('-w',
-                      action='store_true',
-                      help='suppress warnings')
-  parser.add_argument('-Werror',
-                      action='store_true',
-                      help='promote warnings to errors')
-  parser.add_argument('--llvm-mca-binary',
-                      metavar='<path>',
-                      default='llvm-mca',
-                      help='the binary to use to generate the test case '
-                           '(default: llvm-mca)')
-  parser.add_argument('tests',
-                      metavar='<test-path>',
-                      nargs='+')
-  args = common.parse_commandline_args(parser)
-
-  _configure_warnings(args)
-
-  if not args.llvm_mca_binary:
-    raise Error('--llvm-mca-binary value cannot be empty string')
-
-  if 'llvm-mca' not in os.path.basename(args.llvm_mca_binary):
-    _warn('unexpected binary name: {}'.format(args.llvm_mca_binary))
-
-  return args
-
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument("-w", action="store_true", help="suppress warnings")
+    parser.add_argument(
+        "-Werror", action="store_true", help="promote warnings to errors"
+    )
+    parser.add_argument(
+        "--llvm-mca-binary",
+        metavar="<path>",
+        default="llvm-mca",
+        help="the binary to use to generate the test case " "(default: llvm-mca)",
+    )
+    parser.add_argument("tests", metavar="<test-path>", nargs="+")
+    args = common.parse_commandline_args(parser)
 
-def _get_run_infos(run_lines, args):
-  run_infos = []
-  for run_line in run_lines:
-    try:
-      (tool_cmd, filecheck_cmd) = tuple([cmd.strip()
-                                        for cmd in run_line.split('|', 1)])
-    except ValueError:
-      _warn('could not split tool and filecheck commands: {}'.format(run_line))
-      continue
-
-    common.verify_filecheck_prefixes(filecheck_cmd)
-    tool_basename = os.path.splitext(os.path.basename(args.llvm_mca_binary))[0]
+    _configure_warnings(args)
 
-    if not tool_cmd.startswith(tool_basename + ' '):
-      _warn('skipping non-{} RUN line: {}'.format(tool_basename, run_line))
-      continue
+    if not args.llvm_mca_binary:
+        raise Error("--llvm-mca-binary value cannot be empty string")
 
-    if not filecheck_cmd.startswith('FileCheck '):
-      _warn('skipping non-FileCheck RUN line: {}'.format(run_line))
-      continue
+    if "llvm-mca" not in os.path.basename(args.llvm_mca_binary):
+        _warn("unexpected binary name: {}".format(args.llvm_mca_binary))
 
-    tool_cmd_args = tool_cmd[len(tool_basename):].strip()
-    tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
+    return args
 
-    check_prefixes = common.get_check_prefixes(filecheck_cmd)
 
-    run_infos.append((check_prefixes, tool_cmd_args))
+def _get_run_infos(run_lines, args):
+    run_infos = []
+    for run_line in run_lines:
+        try:
+            (tool_cmd, filecheck_cmd) = tuple(
+                [cmd.strip() for cmd in run_line.split("|", 1)]
+            )
+        except ValueError:
+            _warn("could not split tool and filecheck commands: {}".format(run_line))
+            continue
 
-  return run_infos
+        common.verify_filecheck_prefixes(filecheck_cmd)
+        tool_basename = os.path.splitext(os.path.basename(args.llvm_mca_binary))[0]
 
+        if not tool_cmd.startswith(tool_basename + " "):
+            _warn("skipping non-{} RUN line: {}".format(tool_basename, run_line))
+            continue
 
-def _break_down_block(block_info, common_prefix):
-  """ Given a block_info, see if we can analyze it further to let us break it
-      down by prefix per-line rather than per-block.
-  """
-  texts = block_info.keys()
-  prefixes = list(block_info.values())
-  # Split the lines from each of the incoming block_texts and zip them so that
-  # each element contains the corresponding lines from each text.  E.g.
-  #
-  # block_text_1: A   # line 1
-  #               B   # line 2
-  #
-  # block_text_2: A   # line 1
-  #               C   # line 2
-  #
-  # would become:
-  #
-  # [(A, A),   # line 1
-  #  (B, C)]   # line 2
-  #
-  line_tuples = list(zip(*list((text.splitlines() for text in texts))))
-
-  # To simplify output, we'll only proceed if the very first line of the block
-  # texts is common to each of them.
-  if len(set(line_tuples[0])) != 1:
-    return []
-
-  result = []
-  lresult = defaultdict(list)
-  for i, line in enumerate(line_tuples):
-    if len(set(line)) == 1:
-      # We're about to output a line with the common prefix.  This is a sync
-      # point so flush any batched-up lines one prefix at a time to the output
-      # first.
-      for prefix in sorted(lresult):
-        result.extend(lresult[prefix])
-      lresult = defaultdict(list)
+        if not filecheck_cmd.startswith("FileCheck "):
+            _warn("skipping non-FileCheck RUN line: {}".format(run_line))
+            continue
 
-      # The line is common to each block so output with the common prefix.
-      result.append((common_prefix, line[0]))
-    else:
-      # The line is not common to each block, or we don't have a common prefix.
-      # If there are no prefixes available, warn and bail out.
-      if not prefixes[0]:
-        _warn('multiple lines not disambiguated by prefixes:\n{}\n'
-              'Some blocks may be skipped entirely as a result.'.format(
-                  '\n'.join('  - {}'.format(l) for l in line)))
-        return []
+        tool_cmd_args = tool_cmd[len(tool_basename) :].strip()
+        tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
 
-      # Iterate through the line from each of the blocks and add the line with
-      # the corresponding prefix to the current batch of results so that we can
-      # later output them per-prefix.
-      for i, l in enumerate(line):
-        for prefix in prefixes[i]:
-          lresult[prefix].append((prefix, l))
+        check_prefixes = common.get_check_prefixes(filecheck_cmd)
 
-  # Flush any remaining batched-up lines one prefix at a time to the output.
-  for prefix in sorted(lresult):
-    result.extend(lresult[prefix])
-  return result
+        run_infos.append((check_prefixes, tool_cmd_args))
 
+    return run_infos
 
-def _get_useful_prefix_info(run_infos):
-  """ Given the run_infos, calculate any prefixes that are common to every one,
-      and the length of the longest prefix string.
-  """
-  try:
-    all_sets = [set(s) for s in list(zip(*run_infos))[0]]
-    common_to_all = set.intersection(*all_sets)
-    longest_prefix_len = max(len(p) for p in set.union(*all_sets))
-  except IndexError:
-    common_to_all = []
-    longest_prefix_len = 0
-  else:
-    if len(common_to_all) > 1:
-      _warn('Multiple prefixes common to all RUN lines: {}'.format(
-          common_to_all))
-    if common_to_all:
-      common_to_all = sorted(common_to_all)[0]
-  return common_to_all, longest_prefix_len
 
+def _break_down_block(block_info, common_prefix):
+    """Given a block_info, see if we can analyze it further to let us break it
+    down by prefix per-line rather than per-block.
+    """
+    texts = block_info.keys()
+    prefixes = list(block_info.values())
+    # Split the lines from each of the incoming block_texts and zip them so that
+    # each element contains the corresponding lines from each text.  E.g.
+    #
+    # block_text_1: A   # line 1
+    #               B   # line 2
+    #
+    # block_text_2: A   # line 1
+    #               C   # line 2
+    #
+    # would become:
+    #
+    # [(A, A),   # line 1
+    #  (B, C)]   # line 2
+    #
+    line_tuples = list(zip(*list((text.splitlines() for text in texts))))
+
+    # To simplify output, we'll only proceed if the very first line of the block
+    # texts is common to each of them.
+    if len(set(line_tuples[0])) != 1:
+        return []
 
-def _align_matching_blocks(all_blocks, farthest_indexes):
-  """ Some sub-sequences of blocks may be common to multiple lists of blocks,
-      but at different indexes in each one.
-
-      For example, in the following case, A,B,E,F, and H are common to both
-      sets, but only A and B would be identified as such due to the indexes
-      matching:
-
-      index | 0 1 2 3 4 5 6
-      ------+--------------
-      setA  | A B C D E F H
-      setB  | A B E F G H
-
-      This function attempts to align the indexes of matching blocks by
-      inserting empty blocks into the block list. With this approach, A, B, E,
-      F, and H would now be able to be identified as matching blocks:
-
-      index | 0 1 2 3 4 5 6 7
-      ------+----------------
-      setA  | A B C D E F   H
-      setB  | A B     E F G H
-  """
-
-  # "Farthest block analysis": essentially, iterate over all blocks and find
-  # the highest index into a block list for the first instance of each block.
-  # This is relatively expensive, but we're dealing with small numbers of
-  # blocks so it doesn't make a perceivable difference to user time.
-  for blocks in all_blocks.values():
-    for block in blocks:
-      if not block:
-        continue
-
-      index = blocks.index(block)
-
-      if index > farthest_indexes[block]:
-        farthest_indexes[block] = index
-
-  # Use the results of the above analysis to identify any blocks that can be
-  # shunted along to match the farthest index value.
-  for blocks in all_blocks.values():
-    for index, block in enumerate(blocks):
-      if not block:
-        continue
-
-      changed = False
-      # If the block has not already been subject to alignment (i.e. if the
-      # previous block is not empty) then insert empty blocks until the index
-      # matches the farthest index identified for that block.
-      if (index > 0) and blocks[index - 1]:
-        while(index < farthest_indexes[block]):
-          blocks.insert(index, '')
-          index += 1
-          changed = True
-
-      if changed:
-        # Bail out.  We'll need to re-do the farthest block analysis now that
-        # we've inserted some blocks.
-        return True
-
-  return False
+    result = []
+    lresult = defaultdict(list)
+    for i, line in enumerate(line_tuples):
+        if len(set(line)) == 1:
+            # We're about to output a line with the common prefix.  This is a sync
+            # point so flush any batched-up lines one prefix at a time to the output
+            # first.
+            for prefix in sorted(lresult):
+                result.extend(lresult[prefix])
+            lresult = defaultdict(list)
+
+            # The line is common to each block so output with the common prefix.
+            result.append((common_prefix, line[0]))
+        else:
+            # The line is not common to each block, or we don't have a common prefix.
+            # If there are no prefixes available, warn and bail out.
+            if not prefixes[0]:
+                _warn(
+                    "multiple lines not disambiguated by prefixes:\n{}\n"
+                    "Some blocks may be skipped entirely as a result.".format(
+                        "\n".join("  - {}".format(l) for l in line)
+                    )
+                )
+                return []
+
+            # Iterate through the line from each of the blocks and add the line with
+            # the corresponding prefix to the current batch of results so that we can
+            # later output them per-prefix.
+            for i, l in enumerate(line):
+                for prefix in prefixes[i]:
+                    lresult[prefix].append((prefix, l))
+
+    # Flush any remaining batched-up lines one prefix at a time to the output.
+    for prefix in sorted(lresult):
+        result.extend(lresult[prefix])
+    return result
 
 
-def _get_block_infos(run_infos, test_path, args, common_prefix):  # noqa
-  """ For each run line, run the tool with the specified args and collect the
-      output. We use the concept of 'blocks' for uniquing, where a block is
-      a series of lines of text with no more than one newline character between
-      each one.  For example:
-
-      This
-      is
-      one
-      block
-
-      This is
-      another block
-
-      This is yet another block
-
-      We then build up a 'block_infos' structure containing a dict where the
-      text of each block is the key and a list of the sets of prefixes that may
-      generate that particular block.  This then goes through a series of
-      transformations to minimise the number of CHECK lines that need to be
-      written by taking advantage of common prefixes.
-  """
-
-  def _block_key(tool_args, prefixes):
-    """ Get a hashable key based on the current tool_args and prefixes.
+def _get_useful_prefix_info(run_infos):
+    """Given the run_infos, calculate any prefixes that are common to every one,
+    and the length of the longest prefix string.
     """
-    return ' '.join([tool_args] + prefixes)
-
-  all_blocks = {}
-  max_block_len = 0
-
-  # A cache of the furthest-back position in any block list of the first
-  # instance of each block, indexed by the block itself.
-  farthest_indexes = defaultdict(int)
-
-  # Run the tool for each run line to generate all of the blocks.
-  for prefixes, tool_args in run_infos:
-    key = _block_key(tool_args, prefixes)
-    raw_tool_output = common.invoke_tool(args.llvm_mca_binary,
-                                         tool_args,
-                                         test_path)
-
-    # Replace any lines consisting of purely whitespace with empty lines.
-    raw_tool_output = '\n'.join(line if line.strip() else ''
-                                for line in raw_tool_output.splitlines())
-
-    # Split blocks, stripping all trailing whitespace, but keeping preceding
-    # whitespace except for newlines so that columns will line up visually.
-    all_blocks[key] = [b.lstrip('\n').rstrip()
-                       for b in raw_tool_output.split('\n\n')]
-    max_block_len = max(max_block_len, len(all_blocks[key]))
-
-    # Attempt to align matching blocks until no more changes can be made.
-    made_changes = True
-    while made_changes:
-      made_changes = _align_matching_blocks(all_blocks, farthest_indexes)
-
-  # If necessary, pad the lists of blocks with empty blocks so that they are
-  # all the same length.
-  for key in all_blocks:
-    len_to_pad = max_block_len - len(all_blocks[key])
-    all_blocks[key] += [''] * len_to_pad
-
-  # Create the block_infos structure where it is a nested dict in the form of:
-  # block number -> block text -> list of prefix sets
-  block_infos = defaultdict(lambda: defaultdict(list))
-  for prefixes, tool_args in run_infos:
-    key = _block_key(tool_args, prefixes)
-    for block_num, block_text in enumerate(all_blocks[key]):
-      block_infos[block_num][block_text].append(set(prefixes))
-
-  # Now go through the block_infos structure and attempt to smartly prune the
-  # number of prefixes per block to the minimal set possible to output.
-  for block_num in range(len(block_infos)):
-    # When there are multiple block texts for a block num, remove any
-    # prefixes that are common to more than one of them.
-    # E.g. [ [{ALL,FOO}] , [{ALL,BAR}] ] -> [ [{FOO}] , [{BAR}] ]
-    all_sets = [s for s in block_infos[block_num].values()]
-    pruned_sets = []
-
-    for i, setlist in enumerate(all_sets):
-      other_set_values = set([elem for j, setlist2 in enumerate(all_sets)
-                              for set_ in setlist2 for elem in set_
-                              if i != j])
-      pruned_sets.append([s - other_set_values for s in setlist])
-
-    for i, block_text in enumerate(block_infos[block_num]):
-
-      # When a block text matches multiple sets of prefixes, try removing any
-      # prefixes that aren't common to all of them.
-      # E.g. [ {ALL,FOO} , {ALL,BAR} ] -> [{ALL}]
-      common_values = set.intersection(*pruned_sets[i])
-      if common_values:
-        pruned_sets[i] = [common_values]
-
-      # Everything should be uniqued as much as possible by now.  Apply the
-      # newly pruned sets to the block_infos structure.
-      # If there are any blocks of text that still match multiple prefixes,
-      # output a warning.
-      current_set = set()
-      for s in pruned_sets[i]:
-        s = sorted(list(s))
-        if s:
-          current_set.add(s[0])
-          if len(s) > 1:
-            _warn('Multiple prefixes generating same output: {} '
-                  '(discarding {})'.format(','.join(s), ','.join(s[1:])))
-
-      if block_text and not current_set:
-        raise Error(
-          'block not captured by existing prefixes:\n\n{}'.format(block_text))
-      block_infos[block_num][block_text] = sorted(list(current_set))
-
-    # If we have multiple block_texts, try to break them down further to avoid
-    # the case where we have very similar block_texts repeated after each
-    # other.
-    if common_prefix and len(block_infos[block_num]) > 1:
-      # We'll only attempt this if each of the block_texts have the same number
-      # of lines as each other.
-      same_num_Lines = (len(set(len(k.splitlines())
-                                for k in block_infos[block_num].keys())) == 1)
-      if same_num_Lines:
-        breakdown = _break_down_block(block_infos[block_num], common_prefix)
-        if breakdown:
-          block_infos[block_num] = breakdown
-
-  return block_infos
-
-
-def _write_block(output, block, not_prefix_set, common_prefix, prefix_pad):
-  """ Write an individual block, with correct padding on the prefixes.
-      Returns a set of all of the prefixes that it has written.
-  """
-  end_prefix = ':     '
-  previous_prefix = None
-  num_lines_of_prefix = 0
-  written_prefixes = set()
-
-  for prefix, line in block:
-    if prefix in not_prefix_set:
-      _warn('not writing for prefix {0} due to presence of "{0}-NOT:" '
-            'in input file.'.format(prefix))
-      continue
-
-    # If the previous line isn't already blank and we're writing more than one
-    # line for the current prefix, output a blank line first, unless either the
-    # current or previous prefix is common to all.
-    num_lines_of_prefix += 1
-    if prefix != previous_prefix:
-      if output and output[-1]:
-        if num_lines_of_prefix > 1 or any(p == common_prefix
-                                          for p in (prefix, previous_prefix)):
-          output.append('')
-      num_lines_of_prefix = 0
-      previous_prefix = prefix
-
-    written_prefixes.add(prefix)
-    output.append(
-        '{} {}{}{} {}'.format(COMMENT_CHAR,
-                              prefix,
-                              end_prefix,
-                              ' ' * (prefix_pad - len(prefix)),
-                              line).rstrip())
-    end_prefix = '-NEXT:'
-
-  output.append('')
-  return written_prefixes
-
-
-def _write_output(test_path, input_lines, prefix_list, block_infos,  # noqa
-                  args, common_prefix, prefix_pad):
-  prefix_set = set([prefix for prefixes, _ in prefix_list
-                    for prefix in prefixes])
-  not_prefix_set = set()
-
-  output_lines = []
-  for input_line in input_lines:
-    if input_line.startswith(ADVERT_PREFIX):
-      continue
-
-    if input_line.startswith(COMMENT_CHAR):
-      m = common.CHECK_RE.match(input_line)
-      try:
-        prefix = m.group(1)
-      except AttributeError:
-        prefix = None
-
-      if '{}-NOT:'.format(prefix) in input_line:
-        not_prefix_set.add(prefix)
-
-      if prefix not in prefix_set or prefix in not_prefix_set:
-        output_lines.append(input_line)
-        continue
-
-    if common.should_add_line_to_output(input_line, prefix_set):
-      # This input line of the function body will go as-is into the output.
-      # Except make leading whitespace uniform: 2 spaces.
-      input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r'  ', input_line)
-
-      # Skip empty lines if the previous output line is also empty.
-      if input_line or output_lines[-1]:
-        output_lines.append(input_line)
-    else:
-      continue
-
-  # Add a blank line before the new checks if required.
-  if len(output_lines) > 0 and output_lines[-1]:
-    output_lines.append('')
-
-  output_check_lines = []
-  used_prefixes = set()
-  for block_num in range(len(block_infos)):
-    if type(block_infos[block_num]) is list:
-      # The block is of the type output from _break_down_block().
-      used_prefixes |= _write_block(output_check_lines,
-                                    block_infos[block_num],
-                                    not_prefix_set,
-                                    common_prefix,
-                                    prefix_pad)
+    try:
+        all_sets = [set(s) for s in list(zip(*run_infos))[0]]
+        common_to_all = set.intersection(*all_sets)
+        longest_prefix_len = max(len(p) for p in set.union(*all_sets))
+    except IndexError:
+        common_to_all = []
+        longest_prefix_len = 0
     else:
-      # _break_down_block() was unable to do anything so output the block
-      # as-is.
-
-      # Rather than writing out each block as soon as we encounter it, save it
-      # indexed by prefix so that we can write all of the blocks out sorted by
-      # prefix at the end.
-      output_blocks = defaultdict(list)
-
-      for block_text in sorted(block_infos[block_num]):
+        if len(common_to_all) > 1:
+            _warn("Multiple prefixes common to all RUN lines: {}".format(common_to_all))
+        if common_to_all:
+            common_to_all = sorted(common_to_all)[0]
+    return common_to_all, longest_prefix_len
 
-        if not block_text:
-          continue
 
-        lines = block_text.split('\n')
-        for prefix in block_infos[block_num][block_text]:
-          assert prefix not in output_blocks
-          used_prefixes |= _write_block(output_blocks[prefix],
-                                        [(prefix, line) for line in lines],
-                                        not_prefix_set,
-                                        common_prefix,
-                                        prefix_pad)
+def _align_matching_blocks(all_blocks, farthest_indexes):
+    """Some sub-sequences of blocks may be common to multiple lists of blocks,
+    but at different indexes in each one.
+
+    For example, in the following case, A,B,E,F, and H are common to both
+    sets, but only A and B would be identified as such due to the indexes
+    matching:
+
+    index | 0 1 2 3 4 5 6
+    ------+--------------
+    setA  | A B C D E F H
+    setB  | A B E F G H
+
+    This function attempts to align the indexes of matching blocks by
+    inserting empty blocks into the block list. With this approach, A, B, E,
+    F, and H would now be able to be identified as matching blocks:
+
+    index | 0 1 2 3 4 5 6 7
+    ------+----------------
+    setA  | A B C D E F   H
+    setB  | A B     E F G H
+    """
 
-      for prefix in sorted(output_blocks):
-        output_check_lines.extend(output_blocks[prefix])
+    # "Farthest block analysis": essentially, iterate over all blocks and find
+    # the highest index into a block list for the first instance of each block.
+    # This is relatively expensive, but we're dealing with small numbers of
+    # blocks so it doesn't make a perceivable difference to user time.
+    for blocks in all_blocks.values():
+        for block in blocks:
+            if not block:
+                continue
+
+            index = blocks.index(block)
+
+            if index > farthest_indexes[block]:
+                farthest_indexes[block] = index
+
+    # Use the results of the above analysis to identify any blocks that can be
+    # shunted along to match the farthest index value.
+    for blocks in all_blocks.values():
+        for index, block in enumerate(blocks):
+            if not block:
+                continue
+
+            changed = False
+            # If the block has not already been subject to alignment (i.e. if the
+            # previous block is not empty) then insert empty blocks until the index
+            # matches the farthest index identified for that block.
+            if (index > 0) and blocks[index - 1]:
+                while index < farthest_indexes[block]:
+                    blocks.insert(index, "")
+                    index += 1
+                    changed = True
+
+            if changed:
+                # Bail out.  We'll need to re-do the farthest block analysis now that
+                # we've inserted some blocks.
+                return True
+
+    return False
 
-  unused_prefixes = (prefix_set - not_prefix_set) - used_prefixes
-  if unused_prefixes:
-    raise Error('unused prefixes: {}'.format(sorted(unused_prefixes)))
 
-  if output_check_lines:
-    output_lines.insert(0, ADVERT)
-    output_lines.extend(output_check_lines)
+def _get_block_infos(run_infos, test_path, args, common_prefix):  # noqa
+    """For each run line, run the tool with the specified args and collect the
+    output. We use the concept of 'blocks' for uniquing, where a block is
+    a series of lines of text with no more than one newline character between
+    each one.  For example:
+
+    This
+    is
+    one
+    block
+
+    This is
+    another block
+
+    This is yet another block
+
+    We then build up a 'block_infos' structure containing a dict where the
+    text of each block is the key and a list of the sets of prefixes that may
+    generate that particular block.  This then goes through a series of
+    transformations to minimise the number of CHECK lines that need to be
+    written by taking advantage of common prefixes.
+    """
 
-  # The file should not end with two newlines. It creates unnecessary churn.
-  while len(output_lines) > 0 and output_lines[-1] == '':
-    output_lines.pop()
+    def _block_key(tool_args, prefixes):
+        """Get a hashable key based on the current tool_args and prefixes."""
+        return " ".join([tool_args] + prefixes)
+
+    all_blocks = {}
+    max_block_len = 0
+
+    # A cache of the furthest-back position in any block list of the first
+    # instance of each block, indexed by the block itself.
+    farthest_indexes = defaultdict(int)
+
+    # Run the tool for each run line to generate all of the blocks.
+    for prefixes, tool_args in run_infos:
+        key = _block_key(tool_args, prefixes)
+        raw_tool_output = common.invoke_tool(args.llvm_mca_binary, tool_args, test_path)
+
+        # Replace any lines consisting of purely whitespace with empty lines.
+        raw_tool_output = "\n".join(
+            line if line.strip() else "" for line in raw_tool_output.splitlines()
+        )
+
+        # Split blocks, stripping all trailing whitespace, but keeping preceding
+        # whitespace except for newlines so that columns will line up visually.
+        all_blocks[key] = [
+            b.lstrip("\n").rstrip() for b in raw_tool_output.split("\n\n")
+        ]
+        max_block_len = max(max_block_len, len(all_blocks[key]))
+
+        # Attempt to align matching blocks until no more changes can be made.
+        made_changes = True
+        while made_changes:
+            made_changes = _align_matching_blocks(all_blocks, farthest_indexes)
+
+    # If necessary, pad the lists of blocks with empty blocks so that they are
+    # all the same length.
+    for key in all_blocks:
+        len_to_pad = max_block_len - len(all_blocks[key])
+        all_blocks[key] += [""] * len_to_pad
+
+    # Create the block_infos structure where it is a nested dict in the form of:
+    # block number -> block text -> list of prefix sets
+    block_infos = defaultdict(lambda: defaultdict(list))
+    for prefixes, tool_args in run_infos:
+        key = _block_key(tool_args, prefixes)
+        for block_num, block_text in enumerate(all_blocks[key]):
+            block_infos[block_num][block_text].append(set(prefixes))
+
+    # Now go through the block_infos structure and attempt to smartly prune the
+    # number of prefixes per block to the minimal set possible to output.
+    for block_num in range(len(block_infos)):
+        # When there are multiple block texts for a block num, remove any
+        # prefixes that are common to more than one of them.
+        # E.g. [ [{ALL,FOO}] , [{ALL,BAR}] ] -> [ [{FOO}] , [{BAR}] ]
+        all_sets = [s for s in block_infos[block_num].values()]
+        pruned_sets = []
+
+        for i, setlist in enumerate(all_sets):
+            other_set_values = set(
+                [
+                    elem
+                    for j, setlist2 in enumerate(all_sets)
+                    for set_ in setlist2
+                    for elem in set_
+                    if i != j
+                ]
+            )
+            pruned_sets.append([s - other_set_values for s in setlist])
+
+        for i, block_text in enumerate(block_infos[block_num]):
+
+            # When a block text matches multiple sets of prefixes, try removing any
+            # prefixes that aren't common to all of them.
+            # E.g. [ {ALL,FOO} , {ALL,BAR} ] -> [{ALL}]
+            common_values = set.intersection(*pruned_sets[i])
+            if common_values:
+                pruned_sets[i] = [common_values]
+
+            # Everything should be uniqued as much as possible by now.  Apply the
+            # newly pruned sets to the block_infos structure.
+            # If there are any blocks of text that still match multiple prefixes,
+            # output a warning.
+            current_set = set()
+            for s in pruned_sets[i]:
+                s = sorted(list(s))
+                if s:
+                    current_set.add(s[0])
+                    if len(s) > 1:
+                        _warn(
+                            "Multiple prefixes generating same output: {} "
+                            "(discarding {})".format(",".join(s), ",".join(s[1:]))
+                        )
+
+            if block_text and not current_set:
+                raise Error(
+                    "block not captured by existing prefixes:\n\n{}".format(block_text)
+                )
+            block_infos[block_num][block_text] = sorted(list(current_set))
+
+        # If we have multiple block_texts, try to break them down further to avoid
+        # the case where we have very similar block_texts repeated after each
+        # other.
+        if common_prefix and len(block_infos[block_num]) > 1:
+            # We'll only attempt this if each of the block_texts have the same number
+            # of lines as each other.
+            same_num_Lines = (
+                len(set(len(k.splitlines()) for k in block_infos[block_num].keys()))
+                == 1
+            )
+            if same_num_Lines:
+                breakdown = _break_down_block(block_infos[block_num], common_prefix)
+                if breakdown:
+                    block_infos[block_num] = breakdown
+
+    return block_infos
 
-  if input_lines == output_lines:
-    sys.stderr.write('            [unchanged]\n')
-    return
-  sys.stderr.write('      [{} lines total]\n'.format(len(output_lines)))
 
-  common.debug('Writing', len(output_lines), 'lines to', test_path, '..\n\n')
+def _write_block(output, block, not_prefix_set, common_prefix, prefix_pad):
+    """Write an individual block, with correct padding on the prefixes.
+    Returns a set of all of the prefixes that it has written.
+    """
+    end_prefix = ":     "
+    previous_prefix = None
+    num_lines_of_prefix = 0
+    written_prefixes = set()
+
+    for prefix, line in block:
+        if prefix in not_prefix_set:
+            _warn(
+                'not writing for prefix {0} due to presence of "{0}-NOT:" '
+                "in input file.".format(prefix)
+            )
+            continue
+
+        # If the previous line isn't already blank and we're writing more than one
+        # line for the current prefix, output a blank line first, unless either the
+        # current or previous prefix is common to all.
+        num_lines_of_prefix += 1
+        if prefix != previous_prefix:
+            if output and output[-1]:
+                if num_lines_of_prefix > 1 or any(
+                    p == common_prefix for p in (prefix, previous_prefix)
+                ):
+                    output.append("")
+            num_lines_of_prefix = 0
+            previous_prefix = prefix
+
+        written_prefixes.add(prefix)
+        output.append(
+            "{} {}{}{} {}".format(
+                COMMENT_CHAR, prefix, end_prefix, " " * (prefix_pad - len(prefix)), line
+            ).rstrip()
+        )
+        end_prefix = "-NEXT:"
+
+    output.append("")
+    return written_prefixes
+
+
+def _write_output(
+    test_path,
+    input_lines,
+    prefix_list,
+    block_infos,  # noqa
+    args,
+    common_prefix,
+    prefix_pad,
+):
+    prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
+    not_prefix_set = set()
+
+    output_lines = []
+    for input_line in input_lines:
+        if input_line.startswith(ADVERT_PREFIX):
+            continue
+
+        if input_line.startswith(COMMENT_CHAR):
+            m = common.CHECK_RE.match(input_line)
+            try:
+                prefix = m.group(1)
+            except AttributeError:
+                prefix = None
+
+            if "{}-NOT:".format(prefix) in input_line:
+                not_prefix_set.add(prefix)
+
+            if prefix not in prefix_set or prefix in not_prefix_set:
+                output_lines.append(input_line)
+                continue
+
+        if common.should_add_line_to_output(input_line, prefix_set):
+            # This input line of the function body will go as-is into the output.
+            # Except make leading whitespace uniform: 2 spaces.
+            input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r"  ", input_line)
+
+            # Skip empty lines if the previous output line is also empty.
+            if input_line or output_lines[-1]:
+                output_lines.append(input_line)
+        else:
+            continue
+
+    # Add a blank line before the new checks if required.
+    if len(output_lines) > 0 and output_lines[-1]:
+        output_lines.append("")
+
+    output_check_lines = []
+    used_prefixes = set()
+    for block_num in range(len(block_infos)):
+        if type(block_infos[block_num]) is list:
+            # The block is of the type output from _break_down_block().
+            used_prefixes |= _write_block(
+                output_check_lines,
+                block_infos[block_num],
+                not_prefix_set,
+                common_prefix,
+                prefix_pad,
+            )
+        else:
+            # _break_down_block() was unable to do anything so output the block
+            # as-is.
+
+            # Rather than writing out each block as soon as we encounter it, save it
+            # indexed by prefix so that we can write all of the blocks out sorted by
+            # prefix at the end.
+            output_blocks = defaultdict(list)
+
+            for block_text in sorted(block_infos[block_num]):
+
+                if not block_text:
+                    continue
+
+                lines = block_text.split("\n")
+                for prefix in block_infos[block_num][block_text]:
+                    assert prefix not in output_blocks
+                    used_prefixes |= _write_block(
+                        output_blocks[prefix],
+                        [(prefix, line) for line in lines],
+                        not_prefix_set,
+                        common_prefix,
+                        prefix_pad,
+                    )
+
+            for prefix in sorted(output_blocks):
+                output_check_lines.extend(output_blocks[prefix])
+
+    unused_prefixes = (prefix_set - not_prefix_set) - used_prefixes
+    if unused_prefixes:
+        raise Error("unused prefixes: {}".format(sorted(unused_prefixes)))
+
+    if output_check_lines:
+        output_lines.insert(0, ADVERT)
+        output_lines.extend(output_check_lines)
+
+    # The file should not end with two newlines. It creates unnecessary churn.
+    while len(output_lines) > 0 and output_lines[-1] == "":
+        output_lines.pop()
+
+    if input_lines == output_lines:
+        sys.stderr.write("            [unchanged]\n")
+        return
+    sys.stderr.write("      [{} lines total]\n".format(len(output_lines)))
+
+    common.debug("Writing", len(output_lines), "lines to", test_path, "..\n\n")
+
+    with open(test_path, "wb") as f:
+        f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
 
-  with open(test_path, 'wb') as f:
-    f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
 
 def main():
-  args = _parse_args()
-  test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
-  for test_path in test_paths:
-    sys.stderr.write('Test: {}\n'.format(test_path))
-
-    # Call this per test. By default each warning will only be written once
-    # per source location. Reset the warning filter so that now each warning
-    # will be written once per source location per test.
-    _configure_warnings(args)
-
-    if not os.path.isfile(test_path):
-      raise Error('could not find test file: {}'.format(test_path))
-
-    with open(test_path) as f:
-      input_lines = [l.rstrip() for l in f]
-
-    run_lines = common.find_run_lines(test_path, input_lines)
-    run_infos = _get_run_infos(run_lines, args)
-    common_prefix, prefix_pad = _get_useful_prefix_info(run_infos)
-    block_infos = _get_block_infos(run_infos, test_path, args, common_prefix)
-    _write_output(test_path,
-                  input_lines,
-                  run_infos,
-                  block_infos,
-                  args,
-                  common_prefix,
-                  prefix_pad)
-
-  return 0
-
-
-if __name__ == '__main__':
-  try:
-    warnings.showwarning = _showwarning
-    sys.exit(main())
-  except Error as e:
-    sys.stdout.write('error: {}\n'.format(e))
-    sys.exit(1)
+    args = _parse_args()
+    test_paths = [test for pattern in args.tests for test in glob.glob(pattern)]
+    for test_path in test_paths:
+        sys.stderr.write("Test: {}\n".format(test_path))
+
+        # Call this per test. By default each warning will only be written once
+        # per source location. Reset the warning filter so that now each warning
+        # will be written once per source location per test.
+        _configure_warnings(args)
+
+        if not os.path.isfile(test_path):
+            raise Error("could not find test file: {}".format(test_path))
+
+        with open(test_path) as f:
+            input_lines = [l.rstrip() for l in f]
+
+        run_lines = common.find_run_lines(test_path, input_lines)
+        run_infos = _get_run_infos(run_lines, args)
+        common_prefix, prefix_pad = _get_useful_prefix_info(run_infos)
+        block_infos = _get_block_infos(run_infos, test_path, args, common_prefix)
+        _write_output(
+            test_path,
+            input_lines,
+            run_infos,
+            block_infos,
+            args,
+            common_prefix,
+            prefix_pad,
+        )
+
+    return 0
+
+
+if __name__ == "__main__":
+    try:
+        warnings.showwarning = _showwarning
+        sys.exit(main())
+    except Error as e:
+        sys.stdout.write("error: {}\n".format(e))
+        sys.exit(1)

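Before moving on, the alignment pass in update_mca_test_checks.py above
(_align_matching_blocks) deserves a gloss, since black's reflow makes its loop
structure easier to follow: it repeatedly finds, for each distinct block, the
farthest index at which that block first appears in any list, then pads
earlier occurrences with empty blocks until the indexes agree, as the
A/B/E/F/H table in its docstring illustrates. The snippet below is a
condensed, self-contained sketch of that idea, not the patch's code; the
input data is the docstring's own example:

    from collections import defaultdict

    def align_blocks(all_blocks):
        # Condensed sketch of the _align_matching_blocks idea: insert empty
        # blocks until matching blocks share an index across all lists.
        farthest = defaultdict(int)
        changed = True
        while changed:
            changed = False
            # Farthest-index analysis: the highest first-occurrence index of
            # each block across every list seen so far.
            for blocks in all_blocks.values():
                for block in blocks:
                    if block:
                        farthest[block] = max(farthest[block], blocks.index(block))
            # Shift a block right, by inserting empty blocks before it, when
            # its predecessor is non-empty and it sits left of its farthest
            # known index; then redo the analysis from scratch.
            for blocks in all_blocks.values():
                for index, block in enumerate(blocks):
                    if (block and index > 0 and blocks[index - 1]
                            and index < farthest[block]):
                        blocks[index:index] = [""] * (farthest[block] - index)
                        changed = True
                        break
                if changed:
                    break

    blocks = {"setA": list("ABCDEFH"), "setB": list("ABEFGH")}
    align_blocks(blocks)
    print(blocks["setA"])  # ['A', 'B', 'C', 'D', 'E', 'F', '', 'H']
    print(blocks["setB"])  # ['A', 'B', '', '', 'E', 'F', 'G', 'H']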
diff --git a/llvm/utils/update_mir_test_checks.py b/llvm/utils/update_mir_test_checks.py
index 6e3a5e9732761..66efce6c82165 100755
--- a/llvm/utils/update_mir_test_checks.py
+++ b/llvm/utils/update_mir_test_checks.py
@@ -30,37 +30,42 @@
 
 from UpdateTestChecks import common
 
-MIR_FUNC_NAME_RE = re.compile(r' *name: *(?P<func>[A-Za-z0-9_.-]+)')
-MIR_BODY_BEGIN_RE = re.compile(r' *body: *\|')
-MIR_BASIC_BLOCK_RE = re.compile(r' *bb\.[0-9]+.*:$')
-VREG_RE = re.compile(r'(%[0-9]+)(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?')
-MI_FLAGS_STR= (
-    r'(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn '
-    r'|reassoc |nuw |nsw |exact |nofpexcept |nomerge )*')
-VREG_DEF_FLAGS_STR = r'(?:dead )*'
+MIR_FUNC_NAME_RE = re.compile(r" *name: *(?P<func>[A-Za-z0-9_.-]+)")
+MIR_BODY_BEGIN_RE = re.compile(r" *body: *\|")
+MIR_BASIC_BLOCK_RE = re.compile(r" *bb\.[0-9]+.*:$")
+VREG_RE = re.compile(r"(%[0-9]+)(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?")
+MI_FLAGS_STR = (
+    r"(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn "
+    r"|reassoc |nuw |nsw |exact |nofpexcept |nomerge )*"
+)
+VREG_DEF_FLAGS_STR = r"(?:dead )*"
 VREG_DEF_RE = re.compile(
-    r'^ *(?P<vregs>{2}{0}(?:, {2}{0})*) = '
-    r'{1}(?P<opcode>[A-Zt][A-Za-z0-9_]+)'.format(
-        VREG_RE.pattern, MI_FLAGS_STR, VREG_DEF_FLAGS_STR))
-MIR_PREFIX_DATA_RE = re.compile(r'^ *(;|bb.[0-9].*: *$|[a-z]+:( |$)|$)')
+    r"^ *(?P<vregs>{2}{0}(?:, {2}{0})*) = "
+    r"{1}(?P<opcode>[A-Zt][A-Za-z0-9_]+)".format(
+        VREG_RE.pattern, MI_FLAGS_STR, VREG_DEF_FLAGS_STR
+    )
+)
+MIR_PREFIX_DATA_RE = re.compile(r"^ *(;|bb.[0-9].*: *$|[a-z]+:( |$)|$)")
 
 IR_FUNC_NAME_RE = re.compile(
-    r'^\s*define\s+(?:internal\s+)?[^@]*@(?P<func>[A-Za-z0-9_.]+)\s*\(')
-IR_PREFIX_DATA_RE = re.compile(r'^ *(;|$)')
+    r"^\s*define\s+(?:internal\s+)?[^@]*@(?P<func>[A-Za-z0-9_.]+)\s*\("
+)
+IR_PREFIX_DATA_RE = re.compile(r"^ *(;|$)")
 
 MIR_FUNC_RE = re.compile(
-    r'^---$'
-    r'\n'
-    r'^ *name: *(?P<func>[A-Za-z0-9_.-]+)$'
-    r'.*?'
-    r'^ *fixedStack: *(\[\])? *\n'
-    r'(?P<fixedStack>.*?)\n?'
-    r'^ *stack:'
-    r'.*?'
-    r'^ *body: *\|\n'
-    r'(?P<body>.*?)\n'
-    r'^\.\.\.$',
-    flags=(re.M | re.S))
+    r"^---$"
+    r"\n"
+    r"^ *name: *(?P<func>[A-Za-z0-9_.-]+)$"
+    r".*?"
+    r"^ *fixedStack: *(\[\])? *\n"
+    r"(?P<fixedStack>.*?)\n?"
+    r"^ *stack:"
+    r".*?"
+    r"^ *body: *\|\n"
+    r"(?P<body>.*?)\n"
+    r"^\.\.\.$",
+    flags=(re.M | re.S),
+)
 
 
 class LLC:
@@ -68,15 +73,16 @@ def __init__(self, bin):
         self.bin = bin
 
     def __call__(self, args, ir):
-        if ir.endswith('.mir'):
-            args = '{} -x mir'.format(args)
+        if ir.endswith(".mir"):
+            args = "{} -x mir".format(args)
         with open(ir) as ir_file:
-            stdout = subprocess.check_output('{} {}'.format(self.bin, args),
-                                             shell=True, stdin=ir_file)
+            stdout = subprocess.check_output(
+                "{} {}".format(self.bin, args), shell=True, stdin=ir_file
+            )
             if sys.version_info[0] > 2:
-              stdout = stdout.decode()
+                stdout = stdout.decode()
             # Fix line endings to unix CR style.
-            stdout = stdout.replace('\r\n', '\n')
+            stdout = stdout.replace("\r\n", "\n")
         return stdout
 
 
@@ -107,21 +113,22 @@ def build_run_list(test, run_lines, verbose=False):
     run_list = []
     all_prefixes = []
     for l in run_lines:
-        if '|' not in l:
-            common.warn('Skipping unparsable RUN line: ' + l)
+        if "|" not in l:
+            common.warn("Skipping unparsable RUN line: " + l)
             continue
 
-        commands = [cmd.strip() for cmd in l.split('|', 1)]
+        commands = [cmd.strip() for cmd in l.split("|", 1)]
         llc_cmd = commands[0]
-        filecheck_cmd = commands[1] if len(commands) > 1 else ''
+        filecheck_cmd = commands[1] if len(commands) > 1 else ""
         common.verify_filecheck_prefixes(filecheck_cmd)
 
-        if not llc_cmd.startswith('llc '):
-            common.warn('Skipping non-llc RUN line: {}'.format(l), test_file=test)
+        if not llc_cmd.startswith("llc "):
+            common.warn("Skipping non-llc RUN line: {}".format(l), test_file=test)
             continue
-        if not filecheck_cmd.startswith('FileCheck '):
-            common.warn('Skipping non-FileChecked RUN line: {}'.format(l),
-                 test_file=test)
+        if not filecheck_cmd.startswith("FileCheck "):
+            common.warn(
+                "Skipping non-FileChecked RUN line: {}".format(l), test_file=test
+            )
             continue
 
         triple = None
@@ -131,10 +138,10 @@ def build_run_list(test, run_lines, verbose=False):
         # If we find -march but not -mtriple, use that.
         m = common.MARCH_ARG_RE.search(llc_cmd)
         if m and not triple:
-            triple = '{}--'.format(m.group(1))
+            triple = "{}--".format(m.group(1))
 
-        cmd_args = llc_cmd[len('llc'):].strip()
-        cmd_args = cmd_args.replace('< %s', '').replace('%s', '').strip()
+        cmd_args = llc_cmd[len("llc") :].strip()
+        cmd_args = cmd_args.replace("< %s", "").replace("%s", "").strip()
         check_prefixes = common.get_check_prefixes(filecheck_cmd)
         all_prefixes += check_prefixes
 
@@ -157,7 +164,7 @@ def find_functions_with_one_bb(lines, verbose=False):
         if m:
             if bbs == 1:
                 result.append(cur_func)
-            cur_func = m.group('func')
+            cur_func = m.group("func")
             bbs = 0
         m = MIR_BASIC_BLOCK_RE.match(line)
         if m:
@@ -178,16 +185,17 @@ def __eq__(self, other):
         return self.body == other.body and self.fixedStack == other.fixedStack
 
 
-def build_function_info_dictionary(test, raw_tool_output, triple, prefixes,
-                                   func_dict, verbose):
+def build_function_info_dictionary(
+    test, raw_tool_output, triple, prefixes, func_dict, verbose
+):
     for m in MIR_FUNC_RE.finditer(raw_tool_output):
-        func = m.group('func')
-        fixedStack = m.group('fixedStack')
-        body = m.group('body')
+        func = m.group("func")
+        fixedStack = m.group("fixedStack")
+        body = m.group("body")
         if verbose:
-            log('Processing function: {}'.format(func))
+            log("Processing function: {}".format(func))
             for l in body.splitlines():
-                log('  {}'.format(l))
+                log("  {}".format(l))
 
         # Vreg mangling
         mangled = []
@@ -195,16 +203,18 @@ def build_function_info_dictionary(test, raw_tool_output, triple, prefixes,
         for func_line in body.splitlines(keepends=True):
             m = VREG_DEF_RE.match(func_line)
             if m:
-                for vreg in VREG_RE.finditer(m.group('vregs')):
-                    name = mangle_vreg(m.group('opcode'), vreg_map.values())
+                for vreg in VREG_RE.finditer(m.group("vregs")):
+                    name = mangle_vreg(m.group("opcode"), vreg_map.values())
                     vreg_map[vreg.group(1)] = name
                     func_line = func_line.replace(
-                        vreg.group(1), '[[{}:%[0-9]+]]'.format(name), 1)
+                        vreg.group(1), "[[{}:%[0-9]+]]".format(name), 1
+                    )
             for number, name in vreg_map.items():
-                func_line = re.sub(r'{}\b'.format(number), '[[{}]]'.format(name),
-                                func_line)
+                func_line = re.sub(
+                    r"{}\b".format(number), "[[{}]]".format(name), func_line
+                )
             mangled.append(func_line)
-        body = ''.join(mangled)
+        body = "".join(mangled)
 
         for prefix in prefixes:
             info = FunctionInfo(body, fixedStack)
@@ -215,8 +225,9 @@ def build_function_info_dictionary(test, raw_tool_output, triple, prefixes,
                 func_dict[prefix][func] = info
 
 
-def add_checks_for_function(test, output_lines, run_list, func_dict, func_name,
-                            single_bb, args):
+def add_checks_for_function(
+    test, output_lines, run_list, func_dict, func_name, single_bb, args
+):
     printed_prefixes = set()
     for run in run_list:
         for prefix in run.prefixes:
@@ -228,83 +239,96 @@ def add_checks_for_function(test, output_lines, run_list, func_dict, func_name,
             #     # Add some space between different check prefixes.
             #     output_lines.append('')
             printed_prefixes.add(prefix)
-            log('Adding {} lines for {}'.format(prefix, func_name), args.verbose)
-            add_check_lines(test, output_lines, prefix, func_name, single_bb,
-                            func_dict[prefix][func_name], args)
+            log("Adding {} lines for {}".format(prefix, func_name), args.verbose)
+            add_check_lines(
+                test,
+                output_lines,
+                prefix,
+                func_name,
+                single_bb,
+                func_dict[prefix][func_name],
+                args,
+            )
             break
         else:
             common.warn(
-                'Found conflicting asm for function: {}'.format(func_name),
-                test_file=test)
+                "Found conflicting asm for function: {}".format(func_name),
+                test_file=test,
+            )
     return output_lines
 
 
-def add_check_lines(test, output_lines, prefix, func_name, single_bb,
-                    func_info: FunctionInfo, args):
+def add_check_lines(
+    test, output_lines, prefix, func_name, single_bb, func_info: FunctionInfo, args
+):
     func_body = func_info.body.splitlines()
     if single_bb:
         # Don't bother checking the basic block label for a single BB
         func_body.pop(0)
 
     if not func_body:
-        common.warn('Function has no instructions to check: {}'.format(func_name),
-             test_file=test)
+        common.warn(
+            "Function has no instructions to check: {}".format(func_name),
+            test_file=test,
+        )
         return
 
     first_line = func_body[0]
-    indent = len(first_line) - len(first_line.lstrip(' '))
+    indent = len(first_line) - len(first_line.lstrip(" "))
     # A check comment, indented the appropriate amount
-    check = '{:>{}}; {}'.format('', indent, prefix)
+    check = "{:>{}}; {}".format("", indent, prefix)
 
-    output_lines.append('{}-LABEL: name: {}'.format(check, func_name))
+    output_lines.append("{}-LABEL: name: {}".format(check, func_name))
 
     if args.print_fixed_stack:
-        output_lines.append('{}: fixedStack:'.format(check))
+        output_lines.append("{}: fixedStack:".format(check))
         for stack_line in func_info.fixedStack.splitlines():
-            filecheck_directive = check + '-NEXT'
-            output_lines.append('{}: {}'.format(filecheck_directive, stack_line))
+            filecheck_directive = check + "-NEXT"
+            output_lines.append("{}: {}".format(filecheck_directive, stack_line))
 
     first_check = True
     for func_line in func_body:
         if not func_line.strip():
             # The mir printer prints leading whitespace so we can't use CHECK-EMPTY:
-            output_lines.append(check + '-NEXT: {{' + func_line + '$}}')
+            output_lines.append(check + "-NEXT: {{" + func_line + "$}}")
             continue
-        filecheck_directive = check if first_check else check + '-NEXT'
+        filecheck_directive = check if first_check else check + "-NEXT"
         first_check = False
-        check_line = '{}: {}'.format(filecheck_directive, func_line[indent:]).rstrip()
+        check_line = "{}: {}".format(filecheck_directive, func_line[indent:]).rstrip()
         output_lines.append(check_line)
 
 
 def mangle_vreg(opcode, current_names):
     base = opcode
     # Simplify some common prefixes and suffixes
-    if opcode.startswith('G_'):
-        base = base[len('G_'):]
-    if opcode.endswith('_PSEUDO'):
-        base = base[:len('_PSEUDO')]
+    if opcode.startswith("G_"):
+        base = base[len("G_") :]
+    if opcode.endswith("_PSEUDO"):
+        base = base[: len("_PSEUDO")]
     # Shorten some common opcodes with long-ish names
-    base = dict(IMPLICIT_DEF='DEF',
-                GLOBAL_VALUE='GV',
-                CONSTANT='C',
-                FCONSTANT='C',
-                MERGE_VALUES='MV',
-                UNMERGE_VALUES='UV',
-                INTRINSIC='INT',
-                INTRINSIC_W_SIDE_EFFECTS='INT',
-                INSERT_VECTOR_ELT='IVEC',
-                EXTRACT_VECTOR_ELT='EVEC',
-                SHUFFLE_VECTOR='SHUF').get(base, base)
+    base = dict(
+        IMPLICIT_DEF="DEF",
+        GLOBAL_VALUE="GV",
+        CONSTANT="C",
+        FCONSTANT="C",
+        MERGE_VALUES="MV",
+        UNMERGE_VALUES="UV",
+        INTRINSIC="INT",
+        INTRINSIC_W_SIDE_EFFECTS="INT",
+        INSERT_VECTOR_ELT="IVEC",
+        EXTRACT_VECTOR_ELT="EVEC",
+        SHUFFLE_VECTOR="SHUF",
+    ).get(base, base)
     # Avoid ambiguity when opcodes end in numbers
-    if len(base.rstrip('0123456789')) < len(base):
-        base += '_'
+    if len(base.rstrip("0123456789")) < len(base):
+        base += "_"
 
     i = 0
     for name in current_names:
-        if name.rstrip('0123456789') == base:
+        if name.rstrip("0123456789") == base:
             i += 1
     if i:
-        return '{}{}'.format(base, i)
+        return "{}{}".format(base, i)
     return base
 
 
@@ -331,22 +355,27 @@ def update_test_file(args, test, autogenerated_note):
         for prefix in run.prefixes:
             func_dict.update({prefix: dict()})
     for prefixes, llc_args, triple_in_cmd in run_list:
-        log('Extracted LLC cmd: llc {}'.format(llc_args), args.verbose)
-        log('Extracted FileCheck prefixes: {}'.format(prefixes), args.verbose)
+        log("Extracted LLC cmd: llc {}".format(llc_args), args.verbose)
+        log("Extracted FileCheck prefixes: {}".format(prefixes), args.verbose)
 
         raw_tool_output = args.llc_binary(llc_args, test)
         if not triple_in_cmd and not triple_in_ir:
-            common.warn('No triple found: skipping file', test_file=test)
+            common.warn("No triple found: skipping file", test_file=test)
             return
 
-        build_function_info_dictionary(test, raw_tool_output,
-                                       triple_in_cmd or triple_in_ir,
-                                       prefixes, func_dict, args.verbose)
+        build_function_info_dictionary(
+            test,
+            raw_tool_output,
+            triple_in_cmd or triple_in_ir,
+            prefixes,
+            func_dict,
+            args.verbose,
+        )
 
-    state = 'toplevel'
+    state = "toplevel"
     func_name = None
     prefix_set = set([prefix for run in run_list for prefix in run.prefixes])
-    log('Rewriting FileCheck prefixes: {}'.format(prefix_set), args.verbose)
+    log("Rewriting FileCheck prefixes: {}".format(prefix_set), args.verbose)
 
     output_lines = []
     output_lines.append(autogenerated_note)
@@ -355,96 +384,120 @@ def update_test_file(args, test, autogenerated_note):
         if input_line == autogenerated_note:
             continue
 
-        if state == 'toplevel':
+        if state == "toplevel":
             m = IR_FUNC_NAME_RE.match(input_line)
             if m:
-                state = 'ir function prefix'
-                func_name = m.group('func')
-            if input_line.rstrip('| \r\n') == '---':
-                state = 'document'
+                state = "ir function prefix"
+                func_name = m.group("func")
+            if input_line.rstrip("| \r\n") == "---":
+                state = "document"
             output_lines.append(input_line)
-        elif state == 'document':
+        elif state == "document":
             m = MIR_FUNC_NAME_RE.match(input_line)
             if m:
-                state = 'mir function metadata'
-                func_name = m.group('func')
-            if input_line.strip() == '...':
-                state = 'toplevel'
+                state = "mir function metadata"
+                func_name = m.group("func")
+            if input_line.strip() == "...":
+                state = "toplevel"
                 func_name = None
             if should_add_line_to_output(input_line, prefix_set):
                 output_lines.append(input_line)
-        elif state == 'mir function metadata':
+        elif state == "mir function metadata":
             if should_add_line_to_output(input_line, prefix_set):
                 output_lines.append(input_line)
             m = MIR_BODY_BEGIN_RE.match(input_line)
             if m:
                 if func_name in simple_functions:
                     # If there's only one block, put the checks inside it
-                    state = 'mir function prefix'
+                    state = "mir function prefix"
                     continue
-                state = 'mir function body'
-                add_checks_for_function(test, output_lines, run_list,
-                                        func_dict, func_name, single_bb=False,
-                                        args=args)
-        elif state == 'mir function prefix':
+                state = "mir function body"
+                add_checks_for_function(
+                    test,
+                    output_lines,
+                    run_list,
+                    func_dict,
+                    func_name,
+                    single_bb=False,
+                    args=args,
+                )
+        elif state == "mir function prefix":
             m = MIR_PREFIX_DATA_RE.match(input_line)
             if not m:
-                state = 'mir function body'
-                add_checks_for_function(test, output_lines, run_list,
-                                        func_dict, func_name, single_bb=True,
-                                        args=args)
+                state = "mir function body"
+                add_checks_for_function(
+                    test,
+                    output_lines,
+                    run_list,
+                    func_dict,
+                    func_name,
+                    single_bb=True,
+                    args=args,
+                )
 
             if should_add_line_to_output(input_line, prefix_set):
                 output_lines.append(input_line)
-        elif state == 'mir function body':
-            if input_line.strip() == '...':
-                state = 'toplevel'
+        elif state == "mir function body":
+            if input_line.strip() == "...":
+                state = "toplevel"
                 func_name = None
             if should_add_line_to_output(input_line, prefix_set):
                 output_lines.append(input_line)
-        elif state == 'ir function prefix':
+        elif state == "ir function prefix":
             m = IR_PREFIX_DATA_RE.match(input_line)
             if not m:
-                state = 'ir function body'
-                add_checks_for_function(test, output_lines, run_list,
-                                        func_dict, func_name, single_bb=False,
-                                        args=args)
+                state = "ir function body"
+                add_checks_for_function(
+                    test,
+                    output_lines,
+                    run_list,
+                    func_dict,
+                    func_name,
+                    single_bb=False,
+                    args=args,
+                )
 
             if should_add_line_to_output(input_line, prefix_set):
                 output_lines.append(input_line)
-        elif state == 'ir function body':
-            if input_line.strip() == '}':
-                state = 'toplevel'
+        elif state == "ir function body":
+            if input_line.strip() == "}":
+                state = "toplevel"
                 func_name = None
             if should_add_line_to_output(input_line, prefix_set):
                 output_lines.append(input_line)
 
+    log("Writing {} lines to {}...".format(len(output_lines), test), args.verbose)
 
-    log('Writing {} lines to {}...'.format(len(output_lines), test), args.verbose)
-
-    with open(test, 'wb') as fd:
-        fd.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
+    with open(test, "wb") as fd:
+        fd.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
 
 
 def main():
     parser = argparse.ArgumentParser(
-        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
-    parser.add_argument('--llc-binary', default='llc', type=LLC,
-                        help='The "llc" binary to generate the test case with')
-    parser.add_argument('--print-fixed-stack', action='store_true',
-                        help='Add check lines for fixedStack')
-    parser.add_argument('tests', nargs='+')
+        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument(
+        "--llc-binary",
+        default="llc",
+        type=LLC,
+        help='The "llc" binary to generate the test case with',
+    )
+    parser.add_argument(
+        "--print-fixed-stack",
+        action="store_true",
+        help="Add check lines for fixedStack",
+    )
+    parser.add_argument("tests", nargs="+")
     args = common.parse_commandline_args(parser)
 
     script_name = os.path.basename(__file__)
-    for ti in common.itertests(args.tests, parser,
-                               script_name='utils/' + script_name):
+    for ti in common.itertests(args.tests, parser, script_name="utils/" + script_name):
         try:
             update_test_file(ti.args, ti.path, ti.test_autogenerated_note)
         except Exception:
-            common.warn('Error processing file', test_file=ti.path)
+            common.warn("Error processing file", test_file=ti.path)
             raise
 
 
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()
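
As context for the regexes reformatted above: MIR_FUNC_RE carves one MIR
document out of llc's output and captures the function name, fixedStack
entries, and body. Below is a minimal, self-contained sketch of how it
behaves; the pattern is copied verbatim from the script, while the sample
MIR document is hypothetical:

import re

# Verbatim copy of the (black-formatted) pattern from update_mir_test_checks.py.
MIR_FUNC_RE = re.compile(
    r"^---$"
    r"\n"
    r"^ *name: *(?P<func>[A-Za-z0-9_.-]+)$"
    r".*?"
    r"^ *fixedStack: *(\[\])? *\n"
    r"(?P<fixedStack>.*?)\n?"
    r"^ *stack:"
    r".*?"
    r"^ *body: *\|\n"
    r"(?P<body>.*?)\n"
    r"^\.\.\.$",
    flags=(re.M | re.S),
)

# Hypothetical document of the shape `llc -stop-after=...` prints.
sample = """\
---
name: foo
fixedStack: []
stack: []
body: |
  bb.0:
    RET 0
...
"""

m = MIR_FUNC_RE.search(sample)
print(m.group("func"))  # foo
print(m.group("body"))  # '  bb.0:\n    RET 0'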

diff --git a/llvm/utils/update_test_checks.py b/llvm/utils/update_test_checks.py
index a977abbfd3e6b..56ff675ab78d2 100755
--- a/llvm/utils/update_test_checks.py
+++ b/llvm/utils/update_test_checks.py
@@ -43,216 +43,285 @@
 
 
 def main():
-  from argparse import RawTextHelpFormatter
-  parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
-  parser.add_argument('--tool', default='opt',
-                      help='The name of the tool used to generate the test case (defaults to "opt")')
-  parser.add_argument('--tool-binary', '--opt-binary',
-                      help='The tool binary used to generate the test case')
-  parser.add_argument(
-      '--function', help='The function in the test file to update')
-  parser.add_argument('-p', '--preserve-names', action='store_true',
-                      help='Do not scrub IR names')
-  parser.add_argument('--function-signature', action='store_true',
-                      help='Keep function signature information around for the check line')
-  parser.add_argument('--scrub-attributes', action='store_true',
-                      help='Remove attribute annotations (#0) from the end of check line')
-  parser.add_argument('--check-attributes', action='store_true',
-                      help='Check "Function Attributes" for functions')
-  parser.add_argument('--check-globals', action='store_true',
-                      help='Check global entries (global variables, metadata, attribute sets, ...) for functions')
-  parser.add_argument('tests', nargs='+')
-  initial_args = common.parse_commandline_args(parser)
-
-  script_name = os.path.basename(__file__)
-
-  if initial_args.tool_binary:
-    tool_basename = os.path.basename(initial_args.tool_binary)
-    if not re.match(r'^%s(-\d+)?(\.exe)?$' % (initial_args.tool), tool_basename):
-      common.error('Unexpected tool name: ' + tool_basename)
-      sys.exit(1)
-
-  for ti in common.itertests(initial_args.tests, parser,
-                             script_name='utils/' + script_name):
-    # If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces
-    if ti.args.scrub_attributes:
-      common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE
-    else:
-      common.SCRUB_TRAILING_WHITESPACE_TEST_RE = common.SCRUB_TRAILING_WHITESPACE_RE
-
-    tool_basename = ti.args.tool
-
-    prefix_list = []
-    for l in ti.run_lines:
-      if '|' not in l:
-        common.warn('Skipping unparsable RUN line: ' + l)
-        continue
-
-      commands = [cmd.strip() for cmd in l.split('|')]
-      assert len(commands) >= 2
-      preprocess_cmd = None
-      if len(commands) > 2:
-        preprocess_cmd = " | ".join(commands[:-2])
-      tool_cmd = commands[-2]
-      filecheck_cmd = commands[-1]
-      common.verify_filecheck_prefixes(filecheck_cmd)
-      if not tool_cmd.startswith(tool_basename + ' '):
-        common.warn('Skipping non-%s RUN line: %s' % (tool_basename, l))
-        continue
-
-      if not filecheck_cmd.startswith('FileCheck '):
-        common.warn('Skipping non-FileChecked RUN line: ' + l)
-        continue
-
-      tool_cmd_args = tool_cmd[len(tool_basename):].strip()
-      tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
-      check_prefixes = common.get_check_prefixes(filecheck_cmd)
-
-      # FIXME: We should use multiple check prefixes to common check lines. For
-      # now, we just ignore all but the last.
-      prefix_list.append((check_prefixes, tool_cmd_args, preprocess_cmd))
-
-    global_vars_seen_dict = {}
-    builder = common.FunctionTestBuilder(
-      run_list=prefix_list,
-      flags=ti.args,
-      scrubber_args=[],
-      path=ti.path)
-
-    tool_binary = ti.args.tool_binary
-    if not tool_binary:
-      tool_binary = tool_basename
-
-    for prefixes, tool_args, preprocess_cmd in prefix_list:
-      common.debug('Extracted tool cmd: ' + tool_basename + ' ' + tool_args)
-      common.debug('Extracted FileCheck prefixes: ' + str(prefixes))
-
-      raw_tool_output = common.invoke_tool(tool_binary, tool_args,
-                                           ti.path, preprocess_cmd=preprocess_cmd,
-                                           verbose=ti.args.verbose)
-      builder.process_run_line(common.OPT_FUNCTION_RE, common.scrub_body,
-              raw_tool_output, prefixes, False)
-      builder.processed_prefixes(prefixes)
-
-    func_dict = builder.finish_and_get_func_dict()
-    is_in_function = False
-    is_in_function_start = False
-    has_checked_pre_function_globals = False
-    prefix_set = set([prefix for prefixes, _, _ in prefix_list for prefix in prefixes])
-    common.debug('Rewriting FileCheck prefixes:', str(prefix_set))
-    output_lines = []
-
-    include_generated_funcs = common.find_arg_in_test(ti,
-                                                      lambda args: ti.args.include_generated_funcs,
-                                                      '--include-generated-funcs',
-                                                      True)
-    generated_prefixes = []
-    if include_generated_funcs:
-      # Generate the appropriate checks for each function.  We need to emit
-      # these in the order according to the generated output so that CHECK-LABEL
-      # works properly.  func_order provides that.
-
-      # We can't predict where various passes might insert functions so we can't
-      # be sure the input function order is maintained.  Therefore, first spit
-      # out all the source lines.
-      common.dump_input_lines(output_lines, ti, prefix_set, ';')
-
-      args = ti.args
-      if args.check_globals:
-        generated_prefixes.extend(
-            common.add_global_checks(builder.global_var_dict(), ';',
-                                     prefix_list, output_lines,
-                                     global_vars_seen_dict, args.preserve_names,
-                                     True))
-
-      # Now generate all the checks.
-      generated_prefixes.extend(
-          common.add_checks_at_end(
-              output_lines, prefix_list, builder.func_order(), ';',
-              lambda my_output_lines, prefixes, func: common.add_ir_checks(
-                  my_output_lines,
-                  ';',
-                  prefixes,
-                  func_dict,
-                  func,
-                  False,
-                  args.function_signature,
-                  args.version,
-                  global_vars_seen_dict,
-                  is_filtered=builder.is_filtered())))
-    else:
-      # "Normal" mode.
-      for input_line_info in ti.iterlines(output_lines):
-        input_line = input_line_info.line
-        args = input_line_info.args
-        if is_in_function_start:
-          if input_line == '':
-            continue
-          if input_line.lstrip().startswith(';'):
-            m = common.CHECK_RE.match(input_line)
-            if not m or m.group(1) not in prefix_set:
-              output_lines.append(input_line)
-              continue
-
-          # Print out the various check lines here.
-          generated_prefixes.extend(
-              common.add_ir_checks(
-                  output_lines,
-                  ';',
-                  prefix_list,
-                  func_dict,
-                  func_name,
-                  args.preserve_names,
-                  args.function_signature,
-                  args.version,
-                  global_vars_seen_dict,
-                  is_filtered=builder.is_filtered()))
-          is_in_function_start = False
-
-        m = common.IR_FUNCTION_RE.match(input_line)
-        if m and not has_checked_pre_function_globals:
-          if args.check_globals:
+    from argparse import RawTextHelpFormatter
+
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=RawTextHelpFormatter
+    )
+    parser.add_argument(
+        "--tool",
+        default="opt",
+        help='The name of the tool used to generate the test case (defaults to "opt")',
+    )
+    parser.add_argument(
+        "--tool-binary",
+        "--opt-binary",
+        help="The tool binary used to generate the test case",
+    )
+    parser.add_argument("--function", help="The function in the test file to update")
+    parser.add_argument(
+        "-p", "--preserve-names", action="store_true", help="Do not scrub IR names"
+    )
+    parser.add_argument(
+        "--function-signature",
+        action="store_true",
+        help="Keep function signature information around for the check line",
+    )
+    parser.add_argument(
+        "--scrub-attributes",
+        action="store_true",
+        help="Remove attribute annotations (#0) from the end of check line",
+    )
+    parser.add_argument(
+        "--check-attributes",
+        action="store_true",
+        help='Check "Function Attributes" for functions',
+    )
+    parser.add_argument(
+        "--check-globals",
+        action="store_true",
+        help="Check global entries (global variables, metadata, attribute sets, ...) for functions",
+    )
+    parser.add_argument("tests", nargs="+")
+    initial_args = common.parse_commandline_args(parser)
+
+    script_name = os.path.basename(__file__)
+
+    if initial_args.tool_binary:
+        tool_basename = os.path.basename(initial_args.tool_binary)
+        if not re.match(r"^%s(-\d+)?(\.exe)?$" % (initial_args.tool), tool_basename):
+            common.error("Unexpected tool name: " + tool_basename)
+            sys.exit(1)
+
+    for ti in common.itertests(
+        initial_args.tests, parser, script_name="utils/" + script_name
+    ):
+        # If requested we scrub trailing attribute annotations, e.g., '#0', together with whitespaces
+        if ti.args.scrub_attributes:
+            common.SCRUB_TRAILING_WHITESPACE_TEST_RE = (
+                common.SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE
+            )
+        else:
+            common.SCRUB_TRAILING_WHITESPACE_TEST_RE = (
+                common.SCRUB_TRAILING_WHITESPACE_RE
+            )
+
+        tool_basename = ti.args.tool
+
+        prefix_list = []
+        for l in ti.run_lines:
+            if "|" not in l:
+                common.warn("Skipping unparsable RUN line: " + l)
+                continue
+
+            commands = [cmd.strip() for cmd in l.split("|")]
+            assert len(commands) >= 2
+            preprocess_cmd = None
+            if len(commands) > 2:
+                preprocess_cmd = " | ".join(commands[:-2])
+            tool_cmd = commands[-2]
+            filecheck_cmd = commands[-1]
+            common.verify_filecheck_prefixes(filecheck_cmd)
+            if not tool_cmd.startswith(tool_basename + " "):
+                common.warn("Skipping non-%s RUN line: %s" % (tool_basename, l))
+                continue
+
+            if not filecheck_cmd.startswith("FileCheck "):
+                common.warn("Skipping non-FileChecked RUN line: " + l)
+                continue
+
+            tool_cmd_args = tool_cmd[len(tool_basename) :].strip()
+            tool_cmd_args = tool_cmd_args.replace("< %s", "").replace("%s", "").strip()
+            check_prefixes = common.get_check_prefixes(filecheck_cmd)
+
+            # FIXME: We should use multiple check prefixes to common check lines. For
+            # now, we just ignore all but the last.
+            prefix_list.append((check_prefixes, tool_cmd_args, preprocess_cmd))
+
+        global_vars_seen_dict = {}
+        builder = common.FunctionTestBuilder(
+            run_list=prefix_list, flags=ti.args, scrubber_args=[], path=ti.path
+        )
+
+        tool_binary = ti.args.tool_binary
+        if not tool_binary:
+            tool_binary = tool_basename
+
+        for prefixes, tool_args, preprocess_cmd in prefix_list:
+            common.debug("Extracted tool cmd: " + tool_basename + " " + tool_args)
+            common.debug("Extracted FileCheck prefixes: " + str(prefixes))
+
+            raw_tool_output = common.invoke_tool(
+                tool_binary,
+                tool_args,
+                ti.path,
+                preprocess_cmd=preprocess_cmd,
+                verbose=ti.args.verbose,
+            )
+            builder.process_run_line(
+                common.OPT_FUNCTION_RE,
+                common.scrub_body,
+                raw_tool_output,
+                prefixes,
+                False,
+            )
+            builder.processed_prefixes(prefixes)
+
+        func_dict = builder.finish_and_get_func_dict()
+        is_in_function = False
+        is_in_function_start = False
+        has_checked_pre_function_globals = False
+        prefix_set = set(
+            [prefix for prefixes, _, _ in prefix_list for prefix in prefixes]
+        )
+        common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
+        output_lines = []
+
+        include_generated_funcs = common.find_arg_in_test(
+            ti,
+            lambda args: ti.args.include_generated_funcs,
+            "--include-generated-funcs",
+            True,
+        )
+        generated_prefixes = []
+        if include_generated_funcs:
+            # Generate the appropriate checks for each function.  We need to emit
+            # these in the order according to the generated output so that CHECK-LABEL
+            # works properly.  func_order provides that.
+
+            # We can't predict where various passes might insert functions so we can't
+            # be sure the input function order is maintained.  Therefore, first spit
+            # out all the source lines.
+            common.dump_input_lines(output_lines, ti, prefix_set, ";")
+
+            args = ti.args
+            if args.check_globals:
+                generated_prefixes.extend(
+                    common.add_global_checks(
+                        builder.global_var_dict(),
+                        ";",
+                        prefix_list,
+                        output_lines,
+                        global_vars_seen_dict,
+                        args.preserve_names,
+                        True,
+                    )
+                )
+
+            # Now generate all the checks.
             generated_prefixes.extend(
-                common.add_global_checks(builder.global_var_dict(), ';',
-                                         prefix_list, output_lines,
-                                         global_vars_seen_dict,
-                                         args.preserve_names, True))
-          has_checked_pre_function_globals = True
-
-        if common.should_add_line_to_output(input_line, prefix_set, not is_in_function):
-          # This input line of the function body will go as-is into the output.
-          # Except make leading whitespace uniform: 2 spaces.
-          input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(r'  ', input_line)
-          output_lines.append(input_line)
-          if input_line.strip() == '}':
-            is_in_function = False
-            continue
-
-        if is_in_function:
-          continue
-
-        m = common.IR_FUNCTION_RE.match(input_line)
-        if not m:
-          continue
-        func_name = m.group(1)
-        if args.function is not None and func_name != args.function:
-          # When filtering on a specific function, skip all others.
-          continue
-        is_in_function = is_in_function_start = True
-
-    if args.check_globals:
-      generated_prefixes.extend(
-          common.add_global_checks(builder.global_var_dict(), ';', prefix_list,
-                                   output_lines, global_vars_seen_dict,
-                                   args.preserve_names, False))
-    if ti.args.gen_unused_prefix_body:
-      output_lines.extend(ti.get_checks_for_unused_prefixes(
-          prefix_list, generated_prefixes))
-    common.debug('Writing %d lines to %s...' % (len(output_lines), ti.path))
-
-    with open(ti.path, 'wb') as f:
-      f.writelines(['{}\n'.format(l).encode('utf-8') for l in output_lines])
-
-
-if __name__ == '__main__':
-  main()
+                common.add_checks_at_end(
+                    output_lines,
+                    prefix_list,
+                    builder.func_order(),
+                    ";",
+                    lambda my_output_lines, prefixes, func: common.add_ir_checks(
+                        my_output_lines,
+                        ";",
+                        prefixes,
+                        func_dict,
+                        func,
+                        False,
+                        args.function_signature,
+                        args.version,
+                        global_vars_seen_dict,
+                        is_filtered=builder.is_filtered(),
+                    ),
+                )
+            )
+        else:
+            # "Normal" mode.
+            for input_line_info in ti.iterlines(output_lines):
+                input_line = input_line_info.line
+                args = input_line_info.args
+                if is_in_function_start:
+                    if input_line == "":
+                        continue
+                    if input_line.lstrip().startswith(";"):
+                        m = common.CHECK_RE.match(input_line)
+                        if not m or m.group(1) not in prefix_set:
+                            output_lines.append(input_line)
+                            continue
+
+                    # Print out the various check lines here.
+                    generated_prefixes.extend(
+                        common.add_ir_checks(
+                            output_lines,
+                            ";",
+                            prefix_list,
+                            func_dict,
+                            func_name,
+                            args.preserve_names,
+                            args.function_signature,
+                            args.version,
+                            global_vars_seen_dict,
+                            is_filtered=builder.is_filtered(),
+                        )
+                    )
+                    is_in_function_start = False
+
+                m = common.IR_FUNCTION_RE.match(input_line)
+                if m and not has_checked_pre_function_globals:
+                    if args.check_globals:
+                        generated_prefixes.extend(
+                            common.add_global_checks(
+                                builder.global_var_dict(),
+                                ";",
+                                prefix_list,
+                                output_lines,
+                                global_vars_seen_dict,
+                                args.preserve_names,
+                                True,
+                            )
+                        )
+                    has_checked_pre_function_globals = True
+
+                if common.should_add_line_to_output(
+                    input_line, prefix_set, not is_in_function
+                ):
+                    # This input line of the function body will go as-is into the output.
+                    # Except make leading whitespace uniform: 2 spaces.
+                    input_line = common.SCRUB_LEADING_WHITESPACE_RE.sub(
+                        r"  ", input_line
+                    )
+                    output_lines.append(input_line)
+                    if input_line.strip() == "}":
+                        is_in_function = False
+                        continue
+
+                if is_in_function:
+                    continue
+
+                m = common.IR_FUNCTION_RE.match(input_line)
+                if not m:
+                    continue
+                func_name = m.group(1)
+                if args.function is not None and func_name != args.function:
+                    # When filtering on a specific function, skip all others.
+                    continue
+                is_in_function = is_in_function_start = True
+
+        if args.check_globals:
+            generated_prefixes.extend(
+                common.add_global_checks(
+                    builder.global_var_dict(),
+                    ";",
+                    prefix_list,
+                    output_lines,
+                    global_vars_seen_dict,
+                    args.preserve_names,
+                    False,
+                )
+            )
+        if ti.args.gen_unused_prefix_body:
+            output_lines.extend(
+                ti.get_checks_for_unused_prefixes(prefix_list, generated_prefixes)
+            )
+        common.debug("Writing %d lines to %s..." % (len(output_lines), ti.path))
+
+        with open(ti.path, "wb") as f:
+            f.writelines(["{}\n".format(l).encode("utf-8") for l in output_lines])
+
+
+if __name__ == "__main__":
+    main()
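
Since every hunk in this patch is mechanical, the change can be reproduced
or spot-checked from black's Python API instead of by reading the diff. A
small sketch, assuming a reasonably recent black release (format_str and
Mode are its documented entry points):

import black

# One of the single-quoted lines from the old update_test_checks.py.
src = "parser.add_argument('tests', nargs='+')\n"

# format_str applies the same rewrites as running `black` on the file:
# preferred double quotes, 88-column line splitting, trailing commas.
print(black.format_str(src, mode=black.Mode()), end="")
# -> parser.add_argument("tests", nargs="+")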

diff --git a/llvm/utils/update_test_prefix.py b/llvm/utils/update_test_prefix.py
index 4e9e18882c69f..434dc84fa4fc0 100755
--- a/llvm/utils/update_test_prefix.py
+++ b/llvm/utils/update_test_prefix.py
@@ -5,60 +5,72 @@
 import sys
 from concurrent.futures import ThreadPoolExecutor, as_completed
 
+
 def remove_prefix(i, d=0):
     if d == 100:
         return 2
-    s = os.popen('llvm-lit -a ' + i).read()
-    r = re.search('no check strings found with (?:prefix|prefixes) \'([^:]+)', s)
-    with open(i, 'r+') as f:
+    s = os.popen("llvm-lit -a " + i).read()
+    r = re.search("no check strings found with (?:prefix|prefixes) '([^:]+)", s)
+    with open(i, "r+") as f:
         s = f.read()
         if r:
             p = r.group(1)
-            s = re.sub('=' + p + ',', '=', s)
-            s = re.sub(',' + p + '([, \n])', '\\1', s)
-            s = re.sub('\s+-?-check-prefix=' + p + '([ \n])', '\\1', s)
+            s = re.sub("=" + p + ",", "=", s)
+            s = re.sub("," + p + "([, \n])", "\\1", s)
+            s = re.sub("\s+-?-check-prefix=" + p + "([ \n])", "\\1", s)
         else:
-            s = re.sub('-?-check-prefixes=([\w-]+)(\Z|[ \t\n])', '--check-prefix=\\1\\2', s)
-            t = re.search('-?-check-(?:prefix|prefixes)=([^ ]+)\s+-?-check-(?:prefix|prefixes)=([^ ]+)', s)
+            s = re.sub(
+                "-?-check-prefixes=([\w-]+)(\Z|[ \t\n])", "--check-prefix=\\1\\2", s
+            )
+            t = re.search(
+                "-?-check-(?:prefix|prefixes)=([^ ]+)\s+-?-check-(?:prefix|prefixes)=([^ ]+)",
+                s,
+            )
             while t:
-                s = re.sub(t.group(), '--check-prefixes=' + t.group(1) + ',' + t.group(2), s)
-                t = re.search('-?-check-(?:prefix|prefixes)=([^ ]+)\s+-?-check-(?:prefix|prefixes)=([^ ]+)', s)
-            s = re.sub('\s+-?-check-prefix=CHECK[ \t]*\n', '\n', s)
+                s = re.sub(
+                    t.group(), "--check-prefixes=" + t.group(1) + "," + t.group(2), s
+                )
+                t = re.search(
+                    "-?-check-(?:prefix|prefixes)=([^ ]+)\s+-?-check-(?:prefix|prefixes)=([^ ]+)",
+                    s,
+                )
+            s = re.sub("\s+-?-check-prefix=CHECK[ \t]*\n", "\n", s)
         f.truncate(0)
         f.seek(0)
         f.write(s)
     if not r:
-        t = re.search('Assertions have been autogenerated by (.*)', s)
+        t = re.search("Assertions have been autogenerated by (.*)", s)
         if t:
-            s = os.popen('llvm/' + t.group(1) + ' ' + i + ' 2>&1').read()
-            if 'had conflicting output from different RUN lines for all functions' in s:
+            s = os.popen("llvm/" + t.group(1) + " " + i + " 2>&1").read()
+            if "had conflicting output from different RUN lines for all functions" in s:
                 return -1
-            s = os.popen('git diff ' + i).read()
-            if re.search('\n(?:-+)\n', s) or re.search('\n[+-].*(?<!RUN):', s):
+            s = os.popen("git diff " + i).read()
+            if re.search("\n(?:-+)\n", s) or re.search("\n[+-].*(?<!RUN):", s):
                 return 1
         return 0
-    return remove_prefix(i, d+1)
+    return remove_prefix(i, d + 1)
+
 
 with ThreadPoolExecutor(max_workers=32) as e:
     f = []
     c = []
     a = []
-    t = { e.submit(remove_prefix, i): i for i in sys.argv[1:] }
+    t = {e.submit(remove_prefix, i): i for i in sys.argv[1:]}
     for i in as_completed(t):
         if i.result() == 0:
-            print('DONE:', end=' ')
+            print("DONE:", end=" ")
         elif i.result() == -1:
-            print('FAIL:', end=' ')
+            print("FAIL:", end=" ")
             f.append(t[i])
         elif i.result() == 1:
-            print('CHANGE:', end=' ')
+            print("CHANGE:", end=" ")
             c.append(t[i])
         else:
-            print('ABORT:', end=' ')
+            print("ABORT:", end=" ")
             a.append(t[i])
         print(t[i])
-    for i in [ (f, 'Failed'), (c, 'Changed'), (a, 'Aborted') ]:
+    for i in [(f, "Failed"), (c, "Changed"), (a, "Aborted")]:
         if i[0]:
-            print('********************\n%s Tests (%d):' % (i[1], len(i[0])))
+            print("********************\n%s Tests (%d):" % (i[1], len(i[0])))
             for j in i[0]:
-                print('  ' + j)
+                print("  " + j)
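
The prefix-merging step is the subtle part of remove_prefix() above: once a
prefix is dropped, consecutive --check-prefix flags get folded into a single
--check-prefixes list. A standalone sketch of that loop, run on a
hypothetical RUN line:

import re

# Hypothetical RUN line of the kind remove_prefix() rewrites.
s = "; RUN: llc < %s | FileCheck %s --check-prefix=FOO --check-prefix=BAR"

# Same merge loop as in the script. Note that re.sub() is handed the matched
# text back as a pattern, which is safe here because prefixes are plain words.
pat = r"-?-check-(?:prefix|prefixes)=([^ ]+)\s+-?-check-(?:prefix|prefixes)=([^ ]+)"
t = re.search(pat, s)
while t:
    s = re.sub(t.group(), "--check-prefixes=" + t.group(1) + "," + t.group(2), s)
    t = re.search(pat, s)
print(s)  # ; RUN: llc < %s | FileCheck %s --check-prefixes=FOO,BAR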

diff --git a/llvm/utils/wciia.py b/llvm/utils/wciia.py
index 666bd63a2797b..7240d3c2a9ff2 100755
--- a/llvm/utils/wciia.py
+++ b/llvm/utils/wciia.py
@@ -26,28 +26,28 @@
 
 
 def process_files_and_folders(owner):
-    filesfolders = owner['filesfolders']
+    filesfolders = owner["filesfolders"]
     # paths must be in ( ... ) so strip them
-    lpar = filesfolders.find('(')
-    rpar = filesfolders.rfind(')')
+    lpar = filesfolders.find("(")
+    rpar = filesfolders.rfind(")")
     if rpar <= lpar:
         # give up
         return
-    paths = filesfolders[lpar + 1:rpar]
+    paths = filesfolders[lpar + 1 : rpar]
     # split paths
-    owner['paths'] = []
+    owner["paths"] = []
     for path in paths.split():
-        owner['paths'].append(path)
+        owner["paths"].append(path)
 
 
 def process_code_owner(owner):
-    if 'filesfolders' in owner:
-        filesfolders = owner['filesfolders']
+    if "filesfolders" in owner:
+        filesfolders = owner["filesfolders"]
     else:
-        #		print "F: field missing, using D: field"
-        owner['filesfolders'] = owner['description']
+        # 		print "F: field missing, using D: field"
+        owner["filesfolders"] = owner["description"]
     process_files_and_folders(owner)
-    code_owners[owner['name']] = owner
+    code_owners[owner["name"]] = owner
 
 
 # process CODE_OWNERS.TXT first
@@ -61,16 +61,16 @@ def process_code_owner(owner):
                 process_code_owner(code_owner)
                 code_owner = {}
             # reset the values
-            code_owner['name'] = name
+            code_owner["name"] = name
         if word == "E:":
             email = line[2:].strip()
-            code_owner['email'] = email
+            code_owner["email"] = email
         if word == "D:":
             description = line[2:].strip()
-            code_owner['description'] = description
+            code_owner["description"] = description
         if word == "F:":
             filesfolders = line[2:].strip()
-            code_owner['filesfolders'].append(filesfolders)
+            code_owner["filesfolders"].append(filesfolders)
 
 
 def find_owners(fpath):
@@ -79,14 +79,14 @@ def find_owners(fpath):
     #  very simplistic way of finding the best match
     for name in code_owners:
         owner = code_owners[name]
-        if 'paths' in owner:
-            for path in owner['paths']:
-                #				print "searching (" + path + ")"
+        if "paths" in owner:
+            for path in owner["paths"]:
+                # 				print "searching (" + path + ")"
                 # try exact match
                 if fpath == path:
                     return name
                 # see if path ends with a *
-                rstar = path.rfind('*')
+                rstar = path.rfind("*")
                 if rstar > 0:
                     # try the longest match,
                     rpos = -1
@@ -94,7 +94,7 @@ def find_owners(fpath):
                         rpos = path.find(fpath)
                     if rpos == 0:
                         onames.append(name)
-    onames.append('Chris Lattner')
+    onames.append("Chris Lattner")
     return onames
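
wciia.py reads CODE_OWNERS.TXT records built from N:/E:/D:/F: lines, with
the F: paths wrapped in parentheses. A minimal sketch of the strip-and-split
done by process_files_and_folders(), on a hypothetical F: field value (the
real entries live in llvm/CODE_OWNERS.TXT):

# Hypothetical F: field value.
filesfolders = "(lib/Target/Foo/* include/llvm/Foo/*)"

# Same extraction as process_files_and_folders() above: paths must sit
# inside ( ... ); strip the parentheses and split on whitespace.
lpar = filesfolders.find("(")
rpar = filesfolders.rfind(")")
paths = filesfolders[lpar + 1 : rpar].split()
print(paths)  # ['lib/Target/Foo/*', 'include/llvm/Foo/*']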
 
 