[llvm] b71edfa - [NFC][Py Reformat] Reformat python files in llvm

Tobias Hieta via llvm-commits llvm-commits at lists.llvm.org
Wed May 17 01:49:04 PDT 2023


Author: Tobias Hieta
Date: 2023-05-17T10:48:52+02:00
New Revision: b71edfaa4ec3c998aadb35255ce2f60bba2940b0

URL: https://github.com/llvm/llvm-project/commit/b71edfaa4ec3c998aadb35255ce2f60bba2940b0
DIFF: https://github.com/llvm/llvm-project/commit/b71edfaa4ec3c998aadb35255ce2f60bba2940b0.diff

LOG: [NFC][Py Reformat] Reformat python files in llvm

This is the first commit in a series that will reformat
all the python files in the LLVM repository.

Reformatting is done with `black`.

See more information here:

https://discourse.llvm.org/t/rfc-document-and-standardize-python-code-style

Reviewed By: jhenderson, JDevlieghere, MatzeB

Differential Revision: https://reviews.llvm.org/D150545

Added: 
    

Modified: 
    llvm/bindings/python/llvm/bit_reader.py
    llvm/bindings/python/llvm/common.py
    llvm/bindings/python/llvm/core.py
    llvm/bindings/python/llvm/disassembler.py
    llvm/bindings/python/llvm/enumerations.py
    llvm/bindings/python/llvm/object.py
    llvm/bindings/python/llvm/tests/base.py
    llvm/bindings/python/llvm/tests/test_bitreader.py
    llvm/bindings/python/llvm/tests/test_core.py
    llvm/bindings/python/llvm/tests/test_disassembler.py
    llvm/bindings/python/llvm/tests/test_object.py
    llvm/docs/conf.py
    llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
    llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py
    llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
    llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py
    llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
    llvm/lib/Analysis/models/gen-inline-oz-test-model.py
    llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py
    llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
    llvm/lib/Analysis/models/interactive_host.py
    llvm/lib/Analysis/models/log_reader.py
    llvm/lib/Analysis/models/saved-model-to-tflite.py
    llvm/test/BugPoint/compile-custom.ll.py
    llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
    llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py
    llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py
    llvm/test/CodeGen/NVPTX/surf-tex.py
    llvm/test/CodeGen/NVPTX/wmma.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-01.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-02.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-03.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-04.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-05.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-06.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-07.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-08.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-09.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-10.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-11.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-12.py
    llvm/test/CodeGen/SystemZ/Large/branch-range-13.py
    llvm/test/CodeGen/SystemZ/Large/spill-01.py
    llvm/test/CodeGen/SystemZ/Large/spill-02.py
    llvm/test/CodeGen/WebAssembly/multivalue-stackify.py
    llvm/test/MC/COFF/bigobj.py
    llvm/test/Other/opt-bisect-helper.py
    llvm/test/TableGen/JSON-check.py
    llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py
    llvm/test/Unit/lit.cfg.py
    llvm/test/lit.cfg.py
    llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py
    llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py
    llvm/test/tools/llvm-objcopy/Inputs/ungzip.py
    llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py
    llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py
    llvm/test/tools/llvm-reduce/Inputs/remove-args.py
    llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py
    llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py
    llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py
    llvm/test/tools/llvm-reduce/remove-bbs-sequence.py
    llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py
    llvm/tools/llvm-shlib/gen-msvc-exports.py
    llvm/tools/opt-viewer/extract-reproducers.py
    llvm/tools/opt-viewer/opt-diff.py
    llvm/tools/opt-viewer/opt-stats.py
    llvm/tools/opt-viewer/opt-viewer.py
    llvm/tools/opt-viewer/optpmap.py
    llvm/tools/opt-viewer/optrecord.py
    llvm/tools/sancov/coverage-report-server.py
    llvm/utils/DSAclean.py
    llvm/utils/DSAextract.py
    llvm/utils/Reviewing/find_interesting_reviews.py
    llvm/utils/Target/ARM/analyze-match-table.py
    llvm/utils/UpdateTestChecks/asm.py
    llvm/utils/UpdateTestChecks/common.py
    llvm/utils/UpdateTestChecks/isel.py
    llvm/utils/abtest.py
    llvm/utils/add_argument_names.py
    llvm/utils/bugpoint_gisel_reducer.py
    llvm/utils/check_ninja_deps.py
    llvm/utils/chunk-print-before-all.py
    llvm/utils/collect_and_build_with_pgo.py
    llvm/utils/convert-constraint-log-to-z3.py
    llvm/utils/create_ladder_graph.py
    llvm/utils/demangle_tree.py
    llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
    llvm/utils/docker/scripts/llvm_checksum/project_tree.py
    llvm/utils/extract-section.py
    llvm/utils/extract_symbols.py
    llvm/utils/extract_vplan.py
    llvm/utils/filecheck_lint/filecheck_lint.py
    llvm/utils/filecheck_lint/filecheck_lint_test.py
    llvm/utils/gdb-scripts/prettyprinters.py
    llvm/utils/git/github-automation.py
    llvm/utils/git/pre-push.py
    llvm/utils/gn/build/run_built_binary.py
    llvm/utils/gn/build/symbol_exports.py
    llvm/utils/gn/build/symlink_or_copy.py
    llvm/utils/gn/build/sync_source_lists_from_cmake.py
    llvm/utils/gn/build/write_cmake_config.py
    llvm/utils/gn/build/write_file.py
    llvm/utils/gn/build/write_library_dependencies.py
    llvm/utils/gn/build/write_vcsrevision.py
    llvm/utils/gn/get.py
    llvm/utils/gn/gn.py
    llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py
    llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py
    llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py
    llvm/utils/indirect_calls.py
    llvm/utils/lint/common_lint.py
    llvm/utils/lint/cpp_lint.py
    llvm/utils/lint/generic_lint.py
    llvm/utils/lit/examples/many-tests/lit.cfg
    llvm/utils/lit/lit.py
    llvm/utils/lit/lit/BooleanExpression.py
    llvm/utils/lit/lit/LitConfig.py
    llvm/utils/lit/lit/LitTestCase.py
    llvm/utils/lit/lit/ProgressBar.py
    llvm/utils/lit/lit/ShCommands.py
    llvm/utils/lit/lit/ShUtil.py
    llvm/utils/lit/lit/Test.py
    llvm/utils/lit/lit/TestRunner.py
    llvm/utils/lit/lit/TestTimes.py
    llvm/utils/lit/lit/TestingConfig.py
    llvm/utils/lit/lit/__init__.py
    llvm/utils/lit/lit/builtin_commands/cat.py
    llvm/utils/lit/lit/builtin_commands/diff.py
    llvm/utils/lit/lit/cl_arguments.py
    llvm/utils/lit/lit/discovery.py
    llvm/utils/lit/lit/display.py
    llvm/utils/lit/lit/formats/__init__.py
    llvm/utils/lit/lit/formats/base.py
    llvm/utils/lit/lit/formats/googletest.py
    llvm/utils/lit/lit/formats/shtest.py
    llvm/utils/lit/lit/llvm/config.py
    llvm/utils/lit/lit/llvm/subst.py
    llvm/utils/lit/lit/main.py
    llvm/utils/lit/lit/reports.py
    llvm/utils/lit/lit/run.py
    llvm/utils/lit/lit/util.py
    llvm/utils/lit/lit/worker.py
    llvm/utils/lit/setup.py
    llvm/utils/lit/tests/Inputs/allow-retries/lit.cfg
    llvm/utils/lit/tests/Inputs/allow-retries/succeeds-within-limit.py
    llvm/utils/lit/tests/Inputs/config-map-discovery/driver.py
    llvm/utils/lit/tests/Inputs/config-map-discovery/main-config/lit.cfg
    llvm/utils/lit/tests/Inputs/custom-result-category/format.py
    llvm/utils/lit/tests/Inputs/custom-result-category/lit.cfg
    llvm/utils/lit/tests/Inputs/discovery/lit.cfg
    llvm/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
    llvm/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg
    llvm/utils/lit/tests/Inputs/fake-externals/fake_external.py
    llvm/utils/lit/tests/Inputs/googletest-cmd-wrapper/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-crash/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-crash/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-detect-duplicate/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-discovery-failed/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-format-respect-gtest-sharding-env-vars/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-format/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-sanitizer-error/lit.cfg
    llvm/utils/lit/tests/Inputs/googletest-timeout/DummySubDir/OneTest.py
    llvm/utils/lit/tests/Inputs/googletest-timeout/lit.cfg
    llvm/utils/lit/tests/Inputs/ignore-fail/lit.cfg
    llvm/utils/lit/tests/Inputs/lit-opts/lit.cfg
    llvm/utils/lit/tests/Inputs/lld-features/lit.cfg
    llvm/utils/lit/tests/Inputs/max-failures/lit.cfg
    llvm/utils/lit/tests/Inputs/max-time/lit.cfg
    llvm/utils/lit/tests/Inputs/parallelism-groups/lit.cfg
    llvm/utils/lit/tests/Inputs/progress-bar/lit.cfg
    llvm/utils/lit/tests/Inputs/py-config-discovery/lit.site.cfg.py
    llvm/utils/lit/tests/Inputs/reorder/lit.cfg
    llvm/utils/lit/tests/Inputs/show-result-codes/lit.cfg
    llvm/utils/lit/tests/Inputs/show-used-features/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-define/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-env/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-env/print_environment.py
    llvm/utils/lit/tests/Inputs/shtest-format-argv0/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-format/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-if-else/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-inject/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-keyword-parse-errors/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-not/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-not/print_environment.py
    llvm/utils/lit/tests/Inputs/shtest-output-printing/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-pushd-popd/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-no-limit/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/does-not-substitute-within-limit/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/escaping/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/negative-integer/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/not-an-integer/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/set-to-none/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-recursive-substitution/substitutes-within-limit/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-run-at-line/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-shell/check_args.py
    llvm/utils/lit/tests/Inputs/shtest-shell/check_path.py
    llvm/utils/lit/tests/Inputs/shtest-shell/lit.cfg
    llvm/utils/lit/tests/Inputs/shtest-timeout/lit.cfg
    llvm/utils/lit/tests/Inputs/standalone-tests-with-excludes/lit.cfg
    llvm/utils/lit/tests/Inputs/standalone-tests-with-suffixes/lit.cfg
    llvm/utils/lit/tests/Inputs/standalone-tests/lit.cfg
    llvm/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
    llvm/utils/lit/tests/Inputs/test-data-micro/lit.cfg
    llvm/utils/lit/tests/Inputs/test-data/dummy_format.py
    llvm/utils/lit/tests/Inputs/test-data/lit.cfg
    llvm/utils/lit/tests/Inputs/test_retry_attempts/lit.cfg
    llvm/utils/lit/tests/Inputs/test_retry_attempts/test.py
    llvm/utils/lit/tests/Inputs/testrunner-custom-parsers/lit.cfg
    llvm/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
    llvm/utils/lit/tests/Inputs/use-llvm-tool-required/lit.cfg
    llvm/utils/lit/tests/Inputs/use-llvm-tool/lit.cfg
    llvm/utils/lit/tests/Inputs/xfail-cl/a/lit.cfg
    llvm/utils/lit/tests/Inputs/xfail-cl/b/lit.cfg
    llvm/utils/lit/tests/Inputs/xfail-cl/lit.cfg
    llvm/utils/lit/tests/Inputs/xunit-output/dummy_format.py
    llvm/utils/lit/tests/Inputs/xunit-output/lit.cfg
    llvm/utils/lit/tests/lit.cfg
    llvm/utils/lit/tests/unit/ShUtil.py
    llvm/utils/lit/tests/unit/TestRunner.py
    llvm/utils/lit/tests/unparsed-requirements.py
    llvm/utils/lldbDataFormatters.py
    llvm/utils/llvm-gisel-cov.py
    llvm/utils/llvm-locstats/llvm-locstats.py
    llvm/utils/llvm-original-di-preservation.py
    llvm/utils/merge-stats.py
    llvm/utils/pipeline.py
    llvm/utils/prepare-code-coverage-artifact.py
    llvm/utils/reduce_pipeline.py
    llvm/utils/reduce_pipeline_test/fake_opt.py
    llvm/utils/reduce_pipeline_test/test.py
    llvm/utils/relative_lines.py
    llvm/utils/release/bump-version.py
    llvm/utils/release/findRegressions-nightly.py
    llvm/utils/release/findRegressions-simple.py
    llvm/utils/release/github-upload-release.py
    llvm/utils/remote-exec.py
    llvm/utils/revert_checker.py
    llvm/utils/revert_checker_test.py
    llvm/utils/rsp_bisect.py
    llvm/utils/rsp_bisect_test/test.py
    llvm/utils/rsp_bisect_test/test_script.py
    llvm/utils/rsp_bisect_test/test_script_inv.py
    llvm/utils/schedcover.py
    llvm/utils/shuffle_fuzz.py
    llvm/utils/shuffle_select_fuzz_tester.py
    llvm/utils/sort_includes.py
    llvm/utils/sysroot.py
    llvm/utils/testgen/mc-bundling-x86-gen.py
    llvm/utils/unicode-case-fold.py
    llvm/utils/update_analyze_test_checks.py
    llvm/utils/update_any_test_checks.py
    llvm/utils/update_cc_test_checks.py
    llvm/utils/update_llc_test_checks.py
    llvm/utils/update_mca_test_checks.py
    llvm/utils/update_mir_test_checks.py
    llvm/utils/update_test_checks.py
    llvm/utils/update_test_prefix.py
    llvm/utils/wciia.py

Removed: 
    


################################################################################
diff  --git a/llvm/bindings/python/llvm/bit_reader.py b/llvm/bindings/python/llvm/bit_reader.py
index 33b8211076b80..8d1c6d5451932 100644
--- a/llvm/bindings/python/llvm/bit_reader.py
+++ b/llvm/bindings/python/llvm/bit_reader.py
@@ -1,4 +1,3 @@
-
 from .common import LLVMObject
 from .common import c_object_p
 from .common import get_library
@@ -10,21 +9,25 @@
 from ctypes import byref
 from ctypes import c_char_p
 from ctypes import cast
-__all__ = ['parse_bitcode']
+
+__all__ = ["parse_bitcode"]
 lib = get_library()
 
+
 def parse_bitcode(mem_buffer):
     """Input is .core.MemoryBuffer"""
     module = c_object_p()
     result = lib.LLVMParseBitcode2(mem_buffer, byref(module))
     if result:
-        raise RuntimeError('LLVM Error')
+        raise RuntimeError("LLVM Error")
     m = Module(module)
     m.take_ownership(mem_buffer)
     return m
 
+
 def register_library(library):
     library.LLVMParseBitcode2.argtypes = [MemoryBuffer, POINTER(c_object_p)]
     library.LLVMParseBitcode2.restype = bool
 
+
 register_library(lib)

diff  --git a/llvm/bindings/python/llvm/common.py b/llvm/bindings/python/llvm/common.py
index 9c6c6d433458c..4f8912aec3fec 100644
--- a/llvm/bindings/python/llvm/common.py
+++ b/llvm/bindings/python/llvm/common.py
@@ -1,10 +1,10 @@
-#===- common.py - Python LLVM Bindings -----------------------*- python -*--===#
+# ===- common.py - Python LLVM Bindings -----------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 from ctypes import POINTER
 from ctypes import c_void_p
@@ -15,20 +15,22 @@
 
 # LLVM_VERSION: sync with PACKAGE_VERSION in CMakeLists.txt
 #               but leave out the 'svn' suffix.
-LLVM_VERSION = '10.0.0'
+LLVM_VERSION = "10.0.0"
 
 __all__ = [
-    'c_object_p',
-    'get_library',
+    "c_object_p",
+    "get_library",
 ]
 
 c_object_p = POINTER(c_void_p)
 
+
 class LLVMObject(object):
     """Base class for objects that are backed by an LLVM data structure.
 
     This class should never be instantiated outside of this package.
     """
+
     def __init__(self, ptr, ownable=True, disposer=None):
         assert isinstance(ptr, c_object_p)
 
@@ -61,12 +63,13 @@ def from_param(self):
         return self._as_parameter_
 
     def __del__(self):
-        if not hasattr(self, '_self_owned') or not hasattr(self, '_disposer'):
+        if not hasattr(self, "_self_owned") or not hasattr(self, "_disposer"):
             return
 
         if self._self_owned and self._disposer:
             self._disposer(self)
 
+
 class CachedProperty(object):
     """Decorator that caches the result of a property lookup.
 
@@ -74,11 +77,12 @@ class CachedProperty(object):
     decorator on properties that invoke C API calls for which the result of the
     call will be idempotent.
     """
+
     def __init__(self, wrapped):
         self.wrapped = wrapped
         try:
             self.__doc__ = wrapped.__doc__
-        except: # pragma: no cover
+        except:  # pragma: no cover
             pass
 
     def __get__(self, instance, instance_type=None):
@@ -90,6 +94,7 @@ def __get__(self, instance, instance_type=None):
 
         return value
 
+
 def get_library():
     """Obtain a reference to the llvm library."""
 
@@ -101,14 +106,14 @@ def get_library():
     # library into a default linker search path.  Always Try ctypes.cdll.LoadLibrary()
     # with all possible library names first, then try ctypes.util.find_library().
 
-    names = ['LLVM-' + LLVM_VERSION, 'LLVM-' + LLVM_VERSION + 'svn']
+    names = ["LLVM-" + LLVM_VERSION, "LLVM-" + LLVM_VERSION + "svn"]
     t = platform.system()
-    if t == 'Darwin':
-        pfx, ext = 'lib', '.dylib'
-    elif t == 'Windows':
-        pfx, ext = '', '.dll'
+    if t == "Darwin":
+        pfx, ext = "lib", ".dylib"
+    elif t == "Windows":
+        pfx, ext = "", ".dll"
     else:
-        pfx, ext = 'lib', '.so'
+        pfx, ext = "lib", ".so"
 
     for i in names:
         try:
@@ -122,4 +127,4 @@ def get_library():
         t = ctypes.util.find_library(i)
         if t:
             return cdll.LoadLibrary(t)
-    raise Exception('LLVM shared library not found!')
+    raise Exception("LLVM shared library not found!")

diff  --git a/llvm/bindings/python/llvm/core.py b/llvm/bindings/python/llvm/core.py
index 812d5d0e94129..a2de827ed3bf6 100644
--- a/llvm/bindings/python/llvm/core.py
+++ b/llvm/bindings/python/llvm/core.py
@@ -1,10 +1,10 @@
-#===- core.py - Python LLVM Bindings -------------------------*- python -*--===#
+# ===- core.py - Python LLVM Bindings -------------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 from __future__ import print_function
 
 from .common import LLVMObject
@@ -36,6 +36,7 @@
 lib = get_library()
 Enums = []
 
+
 class LLVMEnumeration(object):
     """Represents an individual LLVM enumeration."""
 
@@ -44,8 +45,7 @@ def __init__(self, name, value):
         self.value = value
 
     def __repr__(self):
-        return '%s.%s' % (self.__class__.__name__,
-                          self.name)
+        return "%s.%s" % (self.__class__.__name__, self.name)
 
     @classmethod
     def from_value(cls, value):
@@ -53,8 +53,7 @@ def from_value(cls, value):
         result = cls._value_map.get(value, None)
 
         if result is None:
-            raise ValueError('Unknown %s: %d' % (cls.__name__,
-                                                 value))
+            raise ValueError("Unknown %s: %d" % (cls.__name__, value))
 
         return result
 
@@ -66,12 +65,12 @@ def register(cls, name, value):
         enumerations. You should not need to call this outside this module.
         """
         if value in cls._value_map:
-            raise ValueError('%s value already registered: %d' % (cls.__name__,
-                                                                  value))
+            raise ValueError("%s value already registered: %d" % (cls.__name__, value))
         enum = cls(name, value)
         cls._value_map[value] = enum
         setattr(cls, name, enum)
 
+
 class Attribute(LLVMEnumeration):
     """Represents an individual Attribute enumeration."""
 
@@ -80,6 +79,7 @@ class Attribute(LLVMEnumeration):
     def __init__(self, name, value):
         super(Attribute, self).__init__(name, value)
 
+
 class OpCode(LLVMEnumeration):
     """Represents an individual OpCode enumeration."""
 
@@ -88,6 +88,7 @@ class OpCode(LLVMEnumeration):
     def __init__(self, name, value):
         super(OpCode, self).__init__(name, value)
 
+
 class TypeKind(LLVMEnumeration):
     """Represents an individual TypeKind enumeration."""
 
@@ -96,6 +97,7 @@ class TypeKind(LLVMEnumeration):
     def __init__(self, name, value):
         super(TypeKind, self).__init__(name, value)
 
+
 class Linkage(LLVMEnumeration):
     """Represents an individual Linkage enumeration."""
 
@@ -104,6 +106,7 @@ class Linkage(LLVMEnumeration):
     def __init__(self, name, value):
         super(Linkage, self).__init__(name, value)
 
+
 class Visibility(LLVMEnumeration):
     """Represents an individual visibility enumeration."""
 
@@ -112,6 +115,7 @@ class Visibility(LLVMEnumeration):
     def __init__(self, name, value):
         super(Visibility, self).__init__(name, value)
 
+
 class CallConv(LLVMEnumeration):
     """Represents an individual calling convention enumeration."""
 
@@ -120,6 +124,7 @@ class CallConv(LLVMEnumeration):
     def __init__(self, name, value):
         super(CallConv, self).__init__(name, value)
 
+
 class IntPredicate(LLVMEnumeration):
     """Represents an individual IntPredicate enumeration."""
 
@@ -128,6 +133,7 @@ class IntPredicate(LLVMEnumeration):
     def __init__(self, name, value):
         super(IntPredicate, self).__init__(name, value)
 
+
 class RealPredicate(LLVMEnumeration):
     """Represents an individual RealPredicate enumeration."""
 
@@ -136,6 +142,7 @@ class RealPredicate(LLVMEnumeration):
     def __init__(self, name, value):
         super(RealPredicate, self).__init__(name, value)
 
+
 class LandingPadClauseTy(LLVMEnumeration):
     """Represents an individual LandingPadClauseTy enumeration."""
 
@@ -144,6 +151,7 @@ class LandingPadClauseTy(LLVMEnumeration):
     def __init__(self, name, value):
         super(LandingPadClauseTy, self).__init__(name, value)
 
+
 class MemoryBuffer(LLVMObject):
     """Represents an opaque memory buffer."""
 
@@ -159,8 +167,9 @@ def __init__(self, filename=None):
         memory = c_object_p()
         out = c_char_p(None)
 
-        result = lib.LLVMCreateMemoryBufferWithContentsOfFile(filename,
-                byref(memory), byref(out))
+        result = lib.LLVMCreateMemoryBufferWithContentsOfFile(
+            filename, byref(memory), byref(out)
+        )
 
         if result:
             raise Exception("Could not create memory buffer: %s" % out.value)
@@ -170,8 +179,8 @@ def __init__(self, filename=None):
     def __len__(self):
         return lib.LLVMGetBufferSize(self)
 
+
 class Value(LLVMObject):
-    
     def __init__(self, value):
         LLVMObject.__init__(self, value)
 
@@ -181,16 +190,17 @@ def name(self):
 
     def dump(self):
         lib.LLVMDumpValue(self)
-    
+
     def get_operand(self, i):
         return Value(lib.LLVMGetOperand(self, i))
-    
+
     def set_operand(self, i, v):
         return lib.LLVMSetOperand(self, i, v)
-    
+
     def __len__(self):
         return lib.LLVMGetNumOperands(self)
 
+
 class Module(LLVMObject):
     """Represents the top-level structure of an llvm program in an opaque object."""
 
@@ -232,10 +242,10 @@ def __init__(self, module, reverse=False):
                 self.function = self.module.last
             else:
                 self.function = self.module.first
-        
+
         def __iter__(self):
             return self
-        
+
         def __next__(self):
             if not isinstance(self.function, Function):
                 raise StopIteration("")
@@ -266,25 +276,25 @@ def last(self):
     def print_module_to_file(self, filename):
         out = c_char_p(None)
         # Result is inverted so 0 means everything was ok.
-        result = lib.LLVMPrintModuleToFile(self, filename, byref(out))        
+        result = lib.LLVMPrintModuleToFile(self, filename, byref(out))
         if result:
             raise RuntimeError("LLVM Error: %s" % out.value)
 
-class Function(Value):
 
+class Function(Value):
     def __init__(self, value):
         Value.__init__(self, value)
-    
+
     @property
     def next(self):
         f = lib.LLVMGetNextFunction(self)
         return f and Function(f)
-    
+
     @property
     def prev(self):
         f = lib.LLVMGetPreviousFunction(self)
         return f and Function(f)
-    
+
     @property
     def first(self):
         b = lib.LLVMGetFirstBasicBlock(self)
@@ -303,10 +313,10 @@ def __init__(self, function, reverse=False):
                 self.bb = function.last
             else:
                 self.bb = function.first
-        
+
         def __iter__(self):
             return self
-        
+
         def __next__(self):
             if not isinstance(self.bb, BasicBlock):
                 raise StopIteration("")
@@ -319,18 +329,18 @@ def __next__(self):
 
         if sys.version_info.major == 2:
             next = __next__
-    
+
     def __iter__(self):
         return Function.__bb_iterator(self)
 
     def __reversed__(self):
         return Function.__bb_iterator(self, reverse=True)
-    
+
     def __len__(self):
         return lib.LLVMCountBasicBlocks(self)
 
+
 class BasicBlock(LLVMObject):
-    
     def __init__(self, value):
         LLVMObject.__init__(self, value)
 
@@ -343,7 +353,7 @@ def next(self):
     def prev(self):
         b = lib.LLVMGetPreviousBasicBlock(self)
         return b and BasicBlock(b)
-    
+
     @property
     def first(self):
         i = lib.LLVMGetFirstInstruction(self)
@@ -356,7 +366,7 @@ def last(self):
 
     def __as_value(self):
         return Value(lib.LLVMBasicBlockAsValue(self))
-    
+
     @property
     def name(self):
         return lib.LLVMGetValueName(self.__as_value())
@@ -365,28 +375,26 @@ def dump(self):
         lib.LLVMDumpValue(self.__as_value())
 
     def get_operand(self, i):
-        return Value(lib.LLVMGetOperand(self.__as_value(),
-                                        i))
-    
+        return Value(lib.LLVMGetOperand(self.__as_value(), i))
+
     def set_operand(self, i, v):
-        return lib.LLVMSetOperand(self.__as_value(),
-                                  i, v)
-    
+        return lib.LLVMSetOperand(self.__as_value(), i, v)
+
     def __len__(self):
         return lib.LLVMGetNumOperands(self.__as_value())
 
     class __inst_iterator(object):
-        def __init__(self, bb, reverse=False):            
+        def __init__(self, bb, reverse=False):
             self.bb = bb
             self.reverse = reverse
             if self.reverse:
                 self.inst = self.bb.last
             else:
                 self.inst = self.bb.first
-        
+
         def __iter__(self):
             return self
-        
+
         def __next__(self):
             if not isinstance(self.inst, Instruction):
                 raise StopIteration("")
@@ -408,7 +416,6 @@ def __reversed__(self):
 
 
 class Instruction(Value):
-
     def __init__(self, value):
         Value.__init__(self, value)
 
@@ -426,8 +433,8 @@ def prev(self):
     def opcode(self):
         return OpCode.from_value(lib.LLVMGetInstructionOpcode(self))
 
-class Context(LLVMObject):
 
+class Context(LLVMObject):
     def __init__(self, context=None):
         if context is None:
             context = lib.LLVMContextCreate()
@@ -439,6 +446,7 @@ def __init__(self, context=None):
     def GetGlobalContext(cls):
         return Context(lib.LLVMGetGlobalContext())
 
+
 def register_library(library):
     # Initialization/Shutdown declarations.
     library.LLVMShutdown.argtypes = []
@@ -455,8 +463,11 @@ def register_library(library):
     library.LLVMGetGlobalContext.restype = c_object_p
 
     # Memory buffer declarations
-    library.LLVMCreateMemoryBufferWithContentsOfFile.argtypes = [c_char_p,
-            POINTER(c_object_p), POINTER(c_char_p)]
+    library.LLVMCreateMemoryBufferWithContentsOfFile.argtypes = [
+        c_char_p,
+        POINTER(c_object_p),
+        POINTER(c_char_p),
+    ]
     library.LLVMCreateMemoryBufferWithContentsOfFile.restype = bool
 
     library.LLVMGetBufferSize.argtypes = [MemoryBuffer]
@@ -485,8 +496,7 @@ def register_library(library):
     library.LLVMDumpModule.argtypes = [Module]
     library.LLVMDumpModule.restype = None
 
-    library.LLVMPrintModuleToFile.argtypes = [Module, c_char_p,
-                                              POINTER(c_char_p)]
+    library.LLVMPrintModuleToFile.argtypes = [Module, c_char_p, POINTER(c_char_p)]
     library.LLVMPrintModuleToFile.restype = bool
 
     library.LLVMGetFirstFunction.argtypes = [Module]
@@ -552,6 +562,7 @@ def register_library(library):
     library.LLVMGetInstructionOpcode.argtypes = [Instruction]
     library.LLVMGetInstructionOpcode.restype = c_uint
 
+
 def register_enumerations():
     if Enums:
         return None
@@ -572,9 +583,11 @@ def register_enumerations():
             enum_class.register(name, value)
     return enums
 
+
 def initialize_llvm():
     Context.GetGlobalContext()
 
+
 register_library(lib)
 Enums = register_enumerations()
 initialize_llvm()

diff  --git a/llvm/bindings/python/llvm/disassembler.py b/llvm/bindings/python/llvm/disassembler.py
index 75625588911ca..a57b8b838832d 100644
--- a/llvm/bindings/python/llvm/disassembler.py
+++ b/llvm/bindings/python/llvm/disassembler.py
@@ -1,10 +1,10 @@
-#===- disassembler.py - Python LLVM Bindings -----------------*- python -*--===#
+# ===- disassembler.py - Python LLVM Bindings -----------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 from ctypes import CFUNCTYPE
 from ctypes import POINTER
@@ -23,7 +23,7 @@
 from .common import get_library
 
 __all__ = [
-    'Disassembler',
+    "Disassembler",
 ]
 
 lib = get_library()
@@ -33,9 +33,23 @@
 Option_UseMarkup = 1
 
 
-
 _initialized = False
-_targets = ['AArch64', 'ARM', 'Hexagon', 'MSP430', 'Mips', 'NVPTX', 'PowerPC', 'R600', 'Sparc', 'SystemZ', 'X86', 'XCore']
+_targets = [
+    "AArch64",
+    "ARM",
+    "Hexagon",
+    "MSP430",
+    "Mips",
+    "NVPTX",
+    "PowerPC",
+    "R600",
+    "Sparc",
+    "SystemZ",
+    "X86",
+    "XCore",
+]
+
+
 def _ensure_initialized():
     global _initialized
     if not _initialized:
@@ -63,6 +77,7 @@ class Disassembler(LLVMObject):
 
     Disassembler instances can disassemble instructions from multiple sources.
     """
+
     def __init__(self, triple):
         """Create a new disassembler instance.
 
@@ -72,11 +87,15 @@ def __init__(self, triple):
 
         _ensure_initialized()
 
-        ptr = lib.LLVMCreateDisasm(c_char_p(triple), c_void_p(None), c_int(0),
-                callbacks['op_info'](0), callbacks['symbol_lookup'](0))
+        ptr = lib.LLVMCreateDisasm(
+            c_char_p(triple),
+            c_void_p(None),
+            c_int(0),
+            callbacks["op_info"](0),
+            callbacks["symbol_lookup"](0),
+        )
         if not ptr:
-            raise Exception('Could not obtain disassembler for triple: %s' %
-                            triple)
+            raise Exception("Could not obtain disassembler for triple: %s" % triple)
 
         LLVMObject.__init__(self, ptr, disposer=lib.LLVMDisasmDispose)
 
@@ -100,8 +119,9 @@ def get_instruction(self, source, pc=0):
         buf = cast(c_char_p(source), POINTER(c_ubyte))
         out_str = cast((c_byte * 255)(), c_char_p)
 
-        result = lib.LLVMDisasmInstruction(self, buf, c_uint64(len(source)),
-                                           c_uint64(pc), out_str, 255)
+        result = lib.LLVMDisasmInstruction(
+            self, buf, c_uint64(len(source)), c_uint64(pc), out_str, 255
+        )
 
         return (result, out_str.value)
 
@@ -128,9 +148,9 @@ def get_instructions(self, source, pc=0):
         end_address = pc + len(source)
         while address < end_address:
             b = cast(addressof(buf) + offset, POINTER(c_ubyte))
-            result = lib.LLVMDisasmInstruction(self, b,
-                    c_uint64(len(source) - offset), c_uint64(address),
-                    out_str, 255)
+            result = lib.LLVMDisasmInstruction(
+                self, b, c_uint64(len(source) - offset), c_uint64(address), out_str, 255
+            )
 
             if result == 0:
                 break
@@ -142,28 +162,40 @@ def get_instructions(self, source, pc=0):
 
     def set_options(self, options):
         if not lib.LLVMSetDisasmOptions(self, options):
-            raise Exception('Unable to set all disassembler options in %i' % options)
+            raise Exception("Unable to set all disassembler options in %i" % options)
 
 
 def register_library(library):
-    library.LLVMCreateDisasm.argtypes = [c_char_p, c_void_p, c_int,
-        callbacks['op_info'], callbacks['symbol_lookup']]
+    library.LLVMCreateDisasm.argtypes = [
+        c_char_p,
+        c_void_p,
+        c_int,
+        callbacks["op_info"],
+        callbacks["symbol_lookup"],
+    ]
     library.LLVMCreateDisasm.restype = c_object_p
 
     library.LLVMDisasmDispose.argtypes = [Disassembler]
 
-    library.LLVMDisasmInstruction.argtypes = [Disassembler, POINTER(c_ubyte),
-            c_uint64, c_uint64, c_char_p, c_size_t]
+    library.LLVMDisasmInstruction.argtypes = [
+        Disassembler,
+        POINTER(c_ubyte),
+        c_uint64,
+        c_uint64,
+        c_char_p,
+        c_size_t,
+    ]
     library.LLVMDisasmInstruction.restype = c_size_t
 
     library.LLVMSetDisasmOptions.argtypes = [Disassembler, c_uint64]
     library.LLVMSetDisasmOptions.restype = c_int
 
 
-callbacks['op_info'] = CFUNCTYPE(c_int, c_void_p, c_uint64, c_uint64, c_uint64,
-                                 c_int, c_void_p)
-callbacks['symbol_lookup'] = CFUNCTYPE(c_char_p, c_void_p, c_uint64,
-                                       POINTER(c_uint64), c_uint64,
-                                       POINTER(c_char_p))
+callbacks["op_info"] = CFUNCTYPE(
+    c_int, c_void_p, c_uint64, c_uint64, c_uint64, c_int, c_void_p
+)
+callbacks["symbol_lookup"] = CFUNCTYPE(
+    c_char_p, c_void_p, c_uint64, POINTER(c_uint64), c_uint64, POINTER(c_char_p)
+)
 
 register_library(lib)

diff  --git a/llvm/bindings/python/llvm/enumerations.py b/llvm/bindings/python/llvm/enumerations.py
index ebb39a4ded831..34e297603b3f2 100644
--- a/llvm/bindings/python/llvm/enumerations.py
+++ b/llvm/bindings/python/llvm/enumerations.py
@@ -1,10 +1,10 @@
-#===- enumerations.py - Python LLVM Enumerations -------------*- python -*--===#
+# ===- enumerations.py - Python LLVM Enumerations -------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 r"""
 LLVM Enumerations
@@ -18,193 +18,193 @@
 """
 
 __all__ = [
-    'Attributes',
-    'OpCodes',
-    'TypeKinds',
-    'Linkages',
-    'Visibility',
-    'CallConv',
-    'IntPredicate',
-    'RealPredicate',
-    'LandingPadClauseTy',
+    "Attributes",
+    "OpCodes",
+    "TypeKinds",
+    "Linkages",
+    "Visibility",
+    "CallConv",
+    "IntPredicate",
+    "RealPredicate",
+    "LandingPadClauseTy",
 ]
 
 Attributes = [
-    ('ZExt', 1 << 0),
-    ('MSExt', 1 << 1),
-    ('NoReturn', 1 << 2),
-    ('InReg', 1 << 3),
-    ('StructRet', 1 << 4),
-    ('NoUnwind', 1 << 5),
-    ('NoAlias', 1 << 6),
-    ('ByVal', 1 << 7),
-    ('Nest', 1 << 8),
-    ('ReadNone', 1 << 9),
-    ('ReadOnly', 1 << 10),
-    ('NoInline', 1 << 11),
-    ('AlwaysInline', 1 << 12),
-    ('OptimizeForSize', 1 << 13),
-    ('StackProtect', 1 << 14),
-    ('StackProtectReq', 1 << 15),
-    ('Alignment', 31 << 16),
-    ('NoCapture', 1 << 21),
-    ('NoRedZone', 1 << 22),
-    ('ImplicitFloat', 1 << 23),
-    ('Naked', 1 << 24),
-    ('InlineHint', 1 << 25),
-    ('StackAlignment', 7 << 26),
-    ('ReturnsTwice', 1 << 29),
-    ('UWTable', 1 << 30),
-    ('NonLazyBind', 1 << 31),
+    ("ZExt", 1 << 0),
+    ("MSExt", 1 << 1),
+    ("NoReturn", 1 << 2),
+    ("InReg", 1 << 3),
+    ("StructRet", 1 << 4),
+    ("NoUnwind", 1 << 5),
+    ("NoAlias", 1 << 6),
+    ("ByVal", 1 << 7),
+    ("Nest", 1 << 8),
+    ("ReadNone", 1 << 9),
+    ("ReadOnly", 1 << 10),
+    ("NoInline", 1 << 11),
+    ("AlwaysInline", 1 << 12),
+    ("OptimizeForSize", 1 << 13),
+    ("StackProtect", 1 << 14),
+    ("StackProtectReq", 1 << 15),
+    ("Alignment", 31 << 16),
+    ("NoCapture", 1 << 21),
+    ("NoRedZone", 1 << 22),
+    ("ImplicitFloat", 1 << 23),
+    ("Naked", 1 << 24),
+    ("InlineHint", 1 << 25),
+    ("StackAlignment", 7 << 26),
+    ("ReturnsTwice", 1 << 29),
+    ("UWTable", 1 << 30),
+    ("NonLazyBind", 1 << 31),
 ]
 
 OpCodes = [
-    ('Ret', 1),
-    ('Br', 2),
-    ('Switch', 3),
-    ('IndirectBr', 4),
-    ('Invoke', 5),
-    ('Unreachable', 7),
-    ('Add', 8),
-    ('FAdd', 9),
-    ('Sub', 10),
-    ('FSub', 11),
-    ('Mul', 12),
-    ('FMul', 13),
-    ('UDiv', 14),
-    ('SDiv', 15),
-    ('FDiv', 16),
-    ('URem', 17),
-    ('SRem', 18),
-    ('FRem', 19),
-    ('Shl', 20),
-    ('LShr', 21),
-    ('AShr', 22),
-    ('And', 23),
-    ('Or', 24),
-    ('Xor', 25),
-    ('Alloca', 26),
-    ('Load', 27),
-    ('Store', 28),
-    ('GetElementPtr', 29),
-    ('Trunc', 30),
-    ('ZExt', 31),
-    ('SExt', 32),
-    ('FPToUI', 33),
-    ('FPToSI', 34),
-    ('UIToFP', 35),
-    ('SIToFP', 36),
-    ('FPTrunc', 37),
-    ('FPExt', 38),
-    ('PtrToInt', 39),
-    ('IntToPtr', 40),
-    ('BitCast', 41),
-    ('ICmp', 42),
-    ('FCmpl', 43),
-    ('PHI', 44),
-    ('Call', 45),
-    ('Select', 46),
-    ('UserOp1', 47),
-    ('UserOp2', 48),
-    ('AArg', 49),
-    ('ExtractElement', 50),
-    ('InsertElement', 51),
-    ('ShuffleVector', 52),
-    ('ExtractValue', 53),
-    ('InsertValue', 54),
-    ('Fence', 55),
-    ('AtomicCmpXchg', 56),
-    ('AtomicRMW', 57),
-    ('Resume', 58),
-    ('LandingPad', 59),
+    ("Ret", 1),
+    ("Br", 2),
+    ("Switch", 3),
+    ("IndirectBr", 4),
+    ("Invoke", 5),
+    ("Unreachable", 7),
+    ("Add", 8),
+    ("FAdd", 9),
+    ("Sub", 10),
+    ("FSub", 11),
+    ("Mul", 12),
+    ("FMul", 13),
+    ("UDiv", 14),
+    ("SDiv", 15),
+    ("FDiv", 16),
+    ("URem", 17),
+    ("SRem", 18),
+    ("FRem", 19),
+    ("Shl", 20),
+    ("LShr", 21),
+    ("AShr", 22),
+    ("And", 23),
+    ("Or", 24),
+    ("Xor", 25),
+    ("Alloca", 26),
+    ("Load", 27),
+    ("Store", 28),
+    ("GetElementPtr", 29),
+    ("Trunc", 30),
+    ("ZExt", 31),
+    ("SExt", 32),
+    ("FPToUI", 33),
+    ("FPToSI", 34),
+    ("UIToFP", 35),
+    ("SIToFP", 36),
+    ("FPTrunc", 37),
+    ("FPExt", 38),
+    ("PtrToInt", 39),
+    ("IntToPtr", 40),
+    ("BitCast", 41),
+    ("ICmp", 42),
+    ("FCmpl", 43),
+    ("PHI", 44),
+    ("Call", 45),
+    ("Select", 46),
+    ("UserOp1", 47),
+    ("UserOp2", 48),
+    ("AArg", 49),
+    ("ExtractElement", 50),
+    ("InsertElement", 51),
+    ("ShuffleVector", 52),
+    ("ExtractValue", 53),
+    ("InsertValue", 54),
+    ("Fence", 55),
+    ("AtomicCmpXchg", 56),
+    ("AtomicRMW", 57),
+    ("Resume", 58),
+    ("LandingPad", 59),
 ]
 
 TypeKinds = [
-    ('Void', 0),
-    ('Half', 1),
-    ('Float', 2),
-    ('Double', 3),
-    ('X86_FP80', 4),
-    ('FP128', 5),
-    ('PPC_FP128', 6),
-    ('Label', 7),
-    ('Integer', 8),
-    ('Function', 9),
-    ('Struct', 10),
-    ('Array', 11),
-    ('Pointer', 12),
-    ('Vector', 13),
-    ('Metadata', 14),
-    ('X86_MMX', 15),
+    ("Void", 0),
+    ("Half", 1),
+    ("Float", 2),
+    ("Double", 3),
+    ("X86_FP80", 4),
+    ("FP128", 5),
+    ("PPC_FP128", 6),
+    ("Label", 7),
+    ("Integer", 8),
+    ("Function", 9),
+    ("Struct", 10),
+    ("Array", 11),
+    ("Pointer", 12),
+    ("Vector", 13),
+    ("Metadata", 14),
+    ("X86_MMX", 15),
 ]
 
 Linkages = [
-    ('External', 0),
-    ('AvailableExternally', 1),
-    ('LinkOnceAny', 2),
-    ('LinkOnceODR', 3),
-    ('WeakAny', 4),
-    ('WeakODR', 5),
-    ('Appending', 6),
-    ('Internal', 7),
-    ('Private', 8),
-    ('DLLImport', 9),
-    ('DLLExport', 10),
-    ('ExternalWeak', 11),
-    ('Ghost', 12),
-    ('Common', 13),
-    ('LinkerPrivate', 14),
-    ('LinkerPrivateWeak', 15),
-    ('LinkerPrivateWeakDefAuto', 16),
+    ("External", 0),
+    ("AvailableExternally", 1),
+    ("LinkOnceAny", 2),
+    ("LinkOnceODR", 3),
+    ("WeakAny", 4),
+    ("WeakODR", 5),
+    ("Appending", 6),
+    ("Internal", 7),
+    ("Private", 8),
+    ("DLLImport", 9),
+    ("DLLExport", 10),
+    ("ExternalWeak", 11),
+    ("Ghost", 12),
+    ("Common", 13),
+    ("LinkerPrivate", 14),
+    ("LinkerPrivateWeak", 15),
+    ("LinkerPrivateWeakDefAuto", 16),
 ]
 
 Visibility = [
-    ('Default', 0),
-    ('Hidden', 1),
-    ('Protected', 2),
+    ("Default", 0),
+    ("Hidden", 1),
+    ("Protected", 2),
 ]
 
 CallConv = [
-    ('CCall', 0),
-    ('FastCall', 8),
-    ('ColdCall', 9),
-    ('X86StdcallCall', 64),
-    ('X86FastcallCall', 65),
+    ("CCall", 0),
+    ("FastCall", 8),
+    ("ColdCall", 9),
+    ("X86StdcallCall", 64),
+    ("X86FastcallCall", 65),
 ]
 
 IntPredicate = [
-    ('EQ', 32),
-    ('NE', 33),
-    ('UGT', 34),
-    ('UGE', 35),
-    ('ULT', 36),
-    ('ULE', 37),
-    ('SGT', 38),
-    ('SGE', 39),
-    ('SLT', 40),
-    ('SLE', 41),
+    ("EQ", 32),
+    ("NE", 33),
+    ("UGT", 34),
+    ("UGE", 35),
+    ("ULT", 36),
+    ("ULE", 37),
+    ("SGT", 38),
+    ("SGE", 39),
+    ("SLT", 40),
+    ("SLE", 41),
 ]
 
 RealPredicate = [
-    ('PredicateFalse', 0),
-    ('OEQ', 1),
-    ('OGT', 2),
-    ('OGE', 3),
-    ('OLT', 4),
-    ('OLE', 5),
-    ('ONE', 6),
-    ('ORD', 7),
-    ('UNO', 8),
-    ('UEQ', 9),
-    ('UGT', 10),
-    ('UGE', 11),
-    ('ULT', 12),
-    ('ULE', 13),
-    ('UNE', 14),
-    ('PredicateTrue', 15),
+    ("PredicateFalse", 0),
+    ("OEQ", 1),
+    ("OGT", 2),
+    ("OGE", 3),
+    ("OLT", 4),
+    ("OLE", 5),
+    ("ONE", 6),
+    ("ORD", 7),
+    ("UNO", 8),
+    ("UEQ", 9),
+    ("UGT", 10),
+    ("UGE", 11),
+    ("ULT", 12),
+    ("ULE", 13),
+    ("UNE", 14),
+    ("PredicateTrue", 15),
 ]
 
 LandingPadClauseTy = [
-    ('Catch', 0),
-    ('Filter', 1),
+    ("Catch", 0),
+    ("Filter", 1),
 ]

diff  --git a/llvm/bindings/python/llvm/object.py b/llvm/bindings/python/llvm/object.py
index e8841b6045f62..b63b9ce46c41d 100644
--- a/llvm/bindings/python/llvm/object.py
+++ b/llvm/bindings/python/llvm/object.py
@@ -1,10 +1,10 @@
-#===- object.py - Python Object Bindings --------------------*- python -*--===#
+# ===- object.py - Python Object Bindings --------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 r"""
 Object File Interface
@@ -96,6 +96,7 @@
     "Symbol",
 ]
 
+
 class ObjectFile(LLVMObject):
     """Represents an object/binary file."""
 
@@ -113,7 +114,7 @@ def __init__(self, filename=None, contents=None):
             contents = MemoryBuffer(filename=filename)
 
         if contents is None:
-            raise Exception('No input found.')
+            raise Exception("No input found.")
 
         ptr = lib.LLVMCreateObjectFile(contents)
         LLVMObject.__init__(self, ptr, disposer=lib.LLVMDisposeObjectFile)
@@ -175,6 +176,7 @@ def get_symbols(self, cache=False):
 
         lib.LLVMDisposeSymbolIterator(symbols)
 
+
 class Section(LLVMObject):
     """Represents a section in an object file."""
 
@@ -196,7 +198,7 @@ def name(self):
         This is typically something like '.dynsym' or '.rodata'.
         """
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         return lib.LLVMGetSectionName(self)
 
@@ -204,14 +206,14 @@ def name(self):
     def size(self):
         """The size of the section, in long bytes."""
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         return lib.LLVMGetSectionSize(self)
 
     @CachedProperty
     def contents(self):
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         siz = self.size
 
@@ -224,14 +226,14 @@ def contents(self):
     def address(self):
         """The address of this section, in long bytes."""
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         return lib.LLVMGetSectionAddress(self)
 
     def has_symbol(self, symbol):
         """Returns whether a Symbol instance is present in this Section."""
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         assert isinstance(symbol, Symbol)
         return lib.LLVMGetSectionContainsSymbol(self, symbol)
@@ -245,7 +247,7 @@ def get_relocations(self, cache=False):
         on iterators for more.
         """
         if self.expired:
-            raise Exception('Section instance has expired.')
+            raise Exception("Section instance has expired.")
 
         relocations = lib.LLVMGetRelocations(self)
         last = None
@@ -274,10 +276,10 @@ def cache(self):
         limitation. When called, the properties of the Section are fetched so
         they are still available after the Section has been marked inactive.
         """
-        getattr(self, 'name')
-        getattr(self, 'size')
-        getattr(self, 'contents')
-        getattr(self, 'address')
+        getattr(self, "name")
+        getattr(self, "size")
+        getattr(self, "contents")
+        getattr(self, "address")
 
     def expire(self):
         """Expire the section.
@@ -286,8 +288,10 @@ def expire(self):
         """
         self.expired = True
 
+
 class Symbol(LLVMObject):
     """Represents a symbol in an object file."""
+
     def __init__(self, ptr, object_file):
         assert isinstance(ptr, c_object_p)
         assert isinstance(object_file, ObjectFile)
@@ -305,7 +309,7 @@ def name(self):
         mangling could be in effect.
         """
         if self.expired:
-            raise Exception('Symbol instance has expired.')
+            raise Exception("Symbol instance has expired.")
 
         return lib.LLVMGetSymbolName(self)
 
@@ -313,7 +317,7 @@ def name(self):
     def address(self):
         """The address of this symbol, in long bytes."""
         if self.expired:
-            raise Exception('Symbol instance has expired.')
+            raise Exception("Symbol instance has expired.")
 
         return lib.LLVMGetSymbolAddress(self)
 
@@ -321,7 +325,7 @@ def address(self):
     def size(self):
         """The size of the symbol, in long bytes."""
         if self.expired:
-            raise Exception('Symbol instance has expired.')
+            raise Exception("Symbol instance has expired.")
 
         return lib.LLVMGetSymbolSize(self)
 
@@ -342,9 +346,9 @@ def section(self):
 
     def cache(self):
         """Cache all cacheable properties."""
-        getattr(self, 'name')
-        getattr(self, 'address')
-        getattr(self, 'size')
+        getattr(self, "name")
+        getattr(self, "address")
+        getattr(self, "size")
 
     def expire(self):
         """Mark the object as expired to prevent future API accesses.
@@ -354,8 +358,10 @@ def expire(self):
         """
         self.expired = True
 
+
 class Relocation(LLVMObject):
     """Represents a relocation definition."""
+
     def __init__(self, ptr):
         """Create a new relocation instance.
 
@@ -374,7 +380,7 @@ def __init__(self, ptr):
     def offset(self):
         """The offset of this relocation, in long bytes."""
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         return lib.LLVMGetRelocationOffset(self)
 
@@ -382,7 +388,7 @@ def offset(self):
     def symbol(self):
         """The Symbol corresponding to this Relocation."""
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         ptr = lib.LLVMGetRelocationSymbol(self)
         return Symbol(ptr)
@@ -391,7 +397,7 @@ def symbol(self):
     def type_number(self):
         """The relocation type, as a long."""
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         return lib.LLVMGetRelocationType(self)
 
@@ -399,14 +405,14 @@ def type_number(self):
     def type_name(self):
         """The relocation type's name, as a str."""
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         return lib.LLVMGetRelocationTypeName(self)
 
     @CachedProperty
     def value_string(self):
         if self.expired:
-            raise Exception('Relocation instance has expired.')
+            raise Exception("Relocation instance has expired.")
 
         return lib.LLVMGetRelocationValueString(self)
 
@@ -416,12 +422,13 @@ def expire(self):
 
     def cache(self):
         """Cache all cacheable properties on this instance."""
-        getattr(self, 'address')
-        getattr(self, 'offset')
-        getattr(self, 'symbol')
-        getattr(self, 'type')
-        getattr(self, 'type_name')
-        getattr(self, 'value_string')
+        getattr(self, "address")
+        getattr(self, "offset")
+        getattr(self, "symbol")
+        getattr(self, "type")
+        getattr(self, "type_name")
+        getattr(self, "value_string")
+
 
 def register_library(library):
     """Register function prototypes with LLVM library instance."""
@@ -504,5 +511,6 @@ def register_library(library):
     library.LLVMGetRelocationValueString.argtypes = [c_object_p]
     library.LLVMGetRelocationValueString.restype = c_char_p
 
+
 lib = get_library()
 register_library(lib)

diff  --git a/llvm/bindings/python/llvm/tests/base.py b/llvm/bindings/python/llvm/tests/base.py
index aa435bc1f35f5..7350cb419c1eb 100644
--- a/llvm/bindings/python/llvm/tests/base.py
+++ b/llvm/bindings/python/llvm/tests/base.py
@@ -4,18 +4,19 @@
 
 
 POSSIBLE_TEST_BINARIES = [
-    'libreadline.so.5',
-    'libreadline.so.6',
+    "libreadline.so.5",
+    "libreadline.so.6",
 ]
 
 POSSIBLE_TEST_BINARY_PATHS = [
-    '/usr/lib/debug',
-    '/lib',
-    '/usr/lib',
-    '/usr/local/lib',
-    '/lib/i386-linux-gnu',
+    "/usr/lib/debug",
+    "/lib",
+    "/usr/lib",
+    "/usr/local/lib",
+    "/lib/i386-linux-gnu",
 ]
 
+
 class TestBase(unittest.TestCase):
     if sys.version_info.major == 2:
         assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
@@ -33,7 +34,8 @@ def get_test_binary(self):
                 if os.path.exists(path):
                     return path
 
-        raise Exception('No suitable test binaries available!')
+        raise Exception("No suitable test binaries available!")
+
     get_test_binary.__test__ = False
 
     def get_test_file(self):

diff  --git a/llvm/bindings/python/llvm/tests/test_bitreader.py b/llvm/bindings/python/llvm/tests/test_bitreader.py
index 460005a2b87ab..08e55e1297714 100644
--- a/llvm/bindings/python/llvm/tests/test_bitreader.py
+++ b/llvm/bindings/python/llvm/tests/test_bitreader.py
@@ -8,8 +8,8 @@
 from ..core import Module
 from ..bit_reader import parse_bitcode
 
-class TestBitReader(TestBase):
 
+class TestBitReader(TestBase):
     def test_parse_bitcode(self):
         source = self.get_test_bc()
         m = parse_bitcode(MemoryBuffer(filename=source))

diff  --git a/llvm/bindings/python/llvm/tests/test_core.py b/llvm/bindings/python/llvm/tests/test_core.py
index 68572b50b3d66..76a2eaf9db900 100644
--- a/llvm/bindings/python/llvm/tests/test_core.py
+++ b/llvm/bindings/python/llvm/tests/test_core.py
@@ -9,6 +9,7 @@
 from ..core import OpCode
 from ..bit_reader import parse_bitcode
 
+
 class TestCore(TestBase):
     def test_enumerations(self):
         for enum_cls, enum_spec in Enums:
@@ -77,8 +78,7 @@ def test_module_print_module_to_file(self):
     def test_module_function_iteration(self):
         m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
         i = 0
-        functions = ["f", "f2", "f3", "f4", "f5", "f6", "g1", "g2", "h1", "h2",
-                     "h3"]
+        functions = ["f", "f2", "f3", "f4", "f5", "f6", "g1", "g2", "h1", "h2", "h3"]
         # Forward
         for f in m:
             self.assertEqual(f.name, functions[i])
@@ -94,7 +94,7 @@ def test_function_basicblock_iteration(self):
         m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
         i = 0
 
-        bb_list = ['b1', 'b2', 'end']
+        bb_list = ["b1", "b2", "end"]
 
         f = m.first
         while f.name != "f6":
@@ -116,10 +116,12 @@ def test_basicblock_instruction_iteration(self):
         m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
         i = 0
 
-        inst_list = [('arg1', OpCode.ExtractValue),
-                     ('arg2', OpCode.ExtractValue),
-                     ('', OpCode.Call),
-                     ('', OpCode.Ret)]
+        inst_list = [
+            ("arg1", OpCode.ExtractValue),
+            ("arg2", OpCode.ExtractValue),
+            ("", OpCode.Call),
+            ("", OpCode.Ret),
+        ]
 
         bb = m.first.first
 

diff  --git a/llvm/bindings/python/llvm/tests/test_disassembler.py b/llvm/bindings/python/llvm/tests/test_disassembler.py
index 29f2f7060bac5..d4620f69da733 100644
--- a/llvm/bindings/python/llvm/tests/test_disassembler.py
+++ b/llvm/bindings/python/llvm/tests/test_disassembler.py
@@ -4,42 +4,45 @@
 
 from ..disassembler import Disassembler, Option_UseMarkup
 
+
 class TestDisassembler(TestBase):
     def test_instantiate(self):
-         Disassembler('i686-apple-darwin9')
+        Disassembler("i686-apple-darwin9")
 
     def test_basic(self):
-        sequence = '\x67\xe3\x81' # jcxz -127
-        triple = 'i686-apple-darwin9'
+        sequence = "\x67\xe3\x81"  # jcxz -127
+        triple = "i686-apple-darwin9"
 
         disassembler = Disassembler(triple)
 
         count, s = disassembler.get_instruction(sequence)
         self.assertEqual(count, 3)
-        self.assertEqual(s, '\tjcxz\t-127')
+        self.assertEqual(s, "\tjcxz\t-127")
 
     def test_nonexistent_triple(self):
-        with self.assertRaisesRegex(Exception, "Could not obtain disassembler for triple"):
+        with self.assertRaisesRegex(
+            Exception, "Could not obtain disassembler for triple"
+        ):
             Disassembler("nonexistent-triple-raises")
 
     def test_get_instructions(self):
-        sequence = '\x67\xe3\x81\x01\xc7' # jcxz -127; addl %eax, %edi
+        sequence = "\x67\xe3\x81\x01\xc7"  # jcxz -127; addl %eax, %edi
 
-        disassembler = Disassembler('i686-apple-darwin9')
+        disassembler = Disassembler("i686-apple-darwin9")
 
         instructions = list(disassembler.get_instructions(sequence))
         self.assertEqual(len(instructions), 2)
 
-        self.assertEqual(instructions[0], (0, 3, '\tjcxz\t-127'))
-        self.assertEqual(instructions[1], (3, 2, '\taddl\t%eax, %edi'))
+        self.assertEqual(instructions[0], (0, 3, "\tjcxz\t-127"))
+        self.assertEqual(instructions[1], (3, 2, "\taddl\t%eax, %edi"))
 
     def test_set_options(self):
-        sequence = '\x10\x40\x2d\xe9'
-        triple = 'arm-linux-android'
+        sequence = "\x10\x40\x2d\xe9"
+        triple = "arm-linux-android"
 
         disassembler = Disassembler(triple)
         disassembler.set_options(Option_UseMarkup)
         count, s = disassembler.get_instruction(sequence)
         print(s)
         self.assertEqual(count, 4)
-        self.assertEqual(s, '\tpush\t{<reg:r4>, <reg:lr>}')
+        self.assertEqual(s, "\tpush\t{<reg:r4>, <reg:lr>}")

diff  --git a/llvm/bindings/python/llvm/tests/test_object.py b/llvm/bindings/python/llvm/tests/test_object.py
index a45b7beec3353..b9d5868dbfadc 100644
--- a/llvm/bindings/python/llvm/tests/test_object.py
+++ b/llvm/bindings/python/llvm/tests/test_object.py
@@ -6,6 +6,7 @@
 from ..object import Section
 from ..object import Symbol
 
+
 class TestObjectFile(TestBase):
     def get_object_file(self):
         source = self.get_test_binary()

diff  --git a/llvm/docs/conf.py b/llvm/docs/conf.py
index 617ce564bbeef..206f72285a830 100644
--- a/llvm/docs/conf.py
+++ b/llvm/docs/conf.py
@@ -17,211 +17,209 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
+extensions = ["sphinx.ext.intersphinx", "sphinx.ext.todo"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
 source_suffix = {
-    '.rst': 'restructuredtext',
+    ".rst": "restructuredtext",
 }
 
 try:
-  import recommonmark
+    import recommonmark
 except ImportError:
-  # manpages do not use any .md sources
-  if not tags.has('builder-man'):
-    raise
+    # manpages do not use any .md sources
+    if not tags.has("builder-man"):
+        raise
 else:
-  import sphinx
-  if sphinx.version_info >= (3, 0):
-    # This requires 0.5 or later.
-    extensions.append('recommonmark')
-  else:
-    source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}
-  source_suffix['.md'] = 'markdown'
+    import sphinx
+
+    if sphinx.version_info >= (3, 0):
+        # This requires 0.5 or later.
+        extensions.append("recommonmark")
+    else:
+        source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+    source_suffix[".md"] = "markdown"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'LLVM'
-copyright = u'2003-%d, LLVM Project' % date.today().year
+project = "LLVM"
+copyright = "2003-%d, LLVM Project" % date.today().year
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-today_fmt = '%Y-%m-%d'
+today_fmt = "%Y-%m-%d"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
 show_authors = True
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'llvm-theme'
+html_theme = "llvm-theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-html_theme_options = { "nosidebar": False }
+html_theme_options = {"nosidebar": False}
 
 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = ["_themes"]
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d'
+html_last_updated_fmt = "%Y-%m-%d"
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
 
 html_sidebars = {
-    '**': [
-        'indexsidebar.html',
-        'sourcelink.html',
-        'searchbox.html',
+    "**": [
+        "indexsidebar.html",
+        "sourcelink.html",
+        "searchbox.html",
     ]
 }
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
 html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'LLVMdoc'
+htmlhelp_basename = "LLVMdoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'LLVM.tex', u'LLVM Documentation',
-   u'LLVM project', 'manual'),
+    ("index", "LLVM.tex", "LLVM Documentation", "LLVM project", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
@@ -234,59 +232,73 @@
 # guide subdirectory.
 basedir = os.path.dirname(__file__)
 man_page_authors = "Maintained by the LLVM Team (https://llvm.org/)."
-command_guide_subpath = 'CommandGuide'
+command_guide_subpath = "CommandGuide"
 command_guide_path = os.path.join(basedir, command_guide_subpath)
-manpages_url = '{page}.html'
+manpages_url = "{page}.html"
 
 
 def process_md(name):
     file_subpath = os.path.join(command_guide_subpath, name)
     with open(os.path.join(command_guide_path, name)) as f:
-        title = f.readline().rstrip('\n')
+        title = f.readline().rstrip("\n")
 
-        m = re.match(r'^# (\S+) - (.+)$', title)
+        m = re.match(r"^# (\S+) - (.+)$", title)
         if m is None:
-            print("error: invalid title in %r "
-                  "(expected '# <name> - <description>')" % file_subpath,
-                  file=sys.stderr)
+            print(
+                "error: invalid title in %r "
+                "(expected '# <name> - <description>')" % file_subpath,
+                file=sys.stderr,
+            )
         else:
-            man_pages.append((file_subpath.replace('.md',''), m.group(1),
-                              m.group(2), man_page_authors, 1))
+            man_pages.append(
+                (
+                    file_subpath.replace(".md", ""),
+                    m.group(1),
+                    m.group(2),
+                    man_page_authors,
+                    1,
+                )
+            )
 
 
 def process_rst(name):
     file_subpath = os.path.join(command_guide_subpath, name)
     with open(os.path.join(command_guide_path, name)) as f:
-        title = f.readline().rstrip('\n')
-        header = f.readline().rstrip('\n')
+        title = f.readline().rstrip("\n")
+        header = f.readline().rstrip("\n")
 
         if len(header) != len(title):
-            print('error: invalid header in %r (does not match title)' %
-                  file_subpath, file=sys.stderr)
-        if ' - ' not in title:
-            print("error: invalid title in %r "
-                  "(expected '<name> - <description>')" % file_subpath,
-                  file=sys.stderr)
+            print(
+                "error: invalid header in %r (does not match title)" % file_subpath,
+                file=sys.stderr,
+            )
+        if " - " not in title:
+            print(
+                "error: invalid title in %r "
+                "(expected '<name> - <description>')" % file_subpath,
+                file=sys.stderr,
+            )
         # Split the name out of the title.
-        name,description = title.split(' - ', 1)
-        man_pages.append((file_subpath.replace('.rst',''), name,
-                          description, man_page_authors, 1))
+        name, description = title.split(" - ", 1)
+        man_pages.append(
+            (file_subpath.replace(".rst", ""), name, description, man_page_authors, 1)
+        )
 
 
 for name in os.listdir(command_guide_path):
     # Process Markdown files
-    if name.endswith('.md'):
+    if name.endswith(".md"):
         process_md(name)
     # Process ReST files apart from the index page.
-    elif name.endswith('.rst') and name != 'index.rst':
+    elif name.endswith(".rst") and name != "index.rst":
         process_rst(name)
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 # FIXME: Define intersphinx configuration.
 intersphinx_mapping = {}
 
 # Pygment lexer are sometimes out of date (when parsing LLVM for example) or
 # wrong. Suppress the warning so the build doesn't abort.
-suppress_warnings = [ 'misc.highlighting_failure' ]
+suppress_warnings = ["misc.highlighting_failure"]

diff  --git a/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py b/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
index 87bbfbf32bda3..2e39f103cd9f3 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
@@ -5,36 +5,53 @@
 import sys
 import random
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
         self.timeFile = outputname
-        self.shfile = open(scriptname, 'w')
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile = open(scriptname, "w")
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
         """Echo some comments and invoke both versions of toy"""
         rootname = filename
-        if '.' in filename:
-            rootname = filename[:filename.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in filename:
+            rootname = filename[: filename.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %d of %d functions, %d total" >> %s\n'
+            % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy-jit < %s > %s-jit.out 2> %s-jit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class KScriptGenerator:
     """Used to generate random Kaleidoscope code"""
+
     def __init__(self, filename):
-        self.kfile = open(filename, 'w')
+        self.kfile = open(filename, "w")
         self.nextFuncNum = 1
         self.lastFuncNum = None
         self.callWeighting = 0.1
@@ -80,20 +97,22 @@ def updateCalledFunctionList(self, callee):
                 self.updateCalledFunctionList(subCallee)
 
     def setCallWeighting(self, weight):
-        """ Sets the probably of generating a function call"""
+        """Sets the probability of generating a function call"""
         self.callWeighting = weight
 
     def writeln(self, line):
-        self.kfile.write(line + '\n')
+        self.kfile.write(line + "\n")
 
     def writeComment(self, comment):
-        self.writeln('# ' + comment)
+        self.writeln("# " + comment)
 
     def writeEmptyLine(self):
         self.writeln("")
 
     def writePredefinedFunctions(self):
-        self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
+        self.writeComment(
+            "Define ':' for sequencing: as a low-precedence operator that ignores operands"
+        )
         self.writeComment("and just returns the RHS.")
         self.writeln("def binary : 1 (x y) y;")
         self.writeEmptyLine()
@@ -105,16 +124,18 @@ def writePredefinedFunctions(self):
         self.writeComment("Print the result of a function call")
         self.writeln("def printresult(N Result)")
         self.writeln("  # 'result('")
-        self.writeln("  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
-        self.writeln("  printd(N) :");
+        self.writeln(
+            "  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :"
+        )
+        self.writeln("  printd(N) :")
         self.writeln("  # ') = '")
         self.writeln("  putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
-        self.writeln("  printd(Result) :");
+        self.writeln("  printd(Result) :")
         self.writeln("  printlf();")
         self.writeEmptyLine()
 
     def writeRandomOperation(self, LValue, LHS, RHS):
-        shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
+        shouldCallFunc = self.lastFuncNum > 2 and random.random() < self.callWeighting
         if shouldCallFunc:
             funcToCall = random.randrange(1, self.lastFuncNum - 1)
             self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
@@ -130,7 +151,10 @@ def writeRandomOperation(self, LValue, LHS, RHS):
                 self.writeln("  else if %s < %s then" % (RHS, LHS))
                 self.writeln("    %s = %s %s %s" % (LValue, LHS, operation, RHS))
                 self.writeln("  else")
-                self.writeln("    %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
+                self.writeln(
+                    "    %s = %s %s %f :"
+                    % (LValue, LHS, operation, random.uniform(1, 100))
+                )
             else:
                 self.writeln("  %s = %s %s %s :" % (LValue, LHS, operation, RHS))
 
@@ -166,27 +190,43 @@ def writeFunctionCall(self):
         self.writeComment("Call the last function")
         arg1 = random.uniform(1, 100)
         arg2 = random.uniform(1, 100)
-        self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
+        self.writeln(
+            "printresult(%d, func%d(%f, %f) )"
+            % (self.lastFuncNum, self.lastFuncNum, arg1, arg2)
+        )
         self.writeEmptyLine()
         self.updateCalledFunctionList(self.lastFuncNum)
 
     def writeFinalFunctionCounts(self):
-        self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
+        self.writeComment(
+            "Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum)
+        )
+
 
-def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
-    """ Generate a random Kaleidoscope script based on the given parameters """
+def generateKScript(
+    filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript
+):
+    """Generate a random Kaleidoscope script based on the given parameters"""
     print("Generating " + filename)
-    print("  %d functions, %d elements per function, %d functions between execution" %
-          (numFuncs, elementsPerFunc, funcsBetweenExec))
+    print(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     print("  Call weighting = %f" % callWeighting)
     script = KScriptGenerator(filename)
     script.setCallWeighting(callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeComment("Auto-generated script")
-    script.writeComment("  %d functions, %d elements per function, %d functions between execution"
-                         % (numFuncs, elementsPerFunc, funcsBetweenExec))
+    script.writeComment(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     script.writeComment("  call weighting = %f" % callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeEmptyLine()
     script.writePredefinedFunctions()
     funcsSinceLastExec = 0
@@ -202,20 +242,49 @@ def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callW
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
-    timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
+    print(
+        "  Called %d of %d functions, %d total"
+        % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    )
+    timingScript.writeTimingCall(
+        filename, numFuncs, funcsCalled, script.totalCallsExecuted
+    )
+
 
 # Execution begins here
 random.seed()
 
 timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
 
-dataSets = [(5000, 3,  50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
-            (1000, 3,  10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
-            ( 200, 3,   2, 0.50), ( 200, 10,  40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
+dataSets = [
+    (5000, 3, 50, 0.50),
+    (5000, 10, 100, 0.10),
+    (5000, 10, 5, 0.10),
+    (5000, 10, 1, 0.0),
+    (1000, 3, 10, 0.50),
+    (1000, 10, 100, 0.10),
+    (1000, 10, 5, 0.10),
+    (1000, 10, 1, 0.0),
+    (200, 3, 2, 0.50),
+    (200, 10, 40, 0.10),
+    (200, 10, 2, 0.10),
+    (200, 10, 1, 0.0),
+]
 
 # Generate the code
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
-    filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
-    generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
+    filename = "test-%d-%d-%d-%d.k" % (
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        int(callWeighting * 100),
+    )
+    generateKScript(
+        filename,
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        callWeighting,
+        timingScript,
+    )
 print("All done!")

diff  --git a/llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py b/llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py
index 1aa80ee83ac4c..7a289e868a9ce 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/cached/split-lib.py
@@ -2,71 +2,105 @@
 
 from __future__ import print_function
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
-        self.shfile = open(scriptname, 'w')
+        self.shfile = open(scriptname, "w")
         self.timeFile = outputname
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, irname, callname):
         """Echo some comments and invoke both versions of toy"""
         rootname = irname
-        if '.' in irname:
-            rootname = irname[:irname.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %s\" >> %s\n" % (callname, irname, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in irname:
+            rootname = irname[: irname.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %s" >> %s\n' % (callname, irname, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT again\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT again" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-jit -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy-jit -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class LibScriptGenerator:
     """Used to generate a bash script which will convert Kaleidoscope files to IR"""
+
     def __init__(self, filename):
-        self.shfile = open(filename, 'w')
+        self.shfile = open(filename, "w")
 
     def writeLibGenCall(self, libname, irname):
         self.shfile.write("./toy-ir-gen < %s 2> %s\n" % (libname, irname))
 
+
 def splitScript(inputname, libGenScript, timingScript):
-  rootname = inputname[:-2]
-  libname = rootname + "-lib.k"
-  irname = rootname + "-lib.ir"
-  callname = rootname + "-call.k"
-  infile = open(inputname, "r")
-  libfile = open(libname, "w")
-  callfile = open(callname, "w")
-  print("Splitting %s into %s and %s" % (inputname, callname, libname))
-  for line in infile:
-    if not line.startswith("#"):
-      if line.startswith("print"):
-        callfile.write(line)
-      else:
-        libfile.write(line)
-  libGenScript.writeLibGenCall(libname, irname)
-  timingScript.writeTimingCall(irname, callname)
+    rootname = inputname[:-2]
+    libname = rootname + "-lib.k"
+    irname = rootname + "-lib.ir"
+    callname = rootname + "-call.k"
+    infile = open(inputname, "r")
+    libfile = open(libname, "w")
+    callfile = open(callname, "w")
+    print("Splitting %s into %s and %s" % (inputname, callname, libname))
+    for line in infile:
+        if not line.startswith("#"):
+            if line.startswith("print"):
+                callfile.write(line)
+            else:
+                libfile.write(line)
+    libGenScript.writeLibGenCall(libname, irname)
+    timingScript.writeTimingCall(irname, callname)
+
 
 # Execution begins here
 libGenScript = LibScriptGenerator("make-libs.sh")
 timingScript = TimingScriptGenerator("time-lib.sh", "lib-timing.txt")
 
-script_list = ["test-5000-3-50-50.k", "test-5000-10-100-10.k", "test-5000-10-5-10.k", "test-5000-10-1-0.k", 
-               "test-1000-3-10-50.k", "test-1000-10-100-10.k", "test-1000-10-5-10.k", "test-1000-10-1-0.k",
-               "test-200-3-2-50.k", "test-200-10-40-10.k", "test-200-10-2-10.k", "test-200-10-1-0.k"]
+script_list = [
+    "test-5000-3-50-50.k",
+    "test-5000-10-100-10.k",
+    "test-5000-10-5-10.k",
+    "test-5000-10-1-0.k",
+    "test-1000-3-10-50.k",
+    "test-1000-10-100-10.k",
+    "test-1000-10-5-10.k",
+    "test-1000-10-1-0.k",
+    "test-200-3-2-50.k",
+    "test-200-10-40-10.k",
+    "test-200-10-2-10.k",
+    "test-200-10-1-0.k",
+]
 
 for script in script_list:
-  splitScript(script, libGenScript, timingScript)
+    splitScript(script, libGenScript, timingScript)
 print("All done!")

diff  --git a/llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py b/llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
index c3b4d23c002a1..9045541cbe4e8 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/complete/genk-timing.py
@@ -5,41 +5,63 @@
 import sys
 import random
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
         self.timeFile = outputname
-        self.shfile = open(scriptname, 'w')
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile = open(scriptname, "w")
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
         """Echo some comments and invoke both versions of toy"""
         rootname = filename
-        if '.' in filename:
-            rootname = filename[:filename.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT (original)\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in filename:
+            rootname = filename[: filename.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %d of %d functions, %d total" >> %s\n'
+            % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT (original)" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=false < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT (lazy)\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=false < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT (lazy)" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true < %s > %s-mcjit-lazy.out 2> %s-mcjit-lazy.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true < %s > %s-mcjit-lazy.out 2> %s-mcjit-lazy.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=false < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=false < %s > %s-jit.out 2> %s-jit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class KScriptGenerator:
     """Used to generate random Kaleidoscope code"""
+
     def __init__(self, filename):
-        self.kfile = open(filename, 'w')
+        self.kfile = open(filename, "w")
         self.nextFuncNum = 1
         self.lastFuncNum = None
         self.callWeighting = 0.1
@@ -85,20 +107,22 @@ def updateCalledFunctionList(self, callee):
                 self.updateCalledFunctionList(subCallee)
 
     def setCallWeighting(self, weight):
-        """ Sets the probably of generating a function call"""
+        """Sets the probability of generating a function call"""
         self.callWeighting = weight
 
     def writeln(self, line):
-        self.kfile.write(line + '\n')
+        self.kfile.write(line + "\n")
 
     def writeComment(self, comment):
-        self.writeln('# ' + comment)
+        self.writeln("# " + comment)
 
     def writeEmptyLine(self):
         self.writeln("")
 
     def writePredefinedFunctions(self):
-        self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
+        self.writeComment(
+            "Define ':' for sequencing: as a low-precedence operator that ignores operands"
+        )
         self.writeComment("and just returns the RHS.")
         self.writeln("def binary : 1 (x y) y;")
         self.writeEmptyLine()
@@ -110,16 +134,18 @@ def writePredefinedFunctions(self):
         self.writeComment("Print the result of a function call")
         self.writeln("def printresult(N Result)")
         self.writeln("  # 'result('")
-        self.writeln("  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
-        self.writeln("  printd(N) :");
+        self.writeln(
+            "  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :"
+        )
+        self.writeln("  printd(N) :")
         self.writeln("  # ') = '")
         self.writeln("  putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
-        self.writeln("  printd(Result) :");
+        self.writeln("  printd(Result) :")
         self.writeln("  printlf();")
         self.writeEmptyLine()
 
     def writeRandomOperation(self, LValue, LHS, RHS):
-        shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
+        shouldCallFunc = self.lastFuncNum > 2 and random.random() < self.callWeighting
         if shouldCallFunc:
             funcToCall = random.randrange(1, self.lastFuncNum - 1)
             self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
@@ -135,7 +161,10 @@ def writeRandomOperation(self, LValue, LHS, RHS):
                 self.writeln("  else if %s < %s then" % (RHS, LHS))
                 self.writeln("    %s = %s %s %s" % (LValue, LHS, operation, RHS))
                 self.writeln("  else")
-                self.writeln("    %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
+                self.writeln(
+                    "    %s = %s %s %f :"
+                    % (LValue, LHS, operation, random.uniform(1, 100))
+                )
             else:
                 self.writeln("  %s = %s %s %s :" % (LValue, LHS, operation, RHS))
 
@@ -171,27 +200,43 @@ def writeFunctionCall(self):
         self.writeComment("Call the last function")
         arg1 = random.uniform(1, 100)
         arg2 = random.uniform(1, 100)
-        self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
+        self.writeln(
+            "printresult(%d, func%d(%f, %f) )"
+            % (self.lastFuncNum, self.lastFuncNum, arg1, arg2)
+        )
         self.writeEmptyLine()
         self.updateCalledFunctionList(self.lastFuncNum)
 
     def writeFinalFunctionCounts(self):
-        self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
+        self.writeComment(
+            "Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum)
+        )
+
 
-def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
-    """ Generate a random Kaleidoscope script based on the given parameters """
+def generateKScript(
+    filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript
+):
+    """Generate a random Kaleidoscope script based on the given parameters"""
     print("Generating " + filename)
-    print("  %d functions, %d elements per function, %d functions between execution" %
-          (numFuncs, elementsPerFunc, funcsBetweenExec))
+    print(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     print("  Call weighting = %f" % callWeighting)
     script = KScriptGenerator(filename)
     script.setCallWeighting(callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeComment("Auto-generated script")
-    script.writeComment("  %d functions, %d elements per function, %d functions between execution"
-                         % (numFuncs, elementsPerFunc, funcsBetweenExec))
+    script.writeComment(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     script.writeComment("  call weighting = %f" % callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeEmptyLine()
     script.writePredefinedFunctions()
     funcsSinceLastExec = 0
@@ -207,20 +252,49 @@ def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callW
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
-    timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
+    print(
+        "  Called %d of %d functions, %d total"
+        % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    )
+    timingScript.writeTimingCall(
+        filename, numFuncs, funcsCalled, script.totalCallsExecuted
+    )
+
 
 # Execution begins here
 random.seed()
 
 timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
 
-dataSets = [(5000, 3,  50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
-            (1000, 3,  10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
-            ( 200, 3,   2, 0.50), ( 200, 10,  40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
+dataSets = [
+    (5000, 3, 50, 0.50),
+    (5000, 10, 100, 0.10),
+    (5000, 10, 5, 0.10),
+    (5000, 10, 1, 0.0),
+    (1000, 3, 10, 0.50),
+    (1000, 10, 100, 0.10),
+    (1000, 10, 5, 0.10),
+    (1000, 10, 1, 0.0),
+    (200, 3, 2, 0.50),
+    (200, 10, 40, 0.10),
+    (200, 10, 2, 0.10),
+    (200, 10, 1, 0.0),
+]
 
 # Generate the code
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
-    filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
-    generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
+    filename = "test-%d-%d-%d-%d.k" % (
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        int(callWeighting * 100),
+    )
+    generateKScript(
+        filename,
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        callWeighting,
+        timingScript,
+    )
 print("All done!")

diff  --git a/llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py b/llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py
index 61c9a5b169fcd..c920e594e675a 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/complete/split-lib.py
@@ -2,71 +2,108 @@
 
 from __future__ import print_function
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
-        self.shfile = open(scriptname, 'w')
+        self.shfile = open(scriptname, "w")
         self.timeFile = outputname
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, irname, callname):
         """Echo some comments and invoke both versions of toy"""
         rootname = irname
-        if '.' in irname:
-            rootname = irname[:irname.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %s\" >> %s\n" % (callname, irname, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in irname:
+            rootname = irname[: irname.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %s" >> %s\n' % (callname, irname, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT again\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT again" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=false -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=false -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (irname, callname, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class LibScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, filename):
-        self.shfile = open(filename, 'w')
+        self.shfile = open(filename, "w")
 
     def writeLibGenCall(self, libname, irname):
-        self.shfile.write("./toy -suppress-prompts -use-mcjit=false -dump-modules < %s 2> %s\n" % (libname, irname))
+        self.shfile.write(
+            "./toy -suppress-prompts -use-mcjit=false -dump-modules < %s 2> %s\n"
+            % (libname, irname)
+        )
+
 
 def splitScript(inputname, libGenScript, timingScript):
-  rootname = inputname[:-2]
-  libname = rootname + "-lib.k"
-  irname = rootname + "-lib.ir"
-  callname = rootname + "-call.k"
-  infile = open(inputname, "r")
-  libfile = open(libname, "w")
-  callfile = open(callname, "w")
-  print("Splitting %s into %s and %s" % (inputname, callname, libname))
-  for line in infile:
-    if not line.startswith("#"):
-      if line.startswith("print"):
-        callfile.write(line)
-      else:
-        libfile.write(line)
-  libGenScript.writeLibGenCall(libname, irname)
-  timingScript.writeTimingCall(irname, callname)
+    rootname = inputname[:-2]
+    libname = rootname + "-lib.k"
+    irname = rootname + "-lib.ir"
+    callname = rootname + "-call.k"
+    infile = open(inputname, "r")
+    libfile = open(libname, "w")
+    callfile = open(callname, "w")
+    print("Splitting %s into %s and %s" % (inputname, callname, libname))
+    for line in infile:
+        if not line.startswith("#"):
+            if line.startswith("print"):
+                callfile.write(line)
+            else:
+                libfile.write(line)
+    libGenScript.writeLibGenCall(libname, irname)
+    timingScript.writeTimingCall(irname, callname)
+
 
 # Execution begins here
 libGenScript = LibScriptGenerator("make-libs.sh")
 timingScript = TimingScriptGenerator("time-lib.sh", "lib-timing.txt")
 
-script_list = ["test-5000-3-50-50.k", "test-5000-10-100-10.k", "test-5000-10-5-10.k", "test-5000-10-1-0.k", 
-               "test-1000-3-10-50.k", "test-1000-10-100-10.k", "test-1000-10-5-10.k", "test-1000-10-1-0.k",
-               "test-200-3-2-50.k", "test-200-10-40-10.k", "test-200-10-2-10.k", "test-200-10-1-0.k"]
+script_list = [
+    "test-5000-3-50-50.k",
+    "test-5000-10-100-10.k",
+    "test-5000-10-5-10.k",
+    "test-5000-10-1-0.k",
+    "test-1000-3-10-50.k",
+    "test-1000-10-100-10.k",
+    "test-1000-10-5-10.k",
+    "test-1000-10-1-0.k",
+    "test-200-3-2-50.k",
+    "test-200-10-40-10.k",
+    "test-200-10-2-10.k",
+    "test-200-10-1-0.k",
+]
 
 for script in script_list:
-  splitScript(script, libGenScript, timingScript)
+    splitScript(script, libGenScript, timingScript)
 print("All done!")

diff  --git a/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py b/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
index 87bbfbf32bda3..2e39f103cd9f3 100644
--- a/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
+++ b/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
@@ -5,36 +5,53 @@
 import sys
 import random
 
+
 class TimingScriptGenerator:
     """Used to generate a bash script which will invoke the toy and time it"""
+
     def __init__(self, scriptname, outputname):
         self.timeFile = outputname
-        self.shfile = open(scriptname, 'w')
-        self.shfile.write("echo \"\" > %s\n" % self.timeFile)
+        self.shfile = open(scriptname, "w")
+        self.shfile.write('echo "" > %s\n' % self.timeFile)
 
     def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
         """Echo some comments and invoke both versions of toy"""
         rootname = filename
-        if '.' in filename:
-            rootname = filename[:filename.rfind('.')]
-        self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        if "." in filename:
+            rootname = filename[: filename.rfind(".")]
+        self.shfile.write(
+            'echo "%s: Calls %d of %d functions, %d total" >> %s\n'
+            % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With MCJIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
-        self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
+        self.shfile.write(
+            "./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "With JIT" >> %s\n' % self.timeFile)
+        self.shfile.write(
+            '/usr/bin/time -f "Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb"'
+        )
         self.shfile.write(" -o %s -a " % self.timeFile)
-        self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
-        self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
+        self.shfile.write(
+            "./toy-jit < %s > %s-jit.out 2> %s-jit.err\n"
+            % (filename, rootname, rootname)
+        )
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+        self.shfile.write('echo "" >> %s\n' % self.timeFile)
+
 
 class KScriptGenerator:
     """Used to generate random Kaleidoscope code"""
+
     def __init__(self, filename):
-        self.kfile = open(filename, 'w')
+        self.kfile = open(filename, "w")
         self.nextFuncNum = 1
         self.lastFuncNum = None
         self.callWeighting = 0.1
@@ -80,20 +97,22 @@ def updateCalledFunctionList(self, callee):
                 self.updateCalledFunctionList(subCallee)
 
     def setCallWeighting(self, weight):
-        """ Sets the probably of generating a function call"""
+        """Sets the probability of generating a function call"""
         self.callWeighting = weight
 
     def writeln(self, line):
-        self.kfile.write(line + '\n')
+        self.kfile.write(line + "\n")
 
     def writeComment(self, comment):
-        self.writeln('# ' + comment)
+        self.writeln("# " + comment)
 
     def writeEmptyLine(self):
         self.writeln("")
 
     def writePredefinedFunctions(self):
-        self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
+        self.writeComment(
+            "Define ':' for sequencing: as a low-precedence operator that ignores operands"
+        )
         self.writeComment("and just returns the RHS.")
         self.writeln("def binary : 1 (x y) y;")
         self.writeEmptyLine()
@@ -105,16 +124,18 @@ def writePredefinedFunctions(self):
         self.writeComment("Print the result of a function call")
         self.writeln("def printresult(N Result)")
         self.writeln("  # 'result('")
-        self.writeln("  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
-        self.writeln("  printd(N) :");
+        self.writeln(
+            "  putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :"
+        )
+        self.writeln("  printd(N) :")
         self.writeln("  # ') = '")
         self.writeln("  putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
-        self.writeln("  printd(Result) :");
+        self.writeln("  printd(Result) :")
         self.writeln("  printlf();")
         self.writeEmptyLine()
 
     def writeRandomOperation(self, LValue, LHS, RHS):
-        shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
+        shouldCallFunc = self.lastFuncNum > 2 and random.random() < self.callWeighting
         if shouldCallFunc:
             funcToCall = random.randrange(1, self.lastFuncNum - 1)
             self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
@@ -130,7 +151,10 @@ def writeRandomOperation(self, LValue, LHS, RHS):
                 self.writeln("  else if %s < %s then" % (RHS, LHS))
                 self.writeln("    %s = %s %s %s" % (LValue, LHS, operation, RHS))
                 self.writeln("  else")
-                self.writeln("    %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
+                self.writeln(
+                    "    %s = %s %s %f :"
+                    % (LValue, LHS, operation, random.uniform(1, 100))
+                )
             else:
                 self.writeln("  %s = %s %s %s :" % (LValue, LHS, operation, RHS))
 
@@ -166,27 +190,43 @@ def writeFunctionCall(self):
         self.writeComment("Call the last function")
         arg1 = random.uniform(1, 100)
         arg2 = random.uniform(1, 100)
-        self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
+        self.writeln(
+            "printresult(%d, func%d(%f, %f) )"
+            % (self.lastFuncNum, self.lastFuncNum, arg1, arg2)
+        )
         self.writeEmptyLine()
         self.updateCalledFunctionList(self.lastFuncNum)
 
     def writeFinalFunctionCounts(self):
-        self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
+        self.writeComment(
+            "Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum)
+        )
+
 
-def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
-    """ Generate a random Kaleidoscope script based on the given parameters """
+def generateKScript(
+    filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript
+):
+    """Generate a random Kaleidoscope script based on the given parameters"""
     print("Generating " + filename)
-    print("  %d functions, %d elements per function, %d functions between execution" %
-          (numFuncs, elementsPerFunc, funcsBetweenExec))
+    print(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     print("  Call weighting = %f" % callWeighting)
     script = KScriptGenerator(filename)
     script.setCallWeighting(callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeComment("Auto-generated script")
-    script.writeComment("  %d functions, %d elements per function, %d functions between execution"
-                         % (numFuncs, elementsPerFunc, funcsBetweenExec))
+    script.writeComment(
+        "  %d functions, %d elements per function, %d functions between execution"
+        % (numFuncs, elementsPerFunc, funcsBetweenExec)
+    )
     script.writeComment("  call weighting = %f" % callWeighting)
-    script.writeComment("===========================================================================")
+    script.writeComment(
+        "==========================================================================="
+    )
     script.writeEmptyLine()
     script.writePredefinedFunctions()
     funcsSinceLastExec = 0
@@ -202,20 +242,49 @@ def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callW
     script.writeEmptyLine()
     script.writeFinalFunctionCounts()
     funcsCalled = len(script.calledFunctions)
-    print("  Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted))
-    timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
+    print(
+        "  Called %d of %d functions, %d total"
+        % (funcsCalled, numFuncs, script.totalCallsExecuted)
+    )
+    timingScript.writeTimingCall(
+        filename, numFuncs, funcsCalled, script.totalCallsExecuted
+    )
+
 
 # Execution begins here
 random.seed()
 
 timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
 
-dataSets = [(5000, 3,  50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
-            (1000, 3,  10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
-            ( 200, 3,   2, 0.50), ( 200, 10,  40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
+dataSets = [
+    (5000, 3, 50, 0.50),
+    (5000, 10, 100, 0.10),
+    (5000, 10, 5, 0.10),
+    (5000, 10, 1, 0.0),
+    (1000, 3, 10, 0.50),
+    (1000, 10, 100, 0.10),
+    (1000, 10, 5, 0.10),
+    (1000, 10, 1, 0.0),
+    (200, 3, 2, 0.50),
+    (200, 10, 40, 0.10),
+    (200, 10, 2, 0.10),
+    (200, 10, 1, 0.0),
+]
 
 # Generate the code
 for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
-    filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
-    generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
+    filename = "test-%d-%d-%d-%d.k" % (
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        int(callWeighting * 100),
+    )
+    generateKScript(
+        filename,
+        numFuncs,
+        elementsPerFunc,
+        funcsBetweenExec,
+        callWeighting,
+        timingScript,
+    )
 print("All done!")

diff  --git a/llvm/lib/Analysis/models/gen-inline-oz-test-model.py b/llvm/lib/Analysis/models/gen-inline-oz-test-model.py
index d8737f26e0d88..4898509ea544f 100644
--- a/llvm/lib/Analysis/models/gen-inline-oz-test-model.py
+++ b/llvm/lib/Analysis/models/gen-inline-oz-test-model.py
@@ -11,7 +11,7 @@
 
 import tensorflow as tf
 
-POLICY_DECISION_LABEL = 'inlining_decision'
+POLICY_DECISION_LABEL = "inlining_decision"
 POLICY_OUTPUT_SPEC = """
 [
     {
@@ -31,106 +31,110 @@
 
 # pylint: disable=g-complex-comprehension
 def get_input_signature():
-  """Returns the list of features for LLVM inlining."""
-  # int64 features
-  inputs = [
-      tf.TensorSpec(dtype=tf.int64, shape=(), name=key) for key in [
-          'caller_basic_block_count',
-          'caller_conditionally_executed_blocks',
-          'caller_users',
-          'callee_basic_block_count',
-          'callee_conditionally_executed_blocks',
-          'callee_users',
-          'nr_ctant_params',
-          'node_count',
-          'edge_count',
-          'callsite_height',
-          'cost_estimate',
-          'inlining_default',
-          'sroa_savings',
-          'sroa_losses',
-          'load_elimination',
-          'call_penalty',
-          'call_argument_setup',
-          'load_relative_intrinsic',
-          'lowered_call_arg_setup',
-          'indirect_call_penalty',
-          'jump_table_penalty',
-          'case_cluster_penalty',
-          'switch_penalty',
-          'unsimplified_common_instructions',
-          'num_loops',
-          'dead_blocks',
-          'simplified_instructions',
-          'constant_args',
-          'constant_offset_ptr_args',
-          'callsite_cost',
-          'cold_cc_penalty',
-          'last_call_to_static_bonus',
-          'is_multiple_blocks',
-          'nested_inlines',
-          'nested_inline_cost_estimate',
-          'threshold',
-      ]
-  ]
-
-  # float32 features
-  inputs.extend([
-      tf.TensorSpec(dtype=tf.float32, shape=(), name=key)
-      for key in ['discount', 'reward']
-  ])
-
-  # int32 features
-  inputs.extend([
-      tf.TensorSpec(dtype=tf.int32, shape=(), name=key)
-      for key in ['step_type']
-  ])
-  return inputs
+    """Returns the list of features for LLVM inlining."""
+    # int64 features
+    inputs = [
+        tf.TensorSpec(dtype=tf.int64, shape=(), name=key)
+        for key in [
+            "caller_basic_block_count",
+            "caller_conditionally_executed_blocks",
+            "caller_users",
+            "callee_basic_block_count",
+            "callee_conditionally_executed_blocks",
+            "callee_users",
+            "nr_ctant_params",
+            "node_count",
+            "edge_count",
+            "callsite_height",
+            "cost_estimate",
+            "inlining_default",
+            "sroa_savings",
+            "sroa_losses",
+            "load_elimination",
+            "call_penalty",
+            "call_argument_setup",
+            "load_relative_intrinsic",
+            "lowered_call_arg_setup",
+            "indirect_call_penalty",
+            "jump_table_penalty",
+            "case_cluster_penalty",
+            "switch_penalty",
+            "unsimplified_common_instructions",
+            "num_loops",
+            "dead_blocks",
+            "simplified_instructions",
+            "constant_args",
+            "constant_offset_ptr_args",
+            "callsite_cost",
+            "cold_cc_penalty",
+            "last_call_to_static_bonus",
+            "is_multiple_blocks",
+            "nested_inlines",
+            "nested_inline_cost_estimate",
+            "threshold",
+        ]
+    ]
+
+    # float32 features
+    inputs.extend(
+        [
+            tf.TensorSpec(dtype=tf.float32, shape=(), name=key)
+            for key in ["discount", "reward"]
+        ]
+    )
+
+    # int32 features
+    inputs.extend(
+        [tf.TensorSpec(dtype=tf.int32, shape=(), name=key) for key in ["step_type"]]
+    )
+    return inputs
 
 
 def get_output_signature():
-  return POLICY_DECISION_LABEL
+    return POLICY_DECISION_LABEL
 
 
 def get_output_spec():
-  return POLICY_OUTPUT_SPEC
+    return POLICY_OUTPUT_SPEC
+
 
 def get_output_spec_path(path):
-  return os.path.join(path, 'output_spec.json')
+    return os.path.join(path, "output_spec.json")
 
 
 def build_mock_model(path, signature):
-  """Build and save the mock model with the given signature"""
-  module = tf.Module()
-  def action(*inputs):
-    return {signature['output']: tf.constant(value=1, dtype=tf.int64)}
+    """Build and save the mock model with the given signature"""
+    module = tf.Module()
+
+    def action(*inputs):
+        return {signature["output"]: tf.constant(value=1, dtype=tf.int64)}
 
-  module.action = tf.function()(action)
-  action = {'action': module.action.get_concrete_function(signature['inputs'])}
-  tf.saved_model.save(module, path, signatures=action)
+    module.action = tf.function()(action)
+    action = {"action": module.action.get_concrete_function(signature["inputs"])}
+    tf.saved_model.save(module, path, signatures=action)
 
-  output_spec_path = get_output_spec_path(path)
-  with open(output_spec_path, 'w') as f:
-    print(f'Writing output spec to {output_spec_path}.')
-    f.write(signature['output_spec'])
+    output_spec_path = get_output_spec_path(path)
+    with open(output_spec_path, "w") as f:
+        print(f"Writing output spec to {output_spec_path}.")
+        f.write(signature["output_spec"])
 
 
 def get_signature():
-  return {
-      'inputs': get_input_signature(),
-      'output': get_output_signature(),
-      'output_spec': get_output_spec()
-  }
+    return {
+        "inputs": get_input_signature(),
+        "output": get_output_signature(),
+        "output_spec": get_output_spec(),
+    }
 
 
 def main(argv):
-  assert len(argv) == 2
-  model_path = argv[1]
+    assert len(argv) == 2
+    model_path = argv[1]
 
-  print(f'Output model to: [{argv[1]}]')
-  signature = get_signature()
-  build_mock_model(model_path, signature)
+    print(f"Output model to: [{argv[1]}]")
+    signature = get_signature()
+    build_mock_model(model_path, signature)
 
 
-if __name__ == '__main__':
-  main(sys.argv)
+if __name__ == "__main__":
+    main(sys.argv)

diff  --git a/llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py b/llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py
index e41e71a09d828..5af2fb2878b5b 100644
--- a/llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py
+++ b/llvm/lib/Analysis/models/gen-regalloc-eviction-test-model.py
@@ -6,7 +6,8 @@
 import os
 import sys
 import tensorflow as tf
-POLICY_DECISION_LABEL = 'index_to_evict'
+
+POLICY_DECISION_LABEL = "index_to_evict"
 POLICY_OUTPUT_SPEC = """
 [
     {
@@ -22,49 +23,50 @@
     }
 ]
 """
-PER_REGISTER_FEATURE_LIST = ['mask']
+PER_REGISTER_FEATURE_LIST = ["mask"]
 NUM_REGISTERS = 33
 
 
 def get_input_signature():
-  """Returns (time_step_spec, action_spec) for LLVM register allocation."""
-  inputs = dict(
-      (key, tf.TensorSpec(dtype=tf.int64, shape=(NUM_REGISTERS), name=key))
-      for key in PER_REGISTER_FEATURE_LIST)
-  return inputs
+    """Returns (time_step_spec, action_spec) for LLVM register allocation."""
+    inputs = dict(
+        (key, tf.TensorSpec(dtype=tf.int64, shape=(NUM_REGISTERS), name=key))
+        for key in PER_REGISTER_FEATURE_LIST
+    )
+    return inputs
 
 
 def get_output_spec_path(path):
-  return os.path.join(path, 'output_spec.json')
+    return os.path.join(path, "output_spec.json")
 
 
 def build_mock_model(path):
-  """Build and save the mock model with the given signature."""
-  module = tf.Module()
-  # We have to set this useless variable in order for the TF C API to correctly
-  # intake it
-  module.var = tf.Variable(0, dtype=tf.int64)
+    """Build and save the mock model with the given signature."""
+    module = tf.Module()
+    # We have to set this useless variable in order for the TF C API to correctly
+    # intake it
+    module.var = tf.Variable(0, dtype=tf.int64)
+
+    def action(*inputs):
+        result = (
+            tf.math.argmax(tf.cast(inputs[0]["mask"], tf.int32), axis=-1) + module.var
+        )
+        return {POLICY_DECISION_LABEL: result}
 
-  def action(*inputs):
-    result = tf.math.argmax(
-        tf.cast(inputs[0]['mask'], tf.int32), axis=-1) + module.var
-    return {POLICY_DECISION_LABEL: result}
-  module.action = tf.function()(action)
-  action = {
-      'action': module.action.get_concrete_function(get_input_signature())
-  }
-  tf.saved_model.save(module, path, signatures=action)
-  output_spec_path = get_output_spec_path(path)
-  with open(output_spec_path, 'w') as f:
-    print(f'Writing output spec to {output_spec_path}.')
-    f.write(POLICY_OUTPUT_SPEC)
+    module.action = tf.function()(action)
+    action = {"action": module.action.get_concrete_function(get_input_signature())}
+    tf.saved_model.save(module, path, signatures=action)
+    output_spec_path = get_output_spec_path(path)
+    with open(output_spec_path, "w") as f:
+        print(f"Writing output spec to {output_spec_path}.")
+        f.write(POLICY_OUTPUT_SPEC)
 
 
 def main(argv):
-  assert len(argv) == 2
-  model_path = argv[1]
-  build_mock_model(model_path)
+    assert len(argv) == 2
+    model_path = argv[1]
+    build_mock_model(model_path)
 
 
-if __name__ == '__main__':
-  main(sys.argv)
+if __name__ == "__main__":
+    main(sys.argv)

diff  --git a/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py b/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
index 81de2c70565a8..889ddae48b1ff 100644
--- a/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
+++ b/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
@@ -7,7 +7,8 @@
 import os
 import sys
 import tensorflow as tf
-POLICY_DECISION_LABEL = 'priority'
+
+POLICY_DECISION_LABEL = "priority"
 POLICY_OUTPUT_SPEC = """
 [
     {
@@ -23,73 +24,83 @@
     }
 ]
 """
-PER_LIVEINTERVAL_INT64_FEATURE_LIST = [
-    'li_size', 'stage'
-]
-PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST = ['weight'
-]
-PER_LIVEINTERVAL_FEATURE_LIST = PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST + \
-    PER_LIVEINTERVAL_INT64_FEATURE_LIST
-CONTEXT_FEATURE_LIST =  ('discount', 'reward', 'step_type')
+PER_LIVEINTERVAL_INT64_FEATURE_LIST = ["li_size", "stage"]
+PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST = ["weight"]
+PER_LIVEINTERVAL_FEATURE_LIST = (
+    PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST + PER_LIVEINTERVAL_INT64_FEATURE_LIST
+)
+CONTEXT_FEATURE_LIST = ("discount", "reward", "step_type")
 
 
 def get_input_signature():
-   """Returns (time_step_spec, action_spec) for LLVM register allocation."""
-   inputs = dict(
-       (key, tf.TensorSpec(dtype=tf.int64, shape=(), name=key))
-       for key in PER_LIVEINTERVAL_INT64_FEATURE_LIST)
-   inputs.update(
-       dict((key,
-             tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
-            for key in PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST))
-   inputs.update(
-       dict((key, tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
-            for key in ['discount', 'reward']))
-   inputs.update(
-       dict((key, tf.TensorSpec(dtype=tf.int32, shape=(), name=key))
-            for key in ['step_type']))
-   return inputs
+    """Returns (time_step_spec, action_spec) for LLVM register allocation."""
+    inputs = dict(
+        (key, tf.TensorSpec(dtype=tf.int64, shape=(), name=key))
+        for key in PER_LIVEINTERVAL_INT64_FEATURE_LIST
+    )
+    inputs.update(
+        dict(
+            (key, tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
+            for key in PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST
+        )
+    )
+    inputs.update(
+        dict(
+            (key, tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
+            for key in ["discount", "reward"]
+        )
+    )
+    inputs.update(
+        dict(
+            (key, tf.TensorSpec(dtype=tf.int32, shape=(), name=key))
+            for key in ["step_type"]
+        )
+    )
+    return inputs
 
 
 def get_output_spec_path(path):
-   return os.path.join(path, 'output_spec.json')
+    return os.path.join(path, "output_spec.json")
 
 
 def build_mock_model(path):
-   """Build and save the mock model with the given signature."""
-   module = tf.Module()
-   # We have to set this useless variable in order for the TF C API to correctly
-   # intake it
-   module.var = tf.Variable(0, dtype=tf.float32)
+    """Build and save the mock model with the given signature."""
+    module = tf.Module()
+    # We have to set this useless variable in order for the TF C API to correctly
+    # intake it
+    module.var = tf.Variable(0, dtype=tf.float32)
+
+    def action(*inputs):
+        s1 = tf.reduce_sum(
+            [
+                tf.cast(inputs[0][key], tf.float32)
+                for key in PER_LIVEINTERVAL_FEATURE_LIST
+            ],
+            axis=0,
+        )
+        s2 = tf.reduce_sum(
+            [tf.cast(inputs[0][key], tf.float32) for key in CONTEXT_FEATURE_LIST]
+        )
+        # Add a large number so s won't be 0.
+        s = s1 + s2
+        result = s + module.var
+        return {POLICY_DECISION_LABEL: result}
 
-   def action(*inputs):
-     s1 = tf.reduce_sum([
-         tf.cast(inputs[0][key], tf.float32) for key in PER_LIVEINTERVAL_FEATURE_LIST
-     ],
-         axis=0)
-     s2 = tf.reduce_sum(
-         [tf.cast(inputs[0][key], tf.float32) for key in CONTEXT_FEATURE_LIST])
-     # Add a large number so s won't be 0.
-     s = s1 + s2
-     result = s + module.var
-     return {POLICY_DECISION_LABEL: result}
-   module.action = tf.function()(action)
-   action = {
-       'action': module.action.get_concrete_function(get_input_signature())
-   }
+    module.action = tf.function()(action)
+    action = {"action": module.action.get_concrete_function(get_input_signature())}
 
-   tf.saved_model.save(module, path, signatures=action)
-   output_spec_path = get_output_spec_path(path)
-   with open(output_spec_path, 'w') as f:
-     print(f'Writing output spec to {output_spec_path}.')
-     f.write(POLICY_OUTPUT_SPEC)
+    tf.saved_model.save(module, path, signatures=action)
+    output_spec_path = get_output_spec_path(path)
+    with open(output_spec_path, "w") as f:
+        print(f"Writing output spec to {output_spec_path}.")
+        f.write(POLICY_OUTPUT_SPEC)
 
 
 def main(argv):
-   assert len(argv) == 2
-   model_path = argv[1]
-   build_mock_model(model_path)
+    assert len(argv) == 2
+    model_path = argv[1]
+    build_mock_model(model_path)
 
 
-if __name__ == '__main__':
-   main(sys.argv)
+if __name__ == "__main__":
+    main(sys.argv)

diff  --git a/llvm/lib/Analysis/models/interactive_host.py b/llvm/lib/Analysis/models/interactive_host.py
index 79c74ac3cd881..759c791614a1d 100644
--- a/llvm/lib/Analysis/models/interactive_host.py
+++ b/llvm/lib/Analysis/models/interactive_host.py
@@ -20,68 +20,76 @@
 from typing import Callable, List, Union
 
 
-def send(f: io.BufferedWriter, value: Union[int, float],
-         spec: log_reader.TensorSpec):
-  """Send the `value` - currently just a scalar - formatted as per `spec`."""
+def send(f: io.BufferedWriter, value: Union[int, float], spec: log_reader.TensorSpec):
+    """Send the `value` - currently just a scalar - formatted as per `spec`."""
 
-  # just int64 for now
-  assert (spec.element_type == ctypes.c_int64)
-  to_send = ctypes.c_int64(int(value))
-  assert f.write(bytes(to_send)) == ctypes.sizeof(
-      spec.element_type) * math.prod(spec.shape)
-  f.flush()
+    # just int64 for now
+    assert spec.element_type == ctypes.c_int64
+    to_send = ctypes.c_int64(int(value))
+    assert f.write(bytes(to_send)) == ctypes.sizeof(spec.element_type) * math.prod(
+        spec.shape
+    )
+    f.flush()
 
 
-def run_interactive(temp_rootname: str,
-                    make_response: Callable[[List[log_reader.TensorValue]],
-                                            Union[int, float]],
-                    process_and_args: List[str]):
-  """Host the compiler.
-  Args:
-    temp_rootname: the base file name from which to construct the 2 pipes for
-    communicating with the compiler.
-    make_response: a function that, given the current tensor values, provides a
-    response.
-    process_and_args: the full commandline for the compiler. It is assumed it
-    contains a flag pointing to `temp_rootname` so that the InteractiveModeRunner
-    would attempt communication on the same pair as this function opens.
+def run_interactive(
+    temp_rootname: str,
+    make_response: Callable[[List[log_reader.TensorValue]], Union[int, float]],
+    process_and_args: List[str],
+):
+    """Host the compiler.
+    Args:
+      temp_rootname: the base file name from which to construct the 2 pipes for
+      communicating with the compiler.
+      make_response: a function that, given the current tensor values, provides a
+      response.
+      process_and_args: the full commandline for the compiler. It is assumed it
+      contains a flag pointing to `temp_rootname` so that the InteractiveModeRunner
+      would attempt communication on the same pair as this function opens.
 
-  This function sets up the communication with the compiler - via 2 files named
-  `temp_rootname`.in and `temp_rootname`.out - prints out the received features,
-  and sends back to the compiler an advice (which it gets from `make_response`).
-  It's used for testing, and also to showcase how to set up communication in an
-  interactive ML ("gym") environment.
-  """
-  to_compiler = temp_rootname + ".in"
-  from_compiler = temp_rootname + ".out"
-  try:
-    os.mkfifo(to_compiler, 0o666)
-    os.mkfifo(from_compiler, 0o666)
-    compiler_proc = subprocess.Popen(
-        process_and_args, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL)
-    with io.BufferedWriter(io.FileIO(to_compiler, 'wb')) as tc:
-      with io.BufferedReader(io.FileIO(from_compiler, 'rb')) as fc:
-        tensor_specs, _, advice_spec = log_reader.read_header(fc)
-        context = None
-        while compiler_proc.poll() is None:
-          next_event = fc.readline()
-          if not next_event:
-            break
-          last_context, observation_id, features, _ = log_reader.read_one_observation(
-              context, next_event, fc, tensor_specs, None)
-          if last_context != context:
-            print(f'context: {last_context}')
-          context = last_context
-          print(f'observation: {observation_id}')
-          tensor_values = []
-          for fv in features:
-            log_reader.pretty_print_tensor_value(fv)
-            tensor_values.append(fv)
-          send(tc, make_response(tensor_values), advice_spec)
-    _, err = compiler_proc.communicate()
-    print(err.decode('utf-8'))
-    compiler_proc.wait()
+    This function sets up the communication with the compiler - via 2 files named
+    `temp_rootname`.in and `temp_rootname`.out - prints out the received features,
+    and sends back to the compiler an advice (which it gets from `make_response`).
+    It's used for testing, and also to showcase how to set up communication in an
+    interactive ML ("gym") environment.
+    """
+    to_compiler = temp_rootname + ".in"
+    from_compiler = temp_rootname + ".out"
+    try:
+        os.mkfifo(to_compiler, 0o666)
+        os.mkfifo(from_compiler, 0o666)
+        compiler_proc = subprocess.Popen(
+            process_and_args, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL
+        )
+        with io.BufferedWriter(io.FileIO(to_compiler, "wb")) as tc:
+            with io.BufferedReader(io.FileIO(from_compiler, "rb")) as fc:
+                tensor_specs, _, advice_spec = log_reader.read_header(fc)
+                context = None
+                while compiler_proc.poll() is None:
+                    next_event = fc.readline()
+                    if not next_event:
+                        break
+                    (
+                        last_context,
+                        observation_id,
+                        features,
+                        _,
+                    ) = log_reader.read_one_observation(
+                        context, next_event, fc, tensor_specs, None
+                    )
+                    if last_context != context:
+                        print(f"context: {last_context}")
+                    context = last_context
+                    print(f"observation: {observation_id}")
+                    tensor_values = []
+                    for fv in features:
+                        log_reader.pretty_print_tensor_value(fv)
+                        tensor_values.append(fv)
+                    send(tc, make_response(tensor_values), advice_spec)
+        _, err = compiler_proc.communicate()
+        print(err.decode("utf-8"))
+        compiler_proc.wait()
 
-  finally:
-    os.unlink(to_compiler)
-    os.unlink(from_compiler)
+    finally:
+        os.unlink(to_compiler)
+        os.unlink(from_compiler)

diff  --git a/llvm/lib/Analysis/models/log_reader.py b/llvm/lib/Analysis/models/log_reader.py
index be0f218d4ae77..7080276a0d85d 100644
--- a/llvm/lib/Analysis/models/log_reader.py
+++ b/llvm/lib/Analysis/models/log_reader.py
@@ -11,128 +11,130 @@
 from typing import List, Optional
 
 _element_types = {
-    'float': ctypes.c_float,
-    'double': ctypes.c_double,
-    'int8_t': ctypes.c_int8,
-    'uint8_t': ctypes.c_uint8,
-    'int16_t': ctypes.c_int16,
-    'uint16_t': ctypes.c_uint16,
-    'int32_t': ctypes.c_int32,
-    'uint32_t': ctypes.c_uint32,
-    'int64_t': ctypes.c_int64,
-    'uint64_t': ctypes.c_uint64
+    "float": ctypes.c_float,
+    "double": ctypes.c_double,
+    "int8_t": ctypes.c_int8,
+    "uint8_t": ctypes.c_uint8,
+    "int16_t": ctypes.c_int16,
+    "uint16_t": ctypes.c_uint16,
+    "int32_t": ctypes.c_int32,
+    "uint32_t": ctypes.c_uint32,
+    "int64_t": ctypes.c_int64,
+    "uint64_t": ctypes.c_uint64,
 }
 
 
 @dataclasses.dataclass(frozen=True)
 class TensorSpec:
-  name: str
-  port: int
-  shape: List[int]
-  element_type: type
-
-  @staticmethod
-  def from_dict(d: dict):
-    name = d['name']
-    port = d['port']
-    shape = [int(e) for e in d['shape']]
-    element_type_str = d['type']
-    if element_type_str not in _element_types:
-      raise ValueError(f'uknown type: {element_type_str}')
-    return TensorSpec(
-        name=name,
-        port=port,
-        shape=shape,
-        element_type=_element_types[element_type_str])
+    name: str
+    port: int
+    shape: List[int]
+    element_type: type
+
+    @staticmethod
+    def from_dict(d: dict):
+        name = d["name"]
+        port = d["port"]
+        shape = [int(e) for e in d["shape"]]
+        element_type_str = d["type"]
+        if element_type_str not in _element_types:
+            raise ValueError(f"uknown type: {element_type_str}")
+        return TensorSpec(
+            name=name,
+            port=port,
+            shape=shape,
+            element_type=_element_types[element_type_str],
+        )
 
 
 class TensorValue:
+    def __init__(self, spec: TensorSpec, buffer: bytes):
+        self._spec = spec
+        self._buffer = buffer
+        self._view = ctypes.cast(self._buffer, ctypes.POINTER(self._spec.element_type))
+        self._len = math.prod(self._spec.shape)
 
-  def __init__(self, spec: TensorSpec, buffer: bytes):
-    self._spec = spec
-    self._buffer = buffer
-    self._view = ctypes.cast(self._buffer,
-                             ctypes.POINTER(self._spec.element_type))
-    self._len = math.prod(self._spec.shape)
+    def spec(self) -> TensorSpec:
+        return self._spec
 
-  def spec(self) -> TensorSpec:
-    return self._spec
+    def __len__(self) -> int:
+        return self._len
 
-  def __len__(self) -> int:
-    return self._len
-
-  def __getitem__(self, index):
-    if index < 0 or index >= self._len:
-      raise IndexError(f'Index {index} out of range [0..{self._len})')
-    return self._view[index]
+    def __getitem__(self, index):
+        if index < 0 or index >= self._len:
+            raise IndexError(f"Index {index} out of range [0..{self._len})")
+        return self._view[index]
 
 
 def read_tensor(fs: io.BufferedReader, ts: TensorSpec) -> TensorValue:
-  size = math.prod(ts.shape) * ctypes.sizeof(ts.element_type)
-  data = fs.read(size)
-  return TensorValue(ts, data)
+    size = math.prod(ts.shape) * ctypes.sizeof(ts.element_type)
+    data = fs.read(size)
+    return TensorValue(ts, data)
 
 
 def pretty_print_tensor_value(tv: TensorValue):
-  print(f'{tv.spec().name}: {",".join([str(v) for v in tv])}')
+    print(f'{tv.spec().name}: {",".join([str(v) for v in tv])}')
 
 
 def read_header(f: io.BufferedReader):
-  header = json.loads(f.readline())
-  tensor_specs = [TensorSpec.from_dict(ts) for ts in header['features']]
-  score_spec = TensorSpec.from_dict(
-      header['score']) if 'score' in header else None
-  advice_spec = TensorSpec.from_dict(
-      header['advice']) if 'advice' in header else None
-  return tensor_specs, score_spec, advice_spec
-
-
-def read_one_observation(context: Optional[str], event_str: str,
-                         f: io.BufferedReader, tensor_specs: List[TensorSpec],
-                         score_spec: Optional[TensorSpec]):
-  event = json.loads(event_str)
-  if 'context' in event:
-    context = event['context']
-    event = json.loads(f.readline())
-  observation_id = int(event['observation'])
-  features = []
-  for ts in tensor_specs:
-    features.append(read_tensor(f, ts))
-  f.readline()
-  score = None
-  if score_spec is not None:
-    score_header = json.loads(f.readline())
-    assert int(score_header['outcome']) == observation_id
-    score = read_tensor(f, score_spec)
+    header = json.loads(f.readline())
+    tensor_specs = [TensorSpec.from_dict(ts) for ts in header["features"]]
+    score_spec = TensorSpec.from_dict(header["score"]) if "score" in header else None
+    advice_spec = TensorSpec.from_dict(header["advice"]) if "advice" in header else None
+    return tensor_specs, score_spec, advice_spec
+
+
+def read_one_observation(
+    context: Optional[str],
+    event_str: str,
+    f: io.BufferedReader,
+    tensor_specs: List[TensorSpec],
+    score_spec: Optional[TensorSpec],
+):
+    event = json.loads(event_str)
+    if "context" in event:
+        context = event["context"]
+        event = json.loads(f.readline())
+    observation_id = int(event["observation"])
+    features = []
+    for ts in tensor_specs:
+        features.append(read_tensor(f, ts))
     f.readline()
-  return context, observation_id, features, score
+    score = None
+    if score_spec is not None:
+        score_header = json.loads(f.readline())
+        assert int(score_header["outcome"]) == observation_id
+        score = read_tensor(f, score_spec)
+        f.readline()
+    return context, observation_id, features, score
 
 
 def read_stream(fname: str):
-  with io.BufferedReader(io.FileIO(fname, 'rb')) as f:
-    tensor_specs, score_spec, _ = read_header(f)
-    context = None
-    while True:
-      event_str = f.readline()
-      if not event_str:
-        break
-      context, observation_id, features, score = read_one_observation(
-          context, event_str, f, tensor_specs, score_spec)
-      yield context, observation_id, features, score
+    with io.BufferedReader(io.FileIO(fname, "rb")) as f:
+        tensor_specs, score_spec, _ = read_header(f)
+        context = None
+        while True:
+            event_str = f.readline()
+            if not event_str:
+                break
+            context, observation_id, features, score = read_one_observation(
+                context, event_str, f, tensor_specs, score_spec
+            )
+            yield context, observation_id, features, score
 
 
 def main(args):
-  last_context = None
-  for ctx, obs_id, features, score in read_stream(args[1]):
-    if last_context != ctx:
-      print(f'context: {ctx}')
-      last_context = ctx
-    print(f'observation: {obs_id}')
-    for fv in features:
-      pretty_print_tensor_value(fv)
-    if score:
-      pretty_print_tensor_value(score)
-
-
-if __name__ == '__main__':
-  main(sys.argv)
+    last_context = None
+    for ctx, obs_id, features, score in read_stream(args[1]):
+        if last_context != ctx:
+            print(f"context: {ctx}")
+            last_context = ctx
+        print(f"observation: {obs_id}")
+        for fv in features:
+            pretty_print_tensor_value(fv)
+        if score:
+            pretty_print_tensor_value(score)
+
+
+if __name__ == "__main__":
+    main(sys.argv)

diff  --git a/llvm/lib/Analysis/models/saved-model-to-tflite.py b/llvm/lib/Analysis/models/saved-model-to-tflite.py
index e9d45fdf983b7..9c83718732945 100644
--- a/llvm/lib/Analysis/models/saved-model-to-tflite.py
+++ b/llvm/lib/Analysis/models/saved-model-to-tflite.py
@@ -14,24 +14,24 @@
 
 
 def main(argv):
-  assert len(argv) == 3
-  sm_dir = argv[1]
-  tfl_dir = argv[2]
-  tf.io.gfile.makedirs(tfl_dir)
-  tfl_path = os.path.join(tfl_dir, 'model.tflite')
-  converter = tf.lite.TFLiteConverter.from_saved_model(sm_dir)
-  converter.target_spec.supported_ops = [
-    tf.lite.OpsSet.TFLITE_BUILTINS,
-  ]
-  tfl_model = converter.convert()
-  with tf.io.gfile.GFile(tfl_path, 'wb') as f:
-    f.write(tfl_model)
-  
-  json_file = 'output_spec.json'
-  src_json = os.path.join(sm_dir, json_file)
-  if tf.io.gfile.exists(src_json):
-    tf.io.gfile.copy(src_json,
-                     os.path.join(tfl_dir, json_file))
-
-if __name__ == '__main__':
-  main(sys.argv)
+    assert len(argv) == 3
+    sm_dir = argv[1]
+    tfl_dir = argv[2]
+    tf.io.gfile.makedirs(tfl_dir)
+    tfl_path = os.path.join(tfl_dir, "model.tflite")
+    converter = tf.lite.TFLiteConverter.from_saved_model(sm_dir)
+    converter.target_spec.supported_ops = [
+        tf.lite.OpsSet.TFLITE_BUILTINS,
+    ]
+    tfl_model = converter.convert()
+    with tf.io.gfile.GFile(tfl_path, "wb") as f:
+        f.write(tfl_model)
+
+    json_file = "output_spec.json"
+    src_json = os.path.join(sm_dir, json_file)
+    if tf.io.gfile.exists(src_json):
+        tf.io.gfile.copy(src_json, os.path.join(tfl_dir, json_file))
+
+
+if __name__ == "__main__":
+    main(sys.argv)

diff  --git a/llvm/test/BugPoint/compile-custom.ll.py b/llvm/test/BugPoint/compile-custom.ll.py
index b0062ac0b74f3..8f3e3c41d7799 100755
--- a/llvm/test/BugPoint/compile-custom.ll.py
+++ b/llvm/test/BugPoint/compile-custom.ll.py
@@ -7,6 +7,6 @@
 # Currently any print-out from the custom tool is interpreted as a crash
 # (i.e. test is still interesting)
 
-print("Error: " + ' '.join(sys.argv[1:]))
+print("Error: " + " ".join(sys.argv[1:]))
 
 sys.exit(1)

diff  --git a/llvm/test/CodeGen/AArch64/Atomics/generate-tests.py b/llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
index 8bc10be446ff9..ecda5fd69ca5d 100755
--- a/llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
+++ b/llvm/test/CodeGen/AArch64/Atomics/generate-tests.py
@@ -2,13 +2,14 @@
 import textwrap
 import enum
 import os
+
 """
 Generate the tests in llvm/test/CodeGen/AArch64/Atomics. Run from top level llvm-project.
 """
 
 TRIPLES = [
-    'aarch64',
-    'aarch64_be',
+    "aarch64",
+    "aarch64_be",
 ]
 
 
@@ -117,28 +118,28 @@ class Feature(enum.Flag):
     @property
     def mattr(self):
         if self == Feature.outline_atomics:
-            return '+outline-atomics'
+            return "+outline-atomics"
         if self == Feature.v8_1a:
-            return '+v8.1a'
+            return "+v8.1a"
         if self == Feature.rcpc3:
-            return '+lse2,+rcpc3'
+            return "+lse2,+rcpc3"
         if self == Feature.lse2_lse128:
-            return '+lse2,+lse128'
-        return '+' + self.name
+            return "+lse2,+lse128"
+        return "+" + self.name
 
 
 ATOMICRMW_OPS = [
-    'xchg',
-    'add',
-    'sub',
-    'and',
-    'nand',
-    'or',
-    'xor',
-    'max',
-    'min',
-    'umax',
-    'umin',
+    "xchg",
+    "add",
+    "sub",
+    "and",
+    "nand",
+    "or",
+    "xor",
+    "max",
+    "min",
+    "umax",
+    "umin",
 ]
 
 
@@ -147,15 +148,18 @@ def all_atomicrmw(f):
         for aligned in Aligned:
             for ty in Type:
                 for ordering in ATOMICRMW_ORDERS:
-                    name = f'atomicrmw_{op}_{ty}_{aligned}_{ordering}'
-                    instr = 'atomicrmw'
+                    name = f"atomicrmw_{op}_{ty}_{aligned}_{ordering}"
+                    instr = "atomicrmw"
                     f.write(
-                        textwrap.dedent(f'''
+                        textwrap.dedent(
+                            f"""
                         define dso_local {ty} @{name}(ptr %ptr, {ty} %value) {{
                             %r = {instr} {op} ptr %ptr, {ty} %value {ordering}, align {ty.align(aligned)}
                             ret {ty} %r
                         }}
-                    '''))
+                    """
+                        )
+                    )
 
 
 def all_load(f):
@@ -163,33 +167,39 @@ def all_load(f):
         for ty in Type:
             for ordering in ATOMIC_LOAD_ORDERS:
                 for const in [False, True]:
-                    name = f'load_atomic_{ty}_{aligned}_{ordering}'
-                    instr = 'load atomic'
+                    name = f"load_atomic_{ty}_{aligned}_{ordering}"
+                    instr = "load atomic"
                     if const:
-                        name += '_const'
-                    arg = 'ptr readonly %ptr' if const else 'ptr %ptr'
+                        name += "_const"
+                    arg = "ptr readonly %ptr" if const else "ptr %ptr"
                     f.write(
-                        textwrap.dedent(f'''
+                        textwrap.dedent(
+                            f"""
                         define dso_local {ty} @{name}({arg}) {{
                             %r = {instr} {ty}, ptr %ptr {ordering}, align {ty.align(aligned)}
                             ret {ty} %r
                         }}
-                    '''))
+                    """
+                        )
+                    )
 
 
 def all_store(f):
     for aligned in Aligned:
         for ty in Type:
             for ordering in ATOMIC_STORE_ORDERS:  # FIXME stores
-                name = f'store_atomic_{ty}_{aligned}_{ordering}'
-                instr = 'store atomic'
+                name = f"store_atomic_{ty}_{aligned}_{ordering}"
+                instr = "store atomic"
                 f.write(
-                    textwrap.dedent(f'''
+                    textwrap.dedent(
+                        f"""
                     define dso_local void @{name}({ty} %value, ptr %ptr) {{
                         {instr} {ty} %value, ptr %ptr {ordering}, align {ty.align(aligned)}
                         ret void
                     }}
-                '''))
+                """
+                    )
+                )
 
 
 def all_cmpxchg(f):
@@ -198,85 +208,113 @@ def all_cmpxchg(f):
             for success_ordering in CMPXCHG_SUCCESS_ORDERS:
                 for failure_ordering in CMPXCHG_FAILURE_ORDERS:
                     for weak in [False, True]:
-                        name = f'cmpxchg_{ty}_{aligned}_{success_ordering}_{failure_ordering}'
-                        instr = 'cmpxchg'
+                        name = f"cmpxchg_{ty}_{aligned}_{success_ordering}_{failure_ordering}"
+                        instr = "cmpxchg"
                         if weak:
-                            name += '_weak'
-                            instr += ' weak'
+                            name += "_weak"
+                            instr += " weak"
                         f.write(
-                            textwrap.dedent(f'''
+                            textwrap.dedent(
+                                f"""
                             define dso_local {ty} @{name}({ty} %expected, {ty} %new, ptr %ptr) {{
                                 %pair = {instr} ptr %ptr, {ty} %expected, {ty} %new {success_ordering} {failure_ordering}, align {ty.align(aligned)}
                                 %r = extractvalue {{ {ty}, i1 }} %pair, 0
                                 ret {ty} %r
                             }}
-                        '''))
+                        """
+                            )
+                        )
 
 
 def all_fence(f):
     for ordering in FENCE_ORDERS:
-        name = f'fence_{ordering}'
+        name = f"fence_{ordering}"
         f.write(
-            textwrap.dedent(f'''
+            textwrap.dedent(
+                f"""
             define dso_local void @{name}() {{
                 fence {ordering}
                 ret void
             }}
-        '''))
+        """
+            )
+        )
 
 
 def header(f, triple, features, filter_args: str):
-    f.write('; NOTE: Assertions have been autogenerated by '
-            'utils/update_llc_test_checks.py UTC_ARGS: ')
+    f.write(
+        "; NOTE: Assertions have been autogenerated by "
+        "utils/update_llc_test_checks.py UTC_ARGS: "
+    )
     f.write(filter_args)
-    f.write('\n')
-    f.write(f'; The base test file was generated by {__file__}\n')
+    f.write("\n")
+    f.write(f"; The base test file was generated by {__file__}\n")
     for feat in features:
-        for OptFlag in ['-O0', '-O1']:
-            f.write(' '.join([
-                ';', 'RUN:', 'llc', '%s', '-o', '-', '-verify-machineinstrs',
-                f'-mtriple={triple}', f'-mattr={feat.mattr}', OptFlag, '|',
-                'FileCheck', '%s', f'--check-prefixes=CHECK,{OptFlag}\n'
-            ]))
+        for OptFlag in ["-O0", "-O1"]:
+            f.write(
+                " ".join(
+                    [
+                        ";",
+                        "RUN:",
+                        "llc",
+                        "%s",
+                        "-o",
+                        "-",
+                        "-verify-machineinstrs",
+                        f"-mtriple={triple}",
+                        f"-mattr={feat.mattr}",
+                        OptFlag,
+                        "|",
+                        "FileCheck",
+                        "%s",
+                        f"--check-prefixes=CHECK,{OptFlag}\n",
+                    ]
+                )
+            )
 
 
 def write_lit_tests():
-    os.chdir('llvm/test/CodeGen/AArch64/Atomics/')
+    os.chdir("llvm/test/CodeGen/AArch64/Atomics/")
     for triple in TRIPLES:
         # Feature has no effect on fence, so keep it to one file.
-        with open(f'{triple}-fence.ll', 'w') as f:
+        with open(f"{triple}-fence.ll", "w") as f:
             filter_args = r'--filter "^\s*(dmb)"'
             header(f, triple, Feature, filter_args)
             all_fence(f)
 
         for feat in Feature:
-            with open(f'{triple}-atomicrmw-{feat.name}.ll', 'w') as f:
+            with open(f"{triple}-atomicrmw-{feat.name}.ll", "w") as f:
                 filter_args = r'--filter-out "\b(sp)\b" --filter "^\s*(ld[^r]|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"'
                 header(f, triple, [feat], filter_args)
                 all_atomicrmw(f)
 
-            with open(f'{triple}-cmpxchg-{feat.name}.ll', 'w') as f:
+            with open(f"{triple}-cmpxchg-{feat.name}.ll", "w") as f:
                 filter_args = r'--filter-out "\b(sp)\b" --filter "^\s*(ld[^r]|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"'
                 header(f, triple, [feat], filter_args)
                 all_cmpxchg(f)
 
-            with open(f'{triple}-atomic-load-{feat.name}.ll', 'w') as f:
+            with open(f"{triple}-atomic-load-{feat.name}.ll", "w") as f:
                 filter_args = r'--filter-out "\b(sp)\b" --filter "^\s*(ld|st[^r]|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"'
                 header(f, triple, [feat], filter_args)
                 all_load(f)
 
-            with open(f'{triple}-atomic-store-{feat.name}.ll', 'w') as f:
+            with open(f"{triple}-atomic-store-{feat.name}.ll", "w") as f:
                 filter_args = r'--filter-out "\b(sp)\b" --filter "^\s*(ld[^r]|st|swp|cas|bl|add|and|eor|orn|orr|sub|mvn|sxt|cmp|ccmp|csel|dmb)"'
                 header(f, triple, [feat], filter_args)
                 all_store(f)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     write_lit_tests()
 
-    print(textwrap.dedent('''
+    print(
+        textwrap.dedent(
+            """
         Testcases written. To update checks run:
             $ ./llvm/utils/update_llc_test_checks.py -u llvm/test/CodeGen/AArch64/Atomics/*.ll
 
         Or in parallel:
             $ parallel ./llvm/utils/update_llc_test_checks.py -u ::: llvm/test/CodeGen/AArch64/Atomics/*.ll
-    '''))
+    """
+        )
+    )

diff  --git a/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py b/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py
index dc96804c0ba91..53809b0a04008 100644
--- a/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py
+++ b/llvm/test/CodeGen/MLRegalloc/Inputs/interactive_main.py
@@ -4,25 +4,24 @@
 
 
 def main(args):
-  # this advisor just picks the first legal register to evict, which is
-  # identifiable by the "mask" feature
-  class Advisor:
-    to_return = False
+    # this advisor just picks the first legal register to evict, which is
+    # identifiable by the "mask" feature
+    class Advisor:
+        to_return = False
 
-    def advice(self, tensor_values: list[log_reader.TensorValue]):
-      for tv in tensor_values:
-        if tv.spec().name != 'mask':
-          continue
-        for i, v in enumerate(tv):
-          if v == 1:
-            return i
-      # i.e. invalid:
-      return -1
+        def advice(self, tensor_values: list[log_reader.TensorValue]):
+            for tv in tensor_values:
+                if tv.spec().name != "mask":
+                    continue
+                for i, v in enumerate(tv):
+                    if v == 1:
+                        return i
+            # i.e. invalid:
+            return -1
 
+    a = Advisor()
+    interactive_host.run_interactive(args[0], a.advice, args[1:])
 
-  a = Advisor()
-  interactive_host.run_interactive(args[0], a.advice, args[1:])
 
-
-if __name__ == '__main__':
-  main(sys.argv[1:])
+if __name__ == "__main__":
+    main(sys.argv[1:])

diff  --git a/llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py b/llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py
index 8ed4406fe9b3d..67fabbac1d4e9 100644
--- a/llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py
+++ b/llvm/test/CodeGen/NVPTX/ld-st-addrrspace.py
@@ -20,7 +20,7 @@
     "half": "b16",
     "<2 x half>": "b32",
     "float": "f32",
-    "double": "f64"
+    "double": "f64",
 }
 
 llvm_type_to_ptx_reg = {
@@ -31,7 +31,7 @@
     "half": "h",
     "<2 x half>": "hh",
     "float": "f",
-    "double": "fd"
+    "double": "fd",
 }
 
 addrspace_id = {
@@ -40,12 +40,12 @@
     ".shared": 3,
     ".const": 4,
     ".local": 5,
-    ".param": 101
+    ".param": 101,
 }
 
 
 def gen_load_tests():
-  load_template = """
+    load_template = """
 define ${type} @${testname}(${type} addrspace(${asid})* %ptr) {
 ; CHECK: ${testname}
 ; CHECK_P32: ld${_volatile}${_volatile_as}.${ptx_type} %${ptx_reg}{{[0-9]+}}, [%r{{[0-9]+}}]
@@ -56,51 +56,52 @@ def gen_load_tests():
   ret ${type} %a
 }
 """
-  for op_type, volatile, space in product(
-      ["i8", "i16", "i32", "i64", "half", "float", "double", "<2 x half>"],
-      [True, False],  # volatile
-      ["", ".shared", ".global", ".const", ".local", ".param"]):
-
-    # Volatile is only supported for global, shared and generic.
-    if volatile and not space in ["", ".global", ".shared"]:
-      continue
-
-    # Volatile is only supported for global, shared and generic.
-    # All other volatile accesses are done in generic AS.
-    if volatile and not space in ["", ".global", ".shared"]:
-      volatile_as = ""
-    else:
-      volatile_as = space
-
-    params = {
-        "type": op_type,
-        "volatile": "volatile" if volatile else "",
-        "_volatile": ".volatile" if volatile else "",
-        "_volatile_as": volatile_as,
-        "_space": space,
-        "ptx_reg": llvm_type_to_ptx_reg[op_type],
-        "ptx_type": llvm_type_to_ptx_type[op_type],
-        "asid": addrspace_id[space],
-    }
-
-    testname = \
-      Template("ld_${_volatile}${_space}.${ptx_type}").substitute(params)
-    params["testname"] = testname.replace(".", "_")
-
-    # LLVM does not accept "addrspacecast Type* addrspace(0) to Type*", so we
-    # need to avoid it for generic pointer tests.
-    if space:
-      generic_ptr_template = ("addrspacecast ${type} addrspace(${asid})* %ptr "
-                              "to ${type}*")
-    else:
-      generic_ptr_template = "select i1 true, ${type}* %ptr, ${type}* %ptr"
-    params["generic_ptr"] = Template(generic_ptr_template).substitute(params)
-
-    print(Template(load_template).substitute(params))
+    for op_type, volatile, space in product(
+        ["i8", "i16", "i32", "i64", "half", "float", "double", "<2 x half>"],
+        [True, False],  # volatile
+        ["", ".shared", ".global", ".const", ".local", ".param"],
+    ):
+
+        # Volatile is only supported for global, shared and generic.
+        if volatile and not space in ["", ".global", ".shared"]:
+            continue
+
+        # Volatile is only supported for global, shared and generic.
+        # All other volatile accesses are done in generic AS.
+        if volatile and not space in ["", ".global", ".shared"]:
+            volatile_as = ""
+        else:
+            volatile_as = space
+
+        params = {
+            "type": op_type,
+            "volatile": "volatile" if volatile else "",
+            "_volatile": ".volatile" if volatile else "",
+            "_volatile_as": volatile_as,
+            "_space": space,
+            "ptx_reg": llvm_type_to_ptx_reg[op_type],
+            "ptx_type": llvm_type_to_ptx_type[op_type],
+            "asid": addrspace_id[space],
+        }
+
+        testname = Template("ld_${_volatile}${_space}.${ptx_type}").substitute(params)
+        params["testname"] = testname.replace(".", "_")
+
+        # LLVM does not accept "addrspacecast Type* addrspace(0) to Type*", so we
+        # need to avoid it for generic pointer tests.
+        if space:
+            generic_ptr_template = (
+                "addrspacecast ${type} addrspace(${asid})* %ptr " "to ${type}*"
+            )
+        else:
+            generic_ptr_template = "select i1 true, ${type}* %ptr, ${type}* %ptr"
+        params["generic_ptr"] = Template(generic_ptr_template).substitute(params)
+
+        print(Template(load_template).substitute(params))
 
 
 def main():
-  gen_load_tests()
+    gen_load_tests()
 
 
 main()

diff  --git a/llvm/test/CodeGen/NVPTX/surf-tex.py b/llvm/test/CodeGen/NVPTX/surf-tex.py
index 4e239ae7b4f48..d63cfc521117d 100644
--- a/llvm/test/CodeGen/NVPTX/surf-tex.py
+++ b/llvm/test/CodeGen/NVPTX/surf-tex.py
@@ -28,213 +28,227 @@
 import textwrap
 from itertools import product
 
+
 def get_llvm_geom(geom_ptx):
-  geom = {
-    "1d"    : "1d",
-    "2d"    : "2d",
-    "3d"    : "3d",
-    "a1d"   : "1d.array",
-    "a2d"   : "2d.array",
-    "cube"  : "cube",
-    "acube" : "cube.array"
-  }
-  return geom[geom_ptx]
+    geom = {
+        "1d": "1d",
+        "2d": "2d",
+        "3d": "3d",
+        "a1d": "1d.array",
+        "a2d": "2d.array",
+        "cube": "cube",
+        "acube": "cube.array",
+    }
+    return geom[geom_ptx]
+
 
 def get_ptx_reg(ty):
-  reg = {
-    "b8"  : "%rs{{[0-9]+}}",
-    "b16" : "%rs{{[0-9]+}}",
-    "b32" : "%r{{[0-9]+}}",
-    "b64" : "%rd{{[0-9]+}}",
-    "f32" : "%f{{[0-9]+}}",
-    "u32" : "%r{{[0-9]+}}",
-    "s32" : "%r{{[0-9]+}}"
-  }
-  return reg[ty]
+    reg = {
+        "b8": "%rs{{[0-9]+}}",
+        "b16": "%rs{{[0-9]+}}",
+        "b32": "%r{{[0-9]+}}",
+        "b64": "%rd{{[0-9]+}}",
+        "f32": "%f{{[0-9]+}}",
+        "u32": "%r{{[0-9]+}}",
+        "s32": "%r{{[0-9]+}}",
+    }
+    return reg[ty]
+
 
 def get_ptx_vec_reg(vec, ty):
-  vec_reg = {
-    ""   : "{{{reg}}}",
-    "v2" : "{{{reg}, {reg}}}",
-    "v4" : "{{{reg}, {reg}, {reg}, {reg}}}"
-  }
-  return vec_reg[vec].format(reg=get_ptx_reg(ty))
+    vec_reg = {
+        "": "{{{reg}}}",
+        "v2": "{{{reg}, {reg}}}",
+        "v4": "{{{reg}, {reg}, {reg}, {reg}}}",
+    }
+    return vec_reg[vec].format(reg=get_ptx_reg(ty))
+
 
 def get_llvm_type(ty):
-  if ty[0] in ("b", "s", "u"):
-    return "i" + ty[1:]
-  if ty == "f16":
-    return "half"
-  if ty == "f32":
-    return "float"
-  raise RuntimeError("invalid type: " + ty)
+    if ty[0] in ("b", "s", "u"):
+        return "i" + ty[1:]
+    if ty == "f16":
+        return "half"
+    if ty == "f32":
+        return "float"
+    raise RuntimeError("invalid type: " + ty)
+
 
 def get_llvm_vec_type(vec, ty_ptx):
-  ty = get_llvm_type(ty_ptx)
+    ty = get_llvm_type(ty_ptx)
 
-  # i8 is passed as i16, same as in PTX
-  if ty == "i8":
-    ty = "i16"
+    # i8 is passed as i16, same as in PTX
+    if ty == "i8":
+        ty = "i16"
+
+    vec_ty = {
+        "": "{ty}",
+        "v2": "{{ {ty}, {ty} }}",
+        "v4": "{{ {ty}, {ty}, {ty}, {ty} }}",
+    }
+    return vec_ty[vec].format(ty=ty)
 
-  vec_ty = {
-    ""   : "{ty}",
-    "v2" : "{{ {ty}, {ty} }}",
-    "v4" : "{{ {ty}, {ty}, {ty}, {ty} }}"
-  }
-  return vec_ty[vec].format(ty=ty)
 
 def get_llvm_value(vec, ty_ptx):
-  ty = get_llvm_type(ty_ptx)
+    ty = get_llvm_type(ty_ptx)
 
-  # i8 is passed as i16, same as in PTX
-  if ty == "i8":
-    ty = "i16"
+    # i8 is passed as i16, same as in PTX
+    if ty == "i8":
+        ty = "i16"
+
+    value = {
+        "": "{ty} %v1",
+        "v2": "{ty} %v1, {ty} %v2",
+        "v4": "{ty} %v1, {ty} %v2, {ty} %v3, {ty} %v4",
+    }
+    return value[vec].format(ty=ty)
 
-  value = {
-    ""   : "{ty} %v1",
-    "v2" : "{ty} %v1, {ty} %v2",
-    "v4" : "{ty} %v1, {ty} %v2, {ty} %v3, {ty} %v4"
-  }
-  return value[vec].format(ty=ty)
 
 def get_llvm_value_type(vec, ty_ptx):
-  ty = get_llvm_type(ty_ptx)
+    ty = get_llvm_type(ty_ptx)
 
-  # i8 is passed as i16, same as in PTX
-  if ty == "i8":
-    ty = "i16"
+    # i8 is passed as i16, same as in PTX
+    if ty == "i8":
+        ty = "i16"
+
+    value = {"": "{ty}", "v2": "{ty}, {ty}", "v4": "{ty}, {ty}, {ty}, {ty}"}
+    return value[vec].format(ty=ty)
 
-  value = {
-    ""   : "{ty}",
-    "v2" : "{ty}, {ty}",
-    "v4" : "{ty}, {ty}, {ty}, {ty}"
-  }
-  return value[vec].format(ty=ty)
 
 def gen_triple(target):
-  if target == "cuda":
-    print("target triple = \"nvptx64-unknown-cuda\"\n")
-  elif target == "nvcl":
-    print("target triple = \"nvptx64-unknown-nvcl\"\n")
-  else:
-    raise RuntimeError("invalid target: " + target)
+    if target == "cuda":
+        print('target triple = "nvptx64-unknown-cuda"\n')
+    elif target == "nvcl":
+        print('target triple = "nvptx64-unknown-nvcl"\n')
+    else:
+        raise RuntimeError("invalid target: " + target)
+
 
 def gen_globals(target, surf_name, tex_name, sampler_name):
-  print("declare i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)*)")
-  print("; CHECK: .global .surfref {}".format(surf_name))
-  print("; CHECK: .global .texref {}".format(tex_name))
-  print("@{} = internal addrspace(1) global i64 0, align 8".format(surf_name))
-  print("@{} = internal addrspace(1) global i64 1, align 8".format(tex_name))
-  generated_metadata = [
-    "!{{i64 addrspace(1)* @{}, !\"surface\", i32 1}}".format(surf_name),
-    "!{{i64 addrspace(1)* @{}, !\"texture\", i32 1}}".format(tex_name),
-  ]
-
-  if not is_unified(target):
-    print("; CHECK: .global .samplerref {}".format(sampler_name))
-    print("@{} = internal addrspace(1) global i64 1, align 8".format(
-      sampler_name))
-    generated_metadata.append(
-      "!{{i64 addrspace(1)* @{}, !\"sampler\", i32 1}}".format(sampler_name))
-
-  return generated_metadata
+    print("declare i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)*)")
+    print("; CHECK: .global .surfref {}".format(surf_name))
+    print("; CHECK: .global .texref {}".format(tex_name))
+    print("@{} = internal addrspace(1) global i64 0, align 8".format(surf_name))
+    print("@{} = internal addrspace(1) global i64 1, align 8".format(tex_name))
+    generated_metadata = [
+        '!{{i64 addrspace(1)* @{}, !"surface", i32 1}}'.format(surf_name),
+        '!{{i64 addrspace(1)* @{}, !"texture", i32 1}}'.format(tex_name),
+    ]
+
+    if not is_unified(target):
+        print("; CHECK: .global .samplerref {}".format(sampler_name))
+        print("@{} = internal addrspace(1) global i64 1, align 8".format(sampler_name))
+        generated_metadata.append(
+            '!{{i64 addrspace(1)* @{}, !"sampler", i32 1}}'.format(sampler_name)
+        )
+
+    return generated_metadata
+
 
 def gen_metadata(metadata):
-  md_values = ["!{}".format(i) for i in range(len(metadata))]
-  print("!nvvm.annotations = !{{{values}}}".format(values=(", ".join(md_values))))
-  for i, md in enumerate(metadata):
-    print("!{} = {}".format(i, md))
+    md_values = ["!{}".format(i) for i in range(len(metadata))]
+    print("!nvvm.annotations = !{{{values}}}".format(values=(", ".join(md_values))))
+    for i, md in enumerate(metadata):
+        print("!{} = {}".format(i, md))
+
 
 def get_llvm_surface_access(geom_ptx):
-  access = {
-    "1d"  : "i32 %x",
-    "2d"  : "i32 %x, i32 %y",
-    "3d"  : "i32 %x, i32 %y, i32 %z",
-    "a1d" : "i32 %l, i32 %x",
-    "a2d" : "i32 %l, i32 %x, i32 %y",
-  }
-  return access[geom_ptx]
+    access = {
+        "1d": "i32 %x",
+        "2d": "i32 %x, i32 %y",
+        "3d": "i32 %x, i32 %y, i32 %z",
+        "a1d": "i32 %l, i32 %x",
+        "a2d": "i32 %l, i32 %x, i32 %y",
+    }
+    return access[geom_ptx]
+
 
 def get_llvm_surface_access_type(geom_ptx):
-  access_ty = {
-    "1d"  : "i32",
-    "2d"  : "i32, i32",
-    "3d"  : "i32, i32, i32",
-    "a1d" : "i32, i32",
-    "a2d" : "i32, i32, i32",
-  }
-  return access_ty[geom_ptx]
+    access_ty = {
+        "1d": "i32",
+        "2d": "i32, i32",
+        "3d": "i32, i32, i32",
+        "a1d": "i32, i32",
+        "a2d": "i32, i32, i32",
+    }
+    return access_ty[geom_ptx]
+
 
 def get_ptx_surface_access(geom_ptx):
-  """
-  Operand b is a scalar or singleton tuple for 1d surfaces; is a
-  two-element vector for 2d surfaces; and is a four-element vector
-  for 3d surfaces, where the fourth element is ignored. Coordinate
-  elements are of type .s32.
-
-  For 1d surface arrays, operand b has type .v2.b32. The first
-  element is interpreted as an unsigned integer index (.u32) into
-  the surface array, and the second element is interpreted as a 1d
-  surface coordinate of type .s32.
-
-  For 2d surface arrays, operand b has type .v4.b32. The first
-  element is interpreted as an unsigned integer index (.u32) into
-  the surface array, and the next two elements are interpreted as 2d
-  surface coordinates of type .s32. The fourth element is ignored.
-  """
-  access_reg = {
-    "1d"  : "{%r{{[0-9]}}}",
-    "2d"  : "{%r{{[0-9]}}, %r{{[0-9]}}}",
-    "3d"  : "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
-    "a1d" : "{%r{{[0-9]}}, %r{{[0-9]}}}",
-    "a2d" : "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
-  }
-  return access_reg[geom_ptx]
+    """
+    Operand b is a scalar or singleton tuple for 1d surfaces; is a
+    two-element vector for 2d surfaces; and is a four-element vector
+    for 3d surfaces, where the fourth element is ignored. Coordinate
+    elements are of type .s32.
+
+    For 1d surface arrays, operand b has type .v2.b32. The first
+    element is interpreted as an unsigned integer index (.u32) into
+    the surface array, and the second element is interpreted as a 1d
+    surface coordinate of type .s32.
+
+    For 2d surface arrays, operand b has type .v4.b32. The first
+    element is interpreted as an unsigned integer index (.u32) into
+    the surface array, and the next two elements are interpreted as 2d
+    surface coordinates of type .s32. The fourth element is ignored.
+    """
+    access_reg = {
+        "1d": "{%r{{[0-9]}}}",
+        "2d": "{%r{{[0-9]}}, %r{{[0-9]}}}",
+        "3d": "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
+        "a1d": "{%r{{[0-9]}}, %r{{[0-9]}}}",
+        "a2d": "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
+    }
+    return access_reg[geom_ptx]
+
 
 def get_ptx_surface(target):
-  # With 'cuda' environment surface is copied with ld.param, so the
-  # instruction uses a register. For 'nvcl' the instruction uses the
-  # parameter directly.
-  if target == "cuda":
-    return "%rd{{[0-9]+}}"
-  elif target == "nvcl":
-    return "test_{{.*}}_param_0"
-  raise RuntimeError("invalid target: " + target)
+    # With 'cuda' environment surface is copied with ld.param, so the
+    # instruction uses a register. For 'nvcl' the instruction uses the
+    # parameter directly.
+    if target == "cuda":
+        return "%rd{{[0-9]+}}"
+    elif target == "nvcl":
+        return "test_{{.*}}_param_0"
+    raise RuntimeError("invalid target: " + target)
+
 
 def get_surface_metadata(target, fun_ty, fun_name, has_surface_param):
-  metadata = []
+    metadata = []
+
+    md_kernel = '!{{{fun_ty} @{fun_name}, !"kernel", i32 1}}'.format(
+        fun_ty=fun_ty, fun_name=fun_name
+    )
+    metadata.append(md_kernel)
 
-  md_kernel = "!{{{fun_ty} @{fun_name}, !\"kernel\", i32 1}}".format(
-    fun_ty=fun_ty, fun_name=fun_name)
-  metadata.append(md_kernel)
+    if target == "cuda":
+        # When a parameter is lowered as a .surfref, it still has the
+        # corresponding ld.param.u64, which is illegal. Do not emit the
+        # metadata to keep the parameter as .b64 instead.
+        has_surface_param = False
 
-  if target == "cuda":
-    # When a parameter is lowered as a .surfref, it still has the
-    # corresponding ld.param.u64, which is illegal. Do not emit the
-    # metadata to keep the parameter as .b64 instead.
-    has_surface_param = False
+    if has_surface_param:
+        md_surface = '!{{{fun_ty} @{fun_name}, !"rdwrimage", i32 0}}'.format(
+            fun_ty=fun_ty, fun_name=fun_name
+        )
+        metadata.append(md_surface)
 
-  if has_surface_param:
-    md_surface = "!{{{fun_ty} @{fun_name}, !\"rdwrimage\", i32 0}}".format(
-      fun_ty=fun_ty, fun_name=fun_name)
-    metadata.append(md_surface)
+    return metadata
 
-  return metadata
 
 def gen_suld_tests(target, global_surf):
-  """
-  PTX spec s9.7.10.1. Surface Instructions:
+    """
+    PTX spec s9.7.10.1. Surface Instructions:
 
-  suld.b.geom{.cop}.vec.dtype.clamp  d, [a, b];  // unformatted
+    suld.b.geom{.cop}.vec.dtype.clamp  d, [a, b];  // unformatted
 
-  .geom  = { .1d, .2d, .3d, .a1d, .a2d };
-  .cop   = { .ca, .cg, .cs, .cv };               // cache operation
-  .vec   = { none, .v2, .v4 };
-  .dtype = { .b8 , .b16, .b32, .b64 };
-  .clamp = { .trap, .clamp, .zero };
-  """
+    .geom  = { .1d, .2d, .3d, .a1d, .a2d };
+    .cop   = { .ca, .cg, .cs, .cv };               // cache operation
+    .vec   = { none, .v2, .v4 };
+    .dtype = { .b8 , .b16, .b32, .b64 };
+    .clamp = { .trap, .clamp, .zero };
+    """
 
-  template = """
+    template = """
   declare ${retty} @${intrinsic}(i64 %s, ${access});
 
   ; CHECK-LABEL: .entry ${test_name}_param
@@ -256,75 +270,79 @@ def gen_suld_tests(target, global_surf):
   }
   """
 
-  generated_items = []
-  generated_metadata = []
-  # FIXME: "cop" is missing
-  for geom, vec, dtype, clamp in product(
-      ["1d", "2d", "3d", "a1d", "a2d"],
-      ["", "v2", "v4"],
-      ["b8" , "b16", "b32", "b64"],
-      ["trap", "clamp", "zero"]):
-
-    if vec == "v4" and dtype == "b64":
-      continue
-
-    test_name = "test_suld_" + geom + vec + dtype + clamp
-
-    params = {
-      "test_name"   : test_name,
-
-      "intrinsic"   : "llvm.nvvm.suld.{geom}.{dtype}.{clamp}".format(
-        geom=get_llvm_geom(geom),
-        dtype=(vec + get_llvm_type(dtype)),
-        clamp=clamp),
-      "retty"       : get_llvm_vec_type(vec, dtype),
-      "access"      : get_llvm_surface_access(geom),
-      "global_surf" : global_surf,
-
-      "instruction" : "suld.b.{geom}{vec}.{dtype}.{clamp}".format(
-        geom=geom,
-        vec=("" if vec == "" else "." + vec),
-        dtype=dtype,
-        clamp=clamp),
-      "reg_ret"     : get_ptx_vec_reg(vec, dtype),
-      "reg_surf"    : get_ptx_surface(target),
-      "reg_access"  : get_ptx_surface_access(geom),
-    }
-    gen_test(template, params)
-    generated_items.append((params["intrinsic"], params["instruction"]))
-
-    fun_name = test_name + "_param";
-    fun_ty = "void (i64, {retty}*, {access_ty})*".format(
-      retty=params["retty"],
-      access_ty=get_llvm_surface_access_type(geom))
-    generated_metadata += get_surface_metadata(
-      target, fun_ty, fun_name, has_surface_param=True)
-
-    fun_name = test_name + "_global";
-    fun_ty = "void ({retty}*, {access_ty})*".format(
-      retty=params["retty"],
-      access_ty=get_llvm_surface_access_type(geom))
-    generated_metadata += get_surface_metadata(
-      target, fun_ty, fun_name, has_surface_param=False)
+    generated_items = []
+    generated_metadata = []
+    # FIXME: "cop" is missing
+    for geom, vec, dtype, clamp in product(
+        ["1d", "2d", "3d", "a1d", "a2d"],
+        ["", "v2", "v4"],
+        ["b8", "b16", "b32", "b64"],
+        ["trap", "clamp", "zero"],
+    ):
+
+        if vec == "v4" and dtype == "b64":
+            continue
+
+        test_name = "test_suld_" + geom + vec + dtype + clamp
+
+        params = {
+            "test_name": test_name,
+            "intrinsic": "llvm.nvvm.suld.{geom}.{dtype}.{clamp}".format(
+                geom=get_llvm_geom(geom),
+                dtype=(vec + get_llvm_type(dtype)),
+                clamp=clamp,
+            ),
+            "retty": get_llvm_vec_type(vec, dtype),
+            "access": get_llvm_surface_access(geom),
+            "global_surf": global_surf,
+            "instruction": "suld.b.{geom}{vec}.{dtype}.{clamp}".format(
+                geom=geom,
+                vec=("" if vec == "" else "." + vec),
+                dtype=dtype,
+                clamp=clamp,
+            ),
+            "reg_ret": get_ptx_vec_reg(vec, dtype),
+            "reg_surf": get_ptx_surface(target),
+            "reg_access": get_ptx_surface_access(geom),
+        }
+        gen_test(template, params)
+        generated_items.append((params["intrinsic"], params["instruction"]))
+
+        fun_name = test_name + "_param"
+        fun_ty = "void (i64, {retty}*, {access_ty})*".format(
+            retty=params["retty"], access_ty=get_llvm_surface_access_type(geom)
+        )
+        generated_metadata += get_surface_metadata(
+            target, fun_ty, fun_name, has_surface_param=True
+        )
+
+        fun_name = test_name + "_global"
+        fun_ty = "void ({retty}*, {access_ty})*".format(
+            retty=params["retty"], access_ty=get_llvm_surface_access_type(geom)
+        )
+        generated_metadata += get_surface_metadata(
+            target, fun_ty, fun_name, has_surface_param=False
+        )
+
+    return generated_items, generated_metadata
 
-  return generated_items, generated_metadata
 
 def gen_sust_tests(target, global_surf):
-  """
-  PTX spec s9.7.10.2. Surface Instructions
+    """
+    PTX spec s9.7.10.2. Surface Instructions
 
-  sust.b.{1d,2d,3d}{.cop}.vec.ctype.clamp  [a, b], c;  // unformatted
-  sust.p.{1d,2d,3d}.vec.b32.clamp          [a, b], c;  // formatted
+    sust.b.{1d,2d,3d}{.cop}.vec.ctype.clamp  [a, b], c;  // unformatted
+    sust.p.{1d,2d,3d}.vec.b32.clamp          [a, b], c;  // formatted
 
-  sust.b.{a1d,a2d}{.cop}.vec.ctype.clamp   [a, b], c;  // unformatted
+    sust.b.{a1d,a2d}{.cop}.vec.ctype.clamp   [a, b], c;  // unformatted
 
-  .cop   = { .wb, .cg, .cs, .wt };                     // cache operation
-  .vec   = { none, .v2, .v4 };
-  .ctype = { .b8 , .b16, .b32, .b64 };
-  .clamp = { .trap, .clamp, .zero };
-  """
+    .cop   = { .wb, .cg, .cs, .wt };                     // cache operation
+    .vec   = { none, .v2, .v4 };
+    .ctype = { .b8 , .b16, .b32, .b64 };
+    .clamp = { .trap, .clamp, .zero };
+    """
 
-  template = """
+    template = """
   declare void @${intrinsic}(i64 %s, ${access}, ${value});
 
   ; CHECK-LABEL: .entry ${test_name}_param
@@ -344,226 +362,248 @@ def gen_sust_tests(target, global_surf):
   }
   """
 
-  generated_items = []
-  generated_metadata = []
-  # FIXME: "cop" is missing
-  for fmt, geom, vec, ctype, clamp in product(
-      ["b", "p"],
-      ["1d", "2d", "3d", "a1d", "a2d"],
-      ["", "v2", "v4"],
-      ["b8" , "b16", "b32", "b64"],
-      ["trap", "clamp", "zero"]):
-
-    if fmt == "p" and geom[0] == "a":
-      continue
-    if fmt == "p" and ctype != "b32":
-      continue
-    if vec == "v4" and ctype == "b64":
-      continue
-
-    # FIXME: these intrinsics are missing, but at least one of them is
-    # listed in the PTX spec: sust.p.{1d,2d,3d}.vec.b32.clamp
-    if fmt == "p" and clamp != "trap":
-      continue
-
-    test_name = "test_sust_" + fmt + geom + vec + ctype + clamp
-
-    params = {
-      "test_name"   : test_name,
-
-      "intrinsic" : "llvm.nvvm.sust.{fmt}.{geom}.{ctype}.{clamp}".format(
-        fmt=fmt,
-        geom=get_llvm_geom(geom),
-        ctype=(vec + get_llvm_type(ctype)),
-        clamp=clamp),
-      "access"      : get_llvm_surface_access(geom),
-      "value"       : get_llvm_value(vec, ctype),
-      "global_surf" : global_surf,
-
-      "instruction" : "sust.{fmt}.{geom}{vec}.{ctype}.{clamp}".format(
-        fmt=fmt,
-        geom=geom,
-        vec=("" if vec == "" else "." + vec),
-        ctype=ctype,
-        clamp=clamp),
-      "reg_value"   : get_ptx_vec_reg(vec, ctype),
-      "reg_surf"    : get_ptx_surface(target),
-      "reg_access"  : get_ptx_surface_access(geom)
-    }
-    gen_test(template, params)
-    generated_items.append((params["intrinsic"], params["instruction"]))
-
-    fun_name = test_name + "_param";
-    fun_ty = "void (i64, {value_ty}, {access_ty})*".format(
-      value_ty=get_llvm_value_type(vec, ctype),
-      access_ty=get_llvm_surface_access_type(geom))
-    generated_metadata += get_surface_metadata(
-      target, fun_ty, fun_name, has_surface_param=True)
+    generated_items = []
+    generated_metadata = []
+    # FIXME: "cop" is missing
+    for fmt, geom, vec, ctype, clamp in product(
+        ["b", "p"],
+        ["1d", "2d", "3d", "a1d", "a2d"],
+        ["", "v2", "v4"],
+        ["b8", "b16", "b32", "b64"],
+        ["trap", "clamp", "zero"],
+    ):
+
+        if fmt == "p" and geom[0] == "a":
+            continue
+        if fmt == "p" and ctype != "b32":
+            continue
+        if vec == "v4" and ctype == "b64":
+            continue
+
+        # FIXME: these intrinsics are missing, but at least one of them is
+        # listed in the PTX spec: sust.p.{1d,2d,3d}.vec.b32.clamp
+        if fmt == "p" and clamp != "trap":
+            continue
+
+        test_name = "test_sust_" + fmt + geom + vec + ctype + clamp
+
+        params = {
+            "test_name": test_name,
+            "intrinsic": "llvm.nvvm.sust.{fmt}.{geom}.{ctype}.{clamp}".format(
+                fmt=fmt,
+                geom=get_llvm_geom(geom),
+                ctype=(vec + get_llvm_type(ctype)),
+                clamp=clamp,
+            ),
+            "access": get_llvm_surface_access(geom),
+            "value": get_llvm_value(vec, ctype),
+            "global_surf": global_surf,
+            "instruction": "sust.{fmt}.{geom}{vec}.{ctype}.{clamp}".format(
+                fmt=fmt,
+                geom=geom,
+                vec=("" if vec == "" else "." + vec),
+                ctype=ctype,
+                clamp=clamp,
+            ),
+            "reg_value": get_ptx_vec_reg(vec, ctype),
+            "reg_surf": get_ptx_surface(target),
+            "reg_access": get_ptx_surface_access(geom),
+        }
+        gen_test(template, params)
+        generated_items.append((params["intrinsic"], params["instruction"]))
+
+        fun_name = test_name + "_param"
+        fun_ty = "void (i64, {value_ty}, {access_ty})*".format(
+            value_ty=get_llvm_value_type(vec, ctype),
+            access_ty=get_llvm_surface_access_type(geom),
+        )
+        generated_metadata += get_surface_metadata(
+            target, fun_ty, fun_name, has_surface_param=True
+        )
+
+        fun_name = test_name + "_global"
+        fun_ty = "void ({value_ty}, {access_ty})*".format(
+            value_ty=get_llvm_value_type(vec, ctype),
+            access_ty=get_llvm_surface_access_type(geom),
+        )
+        generated_metadata += get_surface_metadata(
+            target, fun_ty, fun_name, has_surface_param=False
+        )
+
+    return generated_items, generated_metadata
 
-    fun_name = test_name + "_global";
-    fun_ty = "void ({value_ty}, {access_ty})*".format(
-      value_ty=get_llvm_value_type(vec, ctype),
-      access_ty=get_llvm_surface_access_type(geom))
-    generated_metadata += get_surface_metadata(
-      target, fun_ty, fun_name, has_surface_param=False)
-
-  return generated_items, generated_metadata
 
 def is_unified(target):
-  """
-  PTX has two modes of operation. In the unified mode, texture and
-  sampler information is accessed through a single .texref handle. In
-  the independent mode, texture and sampler information each have their
-  own handle, allowing them to be defined separately and combined at the
-  site of usage in the program.
+    """
+    PTX has two modes of operation. In the unified mode, texture and
+    sampler information is accessed through a single .texref handle. In
+    the independent mode, texture and sampler information each have their
+    own handle, allowing them to be defined separately and combined at the
+    site of usage in the program.
+
+    """
+    return target == "cuda"
 
-  """
-  return target == "cuda"
 
 def get_llvm_texture_access(geom_ptx, ctype, mipmap):
-  geom_access = {
-    "1d"    : "{ctype} %x",
-    "2d"    : "{ctype} %x, {ctype} %y",
-    "3d"    : "{ctype} %x, {ctype} %y, {ctype} %z",
-    "cube"  : "{ctype} %s, {ctype} %t, {ctype} %r",
-    "a1d"   : "i32 %l, {ctype} %x",
-    "a2d"   : "i32 %l, {ctype} %x, {ctype} %y",
-    "acube" : "i32 %l, {ctype} %s, {ctype} %t, {ctype} %r",
-  }
+    geom_access = {
+        "1d": "{ctype} %x",
+        "2d": "{ctype} %x, {ctype} %y",
+        "3d": "{ctype} %x, {ctype} %y, {ctype} %z",
+        "cube": "{ctype} %s, {ctype} %t, {ctype} %r",
+        "a1d": "i32 %l, {ctype} %x",
+        "a2d": "i32 %l, {ctype} %x, {ctype} %y",
+        "acube": "i32 %l, {ctype} %s, {ctype} %t, {ctype} %r",
+    }
 
-  access = geom_access[geom_ptx]
+    access = geom_access[geom_ptx]
 
-  if mipmap == "level":
-    access += ", {ctype} %lvl"
-  elif mipmap == "grad":
-    if geom_ptx in ("1d", "a1d"):
-      access += ", {ctype} %dpdx1, {ctype} %dpdy1"
-    elif geom_ptx in ("2d", "a2d"):
-      access += (", {ctype} %dpdx1, {ctype} %dpdx2" +
-                 ", {ctype} %dpdy1, {ctype} %dpdy2")
-    else:
-      access += (", {ctype} %dpdx1, {ctype} %dpdx2, {ctype} %dpdx3" +
-                 ", {ctype} %dpdy1, {ctype} %dpdy2, {ctype} %dpdy3")
+    if mipmap == "level":
+        access += ", {ctype} %lvl"
+    elif mipmap == "grad":
+        if geom_ptx in ("1d", "a1d"):
+            access += ", {ctype} %dpdx1, {ctype} %dpdy1"
+        elif geom_ptx in ("2d", "a2d"):
+            access += (
+                ", {ctype} %dpdx1, {ctype} %dpdx2" + ", {ctype} %dpdy1, {ctype} %dpdy2"
+            )
+        else:
+            access += (
+                ", {ctype} %dpdx1, {ctype} %dpdx2, {ctype} %dpdx3"
+                + ", {ctype} %dpdy1, {ctype} %dpdy2, {ctype} %dpdy3"
+            )
+
+    return access.format(ctype=get_llvm_type(ctype))
 
-  return access.format(ctype=get_llvm_type(ctype))
 
 def get_llvm_texture_access_type(geom_ptx, ctype, mipmap):
-  geom_access = {
-    "1d"    : "{ctype}",
-    "2d"    : "{ctype}, {ctype}",
-    "3d"    : "{ctype}, {ctype}, {ctype}",
-    "cube"  : "{ctype}, {ctype}, {ctype}",
-    "a1d"   : "i32, {ctype}",
-    "a2d"   : "i32, {ctype}, {ctype}",
-    "acube" : "i32, {ctype}, {ctype}, {ctype}",
-  }
+    geom_access = {
+        "1d": "{ctype}",
+        "2d": "{ctype}, {ctype}",
+        "3d": "{ctype}, {ctype}, {ctype}",
+        "cube": "{ctype}, {ctype}, {ctype}",
+        "a1d": "i32, {ctype}",
+        "a2d": "i32, {ctype}, {ctype}",
+        "acube": "i32, {ctype}, {ctype}, {ctype}",
+    }
 
-  access = geom_access[geom_ptx]
+    access = geom_access[geom_ptx]
 
-  if mipmap == "level":
-    access += ", {ctype}"
-  elif mipmap == "grad":
-    if geom_ptx in ("1d", "a1d"):
-      access += ", {ctype}, {ctype}"
-    elif geom_ptx in ("2d", "a2d"):
-      access += (", {ctype}, {ctype}, {ctype}, {ctype}")
-    else:
-      access += (", {ctype}, {ctype}, {ctype}" +
-                 ", {ctype}, {ctype}, {ctype}")
+    if mipmap == "level":
+        access += ", {ctype}"
+    elif mipmap == "grad":
+        if geom_ptx in ("1d", "a1d"):
+            access += ", {ctype}, {ctype}"
+        elif geom_ptx in ("2d", "a2d"):
+            access += ", {ctype}, {ctype}, {ctype}, {ctype}"
+        else:
+            access += ", {ctype}, {ctype}, {ctype}" + ", {ctype}, {ctype}, {ctype}"
+
+    return access.format(ctype=get_llvm_type(ctype))
 
-  return access.format(ctype=get_llvm_type(ctype))
 
 def get_ptx_texture_access(geom_ptx, ctype):
-  access_reg = {
-    "1d"    : "{{{ctype_reg}}}",
-    "2d"    : "{{{ctype_reg}, {ctype_reg}}}",
-    "3d"    : "{{{ctype_reg}, {ctype_reg}, {ctype_reg}, {ctype_reg}}}",
-    "a1d"   : "{{{b32_reg}, {ctype_reg}}}",
-    "a2d"   : "{{{b32_reg}, {ctype_reg}, {ctype_reg}, {ctype_reg}}}",
-    "cube"  : "{{{f32_reg}, {f32_reg}, {f32_reg}, {f32_reg}}}",
-    "acube" : "{{{b32_reg}, {f32_reg}, {f32_reg}, {f32_reg}}}",
-  }
-  return access_reg[geom_ptx].format(ctype_reg=get_ptx_reg(ctype),
-                                     b32_reg=get_ptx_reg("b32"),
-                                     f32_reg=get_ptx_reg("f32"))
+    access_reg = {
+        "1d": "{{{ctype_reg}}}",
+        "2d": "{{{ctype_reg}, {ctype_reg}}}",
+        "3d": "{{{ctype_reg}, {ctype_reg}, {ctype_reg}, {ctype_reg}}}",
+        "a1d": "{{{b32_reg}, {ctype_reg}}}",
+        "a2d": "{{{b32_reg}, {ctype_reg}, {ctype_reg}, {ctype_reg}}}",
+        "cube": "{{{f32_reg}, {f32_reg}, {f32_reg}, {f32_reg}}}",
+        "acube": "{{{b32_reg}, {f32_reg}, {f32_reg}, {f32_reg}}}",
+    }
+    return access_reg[geom_ptx].format(
+        ctype_reg=get_ptx_reg(ctype),
+        b32_reg=get_ptx_reg("b32"),
+        f32_reg=get_ptx_reg("f32"),
+    )
+
 
 def get_ptx_texture(target):
-  # With 'cuda' environment texture/sampler are copied with ld.param,
-  # so the instruction uses registers. For 'nvcl' the instruction uses
-  # texture/sampler parameters directly.
-  if target == "cuda":
-    return "%rd{{[0-9]+}}"
-  elif target == "nvcl":
-    return "test_{{.*}}_param_0, test_{{.*}}_param_1"
-  raise RuntimeError("unknown target: " + target)
+    # With 'cuda' environment texture/sampler are copied with ld.param,
+    # so the instruction uses registers. For 'nvcl' the instruction uses
+    # texture/sampler parameters directly.
+    if target == "cuda":
+        return "%rd{{[0-9]+}}"
+    elif target == "nvcl":
+        return "test_{{.*}}_param_0, test_{{.*}}_param_1"
+    raise RuntimeError("unknown target: " + target)
+
 
 def get_llvm_global_sampler(target, global_sampler):
-  if is_unified(target):
-    return "", ""
-  else:
-    sampler_handle = "i64 %gs,"
-    get_sampler_handle = (
-      "%gs = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64" +
-      "(i64 addrspace(1)* @{})".format(global_sampler))
-    return sampler_handle, get_sampler_handle
+    if is_unified(target):
+        return "", ""
+    else:
+        sampler_handle = "i64 %gs,"
+        get_sampler_handle = (
+            "%gs = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64"
+            + "(i64 addrspace(1)* @{})".format(global_sampler)
+        )
+        return sampler_handle, get_sampler_handle
+
 
 def get_ptx_global_sampler(target, global_sampler):
-  if is_unified(target):
-    return ""
-  else:
-    return global_sampler + ","
+    if is_unified(target):
+        return ""
+    else:
+        return global_sampler + ","
+
 
 def get_texture_metadata(target, fun_ty, fun_name, has_texture_params):
-  metadata = []
+    metadata = []
 
-  md_kernel = "!{{{fun_ty} @{fun_name}, !\"kernel\", i32 1}}".format(
-    fun_ty=fun_ty, fun_name=fun_name)
-  metadata.append(md_kernel)
+    md_kernel = '!{{{fun_ty} @{fun_name}, !"kernel", i32 1}}'.format(
+        fun_ty=fun_ty, fun_name=fun_name
+    )
+    metadata.append(md_kernel)
 
-  if target == "cuda":
-    # When a parameter is lowered as a .texref, it still has the
-    # corresponding ld.param.u64, which is illegal. Do not emit the
-    # metadata to keep the parameter as .b64 instead.
-    has_texture_params = False
+    if target == "cuda":
+        # When a parameter is lowered as a .texref, it still has the
+        # corresponding ld.param.u64, which is illegal. Do not emit the
+        # metadata to keep the parameter as .b64 instead.
+        has_texture_params = False
 
-  if has_texture_params:
-    md_texture = "!{{{fun_ty} @{fun_name}, !\"rdoimage\", i32 0}}".format(
-      fun_ty=fun_ty, fun_name=fun_name)
-    metadata.append(md_texture)
+    if has_texture_params:
+        md_texture = '!{{{fun_ty} @{fun_name}, !"rdoimage", i32 0}}'.format(
+            fun_ty=fun_ty, fun_name=fun_name
+        )
+        metadata.append(md_texture)
 
-    if not is_unified(target):
-      md_sampler = "!{{{fun_ty} @{fun_name}, !\"sampler\", i32 1}}".format(
-      fun_ty=fun_ty, fun_name=fun_name)
-      metadata.append(md_sampler)
+        if not is_unified(target):
+            md_sampler = '!{{{fun_ty} @{fun_name}, !"sampler", i32 1}}'.format(
+                fun_ty=fun_ty, fun_name=fun_name
+            )
+            metadata.append(md_sampler)
+
+    return metadata
 
-  return metadata
 
 def gen_tex_tests(target, global_tex, global_sampler):
-  """
-  PTX spec s9.7.9.3. Texture Instructions
+    """
+    PTX spec s9.7.9.3. Texture Instructions
 
-  tex.geom.v4.dtype.ctype  d, [a, c] {, e} {, f};
-  tex.geom.v4.dtype.ctype  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
+    tex.geom.v4.dtype.ctype  d, [a, c] {, e} {, f};
+    tex.geom.v4.dtype.ctype  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
 
-  tex.geom.v2.f16x2.ctype  d[|p], [a, c] {, e} {, f};
-  tex.geom.v2.f16x2.ctype  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
+    tex.geom.v2.f16x2.ctype  d[|p], [a, c] {, e} {, f};
+    tex.geom.v2.f16x2.ctype  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
 
-  // mipmaps
-  tex.base.geom.v4.dtype.ctype   d[|p], [a, {b,} c] {, e} {, f};
-  tex.level.geom.v4.dtype.ctype  d[|p], [a, {b,} c], lod {, e} {, f};
-  tex.grad.geom.v4.dtype.ctype   d[|p], [a, {b,} c], dPdx, dPdy {, e} {, f};
+    // mipmaps
+    tex.base.geom.v4.dtype.ctype   d[|p], [a, {b,} c] {, e} {, f};
+    tex.level.geom.v4.dtype.ctype  d[|p], [a, {b,} c], lod {, e} {, f};
+    tex.grad.geom.v4.dtype.ctype   d[|p], [a, {b,} c], dPdx, dPdy {, e} {, f};
 
-  tex.base.geom.v2.f16x2.ctype   d[|p], [a, {b,} c] {, e} {, f};
-  tex.level.geom.v2.f16x2.ctype  d[|p], [a, {b,} c], lod {, e} {, f};
-  tex.grad.geom.v2.f16x2.ctype   d[|p], [a, {b,} c], dPdx, dPdy {, e} {, f};
+    tex.base.geom.v2.f16x2.ctype   d[|p], [a, {b,} c] {, e} {, f};
+    tex.level.geom.v2.f16x2.ctype  d[|p], [a, {b,} c], lod {, e} {, f};
+    tex.grad.geom.v2.f16x2.ctype   d[|p], [a, {b,} c], dPdx, dPdy {, e} {, f};
 
-  .geom  = { .1d, .2d, .3d, .a1d, .a2d, .cube, .acube, .2dms, .a2dms };
-  .dtype = { .u32, .s32, .f16,  .f32 };
-  .ctype = {       .s32, .f32 };          // .cube, .acube require .f32
-                                          // .2dms, .a2dms require .s32
-  """
+    .geom  = { .1d, .2d, .3d, .a1d, .a2d, .cube, .acube, .2dms, .a2dms };
+    .dtype = { .u32, .s32, .f16,  .f32 };
+    .ctype = {       .s32, .f32 };          // .cube, .acube require .f32
+                                            // .2dms, .a2dms require .s32
+    """
 
-  template = """
+    template = """
   declare ${retty} @${intrinsic}(i64 %tex, ${sampler} ${access})
 
   ; CHECK-LABEL: .entry ${test_name}_param
@@ -584,160 +624,170 @@ def gen_tex_tests(target, global_tex, global_sampler):
   }
   """
 
-  generated_items = []
-  generated_metadata = []
-  for mipmap, geom, vec, dtype, ctype in product(
-      ["", "level", "grad"],
-      ["1d", "2d", "3d", "a1d", "a2d", "cube", "acube", "2dms", "a2dms"],
-      ["v2", "v4"],
-      ["u32", "s32", "f16", "f32"],
-      ["s32", "f32"]):
-
-    # FIXME: missing intrinsics.
-    # Multi-sample textures and multi-sample texture arrays
-    # introduced in PTX ISA version 3.2.
-    if geom in ("2dms", "a2dms"):
-      continue
-
-    # FIXME: missing intrinsics? no such restriction in the PTX spec
-    if ctype == "s32" and mipmap != "":
-      continue
-
-    # FIXME: missing intrinsics?
-    if ctype == "s32" and geom in ("cube", "acube"):
-      continue
-
-    # FIXME: missing intrinsics.
-    # Support for textures returning f16 and f16x2 data introduced in
-    # PTX ISA version 4.2.
-    if vec == "v2" or dtype == "f16":
-      continue
-
-    # FIXME: missing intrinsics.
-    # Support for tex.grad.{cube, acube} introduced in PTX ISA version
-    # 4.3.
-    if mipmap == "grad" and geom in ("cube", "acube"):
-      continue
-
-    # The instruction returns a two-element vector for destination
-    # type f16x2. For all other destination types, the instruction
-    # returns a four-element vector. Coordinates may be given in
-    # either signed 32-bit integer or 32-bit floating point form.
-    if vec == "v2" and dtype != "f16":
-      continue
-
-    sampler_handle, get_sampler_handle = get_llvm_global_sampler(
-      target, global_sampler)
-
-    test_name = "test_tex_" + "".join((mipmap, geom, vec, dtype, ctype))
-    params = {
-      "test_name" : test_name,
-      "intrinsic" :
-        "llvm.nvvm.tex{unified}.{geom}{mipmap}.{vec}{dtype}.{ctype}".format(
-          unified=(".unified" if is_unified(target) else ""),
-          geom=get_llvm_geom(geom),
-          mipmap=("" if mipmap == "" else "." + mipmap),
-          vec=vec,
-          dtype=dtype,
-          ctype=ctype),
-      "global_tex": global_tex,
-      "retty"     : get_llvm_vec_type(vec, dtype),
-      "sampler"   : sampler_handle,
-      "access"    : get_llvm_texture_access(geom, ctype, mipmap),
-      "get_sampler_handle" : get_sampler_handle,
-
-      "instruction" : "tex{mipmap}.{geom}.{vec}.{dtype}.{ctype}".format(
-        mipmap=("" if mipmap == "" else "." + mipmap),
-        geom=geom,
-        vec=vec,
-        dtype=dtype,
-        ctype=ctype),
-      "ptx_ret"     : get_ptx_vec_reg(vec, dtype),
-      "ptx_tex"     : get_ptx_texture(target),
-      "ptx_access"  : get_ptx_texture_access(geom, ctype),
-      "ptx_global_sampler" : get_ptx_global_sampler(target, global_sampler),
-    }
-    gen_test(template, params)
-    generated_items.append((params["intrinsic"], params["instruction"]))
-
-    fun_name = test_name + "_param";
-    fun_ty = "void (i64, {sampler} {retty}*, {access_ty})*".format(
-      sampler=("" if is_unified(target) else "i64,"),
-      retty=params["retty"],
-      access_ty=get_llvm_texture_access_type(geom, ctype, mipmap))
-    generated_metadata += get_texture_metadata(
-      target, fun_ty, fun_name, has_texture_params=True)
-
-    fun_name = test_name + "_global";
-    fun_ty = "void ({retty}*, {access_ty})*".format(
-      retty=params["retty"],
-      access_ty=get_llvm_texture_access_type(geom, ctype, mipmap))
-    generated_metadata += get_texture_metadata(
-      target, fun_ty, fun_name, has_texture_params=False)
-
-  return generated_items, generated_metadata
+    generated_items = []
+    generated_metadata = []
+    for mipmap, geom, vec, dtype, ctype in product(
+        ["", "level", "grad"],
+        ["1d", "2d", "3d", "a1d", "a2d", "cube", "acube", "2dms", "a2dms"],
+        ["v2", "v4"],
+        ["u32", "s32", "f16", "f32"],
+        ["s32", "f32"],
+    ):
+
+        # FIXME: missing intrinsics.
+        # Multi-sample textures and multi-sample texture arrays
+        # introduced in PTX ISA version 3.2.
+        if geom in ("2dms", "a2dms"):
+            continue
+
+        # FIXME: missing intrinsics? no such restriction in the PTX spec
+        if ctype == "s32" and mipmap != "":
+            continue
+
+        # FIXME: missing intrinsics?
+        if ctype == "s32" and geom in ("cube", "acube"):
+            continue
+
+        # FIXME: missing intrinsics.
+        # Support for textures returning f16 and f16x2 data introduced in
+        # PTX ISA version 4.2.
+        if vec == "v2" or dtype == "f16":
+            continue
+
+        # FIXME: missing intrinsics.
+        # Support for tex.grad.{cube, acube} introduced in PTX ISA version
+        # 4.3.
+        if mipmap == "grad" and geom in ("cube", "acube"):
+            continue
+
+        # The instruction returns a two-element vector for destination
+        # type f16x2. For all other destination types, the instruction
+        # returns a four-element vector. Coordinates may be given in
+        # either signed 32-bit integer or 32-bit floating point form.
+        if vec == "v2" and dtype != "f16":
+            continue
+
+        sampler_handle, get_sampler_handle = get_llvm_global_sampler(
+            target, global_sampler
+        )
+
+        test_name = "test_tex_" + "".join((mipmap, geom, vec, dtype, ctype))
+        params = {
+            "test_name": test_name,
+            "intrinsic": "llvm.nvvm.tex{unified}.{geom}{mipmap}.{vec}{dtype}.{ctype}".format(
+                unified=(".unified" if is_unified(target) else ""),
+                geom=get_llvm_geom(geom),
+                mipmap=("" if mipmap == "" else "." + mipmap),
+                vec=vec,
+                dtype=dtype,
+                ctype=ctype,
+            ),
+            "global_tex": global_tex,
+            "retty": get_llvm_vec_type(vec, dtype),
+            "sampler": sampler_handle,
+            "access": get_llvm_texture_access(geom, ctype, mipmap),
+            "get_sampler_handle": get_sampler_handle,
+            "instruction": "tex{mipmap}.{geom}.{vec}.{dtype}.{ctype}".format(
+                mipmap=("" if mipmap == "" else "." + mipmap),
+                geom=geom,
+                vec=vec,
+                dtype=dtype,
+                ctype=ctype,
+            ),
+            "ptx_ret": get_ptx_vec_reg(vec, dtype),
+            "ptx_tex": get_ptx_texture(target),
+            "ptx_access": get_ptx_texture_access(geom, ctype),
+            "ptx_global_sampler": get_ptx_global_sampler(target, global_sampler),
+        }
+        gen_test(template, params)
+        generated_items.append((params["intrinsic"], params["instruction"]))
+
+        fun_name = test_name + "_param"
+        fun_ty = "void (i64, {sampler} {retty}*, {access_ty})*".format(
+            sampler=("" if is_unified(target) else "i64,"),
+            retty=params["retty"],
+            access_ty=get_llvm_texture_access_type(geom, ctype, mipmap),
+        )
+        generated_metadata += get_texture_metadata(
+            target, fun_ty, fun_name, has_texture_params=True
+        )
+
+        fun_name = test_name + "_global"
+        fun_ty = "void ({retty}*, {access_ty})*".format(
+            retty=params["retty"],
+            access_ty=get_llvm_texture_access_type(geom, ctype, mipmap),
+        )
+        generated_metadata += get_texture_metadata(
+            target, fun_ty, fun_name, has_texture_params=False
+        )
+
+    return generated_items, generated_metadata
+
 
 def get_llvm_tld4_access(geom):
-  """
-  For 2D textures, operand c specifies coordinates as a two-element,
-  32-bit floating-point vector.
-
-  For 2d texture arrays operand c is a four element, 32-bit
-  vector. The first element in operand c is interpreted as an unsigned
-  integer index (.u32) into the texture array, and the next two
-  elements are interpreted as 32-bit floating point coordinates of 2d
-  texture. The fourth element is ignored.
-
-  For cubemap textures, operand c specifies four-element vector which
-  comprises three floating-point coordinates (s, t, r) and a fourth
-  padding argument which is ignored.
-
-  [For cube arrays] The first element in operand c is interpreted as
-  an unsigned integer index (.u32) into the cubemap texture array, and
-  the remaining three elements are interpreted as floating-point
-  cubemap coordinates (s, t, r), used to lookup in the selected
-  cubemap.
-  """
-  geom_to_access = {
-    "2d"    : "float %x, float %y",
-    "a2d"   : "i32 %l, float %x, float %y",
-    "cube"  : "float %s, float %t, float %r",
-    "acube" : "i32 %l, float %s, float %t, float %r"
-  }
-  return geom_to_access[geom]
+    """
+    For 2D textures, operand c specifies coordinates as a two-element,
+    32-bit floating-point vector.
+
+    For 2d texture arrays operand c is a four element, 32-bit
+    vector. The first element in operand c is interpreted as an unsigned
+    integer index (.u32) into the texture array, and the next two
+    elements are interpreted as 32-bit floating point coordinates of 2d
+    texture. The fourth element is ignored.
+
+    For cubemap textures, operand c specifies four-element vector which
+    comprises three floating-point coordinates (s, t, r) and a fourth
+    padding argument which is ignored.
+
+    [For cube arrays] The first element in operand c is interpreted as
+    an unsigned integer index (.u32) into the cubemap texture array, and
+    the remaining three elements are interpreted as floating-point
+    cubemap coordinates (s, t, r), used to lookup in the selected
+    cubemap.
+    """
+    geom_to_access = {
+        "2d": "float %x, float %y",
+        "a2d": "i32 %l, float %x, float %y",
+        "cube": "float %s, float %t, float %r",
+        "acube": "i32 %l, float %s, float %t, float %r",
+    }
+    return geom_to_access[geom]
+
 
 def get_llvm_tld4_access_type(geom):
-  geom_to_access = {
-    "2d"    : "float, float",
-    "a2d"   : "i32, float, float",
-    "cube"  : "float, float, float",
-    "acube" : "i32, float, float, float"
-  }
-  return geom_to_access[geom]
+    geom_to_access = {
+        "2d": "float, float",
+        "a2d": "i32, float, float",
+        "cube": "float, float, float",
+        "acube": "i32, float, float, float",
+    }
+    return geom_to_access[geom]
+
 
 def get_ptx_tld4_access(geom):
-  geom_to_access = {
-    "2d"    : "{%f{{[0-9]+}}, %f{{[0-9]+}}}",
-    "a2d"   : "{%r{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
-    "cube"  : "{%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
-    "acube" : "{%r{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}"
-  }
-  return geom_to_access[geom]
+    geom_to_access = {
+        "2d": "{%f{{[0-9]+}}, %f{{[0-9]+}}}",
+        "a2d": "{%r{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
+        "cube": "{%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
+        "acube": "{%r{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}",
+    }
+    return geom_to_access[geom]
+
 
 def gen_tld4_tests(target, global_tex, global_sampler):
-  """
-  PTX spec s9.7.9.4. Texture Instructions: tld4
-  Perform a texture fetch of the 4-texel bilerp footprint.
+    """
+    PTX spec s9.7.9.4. Texture Instructions: tld4
+    Perform a texture fetch of the 4-texel bilerp footprint.
 
-  tld4.comp.2d.v4.dtype.f32    d[|p], [a, c] {, e} {, f};
-  tld4.comp.geom.v4.dtype.f32  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
+    tld4.comp.2d.v4.dtype.f32    d[|p], [a, c] {, e} {, f};
+    tld4.comp.geom.v4.dtype.f32  d[|p], [a, b, c] {, e} {, f};  // explicit sampler
 
-  .comp  = { .r, .g, .b, .a };
-  .geom  = { .2d, .a2d, .cube, .acube };
-  .dtype = { .u32, .s32, .f32 };
-  """
+    .comp  = { .r, .g, .b, .a };
+    .geom  = { .2d, .a2d, .cube, .acube };
+    .dtype = { .u32, .s32, .f32 };
+    """
 
-  template = """
+    template = """
   declare ${retty} @${intrinsic}(i64 %tex, ${sampler} ${access})
 
   ; CHECK-LABEL: .entry ${test_name}_param
@@ -758,258 +808,272 @@ def gen_tld4_tests(target, global_tex, global_sampler):
   }
   """
 
-  generated_items = []
-  generated_metadata = []
-  for comp, geom, dtype in product(
-      ["r", "g", "b", "a"],
-      ["2d", "a2d", "cube", "acube"],
-      ["u32", "s32", "f32"]):
-
-    # FIXME: missing intrinsics.
-    # tld4.{a2d,cube,acube} introduced in PTX ISA version 4.3.
-    if geom in ("a2d", "cube", "acube"):
-      continue
-
-    sampler_handle, get_sampler_handle = get_llvm_global_sampler(
-      target, global_sampler)
-
-    test_name = "test_tld4_" + "".join((comp, geom, dtype))
-    params = {
-      "test_name" : test_name,
-      "intrinsic" :
-        "llvm.nvvm.tld4{unified}.{comp}.{geom}.v4{dtype}.f32".format(
-          unified=(".unified" if is_unified(target) else ""),
-          comp=comp,
-          geom=get_llvm_geom(geom),
-          dtype=dtype),
-      "global_tex" : global_tex,
-      "retty"      : get_llvm_vec_type("v4", dtype),
-      "sampler"    : sampler_handle,
-      "access"     : get_llvm_tld4_access(geom),
-      "get_sampler_handle" : get_sampler_handle,
-
-      "instruction" : "tld4.{comp}.{geom}.v4.{dtype}.f32".format(
-        comp=comp, geom=geom, dtype=dtype),
-      "ptx_ret"     : get_ptx_vec_reg("v4", dtype),
-      "ptx_tex"     : get_ptx_texture(target),
-      "ptx_access"  : get_ptx_tld4_access(geom),
-      "ptx_global_sampler" : get_ptx_global_sampler(target, global_sampler),
-    }
-    gen_test(template, params)
-    generated_items.append((params["intrinsic"], params["instruction"]))
-
-    fun_name = test_name + "_param";
-    fun_ty = "void (i64, {sampler} {retty}*, {access_ty})*".format(
-      sampler=("" if is_unified(target) else "i64,"),
-      retty=params["retty"],
-      access_ty=get_llvm_tld4_access_type(geom))
-    generated_metadata += get_texture_metadata(
-      target, fun_ty, fun_name, has_texture_params=True)
-
-    fun_name = test_name + "_global";
-    fun_ty = "void ({retty}*, {access_ty})*".format(
-      retty=params["retty"],
-      access_ty=get_llvm_tld4_access_type(geom))
-    generated_metadata += get_texture_metadata(
-      target, fun_ty, fun_name, has_texture_params=False)
-
-  return generated_items, generated_metadata
+    generated_items = []
+    generated_metadata = []
+    for comp, geom, dtype in product(
+        ["r", "g", "b", "a"], ["2d", "a2d", "cube", "acube"], ["u32", "s32", "f32"]
+    ):
+
+        # FIXME: missing intrinsics.
+        # tld4.{a2d,cube,acube} introduced in PTX ISA version 4.3.
+        if geom in ("a2d", "cube", "acube"):
+            continue
+
+        sampler_handle, get_sampler_handle = get_llvm_global_sampler(
+            target, global_sampler
+        )
+
+        test_name = "test_tld4_" + "".join((comp, geom, dtype))
+        params = {
+            "test_name": test_name,
+            "intrinsic": "llvm.nvvm.tld4{unified}.{comp}.{geom}.v4{dtype}.f32".format(
+                unified=(".unified" if is_unified(target) else ""),
+                comp=comp,
+                geom=get_llvm_geom(geom),
+                dtype=dtype,
+            ),
+            "global_tex": global_tex,
+            "retty": get_llvm_vec_type("v4", dtype),
+            "sampler": sampler_handle,
+            "access": get_llvm_tld4_access(geom),
+            "get_sampler_handle": get_sampler_handle,
+            "instruction": "tld4.{comp}.{geom}.v4.{dtype}.f32".format(
+                comp=comp, geom=geom, dtype=dtype
+            ),
+            "ptx_ret": get_ptx_vec_reg("v4", dtype),
+            "ptx_tex": get_ptx_texture(target),
+            "ptx_access": get_ptx_tld4_access(geom),
+            "ptx_global_sampler": get_ptx_global_sampler(target, global_sampler),
+        }
+        gen_test(template, params)
+        generated_items.append((params["intrinsic"], params["instruction"]))
+
+        fun_name = test_name + "_param"
+        fun_ty = "void (i64, {sampler} {retty}*, {access_ty})*".format(
+            sampler=("" if is_unified(target) else "i64,"),
+            retty=params["retty"],
+            access_ty=get_llvm_tld4_access_type(geom),
+        )
+        generated_metadata += get_texture_metadata(
+            target, fun_ty, fun_name, has_texture_params=True
+        )
+
+        fun_name = test_name + "_global"
+        fun_ty = "void ({retty}*, {access_ty})*".format(
+            retty=params["retty"], access_ty=get_llvm_tld4_access_type(geom)
+        )
+        generated_metadata += get_texture_metadata(
+            target, fun_ty, fun_name, has_texture_params=False
+        )
+
+    return generated_items, generated_metadata
+
 
 def gen_test(template, params):
-  if debug:
-    print()
-    for param, value in params.items():
-      print(";; {}: {}".format(param, value))
+    if debug:
+        print()
+        for param, value in params.items():
+            print(";; {}: {}".format(param, value))
+
+    print(string.Template(textwrap.dedent(template)).substitute(params))
 
-  print(string.Template(textwrap.dedent(template)).substitute(params))
 
 def gen_tests(target, tests):
-  gen_triple(target)
-
-  items = []
-  metadata = []
-
-  global_surf = "gsurf"
-  global_tex = "gtex"
-  global_sampler = "gsam"
-  metadata += gen_globals(target, global_surf, global_tex, global_sampler)
-
-  if "suld" in tests:
-    suld_items, suld_md = gen_suld_tests(target, global_surf)
-    items += suld_items
-    metadata += suld_md
-  if "sust" in tests:
-    sust_items, sust_md = gen_sust_tests(target, global_surf)
-    items += sust_items
-    metadata += sust_md
-  if "tex" in tests:
-    tex_items, tex_md = gen_tex_tests(target, global_tex, global_sampler)
-    items += tex_items
-    metadata += tex_md
-  if "tld4" in tests:
-    tld4_items, tld4_md = gen_tld4_tests(target, global_tex, global_sampler)
-    items += tld4_items
-    metadata += tld4_md
-
-  gen_metadata(metadata)
-  return items
+    gen_triple(target)
+
+    items = []
+    metadata = []
+
+    global_surf = "gsurf"
+    global_tex = "gtex"
+    global_sampler = "gsam"
+    metadata += gen_globals(target, global_surf, global_tex, global_sampler)
+
+    if "suld" in tests:
+        suld_items, suld_md = gen_suld_tests(target, global_surf)
+        items += suld_items
+        metadata += suld_md
+    if "sust" in tests:
+        sust_items, sust_md = gen_sust_tests(target, global_surf)
+        items += sust_items
+        metadata += sust_md
+    if "tex" in tests:
+        tex_items, tex_md = gen_tex_tests(target, global_tex, global_sampler)
+        items += tex_items
+        metadata += tex_md
+    if "tld4" in tests:
+        tld4_items, tld4_md = gen_tld4_tests(target, global_tex, global_sampler)
+        items += tld4_items
+        metadata += tld4_md
+
+    gen_metadata(metadata)
+    return items
+
 
 def write_gen_list(filename, append, items):
-  with open(filename, ("a" if append else "w")) as f:
-    for intrinsic, instruction in items:
-      f.write("{} {}\n".format(intrinsic, instruction))
+    with open(filename, ("a" if append else "w")) as f:
+        for intrinsic, instruction in items:
+            f.write("{} {}\n".format(intrinsic, instruction))
+
 
 def read_gen_list(filename):
-  intrinsics = set()
-  instructions = set()
-  with open(filename) as f:
-    for line in f:
-      intrinsic, instruction = line.split()
-      intrinsics.add(intrinsic)
-      instructions.add(instruction)
-  return (intrinsics, instructions)
+    intrinsics = set()
+    instructions = set()
+    with open(filename) as f:
+        for line in f:
+            intrinsic, instruction = line.split()
+            intrinsics.add(intrinsic)
+            instructions.add(instruction)
+    return (intrinsics, instructions)
+
 
 def read_td_list(filename, regex):
-  td_list = set()
-  with open(filename) as f:
-    for line in f:
-      match = re.search(regex, line)
-      if match:
-        td_list.add(match.group(1))
+    td_list = set()
+    with open(filename) as f:
+        for line in f:
+            match = re.search(regex, line)
+            if match:
+                td_list.add(match.group(1))
+
+    # Arbitrary value - we should find quite a lot of instructions
+    if len(td_list) < 30:
+        raise RuntimeError(
+            "found only {} instructions in {}".format(len(td_list), filename)
+        )
 
-  # Arbitrary value - we should find quite a lot of instructions
-  if len(td_list) < 30:
-    raise RuntimeError("found only {} instructions in {}".format(
-      filename, len(td_list)))
+    return td_list
 
-  return td_list
 
 def verify_inst_tablegen(path_td, gen_instr):
-  """
-  Verify that all instructions defined in NVPTXIntrinsics.td are
-  tested.
-  """
+    """
+    Verify that all instructions defined in NVPTXIntrinsics.td are
+    tested.
+    """
+
+    td_instr = read_td_list(path_td, '"((suld|sust|tex|tld4)\\..*)"')
+
+    gen_instr.update(
+        {
+            # FIXME: spec does not list any sust.p variants other than b32
+            "sust.p.1d.b8.trap",
+            "sust.p.1d.b16.trap",
+            "sust.p.1d.v2.b8.trap",
+            "sust.p.1d.v2.b16.trap",
+            "sust.p.1d.v4.b8.trap",
+            "sust.p.1d.v4.b16.trap",
+            "sust.p.a1d.b8.trap",
+            "sust.p.a1d.b16.trap",
+            "sust.p.a1d.v2.b8.trap",
+            "sust.p.a1d.v2.b16.trap",
+            "sust.p.a1d.v4.b8.trap",
+            "sust.p.a1d.v4.b16.trap",
+            "sust.p.2d.b8.trap",
+            "sust.p.2d.b16.trap",
+            "sust.p.2d.v2.b8.trap",
+            "sust.p.2d.v2.b16.trap",
+            "sust.p.2d.v4.b8.trap",
+            "sust.p.2d.v4.b16.trap",
+            "sust.p.a2d.b8.trap",
+            "sust.p.a2d.b16.trap",
+            "sust.p.a2d.v2.b8.trap",
+            "sust.p.a2d.v2.b16.trap",
+            "sust.p.a2d.v4.b8.trap",
+            "sust.p.a2d.v4.b16.trap",
+            "sust.p.3d.b8.trap",
+            "sust.p.3d.b16.trap",
+            "sust.p.3d.v2.b8.trap",
+            "sust.p.3d.v2.b16.trap",
+            "sust.p.3d.v4.b8.trap",
+            "sust.p.3d.v4.b16.trap",
+            # FIXME: sust.p is also not supported for arrays
+            "sust.p.a1d.b32.trap",
+            "sust.p.a1d.v2.b32.trap",
+            "sust.p.a1d.v4.b32.trap",
+            "sust.p.a2d.b32.trap",
+            "sust.p.a2d.v2.b32.trap",
+            "sust.p.a2d.v4.b32.trap",
+        }
+    )
+
+    td_instr = list(td_instr)
+    td_instr.sort()
+    gen_instr = list(gen_instr)
+    gen_instr.sort()
+    for i, td in enumerate(td_instr):
+        if i == len(gen_instr) or td != gen_instr[i]:
+            raise RuntimeError(
+                "{} is present in tablegen, but not tested.\n".format(td)
+            )
 
-  td_instr = read_td_list(path_td, "\"((suld|sust|tex|tld4)\\..*)\"")
-
-  gen_instr.update({
-    # FIXME: spec does not list any sust.p variants other than b32
-    "sust.p.1d.b8.trap",
-    "sust.p.1d.b16.trap",
-    "sust.p.1d.v2.b8.trap",
-    "sust.p.1d.v2.b16.trap",
-    "sust.p.1d.v4.b8.trap",
-    "sust.p.1d.v4.b16.trap",
-    "sust.p.a1d.b8.trap",
-    "sust.p.a1d.b16.trap",
-    "sust.p.a1d.v2.b8.trap",
-    "sust.p.a1d.v2.b16.trap",
-    "sust.p.a1d.v4.b8.trap",
-    "sust.p.a1d.v4.b16.trap",
-    "sust.p.2d.b8.trap",
-    "sust.p.2d.b16.trap",
-    "sust.p.2d.v2.b8.trap",
-    "sust.p.2d.v2.b16.trap",
-    "sust.p.2d.v4.b8.trap",
-    "sust.p.2d.v4.b16.trap",
-    "sust.p.a2d.b8.trap",
-    "sust.p.a2d.b16.trap",
-    "sust.p.a2d.v2.b8.trap",
-    "sust.p.a2d.v2.b16.trap",
-    "sust.p.a2d.v4.b8.trap",
-    "sust.p.a2d.v4.b16.trap",
-    "sust.p.3d.b8.trap",
-    "sust.p.3d.b16.trap",
-    "sust.p.3d.v2.b8.trap",
-    "sust.p.3d.v2.b16.trap",
-    "sust.p.3d.v4.b8.trap",
-    "sust.p.3d.v4.b16.trap",
-
-    # FIXME: sust.p is also not supported for arrays
-    "sust.p.a1d.b32.trap",
-    "sust.p.a1d.v2.b32.trap",
-    "sust.p.a1d.v4.b32.trap",
-    "sust.p.a2d.b32.trap",
-    "sust.p.a2d.v2.b32.trap",
-    "sust.p.a2d.v4.b32.trap",
-  })
-
-  td_instr = list(td_instr)
-  td_instr.sort()
-  gen_instr = list(gen_instr)
-  gen_instr.sort()
-  for i, td in enumerate(td_instr):
-    if i == len(gen_instr) or td != gen_instr[i]:
-      raise RuntimeError(
-        "{} is present in tablegen, but not tested.\n".format(td))
 
 def verify_llvm_tablegen(path_td, gen_intr):
-  """
-  Verify that all intrinsics defined in IntrinsicsNVVM.td are
-  tested.
-  """
+    """
+    Verify that all intrinsics defined in IntrinsicsNVVM.td are
+    tested.
+    """
+
+    td_intr = read_td_list(path_td, '"(llvm\\.nvvm\\.(suld|sust|tex|tld4)\\..*)"')
+
+    gen_intr.update(
+        {
+            # FIXME: spec does not list any sust.p variants other than b32
+            "llvm.nvvm.sust.p.1d.i8.trap",
+            "llvm.nvvm.sust.p.1d.i16.trap",
+            "llvm.nvvm.sust.p.1d.v2i8.trap",
+            "llvm.nvvm.sust.p.1d.v2i16.trap",
+            "llvm.nvvm.sust.p.1d.v4i8.trap",
+            "llvm.nvvm.sust.p.1d.v4i16.trap",
+            "llvm.nvvm.sust.p.1d.array.i8.trap",
+            "llvm.nvvm.sust.p.1d.array.i16.trap",
+            "llvm.nvvm.sust.p.1d.array.v2i8.trap",
+            "llvm.nvvm.sust.p.1d.array.v2i16.trap",
+            "llvm.nvvm.sust.p.1d.array.v4i8.trap",
+            "llvm.nvvm.sust.p.1d.array.v4i16.trap",
+            "llvm.nvvm.sust.p.2d.i8.trap",
+            "llvm.nvvm.sust.p.2d.i16.trap",
+            "llvm.nvvm.sust.p.2d.v2i8.trap",
+            "llvm.nvvm.sust.p.2d.v2i16.trap",
+            "llvm.nvvm.sust.p.2d.v4i8.trap",
+            "llvm.nvvm.sust.p.2d.v4i16.trap",
+            "llvm.nvvm.sust.p.2d.array.i8.trap",
+            "llvm.nvvm.sust.p.2d.array.i16.trap",
+            "llvm.nvvm.sust.p.2d.array.v2i8.trap",
+            "llvm.nvvm.sust.p.2d.array.v2i16.trap",
+            "llvm.nvvm.sust.p.2d.array.v4i8.trap",
+            "llvm.nvvm.sust.p.2d.array.v4i16.trap",
+            "llvm.nvvm.sust.p.3d.i8.trap",
+            "llvm.nvvm.sust.p.3d.i16.trap",
+            "llvm.nvvm.sust.p.3d.v2i8.trap",
+            "llvm.nvvm.sust.p.3d.v2i16.trap",
+            "llvm.nvvm.sust.p.3d.v4i8.trap",
+            "llvm.nvvm.sust.p.3d.v4i16.trap",
+            # FIXME: sust.p is also not supported for arrays
+            "llvm.nvvm.sust.p.1d.array.i32.trap",
+            "llvm.nvvm.sust.p.1d.array.v2i32.trap",
+            "llvm.nvvm.sust.p.1d.array.v4i32.trap",
+            "llvm.nvvm.sust.p.2d.array.i32.trap",
+            "llvm.nvvm.sust.p.2d.array.v2i32.trap",
+            "llvm.nvvm.sust.p.2d.array.v4i32.trap",
+        }
+    )
+
+    td_intr = list(td_intr)
+    td_intr.sort()
+    gen_intr = list(gen_intr)
+    gen_intr.sort()
+    for i, td in enumerate(td_intr):
+        if i == len(gen_intr) or td != gen_intr[i]:
+            raise RuntimeError(
+                "{} is present in tablegen, but not tested.\n".format(td)
+            )
 
-  td_intr = read_td_list(
-    path_td, "\"(llvm\\.nvvm\\.(suld|sust|tex|tld4)\\..*)\"")
-
-  gen_intr.update({
-    # FIXME: spec does not list any sust.p variants other than b32
-    "llvm.nvvm.sust.p.1d.i8.trap",
-    "llvm.nvvm.sust.p.1d.i16.trap",
-    "llvm.nvvm.sust.p.1d.v2i8.trap",
-    "llvm.nvvm.sust.p.1d.v2i16.trap",
-    "llvm.nvvm.sust.p.1d.v4i8.trap",
-    "llvm.nvvm.sust.p.1d.v4i16.trap",
-    "llvm.nvvm.sust.p.1d.array.i8.trap",
-    "llvm.nvvm.sust.p.1d.array.i16.trap",
-    "llvm.nvvm.sust.p.1d.array.v2i8.trap",
-    "llvm.nvvm.sust.p.1d.array.v2i16.trap",
-    "llvm.nvvm.sust.p.1d.array.v4i8.trap",
-    "llvm.nvvm.sust.p.1d.array.v4i16.trap",
-    "llvm.nvvm.sust.p.2d.i8.trap",
-    "llvm.nvvm.sust.p.2d.i16.trap",
-    "llvm.nvvm.sust.p.2d.v2i8.trap",
-    "llvm.nvvm.sust.p.2d.v2i16.trap",
-    "llvm.nvvm.sust.p.2d.v4i8.trap",
-    "llvm.nvvm.sust.p.2d.v4i16.trap",
-    "llvm.nvvm.sust.p.2d.array.i8.trap",
-    "llvm.nvvm.sust.p.2d.array.i16.trap",
-    "llvm.nvvm.sust.p.2d.array.v2i8.trap",
-    "llvm.nvvm.sust.p.2d.array.v2i16.trap",
-    "llvm.nvvm.sust.p.2d.array.v4i8.trap",
-    "llvm.nvvm.sust.p.2d.array.v4i16.trap",
-    "llvm.nvvm.sust.p.3d.i8.trap",
-    "llvm.nvvm.sust.p.3d.i16.trap",
-    "llvm.nvvm.sust.p.3d.v2i8.trap",
-    "llvm.nvvm.sust.p.3d.v2i16.trap",
-    "llvm.nvvm.sust.p.3d.v4i8.trap",
-    "llvm.nvvm.sust.p.3d.v4i16.trap",
-
-    # FIXME: sust.p is also not supported for arrays
-    "llvm.nvvm.sust.p.1d.array.i32.trap",
-    "llvm.nvvm.sust.p.1d.array.v2i32.trap",
-    "llvm.nvvm.sust.p.1d.array.v4i32.trap",
-    "llvm.nvvm.sust.p.2d.array.i32.trap",
-    "llvm.nvvm.sust.p.2d.array.v2i32.trap",
-    "llvm.nvvm.sust.p.2d.array.v4i32.trap"
-  })
-
-  td_intr = list(td_intr)
-  td_intr.sort()
-  gen_intr = list(gen_intr)
-  gen_intr.sort()
-  for i, td in enumerate(td_intr):
-    if i == len(gen_intr) or td != gen_intr[i]:
-      raise RuntimeError(
-        "{} is present in tablegen, but not tested.\n".format(td))
 
 parser = argparse.ArgumentParser()
 parser.add_argument("--debug", action="store_true")
 parser.add_argument("--tests", type=str)
 parser.add_argument("--target", type=str)
 parser.add_argument("--gen-list", dest="gen_list", type=str)
-parser.add_argument("--gen-list-append", dest="gen_list_append",
-                    action="store_true")
+parser.add_argument("--gen-list-append", dest="gen_list_append", action="store_true")
 parser.add_argument("--verify", action="store_true")
 parser.add_argument("--llvm-tablegen", dest="llvm_td", type=str)
 parser.add_argument("--inst-tablegen", dest="inst_td", type=str)
@@ -1018,10 +1082,10 @@ def verify_llvm_tablegen(path_td, gen_intr):
 debug = args.debug
 
 if args.verify:
-  intrinsics, instructions = read_gen_list(args.gen_list)
-  verify_inst_tablegen(args.inst_td, instructions)
-  verify_llvm_tablegen(args.llvm_td, intrinsics)
+    intrinsics, instructions = read_gen_list(args.gen_list)
+    verify_inst_tablegen(args.inst_td, instructions)
+    verify_llvm_tablegen(args.llvm_td, intrinsics)
 else:
-  items = gen_tests(args.target, args.tests.split(","))
-  if (args.gen_list):
-    write_gen_list(args.gen_list, args.gen_list_append, items)
+    items = gen_tests(args.target, args.tests.split(","))
+    if args.gen_list:
+        write_gen_list(args.gen_list, args.gen_list_append, items)

diff  --git a/llvm/test/CodeGen/NVPTX/wmma.py b/llvm/test/CodeGen/NVPTX/wmma.py
index 4df0434b21b95..928abe8795a7b 100644
--- a/llvm/test/CodeGen/NVPTX/wmma.py
+++ b/llvm/test/CodeGen/NVPTX/wmma.py
@@ -98,405 +98,458 @@
 from itertools import product
 from string import Template
 
+
 class MMAType:
-  def __init__(self, ptx_type):
-    self.ptx_type = ptx_type
-    self.llvm_type = {
-        "f16"  : "<2 x half>",
-        "f32"  : "float",
-        "f64"  : "double",
-        "s32"  : "i32",
-        "b16"  : "i32",
-        "s8"   : "i32",
-        "u8"   : "i32",
-        "s4"   : "i32",
-        "u4"   : "i32",
-        "b1"   : "i32",
-        "bf16" : "i32",
-        "tf32" : "i32",
-    }[ptx_type];
-
-    self.ptx_reg_pattern = {
-        "f16" : "%hh[0-9]+",
-        "f32" : "%f[0-9]+",
-        "f64" : "%fd[0-9]+",
-    }.get(ptx_type, "%r[0-9]+")
-
-  def __repr__(self):
-    return "%s/%s" % (self.ptx_type, self.llvm_type)
+    def __init__(self, ptx_type):
+        self.ptx_type = ptx_type
+        self.llvm_type = {
+            "f16": "<2 x half>",
+            "f32": "float",
+            "f64": "double",
+            "s32": "i32",
+            "b16": "i32",
+            "s8": "i32",
+            "u8": "i32",
+            "s4": "i32",
+            "u4": "i32",
+            "b1": "i32",
+            "bf16": "i32",
+            "tf32": "i32",
+        }[ptx_type]
+
+        self.ptx_reg_pattern = {
+            "f16": "%hh[0-9]+",
+            "f32": "%f[0-9]+",
+            "f64": "%fd[0-9]+",
+        }.get(ptx_type, "%r[0-9]+")
+
+    def __repr__(self):
+        return "%s/%s" % (self.ptx_type, self.llvm_type)
+
 
 class MMAFrag:
-  def __init__(self, geom, frag, ptx_elt_type):
-    self.geom = geom
-    self.frag = frag
-    self.mma_type = MMAType(ptx_elt_type);
-    self.nregs = {
-        # u8/s8 -> s32 @ m16n16k16/m8n32k16/m32n8k16
-        "m16n16k16:a:u8" : 2,
-        "m16n16k16:a:s8" : 2,
-        "m16n16k16:b:u8" : 2,
-        "m16n16k16:b:s8" : 2,
-        "m16n16k16:c:s32" : 8,
-        "m16n16k16:d:s32" : 8,
-
-        "m8n32k16:a:u8" : 1,
-        "m8n32k16:a:s8" : 1,
-        "m8n32k16:b:u8" : 4,
-        "m8n32k16:b:s8" : 4,
-        "m8n32k16:c:s32" : 8,
-        "m8n32k16:d:s32" : 8,
-
-        "m32n8k16:a:u8" : 4,
-        "m32n8k16:a:s8" : 4,
-        "m32n8k16:b:u8" : 1,
-        "m32n8k16:b:s8" : 1,
-        "m32n8k16:c:s32" : 8,
-        "m32n8k16:d:s32" : 8,
-
-        "m8n8k16:a:u8": 1,
-        "m8n8k16:a:s8": 1,
-        "m8n8k16:b:u8": 1,
-        "m8n8k16:b:s8": 1,
-        "m8n8k16:c:s32": 2,
-        "m8n8k16:d:s32": 2,
-
-        "m16n8k16:a:u8": 2,
-        "m16n8k16:a:s8": 2,
-        "m16n8k16:b:u8": 1,
-        "m16n8k16:b:s8": 1,
-        "m16n8k16:c:s32": 4,
-        "m16n8k16:d:s32": 4,
-
-        "m16n8k32:a:u8": 4,
-        "m16n8k32:a:s8": 4,
-        "m16n8k32:b:u8": 2,
-        "m16n8k32:b:s8": 2,
-        "m16n8k32:c:s32": 4,
-        "m16n8k32:d:s32": 4,
-
-        # u4/s4 -> s32 @ m8n8k32 (u4/s4)
-        "m8n8k32:a:u4" : 1,
-        "m8n8k32:a:s4" : 1,
-        "m8n8k32:b:u4" : 1,
-        "m8n8k32:b:s4" : 1,
-        "m8n8k32:c:s32" : 2,
-        "m8n8k32:d:s32" : 2,
-
-        "m16n8k32:a:u4" : 2,
-        "m16n8k32:a:s4" : 2,
-        "m16n8k32:b:u4" : 1,
-        "m16n8k32:b:s4" : 1,
-        "m16n8k32:c:s32" : 4,
-        "m16n8k32:d:s32" : 4,
-
-        "m16n8k64:a:u4" : 4,
-        "m16n8k64:a:s4" : 4,
-        "m16n8k64:b:u4" : 2,
-        "m16n8k64:b:s4" : 2,
-        "m16n8k64:c:s32" : 4,
-        "m16n8k64:d:s32" : 4,
-
-        # b1 -> s32 @ m8n8k128(b1)
-        "m8n8k128:a:b1" : 1,
-        "m8n8k128:b:b1" : 1,
-        "m8n8k128:c:s32" : 2,
-        "m8n8k128:d:s32" : 2,
-
-        "m16n8k128:a:b1" : 2,
-        "m16n8k128:b:b1" : 1,
-        "m16n8k128:c:s32" : 4,
-        "m16n8k128:d:s32" : 4,
-
-        "m16n8k256:a:b1" : 4,
-        "m16n8k256:b:b1" : 2,
-        "m16n8k256:c:s32" : 4,
-        "m16n8k256:d:s32" : 4,
-
-        # bf16 -> s32 @ m16n16k16/m8n32k16/m32n8k16
-        "m16n16k16:a:bf16" : 4,
-        "m16n16k16:b:bf16" : 4,
-        "m8n32k16:a:bf16" : 2,
-        "m8n32k16:b:bf16" : 8,
-        "m32n8k16:a:bf16" : 8,
-        "m32n8k16:b:bf16" : 2,
-
-        "m16n8k16:a:bf16" : 4,
-        "m16n8k16:b:bf16" : 2,
-        "m16n8k16:c:f32" : 4,
-        "m16n8k16:d:f32" : 4,
-        "m16n8k8:a:bf16" : 2,
-        "m16n8k8:b:bf16" : 1,
-        "m16n8k8:c:f32" : 4,
-        "m16n8k8:d:f32" : 4,
-
-        "m8n8k4:a:f64" : 1,
-        "m8n8k4:b:f64" : 1,
-        "m8n8k4:c:f64" : 2,
-        "m8n8k4:d:f64" : 2,
-
-        # tf32 -> s32 @ m16n16k8
-        "m16n16k8:a:tf32" : 4,
-        "m16n16k8:b:tf32" : 4,
-
-        "m16n8k4:a:tf32" : 2,
-        "m16n8k4:b:tf32" : 1,
-        "m16n8k4:c:f32" : 4,
-        "m16n8k4:d:f32" : 4,
-        "m16n8k8:a:tf32" : 4,
-        "m16n8k8:b:tf32" : 2,
-        "m16n8k8:c:f32" : 4,
-        "m16n8k8:d:f32" : 4,
-
-        "m8n8k4:a:f16": 2,
-        "m8n8k4:b:f16": 2,
-        "m16n8k8:a:f16": 2,
-        "m16n8k8:b:f16": 1,
-        "m16n8k8:c:f16": 2,
-        "m16n8k8:d:f16": 2,
-        "m16n8k8:c:f32": 4,
-        "m16n8k8:d:f32": 4,
-        "m16n8k16:a:f16": 4,
-        "m16n8k16:b:f16": 2,
-        "m16n8k16:c:f16": 2,
-        "m16n8k16:d:f16": 2,
-        "m16n8k16:c:f32": 4,
-        "m16n8k16:d:f32": 4,
-
-        # ldmatrix
-        "m8n8:x1:b16": 1,
-        "m8n8:x2:b16": 2,
-        "m8n8:x4:b16": 4,
-    }.get("%s:%s:%s" % (geom, frag, ptx_elt_type), {
-        # All other FP shape/fragment/type combinations have the same size
-        "a:f16" : 8,
-        "b:f16" : 8,
-        "c:f16" : 4,
-        "d:f16" : 4,
-        "c:f32" : 8,
-        "d:f32" : 8,
-    }.get("%s:%s" % (frag, ptx_elt_type), None))
-    assert(self.nregs);
-
-  def __repr__(self):
-    return "%s:%s:%s%s" % (self.geom, self.frag, self.mma_type,
-                           "" if self.nregs == 1 else ("*%d" % self.nregs))
+    def __init__(self, geom, frag, ptx_elt_type):
+        self.geom = geom
+        self.frag = frag
+        self.mma_type = MMAType(ptx_elt_type)
+        self.nregs = {
+            # u8/s8 -> s32 @ m16n16k16/m8n32k16/m32n8k16
+            "m16n16k16:a:u8": 2,
+            "m16n16k16:a:s8": 2,
+            "m16n16k16:b:u8": 2,
+            "m16n16k16:b:s8": 2,
+            "m16n16k16:c:s32": 8,
+            "m16n16k16:d:s32": 8,
+            "m8n32k16:a:u8": 1,
+            "m8n32k16:a:s8": 1,
+            "m8n32k16:b:u8": 4,
+            "m8n32k16:b:s8": 4,
+            "m8n32k16:c:s32": 8,
+            "m8n32k16:d:s32": 8,
+            "m32n8k16:a:u8": 4,
+            "m32n8k16:a:s8": 4,
+            "m32n8k16:b:u8": 1,
+            "m32n8k16:b:s8": 1,
+            "m32n8k16:c:s32": 8,
+            "m32n8k16:d:s32": 8,
+            "m8n8k16:a:u8": 1,
+            "m8n8k16:a:s8": 1,
+            "m8n8k16:b:u8": 1,
+            "m8n8k16:b:s8": 1,
+            "m8n8k16:c:s32": 2,
+            "m8n8k16:d:s32": 2,
+            "m16n8k16:a:u8": 2,
+            "m16n8k16:a:s8": 2,
+            "m16n8k16:b:u8": 1,
+            "m16n8k16:b:s8": 1,
+            "m16n8k16:c:s32": 4,
+            "m16n8k16:d:s32": 4,
+            "m16n8k32:a:u8": 4,
+            "m16n8k32:a:s8": 4,
+            "m16n8k32:b:u8": 2,
+            "m16n8k32:b:s8": 2,
+            "m16n8k32:c:s32": 4,
+            "m16n8k32:d:s32": 4,
+            # u4/s4 -> s32 @ m8n8k32 (u4/s4)
+            "m8n8k32:a:u4": 1,
+            "m8n8k32:a:s4": 1,
+            "m8n8k32:b:u4": 1,
+            "m8n8k32:b:s4": 1,
+            "m8n8k32:c:s32": 2,
+            "m8n8k32:d:s32": 2,
+            "m16n8k32:a:u4": 2,
+            "m16n8k32:a:s4": 2,
+            "m16n8k32:b:u4": 1,
+            "m16n8k32:b:s4": 1,
+            "m16n8k32:c:s32": 4,
+            "m16n8k32:d:s32": 4,
+            "m16n8k64:a:u4": 4,
+            "m16n8k64:a:s4": 4,
+            "m16n8k64:b:u4": 2,
+            "m16n8k64:b:s4": 2,
+            "m16n8k64:c:s32": 4,
+            "m16n8k64:d:s32": 4,
+            # b1 -> s32 @ m8n8k128(b1)
+            "m8n8k128:a:b1": 1,
+            "m8n8k128:b:b1": 1,
+            "m8n8k128:c:s32": 2,
+            "m8n8k128:d:s32": 2,
+            "m16n8k128:a:b1": 2,
+            "m16n8k128:b:b1": 1,
+            "m16n8k128:c:s32": 4,
+            "m16n8k128:d:s32": 4,
+            "m16n8k256:a:b1": 4,
+            "m16n8k256:b:b1": 2,
+            "m16n8k256:c:s32": 4,
+            "m16n8k256:d:s32": 4,
+            # bf16 -> s32 @ m16n16k16/m8n32k16/m32n8k16
+            "m16n16k16:a:bf16": 4,
+            "m16n16k16:b:bf16": 4,
+            "m8n32k16:a:bf16": 2,
+            "m8n32k16:b:bf16": 8,
+            "m32n8k16:a:bf16": 8,
+            "m32n8k16:b:bf16": 2,
+            "m16n8k16:a:bf16": 4,
+            "m16n8k16:b:bf16": 2,
+            "m16n8k16:c:f32": 4,
+            "m16n8k16:d:f32": 4,
+            "m16n8k8:a:bf16": 2,
+            "m16n8k8:b:bf16": 1,
+            "m16n8k8:c:f32": 4,
+            "m16n8k8:d:f32": 4,
+            "m8n8k4:a:f64": 1,
+            "m8n8k4:b:f64": 1,
+            "m8n8k4:c:f64": 2,
+            "m8n8k4:d:f64": 2,
+            # tf32 -> s32 @ m16n16k8
+            "m16n16k8:a:tf32": 4,
+            "m16n16k8:b:tf32": 4,
+            "m16n8k4:a:tf32": 2,
+            "m16n8k4:b:tf32": 1,
+            "m16n8k4:c:f32": 4,
+            "m16n8k4:d:f32": 4,
+            "m16n8k8:a:tf32": 4,
+            "m16n8k8:b:tf32": 2,
+            "m16n8k8:c:f32": 4,
+            "m16n8k8:d:f32": 4,
+            "m8n8k4:a:f16": 2,
+            "m8n8k4:b:f16": 2,
+            "m16n8k8:a:f16": 2,
+            "m16n8k8:b:f16": 1,
+            "m16n8k8:c:f16": 2,
+            "m16n8k8:d:f16": 2,
+            "m16n8k8:c:f32": 4,
+            "m16n8k8:d:f32": 4,
+            "m16n8k16:a:f16": 4,
+            "m16n8k16:b:f16": 2,
+            "m16n8k16:c:f16": 2,
+            "m16n8k16:d:f16": 2,
+            "m16n8k16:c:f32": 4,
+            "m16n8k16:d:f32": 4,
+            # ldmatrix
+            "m8n8:x1:b16": 1,
+            "m8n8:x2:b16": 2,
+            "m8n8:x4:b16": 4,
+        }.get(
+            "%s:%s:%s" % (geom, frag, ptx_elt_type),
+            {
+                # All other FP shape/fragment/type combinations have the same size
+                "a:f16": 8,
+                "b:f16": 8,
+                "c:f16": 4,
+                "d:f16": 4,
+                "c:f32": 8,
+                "d:f32": 8,
+            }.get("%s:%s" % (frag, ptx_elt_type), None),
+        )
+        assert self.nregs
+
+    def __repr__(self):
+        return "%s:%s:%s%s" % (
+            self.geom,
+            self.frag,
+            self.mma_type,
+            "" if self.nregs == 1 else ("*%d" % self.nregs),
+        )
+
 
 class MMAOp:
-  def __init__(self, a, b, c, d):
-    self.a = a
-    self.b = b
-    self.c = c
-    self.d = d
+    def __init__(self, a, b, c, d):
+        self.a = a
+        self.b = b
+        self.c = c
+        self.d = d
+
+    def __repr__(self):
+        return "{A:%s, B:%s, C:%s, D:%s}" % (self.a, self.b, self.c, self.d)
 
-  def __repr__(self):
-    return ("{A:%s, B:%s, C:%s, D:%s}" % (self.a, self.b, self.c, self.d ))
 
 def make_mma_ops(geoms, types_a, types_b, types_c, types_d):
-  ops = []
-  for geom, type_a, type_c in product( geoms,  types_a, types_c):
-    for type_b, type_d in product(types_b if types_b else [type_a],
-                                  types_d if types_d else [type_c]):
-      ops.append(MMAOp(MMAFrag(geom, "a", type_a),
-                       MMAFrag(geom, "b", type_b),
-                       MMAFrag(geom, "c", type_c),
-                       MMAFrag(geom, "d", type_d)))
-  return ops
+    ops = []
+    for geom, type_a, type_c in product(geoms, types_a, types_c):
+        for type_b, type_d in product(
+            types_b if types_b else [type_a], types_d if types_d else [type_c]
+        ):
+            ops.append(
+                MMAOp(
+                    MMAFrag(geom, "a", type_a),
+                    MMAFrag(geom, "b", type_b),
+                    MMAFrag(geom, "c", type_c),
+                    MMAFrag(geom, "d", type_d),
+                )
+            )
+    return ops
+
 
 def make_ldst_ops(geoms, frags, types):
-  return [MMAFrag(geom, frag, ptx_type) for (geom, frag, ptx_type)
-          in product(geoms, frags, types)]
+    return [
+        MMAFrag(geom, frag, ptx_type)
+        for (geom, frag, ptx_type) in product(geoms, frags, types)
+    ]
+
 
 def make_ldmatrix_ops(geoms, frags, types):
-  return [MMAFrag(geom, frag, ptx_type) for (geom, frag, ptx_type)
-          in product(geoms, frags, types)]
+    return [
+        MMAFrag(geom, frag, ptx_type)
+        for (geom, frag, ptx_type) in product(geoms, frags, types)
+    ]
+
 
 def get_wmma_ops():
-  return (make_mma_ops(["m16n16k8"],
-                       ["tf32"], [], ["f32"], []) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["bf16"], [], ["f32"], []) +
-          make_mma_ops(["m8n8k4"],
-                       ["f64"], [], ["f64"], []) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["f16"], [], ["f16", "f32"], ["f16", "f32"]) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["s8", "u8"], [], ["s32"], []) +
-          make_mma_ops(["m8n8k32"],
-                       ["s4", "u4"], [], ["s32"], []) +
-          make_mma_ops(["m8n8k128"],
-                       ["b1"], [], ["s32"], []))
+    return (
+        make_mma_ops(["m16n16k8"], ["tf32"], [], ["f32"], [])
+        + make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"], ["bf16"], [], ["f32"], [])
+        + make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], [])
+        + make_mma_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"],
+            ["f16"],
+            [],
+            ["f16", "f32"],
+            ["f16", "f32"],
+        )
+        + make_mma_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"], ["s8", "u8"], [], ["s32"], []
+        )
+        + make_mma_ops(["m8n8k32"], ["s4", "u4"], [], ["s32"], [])
+        + make_mma_ops(["m8n8k128"], ["b1"], [], ["s32"], [])
+    )
+
 
 def get_mma_ops():
-  return (make_mma_ops(["m8n8k4"],
-                       ["f64"], [], ["f64"], []) +
-          make_mma_ops(["m16n8k4", "m16n8k8"],
-                       ["tf32"], [], ["f32"], []) +
-          make_mma_ops(["m16n8k16", "m16n8k8"],
-                       ["bf16"], [], ["f32"], []) +
-          make_mma_ops(["m8n8k4", "m16n8k8", "m16n8k16"],
-                       ["f16"], [], ["f16", "f32"], ["f16", "f32"]) +
-          make_mma_ops(["m8n8k16", "m16n8k16", "m16n8k32"],
-                       ["s8", "u8"], ["s8", "u8"], ["s32"], []) +
-          make_mma_ops(["m8n8k32", "m16n8k32", "m16n8k64"],
-                       ["s4", "u4"], ["s4", "u4"], ["s32"], []) +
-          make_mma_ops(["m8n8k128", "m16n8k128", "m16n8k256"],
-                       ["b1"], [], ["s32"], []))
+    return (
+        make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], [])
+        + make_mma_ops(["m16n8k4", "m16n8k8"], ["tf32"], [], ["f32"], [])
+        + make_mma_ops(["m16n8k16", "m16n8k8"], ["bf16"], [], ["f32"], [])
+        + make_mma_ops(
+            ["m8n8k4", "m16n8k8", "m16n8k16"],
+            ["f16"],
+            [],
+            ["f16", "f32"],
+            ["f16", "f32"],
+        )
+        + make_mma_ops(
+            ["m8n8k16", "m16n8k16", "m16n8k32"], ["s8", "u8"], ["s8", "u8"], ["s32"], []
+        )
+        + make_mma_ops(
+            ["m8n8k32", "m16n8k32", "m16n8k64"], ["s4", "u4"], ["s4", "u4"], ["s32"], []
+        )
+        + make_mma_ops(["m8n8k128", "m16n8k128", "m16n8k256"], ["b1"], [], ["s32"], [])
+    )
+
 
 def get_ldst_ops(kind):
-  ldst_ops = (make_ldst_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                            ["a", "b"], ["f16", "u8", "s8", "bf16"]) +
-              make_ldst_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                            ["c", "d"], ["f16", "f32", "s32"]) +
-              make_ldst_ops(["m8n8k32"], ["a", "b"], ["s4","u4"]) +
-              make_ldst_ops(["m8n8k128"], ["a", "b"], ["b1"]) +
-              make_ldst_ops(["m8n8k32", "m8n8k128"],  ["c", "d"], ["s32"]) +
-              make_ldst_ops(["m8n8k4"], ["a", "b", "c", "d"], ["f64"]) +
-              make_ldst_ops(["m16n16k8"], ["a", "b"], ["tf32"]) +
-              make_ldst_ops(["m16n16k8"], ["c", "d"], ["f32"]))
-  return [ x for x in ldst_ops if (x.frag == "d") == (kind == "store")]
+    ldst_ops = (
+        make_ldst_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"],
+            ["a", "b"],
+            ["f16", "u8", "s8", "bf16"],
+        )
+        + make_ldst_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"], ["c", "d"], ["f16", "f32", "s32"]
+        )
+        + make_ldst_ops(["m8n8k32"], ["a", "b"], ["s4", "u4"])
+        + make_ldst_ops(["m8n8k128"], ["a", "b"], ["b1"])
+        + make_ldst_ops(["m8n8k32", "m8n8k128"], ["c", "d"], ["s32"])
+        + make_ldst_ops(["m8n8k4"], ["a", "b", "c", "d"], ["f64"])
+        + make_ldst_ops(["m16n16k8"], ["a", "b"], ["tf32"])
+        + make_ldst_ops(["m16n16k8"], ["c", "d"], ["f32"])
+    )
+    return [x for x in ldst_ops if (x.frag == "d") == (kind == "store")]
+
 
 def get_ldmatrix_ops():
-  return make_ldmatrix_ops(["m8n8"], ["x1", "x2", "x4"], ["b16"])
+    return make_ldmatrix_ops(["m8n8"], ["x1", "x2", "x4"], ["b16"])
+
 
 def is_wmma_geom_supported(geom):
-  # geometries for FP and ints.
-  if geom in ["m8n32k16", "m32n8k16"]:
-    return ptx_version >= 61
-  # geometries for sub-ints.
-  if geom in ["m8n8k32", "m8n8k128"]:
-    return ptx_version >= 63 and gpu_arch >= 75
-  if geom == "m16n16k16":
-    return ptx_version >= 60
-  if geom == "m16n8k8":
-    return ptx_version >= 65
-  if geom in ["m16n16k8", "m8n8k4"]:
-    return ptx_version >= 70
-  assert(False) # Unexpected geometry.
+    # geometries for FP and ints.
+    if geom in ["m8n32k16", "m32n8k16"]:
+        return ptx_version >= 61
+    # geometries for sub-ints.
+    if geom in ["m8n8k32", "m8n8k128"]:
+        return ptx_version >= 63 and gpu_arch >= 75
+    if geom == "m16n16k16":
+        return ptx_version >= 60
+    if geom == "m16n8k8":
+        return ptx_version >= 65
+    if geom in ["m16n16k8", "m8n8k4"]:
+        return ptx_version >= 70
+    assert False  # Unexpected geometry.
+
 
 def is_mma_geom_supported(geom):
-  # geometries for FP and ints.
-  if geom == "m8n8k4":
-    return ptx_version >= 64
-  if geom in ["m16n8k8", "m8n8k16", "m8n8k32"]:
-    return ptx_version >= 65
-  if geom in ["m16n8k16", "m16n8k4", "m16n8k32", "m16n8k64", "m8n8k128",
-              "m16n8k128", "m16n8k256"]:
-    return ptx_version >= 70
-  assert(False) # Unexpected geometry.
+    # geometries for FP and ints.
+    if geom == "m8n8k4":
+        return ptx_version >= 64
+    if geom in ["m16n8k8", "m8n8k16", "m8n8k32"]:
+        return ptx_version >= 65
+    if geom in [
+        "m16n8k16",
+        "m16n8k4",
+        "m16n8k32",
+        "m16n8k64",
+        "m8n8k128",
+        "m16n8k128",
+        "m16n8k256",
+    ]:
+        return ptx_version >= 70
+    assert False  # Unexpected geometry.
+
 
 def is_ldmatrix_geom_supported(geom):
-  if geom in ["m8n8"]:
-    return ptx_version >= 65 and gpu_arch >= 75
-  assert(False) # Unexpected geometry.
+    if geom in ["m8n8"]:
+        return ptx_version >= 65 and gpu_arch >= 75
+    assert False  # Unexpected geometry.
+
 
 def is_type_supported(ptx_type):
-  if ptx_type in ["s8", "u8", "s32"]:
-    return ptx_version >= 63 and gpu_arch >= 72
-  if ptx_type in ["s4", "u4", "b1"]:
-    return ptx_version >= 63 and gpu_arch >= 75
-  if ptx_type == "b16":
-    return ptx_version >= 65 and gpu_arch >= 75
-  if ptx_type in ["bf16", "tf32", "f64"]:
-    return ptx_version >= 70
-  return ptx_version >= 60 and gpu_arch >= 70
+    if ptx_type in ["s8", "u8", "s32"]:
+        return ptx_version >= 63 and gpu_arch >= 72
+    if ptx_type in ["s4", "u4", "b1"]:
+        return ptx_version >= 63 and gpu_arch >= 75
+    if ptx_type == "b16":
+        return ptx_version >= 65 and gpu_arch >= 75
+    if ptx_type in ["bf16", "tf32", "f64"]:
+        return ptx_version >= 70
+    return ptx_version >= 60 and gpu_arch >= 70
+
 
 def is_wmma_variant_supported(op, layout_a, layout_b, rnd, satf):
-  if not (is_type_supported(op.a.mma_type.ptx_type)
-          and is_wmma_geom_supported(op.a.geom)):
-    return False
-
-  # rnd is only supported for FP64 WMMA
-  if rnd and op.a.mma_type.ptx_type != "f64":
-    return False
-
-  if satf:
-    # satfinite for floating points was removed in PTX 6.5
-    if op.a.mma_type.ptx_type == "f16" and ptx_version >= 65:
-      return False
-    if not op.a.mma_type.ptx_type in ["f16", "s8", "u8", "s4", "u4"]:
-      return False
-
-  # sub-integer require row/col layout.
-  if op.a.mma_type.ptx_type in ["s4", "u4", "b1"]:
-    return layout_a == "row" and layout_b == "col"
-  return True
+    if not (
+        is_type_supported(op.a.mma_type.ptx_type) and is_wmma_geom_supported(op.a.geom)
+    ):
+        return False
+
+    # rnd is only supported for FP64 WMMA
+    if rnd and op.a.mma_type.ptx_type != "f64":
+        return False
+
+    if satf:
+        # satfinite for floating points was removed in PTX 6.5
+        if op.a.mma_type.ptx_type == "f16" and ptx_version >= 65:
+            return False
+        if not op.a.mma_type.ptx_type in ["f16", "s8", "u8", "s4", "u4"]:
+            return False
+
+    # sub-integer require row/col layout.
+    if op.a.mma_type.ptx_type in ["s4", "u4", "b1"]:
+        return layout_a == "row" and layout_b == "col"
+    return True
+
 
 def is_mma_variant_supported(op, layout_a, layout_b, satf):
-  if not (is_type_supported(op.a.mma_type.ptx_type)
-          and is_mma_geom_supported(op.a.geom)):
-    return False
-
-  if satf and not op.a.mma_type.ptx_type in ["s8", "u8", "s4", "u4"]:
-    return False
-
-  # If the type of C is f32 then so must the type of D
-  if (op.a.geom == "m8n8k4" and op.c.mma_type.ptx_type == "f32"
-      and op.d.mma_type.ptx_type != "f32"):
-    return False
-
-  # A and B type must be the same. C and D type must be the same
-  if (op.a.geom == "m16n8k8"
-        and (op.a.mma_type.ptx_type != op.b.mma_type.ptx_type
-             or op.c.mma_type.ptx_type != op.d.mma_type.ptx_type)):
-      return False
-
-  # C and D type must be the same
-  if (op.a.geom == "m16n8k16"
-      and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type):
-      return False
-
-  # Require row/col layout for all MMA except m8n8k4 on FP16
-  if not (op.a.geom == "m8n8k4" and op.a.mma_type.ptx_type == "f16"):
-    return layout_a == "row" and layout_b == "col"
-  return True
+    if not (
+        is_type_supported(op.a.mma_type.ptx_type) and is_mma_geom_supported(op.a.geom)
+    ):
+        return False
+
+    if satf and not op.a.mma_type.ptx_type in ["s8", "u8", "s4", "u4"]:
+        return False
+
+    # If the type of C is f32 then so must the type of D
+    if (
+        op.a.geom == "m8n8k4"
+        and op.c.mma_type.ptx_type == "f32"
+        and op.d.mma_type.ptx_type != "f32"
+    ):
+        return False
+
+    # A and B type must be the same. C and D type must be the same
+    if op.a.geom == "m16n8k8" and (
+        op.a.mma_type.ptx_type != op.b.mma_type.ptx_type
+        or op.c.mma_type.ptx_type != op.d.mma_type.ptx_type
+    ):
+        return False
+
+    # C and D type must be the same
+    if op.a.geom == "m16n8k16" and op.c.mma_type.ptx_type != op.d.mma_type.ptx_type:
+        return False
+
+    # Require row/col layout for all MMA except m8n8k4 on FP16
+    if not (op.a.geom == "m8n8k4" and op.a.mma_type.ptx_type == "f16"):
+        return layout_a == "row" and layout_b == "col"
+    return True
+
 
 def is_ldst_variant_supported(frag, layout):
-  if not (is_type_supported(frag.mma_type.ptx_type)
-          and is_wmma_geom_supported(frag.geom)):
-    return False
-  if frag.mma_type.ptx_type in ["s4", "u4", "b1"]:
-    # sub-integer require sm_75 and ptx63, row/col layout for a/b.
-    return ((frag.frag == "a" and layout == "row")
+    if not (
+        is_type_supported(frag.mma_type.ptx_type) and is_wmma_geom_supported(frag.geom)
+    ):
+        return False
+    if frag.mma_type.ptx_type in ["s4", "u4", "b1"]:
+        # sub-integer require sm_75 and ptx63, row/col layout for a/b.
+        return (
+            (frag.frag == "a" and layout == "row")
             or (frag.frag == "b" and layout == "col")
-            or frag.frag in ["c", "d"])
-  return True
+            or frag.frag in ["c", "d"]
+        )
+    return True
+
 
 def is_ldmatrix_variant_supported(frag):
-  if not (is_type_supported(frag.mma_type.ptx_type)
-          and is_ldmatrix_geom_supported(frag.geom)):
-    return False
-  return frag.frag in ["x1", "x2", "x4"]
+    if not (
+        is_type_supported(frag.mma_type.ptx_type)
+        and is_ldmatrix_geom_supported(frag.geom)
+    ):
+        return False
+    return frag.frag in ["x1", "x2", "x4"]
+
 
 def make_wmma_slice_ty(frag):
-  return [frag.mma_type.llvm_type] * frag.nregs
+    return [frag.mma_type.llvm_type] * frag.nregs
+
 
 def make_wmma_ld_ret_ty(frag):
-  results = make_wmma_slice_ty(frag)
-  if len(results) == 1:
-    return "%s" % results[0]
-  return "{%s}" % ", ".join(results)
+    results = make_wmma_slice_ty(frag)
+    if len(results) == 1:
+        return "%s" % results[0]
+    return "{%s}" % ", ".join(results)
+
 
 # returns address space
 def get_aspace(space):
-  space_map = {
-      ".global" : 1,
-      ".shared" : 3,
-      ".const"  : 4,
-      ".local"  : 5,
-      ".param"  : 101,
-      ""        : 0,
-      ".generic": 0
-  }
-  return space_map[space];
+    space_map = {
+        ".global": 1,
+        ".shared": 3,
+        ".const": 4,
+        ".local": 5,
+        ".param": 101,
+        "": 0,
+        ".generic": 0,
+    }
+    return space_map[space]
+
 
 def get_pspace(space):
-  return "p%di8" % get_aspace(space);
+    return "p%di8" % get_aspace(space)
+
 
 def check_pattern(frag):
-   return "{{%s}}" % ", *".join([frag.mma_type.ptx_reg_pattern] * frag.nregs)
+    return "{{%s}}" % ", *".join([frag.mma_type.ptx_reg_pattern] * frag.nregs)
+
 
 def gen_wmma_load_tests():
-  load_template = """
+    load_template = """
 declare ${ret_ty} @${intrinsic}(i8 ${as}* %src ${extra_args});
 
 ; CHECK-LABEL: .func {{.*}}test_${function}(
@@ -518,59 +571,68 @@ def gen_wmma_load_tests():
   ret ${ret_ty} %v0;
 }
 """
-  intrinsic_template = "llvm.nvvm.wmma.${geom}.load.${abc}.${layout}${stride}.${itype}.${pspace}"
-  instruction_template = "wmma.load.${abc}.sync${aligned}.${layout}.${geom}${space}.${itype}"
-
-  generated_items = []
-
-  for frag, layout, space, stride in product(
-      get_ldst_ops("load"),
-      ["row","col"],
-      ["",".shared",".global"],
-      ["", ".stride"],
-      ):
-    if not is_ldst_variant_supported(frag, layout):
-      continue
-
-    params = {
-        "abc" : frag.frag,
-        "aligned" : ".aligned" if ptx_version >= 63 else "",
-        "layout" : layout,
-        "space" : space,
-        "stride" : stride,
-        "itype" : frag.mma_type.ptx_type,
-        "pspace" : get_pspace(space),
-        "as"     : "addrspace(%d)" % get_aspace(space),
-        "geom"   : frag.geom,
-    }
-
-    test_params = params
-    test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
-    test_params["function"] = test_params["intrinsic"].replace(".","_")
-    test_params["instruction"] = Template(instruction_template).substitute(params)
-    test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
-    test_params["check_result"] = check_pattern(frag)
-
-    if stride:
-      test_params["extra_args"] = ", i32 %stride";
-      test_params["stride_pattern"] = ", %r{{[0-9]+}}"
-    else:
-      test_params["extra_args"] = ""
-      test_params["stride_pattern"] = ""
-
-    print(Template(load_template).substitute(test_params))
+    intrinsic_template = (
+        "llvm.nvvm.wmma.${geom}.load.${abc}.${layout}${stride}.${itype}.${pspace}"
+    )
+    instruction_template = (
+        "wmma.load.${abc}.sync${aligned}.${layout}.${geom}${space}.${itype}"
+    )
+
+    generated_items = []
+
+    for frag, layout, space, stride in product(
+        get_ldst_ops("load"),
+        ["row", "col"],
+        ["", ".shared", ".global"],
+        ["", ".stride"],
+    ):
+        if not is_ldst_variant_supported(frag, layout):
+            continue
+
+        params = {
+            "abc": frag.frag,
+            "aligned": ".aligned" if ptx_version >= 63 else "",
+            "layout": layout,
+            "space": space,
+            "stride": stride,
+            "itype": frag.mma_type.ptx_type,
+            "pspace": get_pspace(space),
+            "as": "addrspace(%d)" % get_aspace(space),
+            "geom": frag.geom,
+        }
+
+        test_params = params
+        test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+        test_params["function"] = test_params["intrinsic"].replace(".", "_")
+        test_params["instruction"] = Template(instruction_template).substitute(params)
+        test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
+        test_params["check_result"] = check_pattern(frag)
+
+        if stride:
+            test_params["extra_args"] = ", i32 %stride"
+            test_params["stride_pattern"] = ", %r{{[0-9]+}}"
+        else:
+            test_params["extra_args"] = ""
+            test_params["stride_pattern"] = ""
+
+        print(Template(load_template).substitute(test_params))
+
+        generated_items.append((test_params["intrinsic"], test_params["instruction"]))
+
+    return generated_items
 
-    generated_items.append((test_params["intrinsic"],
-                            test_params["instruction"]))
-
-  return generated_items
 
 def make_wmma_slice_args(frag):
-  return ", ".join(["%s %%%s%d" % (t, frag.frag, i) for i,t
-                  in enumerate(make_wmma_slice_ty(frag))])
+    return ", ".join(
+        [
+            "%s %%%s%d" % (t, frag.frag, i)
+            for i, t in enumerate(make_wmma_slice_ty(frag))
+        ]
+    )
+
 
 def gen_wmma_store_tests():
-  store_template = """
+    store_template = """
 declare void @${intrinsic}(i8 ${as}* %src, ${args}${extra_args});
 
 ; CHECK-LABEL: .func {{.*}}test_${function}(
@@ -592,54 +654,59 @@ def gen_wmma_store_tests():
   ret void
 }
 """
-  intrinsic_template = "llvm.nvvm.wmma.${geom}.store.${abc}.${layout}${stride}.${itype}.${pspace}"
-  instruction_template = "wmma.store.${abc}.sync${aligned}.${layout}.${geom}${space}.${itype}"
-
-  generated_items = []
-
-  for frag, layout, space, stride in product(
-      get_ldst_ops("store"),
-      ["row","col"],
-      ["",".shared",".global"],
-      ["", ".stride"]):
-
-    if not is_ldst_variant_supported(frag, layout):
-      continue
-
-    params = {
-        "abc" : frag.frag,
-        "aligned" : ".aligned" if ptx_version >= 63 else "",
-        "layout" : layout,
-        "space" : space,
-        "stride" : stride,
-        "itype" : frag.mma_type.ptx_type,
-        "pspace" : get_pspace(space),
-        "as"     : "addrspace(%d)" % get_aspace(space),
-        "geom"   : frag.geom,
-    }
-
-    test_params = params
-    test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
-    test_params["function"] = test_params["intrinsic"].replace(".","_")
-    test_params["instruction"] = Template(instruction_template).substitute(params)
-    test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
-    test_params["check_args"] = check_pattern(frag)
-    if stride:
-      test_params["extra_args"] = ", i32 %stride";
-      test_params["stride_pattern"] = ", %r{{[0-9]+}};"
-    else:
-      test_params["extra_args"] = ""
-      test_params["stride_pattern"] = ";"
-    test_params["args"] = make_wmma_slice_args(frag);
-
-    print(Template(store_template).substitute(test_params))
-    generated_items.append((test_params["intrinsic"],
-                            test_params["instruction"]))
+    intrinsic_template = (
+        "llvm.nvvm.wmma.${geom}.store.${abc}.${layout}${stride}.${itype}.${pspace}"
+    )
+    instruction_template = (
+        "wmma.store.${abc}.sync${aligned}.${layout}.${geom}${space}.${itype}"
+    )
+
+    generated_items = []
+
+    for frag, layout, space, stride in product(
+        get_ldst_ops("store"),
+        ["row", "col"],
+        ["", ".shared", ".global"],
+        ["", ".stride"],
+    ):
+
+        if not is_ldst_variant_supported(frag, layout):
+            continue
+
+        params = {
+            "abc": frag.frag,
+            "aligned": ".aligned" if ptx_version >= 63 else "",
+            "layout": layout,
+            "space": space,
+            "stride": stride,
+            "itype": frag.mma_type.ptx_type,
+            "pspace": get_pspace(space),
+            "as": "addrspace(%d)" % get_aspace(space),
+            "geom": frag.geom,
+        }
+
+        test_params = params
+        test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+        test_params["function"] = test_params["intrinsic"].replace(".", "_")
+        test_params["instruction"] = Template(instruction_template).substitute(params)
+        test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
+        test_params["check_args"] = check_pattern(frag)
+        if stride:
+            test_params["extra_args"] = ", i32 %stride"
+            test_params["stride_pattern"] = ", %r{{[0-9]+}};"
+        else:
+            test_params["extra_args"] = ""
+            test_params["stride_pattern"] = ";"
+        test_params["args"] = make_wmma_slice_args(frag)
+
+        print(Template(store_template).substitute(test_params))
+        generated_items.append((test_params["intrinsic"], test_params["instruction"]))
+
+    return generated_items
 
-  return generated_items
 
 def gen_ldmatrix_tests():
-  ldmatrix_template = """
+    ldmatrix_template = """
 declare ${ret_ty} @${intrinsic}(i8 ${as}* %src);
 
 ; CHECK-LABEL: .func {{.*}}test_${function}(
@@ -661,76 +728,84 @@ def gen_ldmatrix_tests():
   ret ${ret_ty} %v0;
 }
 """
-  intrinsic_template = "llvm.nvvm.ldmatrix.sync.aligned.${geom}.${frag}${trans}.${itype}.${pspace}"
-  instruction_template = "ldmatrix.sync.aligned.${geom}.${frag}${trans}${space}.${itype}"
-
-  generated_items = []
-
-  for frag, space, trans in product(
-      get_ldmatrix_ops(),
-      ["",".shared"],
-      ["",".trans"],
-      ):
-    if not is_ldmatrix_variant_supported(frag):
-      continue
-
-    params = {
-        "frag" : frag.frag,
-        "space" : space,
-        "trans" : trans,
-        "itype" : frag.mma_type.ptx_type,
-        "pspace" : get_pspace(space),
-        "as"     : "addrspace(%d)" % get_aspace(space),
-        "geom"   : frag.geom,
-    }
-
-    test_params = params
-    test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
-    test_params["function"] = test_params["intrinsic"].replace(".","_")
-    test_params["instruction"] = Template(instruction_template).substitute(params)
-    test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
-    test_params["check_result"] = check_pattern(frag)
+    intrinsic_template = (
+        "llvm.nvvm.ldmatrix.sync.aligned.${geom}.${frag}${trans}.${itype}.${pspace}"
+    )
+    instruction_template = (
+        "ldmatrix.sync.aligned.${geom}.${frag}${trans}${space}.${itype}"
+    )
+
+    generated_items = []
+
+    for frag, space, trans in product(
+        get_ldmatrix_ops(),
+        ["", ".shared"],
+        ["", ".trans"],
+    ):
+        if not is_ldmatrix_variant_supported(frag):
+            continue
+
+        params = {
+            "frag": frag.frag,
+            "space": space,
+            "trans": trans,
+            "itype": frag.mma_type.ptx_type,
+            "pspace": get_pspace(space),
+            "as": "addrspace(%d)" % get_aspace(space),
+            "geom": frag.geom,
+        }
+
+        test_params = params
+        test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+        test_params["function"] = test_params["intrinsic"].replace(".", "_")
+        test_params["instruction"] = Template(instruction_template).substitute(params)
+        test_params["ret_ty"] = make_wmma_ld_ret_ty(frag)
+        test_params["check_result"] = check_pattern(frag)
+
+        print(Template(ldmatrix_template).substitute(test_params))
+
+        generated_items.append((test_params["intrinsic"], test_params["instruction"]))
+
+    return generated_items
 
-    print(Template(ldmatrix_template).substitute(test_params))
-
-    generated_items.append((test_params["intrinsic"],
-                            test_params["instruction"]))
-
-  return generated_items
 
 def mma_signature(op):
-  if op.a.mma_type.ptx_type == "f16":
-    # FP16 ops identified by accumulator & result type.
-    return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
-  elif op.a.mma_type.ptx_type != op.b.mma_type.ptx_type:
-    # other ops are identified by input types.
-    return "%s.%s" % (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type)
-  else:
-    # if input types are the same, it only appears once.
-    return op.a.mma_type.ptx_type
+    if op.a.mma_type.ptx_type == "f16":
+        # FP16 ops identified by accumulator & result type.
+        return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
+    elif op.a.mma_type.ptx_type != op.b.mma_type.ptx_type:
+        # other ops are identified by input types.
+        return "%s.%s" % (op.a.mma_type.ptx_type, op.b.mma_type.ptx_type)
+    else:
+        # if input types are the same, it only appears once.
+        return op.a.mma_type.ptx_type
+
 
 def mma_ptx_signature(op):
-  # Encode all four types as D.A.B.C
-  return ".".join(x.mma_type.ptx_type for x in (op.d, op.a, op.b, op.c))
+    # Encode all four types as D.A.B.C
+    return ".".join(x.mma_type.ptx_type for x in (op.d, op.a, op.b, op.c))
+
 
 def wmma_signature(op):
-  if op.a.mma_type.ptx_type == "f16":
-    # FP16 ops identified by accumulator & result type.
-    return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
-  else:
-    # other ops are identified by input type.
-    return op.a.mma_type.ptx_type
+    if op.a.mma_type.ptx_type == "f16":
+        # FP16 ops identified by accumulator & result type.
+        return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
+    else:
+        # other ops are identified by input type.
+        return op.a.mma_type.ptx_type
+
 
 def wmma_ptx_signature(op):
-  if op.a.mma_type.ptx_type == "f16":
-    # FP16 instructions use D.C
-    return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
-  else:
-    # other instructions encode all four types as D.A.B.C
-    return ".".join(x.mma_type.ptx_type for x in (op.d, op.a, op.b, op.c))
+    if op.a.mma_type.ptx_type == "f16":
+        # FP16 instructions use D.C
+        return "%s.%s" % (op.d.mma_type.ptx_type, op.c.mma_type.ptx_type)
+    else:
+        # other instructions encode all four types as D.A.B.C
+        return ".".join(x.mma_type.ptx_type for x in (op.d, op.a, op.b, op.c))
+
 
 def common_mma_test_gen(params, op, intrinsic_template, instruction_template):
-  mma_template = """
+    mma_template = """
 declare ${ret_ty} @${intrinsic}(
         ${args});
 
@@ -748,109 +823,120 @@ def common_mma_test_gen(params, op, intrinsic_template, instruction_template):
 }
 """
 
-  test_params = params
-  test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
-  test_params["function"] = test_params["intrinsic"].replace(".", "_")
-  test_params["instruction"] = Template(instruction_template).substitute(params)
-  test_params["ret_ty"] = make_wmma_ld_ret_ty(op.d)
-  test_params["check_a"] = check_pattern(op.a)
-  test_params["check_b"] = check_pattern(op.b)
-  test_params["check_c"] = check_pattern(op.c)
-  test_params["check_d"] = check_pattern(op.d)
-  args = ",\n        ".join(make_wmma_slice_args(frag)
-                            for frag in (op.a, op.b, op.c))
-  test_params["args"] = args
-  print(Template(mma_template).substitute(test_params))
-  return (test_params["intrinsic"], test_params["instruction"])
+    test_params = params
+    test_params["intrinsic"] = Template(intrinsic_template).substitute(params)
+    test_params["function"] = test_params["intrinsic"].replace(".", "_")
+    test_params["instruction"] = Template(instruction_template).substitute(params)
+    test_params["ret_ty"] = make_wmma_ld_ret_ty(op.d)
+    test_params["check_a"] = check_pattern(op.a)
+    test_params["check_b"] = check_pattern(op.b)
+    test_params["check_c"] = check_pattern(op.c)
+    test_params["check_d"] = check_pattern(op.d)
+    args = ",\n        ".join(make_wmma_slice_args(frag) for frag in (op.a, op.b, op.c))
+    test_params["args"] = args
+    print(Template(mma_template).substitute(test_params))
+    return (test_params["intrinsic"], test_params["instruction"])
+
 
 def get_b1_ops(ptx_type):
-  if ptx_type != "b1":
-    return [""]
-  if ptx_version >= 71:
-    return [".xor.popc", ".and.popc"]
-  return [".xor.popc"]
+    if ptx_type != "b1":
+        return [""]
+    if ptx_version >= 71:
+        return [".xor.popc", ".and.popc"]
+    return [".xor.popc"]
+
 
 def gen_wmma_mma_tests():
-  wmma_intrinsic_template = "llvm.nvvm.wmma.${geom}.mma${b1op}.${alayout}.${blayout}${rnd}.${intrinsic_signature}${satf}"
-  wmma_instruction_template = "wmma.mma${b1op}.sync${aligned}.${alayout}.${blayout}.${geom}${rnd}.${ptx_signature}${satf}"
-
-  generated_items=[]
-
-  for op, alayout, blayout, rnd, satf in product(
-      get_wmma_ops(),
-      ["row","col"],
-      ["row","col"],
-      [".rn", ".rz", ".rm", ".rp", ""],
-      [".satfinite", ""]):
-
-    if not is_wmma_variant_supported(op, alayout, blayout, rnd, satf):
-      continue
-
-    for b1op in get_b1_ops(op.a.mma_type.ptx_type):
-      params = {
-          "aligned" : ".aligned" if ptx_version >= 63 else "",
-          "alayout" : alayout,
-          "blayout" : blayout,
-          "intrinsic_signature" : wmma_signature(op),
-          "ptx_signature" : wmma_ptx_signature(op),
-          "satf"  : satf,
-          "rnd"   : rnd,
-          "geom"  : op.a.geom,
-          "b1op"  : b1op
-      }
-
-      intrinsic_template = wmma_intrinsic_template
-      instruction_template = wmma_instruction_template
-
-      generated_items.append(common_mma_test_gen(params, op,
-                                                 intrinsic_template, instruction_template))
-
-  return generated_items
+    wmma_intrinsic_template = "llvm.nvvm.wmma.${geom}.mma${b1op}.${alayout}.${blayout}${rnd}.${intrinsic_signature}${satf}"
+    wmma_instruction_template = "wmma.mma${b1op}.sync${aligned}.${alayout}.${blayout}.${geom}${rnd}.${ptx_signature}${satf}"
+
+    generated_items = []
+
+    for op, alayout, blayout, rnd, satf in product(
+        get_wmma_ops(),
+        ["row", "col"],
+        ["row", "col"],
+        [".rn", ".rz", ".rm", ".rp", ""],
+        [".satfinite", ""],
+    ):
+
+        if not is_wmma_variant_supported(op, alayout, blayout, rnd, satf):
+            continue
+
+        for b1op in get_b1_ops(op.a.mma_type.ptx_type):
+            params = {
+                "aligned": ".aligned" if ptx_version >= 63 else "",
+                "alayout": alayout,
+                "blayout": blayout,
+                "intrinsic_signature": wmma_signature(op),
+                "ptx_signature": wmma_ptx_signature(op),
+                "satf": satf,
+                "rnd": rnd,
+                "geom": op.a.geom,
+                "b1op": b1op,
+            }
+
+            intrinsic_template = wmma_intrinsic_template
+            instruction_template = wmma_instruction_template
+
+            generated_items.append(
+                common_mma_test_gen(
+                    params, op, intrinsic_template, instruction_template
+                )
+            )
+
+    return generated_items
+
 
 def gen_mma_tests():
-  mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${satf}.${intrinsic_signature}"
-  mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${satf}.${ptx_signature}${b1op}"
+    mma_intrinsic_template = "llvm.nvvm.mma${b1op}.${geom}.${alayout}.${blayout}${satf}.${intrinsic_signature}"
+    mma_instruction_template = "mma.sync${aligned}.${geom}.${alayout}.${blayout}${satf}.${ptx_signature}${b1op}"
 
-  generated_items=[]
+    generated_items = []
 
-  for op, alayout, blayout, satf in product(
-      get_mma_ops(),
-      ["row","col"],
-      ["row","col"],
-      [".satfinite", ""]):
+    for op, alayout, blayout, satf in product(
+        get_mma_ops(), ["row", "col"], ["row", "col"], [".satfinite", ""]
+    ):
 
-    if not is_mma_variant_supported(op, alayout, blayout, satf):
-      continue
+        if not is_mma_variant_supported(op, alayout, blayout, satf):
+            continue
 
-    for b1op in get_b1_ops(op.a.mma_type.ptx_type):
-      params = {
-          "aligned" : ".aligned" if ptx_version >= 63 else "",
-          "alayout" : alayout,
-          "blayout" : blayout,
-          "intrinsic_signature" : mma_signature(op),
-          "ptx_signature" : mma_ptx_signature(op),
-          "satf"  : satf,
-          "geom"  : op.a.geom,
-          "b1op"  : b1op
-      }
+        for b1op in get_b1_ops(op.a.mma_type.ptx_type):
+            params = {
+                "aligned": ".aligned" if ptx_version >= 63 else "",
+                "alayout": alayout,
+                "blayout": blayout,
+                "intrinsic_signature": mma_signature(op),
+                "ptx_signature": mma_ptx_signature(op),
+                "satf": satf,
+                "geom": op.a.geom,
+                "b1op": b1op,
+            }
 
-      intrinsic_template = mma_intrinsic_template
-      instruction_template = mma_instruction_template
+            intrinsic_template = mma_intrinsic_template
+            instruction_template = mma_instruction_template
 
-      generated_items.append(common_mma_test_gen(params, op,
-        intrinsic_template, instruction_template))
+            generated_items.append(
+                common_mma_test_gen(
+                    params, op, intrinsic_template, instruction_template
+                )
+            )
+
+    return generated_items
 
-  return generated_items
 
 # Append complete list of intrinsics and instructions we've generated tests for.
 # Generate set of checks to verify that that we did generate sensible set of
 # tests for the given combination of PTX and SM variants.
 #
 def gen_check_unsupported_ops(items):
-  print("; Complete list of intrinsics supported by PTX%d on sm_%d"
-        % (ptx_version, gpu_arch))
-  print("; INTRINSICS: {{^; INTRINSICS_LIST_BEGIN}}")
-  print("""
+    print(
+        "; Complete list of intrinsics supported by PTX%d on sm_%d"
+        % (ptx_version, gpu_arch)
+    )
+    print("; INTRINSICS: {{^; INTRINSICS_LIST_BEGIN}}")
+    print(
+        """
 
 ; NOEXTGEOM-NOT: {{m8n32|m32n8}}
 ; NOINT-NOT: .{{s32|s8}}
@@ -978,21 +1064,24 @@ def gen_check_unsupported_ops(items):
 ; PTX71MMA-DAG: mma.xor.popc.m16n8k256.row.col.b1
 ;
 
-""")
+"""
+    )
+
+    print("; INTRINSICS_LIST_BEGIN")
+    for intrinsic, instruction in sorted(items):
+        print("; ", intrinsic, " -> ", instruction, "")
+    print("; INTRINSICS_LIST_END")
+    print("; INTRINSICS: ; INTRINSICS_LIST_END")
 
-  print("; INTRINSICS_LIST_BEGIN")
-  for intrinsic, instruction in sorted(items):
-    print("; ", intrinsic, " -> ", instruction,"")
-  print("; INTRINSICS_LIST_END")
-  print("; INTRINSICS: ; INTRINSICS_LIST_END")
 
 def gen_tests():
-  items = gen_wmma_load_tests()
-  items += gen_wmma_store_tests()
-  items += gen_ldmatrix_tests()
-  items += gen_wmma_mma_tests()
-  items += gen_mma_tests()
-  gen_check_unsupported_ops(items)
+    items = gen_wmma_load_tests()
+    items += gen_wmma_store_tests()
+    items += gen_ldmatrix_tests()
+    items += gen_wmma_mma_tests()
+    items += gen_mma_tests()
+    gen_check_unsupported_ops(items)
+
 
 parser = argparse.ArgumentParser()
 parser.add_argument("--ptx", type=int, default=60)

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py
index 162cabdc3bb4b..7efa436fabb69 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-01.py
@@ -70,41 +70,41 @@
 from __future__ import print_function
 
 branch_blocks = 10
-main_size = 0xffd8
+main_size = 0xFFD8
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i32 *%stop, i32 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i32 *%stop, i32 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i32 , i32 *%%bstop%d' % (i, i))
-    print('  %%btest%d = icmp eq i32 %%limit, %%bcur%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i32, i32 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i32 , i32 *%%bstop%d" % (i, i))
+    print("  %%btest%d = icmp eq i32 %%limit, %%bcur%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i32 , i32 *%%astop%d' % (i, i))
-    print('  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i32 , i32 *%%astop%d" % (i, i))
+    print("  %%atest%d = icmp eq i32 %%limit, %%acur%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-02.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-02.py
index a636309963b27..b4d17a26d5e53 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-02.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-02.py
@@ -60,25 +60,25 @@
 
 blocks = 256 + 4
 
-print('define void @f1(i8 *%base, i32 *%stop, i32 %limit) {')
-print('entry:')
-print('  br label %b0')
-print('')
+print("define void @f1(i8 *%base, i32 *%stop, i32 %limit) {")
+print("entry:")
+print("  br label %b0")
+print("")
 
 a, b = 1, 1
 for i in range(blocks):
     a, b = b, a + b
     value = a % 256
-    next = 'b%d' % (i + 1) if i + 1 < blocks else 'end'
-    other = 'end' if 2 * i < blocks else 'b0'
-    print('b%d:' % i)
-    print('  store volatile i8 %d, i8 *%%base' % value)
-    print('  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i))
-    print('  %%acur%d = load i32 , i32 *%%astop%d' % (i, i))
-    print('  %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i))
-    print('  br i1 %%atest%d, label %%%s, label %%%s' % (i, other, next))
+    next = "b%d" % (i + 1) if i + 1 < blocks else "end"
+    other = "end" if 2 * i < blocks else "b0"
+    print("b%d:" % i)
+    print("  store volatile i8 %d, i8 *%%base" % value)
+    print("  %%astop%d = getelementptr i32, i32 *%%stop, i64 %d" % (i, i))
+    print("  %%acur%d = load i32 , i32 *%%astop%d" % (i, i))
+    print("  %%atest%d = icmp eq i32 %%limit, %%acur%d" % (i, i))
+    print("  br i1 %%atest%d, label %%%s, label %%%s" % (i, other, next))
 
-print('')
-print('%s:' % next)
-print('  ret void')
-print('}')
+print("")
+print("%s:" % next)
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-03.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-03.py
index 025cbdfeff737..44b7d51b19d10 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-03.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-03.py
@@ -70,43 +70,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop, i32 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop, i32 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
-    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
-    print('  %%btest%d = icmp eq i32 %%limit, %%bext%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i8 , i8 *%%bstop%d" % (i, i))
+    print("  %%bext%d = sext i8 %%bcur%d to i32" % (i, i))
+    print("  %%btest%d = icmp eq i32 %%limit, %%bext%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
-    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
-    print('  %%atest%d = icmp eq i32 %%limit, %%aext%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i8 , i8 *%%astop%d" % (i, i))
+    print("  %%aext%d = sext i8 %%acur%d to i32" % (i, i))
+    print("  %%atest%d = icmp eq i32 %%limit, %%aext%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-04.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-04.py
index 9292dc9110bdf..5298de996c498 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-04.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-04.py
@@ -74,43 +74,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop, i64 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop, i64 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
-    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
-    print('  %%btest%d = icmp eq i64 %%limit, %%bext%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i8 , i8 *%%bstop%d" % (i, i))
+    print("  %%bext%d = sext i8 %%bcur%d to i64" % (i, i))
+    print("  %%btest%d = icmp eq i64 %%limit, %%bext%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
-    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
-    print('  %%atest%d = icmp eq i64 %%limit, %%aext%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i8 , i8 *%%astop%d" % (i, i))
+    print("  %%aext%d = sext i8 %%acur%d to i64" % (i, i))
+    print("  %%atest%d = icmp eq i64 %%limit, %%aext%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-05.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-05.py
index 4c01f90ec2bc6..66fa4ca418cc9 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-05.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-05.py
@@ -74,41 +74,41 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bcur%d = load i8 , i8 *%%stop' % i)
-    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
-    print('  %%btest%d = icmp slt i32 %%bext%d, %d' % (i, i, i + 50))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bcur%d = load i8 , i8 *%%stop" % i)
+    print("  %%bext%d = sext i8 %%bcur%d to i32" % (i, i))
+    print("  %%btest%d = icmp slt i32 %%bext%d, %d" % (i, i, i + 50))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%acur%d = load i8 , i8 *%%stop' % i)
-    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
-    print('  %%atest%d = icmp slt i32 %%aext%d, %d' % (i, i, i + 100))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%acur%d = load i8 , i8 *%%stop" % i)
+    print("  %%aext%d = sext i8 %%acur%d to i32" % (i, i))
+    print("  %%atest%d = icmp slt i32 %%aext%d, %d" % (i, i, i + 100))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-06.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-06.py
index 45538870f66a3..bdccdeb48a3a4 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-06.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-06.py
@@ -74,41 +74,41 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bcur%d = load i8 , i8 *%%stop' % i)
-    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
-    print('  %%btest%d = icmp slt i64 %%bext%d, %d' % (i, i, i + 50))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bcur%d = load i8 , i8 *%%stop" % i)
+    print("  %%bext%d = sext i8 %%bcur%d to i64" % (i, i))
+    print("  %%btest%d = icmp slt i64 %%bext%d, %d" % (i, i, i + 50))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%acur%d = load i8 , i8 *%%stop' % i)
-    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
-    print('  %%atest%d = icmp slt i64 %%aext%d, %d' % (i, i, i + 100))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%acur%d = load i8 , i8 *%%stop" % i)
+    print("  %%aext%d = sext i8 %%acur%d to i64" % (i, i))
+    print("  %%atest%d = icmp slt i64 %%aext%d, %d" % (i, i, i + 100))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-07.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-07.py
index 4fd72d68d7d37..a00318f8ac119 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-07.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-07.py
@@ -35,36 +35,40 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffd8
+main_size = 0xFFD8
 
-print('define void @f1(i8 *%base, i32 *%counts) {')
-print('entry:')
+print("define void @f1(i8 *%base, i32 *%counts) {")
+print("entry:")
 
 for i in range(branch_blocks - 1, -1, -1):
-    print('  %%countptr%d = getelementptr i32, i32 *%%counts, i64 %d' % (i, i))
-    print('  %%initcount%d = load i32 , i32 *%%countptr%d' % (i, i))
-    print('  br label %%loop%d' % i)
-    
-    print('loop%d:' % i)
-    block1 = 'entry' if i == branch_blocks - 1 else 'loop%d' % (i + 1)
-    block2 = 'loop0' if i == 0 else 'after%d' % (i - 1)
-    print(('  %%count%d = phi i32 [ %%initcount%d, %%%s ],'
-           ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2)))
+    print("  %%countptr%d = getelementptr i32, i32 *%%counts, i64 %d" % (i, i))
+    print("  %%initcount%d = load i32 , i32 *%%countptr%d" % (i, i))
+    print("  br label %%loop%d" % i)
+
+    print("loop%d:" % i)
+    block1 = "entry" if i == branch_blocks - 1 else "loop%d" % (i + 1)
+    block2 = "loop0" if i == 0 else "after%d" % (i - 1)
+    print(
+        (
+            "  %%count%d = phi i32 [ %%initcount%d, %%%s ],"
+            " [ %%nextcount%d, %%%s ]" % (i, i, block1, i, block2)
+        )
+    )
 
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%nextcount%d = add i32 %%count%d, -1' % (i, i))
-    print('  %%test%d = icmp ne i32 %%nextcount%d, 0' % (i, i))
-    print('  br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%nextcount%d = add i32 %%count%d, -1" % (i, i))
+    print("  %%test%d = icmp ne i32 %%nextcount%d, 0" % (i, i))
+    print("  br i1 %%test%d, label %%loop%d, label %%after%d" % (i, i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  ret void')
-print('}')
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-08.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-08.py
index 7e6e2bf9d039f..dbde991fd65cd 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-08.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-08.py
@@ -36,36 +36,40 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffd8
+main_size = 0xFFD8
 
-print('define void @f1(i8 *%base, i64 *%counts) {')
-print('entry:')
+print("define void @f1(i8 *%base, i64 *%counts) {")
+print("entry:")
 
 for i in range(branch_blocks - 1, -1, -1):
-    print('  %%countptr%d = getelementptr i64, i64 *%%counts, i64 %d' % (i, i))
-    print('  %%initcount%d = load i64 , i64 *%%countptr%d' % (i, i))
-    print('  br label %%loop%d' % i)
-    
-    print('loop%d:' % i)
-    block1 = 'entry' if i == branch_blocks - 1 else 'loop%d' % (i + 1)
-    block2 = 'loop0' if i == 0 else 'after%d' % (i - 1)
-    print(('  %%count%d = phi i64 [ %%initcount%d, %%%s ],'
-           ' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2)))
+    print("  %%countptr%d = getelementptr i64, i64 *%%counts, i64 %d" % (i, i))
+    print("  %%initcount%d = load i64 , i64 *%%countptr%d" % (i, i))
+    print("  br label %%loop%d" % i)
+
+    print("loop%d:" % i)
+    block1 = "entry" if i == branch_blocks - 1 else "loop%d" % (i + 1)
+    block2 = "loop0" if i == 0 else "after%d" % (i - 1)
+    print(
+        (
+            "  %%count%d = phi i64 [ %%initcount%d, %%%s ],"
+            " [ %%nextcount%d, %%%s ]" % (i, i, block1, i, block2)
+        )
+    )
 
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%nextcount%d = add i64 %%count%d, -1' % (i, i))
-    print('  %%test%d = icmp ne i64 %%nextcount%d, 0' % (i, i))
-    print('  br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%nextcount%d = add i64 %%count%d, -1" % (i, i))
+    print("  %%test%d = icmp ne i64 %%nextcount%d, 0" % (i, i))
+    print("  br i1 %%test%d, label %%loop%d, label %%after%d" % (i, i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  ret void')
-print('}')
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-09.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-09.py
index cfdf31d43b636..a2e63686f1ec0 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-09.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-09.py
@@ -70,43 +70,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop, i32 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop, i32 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
-    print('  %%bext%d = sext i8 %%bcur%d to i32' % (i, i))
-    print('  %%btest%d = icmp ult i32 %%limit, %%bext%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i8 , i8 *%%bstop%d" % (i, i))
+    print("  %%bext%d = sext i8 %%bcur%d to i32" % (i, i))
+    print("  %%btest%d = icmp ult i32 %%limit, %%bext%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
-    print('  %%aext%d = sext i8 %%acur%d to i32' % (i, i))
-    print('  %%atest%d = icmp ult i32 %%limit, %%aext%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i8 , i8 *%%astop%d" % (i, i))
+    print("  %%aext%d = sext i8 %%acur%d to i32" % (i, i))
+    print("  %%atest%d = icmp ult i32 %%limit, %%aext%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-10.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-10.py
index 4b7ba24388b7f..3710e8ebb80b8 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-10.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-10.py
@@ -74,43 +74,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffcc
+main_size = 0xFFCC
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i8 *%stop, i64 %limit) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i8 *%stop, i64 %limit) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i))
-    print('  %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i))
-    print('  %%bext%d = sext i8 %%bcur%d to i64' % (i, i))
-    print('  %%btest%d = icmp ult i64 %%limit, %%bext%d' % (i, i))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i))
+    print("  %%bcur%d = load i8 , i8 *%%bstop%d" % (i, i))
+    print("  %%bext%d = sext i8 %%bcur%d to i64" % (i, i))
+    print("  %%btest%d = icmp ult i64 %%limit, %%bext%d" % (i, i))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25))
-    print('  %%acur%d = load i8 , i8 *%%astop%d' % (i, i))
-    print('  %%aext%d = sext i8 %%acur%d to i64' % (i, i))
-    print('  %%atest%d = icmp ult i64 %%limit, %%aext%d' % (i, i))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%astop%d = getelementptr i8, i8 *%%stop, i64 %d" % (i, i + 25))
+    print("  %%acur%d = load i8 , i8 *%%astop%d" % (i, i))
+    print("  %%aext%d = sext i8 %%acur%d to i64" % (i, i))
+    print("  %%atest%d = icmp ult i64 %%limit, %%aext%d" % (i, i))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-11.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-11.py
index 1d330a3c412ce..edfac100a739c 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-11.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-11.py
@@ -90,43 +90,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffc6
+main_size = 0xFFC6
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i32 *%stopa, i32 *%stopb) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i32 *%stopa, i32 *%stopb) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bcur%da = load i32 , i32 *%%stopa' % i)
-    print('  %%bcur%db = load i32 , i32 *%%stopb' % i)
-    print('  %%bsub%d = sub i32 %%bcur%da, %%bcur%db' % (i, i, i))
-    print('  %%btest%d = icmp ult i32 %%bsub%d, %d' % (i, i, i + 50))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bcur%da = load i32 , i32 *%%stopa" % i)
+    print("  %%bcur%db = load i32 , i32 *%%stopb" % i)
+    print("  %%bsub%d = sub i32 %%bcur%da, %%bcur%db" % (i, i, i))
+    print("  %%btest%d = icmp ult i32 %%bsub%d, %d" % (i, i, i + 50))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%acur%da = load i32 , i32 *%%stopa' % i)
-    print('  %%acur%db = load i32 , i32 *%%stopb' % i)
-    print('  %%asub%d = sub i32 %%acur%da, %%acur%db' % (i, i, i))
-    print('  %%atest%d = icmp ult i32 %%asub%d, %d' % (i, i, i + 100))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%acur%da = load i32 , i32 *%%stopa" % i)
+    print("  %%acur%db = load i32 , i32 *%%stopb" % i)
+    print("  %%asub%d = sub i32 %%acur%da, %%acur%db" % (i, i, i))
+    print("  %%atest%d = icmp ult i32 %%asub%d, %d" % (i, i, i + 100))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-12.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-12.py
index 56155077e1f4c..2d0d875095da8 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-12.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-12.py
@@ -90,43 +90,43 @@
 from __future__ import print_function
 
 branch_blocks = 8
-main_size = 0xffb4
+main_size = 0xFFB4
 
-print('@global = global i32 0')
+print("@global = global i32 0")
 
-print('define void @f1(i8 *%base, i64 *%stopa, i64 *%stopb) {')
-print('entry:')
-print('  br label %before0')
-print('')
+print("define void @f1(i8 *%base, i64 *%stopa, i64 *%stopb) {")
+print("entry:")
+print("  br label %before0")
+print("")
 
 for i in range(branch_blocks):
-    next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
-    print('before%d:' % i)
-    print('  %%bcur%da = load i64 , i64 *%%stopa' % i)
-    print('  %%bcur%db = load i64 , i64 *%%stopb' % i)
-    print('  %%bsub%d = sub i64 %%bcur%da, %%bcur%db' % (i, i, i))
-    print('  %%btest%d = icmp ult i64 %%bsub%d, %d' % (i, i, i + 50))
-    print('  br i1 %%btest%d, label %%after0, label %%%s' % (i, next))
-    print('')
+    next = "before%d" % (i + 1) if i + 1 < branch_blocks else "main"
+    print("before%d:" % i)
+    print("  %%bcur%da = load i64 , i64 *%%stopa" % i)
+    print("  %%bcur%db = load i64 , i64 *%%stopb" % i)
+    print("  %%bsub%d = sub i64 %%bcur%da, %%bcur%db" % (i, i, i))
+    print("  %%btest%d = icmp ult i64 %%bsub%d, %d" % (i, i, i + 50))
+    print("  br i1 %%btest%d, label %%after0, label %%%s" % (i, next))
+    print("")
 
-print('%s:' % next)
+print("%s:" % next)
 a, b = 1, 1
 for i in range(0, main_size, 6):
     a, b = b, a + b
     offset = 4096 + b % 500000
     value = a % 256
-    print('  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset))
-    print('  store volatile i8 %d, i8 *%%ptr%d' % (value, i))
+    print("  %%ptr%d = getelementptr i8, i8 *%%base, i64 %d" % (i, offset))
+    print("  store volatile i8 %d, i8 *%%ptr%d" % (value, i))
 
 for i in range(branch_blocks):
-    print('  %%acur%da = load i64 , i64 *%%stopa' % i)
-    print('  %%acur%db = load i64 , i64 *%%stopb' % i)
-    print('  %%asub%d = sub i64 %%acur%da, %%acur%db' % (i, i, i))
-    print('  %%atest%d = icmp ult i64 %%asub%d, %d' % (i, i, i + 100))
-    print('  br i1 %%atest%d, label %%main, label %%after%d' % (i, i))
-    print('')
-    print('after%d:' % i)
+    print("  %%acur%da = load i64 , i64 *%%stopa" % i)
+    print("  %%acur%db = load i64 , i64 *%%stopb" % i)
+    print("  %%asub%d = sub i64 %%acur%da, %%acur%db" % (i, i, i))
+    print("  %%atest%d = icmp ult i64 %%asub%d, %d" % (i, i, i + 100))
+    print("  br i1 %%atest%d, label %%main, label %%after%d" % (i, i))
+    print("")
+    print("after%d:" % i)
 
-print('  %dummy = load volatile i32, i32 *@global')
-print('  ret void')
-print('}')
+print("  %dummy = load volatile i32, i32 *@global")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/branch-range-13.py b/llvm/test/CodeGen/SystemZ/Large/branch-range-13.py
index 84410dbfba083..5fdc63bff2a71 100644
--- a/llvm/test/CodeGen/SystemZ/Large/branch-range-13.py
+++ b/llvm/test/CodeGen/SystemZ/Large/branch-range-13.py
@@ -24,18 +24,20 @@
 
 num = 11000
 
-print('define void @f1() {')
-print('entry:')
-print('  br label %block')
-print('')
-print('block:')
+print("define void @f1() {")
+print("entry:")
+print("  br label %block")
+print("")
+print("block:")
 
 for i in range(num):
-    print('  tail call i64 asm "lang\\09$0,$2,$1\\0A", "=d,=*Q,d,*Q"(i32* elementtype(i32) undef, i32 undef, i32* elementtype(i32) undef)')
+    print(
+        '  tail call i64 asm "lang\\09$0,$2,$1\\0A", "=d,=*Q,d,*Q"(i32* elementtype(i32) undef, i32 undef, i32* elementtype(i32) undef)'
+    )
 
-print('  br label %block')
+print("  br label %block")
 
-print('')
-print('exit:')
-print('  ret void')
-print('}')
+print("")
+print("exit:")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/spill-01.py b/llvm/test/CodeGen/SystemZ/Large/spill-01.py
index c1b094e36e5f7..42e8d9f5a05c6 100644
--- a/llvm/test/CodeGen/SystemZ/Large/spill-01.py
+++ b/llvm/test/CodeGen/SystemZ/Large/spill-01.py
@@ -23,21 +23,21 @@
 
 count = 500
 
-print('declare void @foo()')
-print('')
-print('define void @f1(i64 *%base0, i64 *%base1) {')
+print("declare void @foo()")
+print("")
+print("define void @f1(i64 *%base0, i64 *%base1) {")
 
 for i in range(count):
-    print('  %%ptr%d = getelementptr i64, i64 *%%base%d, i64 %d' % (i, i % 2, i / 2))
-    print('  %%val%d = load i64 , i64 *%%ptr%d' % (i, i))
-    print('')
+    print("  %%ptr%d = getelementptr i64, i64 *%%base%d, i64 %d" % (i, i % 2, i / 2))
+    print("  %%val%d = load i64 , i64 *%%ptr%d" % (i, i))
+    print("")
 
-print('  call void @foo()')
-print('')
+print("  call void @foo()")
+print("")
 
 for i in range(count):
-    print('  store i64 %%val%d, i64 *%%ptr%d' % (i, i))
+    print("  store i64 %%val%d, i64 *%%ptr%d" % (i, i))
 
-print('')
-print('  ret void')
-print('}')
+print("")
+print("  ret void")
+print("}")

diff  --git a/llvm/test/CodeGen/SystemZ/Large/spill-02.py b/llvm/test/CodeGen/SystemZ/Large/spill-02.py
index aa2900b3d0489..d2ce2cfdbb3b1 100644
--- a/llvm/test/CodeGen/SystemZ/Large/spill-02.py
+++ b/llvm/test/CodeGen/SystemZ/Large/spill-02.py
@@ -24,53 +24,53 @@
 
 args = int((8168 - 160) / 8 + (5 - 1))
 
-print('declare i64 *@foo(i64 *%s)' % (', i64' * args))
-print('declare void @bar(i64 *)')
-print('')
-print('define i64 @f1(i64 %foo) {')
-print('entry:')
+print("declare i64 *@foo(i64 *%s)" % (", i64" * args))
+print("declare void @bar(i64 *)")
+print("")
+print("define i64 @f1(i64 %foo) {")
+print("entry:")
 
 # Make the allocation big, so that it goes at the top of the frame.
-print('  %array = alloca [1000 x i64]')
-print('  %area = getelementptr [1000 x i64], [1000 x i64] *%array, i64 0, i64 0')
-print('  %%base = call i64 *@foo(i64 *%%area%s)' % (', i64 0' * args))
-print('')
+print("  %array = alloca [1000 x i64]")
+print("  %area = getelementptr [1000 x i64], [1000 x i64] *%array, i64 0, i64 0")
+print("  %%base = call i64 *@foo(i64 *%%area%s)" % (", i64 0" * args))
+print("")
 
 # Make sure all GPRs are used.  One is needed for the stack pointer and
 # another for %base, so we need 14 live values.
 count = 14
 for i in range(count):
-    print('  %%ptr%d = getelementptr i64, i64 *%%base, i64 %d' % (i, i / 2))
-    print('  %%val%d = load volatile i64 , i64 *%%ptr%d' % (i, i))
-    print('')
+    print("  %%ptr%d = getelementptr i64, i64 *%%base, i64 %d" % (i, i / 2))
+    print("  %%val%d = load volatile i64 , i64 *%%ptr%d" % (i, i))
+    print("")
 
 # Encourage the register allocator to give preference to these %vals
 # by using them several times.
 for j in range(4):
     for i in range(count):
-        print('  store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i))
-    print('')
+        print("  store volatile i64 %%val%d, i64 *%%ptr%d" % (i, i))
+    print("")
 
 # Copy the incoming argument, which we expect to be spilled, to the frame
 # index for the alloca area.  Also throw in a volatile store, so that this
 # block cannot be reordered with the surrounding code.
-print('  %cond = icmp eq i64 %val0, %val1')
-print('  br i1 %cond, label %skip, label %fallthru')
-print('')
-print('fallthru:')
-print('  store i64 %foo, i64 *%area')
-print('  store volatile i64 %val0, i64 *%ptr0')
-print('  br label %skip')
-print('')
-print('skip:')
+print("  %cond = icmp eq i64 %val0, %val1")
+print("  br i1 %cond, label %skip, label %fallthru")
+print("")
+print("fallthru:")
+print("  store i64 %foo, i64 *%area")
+print("  store volatile i64 %val0, i64 *%ptr0")
+print("  br label %skip")
+print("")
+print("skip:")
 
 # Use each %val a few more times to emphasise the point, and to make sure
 # that they are live across the store of %foo.
 for j in range(4):
     for i in range(count):
-        print('  store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i))
-    print('')
+        print("  store volatile i64 %%val%d, i64 *%%ptr%d" % (i, i))
+    print("")
 
-print('  call void @bar(i64 *%area)')
-print('  ret i64 0')
-print('}')
+print("  call void @bar(i64 *%area)")
+print("  ret i64 0")
+print("}")

diff  --git a/llvm/test/CodeGen/WebAssembly/multivalue-stackify.py b/llvm/test/CodeGen/WebAssembly/multivalue-stackify.py
index 50aac5aba8758..7200facdfcf2a 100755
--- a/llvm/test/CodeGen/WebAssembly/multivalue-stackify.py
+++ b/llvm/test/CodeGen/WebAssembly/multivalue-stackify.py
@@ -36,183 +36,182 @@
 
 
 def get_num_defs(program):
-  num_defs = 0
-  for _, defs in program:
-    num_defs += len(defs)
-  return num_defs
+    num_defs = 0
+    for _, defs in program:
+        num_defs += len(defs)
+    return num_defs
 
 
 def possible_ops(program):
-  program_defs = get_num_defs(program)
-  for num_defs in range(MAX_PROGRAM_DEFS - program_defs + 1):
-    for num_uses in range(MAX_OP_USES + 1):
-      if num_defs == 0 and num_uses == 0:
-        continue
-      for uses in product(range(program_defs), repeat=num_uses):
-        yield uses, tuple(program_defs + i for i in range(num_defs))
+    program_defs = get_num_defs(program)
+    for num_defs in range(MAX_PROGRAM_DEFS - program_defs + 1):
+        for num_uses in range(MAX_OP_USES + 1):
+            if num_defs == 0 and num_uses == 0:
+                continue
+            for uses in product(range(program_defs), repeat=num_uses):
+                yield uses, tuple(program_defs + i for i in range(num_defs))
 
 
 def generate_programs():
-  queue = deque()
-  queue.append([])
-  program_id = 0
-  while True:
-    program = queue.popleft()
-    if len(program) == MAX_PROGRAM_OPS:
-      break
-    for op in possible_ops(program):
-      program_id += 1
-      new_program = program + [op]
-      queue.append(new_program)
-      yield program_id, new_program
+    queue = deque()
+    queue.append([])
+    program_id = 0
+    while True:
+        program = queue.popleft()
+        if len(program) == MAX_PROGRAM_OPS:
+            break
+        for op in possible_ops(program):
+            program_id += 1
+            new_program = program + [op]
+            queue.append(new_program)
+            yield program_id, new_program
 
 
 def get_num_terminal_ops(program):
-  num_terminal_ops = 0
-  for _, defs in program:
-    if len(defs) == 0:
-      num_terminal_ops += 1
-  return num_terminal_ops
+    num_terminal_ops = 0
+    for _, defs in program:
+        if len(defs) == 0:
+            num_terminal_ops += 1
+    return num_terminal_ops
 
 
 def get_max_uses(program):
-  num_uses = [0] * MAX_PROGRAM_DEFS
-  for uses, _ in program:
-    for u in uses:
-      num_uses[u] += 1
-  return max(num_uses)
+    num_uses = [0] * MAX_PROGRAM_DEFS
+    for uses, _ in program:
+        for u in uses:
+            num_uses[u] += 1
+    return max(num_uses)
 
 
 def has_unused_op(program):
-  used = [False] * MAX_PROGRAM_DEFS
-  for uses, defs in program[::-1]:
-    if defs and all(not used[d] for d in defs):
-      return True
-    for u in uses:
-      used[u] = True
-  return False
+    used = [False] * MAX_PROGRAM_DEFS
+    for uses, defs in program[::-1]:
+        if defs and all(not used[d] for d in defs):
+            return True
+        for u in uses:
+            used[u] = True
+    return False
 
 
 def has_multivalue_use(program):
-  is_multi = [False] * MAX_PROGRAM_DEFS
-  for uses, defs in program:
-    if any(is_multi[u] for u in uses):
-      return True
-    if len(defs) >= 2:
-      for d in defs:
-        is_multi[d] = True
-  return False
+    is_multi = [False] * MAX_PROGRAM_DEFS
+    for uses, defs in program:
+        if any(is_multi[u] for u in uses):
+            return True
+        if len(defs) >= 2:
+            for d in defs:
+                is_multi[d] = True
+    return False
 
 
 def has_mvp_use(program):
-  is_mvp = [False] * MAX_PROGRAM_DEFS
-  for uses, defs in program:
-    if uses and all(is_mvp[u] for u in uses):
-      return True
-    if len(defs) <= 1:
-      if any(is_mvp[u] for u in uses):
-        return True
-      for d in defs:
-        is_mvp[d] = True
-  return False
+    is_mvp = [False] * MAX_PROGRAM_DEFS
+    for uses, defs in program:
+        if uses and all(is_mvp[u] for u in uses):
+            return True
+        if len(defs) <= 1:
+            if any(is_mvp[u] for u in uses):
+                return True
+            for d in defs:
+                is_mvp[d] = True
+    return False
 
 
 def is_interesting(program):
-  # Allow only multivalue single-op programs
-  if len(program) == 1:
-    return len(program[0][1]) > 1
+    # Allow only multivalue single-op programs
+    if len(program) == 1:
+        return len(program[0][1]) > 1
 
-  # Reject programs where the last two instructions are identical
-  if len(program) >= 2 and program[-1][0] == program[-2][0]:
-    return False
+    # Reject programs where the last two instructions are identical
+    if len(program) >= 2 and program[-1][0] == program[-2][0]:
+        return False
 
-  # Reject programs with too many ops that don't produce values
-  if get_num_terminal_ops(program) > 2:
-    return False
+    # Reject programs with too many ops that don't produce values
+    if get_num_terminal_ops(program) > 2:
+        return False
 
-  # The third use of a value is no more interesting than the second
-  if get_max_uses(program) >= 3:
-    return False
+    # The third use of a value is no more interesting than the second
+    if get_max_uses(program) >= 3:
+        return False
 
-  # Reject nontrivial programs that have unused instructions
-  if has_unused_op(program):
-    return False
+    # Reject nontrivial programs that have unused instructions
+    if has_unused_op(program):
+        return False
 
-  # Reject programs that have boring MVP uses of MVP defs
-  if has_mvp_use(program):
-    return False
+    # Reject programs that have boring MVP uses of MVP defs
+    if has_mvp_use(program):
+        return False
 
-  # Otherwise if it has multivalue usage it is interesting
-  return has_multivalue_use(program)
+    # Otherwise if it has multivalue usage it is interesting
+    return has_multivalue_use(program)
 
 
 def make_llvm_type(num_defs):
-  if num_defs == 0:
-    return 'void'
-  else:
-    return '{' + ', '.join(['i32'] * num_defs) + '}'
+    if num_defs == 0:
+        return "void"
+    else:
+        return "{" + ", ".join(["i32"] * num_defs) + "}"
 
 
 def make_llvm_op_name(num_uses, num_defs):
-  return f'op_{num_uses}_to_{num_defs}'
+    return f"op_{num_uses}_to_{num_defs}"
 
 
 def make_llvm_args(first_use, num_uses):
-  return ', '.join([f'i32 %t{first_use + i}' for i in range(num_uses)])
+    return ", ".join([f"i32 %t{first_use + i}" for i in range(num_uses)])
 
 
 def print_llvm_program(program, name):
-  tmp = 0
-  def_data = []
-  print(f'define void @{name}() {{')
-  for uses, defs in program:
-    first_arg = tmp
-    # Extract operands
-    for use in uses:
-      ret_type, var, idx = def_data[use]
-      print(f'  %t{tmp} = extractvalue {ret_type} %t{var}, {idx}')
-      tmp += 1
-    # Print instruction
-    assignment = ''
-    if len(defs) > 0:
-      assignment = f'%t{tmp} = '
-      result_var = tmp
-      tmp += 1
-    ret_type = make_llvm_type(len(defs))
-    op_name = make_llvm_op_name(len(uses), len(defs))
-    args = make_llvm_args(first_arg, len(uses))
-    print(f'  {assignment}call {ret_type} @{op_name}({args})')
-    # Update def_data
-    for i in range(len(defs)):
-      def_data.append((ret_type, result_var, i))
-  print('  ret void')
-  print('}')
+    tmp = 0
+    def_data = []
+    print(f"define void @{name}() {{")
+    for uses, defs in program:
+        first_arg = tmp
+        # Extract operands
+        for use in uses:
+            ret_type, var, idx = def_data[use]
+            print(f"  %t{tmp} = extractvalue {ret_type} %t{var}, {idx}")
+            tmp += 1
+        # Print instruction
+        assignment = ""
+        if len(defs) > 0:
+            assignment = f"%t{tmp} = "
+            result_var = tmp
+            tmp += 1
+        ret_type = make_llvm_type(len(defs))
+        op_name = make_llvm_op_name(len(uses), len(defs))
+        args = make_llvm_args(first_arg, len(uses))
+        print(f"  {assignment}call {ret_type} @{op_name}({args})")
+        # Update def_data
+        for i in range(len(defs)):
+            def_data.append((ret_type, result_var, i))
+    print("  ret void")
+    print("}")
 
 
 def print_header():
-  print('; NOTE: Test functions have been generated by multivalue-stackify.py.')
-  print()
-  print('; RUN: llc < %s -verify-machineinstrs -mattr=+multivalue',
-        '| FileCheck %s')
-  print()
-  print('; Test that the multivalue stackification works')
-  print()
-  print('target triple = "wasm32-unknown-unknown"')
-  print()
-  for num_uses in range(MAX_OP_USES + 1):
-    for num_defs in range(MAX_PROGRAM_DEFS + 1):
-      if num_uses == 0 and num_defs == 0:
-        continue
-      ret_type = make_llvm_type(num_defs)
-      op_name = make_llvm_op_name(num_uses, num_defs)
-      args = make_llvm_args(0, num_uses)
-      print(f'declare {ret_type} @{op_name}({args})')
-  print()
-
-
-if __name__ == '__main__':
-  print_header()
-  for i, program in generate_programs():
-    if is_interesting(program):
-      print_llvm_program(program, 'f' + str(i))
-      print()
+    print("; NOTE: Test functions have been generated by multivalue-stackify.py.")
+    print()
+    print("; RUN: llc < %s -verify-machineinstrs -mattr=+multivalue", "| FileCheck %s")
+    print()
+    print("; Test that the multivalue stackification works")
+    print()
+    print('target triple = "wasm32-unknown-unknown"')
+    print()
+    for num_uses in range(MAX_OP_USES + 1):
+        for num_defs in range(MAX_PROGRAM_DEFS + 1):
+            if num_uses == 0 and num_defs == 0:
+                continue
+            ret_type = make_llvm_type(num_defs)
+            op_name = make_llvm_op_name(num_uses, num_defs)
+            args = make_llvm_args(0, num_uses)
+            print(f"declare {ret_type} @{op_name}({args})")
+    print()
+
+
+if __name__ == "__main__":
+    print_header()
+    for i, program in generate_programs():
+        if is_interesting(program):
+            print_llvm_program(program, "f" + str(i))
+            print()

diff  --git a/llvm/test/MC/COFF/bigobj.py b/llvm/test/MC/COFF/bigobj.py
index f7c000d20d345..ca6eb98a33af1 100644
--- a/llvm/test/MC/COFF/bigobj.py
+++ b/llvm/test/MC/COFF/bigobj.py
@@ -22,8 +22,11 @@
 # CHECK-NEXT: }
 
 for i in range(0, num_sections):
-	print("""	.section	.bss,"bw",discard,_b%d
+    print(
+        """	.section	.bss,"bw",discard,_b%d
 	.globl	_b%d                     # @b%d
 _b%d:
 	.byte	0                       # 0x0
-""" % (i, i, i, i))
+"""
+        % (i, i, i, i)
+    )

diff  --git a/llvm/test/Other/opt-bisect-helper.py b/llvm/test/Other/opt-bisect-helper.py
index d2ab4ce096f93..86c0851272e2c 100755
--- a/llvm/test/Other/opt-bisect-helper.py
+++ b/llvm/test/Other/opt-bisect-helper.py
@@ -9,12 +9,12 @@
 
 parser = argparse.ArgumentParser()
 
-parser.add_argument('--start', type=int, default=0)
-parser.add_argument('--end', type=int, default=(1 << 32))
-parser.add_argument('--optcmd', default=("opt"))
-parser.add_argument('--filecheckcmd', default=("FileCheck"))
-parser.add_argument('--prefix', default=("CHECK-BISECT"))
-parser.add_argument('--test', default=(""))
+parser.add_argument("--start", type=int, default=0)
+parser.add_argument("--end", type=int, default=(1 << 32))
+parser.add_argument("--optcmd", default=("opt"))
+parser.add_argument("--filecheckcmd", default=("FileCheck"))
+parser.add_argument("--prefix", default=("CHECK-BISECT"))
+parser.add_argument("--test", default=(""))
 
 args = parser.parse_args()
 
@@ -24,9 +24,9 @@
 opt_command = [args.optcmd, "-O2", "-opt-bisect-limit=%(count)s", "-S", args.test]
 check_command = [args.filecheckcmd, args.test, "--check-prefix=%s" % args.prefix]
 last = None
-while start != end and start != end-1:
-    count = int(round(start + (end - start)/2))
-    cmd = [x % {'count':count} for x in opt_command]
+while start != end and start != end - 1:
+    count = int(round(start + (end - start) / 2))
+    cmd = [x % {"count": count} for x in opt_command]
     print("opt: " + str(cmd))
     opt_result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     filecheck_result = subprocess.Popen(check_command, stdin=opt_result.stdout)

diff  --git a/llvm/test/TableGen/JSON-check.py b/llvm/test/TableGen/JSON-check.py
index b6bc4ee6c909b..296a5dcfc4fdc 100644
--- a/llvm/test/TableGen/JSON-check.py
+++ b/llvm/test/TableGen/JSON-check.py
@@ -21,11 +21,11 @@
             prefix_pos = line.index(prefix)
         except ValueError:
             continue
-        check_expr = line[prefix_pos + len(prefix):]
+        check_expr = line[prefix_pos + len(prefix) :]
 
         try:
             exception = None
-            result = eval(check_expr, {"data":data})
+            result = eval(check_expr, {"data": data})
         except Exception:
             result = False
             exception = traceback.format_exc().splitlines()[-1]
@@ -34,13 +34,16 @@
             sys.stderr.write(
                 "{file}:{line:d}: check threw exception: {expr}\n"
                 "{file}:{line:d}: exception was: {exception}\n".format(
-                    file=testfile, line=lineno,
-                    expr=check_expr, exception=exception))
+                    file=testfile, line=lineno, expr=check_expr, exception=exception
+                )
+            )
             fails += 1
         elif not result:
             sys.stderr.write(
                 "{file}:{line:d}: check returned False: {expr}\n".format(
-                    file=testfile, line=lineno, expr=check_expr))
+                    file=testfile, line=lineno, expr=check_expr
+                )
+            )
             fails += 1
         else:
             passes += 1

diff  --git a/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py b/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py
index 2f8084523e672..36f237a31d6b8 100644
--- a/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py
+++ b/llvm/test/Transforms/Inline/ML/Inputs/interactive_main.py
@@ -3,19 +3,18 @@
 
 
 def main(args):
+    class Advisor:
+        to_return = False
 
-  class Advisor:
-    to_return = False
+        def advice(self, _):
+            # The adice will be a sequence of yes/no/yes/no/...
+            # see ../interactive-mode.ll
+            self.to_return = not self.to_return
+            return int(self.to_return)
 
-    def advice(self, _):
-      # The adice will be a sequence of yes/no/yes/no/...
-      # see ../interactive-mode.ll
-      self.to_return = not self.to_return
-      return int(self.to_return)
+    a = Advisor()
+    interactive_host.run_interactive(args[0], a.advice, args[1:])
 
-  a = Advisor()
-  interactive_host.run_interactive(args[0], a.advice, args[1:])
 
-
-if __name__ == '__main__':
-  main(sys.argv[1:])
+if __name__ == "__main__":
+    main(sys.argv[1:])

diff  --git a/llvm/test/Unit/lit.cfg.py b/llvm/test/Unit/lit.cfg.py
index 61f60de73889b..f15c30dbcdb0a 100644
--- a/llvm/test/Unit/lit.cfg.py
+++ b/llvm/test/Unit/lit.cfg.py
@@ -8,52 +8,53 @@
 import lit.formats
 
 # name: The name of this test suite.
-config.name = 'LLVM-Unit'
+config.name = "LLVM-Unit"
 
 # suffixes: A list of file extensions to treat as test files.
 config.suffixes = []
 
 # test_source_root: The root path where tests are located.
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.llvm_obj_root, 'unittests')
+config.test_exec_root = os.path.join(config.llvm_obj_root, "unittests")
 config.test_source_root = config.test_exec_root
 
 # testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, 'Tests')
+config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, "Tests")
 
 # Propagate the temp directory. Windows requires this because it uses \Windows\
 # if none of these are present.
-if 'TMP' in os.environ:
-    config.environment['TMP'] = os.environ['TMP']
-if 'TEMP' in os.environ:
-    config.environment['TEMP'] = os.environ['TEMP']
+if "TMP" in os.environ:
+    config.environment["TMP"] = os.environ["TMP"]
+if "TEMP" in os.environ:
+    config.environment["TEMP"] = os.environ["TEMP"]
 
 # Propagate HOME as it can be used to override incorrect homedir in passwd
 # that causes the tests to fail.
-if 'HOME' in os.environ:
-    config.environment['HOME'] = os.environ['HOME']
+if "HOME" in os.environ:
+    config.environment["HOME"] = os.environ["HOME"]
 
 # Propagate sanitizer options.
 for var in [
-    'ASAN_SYMBOLIZER_PATH',
-    'HWASAN_SYMBOLIZER_PATH',
-    'MSAN_SYMBOLIZER_PATH',
-    'TSAN_SYMBOLIZER_PATH',
-    'UBSAN_SYMBOLIZER_PATH',
-    'ASAN_OPTIONS',
-    'HWASAN_OPTIONS',
-    'MSAN_OPTIONS',
-    'TSAN_OPTIONS',
-    'UBSAN_OPTIONS',
+    "ASAN_SYMBOLIZER_PATH",
+    "HWASAN_SYMBOLIZER_PATH",
+    "MSAN_SYMBOLIZER_PATH",
+    "TSAN_SYMBOLIZER_PATH",
+    "UBSAN_SYMBOLIZER_PATH",
+    "ASAN_OPTIONS",
+    "HWASAN_OPTIONS",
+    "MSAN_OPTIONS",
+    "TSAN_OPTIONS",
+    "UBSAN_OPTIONS",
 ]:
     if var in os.environ:
         config.environment[var] = os.environ[var]
 
 # Win32 seeks DLLs along %PATH%.
-if sys.platform in ['win32', 'cygwin'] and os.path.isdir(config.shlibdir):
-    config.environment['PATH'] = os.path.pathsep.join((
-            config.shlibdir, config.environment['PATH']))
+if sys.platform in ["win32", "cygwin"] and os.path.isdir(config.shlibdir):
+    config.environment["PATH"] = os.path.pathsep.join(
+        (config.shlibdir, config.environment["PATH"])
+    )
 
 # Win32 may use %SYSTEMDRIVE% during file system shell operations, so propogate.
-if sys.platform == 'win32' and 'SYSTEMDRIVE' in os.environ:
-    config.environment['SYSTEMDRIVE'] = os.environ['SYSTEMDRIVE']
+if sys.platform == "win32" and "SYSTEMDRIVE" in os.environ:
+    config.environment["SYSTEMDRIVE"] = os.environ["SYSTEMDRIVE"]

diff  --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index 59bfd8ecfd13a..fc682bae5093c 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -15,81 +15,82 @@
 from lit.llvm.subst import ToolSubst
 
 # name: The name of this test suite.
-config.name = 'LLVM'
+config.name = "LLVM"
 
 # testFormat: The test format to use to interpret tests.
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # suffixes: A list of file extensions to treat as test files. This is overriden
 # by individual lit.local.cfg files in the test subdirectories.
-config.suffixes = ['.ll', '.c', '.test', '.txt', '.s', '.mir', '.yaml']
+config.suffixes = [".ll", ".c", ".test", ".txt", ".s", ".mir", ".yaml"]
 
 # excludes: A list of directories to exclude from the testsuite. The 'Inputs'
 # subdirectories contain auxiliary inputs for various tests in their parent
 # directories.
-config.excludes = ['Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt']
+config.excludes = ["Inputs", "CMakeLists.txt", "README.txt", "LICENSE.txt"]
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = os.path.dirname(__file__)
 
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.llvm_obj_root, 'test')
+config.test_exec_root = os.path.join(config.llvm_obj_root, "test")
 
 # Tweak the PATH to include the tools dir.
-llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
+llvm_config.with_environment("PATH", config.llvm_tools_dir, append_path=True)
 
 # Propagate some variables from the host environment.
-llvm_config.with_system_environment(
-    ['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP'])
+llvm_config.with_system_environment(["HOME", "INCLUDE", "LIB", "TMP", "TEMP"])
 
 
 # Set up OCAMLPATH to include newly built OCaml libraries.
-top_ocaml_lib = os.path.join(config.llvm_lib_dir, 'ocaml')
-llvm_ocaml_lib = os.path.join(top_ocaml_lib, 'llvm')
+top_ocaml_lib = os.path.join(config.llvm_lib_dir, "ocaml")
+llvm_ocaml_lib = os.path.join(top_ocaml_lib, "llvm")
 
-llvm_config.with_system_environment('OCAMLPATH')
-llvm_config.with_environment('OCAMLPATH', top_ocaml_lib, append_path=True)
-llvm_config.with_environment('OCAMLPATH', llvm_ocaml_lib, append_path=True)
+llvm_config.with_system_environment("OCAMLPATH")
+llvm_config.with_environment("OCAMLPATH", top_ocaml_lib, append_path=True)
+llvm_config.with_environment("OCAMLPATH", llvm_ocaml_lib, append_path=True)
 
-llvm_config.with_system_environment('CAML_LD_LIBRARY_PATH')
-llvm_config.with_environment(
-    'CAML_LD_LIBRARY_PATH', llvm_ocaml_lib, append_path=True)
+llvm_config.with_system_environment("CAML_LD_LIBRARY_PATH")
+llvm_config.with_environment("CAML_LD_LIBRARY_PATH", llvm_ocaml_lib, append_path=True)
 
 # Set up OCAMLRUNPARAM to enable backtraces in OCaml tests.
-llvm_config.with_environment('OCAMLRUNPARAM', 'b')
+llvm_config.with_environment("OCAMLRUNPARAM", "b")
 
 # Provide the path to asan runtime lib 'libclang_rt.asan_osx_dynamic.dylib' if
 # available. This is darwin specific since it's currently only needed on darwin.
 
 
 def get_asan_rtlib():
-    if not 'Address' in config.llvm_use_sanitizer or \
-       not 'Darwin' in config.host_os or \
-       not 'x86' in config.host_triple:
-        return ''
+    if (
+        not "Address" in config.llvm_use_sanitizer
+        or not "Darwin" in config.host_os
+        or not "x86" in config.host_triple
+    ):
+        return ""
     try:
         import glob
     except:
-        print('glob module not found, skipping get_asan_rtlib() lookup')
-        return ''
+        print("glob module not found, skipping get_asan_rtlib() lookup")
+        return ""
     # The libclang_rt.asan_osx_dynamic.dylib path is obtained using the relative
     # path from the host cc.
-    host_lib_dir = os.path.join(os.path.dirname(config.host_cc), '../lib')
-    asan_dylib_dir_pattern = host_lib_dir + \
-        '/clang/*/lib/darwin/libclang_rt.asan_osx_dynamic.dylib'
+    host_lib_dir = os.path.join(os.path.dirname(config.host_cc), "../lib")
+    asan_dylib_dir_pattern = (
+        host_lib_dir + "/clang/*/lib/darwin/libclang_rt.asan_osx_dynamic.dylib"
+    )
     found_dylibs = glob.glob(asan_dylib_dir_pattern)
     if len(found_dylibs) != 1:
-        return ''
+        return ""
     return found_dylibs[0]
 
 
 llvm_config.use_default_substitutions()
 
 # Add site-specific substitutions.
-config.substitutions.append(('%llvmshlibdir', config.llvm_shlib_dir))
-config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
-config.substitutions.append(('%pluginext', config.llvm_plugin_ext))
-config.substitutions.append(('%exeext', config.llvm_exe_ext))
+config.substitutions.append(("%llvmshlibdir", config.llvm_shlib_dir))
+config.substitutions.append(("%shlibext", config.llvm_shlib_ext))
+config.substitutions.append(("%pluginext", config.llvm_plugin_ext))
+config.substitutions.append(("%exeext", config.llvm_exe_ext))
 
 
 lli_args = []
@@ -98,15 +99,14 @@ def get_asan_rtlib():
 # we don't support COFF in MCJIT well enough for the tests, force ELF format on
 # Windows.  FIXME: the process target triple should be used here, but this is
 # difficult to obtain on Windows.
-if re.search(r'cygwin|windows-gnu|windows-msvc', config.host_triple):
-    lli_args = ['-mtriple=' + config.host_triple + '-elf']
+if re.search(r"cygwin|windows-gnu|windows-msvc", config.host_triple):
+    lli_args = ["-mtriple=" + config.host_triple + "-elf"]
 
 llc_args = []
 
 # Similarly, have a macro to use llc with DWARF even when the host is Windows
-if re.search(r'windows-msvc', config.target_triple):
-    llc_args = [' -mtriple=' +
-                config.target_triple.replace('-msvc', '-gnu')]
+if re.search(r"windows-msvc", config.target_triple):
+    llc_args = [" -mtriple=" + config.target_triple.replace("-msvc", "-gnu")]
 
 # Provide the path to asan runtime lib if available. On darwin, this lib needs
 # to be loaded via DYLD_INSERT_LIBRARIES before libLTO.dylib in case the files
@@ -114,95 +114,171 @@ def get_asan_rtlib():
 ld64_cmd = config.ld64_executable
 asan_rtlib = get_asan_rtlib()
 if asan_rtlib:
-    ld64_cmd = 'DYLD_INSERT_LIBRARIES={} {}'.format(asan_rtlib, ld64_cmd)
+    ld64_cmd = "DYLD_INSERT_LIBRARIES={} {}".format(asan_rtlib, ld64_cmd)
 if config.osx_sysroot:
-    ld64_cmd = '{} -syslibroot {}'.format(ld64_cmd, config.osx_sysroot)
-
-ocamlc_command = '%s ocamlc -cclib -L%s %s' % (
-    config.ocamlfind_executable, config.llvm_lib_dir, config.ocaml_flags)
-ocamlopt_command = 'true'
+    ld64_cmd = "{} -syslibroot {}".format(ld64_cmd, config.osx_sysroot)
+
+ocamlc_command = "%s ocamlc -cclib -L%s %s" % (
+    config.ocamlfind_executable,
+    config.llvm_lib_dir,
+    config.ocaml_flags,
+)
+ocamlopt_command = "true"
 if config.have_ocamlopt:
-    ocamlopt_command = '%s ocamlopt -cclib -L%s -cclib -Wl,-rpath,%s %s' % (
-        config.ocamlfind_executable, config.llvm_lib_dir, config.llvm_lib_dir, config.ocaml_flags)
-
-opt_viewer_cmd = '%s %s/tools/opt-viewer/opt-viewer.py' % (sys.executable, config.llvm_src_root)
+    ocamlopt_command = "%s ocamlopt -cclib -L%s -cclib -Wl,-rpath,%s %s" % (
+        config.ocamlfind_executable,
+        config.llvm_lib_dir,
+        config.llvm_lib_dir,
+        config.ocaml_flags,
+    )
+
+opt_viewer_cmd = "%s %s/tools/opt-viewer/opt-viewer.py" % (
+    sys.executable,
+    config.llvm_src_root,
+)
 
 llvm_original_di_preservation_cmd = os.path.join(
-    config.llvm_src_root,'utils', 'llvm-original-di-preservation.py')
+    config.llvm_src_root, "utils", "llvm-original-di-preservation.py"
+)
 config.substitutions.append(
-    ('%llvm-original-di-preservation', "'%s' %s" % (
-        config.python_executable, llvm_original_di_preservation_cmd)))
+    (
+        "%llvm-original-di-preservation",
+        "'%s' %s" % (config.python_executable, llvm_original_di_preservation_cmd),
+    )
+)
 
-llvm_locstats_tool = os.path.join(config.llvm_tools_dir, 'llvm-locstats')
+llvm_locstats_tool = os.path.join(config.llvm_tools_dir, "llvm-locstats")
 config.substitutions.append(
-    ('%llvm-locstats', "'%s' %s" % (config.python_executable, llvm_locstats_tool)))
+    ("%llvm-locstats", "'%s' %s" % (config.python_executable, llvm_locstats_tool))
+)
 config.llvm_locstats_used = os.path.exists(llvm_locstats_tool)
 
 tools = [
-    ToolSubst('%llvm', FindTool('llvm'), unresolved='ignore'),
-    ToolSubst('%lli', FindTool('lli'), post='.', extra_args=lli_args),
-    ToolSubst('%llc_dwarf', FindTool('llc'), extra_args=llc_args),
-    ToolSubst('%gold', config.gold_executable, unresolved='ignore'),
-    ToolSubst('%ld64', ld64_cmd, unresolved='ignore'),
-    ToolSubst('%ocamlc', ocamlc_command, unresolved='ignore'),
-    ToolSubst('%ocamlopt', ocamlopt_command, unresolved='ignore'),
-    ToolSubst('%opt-viewer', opt_viewer_cmd),
-    ToolSubst('%llvm-objcopy', FindTool('llvm-objcopy')),
-    ToolSubst('%llvm-strip', FindTool('llvm-strip')),
-    ToolSubst('%llvm-install-name-tool', FindTool('llvm-install-name-tool')),
-    ToolSubst('%llvm-bitcode-strip', FindTool('llvm-bitcode-strip')),
-    ToolSubst('%split-file', FindTool('split-file')),
+    ToolSubst("%llvm", FindTool("llvm"), unresolved="ignore"),
+    ToolSubst("%lli", FindTool("lli"), post=".", extra_args=lli_args),
+    ToolSubst("%llc_dwarf", FindTool("llc"), extra_args=llc_args),
+    ToolSubst("%gold", config.gold_executable, unresolved="ignore"),
+    ToolSubst("%ld64", ld64_cmd, unresolved="ignore"),
+    ToolSubst("%ocamlc", ocamlc_command, unresolved="ignore"),
+    ToolSubst("%ocamlopt", ocamlopt_command, unresolved="ignore"),
+    ToolSubst("%opt-viewer", opt_viewer_cmd),
+    ToolSubst("%llvm-objcopy", FindTool("llvm-objcopy")),
+    ToolSubst("%llvm-strip", FindTool("llvm-strip")),
+    ToolSubst("%llvm-install-name-tool", FindTool("llvm-install-name-tool")),
+    ToolSubst("%llvm-bitcode-strip", FindTool("llvm-bitcode-strip")),
+    ToolSubst("%split-file", FindTool("split-file")),
 ]
 
 # FIXME: Why do we have both `lli` and `%lli` that do slightly different things?
-tools.extend([
-    'dsymutil', 'lli', 'lli-child-target', 'llvm-ar', 'llvm-as',
-    'llvm-addr2line', 'llvm-bcanalyzer', 'llvm-bitcode-strip', 'llvm-config',
-    'llvm-cov', 'llvm-cxxdump', 'llvm-cvtres', 'llvm-debuginfod-find',
-    'llvm-debuginfo-analyzer',
-    'llvm-diff', 'llvm-dis', 'llvm-dwarfdump', 'llvm-dwarfutil', 'llvm-dlltool',
-    'llvm-exegesis', 'llvm-extract', 'llvm-isel-fuzzer', 'llvm-ifs',
-    'llvm-install-name-tool', 'llvm-jitlink', 'llvm-opt-fuzzer', 'llvm-lib',
-    'llvm-link', 'llvm-lto', 'llvm-lto2', 'llvm-mc', 'llvm-mca',
-    'llvm-modextract', 'llvm-nm', 'llvm-objcopy', 'llvm-objdump', 'llvm-otool',
-    'llvm-pdbutil', 'llvm-profdata', 'llvm-profgen', 'llvm-ranlib', 'llvm-rc', 'llvm-readelf',
-    'llvm-readobj', 'llvm-remark-size-diff', 'llvm-rtdyld', 'llvm-sim',
-    'llvm-size', 'llvm-split', 'llvm-stress', 'llvm-strings', 'llvm-strip',
-    'llvm-tblgen', 'llvm-tapi-diff', 'llvm-undname', 'llvm-windres',
-    'llvm-c-test', 'llvm-cxxfilt', 'llvm-xray', 'yaml2obj', 'obj2yaml',
-    'yaml-bench', 'verify-uselistorder', 'bugpoint', 'llc', 'llvm-symbolizer',
-    'opt', 'sancov', 'sanstats', 'llvm-remarkutil'])
+tools.extend(
+    [
+        "dsymutil",
+        "lli",
+        "lli-child-target",
+        "llvm-ar",
+        "llvm-as",
+        "llvm-addr2line",
+        "llvm-bcanalyzer",
+        "llvm-bitcode-strip",
+        "llvm-config",
+        "llvm-cov",
+        "llvm-cxxdump",
+        "llvm-cvtres",
+        "llvm-debuginfod-find",
+        "llvm-debuginfo-analyzer",
+        "llvm-diff",
+        "llvm-dis",
+        "llvm-dwarfdump",
+        "llvm-dwarfutil",
+        "llvm-dlltool",
+        "llvm-exegesis",
+        "llvm-extract",
+        "llvm-isel-fuzzer",
+        "llvm-ifs",
+        "llvm-install-name-tool",
+        "llvm-jitlink",
+        "llvm-opt-fuzzer",
+        "llvm-lib",
+        "llvm-link",
+        "llvm-lto",
+        "llvm-lto2",
+        "llvm-mc",
+        "llvm-mca",
+        "llvm-modextract",
+        "llvm-nm",
+        "llvm-objcopy",
+        "llvm-objdump",
+        "llvm-otool",
+        "llvm-pdbutil",
+        "llvm-profdata",
+        "llvm-profgen",
+        "llvm-ranlib",
+        "llvm-rc",
+        "llvm-readelf",
+        "llvm-readobj",
+        "llvm-remark-size-diff",
+        "llvm-rtdyld",
+        "llvm-sim",
+        "llvm-size",
+        "llvm-split",
+        "llvm-stress",
+        "llvm-strings",
+        "llvm-strip",
+        "llvm-tblgen",
+        "llvm-tapi-diff",
+        "llvm-undname",
+        "llvm-windres",
+        "llvm-c-test",
+        "llvm-cxxfilt",
+        "llvm-xray",
+        "yaml2obj",
+        "obj2yaml",
+        "yaml-bench",
+        "verify-uselistorder",
+        "bugpoint",
+        "llc",
+        "llvm-symbolizer",
+        "opt",
+        "sancov",
+        "sanstats",
+        "llvm-remarkutil",
+    ]
+)
 
 # The following tools are optional
-tools.extend([
-    ToolSubst('llvm-mt', unresolved='ignore'),
-    ToolSubst('llvm-debuginfod', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch3', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch4', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch5', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch6', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch7', unresolved='ignore'),
-    ToolSubst('Kaleidoscope-Ch8', unresolved='ignore'),
-    ToolSubst('LLJITWithThinLTOSummaries', unresolved='ignore'),
-    ToolSubst('LLJITWithRemoteDebugging', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsBasicUsage', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsAddObjectFile', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsRemovableCode', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsLazy', unresolved='ignore'),
-    ToolSubst('OrcV2CBindingsVeryLazy', unresolved='ignore'),
-    ToolSubst('dxil-dis', unresolved='ignore')])
+tools.extend(
+    [
+        ToolSubst("llvm-mt", unresolved="ignore"),
+        ToolSubst("llvm-debuginfod", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch3", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch4", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch5", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch6", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch7", unresolved="ignore"),
+        ToolSubst("Kaleidoscope-Ch8", unresolved="ignore"),
+        ToolSubst("LLJITWithThinLTOSummaries", unresolved="ignore"),
+        ToolSubst("LLJITWithRemoteDebugging", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsBasicUsage", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsAddObjectFile", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsRemovableCode", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsLazy", unresolved="ignore"),
+        ToolSubst("OrcV2CBindingsVeryLazy", unresolved="ignore"),
+        ToolSubst("dxil-dis", unresolved="ignore"),
+    ]
+)
 
 # Find (major, minor) version of ptxas
 def ptxas_version(ptxas):
-    ptxas_cmd = subprocess.Popen([ptxas, '--version'], stdout=subprocess.PIPE)
-    ptxas_out = ptxas_cmd.stdout.read().decode('ascii')
+    ptxas_cmd = subprocess.Popen([ptxas, "--version"], stdout=subprocess.PIPE)
+    ptxas_out = ptxas_cmd.stdout.read().decode("ascii")
     ptxas_cmd.wait()
-    match = re.search('release (\d+)\.(\d+)', ptxas_out)
+    match = re.search("release (\d+)\.(\d+)", ptxas_out)
     if match:
         return (int(match.group(1)), int(match.group(2)))
-    print('couldn\'t determine ptxas version')
+    print("couldn't determine ptxas version")
     return None
 
+
 # Enable %ptxas and %ptxas-verify tools.
 # %ptxas-verify defaults to sm_60 architecture. It can be overriden
 # by specifying required one, for instance: %ptxas-verify -arch=sm_80.
@@ -213,10 +289,22 @@ def enable_ptxas(ptxas_executable):
         # versions, so add a feature for every known version prior to
         # the current one.
         ptxas_known_versions = [
-            (9, 0), (9, 1), (9, 2),
-            (10, 0), (10, 1), (10, 2),
-            (11, 0), (11, 1), (11, 2), (11, 3), (11, 4), (11, 5), (11, 6),
-            (11, 7), (11, 8), (12, 0),
+            (9, 0),
+            (9, 1),
+            (9, 2),
+            (10, 0),
+            (10, 1),
+            (10, 2),
+            (11, 0),
+            (11, 1),
+            (11, 2),
+            (11, 3),
+            (11, 4),
+            (11, 5),
+            (11, 6),
+            (11, 7),
+            (11, 8),
+            (12, 0),
         ]
 
         def version_int(ver):
@@ -227,23 +315,29 @@ def version_int(ver):
         min_version = ptxas_known_versions[0]
         if version_int(version) < version_int(min_version):
             print(
-                'Warning: ptxas version {}.{} is not supported'.format(
-                    version[0], version[1]))
+                "Warning: ptxas version {}.{} is not supported".format(
+                    version[0], version[1]
+                )
+            )
             return
 
         for known_version in ptxas_known_versions:
             if version_int(known_version) <= version_int(version):
                 major, minor = known_version
-                config.available_features.add(
-                    'ptxas-{}.{}'.format(major, minor))
+                config.available_features.add("ptxas-{}.{}".format(major, minor))
 
-    config.available_features.add('ptxas')
-    tools.extend([ToolSubst('%ptxas', ptxas_executable),
-                  ToolSubst('%ptxas-verify', '{} -arch=sm_60 -c -'.format(
-                      ptxas_executable))])
+    config.available_features.add("ptxas")
+    tools.extend(
+        [
+            ToolSubst("%ptxas", ptxas_executable),
+            ToolSubst("%ptxas-verify", "{} -arch=sm_60 -c -".format(ptxas_executable)),
+        ]
+    )
 
-ptxas_executable = \
-    os.environ.get('LLVM_PTXAS_EXECUTABLE', None) or config.ptxas_executable
+
+ptxas_executable = (
+    os.environ.get("LLVM_PTXAS_EXECUTABLE", None) or config.ptxas_executable
+)
 if ptxas_executable:
     enable_ptxas(ptxas_executable)
 
@@ -254,63 +348,78 @@ def version_int(ver):
 config.targets = frozenset(config.targets_to_build.split())
 
 for arch in config.targets_to_build.split():
-    config.available_features.add(arch.lower() + '-registered-target')
+    config.available_features.add(arch.lower() + "-registered-target")
 
 # Features
 known_arches = ["x86_64", "mips64", "ppc64", "aarch64"]
-if (config.host_ldflags.find("-m32") < 0
-    and any(config.llvm_host_triple.startswith(x) for x in known_arches)):
-  config.available_features.add("llvm-64-bits")
+if config.host_ldflags.find("-m32") < 0 and any(
+    config.llvm_host_triple.startswith(x) for x in known_arches
+):
+    config.available_features.add("llvm-64-bits")
 
 config.available_features.add("host-byteorder-" + sys.byteorder + "-endian")
 
-if sys.platform in ['win32']:
+if sys.platform in ["win32"]:
     # ExecutionEngine, no weak symbols in COFF.
-    config.available_features.add('uses_COFF')
+    config.available_features.add("uses_COFF")
 else:
     # Others/can-execute.txt
-    config.available_features.add('can-execute')
+    config.available_features.add("can-execute")
 
 # Loadable module
 if config.has_plugins:
-    config.available_features.add('plugins')
+    config.available_features.add("plugins")
 
 if config.build_examples:
-    config.available_features.add('examples')
+    config.available_features.add("examples")
 
 if config.linked_bye_extension:
-    config.substitutions.append(('%llvmcheckext', 'CHECK-EXT'))
-    config.substitutions.append(('%loadbye', ''))
-    config.substitutions.append(('%loadnewpmbye', ''))
+    config.substitutions.append(("%llvmcheckext", "CHECK-EXT"))
+    config.substitutions.append(("%loadbye", ""))
+    config.substitutions.append(("%loadnewpmbye", ""))
 else:
-    config.substitutions.append(('%llvmcheckext', 'CHECK-NOEXT'))
-    config.substitutions.append(('%loadbye',
-                                 '-load={}/Bye{}'.format(config.llvm_shlib_dir,
-                                                         config.llvm_shlib_ext)))
-    config.substitutions.append(('%loadnewpmbye',
-                                 '-load-pass-plugin={}/Bye{}'
-                                 .format(config.llvm_shlib_dir,
-                                         config.llvm_shlib_ext)))
+    config.substitutions.append(("%llvmcheckext", "CHECK-NOEXT"))
+    config.substitutions.append(
+        (
+            "%loadbye",
+            "-load={}/Bye{}".format(config.llvm_shlib_dir, config.llvm_shlib_ext),
+        )
+    )
+    config.substitutions.append(
+        (
+            "%loadnewpmbye",
+            "-load-pass-plugin={}/Bye{}".format(
+                config.llvm_shlib_dir, config.llvm_shlib_ext
+            ),
+        )
+    )
 
 if config.linked_exampleirtransforms_extension:
-    config.substitutions.append(('%loadexampleirtransforms',''))
+    config.substitutions.append(("%loadexampleirtransforms", ""))
 else:
-    config.substitutions.append(('%loadexampleirtransforms',
-                                 '-load-pass-plugin={}/ExampleIRTransforms{}'
-                                 .format(config.llvm_shlib_dir,
-                                 config.llvm_shlib_ext)))
+    config.substitutions.append(
+        (
+            "%loadexampleirtransforms",
+            "-load-pass-plugin={}/ExampleIRTransforms{}".format(
+                config.llvm_shlib_dir, config.llvm_shlib_ext
+            ),
+        )
+    )
 
 # Static libraries are not built if BUILD_SHARED_LIBS is ON.
 if not config.build_shared_libs and not config.link_llvm_dylib:
-    config.available_features.add('static-libs')
+    config.available_features.add("static-libs")
 
 if config.link_llvm_dylib:
-    config.available_features.add('llvm-dylib')
+    config.available_features.add("llvm-dylib")
     config.substitutions.append(
-        ('%llvmdylib',
-         '{}/libLLVM-{}{}'.format(config.llvm_shlib_dir,
-                                  config.llvm_dylib_version,
-                                  config.llvm_shlib_ext)))
+        (
+            "%llvmdylib",
+            "{}/libLLVM-{}{}".format(
+                config.llvm_shlib_dir, config.llvm_dylib_version, config.llvm_shlib_ext
+            ),
+        )
+    )
 
 if config.have_tf_aot:
     config.available_features.add("have_tf_aot")
@@ -324,86 +433,93 @@ def version_int(ver):
 if config.llvm_raevict_model_autogenerated:
     config.available_features.add("llvm_raevict_model_autogenerated")
 
+
 def have_cxx_shared_library():
-    readobj_exe = lit.util.which('llvm-readobj', config.llvm_tools_dir)
+    readobj_exe = lit.util.which("llvm-readobj", config.llvm_tools_dir)
     if not readobj_exe:
-        print('llvm-readobj not found')
+        print("llvm-readobj not found")
         return False
 
     try:
         readobj_cmd = subprocess.Popen(
-            [readobj_exe, '--needed-libs', readobj_exe], stdout=subprocess.PIPE)
+            [readobj_exe, "--needed-libs", readobj_exe], stdout=subprocess.PIPE
+        )
     except OSError:
-        print('could not exec llvm-readobj')
+        print("could not exec llvm-readobj")
         return False
 
-    readobj_out = readobj_cmd.stdout.read().decode('ascii')
+    readobj_out = readobj_cmd.stdout.read().decode("ascii")
     readobj_cmd.wait()
 
-    regex = re.compile(r'(libc\+\+|libstdc\+\+|msvcp).*\.(so|dylib|dll)')
+    regex = re.compile(r"(libc\+\+|libstdc\+\+|msvcp).*\.(so|dylib|dll)")
     needed_libs = False
     for line in readobj_out.splitlines():
-        if 'NeededLibraries [' in line:
+        if "NeededLibraries [" in line:
             needed_libs = True
-        if ']' in line:
+        if "]" in line:
             needed_libs = False
         if needed_libs and regex.search(line.lower()):
             return True
     return False
 
+
 if have_cxx_shared_library():
-    config.available_features.add('cxx-shared-library')
+    config.available_features.add("cxx-shared-library")
 
 if config.libcxx_used:
-    config.available_features.add('libcxx-used')
+    config.available_features.add("libcxx-used")
 
 # LLVM can be configured with an empty default triple
 # Some tests are "generic" and require a valid default triple
 if config.target_triple:
-    config.available_features.add('default_triple')
+    config.available_features.add("default_triple")
     # Direct object generation
     if not config.target_triple.startswith(("nvptx", "xcore")):
-        config.available_features.add('object-emission')
+        config.available_features.add("object-emission")
 
 # Allow checking for specific details in the host triple
 if config.host_triple:
     config.available_features.add('host=%s' % config.host_triple)
 
 if config.have_llvm_driver:
-  config.available_features.add('llvm-driver')
+    config.available_features.add("llvm-driver")
 
 import subprocess
 
 
 def have_ld_plugin_support():
-    if not os.path.exists(os.path.join(config.llvm_shlib_dir, 'LLVMgold' + config.llvm_shlib_ext)):
+    if not os.path.exists(
+        os.path.join(config.llvm_shlib_dir, "LLVMgold" + config.llvm_shlib_ext)
+    ):
         return False
 
     ld_cmd = subprocess.Popen(
-        [config.gold_executable, '--help'], stdout=subprocess.PIPE, env={'LANG': 'C'})
+        [config.gold_executable, "--help"], stdout=subprocess.PIPE, env={"LANG": "C"}
+    )
     ld_out = ld_cmd.stdout.read().decode()
     ld_cmd.wait()
 
-    if not '-plugin' in ld_out:
+    if not "-plugin" in ld_out:
         return False
 
     # check that the used emulations are supported.
-    emu_line = [l for l in ld_out.split('\n') if 'supported emulations' in l]
+    emu_line = [l for l in ld_out.split("\n") if "supported emulations" in l]
     if len(emu_line) != 1:
         return False
     emu_line = emu_line[0]
-    fields = emu_line.split(':')
+    fields = emu_line.split(":")
     if len(fields) != 3:
         return False
     emulations = fields[2].split()
-    if 'elf_x86_64' not in emulations:
+    if "elf_x86_64" not in emulations:
         return False
-    if 'elf32ppc' in emulations:
-        config.available_features.add('ld_emu_elf32ppc')
+    if "elf32ppc" in emulations:
+        config.available_features.add("ld_emu_elf32ppc")
 
     ld_version = subprocess.Popen(
-        [config.gold_executable, '--version'], stdout=subprocess.PIPE, env={'LANG': 'C'})
-    if not 'GNU gold' in ld_version.stdout.read().decode():
+        [config.gold_executable, "--version"], stdout=subprocess.PIPE, env={"LANG": "C"}
+    )
+    if not "GNU gold" in ld_version.stdout.read().decode():
         return False
     ld_version.wait()
 
@@ -411,94 +527,113 @@ def have_ld_plugin_support():
 
 
 if have_ld_plugin_support():
-    config.available_features.add('ld_plugin')
+    config.available_features.add("ld_plugin")
 
 
 def have_ld64_plugin_support():
-    if not os.path.exists(os.path.join(config.llvm_shlib_dir, 'libLTO' + config.llvm_shlib_ext)):
+    if not os.path.exists(
+        os.path.join(config.llvm_shlib_dir, "libLTO" + config.llvm_shlib_ext)
+    ):
         return False
 
-    if config.ld64_executable == '':
+    if config.ld64_executable == "":
         return False
 
-    ld_cmd = subprocess.Popen(
-        [config.ld64_executable, '-v'], stderr=subprocess.PIPE)
+    ld_cmd = subprocess.Popen([config.ld64_executable, "-v"], stderr=subprocess.PIPE)
     ld_out = ld_cmd.stderr.read().decode()
     ld_cmd.wait()
 
-    if 'ld64' not in ld_out or 'LTO' not in ld_out:
+    if "ld64" not in ld_out or "LTO" not in ld_out:
         return False
 
     return True
 
 
 if have_ld64_plugin_support():
-    config.available_features.add('ld64_plugin')
+    config.available_features.add("ld64_plugin")
 
 # Ask llvm-config about asserts
 llvm_config.feature_config(
-    [('--assertion-mode', {'ON': 'asserts'}),
-     ('--build-mode', {'[Dd][Ee][Bb][Uu][Gg]': 'debug'})])
-
-if 'darwin' == sys.platform:
-    cmd = ['sysctl', 'hw.optional.fma']
+    [
+        ("--assertion-mode", {"ON": "asserts"}),
+        ("--build-mode", {"[Dd][Ee][Bb][Uu][Gg]": "debug"}),
+    ]
+)
+
+if "darwin" == sys.platform:
+    cmd = ["sysctl", "hw.optional.fma"]
     sysctl_cmd = subprocess.Popen(cmd, stdout=subprocess.PIPE)
 
     # Non zero return, probably a permission issue
     if sysctl_cmd.wait():
         print(
-          "Warning: sysctl exists but calling \"{}\" failed, defaulting to no fma3.".format(
-          " ".join(cmd)))
+            'Warning: sysctl exists but calling "{}" failed, defaulting to no fma3.'.format(
+                " ".join(cmd)
+            )
+        )
     else:
-        result = sysctl_cmd.stdout.read().decode('ascii')
-        if 'hw.optional.fma: 1' in result:
-            config.available_features.add('fma3')
+        result = sysctl_cmd.stdout.read().decode("ascii")
+        if "hw.optional.fma: 1" in result:
+            config.available_features.add("fma3")
 
 # .debug_frame is not emitted for targeting Windows x64, aarch64/arm64, AIX, or Apple Silicon Mac.
-if not re.match(r'^(x86_64|aarch64|arm64|powerpc|powerpc64).*-(windows-gnu|windows-msvc|aix)', config.target_triple) \
-    and not re.match(r'^arm64(e)?-apple-(macos|darwin)', config.target_triple):
-    config.available_features.add('debug_frame')
+if not re.match(
+    r"^(x86_64|aarch64|arm64|powerpc|powerpc64).*-(windows-gnu|windows-msvc|aix)",
+    config.target_triple,
+) and not re.match(r"^arm64(e)?-apple-(macos|darwin)", config.target_triple):
+    config.available_features.add("debug_frame")
 
 if config.have_libxar:
-    config.available_features.add('xar')
+    config.available_features.add("xar")
 
 if config.enable_threads:
-    config.available_features.add('thread_support')
+    config.available_features.add("thread_support")
 
 if config.have_libxml2:
-    config.available_features.add('libxml2')
+    config.available_features.add("libxml2")
 
 if config.have_curl:
-    config.available_features.add('curl')
+    config.available_features.add("curl")
 
 if config.have_httplib:
-    config.available_features.add('httplib')
+    config.available_features.add("httplib")
 
 if config.have_opt_viewer_modules:
-    config.available_features.add('have_opt_viewer_modules')
+    config.available_features.add("have_opt_viewer_modules")
 
 if config.expensive_checks:
-    config.available_features.add('expensive_checks')
+    config.available_features.add("expensive_checks")
 
 if "MemoryWithOrigins" in config.llvm_use_sanitizer:
-    config.available_features.add('use_msan_with_origins')
+    config.available_features.add("use_msan_with_origins")
+
 
 def exclude_unsupported_files_for_aix(dirname):
-   for filename in os.listdir(dirname):
-       source_path = os.path.join( dirname, filename)
-       if os.path.isdir(source_path):
-           continue
-       f = open(source_path, 'r')
-       try:
-          data = f.read()
-          # 64-bit object files are not supported on AIX, so exclude the tests.
-          if ('-emit-obj' in data or '-filetype=obj' in data) and '64' in config.target_triple:
-            config.excludes += [ filename ]
-       finally:
-          f.close()
-
-if 'aix' in config.target_triple:
-    for directory in ('/CodeGen/X86', '/DebugInfo', '/DebugInfo/X86', '/DebugInfo/Generic', '/LTO/X86', '/Linker'):
+    for filename in os.listdir(dirname):
+        source_path = os.path.join(dirname, filename)
+        if os.path.isdir(source_path):
+            continue
+        f = open(source_path, "r")
+        try:
+            data = f.read()
+            # 64-bit object files are not supported on AIX, so exclude the tests.
+            if (
+                "-emit-obj" in data or "-filetype=obj" in data
+            ) and "64" in config.target_triple:
+                config.excludes += [filename]
+        finally:
+            f.close()
+
+
+if "aix" in config.target_triple:
+    for directory in (
+        "/CodeGen/X86",
+        "/DebugInfo",
+        "/DebugInfo/X86",
+        "/DebugInfo/Generic",
+        "/LTO/X86",
+        "/Linker",
+    ):
         exclude_unsupported_files_for_aix(config.test_source_root + directory)
 
 # Some tools support an environment variable "OBJECT_MODE" on AIX OS, which
@@ -507,5 +642,5 @@ def exclude_unsupported_files_for_aix(dirname):
 # objects only. In order to not affect most test cases, which expect to support
 # 32-bit and 64-bit objects by default, set the environment variable
 # "OBJECT_MODE" to 'any' by default on AIX OS.
-if 'system-aix' in config.available_features:
-    config.environment['OBJECT_MODE'] = 'any'
+if "system-aix" in config.available_features:
+    config.environment["OBJECT_MODE"] = "any"

diff  --git a/llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py b/llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py
index 56fa2d08a0897..fa823e61f9397 100644
--- a/llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py
+++ b/llvm/test/tools/llvm-debuginfod-find/Inputs/capture_req.py
@@ -4,20 +4,22 @@
 import sys
 import threading
 
+
 class TrivialHandler(http.server.BaseHTTPRequestHandler):
-  def do_GET(self):
-    self.send_response(501)
+    def do_GET(self):
+        self.send_response(501)
+
+    def log_request(self, *args, **kwargs):
+        print(self.requestline)
+        print(self.headers)
 
-  def log_request(self, *args, **kwargs):
-    print(self.requestline)
-    print(self.headers)
 
-httpd = http.server.HTTPServer(('', 0),  TrivialHandler)
+httpd = http.server.HTTPServer(("", 0), TrivialHandler)
 port = httpd.socket.getsockname()[1]
 
 try:
-  t = threading.Thread(target=httpd.serve_forever).start()
-  os.environ['DEBUGINFOD_URLS'] =f'http://localhost:{port}'
-  subprocess.run(sys.argv[1:], capture_output = True)
+    t = threading.Thread(target=httpd.serve_forever).start()
+    os.environ["DEBUGINFOD_URLS"] = f"http://localhost:{port}"
+    subprocess.run(sys.argv[1:], capture_output=True)
 finally:
-  httpd.shutdown()
+    httpd.shutdown()

diff  --git a/llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py b/llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py
index ddf9873614785..4c9ffb6ef8e28 100755
--- a/llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py
+++ b/llvm/test/tools/llvm-libtool-darwin/Inputs/DependencyDump.py
@@ -7,17 +7,17 @@
 
 f = open(sys.argv[1], "rb")
 byte = f.read(1)
-while byte != b'':
-    if byte == b'\x00':
+while byte != b"":
+    if byte == b"\x00":
         sys.stdout.write("version: ")
-    elif byte == b'\x10':
+    elif byte == b"\x10":
         sys.stdout.write("input-file: ")
-    elif byte == b'\x11':
+    elif byte == b"\x11":
         sys.stdout.write("not-found: ")
-    elif byte == b'\x40':
+    elif byte == b"\x40":
         sys.stdout.write("output-file: ")
     byte = f.read(1)
-    while byte != b'\x00':
+    while byte != b"\x00":
         sys.stdout.write(byte.decode("ascii"))
         byte = f.read(1)
     sys.stdout.write("\n")

diff  --git a/llvm/test/tools/llvm-objcopy/Inputs/ungzip.py b/llvm/test/tools/llvm-objcopy/Inputs/ungzip.py
index c7b1de96b25c4..7824e9ababb3c 100644
--- a/llvm/test/tools/llvm-objcopy/Inputs/ungzip.py
+++ b/llvm/test/tools/llvm-objcopy/Inputs/ungzip.py
@@ -1,13 +1,14 @@
 import gzip
 import sys
 
-with gzip.open(sys.argv[1], 'rb') as f:
-  writer = getattr(sys.stdout, 'buffer', None)
-  if writer is None:
-    writer = sys.stdout
-    if sys.platform == "win32":
-      import os, msvcrt
-      msvcrt.setmode(sys.stdout.fileno(),os.O_BINARY)
+with gzip.open(sys.argv[1], "rb") as f:
+    writer = getattr(sys.stdout, "buffer", None)
+    if writer is None:
+        writer = sys.stdout
+        if sys.platform == "win32":
+            import os, msvcrt
 
-  writer.write(f.read())
-  sys.stdout.flush()
+            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+
+    writer.write(f.read())
+    sys.stdout.flush()

diff  --git a/llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py b/llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py
index 75c01a2174c7e..2efcf80bdd41d 100644
--- a/llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py
+++ b/llvm/test/tools/llvm-objcopy/MachO/Inputs/code-signature-check.py
@@ -24,15 +24,25 @@
 import sys
 import typing
 
+
 class CodeDirectoryVersion:
     SUPPORTSSCATTER = 0x20100
     SUPPORTSTEAMID = 0x20200
     SUPPORTSCODELIMIT64 = 0x20300
     SUPPORTSEXECSEG = 0x20400
 
+
 class CodeDirectory:
     @staticmethod
-    def make(buf: memoryview) -> typing.Union['CodeDirectoryBase', 'CodeDirectoryV20100', 'CodeDirectoryV20200', 'CodeDirectoryV20300', 'CodeDirectoryV20400']:
+    def make(
+        buf: memoryview,
+    ) -> typing.Union[
+        "CodeDirectoryBase",
+        "CodeDirectoryV20100",
+        "CodeDirectoryV20200",
+        "CodeDirectoryV20300",
+        "CodeDirectoryV20400",
+    ]:
         _magic, _length, version = struct.unpack_from(">III", buf, 0)
         subtype = {
             CodeDirectoryVersion.SUPPORTSSCATTER: CodeDirectoryV20100,
@@ -43,6 +53,7 @@ def make(buf: memoryview) -> typing.Union['CodeDirectoryBase', 'CodeDirectoryV20
 
         return subtype._make(struct.unpack_from(subtype._format(), buf, 0))
 
+
 class CodeDirectoryBase(typing.NamedTuple):
     magic: int
     length: int
@@ -63,6 +74,7 @@ class CodeDirectoryBase(typing.NamedTuple):
     def _format() -> str:
         return ">IIIIIIIIIBBBBI"
 
+
 class CodeDirectoryV20100(typing.NamedTuple):
     magic: int
     length: int
@@ -85,6 +97,7 @@ class CodeDirectoryV20100(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryBase._format() + "I"
 
+
 class CodeDirectoryV20200(typing.NamedTuple):
     magic: int
     length: int
@@ -109,6 +122,7 @@ class CodeDirectoryV20200(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20100._format() + "I"
 
+
 class CodeDirectoryV20300(typing.NamedTuple):
     magic: int
     length: int
@@ -136,6 +150,7 @@ class CodeDirectoryV20300(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20200._format() + "IQ"
 
+
 class CodeDirectoryV20400(typing.NamedTuple):
     magic: int
     length: int
@@ -167,13 +182,16 @@ class CodeDirectoryV20400(typing.NamedTuple):
     def _format() -> str:
         return CodeDirectoryV20300._format() + "QQQ"
 
+
 class CodeDirectoryBlobIndex(typing.NamedTuple):
     type_: int
     offset: int
 
     @staticmethod
-    def make(buf: memoryview) -> 'CodeDirectoryBlobIndex':
-        return CodeDirectoryBlobIndex._make(struct.unpack_from(CodeDirectoryBlobIndex.__format(), buf, 0))
+    def make(buf: memoryview) -> "CodeDirectoryBlobIndex":
+        return CodeDirectoryBlobIndex._make(
+            struct.unpack_from(CodeDirectoryBlobIndex.__format(), buf, 0)
+        )
 
     @staticmethod
     def bytesize() -> int:
@@ -183,6 +201,7 @@ def bytesize() -> int:
     def __format() -> str:
         return ">II"
 
+
 class CodeDirectorySuperBlob(typing.NamedTuple):
     magic: int
     length: int
@@ -190,7 +209,7 @@ class CodeDirectorySuperBlob(typing.NamedTuple):
     blob_indices: typing.List[CodeDirectoryBlobIndex]
 
     @staticmethod
-    def make(buf: memoryview) -> 'CodeDirectorySuperBlob':
+    def make(buf: memoryview) -> "CodeDirectorySuperBlob":
         super_blob_layout = ">III"
         super_blob = struct.unpack_from(super_blob_layout, buf, 0)
 
@@ -202,17 +221,25 @@ def make(buf: memoryview) -> 'CodeDirectorySuperBlob':
 
         return CodeDirectorySuperBlob(*super_blob, blob_indices)
 
+
 def unpack_null_terminated_string(buf: memoryview) -> str:
     b = bytes(itertools.takewhile(lambda b: b != 0, buf))
     return b.decode()
 
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('binary', type=argparse.FileType('rb'), help='The file to analyze')
-    parser.add_argument('offset', type=int, help='Offset to start of Code Directory data')
-    parser.add_argument('size', type=int, help='Size of Code Directory data')
-    parser.add_argument('code_offset', type=int, help='Offset to start of code pages to hash')
-    parser.add_argument('code_size', type=int, help='Size of the code pages to hash')
+    parser.add_argument(
+        "binary", type=argparse.FileType("rb"), help="The file to analyze"
+    )
+    parser.add_argument(
+        "offset", type=int, help="Offset to start of Code Directory data"
+    )
+    parser.add_argument("size", type=int, help="Size of Code Directory data")
+    parser.add_argument(
+        "code_offset", type=int, help="Offset to start of code pages to hash"
+    )
+    parser.add_argument("code_size", type=int, help="Size of the code pages to hash")
 
     args = parser.parse_args()
 
@@ -229,7 +256,10 @@ def main():
         print(code_directory)
 
         ident_offset = code_directory_offset + code_directory.identOffset
-        print("Code Directory ID: " + unpack_null_terminated_string(super_blob_mem[ident_offset:]))
+        print(
+            "Code Directory ID: "
+            + unpack_null_terminated_string(super_blob_mem[ident_offset:])
+        )
 
         code_offset = args.code_offset
         code_end = code_offset + args.code_size
@@ -238,7 +268,9 @@ def main():
 
         hashes_offset = code_directory_offset + code_directory.hashOffset
         for idx in range(code_directory.nCodeSlots):
-            hash_bytes = bytes(super_blob_mem[hashes_offset:hashes_offset+code_directory.hashSize])
+            hash_bytes = bytes(
+                super_blob_mem[hashes_offset : hashes_offset + code_directory.hashSize]
+            )
             hashes_offset += code_directory.hashSize
 
             hasher = hashlib.sha256()
@@ -253,5 +285,5 @@ def main():
                 sys.exit(-1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff  --git a/llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py b/llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py
index 672a932515b50..eb0c8f27dc3ed 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/llvm-dis-and-filecheck.py
@@ -18,7 +18,9 @@
 
 llvm_dis = sys.argv[1]
 filecheck = sys.argv[2]
-filecheck_args = [filecheck, ]
+filecheck_args = [
+    filecheck,
+]
 filecheck_args.extend(sys.argv[3:-1])
 bitcode_file = sys.argv[-1]
 ir_file = bitcode_file + ".ll"
@@ -36,7 +38,7 @@
     print(disassemble.stdout)
     sys.exit(1)
 
-check=None
+check = None
 with open(ir_file, "r") as ir:
     check = subprocess.Popen(filecheck_args, stdin=ir, stdout=sys.stdout)
 check.communicate()

diff  --git a/llvm/test/tools/llvm-reduce/Inputs/remove-args.py b/llvm/test/tools/llvm-reduce/Inputs/remove-args.py
index fea62c3174e06..e003c6a6acecd 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/remove-args.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/remove-args.py
@@ -5,12 +5,12 @@
 
 input = open(sys.argv[1], "r")
 for line in input:
-  if "%interesting" in line:
-    InterestingArgumentPresent = True
-  if "call void @interesting" in line:
-    FunctionCallPresent = True
+    if "%interesting" in line:
+        InterestingArgumentPresent = True
+    if "call void @interesting" in line:
+        FunctionCallPresent = True
 
 if InterestingArgumentPresent and FunctionCallPresent:
-  sys.exit(0) # Interesting!
+    sys.exit(0)  # Interesting!
 
 sys.exit(1)

diff  --git a/llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py b/llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py
index 71f099daaba06..4e5775a9dda97 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/remove-bbs.py
@@ -3,13 +3,13 @@
 InterestingBBs = 0
 input = open(sys.argv[1], "r")
 for line in input:
-  i = line.find(';')
-  if i >= 0:
-    line = line[:i]
-  if line.startswith("interesting") or "%interesting" in line:
-    InterestingBBs += 1
+    i = line.find(";")
+    if i >= 0:
+        line = line[:i]
+    if line.startswith("interesting") or "%interesting" in line:
+        InterestingBBs += 1
 
 if InterestingBBs == 6:
-  sys.exit(0) # interesting!
+    sys.exit(0)  # interesting!
 
-sys.exit(1) # IR isn't interesting
+sys.exit(1)  # IR isn't interesting

diff  --git a/llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py b/llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py
index 9717c73b01aa7..c9396457b4295 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/remove-instructions.py
@@ -4,14 +4,14 @@
 
 input = open(sys.argv[1], "r")
 for line in input:
-  i = line.find(';')
-  if i >= 0:
-    line = line[:i]
-  if "%interesting" in line:
-    InterestingInstructions += 1
-  print(InterestingInstructions)
+    i = line.find(";")
+    if i >= 0:
+        line = line[:i]
+    if "%interesting" in line:
+        InterestingInstructions += 1
+    print(InterestingInstructions)
 
 if InterestingInstructions == 5:
-  sys.exit(0) # interesting!
+    sys.exit(0)  # interesting!
 
 sys.exit(1)

diff  --git a/llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py b/llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py
index 8df093e667b2a..c5a6cf7b52157 100755
--- a/llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py
+++ b/llvm/test/tools/llvm-reduce/Inputs/sleep-and-check-stores.py
@@ -10,8 +10,8 @@
 try:
     input = open(file_input, "r")
 except Exception as err:
-   print(err, file=sys.stderr)
-   sys.exit(1)
+    print(err, file=sys.stderr)
+    sys.exit(1)
 
 InterestingStores = 0
 for line in input:
@@ -23,6 +23,6 @@
 
 
 if InterestingStores > num_stores:
-  sys.exit(0) # interesting!
+    sys.exit(0)  # interesting!
 
-sys.exit(1) # IR isn't interesting
+sys.exit(1)  # IR isn't interesting

diff  --git a/llvm/test/tools/llvm-reduce/remove-bbs-sequence.py b/llvm/test/tools/llvm-reduce/remove-bbs-sequence.py
index f38d501d13700..e211a8c6c7697 100755
--- a/llvm/test/tools/llvm-reduce/remove-bbs-sequence.py
+++ b/llvm/test/tools/llvm-reduce/remove-bbs-sequence.py
@@ -1,15 +1,19 @@
 import subprocess
 import sys
 
-opt = subprocess.run( [ 'opt', '-passes=print<loops>','-disable-output', sys.argv[1]], stdout=subprocess.PIPE, stderr=subprocess.PIPE )
+opt = subprocess.run(
+    ["opt", "-passes=print<loops>", "-disable-output", sys.argv[1]],
+    stdout=subprocess.PIPE,
+    stderr=subprocess.PIPE,
+)
 
 stdout = opt.stdout.decode()
 
-pattern = 'Loop at depth 1 containing'
+pattern = "Loop at depth 1 containing"
 
-if (pattern in opt.stderr.decode()):
-  print('This is interesting!')
-  sys.exit(0)
+if pattern in opt.stderr.decode():
+    print("This is interesting!")
+    sys.exit(0)
 else:
-  print('This is NOT interesting!')
-  sys.exit(1)
+    print("This is NOT interesting!")
+    sys.exit(1)

diff  --git a/llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py b/llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py
index 120d49226fa9a..c8e0b959c7da7 100644
--- a/llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py
+++ b/llvm/test/tools/llvm-symbolizer/Inputs/flush-output.py
@@ -4,21 +4,24 @@
 import sys
 import threading
 
+
 def kill_subprocess(process):
     process.kill()
     os._exit(1)
 
+
 # Pass -f=none and --output-style=GNU to get only one line of output per input.
-cmd = subprocess.Popen([sys.argv[1],
-                        '--obj=' + sys.argv[2],
-                        '-f=none',
-                        '--output-style=GNU'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+cmd = subprocess.Popen(
+    [sys.argv[1], "--obj=" + sys.argv[2], "-f=none", "--output-style=GNU"],
+    stdout=subprocess.PIPE,
+    stdin=subprocess.PIPE,
+)
 watchdog = threading.Timer(20, kill_subprocess, args=[cmd])
 watchdog.start()
-cmd.stdin.write(b'0\n')
+cmd.stdin.write(b"0\n")
 cmd.stdin.flush()
 print(cmd.stdout.readline())
-cmd.stdin.write(b'bad\n')
+cmd.stdin.write(b"bad\n")
 cmd.stdin.flush()
 print(cmd.stdout.readline())
 watchdog.cancel()

diff  --git a/llvm/tools/llvm-shlib/gen-msvc-exports.py b/llvm/tools/llvm-shlib/gen-msvc-exports.py
index 671faf1152812..5ecb8c2da3476 100644
--- a/llvm/tools/llvm-shlib/gen-msvc-exports.py
+++ b/llvm/tools/llvm-shlib/gen-msvc-exports.py
@@ -1,10 +1,10 @@
-#===- gen-msvc-exports.py - Generate C API export file -------*- python -*--===#
+# ===- gen-msvc-exports.py - Generate C API export file -------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 #
 # Generate an export file from a list of given LIB files. This only exports symbols
 # that start with LLVM, so it only exports the LLVM C API.
@@ -22,7 +22,7 @@
 #
 # You can use the --output flag to set the name of the export file.
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 from tempfile import mkstemp
 from contextlib import contextmanager
 from subprocess import check_call
@@ -33,7 +33,7 @@
 
 _UNDERSCORE_REGEX = {
     False: re.compile(r"^\w+\s+T\s+(LLVM.*)$"),
-    True:  re.compile(r"^\w+\s+T\s+_(LLVM.*)$")
+    True: re.compile(r"^\w+\s+T\s+_(LLVM.*)$"),
 }
 
 
@@ -58,46 +58,54 @@ def gen_llvm_c_export(output, underscore, libs, nm):
     to `output`. If `underscore` is true, symbols will
     be assumed to be prefixed with an underscore.
     """
-    with removing(touch_tempfile(prefix='dumpout', suffix='.txt')) as dumpout:
+    with removing(touch_tempfile(prefix="dumpout", suffix=".txt")) as dumpout:
 
         # Get the right regex.
         p = _UNDERSCORE_REGEX[underscore]
 
-        with open(output, 'w+t') as output_f:
+        with open(output, "w+t") as output_f:
 
             # For each lib get the LLVM* functions it exports.
             for lib in libs:
                 # Call dumpbin.
-                with open(dumpout, 'w+t') as dumpout_f:
-                    check_call([nm, '-g', lib], stdout=dumpout_f)
+                with open(dumpout, "w+t") as dumpout_f:
+                    check_call([nm, "-g", lib], stdout=dumpout_f)
 
                 # Get the matching lines.
                 with open(dumpout) as dumpbin:
                     for line in dumpbin:
                         m = p.match(line)
                         if m is not None:
-                            output_f.write(m.group(1) + '\n')
+                            output_f.write(m.group(1) + "\n")
 
 
 def main():
-    parser = argparse.ArgumentParser('gen-msvc-exports')
+    parser = argparse.ArgumentParser("gen-msvc-exports")
 
     parser.add_argument(
-        '-i', '--libsfile', help='file with list of libs, new line separated',
-        action='store', default=None
+        "-i",
+        "--libsfile",
+        help="file with list of libs, new line separated",
+        action="store",
+        default=None,
     )
     parser.add_argument(
-        '-o', '--output', help='output filename', default='LLVM-C.exports'
+        "-o", "--output", help="output filename", default="LLVM-C.exports"
     )
-    parser.add_argument('-u', '--underscore',
-        help='labels are prefixed with an underscore (use for 32 bit DLLs)',
-        action='store_true'
+    parser.add_argument(
+        "-u",
+        "--underscore",
+        help="labels are prefixed with an underscore (use for 32 bit DLLs)",
+        action="store_true",
     )
     parser.add_argument(
-        '--nm', help='path to the llvm-nm executable', default='llvm-nm'
+        "--nm", help="path to the llvm-nm executable", default="llvm-nm"
     )
     parser.add_argument(
-        'libs', metavar='LIBS', nargs='*', help='list of libraries to generate export from'
+        "libs",
+        metavar="LIBS",
+        nargs="*",
+        help="list of libraries to generate export from",
     )
 
     ns = parser.parse_args()
@@ -112,5 +120,5 @@ def main():
     gen_llvm_c_export(ns.output, ns.underscore, libs, ns.nm)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff  --git a/llvm/tools/opt-viewer/extract-reproducers.py b/llvm/tools/opt-viewer/extract-reproducers.py
index 1fa3fb9360e15..610439d6f4c94 100644
--- a/llvm/tools/opt-viewer/extract-reproducers.py
+++ b/llvm/tools/opt-viewer/extract-reproducers.py
@@ -1,21 +1,22 @@
 #!/usr/bin/env python
 
-desc = '''
+desc = """
 A script to extract ConstraintElimination's reproducer remarks. The extracted
 modules are written as textual LLVM IR to files named reproducerXXXX.ll in the
 current directory.
-'''
+"""
 
 import optrecord
 import argparse
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=desc)
     parser.add_argument(
-        'yaml_dirs_or_files',
-        nargs='+',
-        help='List of optimization record files or directories searched '
-             'for optimization record files.')
+        "yaml_dirs_or_files",
+        nargs="+",
+        help="List of optimization record files or directories searched "
+        "for optimization record files.",
+    )
 
     args = parser.parse_args()
 
@@ -27,13 +28,12 @@
         parser.error("No *.opt.yaml files found")
         sys.exit(1)
 
-    all_remarks, file_remarks, _ = optrecord.gather_results(
-        files, jobs, True)
+    all_remarks, file_remarks, _ = optrecord.gather_results(files, jobs, True)
 
     i = 0
     for r in all_remarks:
-        if r[1] != 'constraint-elimination' or r[2] != 'Reproducer':
+        if r[1] != "constraint-elimination" or r[2] != "Reproducer":
             continue
-        with open('reproducer{}.ll'.format(i), 'wt') as f:
+        with open("reproducer{}.ll".format(i), "wt") as f:
             f.write(r[7][1][0][1])
         i += 1

diff  --git a/llvm/tools/opt-viewer/opt-diff.py b/llvm/tools/opt-viewer/opt-diff.py
index 36e81a5d569a7..2763ca4010c8a 100755
--- a/llvm/tools/opt-viewer/opt-diff.py
+++ b/llvm/tools/opt-viewer/opt-diff.py
@@ -2,14 +2,15 @@
 
 from __future__ import print_function
 
-desc = '''Generate the difference of two YAML files into a new YAML file (works on
+desc = """Generate the difference of two YAML files into a new YAML file (works on
 pair of directories too).  A new attribute 'Added' is set to True or False
 depending whether the entry is added or removed from the first input to the
 next.
 
-The tools requires PyYAML.'''
+The tools requires PyYAML."""
 
 import yaml
+
 # Try to use the C parser.
 try:
     from yaml import CLoader as Loader
@@ -20,35 +21,40 @@
 import argparse
 from collections import defaultdict
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=desc)
     parser.add_argument(
-        'yaml_dir_or_file_1',
-        help='An optimization record file or a directory searched for optimization '
-             'record files that are used as the old version for the comparison')
+        "yaml_dir_or_file_1",
+        help="An optimization record file or a directory searched for optimization "
+        "record files that are used as the old version for the comparison",
+    )
     parser.add_argument(
-        'yaml_dir_or_file_2',
-        help='An optimization record file or a directory searched for optimization '
-             'record files that are used as the new version for the comparison')
+        "yaml_dir_or_file_2",
+        help="An optimization record file or a directory searched for optimization "
+        "record files that are used as the new version for the comparison",
+    )
     parser.add_argument(
-        '--jobs',
-        '-j',
+        "--jobs",
+        "-j",
         default=None,
         type=int,
-        help='Max job count (defaults to %(default)s, the current CPU count)')
+        help="Max job count (defaults to %(default)s, the current CPU count)",
+    )
     parser.add_argument(
-        '--max-size',
-        '-m',
+        "--max-size",
+        "-m",
         default=100000,
         type=int,
-        help='Maximum number of remarks stored in an output file')
+        help="Maximum number of remarks stored in an output file",
+    )
     parser.add_argument(
-        '--no-progress-indicator',
-        '-n',
-        action='store_true',
+        "--no-progress-indicator",
+        "-n",
+        action="store_true",
         default=False,
-        help='Do not display any indicator of how many YAML files were read.')
-    parser.add_argument('--output', '-o', default='diff{}.opt.yaml')
+        help="Do not display any indicator of how many YAML files were read.",
+    )
+    parser.add_argument("--output", "-o", default="diff{}.opt.yaml")
     args = parser.parse_args()
 
     files1 = optrecord.find_opt_files(args.yaml_dir_or_file_1)
@@ -71,5 +77,5 @@
         r.recover_yaml_structure()
 
     for i in range(0, len(result), args.max_size):
-        with open(args.output.format(i / args.max_size), 'w') as stream:
-            yaml.dump_all(result[i:i + args.max_size], stream)
+        with open(args.output.format(i / args.max_size), "w") as stream:
+            yaml.dump_all(result[i : i + args.max_size], stream)

diff  --git a/llvm/tools/opt-viewer/opt-stats.py b/llvm/tools/opt-viewer/opt-stats.py
index f4ee3a7d44e63..716143b31a890 100755
--- a/llvm/tools/opt-viewer/opt-stats.py
+++ b/llvm/tools/opt-viewer/opt-stats.py
@@ -2,10 +2,10 @@
 
 from __future__ import print_function
 
-desc = '''Generate statistics about optimization records from the YAML files
+desc = """Generate statistics about optimization records from the YAML files
 generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
 
-The tools requires PyYAML and Pygments Python packages.'''
+The tools requires PyYAML and Pygments Python packages."""
 
 import optrecord
 import argparse
@@ -15,30 +15,34 @@
 
 try:
     from guppy import hpy
+
     hp = hpy()
 except ImportError:
     print("Memory consumption not shown because guppy is not installed")
     hp = None
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=desc)
     parser.add_argument(
-        'yaml_dirs_or_files',
-        nargs='+',
-        help='List of optimization record files or directories searched '
-             'for optimization record files.')
+        "yaml_dirs_or_files",
+        nargs="+",
+        help="List of optimization record files or directories searched "
+        "for optimization record files.",
+    )
     parser.add_argument(
-        '--jobs',
-        '-j',
+        "--jobs",
+        "-j",
         default=None,
         type=int,
-        help='Max job count (defaults to %(default)s, the current CPU count)')
+        help="Max job count (defaults to %(default)s, the current CPU count)",
+    )
     parser.add_argument(
-        '--no-progress-indicator',
-        '-n',
-        action='store_true',
+        "--no-progress-indicator",
+        "-n",
+        action="store_true",
         default=False,
-        help='Do not display any indicator of how many YAML files were read.')
+        help="Do not display any indicator of how many YAML files were read.",
+    )
     args = parser.parse_args()
 
     print_progress = not args.no_progress_indicator
@@ -49,9 +53,10 @@
         sys.exit(1)
 
     all_remarks, file_remarks, _ = optrecord.gather_results(
-        files, args.jobs, print_progress)
+        files, args.jobs, print_progress
+    )
     if print_progress:
-        print('\n')
+        print("\n")
 
     bypass = defaultdict(int)
     byname = defaultdict(int)
@@ -63,16 +68,17 @@
     print("{:24s} {:10d}".format("Total number of remarks", total))
     if hp:
         h = hp.heap()
-        print("{:24s} {:10d}".format("Memory per remark",
-                                     h.size / len(all_remarks)))
-    print('\n')
+        print("{:24s} {:10d}".format("Memory per remark", h.size / len(all_remarks)))
+    print("\n")
 
     print("Top 10 remarks by pass:")
-    for (passname, count) in sorted(bypass.items(), key=operator.itemgetter(1),
-                                    reverse=True)[:10]:
-        print("  {:30s} {:2.0f}%". format(passname, count * 100. / total))
+    for (passname, count) in sorted(
+        bypass.items(), key=operator.itemgetter(1), reverse=True
+    )[:10]:
+        print("  {:30s} {:2.0f}%".format(passname, count * 100.0 / total))
 
     print("\nTop 10 remarks:")
-    for (name, count) in sorted(byname.items(), key=operator.itemgetter(1),
-                                reverse=True)[:10]:
-        print("  {:30s} {:2.0f}%". format(name, count * 100. / total))
+    for (name, count) in sorted(
+        byname.items(), key=operator.itemgetter(1), reverse=True
+    )[:10]:
+        print("  {:30s} {:2.0f}%".format(name, count * 100.0 / total))

diff  --git a/llvm/tools/opt-viewer/opt-viewer.py b/llvm/tools/opt-viewer/opt-viewer.py
index 714fccc8df967..c9c7db726f765 100755
--- a/llvm/tools/opt-viewer/opt-viewer.py
+++ b/llvm/tools/opt-viewer/opt-viewer.py
@@ -21,27 +21,32 @@
 import optrecord
 
 
-desc = '''Generate HTML output to visualize optimization records from the YAML files
+desc = """Generate HTML output to visualize optimization records from the YAML files
 generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
 
-The tools requires PyYAML and Pygments Python packages.'''
+The tools requires PyYAML and Pygments Python packages."""
 
 
 # This allows passing the global context to the child processes.
 class Context:
-    def __init__(self, caller_loc = dict()):
-       # Map function names to their source location for function where inlining happened
-       self.caller_loc = caller_loc
+    def __init__(self, caller_loc=dict()):
+        # Map function names to their source location for function where inlining happened
+        self.caller_loc = caller_loc
+
 
 context = Context()
 
+
 def suppress(remark):
-    if remark.Name == 'sil.Specialized':
-        return remark.getArgDict()['Function'][0].startswith('\"Swift.')
-    elif remark.Name == 'sil.Inlined':
-        return remark.getArgDict()['Callee'][0].startswith(('\"Swift.', '\"specialized Swift.'))
+    if remark.Name == "sil.Specialized":
+        return remark.getArgDict()["Function"][0].startswith('"Swift.')
+    elif remark.Name == "sil.Inlined":
+        return remark.getArgDict()["Callee"][0].startswith(
+            ('"Swift.', '"specialized Swift.')
+        )
     return False
 
+
 class SourceFileRenderer:
     def __init__(self, source_dir, output_dir, filename, no_highlight):
         self.filename = filename
@@ -54,18 +59,27 @@ def __init__(self, source_dir, output_dir, filename, no_highlight):
                 existing_filename = fn
 
         self.no_highlight = no_highlight
-        self.stream = io.open(os.path.join(output_dir, optrecord.html_file_name(filename)), 'w', encoding='utf-8')
+        self.stream = io.open(
+            os.path.join(output_dir, optrecord.html_file_name(filename)),
+            "w",
+            encoding="utf-8",
+        )
         if existing_filename:
-            self.source_stream = io.open(existing_filename, encoding='utf-8')
+            self.source_stream = io.open(existing_filename, encoding="utf-8")
         else:
             self.source_stream = None
-            print(u'''
+            print(
+                """
 <html>
 <h1>Unable to locate file {}</h1>
 </html>
-            '''.format(filename), file=self.stream)
+            """.format(
+                    filename
+                ),
+                file=self.stream,
+            )
 
-        self.html_formatter = HtmlFormatter(encoding='utf-8')
+        self.html_formatter = HtmlFormatter(encoding="utf-8")
         self.cpp_lexer = CppLexer(stripnl=False)
 
     def render_source_lines(self, stream, line_remarks):
@@ -74,31 +88,35 @@ def render_source_lines(self, stream, line_remarks):
         if self.no_highlight:
             html_highlighted = file_text
         else:
-            html_highlighted = highlight(
-            file_text,
-                self.cpp_lexer,
-                self.html_formatter)
+            html_highlighted = highlight(file_text, self.cpp_lexer, self.html_formatter)
 
             # Note that the API is different between Python 2 and 3.  On
             # Python 3, pygments.highlight() returns a bytes object, so we
             # have to decode.  On Python 2, the output is str but since we
             # support unicode characters and the output streams is unicode we
             # decode too.
-            html_highlighted = html_highlighted.decode('utf-8')
+            html_highlighted = html_highlighted.decode("utf-8")
 
             # Take off the header and footer, these must be
             #   reapplied line-wise, within the page structure
-            html_highlighted = html_highlighted.replace('<div class="highlight"><pre>', '')
-            html_highlighted = html_highlighted.replace('</pre></div>', '')
-
-        for (linenum, html_line) in enumerate(html_highlighted.split('\n'), start=1):
-            print(u'''
+            html_highlighted = html_highlighted.replace(
+                '<div class="highlight"><pre>', ""
+            )
+            html_highlighted = html_highlighted.replace("</pre></div>", "")
+
+        for (linenum, html_line) in enumerate(html_highlighted.split("\n"), start=1):
+            print(
+                """
 <tr>
 <td><a name=\"L{linenum}\">{linenum}</a></td>
 <td></td>
 <td></td>
 <td><div class="highlight"><pre>{html_line}</pre></div></td>
-</tr>'''.format(**locals()), file=self.stream)
+</tr>""".format(
+                    **locals()
+                ),
+                file=self.stream,
+            )
 
             for remark in line_remarks.get(linenum, []):
                 if not suppress(remark):
@@ -109,42 +127,52 @@ def render_inline_remarks(self, r, line):
         dl = context.caller_loc.get(r.Function)
         if dl:
             dl_dict = dict(list(dl))
-            link = optrecord.make_link(dl_dict['File'], dl_dict['Line'] - 2)
-            inlining_context = "<a href={link}>{r.DemangledFunctionName}</a>".format(**locals())
+            link = optrecord.make_link(dl_dict["File"], dl_dict["Line"] - 2)
+            inlining_context = "<a href={link}>{r.DemangledFunctionName}</a>".format(
+                **locals()
+            )
 
         # Column is the number of characters *including* tabs, keep those and
         # replace everything else with spaces.
-        indent = line[:max(r.Column, 1) - 1]
-        indent = re.sub('\S', ' ', indent)
+        indent = line[: max(r.Column, 1) - 1]
+        indent = re.sub("\S", " ", indent)
 
         # Create expanded message and link if we have a multiline message.
-        lines = r.message.split('\n')
+        lines = r.message.split("\n")
         if len(lines) > 1:
             expand_link = '<a style="text-decoration: none;" href="" onclick="toggleExpandedMessage(this); return false;">+</a>'
             message = lines[0]
-            expand_message = u'''
+            expand_message = """
 <div class="full-info" style="display:none;">
   <div class="col-left"><pre style="display:inline">{}</pre></div>
   <div class="expanded col-left"><pre>{}</pre></div>
-</div>'''.format(indent, '\n'.join(lines[1:]))
+</div>""".format(
+                indent, "\n".join(lines[1:])
+            )
         else:
-            expand_link = ''
-            expand_message = ''
+            expand_link = ""
+            expand_message = ""
             message = r.message
-        print(u'''
+        print(
+            """
 <tr>
 <td></td>
 <td>{r.RelativeHotness}</td>
 <td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
 <td><pre style="display:inline">{indent}</pre><span class=\"column-entry-yellow\">{expand_link} {message} </span>{expand_message}</td>
 <td class=\"column-entry-yellow\">{inlining_context}</td>
-</tr>'''.format(**locals()), file=self.stream)
+</tr>""".format(
+                **locals()
+            ),
+            file=self.stream,
+        )
 
     def render(self, line_remarks):
         if not self.source_stream:
             return
 
-        print(u'''
+        print(
+            """
 <html>
 <title>{}</title>
 <meta charset="utf-8" />
@@ -180,34 +208,51 @@ def render(self, line_remarks):
 <th style="width: 15%">Inline Context</td>
 </tr>
 </thead>
-<tbody>'''.format(os.path.basename(self.filename)), file=self.stream)
+<tbody>""".format(
+                os.path.basename(self.filename)
+            ),
+            file=self.stream,
+        )
         self.render_source_lines(self.source_stream, line_remarks)
 
-        print(u'''
+        print(
+            """
 </tbody>
 </table>
 </body>
-</html>''', file=self.stream)
+</html>""",
+            file=self.stream,
+        )
 
 
 class IndexRenderer:
-    def __init__(self, output_dir, should_display_hotness, max_hottest_remarks_on_index):
-        self.stream = io.open(os.path.join(output_dir, 'index.html'), 'w', encoding='utf-8')
+    def __init__(
+        self, output_dir, should_display_hotness, max_hottest_remarks_on_index
+    ):
+        self.stream = io.open(
+            os.path.join(output_dir, "index.html"), "w", encoding="utf-8"
+        )
         self.should_display_hotness = should_display_hotness
         self.max_hottest_remarks_on_index = max_hottest_remarks_on_index
 
     def render_entry(self, r, odd):
         escaped_name = html.escape(r.DemangledFunctionName)
-        print(u'''
+        print(
+            """
 <tr>
 <td class=\"column-entry-{odd}\"><a href={r.Link}>{r.DebugLocString}</a></td>
 <td class=\"column-entry-{odd}\">{r.RelativeHotness}</td>
 <td class=\"column-entry-{odd}\">{escaped_name}</td>
 <td class=\"column-entry-{r.color}\">{r.PassWithDiffPrefix}</td>
-</tr>'''.format(**locals()), file=self.stream)
+</tr>""".format(
+                **locals()
+            ),
+            file=self.stream,
+        )
 
     def render(self, all_remarks):
-        print(u'''
+        print(
+            """
 <html>
 <meta charset="utf-8" />
 <head>
@@ -221,7 +266,9 @@ def render(self, all_remarks):
 <td>Hotness</td>
 <td>Function</td>
 <td>Pass</td>
-</tr>''', file=self.stream)
+</tr>""",
+            file=self.stream,
+        )
 
         max_entries = None
         if self.should_display_hotness:
@@ -230,10 +277,13 @@ def render(self, all_remarks):
         for i, remark in enumerate(all_remarks[:max_entries]):
             if not suppress(remark):
                 self.render_entry(remark, i % 2)
-        print(u'''
+        print(
+            """
 </table>
 </body>
-</html>''', file=self.stream)
+</html>""",
+            file=self.stream,
+        )
 
 
 def _render_file(source_dir, output_dir, ctx, no_highlight, entry, filter_):
@@ -247,26 +297,32 @@ def map_remarks(all_remarks):
     # Set up a map between function names and their source location for
     # function where inlining happened
     for remark in optrecord.itervalues(all_remarks):
-        if isinstance(remark, optrecord.Passed) and remark.Pass == "inline" and remark.Name == "Inlined":
+        if (
+            isinstance(remark, optrecord.Passed)
+            and remark.Pass == "inline"
+            and remark.Name == "Inlined"
+        ):
             for arg in remark.Args:
                 arg_dict = dict(list(arg))
-                caller = arg_dict.get('Caller')
+                caller = arg_dict.get("Caller")
                 if caller:
                     try:
-                        context.caller_loc[caller] = arg_dict['DebugLoc']
+                        context.caller_loc[caller] = arg_dict["DebugLoc"]
                     except KeyError:
                         pass
 
 
-def generate_report(all_remarks,
-                    file_remarks,
-                    source_dir,
-                    output_dir,
-                    no_highlight,
-                    should_display_hotness,
-                    max_hottest_remarks_on_index,
-                    num_jobs,
-                    should_print_progress):
+def generate_report(
+    all_remarks,
+    file_remarks,
+    source_dir,
+    output_dir,
+    no_highlight,
+    should_display_hotness,
+    max_hottest_remarks_on_index,
+    num_jobs,
+    should_print_progress,
+):
     try:
         os.makedirs(output_dir)
     except OSError as e:
@@ -276,75 +332,107 @@ def generate_report(all_remarks,
             raise
 
     if should_print_progress:
-        print('Rendering index page...')
+        print("Rendering index page...")
     if should_display_hotness:
-        sorted_remarks = sorted(optrecord.itervalues(all_remarks), key=lambda r: (r.Hotness, r.File, r.Line, r.Column, r.PassWithDiffPrefix, r.yaml_tag, r.Function), reverse=True)
+        sorted_remarks = sorted(
+            optrecord.itervalues(all_remarks),
+            key=lambda r: (
+                r.Hotness,
+                r.File,
+                r.Line,
+                r.Column,
+                r.PassWithDiffPrefix,
+                r.yaml_tag,
+                r.Function,
+            ),
+            reverse=True,
+        )
     else:
-        sorted_remarks = sorted(optrecord.itervalues(all_remarks), key=lambda r: (r.File, r.Line, r.Column, r.PassWithDiffPrefix, r.yaml_tag, r.Function))
-    IndexRenderer(output_dir, should_display_hotness, max_hottest_remarks_on_index).render(sorted_remarks)
-
-    shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)),
-            "style.css"), output_dir)
-
-    _render_file_bound = functools.partial(_render_file, source_dir, output_dir, context, no_highlight)
+        sorted_remarks = sorted(
+            optrecord.itervalues(all_remarks),
+            key=lambda r: (
+                r.File,
+                r.Line,
+                r.Column,
+                r.PassWithDiffPrefix,
+                r.yaml_tag,
+                r.Function,
+            ),
+        )
+    IndexRenderer(
+        output_dir, should_display_hotness, max_hottest_remarks_on_index
+    ).render(sorted_remarks)
+
+    shutil.copy(
+        os.path.join(os.path.dirname(os.path.realpath(__file__)), "style.css"),
+        output_dir,
+    )
+
+    _render_file_bound = functools.partial(
+        _render_file, source_dir, output_dir, context, no_highlight
+    )
     if should_print_progress:
-        print('Rendering HTML files...')
-    optpmap.pmap(_render_file_bound,
-                 file_remarks.items(),
-                 num_jobs,
-                 should_print_progress)
+        print("Rendering HTML files...")
+    optpmap.pmap(
+        _render_file_bound, file_remarks.items(), num_jobs, should_print_progress
+    )
 
 
 def main():
     parser = argparse.ArgumentParser(description=desc)
     parser.add_argument(
-        'yaml_dirs_or_files',
-        nargs='+',
-        help='List of optimization record files or directories searched '
-             'for optimization record files.')
+        "yaml_dirs_or_files",
+        nargs="+",
+        help="List of optimization record files or directories searched "
+        "for optimization record files.",
+    )
     parser.add_argument(
-        '--output-dir',
-        '-o',
-        default='html',
-        help='Path to a directory where generated HTML files will be output. '
-             'If the directory does not already exist, it will be created. '
-             '"%(default)s" by default.')
+        "--output-dir",
+        "-o",
+        default="html",
+        help="Path to a directory where generated HTML files will be output. "
+        "If the directory does not already exist, it will be created. "
+        '"%(default)s" by default.',
+    )
     parser.add_argument(
-        '--jobs',
-        '-j',
+        "--jobs",
+        "-j",
         default=None,
         type=int,
-        help='Max job count (defaults to %(default)s, the current CPU count)')
-    parser.add_argument(
-        '--source-dir',
-        '-s',
-        default='',
-        help='set source directory')
+        help="Max job count (defaults to %(default)s, the current CPU count)",
+    )
+    parser.add_argument("--source-dir", "-s", default="", help="set source directory")
     parser.add_argument(
-        '--no-progress-indicator',
-        '-n',
-        action='store_true',
+        "--no-progress-indicator",
+        "-n",
+        action="store_true",
         default=False,
-        help='Do not display any indicator of how many YAML files were read '
-             'or rendered into HTML.')
+        help="Do not display any indicator of how many YAML files were read "
+        "or rendered into HTML.",
+    )
     parser.add_argument(
-        '--max-hottest-remarks-on-index',
+        "--max-hottest-remarks-on-index",
         default=1000,
         type=int,
-        help='Maximum number of the hottest remarks to appear on the index page')
+        help="Maximum number of the hottest remarks to appear on the index page",
+    )
     parser.add_argument(
-        '--no-highlight',
-        action='store_true',
+        "--no-highlight",
+        action="store_true",
         default=False,
-        help='Do not use a syntax highlighter when rendering the source code')
+        help="Do not use a syntax highlighter when rendering the source code",
+    )
     parser.add_argument(
-        '--demangler',
-        help='Set the demangler to be used (defaults to %s)' % optrecord.Remark.default_demangler)
+        "--demangler",
+        help="Set the demangler to be used (defaults to %s)"
+        % optrecord.Remark.default_demangler,
+    )
 
     parser.add_argument(
-        '--filter',
-        default='',
-        help='Only display remarks from passes matching filter expression')
+        "--filter",
+        default="",
+        help="Only display remarks from passes matching filter expression",
+    )
 
     # Do not make this a global variable.  Values needed to be propagated through
     # to individual classes and functions to be portable with multiprocessing across
@@ -360,20 +448,24 @@ def main():
         parser.error("No *.opt.yaml files found")
         sys.exit(1)
 
-    all_remarks, file_remarks, should_display_hotness = \
-        optrecord.gather_results(files, args.jobs, print_progress, args.filter)
+    all_remarks, file_remarks, should_display_hotness = optrecord.gather_results(
+        files, args.jobs, print_progress, args.filter
+    )
 
     map_remarks(all_remarks)
 
-    generate_report(all_remarks,
-                    file_remarks,
-                    args.source_dir,
-                    args.output_dir,
-                    args.no_highlight,
-                    should_display_hotness,
-                    args.max_hottest_remarks_on_index,
-                    args.jobs,
-                    print_progress)
-
-if __name__ == '__main__':
+    generate_report(
+        all_remarks,
+        file_remarks,
+        args.source_dir,
+        args.output_dir,
+        args.no_highlight,
+        should_display_hotness,
+        args.max_hottest_remarks_on_index,
+        args.jobs,
+        print_progress,
+    )
+
+
+if __name__ == "__main__":
     main()

diff  --git a/llvm/tools/opt-viewer/optpmap.py b/llvm/tools/opt-viewer/optpmap.py
index 8124c8c9036eb..28c608dbfaeee 100644
--- a/llvm/tools/opt-viewer/optpmap.py
+++ b/llvm/tools/opt-viewer/optpmap.py
@@ -19,13 +19,15 @@ def _wrapped_func(func_and_args):
     if should_print_progress:
         with _current.get_lock():
             _current.value += 1
-        sys.stdout.write('\r\t{} of {}'.format(_current.value, _total.value))
+        sys.stdout.write("\r\t{} of {}".format(_current.value, _total.value))
         sys.stdout.flush()
 
     return func(argument, filter_)
 
 
-def pmap(func, iterable, processes, should_print_progress, filter_=None, *args, **kwargs):
+def pmap(
+    func, iterable, processes, should_print_progress, filter_=None, *args, **kwargs
+):
     """
     A parallel map function that reports on its progress.
 
@@ -37,20 +39,25 @@ def pmap(func, iterable, processes, should_print_progress, filter_=None, *args,
     """
     global _current
     global _total
-    _current = multiprocessing.Value('i', 0)
-    _total = multiprocessing.Value('i', len(iterable))
+    _current = multiprocessing.Value("i", 0)
+    _total = multiprocessing.Value("i", len(iterable))
 
     func_and_args = [(func, arg, should_print_progress, filter_) for arg in iterable]
     if processes == 1:
         result = list(map(_wrapped_func, func_and_args, *args, **kwargs))
     else:
-        pool = multiprocessing.Pool(initializer=_init,
-                                    initargs=(_current, _total,),
-                                    processes=processes)
+        pool = multiprocessing.Pool(
+            initializer=_init,
+            initargs=(
+                _current,
+                _total,
+            ),
+            processes=processes,
+        )
         result = pool.map(_wrapped_func, func_and_args, *args, **kwargs)
         pool.close()
         pool.join()
 
     if should_print_progress:
-        sys.stdout.write('\r')
+        sys.stdout.write("\r")
     return result

diff  --git a/llvm/tools/opt-viewer/optrecord.py b/llvm/tools/opt-viewer/optrecord.py
index 6a53e13f4c2b8..9e2fc7cb553b5 100644
--- a/llvm/tools/opt-viewer/optrecord.py
+++ b/llvm/tools/opt-viewer/optrecord.py
@@ -4,6 +4,7 @@
 
 import io
 import yaml
+
 # Try to use the C parser.
 try:
     from yaml import CLoader as Loader
@@ -18,6 +19,7 @@
 from multiprocessing import Lock
 import os, os.path
 import subprocess
+
 try:
     # The previously builtin function `intern()` was moved
     # to the `sys` module in Python 3.
@@ -35,42 +37,47 @@
     # Python 3
     def itervalues(d):
         return iter(d.values())
+
     def iteritems(d):
         return iter(d.items())
+
 else:
     # Python 2
     def itervalues(d):
         return d.itervalues()
+
     def iteritems(d):
         return d.iteritems()
 
 
 def html_file_name(filename):
-    return filename.replace('/', '_').replace('#', '_') + ".html"
+    return filename.replace("/", "_").replace("#", "_") + ".html"
 
 
 def make_link(File, Line):
-    return "\"{}#L{}\"".format(html_file_name(File), Line)
+    return '"{}#L{}"'.format(html_file_name(File), Line)
 
 
 class Remark(yaml.YAMLObject):
     # Work-around for http://pyyaml.org/ticket/154.
     yaml_loader = Loader
 
-    default_demangler = 'c++filt -n'
+    default_demangler = "c++filt -n"
     demangler_proc = None
 
     @classmethod
     def set_demangler(cls, demangler):
-        cls.demangler_proc = subprocess.Popen(demangler.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+        cls.demangler_proc = subprocess.Popen(
+            demangler.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE
+        )
         cls.demangler_lock = Lock()
 
     @classmethod
     def demangle(cls, name):
         with cls.demangler_lock:
-            cls.demangler_proc.stdin.write((name + '\n').encode('utf-8'))
+            cls.demangler_proc.stdin.write((name + "\n").encode("utf-8"))
             cls.demangler_proc.stdin.flush()
-            return cls.demangler_proc.stdout.readline().rstrip().decode('utf-8')
+            return cls.demangler_proc.stdout.readline().rstrip().decode("utf-8")
 
     # Intern all strings since we have lot of duplication across filenames,
     # remark text.
@@ -119,23 +126,23 @@ def tuple_to_dict(t):
         self.Args = [tuple_to_dict(arg_tuple) for arg_tuple in self.Args]
 
     def canonicalize(self):
-        if not hasattr(self, 'Hotness'):
+        if not hasattr(self, "Hotness"):
             self.Hotness = 0
-        if not hasattr(self, 'Args'):
+        if not hasattr(self, "Args"):
             self.Args = []
         self._reduce_memory()
 
     @property
     def File(self):
-        return self.DebugLoc['File']
+        return self.DebugLoc["File"]
 
     @property
     def Line(self):
-        return int(self.DebugLoc['Line'])
+        return int(self.DebugLoc["Line"])
 
     @property
     def Column(self):
-        return self.DebugLoc['Column']
+        return self.DebugLoc["Column"]
 
     @property
     def DebugLocString(self):
@@ -151,20 +158,21 @@ def Link(self):
 
     def getArgString(self, mapping):
         mapping = dict(list(mapping))
-        dl = mapping.get('DebugLoc')
+        dl = mapping.get("DebugLoc")
         if dl:
-            del mapping['DebugLoc']
+            del mapping["DebugLoc"]
 
-        assert(len(mapping) == 1)
+        assert len(mapping) == 1
         (key, value) = list(mapping.items())[0]
 
-        if key == 'Caller' or key == 'Callee' or key == 'DirectCallee':
+        if key == "Caller" or key == "Callee" or key == "DirectCallee":
             value = html.escape(self.demangle(value))
 
-        if dl and key != 'Caller':
+        if dl and key != "Caller":
             dl_dict = dict(list(dl))
-            return u"<a href={}>{}</a>".format(
-                make_link(dl_dict['File'], dl_dict['Line']), value)
+            return "<a href={}>{}</a>".format(
+                make_link(dl_dict["File"], dl_dict["Line"]), value
+            )
         else:
             return value
 
@@ -173,15 +181,15 @@ def getArgString(self, mapping):
     # list containing the value (e.g. for 'Callee' the function) and
     # optionally a DebugLoc.
     def getArgDict(self):
-        if hasattr(self, 'ArgDict'):
+        if hasattr(self, "ArgDict"):
             return self.ArgDict
         self.ArgDict = {}
         for arg in self.Args:
             if len(arg) == 2:
-                if arg[0][0] == 'DebugLoc':
+                if arg[0][0] == "DebugLoc":
                     dbgidx = 0
                 else:
-                    assert(arg[1][0] == 'DebugLoc')
+                    assert arg[1][0] == "DebugLoc"
                     dbgidx = 1
 
                 key = arg[1 - dbgidx][0]
@@ -189,18 +197,18 @@ def getArgDict(self):
             else:
                 arg = arg[0]
                 key = arg[0]
-                entry = (arg[1], )
+                entry = (arg[1],)
 
             self.ArgDict[key] = entry
         return self.ArgDict
 
     def getDiffPrefix(self):
-        if hasattr(self, 'Added'):
+        if hasattr(self, "Added"):
             if self.Added:
-                return '+'
+                return "+"
             else:
-                return '-'
-        return ''
+                return "-"
+        return ""
 
     @property
     def PassWithDiffPrefix(self):
@@ -215,14 +223,22 @@ def message(self):
     @property
     def RelativeHotness(self):
         if self.max_hotness:
-            return "{0:.2f}%".format(self.Hotness * 100. / self.max_hotness)
+            return "{0:.2f}%".format(self.Hotness * 100.0 / self.max_hotness)
         else:
-            return ''
+            return ""
 
     @property
     def key(self):
-        return (self.__class__, self.PassWithDiffPrefix, self.Name, self.File,
-                self.Line, self.Column, self.Function, self.Args)
+        return (
+            self.__class__,
+            self.PassWithDiffPrefix,
+            self.Name,
+            self.File,
+            self.Line,
+            self.Column,
+            self.Function,
+            self.Args,
+        )
 
     def __hash__(self):
         return hash(self.key)
@@ -235,7 +251,7 @@ def __repr__(self):
 
 
 class Analysis(Remark):
-    yaml_tag = '!Analysis'
+    yaml_tag = "!Analysis"
 
     @property
     def color(self):
@@ -243,15 +259,15 @@ def color(self):
 
 
 class AnalysisFPCommute(Analysis):
-    yaml_tag = '!AnalysisFPCommute'
+    yaml_tag = "!AnalysisFPCommute"
 
 
 class AnalysisAliasing(Analysis):
-    yaml_tag = '!AnalysisAliasing'
+    yaml_tag = "!AnalysisAliasing"
 
 
 class Passed(Remark):
-    yaml_tag = '!Passed'
+    yaml_tag = "!Passed"
 
     @property
     def color(self):
@@ -259,21 +275,23 @@ def color(self):
 
 
 class Missed(Remark):
-    yaml_tag = '!Missed'
+    yaml_tag = "!Missed"
 
     @property
     def color(self):
         return "red"
 
+
 class Failure(Missed):
-    yaml_tag = '!Failure'
+    yaml_tag = "!Failure"
+
 
 def get_remarks(input_file, filter_=None):
     max_hotness = 0
     all_remarks = dict()
     file_remarks = defaultdict(functools.partial(defaultdict, list))
 
-    with io.open(input_file, encoding = 'utf-8') as f:
+    with io.open(input_file, encoding="utf-8") as f:
         docs = yaml.load_all(f, Loader=Loader)
 
         filter_e = None
@@ -282,7 +300,7 @@ def get_remarks(input_file, filter_=None):
         for remark in docs:
             remark.canonicalize()
             # Avoid remarks withoug debug location or if they are duplicated
-            if not hasattr(remark, 'DebugLoc') or remark.key in all_remarks:
+            if not hasattr(remark, "DebugLoc") or remark.key in all_remarks:
                 continue
 
             if filter_e and not filter_e.search(remark.Pass):
@@ -295,7 +313,7 @@ def get_remarks(input_file, filter_=None):
             # If we're reading a back a 
diff  yaml file, max_hotness is already
             # captured which may actually be less than the max hotness found
             # in the file.
-            if hasattr(remark, 'max_hotness'):
+            if hasattr(remark, "max_hotness"):
                 max_hotness = remark.max_hotness
             max_hotness = max(max_hotness, remark.Hotness)
 
@@ -304,11 +322,12 @@ def get_remarks(input_file, filter_=None):
 
 def gather_results(filenames, num_jobs, should_print_progress, filter_=None):
     if should_print_progress:
-        print('Reading YAML files...')
+        print("Reading YAML files...")
     if not Remark.demangler_proc:
         Remark.set_demangler(Remark.default_demangler)
     remarks = optpmap.pmap(
-        get_remarks, filenames, num_jobs, should_print_progress, filter_)
+        get_remarks, filenames, num_jobs, should_print_progress, filter_
+    )
     max_hotness = max(entry[0] for entry in remarks)
 
     def merge_file_remarks(file_remarks_job, all_remarks, merged):
@@ -338,8 +357,9 @@ def find_opt_files(*dirs_or_files):
         else:
             for dir, subdirs, files in os.walk(dir_or_file):
                 # Exclude mounted directories and symlinks (os.walk default).
-                subdirs[:] = [d for d in subdirs
-                              if not os.path.ismount(os.path.join(dir, d))]
+                subdirs[:] = [
+                    d for d in subdirs if not os.path.ismount(os.path.join(dir, d))
+                ]
                 for file in files:
                     if fnmatch.fnmatch(file, "*.opt.yaml*"):
                         all.append(os.path.join(dir, file))

diff  --git a/llvm/tools/sancov/coverage-report-server.py b/llvm/tools/sancov/coverage-report-server.py
index 4c666dbc111d8..7b0b494218cc1 100755
--- a/llvm/tools/sancov/coverage-report-server.py
+++ b/llvm/tools/sancov/coverage-report-server.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
-#===- symcov-report-server.py - Coverage Reports HTTP Serve --*- python -*--===#
+# ===- symcov-report-server.py - Coverage Reports HTTP Serve --*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
-'''(EXPERIMENTAL) HTTP server to browse coverage reports from .symcov files.
+# ===------------------------------------------------------------------------===#
+"""(EXPERIMENTAL) HTTP server to browse coverage reports from .symcov files.
 
 Coverage reports for big binaries are too huge, generating them statically
 makes no sense. Start the server and go to localhost:8001 instead.
@@ -19,7 +19,7 @@
 Other options:
     --port port_number - specifies the port to use (8001)
     --host host_name - host name to bind server to (127.0.0.1)
-'''
+"""
 
 from __future__ import print_function
 
@@ -73,10 +73,11 @@
 
 FILE_URI_PREFIX = "/file/"
 
+
 class SymcovData:
     def __init__(self, symcov_json):
-        self.covered_points = frozenset(symcov_json['covered-points'])
-        self.point_symbol_info = symcov_json['point-symbol-info']
+        self.covered_points = frozenset(symcov_json["covered-points"])
+        self.point_symbol_info = symcov_json["point-symbol-info"]
         self.file_coverage = self.compute_filecoverage()
 
     def filenames(self):
@@ -114,25 +115,29 @@ def compute_filecoverage(self):
             for fn, points in fns.items():
                 file_points.extend(points.keys())
             covered_points = self.covered_points & set(file_points)
-            result[filename] = int(math.ceil(
-                len(covered_points) * 100 / len(file_points)))
+            result[filename] = int(
+                math.ceil(len(covered_points) * 100 / len(file_points))
+            )
         return result
 
 
 def format_pct(pct):
     pct_str = str(max(0, min(100, pct)))
-    zeroes = '0' * (3 - len(pct_str))
+    zeroes = "0" * (3 - len(pct_str))
     if zeroes:
         zeroes = '<span class="lz">{0}</span>'.format(zeroes)
     return zeroes + pct_str
 
+
 class ServerHandler(http.server.BaseHTTPRequestHandler):
     symcov_data = None
     src_path = None
 
     def do_GET(self):
-        norm_path = os.path.normpath(urllib.parse.unquote(self.path[len(FILE_URI_PREFIX):]))
-        if self.path == '/':
+        norm_path = os.path.normpath(
+            urllib.parse.unquote(self.path[len(FILE_URI_PREFIX) :])
+        )
+        if self.path == "/":
             self.send_response(200)
             self.send_header("Content-type", "text/html; charset=utf-8")
             self.end_headers()
@@ -143,18 +148,21 @@ def do_GET(self):
                 if not file_coverage:
                     continue
                 filelist.append(
-                        "<tr><td><a href=\"{prefix}{name}\">{name}</a></td>"
-                        "<td>{coverage}%</td></tr>".format(
-                            prefix=FILE_URI_PREFIX,
-                            name=html.escape(filename, quote=True), 
-                            coverage=format_pct(file_coverage)))
+                    '<tr><td><a href="{prefix}{name}">{name}</a></td>'
+                    "<td>{coverage}%</td></tr>".format(
+                        prefix=FILE_URI_PREFIX,
+                        name=html.escape(filename, quote=True),
+                        coverage=format_pct(file_coverage),
+                    )
+                )
 
             response = string.Template(INDEX_PAGE_TMPL).safe_substitute(
-                filenames='\n'.join(filelist))
-            self.wfile.write(response.encode('UTF-8', 'replace'))
+                filenames="\n".join(filelist)
+            )
+            self.wfile.write(response.encode("UTF-8", "replace"))
         elif self.symcov_data.has_file(norm_path):
             filename = norm_path
-            filepath = os.path.join(self.src_path, filename) 
+            filepath = os.path.join(self.src_path, filename)
             if not os.path.exists(filepath):
                 self.send_response(404)
                 self.end_headers()
@@ -166,18 +174,22 @@ def do_GET(self):
 
             linemap = self.symcov_data.compute_linemap(filename)
 
-            with open(filepath, 'r', encoding='utf8') as f:
+            with open(filepath, "r", encoding="utf8") as f:
                 content = "\n".join(
-                        ["<span class='{cls}'>{line} </span>".format(
-                            line=html.escape(line.rstrip()), 
-                            cls=linemap.get(line_no, ""))
-                            for line_no, line in enumerate(f, start=1)])
+                    [
+                        "<span class='{cls}'>{line} </span>".format(
+                            line=html.escape(line.rstrip()),
+                            cls=linemap.get(line_no, ""),
+                        )
+                        for line_no, line in enumerate(f, start=1)
+                    ]
+                )
 
             response = string.Template(CONTENT_PAGE_TMPL).safe_substitute(
-                path=self.path[1:],
-                content=content)
+                path=self.path[1:], content=content
+            )
 
-            self.wfile.write(response.encode('UTF-8', 'replace'))
+            self.wfile.write(response.encode("UTF-8", "replace"))
         else:
             self.send_response(404)
             self.end_headers()
@@ -185,10 +197,10 @@ def do_GET(self):
 
 def main():
     parser = argparse.ArgumentParser(description="symcov report http server.")
-    parser.add_argument('--host', default='127.0.0.1')
-    parser.add_argument('--port', default=8001)
-    parser.add_argument('--symcov', required=True, type=argparse.FileType('r'))
-    parser.add_argument('--srcpath', required=True)
+    parser.add_argument("--host", default="127.0.0.1")
+    parser.add_argument("--port", default=8001)
+    parser.add_argument("--symcov", required=True, type=argparse.FileType("r"))
+    parser.add_argument("--srcpath", required=True)
     args = parser.parse_args()
 
     print("Loading coverage...")
@@ -205,5 +217,6 @@ def main():
         pass
     httpd.server_close()
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

diff  --git a/llvm/utils/DSAclean.py b/llvm/utils/DSAclean.py
index c5fb56b037ebf..1b833ff893248 100755
--- a/llvm/utils/DSAclean.py
+++ b/llvm/utils/DSAclean.py
@@ -1,35 +1,36 @@
 #!/usr/bin/env python
 
-#changelog: 
-#10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _, this will then remove
-#nodes such as %tmp.1.i and %tmp._i.3
-#10/13/2005: exntended to remove variables of the form %tmp(.#)* rather than just 
+# changelog:
+# 10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _, this will then remove
+# nodes such as %tmp.1.i and %tmp._i.3
+# 10/13/2005: exntended to remove variables of the form %tmp(.#)* rather than just
 #%tmp.#, i.e. it now will remove %tmp.12.3.15 etc, additionally fixed a spelling error in
-#the comments
-#10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
-#than removing all lines for which the lable CONTAINS %tmp.#
+# the comments
+# 10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
+# than removing all lines for which the lable CONTAINS %tmp.#
 
 from __future__ import print_function
 
 import re
 import sys
-if( len(sys.argv) < 3 ):
-	print('usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>')
-	sys.exit(1)
-#get a file object
-input = open(sys.argv[1], 'r')
-output = open(sys.argv[2], 'w')
-#we'll get this one line at a time...while we could just put the whole thing in a string
-#it would kill old computers
+
+if len(sys.argv) < 3:
+    print("usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>")
+    sys.exit(1)
+# get a file object
+input = open(sys.argv[1], "r")
+output = open(sys.argv[2], "w")
+# we'll get this one line at a time...while we could just put the whole thing in a string
+# it would kill old computers
 buffer = input.readline()
-while buffer != '':
-	if re.compile("label(\s*)=(\s*)\"\s%tmp(.\w*)*(\s*)\"").search(buffer):
-		#skip next line, write neither this line nor the next
-		buffer = input.readline()
-	else:
-		#this isn't a tmp Node, we can write it
-		output.write(buffer)
-	#prepare for the next iteration
-	buffer = input.readline()
+while buffer != "":
+    if re.compile('label(\s*)=(\s*)"\s%tmp(.\w*)*(\s*)"').search(buffer):
+        # skip next line, write neither this line nor the next
+        buffer = input.readline()
+    else:
+        # this isn't a tmp Node, we can write it
+        output.write(buffer)
+    # prepare for the next iteration
+    buffer = input.readline()
 input.close()
 output.close()

diff  --git a/llvm/utils/DSAextract.py b/llvm/utils/DSAextract.py
index 1d93f1e30c55f..96f818bd2a831 100755
--- a/llvm/utils/DSAextract.py
+++ b/llvm/utils/DSAextract.py
@@ -1,29 +1,29 @@
 #!/usr/bin/env python
 
-#this is a script to extract given named nodes from a dot file, with
-#the associated edges.  An edge is kept iff for edge x -> y
+# this is a script to extract given named nodes from a dot file, with
+# the associated edges.  An edge is kept iff for edge x -> y
 # x and y are both nodes specified to be kept.
 
-#known issues: if a line contains '->' and is not an edge line
-#problems will occur.  If node labels do not begin with
-#Node this also will not work.  Since this is designed to work
-#on DSA dot output and not general dot files this is ok.
-#If you want to use this on other files rename the node labels
-#to Node[.*] with a script or something.  This also relies on
-#the length of a node name being 13 characters (as it is in all
-#DSA dot output files)
+# known issues: if a line contains '->' and is not an edge line
+# problems will occur.  If node labels do not begin with
+# Node this also will not work.  Since this is designed to work
+# on DSA dot output and not general dot files this is ok.
+# If you want to use this on other files rename the node labels
+# to Node[.*] with a script or something.  This also relies on
+# the length of a node name being 13 characters (as it is in all
+# DSA dot output files)
 
-#Note that the name of the node can be any substring of the actual
-#name in the dot file.  Thus if you say specify COLLAPSED
-#as a parameter this script will pull out all COLLAPSED
-#nodes in the file
+# Note that the name of the node can be any substring of the actual
+# name in the dot file.  Thus if you say specify COLLAPSED
+# as a parameter this script will pull out all COLLAPSED
+# nodes in the file
 
-#Specifying escape characters in the name like \n also will not work, 
-#as Python
-#will make it \\n, I'm not really sure how to fix this
+# Specifying escape characters in the name like \n also will not work,
+# as Python
+# will make it \\n, I'm not really sure how to fix this
 
-#currently the script prints the names it is searching for
-#to STDOUT, so you can check to see if they are what you intend
+# currently the script prints the names it is searching for
+# to STDOUT, so you can check to see if they are what you intend
 
 from __future__ import print_function
 
@@ -33,81 +33,81 @@
 
 
 if len(sys.argv) < 3:
-	print('usage is ./DSAextract <dot_file_to_modify> \
-			<output_file> [list of nodes to extract]')
+    print(
+        "usage is ./DSAextract <dot_file_to_modify> \
+			<output_file> [list of nodes to extract]"
+    )
 
-#open the input file
-input = open(sys.argv[1], 'r')
+# open the input file
+input = open(sys.argv[1], "r")
 
-#construct a set of node names
+# construct a set of node names
 node_name_set = set()
 for name in sys.argv[3:]:
-	node_name_set |= set([name])
+    node_name_set |= set([name])
 
-#construct a list of compiled regular expressions from the 
-#node_name_set
+# construct a list of compiled regular expressions from the
+# node_name_set
 regexp_list = []
 for name in node_name_set:
-	regexp_list.append(re.compile(name))
+    regexp_list.append(re.compile(name))
 
-#used to see what kind of line we are on
-nodeexp = re.compile('Node')
-#used to check to see if the current line is an edge line
-arrowexp = re.compile('->')
+# used to see what kind of line we are on
+nodeexp = re.compile("Node")
+# used to check to see if the current line is an edge line
+arrowexp = re.compile("->")
 
 node_set = set()
 
-#read the file one line at a time
+# read the file one line at a time
 buffer = input.readline()
-while buffer != '':
-	#filter out the unnecessary checks on all the edge lines
-	if not arrowexp.search(buffer):
-		#check to see if this is a node we are looking for
-		for regexp in regexp_list:
-			#if this name is for the current node, add the dot variable name
-			#for the node (it will be Node(hex number)) to our set of nodes
-			if regexp.search(buffer):
-				node_set |= set([re.split('\s+',buffer,2)[1]])
-				break
-	buffer = input.readline()
-
-
-#test code
-#print '\n'
+while buffer != "":
+    # filter out the unnecessary checks on all the edge lines
+    if not arrowexp.search(buffer):
+        # check to see if this is a node we are looking for
+        for regexp in regexp_list:
+            # if this name is for the current node, add the dot variable name
+            # for the node (it will be Node(hex number)) to our set of nodes
+            if regexp.search(buffer):
+                node_set |= set([re.split("\s+", buffer, 2)[1]])
+                break
+    buffer = input.readline()
+
+
+# test code
+# print '\n'
 
 print(node_name_set)
 
-#print node_set
-	
+# print node_set
 
-#open the output file
-output = open(sys.argv[2], 'w')
-#start the second pass over the file
-input = open(sys.argv[1], 'r')
 
-buffer = input.readline()
-while buffer != '':
-	#there are three types of lines we are looking for
-	#1) node lines, 2) edge lines 3) support lines (like page size, etc)
-	
-	#is this an edge line?
-	#note that this is no completely robust, if a none edge line
-	#for some reason contains -> it will be missidentified
-	#hand edit the file if this happens
-	if arrowexp.search(buffer):
-		#check to make sure that both nodes are in the node list
-		#if they are print this to output
-		nodes = arrowexp.split(buffer)
-		nodes[0] = string.strip(nodes[0])
-		nodes[1] = string.strip(nodes[1])
-		if nodes[0][:13] in node_set and \
-				nodes[1][:13] in node_set:
-					output.write(buffer)
-	elif nodeexp.search(buffer): #this is a node line
-		node = re.split('\s+', buffer,2)[1]
-		if node in node_set:
-			output.write(buffer)
-	else: #this is a support line
-		output.write(buffer)
-	buffer = input.readline()
+# open the output file
+output = open(sys.argv[2], "w")
+# start the second pass over the file
+input = open(sys.argv[1], "r")
 
+buffer = input.readline()
+while buffer != "":
+    # there are three types of lines we are looking for
+    # 1) node lines, 2) edge lines 3) support lines (like page size, etc)
+
+    # is this an edge line?
+    # note that this is no completely robust, if a none edge line
+    # for some reason contains -> it will be missidentified
+    # hand edit the file if this happens
+    if arrowexp.search(buffer):
+        # check to make sure that both nodes are in the node list
+        # if they are print this to output
+        nodes = arrowexp.split(buffer)
+        nodes[0] = string.strip(nodes[0])
+        nodes[1] = string.strip(nodes[1])
+        if nodes[0][:13] in node_set and nodes[1][:13] in node_set:
+            output.write(buffer)
+    elif nodeexp.search(buffer):  # this is a node line
+        node = re.split("\s+", buffer, 2)[1]
+        if node in node_set:
+            output.write(buffer)
+    else:  # this is a support line
+        output.write(buffer)
+    buffer = input.readline()

diff  --git a/llvm/utils/Reviewing/find_interesting_reviews.py b/llvm/utils/Reviewing/find_interesting_reviews.py
index 5e72631b20383..c006691079b64 100644
--- a/llvm/utils/Reviewing/find_interesting_reviews.py
+++ b/llvm/utils/Reviewing/find_interesting_reviews.py
@@ -21,8 +21,7 @@
 # $ . ./venv/bin/activate
 # $ pip install Phabricator
 
-GIT_REPO_METADATA = (("llvm-monorepo", "https://github.com/llvm/llvm-project"),
-                     )
+GIT_REPO_METADATA = (("llvm-monorepo", "https://github.com/llvm/llvm-project"),)
 
 # The below PhabXXX classes represent objects as modelled by Phabricator.
 # The classes can be serialized to disk, to try and make sure that we don't
@@ -72,25 +71,30 @@ def populate_cache_from_disk(self, directory=DEFAULT_DIRECTORY):
         try:
             f = open(self._get_pickle_name(directory), "rb")
         except IOError as err:
-            print("Could not find cache. Error message: {0}. Continuing..."
-                  .format(err))
+            print("Could not find cache. Error message: {0}. Continuing...".format(err))
         else:
             with f:
                 try:
                     d = pickle.load(f)
                     self.__dict__.update(d)
                 except EOFError as err:
-                    print("Cache seems to be corrupt. " +
-                          "Not using cache. Error message: {0}".format(err))
+                    print(
+                        "Cache seems to be corrupt. "
+                        + "Not using cache. Error message: {0}".format(err)
+                    )
 
     def write_cache_to_disk(self, directory=DEFAULT_DIRECTORY):
         if not os.path.exists(directory):
             os.makedirs(directory)
         with open(self._get_pickle_name(directory), "wb") as f:
             pickle.dump(self.__dict__, f)
-        print("wrote cache to disk, most_recent_info= {0}".format(
-            datetime.fromtimestamp(self.most_recent_info)
-            if self.most_recent_info is not None else None))
+        print(
+            "wrote cache to disk, most_recent_info= {0}".format(
+                datetime.fromtimestamp(self.most_recent_info)
+                if self.most_recent_info is not None
+                else None
+            )
+        )
 
 
 class PhabReview(PhabObject):
@@ -162,8 +166,9 @@ def __init__(self, rest_api_hunk):
         # Merge the adjacent and overlapping ranges in there:
         t = []
         lastRange = None
-        for start, end in self.actual_lines_changed_offset + \
-                [(sys.maxsize, sys.maxsize)]:
+        for start, end in self.actual_lines_changed_offset + [
+            (sys.maxsize, sys.maxsize)
+        ]:
             if lastRange is None:
                 lastRange = (start, end)
             else:
@@ -214,48 +219,64 @@ def init_phab_connection():
     return phab
 
 
-def update_cached_info(phab, cache, phab_query, order, record_results,
-                       max_nr_entries_per_fetch, max_nr_days_to_cache):
+def update_cached_info(
+    phab,
+    cache,
+    phab_query,
+    order,
+    record_results,
+    max_nr_entries_per_fetch,
+    max_nr_days_to_cache,
+):
     q = phab
     LIMIT = max_nr_entries_per_fetch
     for query_step in phab_query:
         q = getattr(q, query_step)
     results = q(order=order, limit=LIMIT)
     most_recent_info, oldest_info = record_results(cache, results, phab)
-    oldest_info_to_fetch = datetime.fromtimestamp(most_recent_info) - \
-        timedelta(days=max_nr_days_to_cache)
+    oldest_info_to_fetch = datetime.fromtimestamp(most_recent_info) - timedelta(
+        days=max_nr_days_to_cache
+    )
     most_recent_info_overall = most_recent_info
     cache.write_cache_to_disk()
     after = results["cursor"]["after"]
     print("after: {0!r}".format(after))
-    print("most_recent_info: {0}".format(
-        datetime.fromtimestamp(most_recent_info)))
-    while (after is not None
-           and datetime.fromtimestamp(oldest_info) > oldest_info_to_fetch):
-        need_more_older_data = \
-            (cache.oldest_info is None or
-             datetime.fromtimestamp(cache.oldest_info) > oldest_info_to_fetch)
-        print(("need_more_older_data={0} cache.oldest_info={1} " +
-               "oldest_info_to_fetch={2}").format(
-                   need_more_older_data,
-                   datetime.fromtimestamp(cache.oldest_info)
-                   if cache.oldest_info is not None else None,
-                   oldest_info_to_fetch))
-        need_more_newer_data = \
-            (cache.most_recent_info is None or
-             cache.most_recent_info < most_recent_info)
-        print(("need_more_newer_data={0} cache.most_recent_info={1} " +
-               "most_recent_info={2}")
-              .format(need_more_newer_data, cache.most_recent_info,
-                      most_recent_info))
+    print("most_recent_info: {0}".format(datetime.fromtimestamp(most_recent_info)))
+    while (
+        after is not None and datetime.fromtimestamp(oldest_info) > oldest_info_to_fetch
+    ):
+        need_more_older_data = (
+            cache.oldest_info is None
+            or datetime.fromtimestamp(cache.oldest_info) > oldest_info_to_fetch
+        )
+        print(
+            (
+                "need_more_older_data={0} cache.oldest_info={1} "
+                + "oldest_info_to_fetch={2}"
+            ).format(
+                need_more_older_data,
+                datetime.fromtimestamp(cache.oldest_info)
+                if cache.oldest_info is not None
+                else None,
+                oldest_info_to_fetch,
+            )
+        )
+        need_more_newer_data = (
+            cache.most_recent_info is None or cache.most_recent_info < most_recent_info
+        )
+        print(
+            (
+                "need_more_newer_data={0} cache.most_recent_info={1} "
+                + "most_recent_info={2}"
+            ).format(need_more_newer_data, cache.most_recent_info, most_recent_info)
+        )
         if not need_more_older_data and not need_more_newer_data:
             break
         results = q(order=order, after=after, limit=LIMIT)
         most_recent_info, oldest_info = record_results(cache, results, phab)
         after = results["cursor"]["after"]
         print("after: {0!r}".format(after))
-        print("most_recent_info: {0}".format(
-            datetime.fromtimestamp(most_recent_info)))
+        print("most_recent_info: {0}".format(datetime.fromtimestamp(most_recent_info)))
         cache.write_cache_to_disk()
     cache.most_recent_info = most_recent_info_overall
     if after is None:
@@ -279,8 +300,10 @@ def record_reviews(cache, reviews, phab):
         title = reviewInfo["fields"]["title"]
         author = reviewInfo["fields"]["authorPHID"]
         phabReview = cache.get(id)
-        if "dateModified" not in phabReview.__dict__ or \
-           dateModified > phabReview.dateModified:
+        if (
+            "dateModified" not in phabReview.__dict__
+            or dateModified > phabReview.dateModified
+        ):
             
diff _results = phab.
diff erential.query
diff s(revisionIDs=[id])
             
diff _ids = sorted(
diff _results.keys())
             phabDiffs = []
@@ -291,8 +314,11 @@ def record_reviews(cache, reviews, phab):
                 phabDiffs.append(d)
             phabReview.update(title, dateCreated, dateModified, author)
             phabReview.setPhabDiffs(phabDiffs)
-            print("Updated D{0} modified on {1} ({2} 
diff s)".format(
-                id, datetime.fromtimestamp(dateModified), len(phabDiffs)))
+            print(
+                "Updated D{0} modified on {1} ({2} 
diff s)".format(
+                    id, datetime.fromtimestamp(dateModified), len(phabDiffs)
+                )
+            )
 
         if most_recent_info is None:
             most_recent_info = dateModified
@@ -330,41 +356,66 @@ def record_users(cache, users, phab):
     return most_recent_info, oldest_info
 
 
-PHABCACHESINFO = ((reviews_cache, ("
diff erential", "revision", "search"),
-                   "updated", record_reviews, 5, 7),
-                  (users_cache, ("user", "search"), "newest", record_users,
-                   100, 1000))
+PHABCACHESINFO = (
+    (
+        reviews_cache,
+        ("
diff erential", "revision", "search"),
+        "updated",
+        record_reviews,
+        5,
+        7,
+    ),
+    (users_cache, ("user", "search"), "newest", record_users, 100, 1000),
+)
 
 
 def load_cache():
     for cache, phab_query, order, record_results, _, _ in PHABCACHESINFO:
         cache.populate_cache_from_disk()
-        print("Loaded {0} nr entries: {1}".format(
-            cache.get_name(), len(cache.get_ids_in_cache())))
-        print("Loaded {0} has most recent info: {1}".format(
-            cache.get_name(),
-            datetime.fromtimestamp(cache.most_recent_info)
-            if cache.most_recent_info is not None else None))
+        print(
+            "Loaded {0} nr entries: {1}".format(
+                cache.get_name(), len(cache.get_ids_in_cache())
+            )
+        )
+        print(
+            "Loaded {0} has most recent info: {1}".format(
+                cache.get_name(),
+                datetime.fromtimestamp(cache.most_recent_info)
+                if cache.most_recent_info is not None
+                else None,
+            )
+        )
 
 
 def update_cache(phab):
     load_cache()
-    for cache, phab_query, order, record_results, max_nr_entries_per_fetch, \
-            max_nr_days_to_cache in PHABCACHESINFO:
-        update_cached_info(phab, cache, phab_query, order, record_results,
-                           max_nr_entries_per_fetch, max_nr_days_to_cache)
+    for (
+        cache,
+        phab_query,
+        order,
+        record_results,
+        max_nr_entries_per_fetch,
+        max_nr_days_to_cache,
+    ) in PHABCACHESINFO:
+        update_cached_info(
+            phab,
+            cache,
+            phab_query,
+            order,
+            record_results,
+            max_nr_entries_per_fetch,
+            max_nr_days_to_cache,
+        )
         ids_in_cache = cache.get_ids_in_cache()
         print("{0} objects in {1}".format(len(ids_in_cache), cache.get_name()))
         cache.write_cache_to_disk()
 
 
 def get_most_recent_reviews(days):
-    newest_reviews = sorted(
-        reviews_cache.get_objects(), key=lambda r: -r.dateModified)
+    newest_reviews = sorted(reviews_cache.get_objects(), key=lambda r: -r.dateModified)
     if len(newest_reviews) == 0:
         return newest_reviews
-    most_recent_review_time = \
-        datetime.fromtimestamp(newest_reviews[0].dateModified)
+    most_recent_review_time = datetime.fromtimestamp(newest_reviews[0].dateModified)
     cut_off_date = most_recent_review_time - timedelta(days=days)
     result = []
     for review in newest_reviews:
@@ -395,36 +446,51 @@ def print_most_recent_reviews(phab, days, filter_reviewers):
 
     def add_msg(msg):
         msgs.append(msg)
-        print(msg.encode('utf-8'))
+        print(msg.encode("utf-8"))
 
     newest_reviews = get_most_recent_reviews(days)
-    add_msg(u"These are the reviews that look interesting to be reviewed. " +
-            u"The report below has 2 sections. The first " +
-            u"section is organized per review; the second section is organized "
-            + u"per potential reviewer.\n")
+    add_msg(
+        "These are the reviews that look interesting to be reviewed. "
+        + "The report below has 2 sections. The first "
+        + "section is organized per review; the second section is organized "
+        + "per potential reviewer.\n"
+    )
     oldest_review = newest_reviews[-1] if len(newest_reviews) > 0 else None
-    oldest_datetime = \
-        datetime.fromtimestamp(oldest_review.dateModified) \
-        if oldest_review else None
-    add_msg((u"The report below is based on analyzing the reviews that got " +
-             u"touched in the past {0} days (since {1}). " +
-             u"The script found {2} such reviews.\n").format(
-                 days, oldest_datetime, len(newest_reviews)))
+    oldest_datetime = (
+        datetime.fromtimestamp(oldest_review.dateModified) if oldest_review else None
+    )
+    add_msg(
+        (
+            "The report below is based on analyzing the reviews that got "
+            + "touched in the past {0} days (since {1}). "
+            + "The script found {2} such reviews.\n"
+        ).format(days, oldest_datetime, len(newest_reviews))
+    )
     reviewer2reviews_and_scores = {}
     for i, review in enumerate(newest_reviews):
         matched_reviewers = find_reviewers_for_review(review)
         matched_reviewers = filter_reviewers(matched_reviewers)
         if len(matched_reviewers) == 0:
             continue
-        add_msg((u"{0:>3}. https://reviews.llvm.org/D{1} by {2}\n     {3}\n" +
-                 u"     Last updated on {4}").format(
-                     i, review.id,
-                     get_real_name_from_author(review.author), review.title,
-                     datetime.fromtimestamp(review.dateModified)))
+        add_msg(
+            (
+                "{0:>3}. https://reviews.llvm.org/D{1} by {2}\n     {3}\n"
+                + "     Last updated on {4}"
+            ).format(
+                i,
+                review.id,
+                get_real_name_from_author(review.author),
+                review.title,
+                datetime.fromtimestamp(review.dateModified),
+            )
+        )
         for reviewer, scores in matched_reviewers:
-            add_msg(u"    potential reviewer {0}, score {1}".format(
-                reviewer,
-                "(" + "/".join(["{0:.1f}%".format(s) for s in scores]) + ")"))
+            add_msg(
+                "    potential reviewer {0}, score {1}".format(
+                    reviewer,
+                    "(" + "/".join(["{0:.1f}%".format(s) for s in scores]) + ")",
+                )
+            )
             if reviewer not in reviewer2reviews_and_scores:
                 reviewer2reviews_and_scores[reviewer] = []
             reviewer2reviews_and_scores[reviewer].append((review, scores))
@@ -433,12 +499,20 @@ def add_msg(msg):
     for reviewer in sorted(reviewer2reviews_and_scores.keys()):
         reviews_and_scores = reviewer2reviews_and_scores[reviewer]
         reviews_and_scores.sort(key=lambda rs: rs[1], reverse=True)
-        add_msg(u"\n\nSUMMARY FOR {0} (found {1} reviews):".format(
-            reviewer, len(reviews_and_scores)))
+        add_msg(
+            "\n\nSUMMARY FOR {0} (found {1} reviews):".format(
+                reviewer, len(reviews_and_scores)
+            )
+        )
         for review, scores in reviews_and_scores:
-            add_msg(u"[{0}] https://reviews.llvm.org/D{1} '{2}' by {3}".format(
-                "/".join(["{0:.1f}%".format(s) for s in scores]), review.id,
-                review.title, get_real_name_from_author(review.author)))
+            add_msg(
+                "[{0}] https://reviews.llvm.org/D{1} '{2}' by {3}".format(
+                    "/".join(["{0:.1f}%".format(s) for s in scores]),
+                    review.id,
+                    review.title,
+                    get_real_name_from_author(review.author),
+                )
+            )
     return "\n".join(msgs)
 
 
@@ -446,13 +520,12 @@ def get_git_cmd_output(cmd):
     output = None
     try:
         logging.debug(cmd)
-        output = subprocess.check_output(
-            cmd, shell=True, stderr=subprocess.STDOUT)
+        output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
         logging.debug(str(e))
     if output is None:
         return None
-    return output.decode("utf-8", errors='ignore')
+    return output.decode("utf-8", errors="ignore")
 
 
 reAuthorMail = re.compile("^author-mail <([^>]*)>.*$")
@@ -480,12 +553,14 @@ def __init__(self):
     def _populate_cache_for(self, cache_key):
         assert cache_key not in self.cache
         git_repo, base_revision, path = cache_key
-        cmd = ("git -C {0} blame --encoding=utf-8 --date iso -f -e -w " +
-               "--line-porcelain {1} -- {2}").format(git_repo, base_revision,
-                                                     path)
+        cmd = (
+            "git -C {0} blame --encoding=utf-8 --date iso -f -e -w "
+            + "--line-porcelain {1} -- {2}"
+        ).format(git_repo, base_revision, path)
         blame_output = get_git_cmd_output(cmd)
-        self.cache[cache_key] = \
-            blame_output.split('\n') if blame_output is not None else None
+        self.cache[cache_key] = (
+            blame_output.split("\n") if blame_output is not None else None
+        )
         # FIXME: the blame cache could probably be made more effective still if
         # instead of storing the requested base_revision in the cache, the last
         # revision before the base revision this file/path got changed in gets
@@ -493,8 +568,9 @@ def _populate_cache_for(self, cache_key):
         # file/patch hasn't changed would get cache hits (instead of misses in
         # the current implementation).
 
-    def get_blame_output_for(self, git_repo, base_revision, path, start_line=-1,
-                             end_line=-1):
+    def get_blame_output_for(
+        self, git_repo, base_revision, path, start_line=-1, end_line=-1
+    ):
         cache_key = (git_repo, base_revision, path)
         if cache_key not in self.cache:
             self._populate_cache_for(cache_key)
@@ -511,11 +587,14 @@ def get_blame_output_for(self, git_repo, base_revision, path, start_line=-1,
         assert start_line <= end_line
         return all_blame_lines[start_line:end_line]
 
-    def get_parsed_git_blame_for(self, git_repo, base_revision, path,
-                                 start_line=-1, end_line=-1):
+    def get_parsed_git_blame_for(
+        self, git_repo, base_revision, path, start_line=-1, end_line=-1
+    ):
         return parse_blame_output_line_porcelain(
-            self.get_blame_output_for(git_repo, base_revision, path, start_line,
-                                      end_line))
+            self.get_blame_output_for(
+                git_repo, base_revision, path, start_line, end_line
+            )
+        )
 
 
 blameOutputCache = BlameOutputCache()
@@ -534,8 +613,8 @@ def find_reviewers_for_
diff _heuristic(
diff ):
     git_repo = os.path.join("git_repos", GIT_REPO_METADATA[0][0])
     cmd = 'git -C {0} rev-list -n 1 --before="{1}" main'.format(
         git_repo,
-        datetime.fromtimestamp(
-            
diff .dateModified).strftime("%Y-%m-%d %H:%M:%s"))
+        datetime.fromtimestamp(
diff .dateModified).strftime("%Y-%m-%d %H:%M:%s"),
+    )
     base_revision = get_git_cmd_output(cmd).strip()
     logging.debug("Base revision={0}".format(base_revision))
     for change in 
diff .changes:
@@ -544,18 +623,20 @@ def find_reviewers_for_
diff _heuristic(
diff ):
         for hunk in change.hunks:
             for start_line, end_line in hunk.actual_lines_changed_offset:
                 # Collect git blame results for authors in those ranges.
-                for reviewer, nr_occurences in \
-                        blameOutputCache.get_parsed_git_blame_for(
-                            git_repo, base_revision, path, start_line, end_line
-                        ).items():
+                for (
+                    reviewer,
+                    nr_occurences,
+                ) in blameOutputCache.get_parsed_git_blame_for(
+                    git_repo, base_revision, path, start_line, end_line
+                ).items():
                     if reviewer not in reviewers2nr_lines_touched:
                         reviewers2nr_lines_touched[reviewer] = 0
                     reviewers2nr_lines_touched[reviewer] += nr_occurences
         # Compute heuristic 2: don't look at context, just at files touched.
         # Collect git blame results for authors in those ranges.
-        for reviewer, nr_occurences in \
-                blameOutputCache.get_parsed_git_blame_for(
-                    git_repo, base_revision, path).items():
+        for reviewer, nr_occurences in blameOutputCache.get_parsed_git_blame_for(
+            git_repo, base_revision, path
+        ).items():
             if reviewer not in reviewers2nr_files_touched:
                 reviewers2nr_files_touched[reviewer] = 0
             reviewers2nr_files_touched[reviewer] += 1
@@ -563,30 +644,35 @@ def find_reviewers_for_
diff _heuristic(
diff ):
     # Compute "match scores"
     total_nr_lines = sum(reviewers2nr_lines_touched.values())
     total_nr_files = len(
diff .changes)
-    reviewers_matchscores = \
-        [(reviewer,
-          (reviewers2nr_lines_touched.get(reviewer, 0)*100.0/total_nr_lines
-           if total_nr_lines != 0 else 0,
-           reviewers2nr_files_touched[reviewer]*100.0/total_nr_files
-           if total_nr_files != 0 else 0))
-         for reviewer, nr_lines
-         in reviewers2nr_files_touched.items()]
+    reviewers_matchscores = [
+        (
+            reviewer,
+            (
+                reviewers2nr_lines_touched.get(reviewer, 0) * 100.0 / total_nr_lines
+                if total_nr_lines != 0
+                else 0,
+                reviewers2nr_files_touched[reviewer] * 100.0 / total_nr_files
+                if total_nr_files != 0
+                else 0,
+            ),
+        )
+        for reviewer, nr_lines in reviewers2nr_files_touched.items()
+    ]
     reviewers_matchscores.sort(key=lambda i: i[1], reverse=True)
     return reviewers_matchscores
 
 
 def find_reviewers_for_review(review):
     # Process the newest 
diff  first.
-    
diff s = sorted(
-        review.phabDiffs, key=lambda d: d.dateModified, reverse=True)
+    
diff s = sorted(review.phabDiffs, key=lambda d: d.dateModified, reverse=True)
     if len(
diff s) == 0:
         return
     
diff  = 
diff s[0]
     matched_reviewers = find_reviewers_for_
diff _heuristic(
diff )
     # Show progress, as this is a slow operation:
-    sys.stdout.write('.')
+    sys.stdout.write(".")
     sys.stdout.flush()
-    logging.debug(u"matched_reviewers: {0}".format(matched_reviewers))
+    logging.debug("matched_reviewers: {0}".format(matched_reviewers))
     return matched_reviewers
 
 
@@ -606,58 +692,66 @@ def send_emails(email_addresses, sender, msg):
     s.connect()
     for email_address in email_addresses:
         email_msg = email.mime.multipart.MIMEMultipart()
-        email_msg['From'] = sender
-        email_msg['To'] = email_address
-        email_msg['Subject'] = 'LLVM patches you may be able to review.'
-        email_msg.attach(email.mime.text.MIMEText(msg.encode('utf-8'), 'plain'))
+        email_msg["From"] = sender
+        email_msg["To"] = email_address
+        email_msg["Subject"] = "LLVM patches you may be able to review."
+        email_msg.attach(email.mime.text.MIMEText(msg.encode("utf-8"), "plain"))
         # python 3.x: s.send_message(email_msg)
-        s.sendmail(email_msg['From'], email_msg['To'], email_msg.as_string())
+        s.sendmail(email_msg["From"], email_msg["To"], email_msg.as_string())
     s.quit()
 
 
 def filter_reviewers_to_report_for(people_to_look_for):
     # The below is just an example filter, to only report potential reviews
     # to do for the people that will receive the report email.
-    return lambda potential_reviewers: [r for r in potential_reviewers
-                                        if r[0] in people_to_look_for]
+    return lambda potential_reviewers: [
+        r for r in potential_reviewers if r[0] in people_to_look_for
+    ]
 
 
 def main():
     parser = argparse.ArgumentParser(
-        description='Match open reviews to potential reviewers.')
+        description="Match open reviews to potential reviewers."
+    )
     parser.add_argument(
-        '--no-update-cache',
-        dest='update_cache',
-        action='store_false',
+        "--no-update-cache",
+        dest="update_cache",
+        action="store_false",
         default=True,
-        help='Do not update cached Phabricator objects')
+        help="Do not update cached Phabricator objects",
+    )
     parser.add_argument(
-        '--email-report',
-        dest='email_report',
-        nargs='*',
+        "--email-report",
+        dest="email_report",
+        nargs="*",
         default="",
-        help="A email addresses to send the report to.")
+        help="A email addresses to send the report to.",
+    )
     parser.add_argument(
-        '--sender',
-        dest='sender',
+        "--sender",
+        dest="sender",
         default="",
-        help="The email address to use in 'From' on messages emailed out.")
+        help="The email address to use in 'From' on messages emailed out.",
+    )
     parser.add_argument(
-        '--email-addresses',
-        dest='email_addresses',
-        nargs='*',
-        help="The email addresses (as known by LLVM git) of " +
-        "the people to look for reviews for.")
-    parser.add_argument('--verbose', '-v', action='count')
+        "--email-addresses",
+        dest="email_addresses",
+        nargs="*",
+        help="The email addresses (as known by LLVM git) of "
+        + "the people to look for reviews for.",
+    )
+    parser.add_argument("--verbose", "-v", action="count")
 
     args = parser.parse_args()
 
     if args.verbose >= 1:
         logging.basicConfig(level=logging.DEBUG)
 
-    people_to_look_for = [e.decode('utf-8') for e in args.email_addresses]
-    logging.debug("Will look for reviews that following contributors could " +
-                  "review: {}".format(people_to_look_for))
+    people_to_look_for = [e.decode("utf-8") for e in args.email_addresses]
+    logging.debug(
+        "Will look for reviews that following contributors could "
+        + "review: {}".format(people_to_look_for)
+    )
     logging.debug("Will email a report to: {}".format(args.email_report))
 
     phab = init_phab_connection()
@@ -670,7 +764,8 @@ def main():
     msg = print_most_recent_reviews(
         phab,
         days=1,
-        filter_reviewers=filter_reviewers_to_report_for(people_to_look_for))
+        filter_reviewers=filter_reviewers_to_report_for(people_to_look_for),
+    )
 
     if args.email_report != []:
         send_emails(args.email_report, args.sender, msg)

diff  --git a/llvm/utils/Target/ARM/analyze-match-table.py b/llvm/utils/Target/ARM/analyze-match-table.py
index d4e158d9e1d80..c14b1a19fe39e 100644
--- a/llvm/utils/Target/ARM/analyze-match-table.py
+++ b/llvm/utils/Target/ARM/analyze-match-table.py
@@ -2,6 +2,7 @@
 
 from __future__ import print_function
 
+
 def analyze_match_table(path):
     # Extract the instruction table.
     data = open(path).read()
@@ -14,15 +15,14 @@ def analyze_match_table(path):
     for ln in lines:
         ln = ln.split("{", 1)[1]
         ln = ln.rsplit("}", 1)[0]
-        a,bc = ln.split("{", 1)
-        b,c = bc.split("}", 1)
-        code, string, converter, _ = [s.strip()
-                                      for s in a.split(",")]
+        a, bc = ln.split("{", 1)
+        b, c = bc.split("}", 1)
+        code, string, converter, _ = [s.strip() for s in a.split(",")]
         items = [s.strip() for s in b.split(",")]
-        _,features = [s.strip() for s in c.split(",")]
+        _, features = [s.strip() for s in c.split(",")]
         assert string[0] == string[-1] == '"'
         string = string[1:-1]
-        insns.append((code,string,converter,items,features))
+        insns.append((code, string, converter, items, features))
 
     # For every mnemonic, compute whether or not it can have a carry setting
     # operand and whether or not it can have a predication code.
@@ -34,24 +34,24 @@ def analyze_match_table(path):
         flags.update(items)
 
     mnemonics = set(mnemonic_flags)
-    ccout_mnemonics = set(m for m in mnemonics
-                          if 'MCK_CCOut' in mnemonic_flags[m])
-    condcode_mnemonics = set(m for m in mnemonics
-                             if 'MCK_CondCode' in mnemonic_flags[m])
+    ccout_mnemonics = set(m for m in mnemonics if "MCK_CCOut" in mnemonic_flags[m])
+    condcode_mnemonics = set(
+        m for m in mnemonics if "MCK_CondCode" in mnemonic_flags[m]
+    )
     noncondcode_mnemonics = mnemonics - condcode_mnemonics
-    print(' || '.join('Mnemonic == "%s"' % m
-                      for m in ccout_mnemonics))
-    print(' || '.join('Mnemonic == "%s"' % m
-                      for m in noncondcode_mnemonics))
+    print(" || ".join('Mnemonic == "%s"' % m for m in ccout_mnemonics))
+    print(" || ".join('Mnemonic == "%s"' % m for m in noncondcode_mnemonics))
+
 
 def main():
     import sys
+
     if len(sys.argv) == 1:
         import os
         from lit.Util import capture
+
         llvm_obj_root = capture(["llvm-config", "--obj-root"])
-        file = os.path.join(llvm_obj_root,
-                            "lib/Target/ARM/ARMGenAsmMatcher.inc")
+        file = os.path.join(llvm_obj_root, "lib/Target/ARM/ARMGenAsmMatcher.inc")
     elif len(sys.argv) == 2:
         file = sys.argv[1]
     else:
@@ -59,5 +59,6 @@ def main():
 
     analyze_match_table(file)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

diff  --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index 54719228e1264..72ff67b03d81e 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -5,10 +5,12 @@
 from . import common
 
 if sys.version_info[0] > 2:
-  class string:
-    expandtabs = str.expandtabs
+
+    class string:
+        expandtabs = str.expandtabs
+
 else:
-  import string
+    import string
 
 # RegEx: this is where the magic happens.
 
@@ -16,525 +18,580 @@ class string:
 
 ASM_FUNCTION_X86_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*(@"?(?P=func)"?| -- Begin function (?P=func))\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?'
-    r'(?:\.L(?P=func)\$local:\n)?'      # drop .L<func>$local:
-    r'(?:\s*\.type\s+\.L(?P=func)\$local, at function\n)?'  # drop .type .L<func>$local
-    r'(?:[ \t]*(?:\.cfi_startproc|\.cfi_personality|\.cfi_lsda|\.seh_proc|\.seh_handler)\b[^\n]*\n)*'  # drop optional cfi
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section|#+ -- End function)',
-    flags=(re.M | re.S))
+    r"(?:\.L(?P=func)\$local:\n)?"  # drop .L<func>$local:
+    r"(?:\s*\.type\s+\.L(?P=func)\$local, at function\n)?"  # drop .type .L<func>$local
+    r"(?:[ \t]*(?:\.cfi_startproc|\.cfi_personality|\.cfi_lsda|\.seh_proc|\.seh_handler)\b[^\n]*\n)*"  # drop optional cfi
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r"^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section|#+ -- End function)",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_ARM_RE = re.compile(
-    r'^(?P<func>[0-9a-zA-Z_$]+):\n' # f: (name of function)
-    r'(?:\.L(?P=func)\$local:\n)?'  # drop .L<func>$local:
-    r'(?:\s*\.type\s+\.L(?P=func)\$local, at function\n)?'  # drop .type .L<func>$local
-    r'\s+\.fnstart\n' # .fnstart
-    r'(?P<body>.*?)' # (body of the function)
-    r'^.Lfunc_end[0-9]+:', # .Lfunc_end0: or # -- End function
-    flags=(re.M | re.S))
+    r"^(?P<func>[0-9a-zA-Z_$]+):\n"  # f: (name of function)
+    r"(?:\.L(?P=func)\$local:\n)?"  # drop .L<func>$local:
+    r"(?:\s*\.type\s+\.L(?P=func)\$local, at function\n)?"  # drop .type .L<func>$local
+    r"\s+\.fnstart\n"  # .fnstart
+    r"(?P<body>.*?)"  # (body of the function)
+    r"^.Lfunc_end[0-9]+:",  # .Lfunc_end0: or # -- End function
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_AARCH64_RE = re.compile(
-     r'^_?(?P<func>[^:]+):[ \t]*\/\/[ \t]*@"?(?P=func)"?( (Function|Tail Call))?\n'
-     r'(?:[ \t]+.cfi_startproc\n)?'  # drop optional cfi noise
-     r'(?P<body>.*?)\n'
-     # This list is incomplete
-     r'^\s*(\.Lfunc_end[0-9]+|// -- End function)',
-     flags=(re.M | re.S))
+    r'^_?(?P<func>[^:]+):[ \t]*\/\/[ \t]*@"?(?P=func)"?( (Function|Tail Call))?\n'
+    r"(?:[ \t]+.cfi_startproc\n)?"  # drop optional cfi noise
+    r"(?P<body>.*?)\n"
+    # This list is incomplete
+    r"^\s*(\.Lfunc_end[0-9]+|// -- End function)",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_AMDGPU_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
-    r'(?P<body>.*?)\n' # (body of the function)
+    r"(?P<body>.*?)\n"  # (body of the function)
     # This list is incomplete
-    r'^\s*(\.Lfunc_end[0-9]+:\n|\.section)',
-    flags=(re.M | re.S))
+    r"^\s*(\.Lfunc_end[0-9]+:\n|\.section)",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_BPF_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?:[ \t]+.cfi_startproc\n|.seh_proc[^\n]+\n)?'  # drop optional cfi
-    r'(?P<body>.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:[ \t]+.cfi_startproc\n|.seh_proc[^\n]+\n)?"  # drop optional cfi
+    r"(?P<body>.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_HEXAGON_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*//[ \t]*@"?(?P=func)"?\n[^:]*?'
-    r'(?P<body>.*?)\n' # (body of the function)
+    r"(?P<body>.*?)\n"  # (body of the function)
     # This list is incomplete
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_M68K_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*;[ \t]*@"?(?P=func)"?\n'
-    r'(?P<body>.*?)\s*' # (body of the function)
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\s*"  # (body of the function)
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_MIPS_RE = re.compile(
-    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n[^:]*?' # f: (name of func)
-    r'(?:\s*\.?Ltmp[^:\n]*:\n)?[^:]*?'        # optional .Ltmp<N> for EH
-    r'(?:^[ \t]+\.(frame|f?mask|set).*?\n)+'  # Mips+LLVM standard asm prologue
-    r'(?P<body>.*?)\n'                        # (body of the function)
+    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n[^:]*?'  # f: (name of func)
+    r"(?:\s*\.?Ltmp[^:\n]*:\n)?[^:]*?"  # optional .Ltmp<N> for EH
+    r"(?:^[ \t]+\.(frame|f?mask|set).*?\n)+"  # Mips+LLVM standard asm prologue
+    r"(?P<body>.*?)\n"  # (body of the function)
     # Mips+LLVM standard asm epilogue
-    r'(?:(^[ \t]+\.set[^\n]*?\n)*^[ \t]+\.end.*?\n)'
-    r'(\$|\.L)func_end[0-9]+:\n',             # $func_end0: (mips32 - O32) or
-                                              # .Lfunc_end0: (mips64 - NewABI)
-    flags=(re.M | re.S))
+    r"(?:(^[ \t]+\.set[^\n]*?\n)*^[ \t]+\.end.*?\n)"
+    r"(\$|\.L)func_end[0-9]+:\n",  # $func_end0: (mips32 - O32) or
+    # .Lfunc_end0: (mips64 - NewABI)
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_MSP430_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
-    r'(?P<body>.*?)\n'
-    r'(\$|\.L)func_end[0-9]+:\n',             # $func_end0:
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\n"
+    r"(\$|\.L)func_end[0-9]+:\n",  # $func_end0:
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_AVR_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*;+[ \t]*@"?(?P=func)"?\n[^:]*?'
-    r'(?P<body>.*?)\n'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\n"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_PPC_RE = re.compile(
-    r'#[ \-\t]*Begin function (?P<func>[^.:]+)\n'
-    r'.*?'
+    r"#[ \-\t]*Begin function (?P<func>[^.:]+)\n"
+    r".*?"
     r'^[_.]?(?P=func):(?:[ \t]*#+[ \t]*@"?(?P=func)"?)?\n'
-    r'(?:^[^#]*\n)*'
-    r'(?P<body>.*?)\n'
+    r"(?:^[^#]*\n)*"
+    r"(?P<body>.*?)\n"
     # This list is incomplete
-    r'(?:^[ \t]*(?:\.(?:long|quad|v?byte)[ \t]+[^\n]+)\n)*'
-    r'(?:\.Lfunc_end|L\.\.(?P=func))[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:^[ \t]*(?:\.(?:long|quad|v?byte)[ \t]+[^\n]+)\n)*"
+    r"(?:\.Lfunc_end|L\.\.(?P=func))[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_RISCV_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?:\s*\.?L(?P=func)\$local:\n)?'  # optional .L<func>$local: due to -fno-semantic-interposition
-    r'(?:\s*\.type\s+\.?L(?P=func)\$local, at function\n)?'  # optional .type .L<func>$local
-    r'(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:\s*\.?L(?P=func)\$local:\n)?"  # optional .L<func>$local: due to -fno-semantic-interposition
+    r"(?:\s*\.type\s+\.?L(?P=func)\$local, at function\n)?"  # optional .type .L<func>$local
+    r"(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?"
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_LANAI_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*!+[ \t]*@"?(?P=func)"?\n'
-    r'(?:[ \t]+.cfi_startproc\n)?'  # drop optional cfi noise
-    r'(?P<body>.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:[ \t]+.cfi_startproc\n)?"  # drop optional cfi noise
+    r"(?P<body>.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_SPARC_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*!+[ \t]*@"?(?P=func)"?\n'
-    r'(?P<body>.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_SYSTEMZ_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?:[ \t]+.cfi_startproc\n)?'
-    r'(?P<body>.*?)\n'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"(?:[ \t]+.cfi_startproc\n)?"
+    r"(?P<body>.*?)\n"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_AARCH64_DARWIN_RE = re.compile(
     r'^_(?P<func>[^:]+):[ \t]*;[ \t]@"?(?P=func)"?\n'
-    r'([ \t]*.cfi_startproc\n[\s]*)?'
-    r'(?P<body>.*?)'
-    r'([ \t]*.cfi_endproc\n[\s]*)?'
-    r'^[ \t]*;[ \t]--[ \t]End[ \t]function',
-    flags=(re.M | re.S))
+    r"([ \t]*.cfi_startproc\n[\s]*)?"
+    r"(?P<body>.*?)"
+    r"([ \t]*.cfi_endproc\n[\s]*)?"
+    r"^[ \t]*;[ \t]--[ \t]End[ \t]function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_ARM_DARWIN_RE = re.compile(
-    r'@[ \t]--[ \t]Begin[ \t]function[ \t](?P<func>[^ \t]+?)\n'
-    r'^[ \t]*\.globl[ \t]*_(?P=func)[ \t]*'
-    r'(?P<directives>.*?)'
-    r'^_(?P=func):\n[ \t]*'
-    r'(?P<body>.*?)'
-    r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
-    flags=(re.M | re.S ))
+    r"@[ \t]--[ \t]Begin[ \t]function[ \t](?P<func>[^ \t]+?)\n"
+    r"^[ \t]*\.globl[ \t]*_(?P=func)[ \t]*"
+    r"(?P<directives>.*?)"
+    r"^_(?P=func):\n[ \t]*"
+    r"(?P<body>.*?)"
+    r"^[ \t]*@[ \t]--[ \t]End[ \t]function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_ARM_MACHO_RE = re.compile(
-    r'^_(?P<func>[^:]+):[ \t]*\n'
-    r'([ \t]*.cfi_startproc\n[ \t]*)?'
-    r'(?P<body>.*?)\n'
-    r'[ \t]*\.cfi_endproc\n',
-    flags=(re.M | re.S))
+    r"^_(?P<func>[^:]+):[ \t]*\n"
+    r"([ \t]*.cfi_startproc\n[ \t]*)?"
+    r"(?P<body>.*?)\n"
+    r"[ \t]*\.cfi_endproc\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_THUMBS_DARWIN_RE = re.compile(
-    r'^_(?P<func>[^:]+):\n'
-    r'(?P<body>.*?)\n'
-    r'[ \t]*\.data_region\n',
-    flags=(re.M | re.S))
+    r"^_(?P<func>[^:]+):\n" r"(?P<body>.*?)\n" r"[ \t]*\.data_region\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_THUMB_DARWIN_RE = re.compile(
-    r'^_(?P<func>[^:]+):\n'
-    r'(?P<body>.*?)\n'
-    r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
-    flags=(re.M | re.S))
+    r"^_(?P<func>[^:]+):\n" r"(?P<body>.*?)\n" r"^[ \t]*@[ \t]--[ \t]End[ \t]function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_ARM_IOS_RE = re.compile(
-    r'^_(?P<func>[^:]+):\n'
-    r'(?P<body>.*?)'
-    r'^[ \t]*@[ \t]--[ \t]End[ \t]function',
-    flags=(re.M | re.S))
+    r"^_(?P<func>[^:]+):\n" r"(?P<body>.*?)" r"^[ \t]*@[ \t]--[ \t]End[ \t]function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_WASM_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?P<body>.*?)\n'
-    r'^\s*(\.Lfunc_end[0-9]+:\n|end_function)',
-    flags=(re.M | re.S))
+    r"(?P<body>.*?)\n"
+    r"^\s*(\.Lfunc_end[0-9]+:\n|end_function)",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_VE_RE = re.compile(
-    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n'
-    r'(?:\s*\.?L(?P=func)\$local:\n)?'  # optional .L<func>$local: due to -fno-semantic-interposition
-    r'(?:\s*\.type\s+\.?L(?P=func)\$local,@function\n)?'  # optional .type .L<func>$local
-    r'(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n"
+    r"(?:\s*\.?L(?P=func)\$local:\n)?"  # optional .L<func>$local: due to -fno-semantic-interposition
+    r"(?:\s*\.type\s+\.?L(?P=func)\$local,@function\n)?"  # optional .type .L<func>$local
+    r"(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?"
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_CSKY_RE = re.compile(
-    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
+    r"^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?"
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_NVPTX_RE = re.compile(
     # function attributes and retval
     # .visible .func (.param .align 16 .b8 func_retval0[32])
-    #r'^(\.visible\s+)?\.func\s+(\([^\)]*\)\s*)?'
-    r'^(\.(func|visible|weak|entry|noreturn|extern)\s+)+(\([^\)]*\)\s*)?'
-
+    # r'^(\.visible\s+)?\.func\s+(\([^\)]*\)\s*)?'
+    r"^(\.(func|visible|weak|entry|noreturn|extern)\s+)+(\([^\)]*\)\s*)?"
     # function name
-    r'(?P<func>[^\(\n]+)'
-
+    r"(?P<func>[^\(\n]+)"
     # function name separator (opening brace)
-    r'(?P<func_name_separator>\()'
-
+    r"(?P<func_name_separator>\()"
     # function parameters
     # (
     #   .param .align 16 .b8 callee_St8x4_param_0[32]
     # ) // -- Begin function callee_St8x4
-    r'[^\)]*\)(\s*//[^\n]*)?\n'
-
+    r"[^\)]*\)(\s*//[^\n]*)?\n"
     # function body
-    r'(?P<body>.*?)\n'
-
+    r"(?P<body>.*?)\n"
     # function body end marker
-    r'\s*// -- End function',
-    flags=(re.M | re.S))
+    r"\s*// -- End function",
+    flags=(re.M | re.S),
+)
 
 ASM_FUNCTION_LOONGARCH_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@"?(?P=func)"?\n'
-    r'(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'.Lfunc_end[0-9]+:\n',
-    flags=(re.M | re.S))
-
-SCRUB_X86_SHUFFLES_RE = (
-    re.compile(
-        r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
-        flags=re.M))
-
-SCRUB_X86_SHUFFLES_NO_MEM_RE = (
-    re.compile(
-        r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = (?!.*(?:mem)).*)$',
-        flags=re.M))
-
-SCRUB_X86_SPILL_RELOAD_RE = (
-    re.compile(
-        r'-?\d+\(%([er])[sb]p\)(.*(?:Spill|Reload))$',
-        flags=re.M))
-SCRUB_X86_SP_RE = re.compile(r'\d+\(%(esp|rsp)\)')
-SCRUB_X86_RIP_RE = re.compile(r'[.\w]+\(%rip\)')
-SCRUB_X86_LCP_RE = re.compile(r'\.?LCPI[0-9]+_[0-9]+')
-SCRUB_X86_RET_RE = re.compile(r'ret[l|q]')
+    r"(?:\s*\.?Lfunc_begin[^:\n]*:\n)?[^:]*?"
+    r"(?P<body>^##?[ \t]+[^:]+:.*?)\s*"
+    r".Lfunc_end[0-9]+:\n",
+    flags=(re.M | re.S),
+)
+
+SCRUB_X86_SHUFFLES_RE = re.compile(
+    r"^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$", flags=re.M
+)
+
+SCRUB_X86_SHUFFLES_NO_MEM_RE = re.compile(
+    r"^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = (?!.*(?:mem)).*)$",
+    flags=re.M,
+)
+
+SCRUB_X86_SPILL_RELOAD_RE = re.compile(
+    r"-?\d+\(%([er])[sb]p\)(.*(?:Spill|Reload))$", flags=re.M
+)
+SCRUB_X86_SP_RE = re.compile(r"\d+\(%(esp|rsp)\)")
+SCRUB_X86_RIP_RE = re.compile(r"[.\w]+\(%rip\)")
+SCRUB_X86_LCP_RE = re.compile(r"\.?LCPI[0-9]+_[0-9]+")
+SCRUB_X86_RET_RE = re.compile(r"ret[l|q]")
+
 
 def scrub_asm_x86(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-
-  # Detect shuffle asm comments and hide the operands in favor of the comments.
-  if getattr(args, 'no_x86_scrub_mem_shuffle', True):
-    asm = SCRUB_X86_SHUFFLES_NO_MEM_RE.sub(r'\1 {{.*#+}} \2', asm)
-  else:
-    asm = SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', asm)
-
-  # Detect stack spills and reloads and hide their exact offset and whether
-  # they used the stack pointer or frame pointer.
-  asm = SCRUB_X86_SPILL_RELOAD_RE.sub(r'{{[-0-9]+}}(%\1{{[sb]}}p)\2', asm)
-  if getattr(args, 'x86_scrub_sp', True):
-    # Generically match the stack offset of a memory operand.
-    asm = SCRUB_X86_SP_RE.sub(r'{{[0-9]+}}(%\1)', asm)
-  if getattr(args, 'x86_scrub_rip', False):
-    # Generically match a RIP-relative memory operand.
-    asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
-  # Generically match a LCP symbol.
-  asm = SCRUB_X86_LCP_RE.sub(r'{{\.?LCPI[0-9]+_[0-9]+}}', asm)
-  if getattr(args, 'extra_scrub', False):
-  # Avoid generating different checks for 32- and 64-bit because of 'retl' vs 'retq'.
-    asm = SCRUB_X86_RET_RE.sub(r'ret{{[l|q]}}', asm)
-  # Strip kill operands inserted into the asm.
-  asm = common.SCRUB_KILL_COMMENT_RE.sub('', asm)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+
+    # Detect shuffle asm comments and hide the operands in favor of the comments.
+    if getattr(args, "no_x86_scrub_mem_shuffle", True):
+        asm = SCRUB_X86_SHUFFLES_NO_MEM_RE.sub(r"\1 {{.*#+}} \2", asm)
+    else:
+        asm = SCRUB_X86_SHUFFLES_RE.sub(r"\1 {{.*#+}} \2", asm)
+
+    # Detect stack spills and reloads and hide their exact offset and whether
+    # they used the stack pointer or frame pointer.
+    asm = SCRUB_X86_SPILL_RELOAD_RE.sub(r"{{[-0-9]+}}(%\1{{[sb]}}p)\2", asm)
+    if getattr(args, "x86_scrub_sp", True):
+        # Generically match the stack offset of a memory operand.
+        asm = SCRUB_X86_SP_RE.sub(r"{{[0-9]+}}(%\1)", asm)
+    if getattr(args, "x86_scrub_rip", False):
+        # Generically match a RIP-relative memory operand.
+        asm = SCRUB_X86_RIP_RE.sub(r"{{.*}}(%rip)", asm)
+    # Generically match a LCP symbol.
+    asm = SCRUB_X86_LCP_RE.sub(r"{{\.?LCPI[0-9]+_[0-9]+}}", asm)
+    if getattr(args, "extra_scrub", False):
+    # Avoid generating different checks for 32- and 64-bit because of 'retl' vs 'retq'.
+        asm = SCRUB_X86_RET_RE.sub(r"ret{{[l|q]}}", asm)
+    # Strip kill operands inserted into the asm.
+    asm = common.SCRUB_KILL_COMMENT_RE.sub("", asm)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_amdgpu(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_arm_eabi(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip kill operands inserted into the asm.
-  asm = common.SCRUB_KILL_COMMENT_RE.sub('', asm)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip kill operands inserted into the asm.
+    asm = common.SCRUB_KILL_COMMENT_RE.sub("", asm)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_bpf(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_hexagon(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_powerpc(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip unimportant comments, but leave the token '#' in place.
-  asm = common.SCRUB_LOOP_COMMENT_RE.sub(r'#', asm)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  # Strip the tailing token '#', except the line only has token '#'.
-  asm = common.SCRUB_TAILING_COMMENT_TOKEN_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip unimportant comments, but leave the token '#' in place.
+    asm = common.SCRUB_LOOP_COMMENT_RE.sub(r"#", asm)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    # Strip the tailing token '#', except the line only has token '#'.
+    asm = common.SCRUB_TAILING_COMMENT_TOKEN_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_m68k(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_mips(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_msp430(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_avr(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_riscv(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_lanai(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_sparc(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_systemz(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_wasm(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_ve(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_csky(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip kill operands inserted into the asm.
-  asm = common.SCRUB_KILL_COMMENT_RE.sub('', asm)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip kill operands inserted into the asm.
+    asm = common.SCRUB_KILL_COMMENT_RE.sub("", asm)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_nvptx(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 def scrub_asm_loongarch(asm, args):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
-  # Expand the tabs used for indentation.
-  asm = string.expandtabs(asm, 2)
-  # Strip trailing whitespace.
-  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
-  return asm
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    asm = common.SCRUB_WHITESPACE_RE.sub(r" ", asm)
+    # Expand the tabs used for indentation.
+    asm = string.expandtabs(asm, 2)
+    # Strip trailing whitespace.
+    asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", asm)
+    return asm
+
 
 # Returns a tuple of a scrub function and a function regex. Scrub function is
 # used to alter function body in some way, for example, remove trailing spaces.
 # Function regex is used to match function name, body, etc. in raw llc output.
 def get_run_handler(triple):
-  target_handlers = {
-      'i686': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
-      'x86': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
-      'i386': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
-      'arm64_32-apple-ios': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'arm64_32-apple-watchos2.0.0': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'aarch64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
-      'aarch64-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'aarch64-apple-ios': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'bpf': (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
-      'bpfel': (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
-      'bpfeb': (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
-      'hexagon': (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE),
-      'r600': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
-      'amdgcn': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
-      'arm': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
-      'arm64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
-      'arm64e': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'arm64ec': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
-      'arm64-apple-ios': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
-      'armv7-apple-ios' : (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
-      'armv7-apple-darwin': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE),
-      'thumb': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
-      'thumb-macho': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
-      'thumbv5-macho': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
-      'thumbv7s-apple-darwin' : (scrub_asm_arm_eabi, ASM_FUNCTION_THUMBS_DARWIN_RE),
-      'thumbv7-apple-darwin' : (scrub_asm_arm_eabi, ASM_FUNCTION_THUMB_DARWIN_RE),
-      'thumbv7-apple-ios' : (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
-      'm68k': (scrub_asm_m68k, ASM_FUNCTION_M68K_RE),
-      'mips': (scrub_asm_mips, ASM_FUNCTION_MIPS_RE),
-      'msp430': (scrub_asm_msp430, ASM_FUNCTION_MSP430_RE),
-      'avr': (scrub_asm_avr, ASM_FUNCTION_AVR_RE),
-      'ppc32': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
-      'ppc64': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
-      'powerpc': (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
-      'riscv32': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
-      'riscv64': (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
-      'lanai': (scrub_asm_lanai, ASM_FUNCTION_LANAI_RE),
-      'sparc': (scrub_asm_sparc, ASM_FUNCTION_SPARC_RE),
-      's390x': (scrub_asm_systemz, ASM_FUNCTION_SYSTEMZ_RE),
-      'wasm32': (scrub_asm_wasm, ASM_FUNCTION_WASM_RE),
-      'wasm64': (scrub_asm_wasm, ASM_FUNCTION_WASM_RE),
-      've': (scrub_asm_ve, ASM_FUNCTION_VE_RE),
-      'csky': (scrub_asm_csky, ASM_FUNCTION_CSKY_RE),
-      'nvptx': (scrub_asm_nvptx, ASM_FUNCTION_NVPTX_RE),
-      'loongarch32': (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
-      'loongarch64': (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE)
-  }
-  handler = None
-  best_prefix = ''
-  for prefix, s in target_handlers.items():
-    if triple.startswith(prefix) and len(prefix) > len(best_prefix):
-      handler = s
-      best_prefix = prefix
-
-  if handler is None:
-    raise KeyError('Triple %r is not supported' % (triple))
-
-  return handler
+    target_handlers = {
+        "i686": (scrub_asm_x86, ASM_FUNCTION_X86_RE),
+        "x86": (scrub_asm_x86, ASM_FUNCTION_X86_RE),
+        "i386": (scrub_asm_x86, ASM_FUNCTION_X86_RE),
+        "arm64_32-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "arm64_32-apple-watchos2.0.0": (
+            scrub_asm_arm_eabi,
+            ASM_FUNCTION_AARCH64_DARWIN_RE,
+        ),
+        "aarch64": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
+        "aarch64-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "aarch64-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "bpf": (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
+        "bpfel": (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
+        "bpfeb": (scrub_asm_bpf, ASM_FUNCTION_BPF_RE),
+        "hexagon": (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE),
+        "r600": (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
+        "amdgcn": (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
+        "arm": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
+        "arm64": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
+        "arm64e": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "arm64ec": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
+        "arm64-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_DARWIN_RE),
+        "armv7-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
+        "armv7-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE),
+        "thumb": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
+        "thumb-macho": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
+        "thumbv5-macho": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),
+        "thumbv7s-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_THUMBS_DARWIN_RE),
+        "thumbv7-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_THUMB_DARWIN_RE),
+        "thumbv7-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),
+        "m68k": (scrub_asm_m68k, ASM_FUNCTION_M68K_RE),
+        "mips": (scrub_asm_mips, ASM_FUNCTION_MIPS_RE),
+        "msp430": (scrub_asm_msp430, ASM_FUNCTION_MSP430_RE),
+        "avr": (scrub_asm_avr, ASM_FUNCTION_AVR_RE),
+        "ppc32": (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
+        "ppc64": (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
+        "powerpc": (scrub_asm_powerpc, ASM_FUNCTION_PPC_RE),
+        "riscv32": (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
+        "riscv64": (scrub_asm_riscv, ASM_FUNCTION_RISCV_RE),
+        "lanai": (scrub_asm_lanai, ASM_FUNCTION_LANAI_RE),
+        "sparc": (scrub_asm_sparc, ASM_FUNCTION_SPARC_RE),
+        "s390x": (scrub_asm_systemz, ASM_FUNCTION_SYSTEMZ_RE),
+        "wasm32": (scrub_asm_wasm, ASM_FUNCTION_WASM_RE),
+        "wasm64": (scrub_asm_wasm, ASM_FUNCTION_WASM_RE),
+        "ve": (scrub_asm_ve, ASM_FUNCTION_VE_RE),
+        "csky": (scrub_asm_csky, ASM_FUNCTION_CSKY_RE),
+        "nvptx": (scrub_asm_nvptx, ASM_FUNCTION_NVPTX_RE),
+        "loongarch32": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
+        "loongarch64": (scrub_asm_loongarch, ASM_FUNCTION_LOONGARCH_RE),
+    }
+    handler = None
+    best_prefix = ""
+    for prefix, s in target_handlers.items():
+        if triple.startswith(prefix) and len(prefix) > len(best_prefix):
+            handler = s
+            best_prefix = prefix
+
+    if handler is None:
+        raise KeyError("Triple %r is not supported" % (triple))
+
+    return handler
+
 
 ##### Generator of assembly CHECK lines
 
-def add_checks(output_lines, comment_marker, prefix_list, func_dict,
-               func_name, global_vars_seen_dict, is_filtered):
-  # Label format is based on ASM string.
-  check_label_format = '{} %s-LABEL: %s%s%s%s'.format(comment_marker)
-  return common.add_checks(output_lines, comment_marker, prefix_list, func_dict,
-                           func_name, check_label_format, True, False, 1,
-                           global_vars_seen_dict, is_filtered=is_filtered)
+
+def add_checks(
+    output_lines,
+    comment_marker,
+    prefix_list,
+    func_dict,
+    func_name,
+    global_vars_seen_dict,
+    is_filtered,
+):
+    # Label format is based on ASM string.
+    check_label_format = "{} %s-LABEL: %s%s%s%s".format(comment_marker)
+    return common.add_checks(
+        output_lines,
+        comment_marker,
+        prefix_list,
+        func_dict,
+        func_name,
+        check_label_format,
+        True,
+        False,
+        1,
+        global_vars_seen_dict,
+        is_filtered=is_filtered,
+    )

diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py
index aa0b12812e205..b22d7e3276abf 100644
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -16,7 +16,7 @@
 
 
 _verbose = False
-_prefix_filecheck_ir_name = ''
+_prefix_filecheck_ir_name = ""
 
 """
 Version changelog:
@@ -27,792 +27,1028 @@
 """
 DEFAULT_VERSION = 2
 
+
 class Regex(object):
-  """Wrap a compiled regular expression object to allow deep copy of a regexp.
-  This is required for the deep copy done in do_scrub.
+    """Wrap a compiled regular expression object to allow deep copy of a regexp.
+    This is required for the deep copy done in do_scrub.
 
-  """
-  def __init__(self, regex):
-    self.regex = regex
+    """
 
-  def __deepcopy__(self, memo):
-    result = copy.copy(self)
-    result.regex = self.regex
-    return result
+    def __init__(self, regex):
+        self.regex = regex
+
+    def __deepcopy__(self, memo):
+        result = copy.copy(self)
+        result.regex = self.regex
+        return result
+
+    def search(self, line):
+        return self.regex.search(line)
 
-  def search(self, line):
-    return self.regex.search(line)
+    def sub(self, repl, line):
+        return self.regex.sub(repl, line)
 
-  def sub(self, repl, line):
-    return self.regex.sub(repl, line)
+    def pattern(self):
+        return self.regex.pattern
 
-  def pattern(self):
-    return self.regex.pattern
+    def flags(self):
+        return self.regex.flags
 
-  def flags(self):
-    return self.regex.flags
 
 class Filter(Regex):
-  """Augment a Regex object with a flag indicating whether a match should be
+    """Augment a Regex object with a flag indicating whether a match should be
     added (!is_filter_out) or removed (is_filter_out) from the generated checks.
 
-  """
-  def __init__(self, regex, is_filter_out):
-    super(Filter, self).__init__(regex)
-    self.is_filter_out = is_filter_out
+    """
+
+    def __init__(self, regex, is_filter_out):
+        super(Filter, self).__init__(regex)
+        self.is_filter_out = is_filter_out
+
+    def __deepcopy__(self, memo):
+        result = copy.deepcopy(super(Filter, self), memo)
+        result.is_filter_out = copy.deepcopy(self.is_filter_out, memo)
+        return result
 
-  def __deepcopy__(self, memo):
-    result = copy.deepcopy(super(Filter, self), memo)
-    result.is_filter_out = copy.deepcopy(self.is_filter_out, memo)
-    return result
 
 def parse_commandline_args(parser):
-  class RegexAction(argparse.Action):
-    """Add a regular expression option value to a list of regular expressions.
-    This compiles the expression, wraps it in a Regex and adds it to the option
-    value list."""
-    def __init__(self, option_strings, dest, nargs=None, **kwargs):
-      if nargs is not None:
-        raise ValueError('nargs not allowed')
-      super(RegexAction, self).__init__(option_strings, dest, **kwargs)
-
-    def do_call(self, namespace, values, flags):
-      value_list = getattr(namespace, self.dest)
-      if value_list is None:
-        value_list = []
-
-      try:
-        value_list.append(Regex(re.compile(values, flags)))
-      except re.error as error:
-        raise ValueError('{}: Invalid regular expression \'{}\' ({})'.format(
-          option_string, error.pattern, error.msg))
-
-      setattr(namespace, self.dest, value_list)
-
-    def __call__(self, parser, namespace, values, option_string=None):
-      self.do_call(namespace, values, 0)
-
-  class FilterAction(RegexAction):
-    """Add a filter to a list of filter option values."""
-    def __init__(self, option_strings, dest, nargs=None, **kwargs):
-      super(FilterAction, self).__init__(option_strings, dest, nargs, **kwargs)
-
-    def __call__(self, parser, namespace, values, option_string=None):
-      super(FilterAction, self).__call__(parser, namespace, values, option_string)
-
-      value_list = getattr(namespace, self.dest)
-
-      is_filter_out = ( option_string == '--filter-out' )
-
-      value_list[-1] = Filter(value_list[-1].regex, is_filter_out)
-
-      setattr(namespace, self.dest, value_list)
-
-  filter_group = parser.add_argument_group(
-    'filtering',
-    """Filters are applied to each output line according to the order given. The
-    first matching filter terminates filter processing for that current line.""")
-
-  filter_group.add_argument('--filter', action=FilterAction, dest='filters',
-                            metavar='REGEX',
-                            help='Only include lines matching REGEX (may be specified multiple times)')
-  filter_group.add_argument('--filter-out', action=FilterAction, dest='filters',
-                            metavar='REGEX',
-                            help='Exclude lines matching REGEX')
-
-  parser.add_argument('--include-generated-funcs', action='store_true',
-                      help='Output checks for functions not in source')
-  parser.add_argument('-v', '--verbose', action='store_true',
-                      help='Show verbose output')
-  parser.add_argument('-u', '--update-only', action='store_true',
-                      help='Only update test if it was already autogened')
-  parser.add_argument('--force-update', action='store_true',
-                      help='Update test even if it was autogened by a 
diff erent script')
-  parser.add_argument('--enable', action='store_true', dest='enabled', default=True,
-                       help='Activate CHECK line generation from this point forward')
-  parser.add_argument('--disable', action='store_false', dest='enabled',
-                      help='Deactivate CHECK line generation from this point forward')
-  parser.add_argument('--replace-value-regex', nargs='+', default=[],
-                      help='List of regular expressions to replace matching value names')
-  parser.add_argument('--prefix-filecheck-ir-name', default='',
-                      help='Add a prefix to FileCheck IR value names to avoid conflicts with scripted names')
-  parser.add_argument('--global-value-regex', nargs='+', default=[],
-                      help='List of regular expressions that a global value declaration must match to generate a check (has no effect if checking globals is not enabled)')
-  parser.add_argument('--global-hex-value-regex', nargs='+', default=[],
-                      help='List of regular expressions such that, for matching global value declarations, literal integer values should be encoded in hex in the associated FileCheck directives')
-  # FIXME: in 3.9, we can use argparse.BooleanOptionalAction. At that point,
-  # we need to rename the flag to just -generate-body-for-unused-prefixes.
-  parser.add_argument('--no-generate-body-for-unused-prefixes',
-                      action='store_false',
-                      dest='gen_unused_prefix_body',
-                      default=True,
-                      help='Generate a function body that always matches for unused prefixes. This is useful when unused prefixes are desired, and it avoids needing to annotate each FileCheck as allowing them.')
-  # This is the default when regenerating existing tests. The default when
-  # generating new tests is determined by DEFAULT_VERSION.
-  parser.add_argument('--version', type=int, default=1,
-                      help='The version of output format')
-  args = parser.parse_args()
-  global _verbose, _global_value_regex, _global_hex_value_regex
-  _verbose = args.verbose
-  _global_value_regex = args.global_value_regex
-  _global_hex_value_regex = args.global_hex_value_regex
-  return args
+    class RegexAction(argparse.Action):
+        """Add a regular expression option value to a list of regular expressions.
+        This compiles the expression, wraps it in a Regex and adds it to the option
+        value list."""
+
+        def __init__(self, option_strings, dest, nargs=None, **kwargs):
+            if nargs is not None:
+                raise ValueError("nargs not allowed")
+            super(RegexAction, self).__init__(option_strings, dest, **kwargs)
+
+        def do_call(self, namespace, values, flags):
+            value_list = getattr(namespace, self.dest)
+            if value_list is None:
+                value_list = []
+
+            try:
+                value_list.append(Regex(re.compile(values, flags)))
+            except re.error as error:
+                raise ValueError(
+                    "{}: Invalid regular expression '{}' ({})".format(
+                        option_string, error.pattern, error.msg
+                    )
+                )
+
+            setattr(namespace, self.dest, value_list)
+
+        def __call__(self, parser, namespace, values, option_string=None):
+            self.do_call(namespace, values, 0)
+
+    class FilterAction(RegexAction):
+        """Add a filter to a list of filter option values."""
+
+        def __init__(self, option_strings, dest, nargs=None, **kwargs):
+            super(FilterAction, self).__init__(option_strings, dest, nargs, **kwargs)
+
+        def __call__(self, parser, namespace, values, option_string=None):
+            super(FilterAction, self).__call__(parser, namespace, values, option_string)
+
+            value_list = getattr(namespace, self.dest)
+
+            is_filter_out = option_string == "--filter-out"
+
+            value_list[-1] = Filter(value_list[-1].regex, is_filter_out)
+
+            setattr(namespace, self.dest, value_list)
+
+    filter_group = parser.add_argument_group(
+        "filtering",
+        """Filters are applied to each output line according to the order given. The
+    first matching filter terminates filter processing for that current line.""",
+    )
+
+    filter_group.add_argument(
+        "--filter",
+        action=FilterAction,
+        dest="filters",
+        metavar="REGEX",
+        help="Only include lines matching REGEX (may be specified multiple times)",
+    )
+    filter_group.add_argument(
+        "--filter-out",
+        action=FilterAction,
+        dest="filters",
+        metavar="REGEX",
+        help="Exclude lines matching REGEX",
+    )
+
+    parser.add_argument(
+        "--include-generated-funcs",
+        action="store_true",
+        help="Output checks for functions not in source",
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="store_true", help="Show verbose output"
+    )
+    parser.add_argument(
+        "-u",
+        "--update-only",
+        action="store_true",
+        help="Only update test if it was already autogened",
+    )
+    parser.add_argument(
+        "--force-update",
+        action="store_true",
+        help="Update test even if it was autogened by a different script",
+    )
+    parser.add_argument(
+        "--enable",
+        action="store_true",
+        dest="enabled",
+        default=True,
+        help="Activate CHECK line generation from this point forward",
+    )
+    parser.add_argument(
+        "--disable",
+        action="store_false",
+        dest="enabled",
+        help="Deactivate CHECK line generation from this point forward",
+    )
+    parser.add_argument(
+        "--replace-value-regex",
+        nargs="+",
+        default=[],
+        help="List of regular expressions to replace matching value names",
+    )
+    parser.add_argument(
+        "--prefix-filecheck-ir-name",
+        default="",
+        help="Add a prefix to FileCheck IR value names to avoid conflicts with scripted names",
+    )
+    parser.add_argument(
+        "--global-value-regex",
+        nargs="+",
+        default=[],
+        help="List of regular expressions that a global value declaration must match to generate a check (has no effect if checking globals is not enabled)",
+    )
+    parser.add_argument(
+        "--global-hex-value-regex",
+        nargs="+",
+        default=[],
+        help="List of regular expressions such that, for matching global value declarations, literal integer values should be encoded in hex in the associated FileCheck directives",
+    )
+    # FIXME: in 3.9, we can use argparse.BooleanOptionalAction. At that point,
+    # we need to rename the flag to just -generate-body-for-unused-prefixes.
+    parser.add_argument(
+        "--no-generate-body-for-unused-prefixes",
+        action="store_false",
+        dest="gen_unused_prefix_body",
+        default=True,
+        help="Generate a function body that always matches for unused prefixes. This is useful when unused prefixes are desired, and it avoids needing to annotate each FileCheck as allowing them.",
+    )
+    # This is the default when regenerating existing tests. The default when
+    # generating new tests is determined by DEFAULT_VERSION.
+    parser.add_argument(
+        "--version", type=int, default=1, help="The version of output format"
+    )
+    args = parser.parse_args()
+    global _verbose, _global_value_regex, _global_hex_value_regex
+    _verbose = args.verbose
+    _global_value_regex = args.global_value_regex
+    _global_hex_value_regex = args.global_hex_value_regex
+    return args
+
 
 def parse_args(parser, argv):
-  args = parser.parse_args(argv)
-  if args.version >= 2:
-    args.function_signature = True
-  return args
+    args = parser.parse_args(argv)
+    if args.version >= 2:
+        args.function_signature = True
+    return args
+
 
 class InputLineInfo(object):
-  def __init__(self, line, line_number, args, argv):
-    self.line = line
-    self.line_number = line_number
-    self.args = args
-    self.argv = argv
+    def __init__(self, line, line_number, args, argv):
+        self.line = line
+        self.line_number = line_number
+        self.args = args
+        self.argv = argv
 
 
 class TestInfo(object):
-  def __init__(self, test, parser, script_name, input_lines, args, argv,
-               comment_prefix, argparse_callback):
-    self.parser = parser
-    self.argparse_callback = argparse_callback
-    self.path = test
-    self.args = args
-    if args.prefix_filecheck_ir_name:
-      global _prefix_filecheck_ir_name
-      _prefix_filecheck_ir_name = args.prefix_filecheck_ir_name
-    self.argv = argv
-    self.input_lines = input_lines
-    self.run_lines = find_run_lines(test, self.input_lines)
-    self.comment_prefix = comment_prefix
-    if self.comment_prefix is None:
-      if self.path.endswith('.mir'):
-        self.comment_prefix = '#'
-      else:
-        self.comment_prefix = ';'
-    self.autogenerated_note_prefix = self.comment_prefix + ' ' + UTC_ADVERT
-    self.test_autogenerated_note = self.autogenerated_note_prefix + script_name
-    self.test_autogenerated_note += get_autogennote_suffix(parser, self.args)
-    self.test_unused_note = self.comment_prefix + self.comment_prefix + ' ' + UNUSED_NOTE
-
-  def ro_iterlines(self):
-    for line_num, input_line in enumerate(self.input_lines):
-      args, argv = check_for_command(input_line, self.parser,
-                                     self.args, self.argv, self.argparse_callback)
-      yield InputLineInfo(input_line, line_num, args, argv)
-
-  def iterlines(self, output_lines):
-    output_lines.append(self.test_autogenerated_note)
-    for line_info in self.ro_iterlines():
-      input_line = line_info.line
-      # Discard any previous script advertising.
-      if input_line.startswith(self.autogenerated_note_prefix):
-        continue
-      self.args = line_info.args
-      self.argv = line_info.argv
-      if not self.args.enabled:
-        output_lines.append(input_line)
-        continue
-      yield line_info
-
-  def get_checks_for_unused_prefixes(self, run_list, used_prefixes: List[str]) -> List[str]:
-    run_list = [element for element in run_list if element[0] is not None]
-  unused_prefixes = set([
-        prefix for sublist in run_list for prefix in sublist[0]
-    ]).difference(set(used_prefixes))
-
-    ret = []
-    if not unused_prefixes:
-      return ret
-    ret.append(self.test_unused_note)
-    for unused in sorted(unused_prefixes):
-      ret.append('{comment} {prefix}: {match_everything}'.format(
-        comment=self.comment_prefix,
-        prefix=unused,
-        match_everything=r"""{{.*}}"""
-      ))
-    return ret
-
-def itertests(test_patterns, parser, script_name, comment_prefix=None, argparse_callback=None):
-  for pattern in test_patterns:
-    # On Windows we must expand the patterns ourselves.
-    tests_list = glob.glob(pattern)
-    if not tests_list:
-      warn("Test file pattern '%s' was not found. Ignoring it." % (pattern,))
-      continue
-    for test in tests_list:
-      with open(test) as f:
-        input_lines = [l.rstrip() for l in f]
-      first_line = input_lines[0] if input_lines else ""
-      is_regenerate = UTC_ADVERT in first_line
-
-      # If we're generating a new test, set the default version to the latest.
-      argv = sys.argv[:]
-      if not is_regenerate:
-        argv.insert(1, '--version=' + str(DEFAULT_VERSION))
-
-      args = parse_args(parser, argv[1:])
-      if argparse_callback is not None:
-        argparse_callback(args)
-      if is_regenerate:
-        if script_name not in first_line and not args.force_update:
-          warn("Skipping test which wasn't autogenerated by " + script_name, test)
-          continue
-        args, argv = check_for_command(first_line, parser, args, argv, argparse_callback)
-      elif args.update_only:
-        assert UTC_ADVERT not in first_line
-        warn("Skipping test which isn't autogenerated: " + test)
-        continue
-      final_input_lines = []
-      for l in input_lines:
-        if UNUSED_NOTE in l:
-          break
-        final_input_lines.append(l)
-      yield TestInfo(test, parser, script_name, final_input_lines, args, argv,
-                     comment_prefix, argparse_callback)
-
-
-def should_add_line_to_output(input_line, prefix_set, skip_global_checks = False, comment_marker = ';'):
-  # Skip any blank comment lines in the IR.
-  if not skip_global_checks and input_line.strip() == comment_marker:
-    return False
-  # Skip a special double comment line we use as a separator.
-  if input_line.strip() == comment_marker + SEPARATOR:
-    return False
-  # Skip any blank lines in the IR.
-  #if input_line.strip() == '':
-  #  return False
-  # And skip any CHECK lines. We're building our own.
-  m = CHECK_RE.match(input_line)
-  if m and m.group(1) in prefix_set:
-    if skip_global_checks:
-      global_ir_value_re = re.compile(r'\[\[', flags=(re.M))
-      return not global_ir_value_re.search(input_line)
-    return False
-
-  return True
+    def __init__(
+        self,
+        test,
+        parser,
+        script_name,
+        input_lines,
+        args,
+        argv,
+        comment_prefix,
+        argparse_callback,
+    ):
+        self.parser = parser
+        self.argparse_callback = argparse_callback
+        self.path = test
+        self.args = args
+        if args.prefix_filecheck_ir_name:
+            global _prefix_filecheck_ir_name
+            _prefix_filecheck_ir_name = args.prefix_filecheck_ir_name
+        self.argv = argv
+        self.input_lines = input_lines
+        self.run_lines = find_run_lines(test, self.input_lines)
+        self.comment_prefix = comment_prefix
+        if self.comment_prefix is None:
+            if self.path.endswith(".mir"):
+                self.comment_prefix = "#"
+            else:
+                self.comment_prefix = ";"
+        self.autogenerated_note_prefix = self.comment_prefix + " " + UTC_ADVERT
+        self.test_autogenerated_note = self.autogenerated_note_prefix + script_name
+        self.test_autogenerated_note += get_autogennote_suffix(parser, self.args)
+        self.test_unused_note = (
+            self.comment_prefix + self.comment_prefix + " " + UNUSED_NOTE
+        )
+
+    def ro_iterlines(self):
+        for line_num, input_line in enumerate(self.input_lines):
+            args, argv = check_for_command(
+                input_line, self.parser, self.args, self.argv, self.argparse_callback
+            )
+            yield InputLineInfo(input_line, line_num, args, argv)
+
+    def iterlines(self, output_lines):
+        output_lines.append(self.test_autogenerated_note)
+        for line_info in self.ro_iterlines():
+            input_line = line_info.line
+            # Discard any previous script advertising.
+            if input_line.startswith(self.autogenerated_note_prefix):
+                continue
+            self.args = line_info.args
+            self.argv = line_info.argv
+            if not self.args.enabled:
+                output_lines.append(input_line)
+                continue
+            yield line_info
+
+    def get_checks_for_unused_prefixes(
+        self, run_list, used_prefixes: List[str]
+    ) -> List[str]:
+        run_list = [element for element in run_list if element[0] is not None]
+        unused_prefixes = set(
+            [prefix for sublist in run_list for prefix in sublist[0]]
+        ).difference(set(used_prefixes))
+
+        ret = []
+        if not unused_prefixes:
+            return ret
+        ret.append(self.test_unused_note)
+        for unused in sorted(unused_prefixes):
+            ret.append(
+                "{comment} {prefix}: {match_everything}".format(
+                    comment=self.comment_prefix,
+                    prefix=unused,
+                    match_everything=r"""{{.*}}""",
+                )
+            )
+        return ret
+
+
+def itertests(
+    test_patterns, parser, script_name, comment_prefix=None, argparse_callback=None
+):
+    for pattern in test_patterns:
+        # On Windows we must expand the patterns ourselves.
+        tests_list = glob.glob(pattern)
+        if not tests_list:
+            warn("Test file pattern '%s' was not found. Ignoring it." % (pattern,))
+            continue
+        for test in tests_list:
+            with open(test) as f:
+                input_lines = [l.rstrip() for l in f]
+            first_line = input_lines[0] if input_lines else ""
+            is_regenerate = UTC_ADVERT in first_line
+
+            # If we're generating a new test, set the default version to the latest.
+            argv = sys.argv[:]
+            if not is_regenerate:
+                argv.insert(1, "--version=" + str(DEFAULT_VERSION))
+
+            args = parse_args(parser, argv[1:])
+            if argparse_callback is not None:
+                argparse_callback(args)
+            if is_regenerate:
+                if script_name not in first_line and not args.force_update:
+                    warn(
+                        "Skipping test which wasn't autogenerated by " + script_name,
+                        test,
+                    )
+                    continue
+                args, argv = check_for_command(
+                    first_line, parser, args, argv, argparse_callback
+                )
+            elif args.update_only:
+                assert UTC_ADVERT not in first_line
+                warn("Skipping test which isn't autogenerated: " + test)
+                continue
+            final_input_lines = []
+            for l in input_lines:
+                if UNUSED_NOTE in l:
+                    break
+                final_input_lines.append(l)
+            yield TestInfo(
+                test,
+                parser,
+                script_name,
+                final_input_lines,
+                args,
+                argv,
+                comment_prefix,
+                argparse_callback,
+            )
+
+
+def should_add_line_to_output(
+    input_line, prefix_set, skip_global_checks=False, comment_marker=";"
+):
+    # Skip any blank comment lines in the IR.
+    if not skip_global_checks and input_line.strip() == comment_marker:
+        return False
+    # Skip a special double comment line we use as a separator.
+    if input_line.strip() == comment_marker + SEPARATOR:
+        return False
+    # Skip any blank lines in the IR.
+    # if input_line.strip() == '':
+    #  return False
+    # And skip any CHECK lines. We're building our own.
+    m = CHECK_RE.match(input_line)
+    if m and m.group(1) in prefix_set:
+        if skip_global_checks:
+            global_ir_value_re = re.compile(r"\[\[", flags=(re.M))
+            return not global_ir_value_re.search(input_line)
+        return False
+
+    return True
+
 
 # Perform lit-like substitutions
 def getSubstitutions(sourcepath):
-  sourcedir = os.path.dirname(sourcepath)
-  return [('%s', sourcepath),
-          ('%S', sourcedir),
-          ('%p', sourcedir),
-          ('%{pathsep}', os.pathsep)]
+    sourcedir = os.path.dirname(sourcepath)
+    return [
+        ("%s", sourcepath),
+        ("%S", sourcedir),
+        ("%p", sourcedir),
+        ("%{pathsep}", os.pathsep),
+    ]
+
 
 def applySubstitutions(s, substitutions):
-  for a,b in substitutions:
-    s = s.replace(a, b)
-  return s
+    for a, b in substitutions:
+        s = s.replace(a, b)
+    return s
+
 
 # Invoke the tool that is being tested.
 def invoke_tool(exe, cmd_args, ir, preprocess_cmd=None, verbose=False):
-  with open(ir) as ir_file:
-    substitutions = getSubstitutions(ir)
-
-    # TODO Remove the str form which is used by update_test_checks.py and
-    # update_llc_test_checks.py
-    # The safer list form is used by update_cc_test_checks.py
-    if preprocess_cmd:
-      # Allow pre-processing the IR file (e.g. using sed):
-      assert isinstance(preprocess_cmd, str)  # TODO: use a list instead of using shell
-      preprocess_cmd = applySubstitutions(preprocess_cmd, substitutions).strip()
-      if verbose:
-        print('Pre-processing input file: ', ir, " with command '",
-              preprocess_cmd, "'", sep="", file=sys.stderr)
-      # Python 2.7 doesn't have subprocess.DEVNULL:
-      with open(os.devnull, 'w') as devnull:
-        pp = subprocess.Popen(preprocess_cmd, shell=True, stdin=devnull,
-                              stdout=subprocess.PIPE)
-        ir_file = pp.stdout
-
-    if isinstance(cmd_args, list):
-      args = [applySubstitutions(a, substitutions) for a in cmd_args]
-      stdout = subprocess.check_output([exe] + args, stdin=ir_file)
-    else:
-      stdout = subprocess.check_output(exe + ' ' + applySubstitutions(cmd_args, substitutions),
-                                       shell=True, stdin=ir_file)
-    if sys.version_info[0] > 2:
-      # FYI, if you crashed here with a decode error, your run line probably
-      # results in bitcode or other binary format being written to the pipe.
-      # For an opt test, you probably want to add -S or -disable-output.
-      stdout = stdout.decode()
-  # Fix line endings to unix CR style.
-  return stdout.replace('\r\n', '\n')
+    with open(ir) as ir_file:
+        substitutions = getSubstitutions(ir)
+
+        # TODO Remove the str form which is used by update_test_checks.py and
+        # update_llc_test_checks.py
+        # The safer list form is used by update_cc_test_checks.py
+        if preprocess_cmd:
+            # Allow pre-processing the IR file (e.g. using sed):
+            assert isinstance(
+                preprocess_cmd, str
+            )  # TODO: use a list instead of using shell
+            preprocess_cmd = applySubstitutions(preprocess_cmd, substitutions).strip()
+            if verbose:
+                print(
+                    "Pre-processing input file: ",
+                    ir,
+                    " with command '",
+                    preprocess_cmd,
+                    "'",
+                    sep="",
+                    file=sys.stderr,
+                )
+            # Python 2.7 doesn't have subprocess.DEVNULL:
+            with open(os.devnull, "w") as devnull:
+                pp = subprocess.Popen(
+                    preprocess_cmd, shell=True, stdin=devnull, stdout=subprocess.PIPE
+                )
+                ir_file = pp.stdout
+
+        if isinstance(cmd_args, list):
+            args = [applySubstitutions(a, substitutions) for a in cmd_args]
+            stdout = subprocess.check_output([exe] + args, stdin=ir_file)
+        else:
+            stdout = subprocess.check_output(
+                exe + " " + applySubstitutions(cmd_args, substitutions),
+                shell=True,
+                stdin=ir_file,
+            )
+        if sys.version_info[0] > 2:
+            # FYI, if you crashed here with a decode error, your run line probably
+            # results in bitcode or other binary format being written to the pipe.
+            # For an opt test, you probably want to add -S or -disable-output.
+            stdout = stdout.decode()
+    # Fix line endings to unix CR style.
+    return stdout.replace("\r\n", "\n")
 
-##### LLVM IR parser
-RUN_LINE_RE = re.compile(r'^\s*(?://|[;#])\s*RUN:\s*(.*)$')
-CHECK_PREFIX_RE = re.compile(r'--?check-prefix(?:es)?[= ](\S+)')
-PREFIX_RE = re.compile('^[a-zA-Z0-9_-]+$')
-CHECK_RE = re.compile(r'^\s*(?://|[;#])\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL|-SAME|-EMPTY)?:')
 
-UTC_ARGS_KEY = 'UTC_ARGS:'
-UTC_ARGS_CMD = re.compile(r'.*' + UTC_ARGS_KEY + '\s*(?P<cmd>.*)\s*$')
-UTC_ADVERT = 'NOTE: Assertions have been autogenerated by '
-UNUSED_NOTE = 'NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:'
+##### LLVM IR parser
+RUN_LINE_RE = re.compile(r"^\s*(?://|[;#])\s*RUN:\s*(.*)$")
+CHECK_PREFIX_RE = re.compile(r"--?check-prefix(?:es)?[= ](\S+)")
+PREFIX_RE = re.compile("^[a-zA-Z0-9_-]+$")
+CHECK_RE = re.compile(
+    r"^\s*(?://|[;#])\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL|-SAME|-EMPTY)?:"
+)
+
+UTC_ARGS_KEY = "UTC_ARGS:"
+UTC_ARGS_CMD = re.compile(r".*" + UTC_ARGS_KEY + "\s*(?P<cmd>.*)\s*$")
+UTC_ADVERT = "NOTE: Assertions have been autogenerated by "
+UNUSED_NOTE = "NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:"
 
 OPT_FUNCTION_RE = re.compile(
-    r'^(\s*;\s*Function\sAttrs:\s(?P<attrs>[\w\s():,]+?))?\s*define\s+(?P<funcdef_attrs_and_ret>[^@]*)@(?P<func>[\w.$-]+?)\s*'
-    r'(?P<args_and_sig>\((\)|(.*?[\w.-]+?)\))[^{]*\{)\n(?P<body>.*?)^\}$',
-    flags=(re.M | re.S))
+    r"^(\s*;\s*Function\sAttrs:\s(?P<attrs>[\w\s():,]+?))?\s*define\s+(?P<funcdef_attrs_and_ret>[^@]*)@(?P<func>[\w.$-]+?)\s*"
+    r"(?P<args_and_sig>\((\)|(.*?[\w.-]+?)\))[^{]*\{)\n(?P<body>.*?)^\}$",
+    flags=(re.M | re.S),
+)
 
 ANALYZE_FUNCTION_RE = re.compile(
-    r'^\s*\'(?P<analysis>[\w\s-]+?)\'\s+for\s+function\s+\'(?P<func>[\w.$-]+?)\':'
-    r'\s*\n(?P<body>.*)$',
-    flags=(re.X | re.S))
+    r"^\s*\'(?P<analysis>[\w\s-]+?)\'\s+for\s+function\s+\'(?P<func>[\w.$-]+?)\':"
+    r"\s*\n(?P<body>.*)$",
+    flags=(re.X | re.S),
+)
 
 LV_DEBUG_RE = re.compile(
-    r'^\s*\'(?P<func>[\w.$-]+?)\'[^\n]*'
-    r'\s*\n(?P<body>.*)$',
-    flags=(re.X | re.S))
+    r"^\s*\'(?P<func>[\w.$-]+?)\'[^\n]*" r"\s*\n(?P<body>.*)$", flags=(re.X | re.S)
+)
 
 IR_FUNCTION_RE = re.compile(r'^\s*define\s+(?:internal\s+)?[^@]*@"?([\w.$-]+)"?\s*\(')
 TRIPLE_IR_RE = re.compile(r'^\s*target\s+triple\s*=\s*"([^"]+)"$')
-TRIPLE_ARG_RE = re.compile(r'-mtriple[= ]([^ ]+)')
-MARCH_ARG_RE = re.compile(r'-march[= ]([^ ]+)')
-DEBUG_ONLY_ARG_RE = re.compile(r'-debug-only[= ]([^ ]+)')
+TRIPLE_ARG_RE = re.compile(r"-mtriple[= ]([^ ]+)")
+MARCH_ARG_RE = re.compile(r"-march[= ]([^ ]+)")
+DEBUG_ONLY_ARG_RE = re.compile(r"-debug-only[= ]([^ ]+)")
 
-SCRUB_LEADING_WHITESPACE_RE = re.compile(r'^(\s+)')
-SCRUB_WHITESPACE_RE = re.compile(r'(?!^(|  \w))[ \t]+', flags=re.M)
-SCRUB_TRAILING_WHITESPACE_RE = re.compile(r'[ \t]+$', flags=re.M)
+SCRUB_LEADING_WHITESPACE_RE = re.compile(r"^(\s+)")
+SCRUB_WHITESPACE_RE = re.compile(r"(?!^(|  \w))[ \t]+", flags=re.M)
+SCRUB_TRAILING_WHITESPACE_RE = re.compile(r"[ \t]+$", flags=re.M)
 SCRUB_TRAILING_WHITESPACE_TEST_RE = SCRUB_TRAILING_WHITESPACE_RE
-SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE = re.compile(r'([ \t]|(#[0-9]+))+$', flags=re.M)
-SCRUB_KILL_COMMENT_RE = re.compile(r'^ *#+ +kill:.*\n')
+SCRUB_TRAILING_WHITESPACE_AND_ATTRIBUTES_RE = re.compile(
+    r"([ \t]|(#[0-9]+))+$", flags=re.M
+)
+SCRUB_KILL_COMMENT_RE = re.compile(r"^ *#+ +kill:.*\n")
 SCRUB_LOOP_COMMENT_RE = re.compile(
-    r'# =>This Inner Loop Header:.*|# in Loop:.*', flags=re.M)
-SCRUB_TAILING_COMMENT_TOKEN_RE = re.compile(r'(?<=\S)+[ \t]*#$', flags=re.M)
+    r"# =>This Inner Loop Header:.*|# in Loop:.*", flags=re.M
+)
+SCRUB_TAILING_COMMENT_TOKEN_RE = re.compile(r"(?<=\S)+[ \t]*#$", flags=re.M)
+
+SEPARATOR = "."
 
-SEPARATOR = '.'
 
 def error(msg, test_file=None):
-  if test_file:
-    msg = '{}: {}'.format(msg, test_file)
-  print('ERROR: {}'.format(msg), file=sys.stderr)
+    if test_file:
+        msg = "{}: {}".format(msg, test_file)
+    print("ERROR: {}".format(msg), file=sys.stderr)
+
 
 def warn(msg, test_file=None):
-  if test_file:
-    msg = '{}: {}'.format(msg, test_file)
-  print('WARNING: {}'.format(msg), file=sys.stderr)
+    if test_file:
+        msg = "{}: {}".format(msg, test_file)
+    print("WARNING: {}".format(msg), file=sys.stderr)
+
 
 def debug(*args, **kwargs):
-  # Python2 does not allow def debug(*args, file=sys.stderr, **kwargs):
-  if 'file' not in kwargs:
-    kwargs['file'] = sys.stderr
-  if _verbose:
-    print(*args, **kwargs)
+    # Python2 does not allow def debug(*args, file=sys.stderr, **kwargs):
+    if "file" not in kwargs:
+        kwargs["file"] = sys.stderr
+    if _verbose:
+        print(*args, **kwargs)
+
 
 def find_run_lines(test, lines):
-  debug('Scanning for RUN lines in test file:', test)
-  raw_lines = [m.group(1)
-               for m in [RUN_LINE_RE.match(l) for l in lines] if m]
-  run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
-  for l in raw_lines[1:]:
-    if run_lines[-1].endswith('\\'):
-      run_lines[-1] = run_lines[-1].rstrip('\\') + ' ' + l
-    else:
-      run_lines.append(l)
-  debug('Found {} RUN lines in {}:'.format(len(run_lines), test))
-  for l in run_lines:
-    debug('  RUN: {}'.format(l))
-  return run_lines
+    debug("Scanning for RUN lines in test file:", test)
+    raw_lines = [m.group(1) for m in [RUN_LINE_RE.match(l) for l in lines] if m]
+    run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
+    for l in raw_lines[1:]:
+        if run_lines[-1].endswith("\\"):
+            run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
+        else:
+            run_lines.append(l)
+    debug("Found {} RUN lines in {}:".format(len(run_lines), test))
+    for l in run_lines:
+        debug("  RUN: {}".format(l))
+    return run_lines
+
 
 def get_triple_from_march(march):
-  triples = {
-      'amdgcn': 'amdgcn',
-      'r600': 'r600',
-      'mips': 'mips',
-      'sparc': 'sparc',
-      'hexagon': 'hexagon',
-      've': 've',
-  }
-  for prefix, triple in triples.items():
-    if march.startswith(prefix):
-      return triple
-  print("Cannot find a triple. Assume 'x86'", file=sys.stderr)
-  return 'x86'
+    triples = {
+        "amdgcn": "amdgcn",
+        "r600": "r600",
+        "mips": "mips",
+        "sparc": "sparc",
+        "hexagon": "hexagon",
+        "ve": "ve",
+    }
+    for prefix, triple in triples.items():
+        if march.startswith(prefix):
+            return triple
+    print("Cannot find a triple. Assume 'x86'", file=sys.stderr)
+    return "x86"
+
 
 def apply_filters(line, filters):
-  has_filter = False
-  for f in filters:
-    if not f.is_filter_out:
-      has_filter = True
-    if f.search(line):
-      return False if f.is_filter_out else True
-  # If we only used filter-out, keep the line, otherwise discard it since no
-  # filter matched.
-  return False if has_filter else True
+    has_filter = False
+    for f in filters:
+        if not f.is_filter_out:
+            has_filter = True
+        if f.search(line):
+            return False if f.is_filter_out else True
+    # If we only used filter-out, keep the line, otherwise discard it since no
+    # filter matched.
+    return False if has_filter else True
+
 
 def do_filter(body, filters):
-  return body if not filters else '\n'.join(filter(
-    lambda line: apply_filters(line, filters), body.splitlines()))
+    return (
+        body
+        if not filters
+        else "\n".join(
+            filter(lambda line: apply_filters(line, filters), body.splitlines())
+        )
+    )
+
 
 def scrub_body(body):
-  # Scrub runs of whitespace out of the assembly, but leave the leading
-  # whitespace in place.
-  body = SCRUB_WHITESPACE_RE.sub(r' ', body)
-  # Expand the tabs used for indentation.
-  body = str.expandtabs(body, 2)
-  # Strip trailing whitespace.
-  body = SCRUB_TRAILING_WHITESPACE_TEST_RE.sub(r'', body)
-  return body
+    # Scrub runs of whitespace out of the assembly, but leave the leading
+    # whitespace in place.
+    body = SCRUB_WHITESPACE_RE.sub(r" ", body)
+    # Expand the tabs used for indentation.
+    body = str.expandtabs(body, 2)
+    # Strip trailing whitespace.
+    body = SCRUB_TRAILING_WHITESPACE_TEST_RE.sub(r"", body)
+    return body
+
 
 def do_scrub(body, scrubber, scrubber_args, extra):
-  if scrubber_args:
-    local_args = copy.deepcopy(scrubber_args)
-    local_args[0].extra_scrub = extra
-    return scrubber(body, *local_args)
-  return scrubber(body, *scrubber_args)
+    if scrubber_args:
+        local_args = copy.deepcopy(scrubber_args)
+        local_args[0].extra_scrub = extra
+        return scrubber(body, *local_args)
+    return scrubber(body, *scrubber_args)
+
 
 # Build up a dictionary of all the function bodies.
 class function_body(object):
-  def __init__(self, string, extra, funcdef_attrs_and_ret, args_and_sig, attrs, func_name_separator):
-    self.scrub = string
-    self.extrascrub = extra
-    self.funcdef_attrs_and_ret = funcdef_attrs_and_ret
-    self.args_and_sig = args_and_sig
-    self.attrs = attrs
-    self.func_name_separator = func_name_separator
-  def is_same_except_arg_names(self, extrascrub, funcdef_attrs_and_ret, args_and_sig, attrs, is_backend):
-    arg_names = set()
-    def drop_arg_names(match):
-      arg_names.add(match.group(variable_group_in_ir_value_match))
-      if match.group(attribute_group_in_ir_value_match):
-        attr = match.group(attribute_group_in_ir_value_match)
-      else:
-        attr = ''
-      return match.group(1) + attr + match.group(match.lastindex)
-    def repl_arg_names(match):
-      if match.group(variable_group_in_ir_value_match) is not None and match.group(variable_group_in_ir_value_match) in arg_names:
-        return match.group(1) + match.group(match.lastindex)
-      return match.group(1) + match.group(2) + match.group(match.lastindex)
-    if self.funcdef_attrs_and_ret != funcdef_attrs_and_ret:
-      return False
-    if self.attrs != attrs:
-      return False
-    ans0 = IR_VALUE_RE.sub(drop_arg_names, self.args_and_sig)
-    ans1 = IR_VALUE_RE.sub(drop_arg_names, args_and_sig)
-    if ans0 != ans1:
-      return False
-    if is_backend:
-      # Check without replacements, the replacements are not applied to the
-      # body for backend checks.
-      return self.extrascrub == extrascrub
-
-    es0 = IR_VALUE_RE.sub(repl_arg_names, self.extrascrub)
-    es1 = IR_VALUE_RE.sub(repl_arg_names, extrascrub)
-    es0 = SCRUB_IR_COMMENT_RE.sub(r'', es0)
-    es1 = SCRUB_IR_COMMENT_RE.sub(r'', es1)
-    return es0 == es1
-
-  def __str__(self):
-    return self.scrub
+    def __init__(
+        self,
+        string,
+        extra,
+        funcdef_attrs_and_ret,
+        args_and_sig,
+        attrs,
+        func_name_separator,
+    ):
+        self.scrub = string
+        self.extrascrub = extra
+        self.funcdef_attrs_and_ret = funcdef_attrs_and_ret
+        self.args_and_sig = args_and_sig
+        self.attrs = attrs
+        self.func_name_separator = func_name_separator
+
+    def is_same_except_arg_names(
+        self, extrascrub, funcdef_attrs_and_ret, args_and_sig, attrs, is_backend
+    ):
+        arg_names = set()
+
+        def drop_arg_names(match):
+            arg_names.add(match.group(variable_group_in_ir_value_match))
+            if match.group(attribute_group_in_ir_value_match):
+                attr = match.group(attribute_group_in_ir_value_match)
+            else:
+                attr = ""
+            return match.group(1) + attr + match.group(match.lastindex)
+
+        def repl_arg_names(match):
+            if (
+                match.group(variable_group_in_ir_value_match) is not None
+                and match.group(variable_group_in_ir_value_match) in arg_names
+            ):
+                return match.group(1) + match.group(match.lastindex)
+            return match.group(1) + match.group(2) + match.group(match.lastindex)
+
+        if self.funcdef_attrs_and_ret != funcdef_attrs_and_ret:
+            return False
+        if self.attrs != attrs:
+            return False
+        ans0 = IR_VALUE_RE.sub(drop_arg_names, self.args_and_sig)
+        ans1 = IR_VALUE_RE.sub(drop_arg_names, args_and_sig)
+        if ans0 != ans1:
+            return False
+        if is_backend:
+            # Check without replacements, the replacements are not applied to the
+            # body for backend checks.
+            return self.extrascrub == extrascrub
+
+        es0 = IR_VALUE_RE.sub(repl_arg_names, self.extrascrub)
+        es1 = IR_VALUE_RE.sub(repl_arg_names, extrascrub)
+        es0 = SCRUB_IR_COMMENT_RE.sub(r"", es0)
+        es1 = SCRUB_IR_COMMENT_RE.sub(r"", es1)
+        return es0 == es1
+
+    def __str__(self):
+        return self.scrub
+
 
 class FunctionTestBuilder:
-  def __init__(self, run_list, flags, scrubber_args, path):
-    self._verbose = flags.verbose
-    self._record_args = flags.function_signature
-    self._check_attributes = flags.check_attributes
-    # Strip double-quotes if input was read by UTC_ARGS
-    self._filters = list(map(lambda f: Filter(re.compile(f.pattern().strip('"'),
-                                                         f.flags()),
-                                              f.is_filter_out),
-                             flags.filters)) if flags.filters else []
-    self._scrubber_args = scrubber_args
-    self._path = path
-    # Strip double-quotes if input was read by UTC_ARGS
-    self._replace_value_regex = list(map(lambda x: x.strip('"'), flags.replace_value_regex))
-    self._func_dict = {}
-    self._func_order = {}
-    self._global_var_dict = {}
-    self._processed_prefixes = set()
-    for tuple in run_list:
-      for prefix in tuple[0]:
-        self._func_dict.update({prefix: dict()})
-        self._func_order.update({prefix: []})
-        self._global_var_dict.update({prefix: dict()})
-
-  def finish_and_get_func_dict(self):
-    for prefix in self.get_failed_prefixes():
-      warn('Prefix %s had conflicting output from different RUN lines for all functions in test %s' % (prefix,self._path,))
-    return self._func_dict
-
-  def func_order(self):
-    return self._func_order
-
-  def global_var_dict(self):
-    return self._global_var_dict
-
-  def is_filtered(self):
-    return bool(self._filters)
-
-  def process_run_line(self, function_re, scrubber, raw_tool_output, prefixes, is_backend):
-    build_global_values_dictionary(self._global_var_dict, raw_tool_output, prefixes)
-    for m in function_re.finditer(raw_tool_output):
-      if not m:
-        continue
-      func = m.group('func')
-      body = m.group('body')
-      # func_name_separator is the string that is placed right after function name at the
-      # beginning of assembly function definition. In most assemblies, that is just a
-      # colon: `foo:`. But, for example, in nvptx it is a brace: `foo(`. If is_backend is
-      # False, just assume that separator is an empty string.
-      if is_backend:
-        # Use ':' as default separator.
-        func_name_separator = m.group('func_name_separator') if 'func_name_separator' in m.groupdict() else ':'
-      else:
-        func_name_separator = ''
-      attrs = m.group('attrs') if self._check_attributes else ''
-      funcdef_attrs_and_ret = m.group('funcdef_attrs_and_ret') if self._record_args else ''
-      # Determine if we print arguments, the opening brace, or nothing after the
-      # function name
-      if self._record_args and 'args_and_sig' in m.groupdict():
-        args_and_sig = scrub_body(m.group('args_and_sig').strip())
-      elif 'args_and_sig' in m.groupdict():
-        args_and_sig = '('
-      else:
-        args_and_sig = ''
-      filtered_body = do_filter(body, self._filters)
-      scrubbed_body = do_scrub(filtered_body, scrubber, self._scrubber_args,
-                               extra=False)
-      scrubbed_extra = do_scrub(filtered_body, scrubber, self._scrubber_args,
-                                extra=True)
-      if 'analysis' in m.groupdict():
-        analysis = m.group('analysis')
-        if analysis.lower() != 'cost model analysis':
-          warn('Unsupported analysis mode: %r!' % (analysis,))
-      if func.startswith('stress'):
-        # We only use the last line of the function body for stress tests.
-        scrubbed_body = '\n'.join(scrubbed_body.splitlines()[-1:])
-      if self._verbose:
-        print('Processing function: ' + func, file=sys.stderr)
-        for l in scrubbed_body.splitlines():
-          print('  ' + l, file=sys.stderr)
-      for prefix in prefixes:
-        # Replace function names matching the regex.
-        for regex in self._replace_value_regex:
-          # Pattern that matches capture groups in the regex in leftmost order.
-          group_regex = re.compile(r'\(.*?\)')
-          # Replace function name with regex.
-          match = re.match(regex, func)
-          if match:
-            func_repl = regex
-            # Replace any capture groups with their matched strings.
-            for g in match.groups():
-              func_repl = group_regex.sub(re.escape(g), func_repl, count=1)
-            func = re.sub(func_repl, '{{' + func_repl + '}}', func)
-
-          # Replace all calls to regex matching functions.
-          matches = re.finditer(regex, scrubbed_body)
-          for match in matches:
-            func_repl = regex
-            # Replace any capture groups with their matched strings.
-            for g in match.groups():
-              func_repl = group_regex.sub(re.escape(g), func_repl, count=1)
-            # Substitute function call names that match the regex with the same
-            # capture groups set.
-            scrubbed_body = re.sub(func_repl, '{{' + func_repl + '}}',
-                                   scrubbed_body)
-
-        if func in self._func_dict[prefix]:
-          if (self._func_dict[prefix][func] is not None and
-              (str(self._func_dict[prefix][func]) != scrubbed_body or
-               self._func_dict[prefix][func].args_and_sig != args_and_sig or
-               self._func_dict[prefix][func].attrs != attrs or
-               self._func_dict[prefix][func].funcdef_attrs_and_ret != funcdef_attrs_and_ret)):
-            if self._func_dict[prefix][func].is_same_except_arg_names(
-                scrubbed_extra,
-                funcdef_attrs_and_ret,
-                args_and_sig,
-                attrs,
-                is_backend):
-              self._func_dict[prefix][func].scrub = scrubbed_extra
-              self._func_dict[prefix][func].args_and_sig = args_and_sig
+    def __init__(self, run_list, flags, scrubber_args, path):
+        self._verbose = flags.verbose
+        self._record_args = flags.function_signature
+        self._check_attributes = flags.check_attributes
+        # Strip double-quotes if input was read by UTC_ARGS
+        self._filters = (
+            list(
+                map(
+                    lambda f: Filter(
+                        re.compile(f.pattern().strip('"'), f.flags()), f.is_filter_out
+                    ),
+                    flags.filters,
+                )
+            )
+            if flags.filters
+            else []
+        )
+        self._scrubber_args = scrubber_args
+        self._path = path
+        # Strip double-quotes if input was read by UTC_ARGS
+        self._replace_value_regex = list(
+            map(lambda x: x.strip('"'), flags.replace_value_regex)
+        )
+        self._func_dict = {}
+        self._func_order = {}
+        self._global_var_dict = {}
+        self._processed_prefixes = set()
+        for tuple in run_list:
+            for prefix in tuple[0]:
+                self._func_dict.update({prefix: dict()})
+                self._func_order.update({prefix: []})
+                self._global_var_dict.update({prefix: dict()})
+
+    def finish_and_get_func_dict(self):
+        for prefix in self.get_failed_prefixes():
+            warn(
+                "Prefix %s had conflicting output from different RUN lines for all functions in test %s"
+                % (
+                    prefix,
+                    self._path,
+                )
+            )
+        return self._func_dict
+
+    def func_order(self):
+        return self._func_order
+
+    def global_var_dict(self):
+        return self._global_var_dict
+
+    def is_filtered(self):
+        return bool(self._filters)
+
+    def process_run_line(
+        self, function_re, scrubber, raw_tool_output, prefixes, is_backend
+    ):
+        build_global_values_dictionary(self._global_var_dict, raw_tool_output, prefixes)
+        for m in function_re.finditer(raw_tool_output):
+            if not m:
+                continue
+            func = m.group("func")
+            body = m.group("body")
+            # func_name_separator is the string that is placed right after function name at the
+            # beginning of assembly function definition. In most assemblies, that is just a
+            # colon: `foo:`. But, for example, in nvptx it is a brace: `foo(`. If is_backend is
+            # False, just assume that separator is an empty string.
+            if is_backend:
+                # Use ':' as default separator.
+                func_name_separator = (
+                    m.group("func_name_separator")
+                    if "func_name_separator" in m.groupdict()
+                    else ":"
+                )
             else:
-              # This means a previous RUN line produced a body for this function
-              # that is different from the one produced by this current RUN line,
-              # so the body can't be common across RUN lines. We use None to
-              # indicate that.
-              self._func_dict[prefix][func] = None
-        else:
-          if prefix not in self._processed_prefixes:
-            self._func_dict[prefix][func] = function_body(
-                scrubbed_body, scrubbed_extra, funcdef_attrs_and_ret,
-                args_and_sig, attrs, func_name_separator)
-            self._func_order[prefix].append(func)
-          else:
-            # An earlier RUN line used this check prefixes but didn't produce
-            # a body for this function. This happens in Clang tests that use
-            # preprocesser directives to exclude individual functions from some
-            # RUN lines.
-            self._func_dict[prefix][func] = None
-
-  def processed_prefixes(self, prefixes):
-    """
-    Mark a set of prefixes as having had at least one applicable RUN line fully
-    processed. This is used to filter out function bodies that don't have
-    outputs for all RUN lines.
-    """
-    self._processed_prefixes.update(prefixes)
-
-  def get_failed_prefixes(self):
-    # This returns the list of those prefixes that failed to match any function,
-    # because there were conflicting bodies produced by different RUN lines, in
-    # all instances of the prefix.
-    for prefix in self._func_dict:
-      if (self._func_dict[prefix] and
-          (not [fct for fct in self._func_dict[prefix]
-                if self._func_dict[prefix][fct] is not None])):
-        yield prefix
+                func_name_separator = ""
+            attrs = m.group("attrs") if self._check_attributes else ""
+            funcdef_attrs_and_ret = (
+                m.group("funcdef_attrs_and_ret") if self._record_args else ""
+            )
+            # Determine if we print arguments, the opening brace, or nothing after the
+            # function name
+            if self._record_args and "args_and_sig" in m.groupdict():
+                args_and_sig = scrub_body(m.group("args_and_sig").strip())
+            elif "args_and_sig" in m.groupdict():
+                args_and_sig = "("
+            else:
+                args_and_sig = ""
+            filtered_body = do_filter(body, self._filters)
+            scrubbed_body = do_scrub(
+                filtered_body, scrubber, self._scrubber_args, extra=False
+            )
+            scrubbed_extra = do_scrub(
+                filtered_body, scrubber, self._scrubber_args, extra=True
+            )
+            if "analysis" in m.groupdict():
+                analysis = m.group("analysis")
+                if analysis.lower() != "cost model analysis":
+                    warn("Unsupported analysis mode: %r!" % (analysis,))
+            if func.startswith("stress"):
+                # We only use the last line of the function body for stress tests.
+                scrubbed_body = "\n".join(scrubbed_body.splitlines()[-1:])
+            if self._verbose:
+                print("Processing function: " + func, file=sys.stderr)
+                for l in scrubbed_body.splitlines():
+                    print("  " + l, file=sys.stderr)
+            for prefix in prefixes:
+                # Replace function names matching the regex.
+                for regex in self._replace_value_regex:
+                    # Pattern that matches capture groups in the regex in leftmost order.
+                    group_regex = re.compile(r"\(.*?\)")
+                    # Replace function name with regex.
+                    match = re.match(regex, func)
+                    if match:
+                        func_repl = regex
+                        # Replace any capture groups with their matched strings.
+                        for g in match.groups():
+                            func_repl = group_regex.sub(
+                                re.escape(g), func_repl, count=1
+                            )
+                        func = re.sub(func_repl, "{{" + func_repl + "}}", func)
+
+                    # Replace all calls to regex matching functions.
+                    matches = re.finditer(regex, scrubbed_body)
+                    for match in matches:
+                        func_repl = regex
+                        # Replace any capture groups with their matched strings.
+                        for g in match.groups():
+                            func_repl = group_regex.sub(
+                                re.escape(g), func_repl, count=1
+                            )
+                        # Substitute function call names that match the regex with the same
+                        # capture groups set.
+                        scrubbed_body = re.sub(
+                            func_repl, "{{" + func_repl + "}}", scrubbed_body
+                        )
+
+                if func in self._func_dict[prefix]:
+                    if self._func_dict[prefix][func] is not None and (
+                        str(self._func_dict[prefix][func]) != scrubbed_body
+                        or self._func_dict[prefix][func].args_and_sig != args_and_sig
+                        or self._func_dict[prefix][func].attrs != attrs
+                        or self._func_dict[prefix][func].funcdef_attrs_and_ret
+                        != funcdef_attrs_and_ret
+                    ):
+                        if self._func_dict[prefix][func].is_same_except_arg_names(
+                            scrubbed_extra,
+                            funcdef_attrs_and_ret,
+                            args_and_sig,
+                            attrs,
+                            is_backend,
+                        ):
+                            self._func_dict[prefix][func].scrub = scrubbed_extra
+                            self._func_dict[prefix][func].args_and_sig = args_and_sig
+                        else:
+                            # This means a previous RUN line produced a body for this function
+                            # that is different from the one produced by this current RUN line,
+                            # so the body can't be common across RUN lines. We use None to
+                            # indicate that.
+                            self._func_dict[prefix][func] = None
+                else:
+                    if prefix not in self._processed_prefixes:
+                        self._func_dict[prefix][func] = function_body(
+                            scrubbed_body,
+                            scrubbed_extra,
+                            funcdef_attrs_and_ret,
+                            args_and_sig,
+                            attrs,
+                            func_name_separator,
+                        )
+                        self._func_order[prefix].append(func)
+                    else:
+                        # An earlier RUN line used this check prefixes but didn't produce
+                        # a body for this function. This happens in Clang tests that use
+                        # preprocesser directives to exclude individual functions from some
+                        # RUN lines.
+                        self._func_dict[prefix][func] = None
+
+    def processed_prefixes(self, prefixes):
+        """
+        Mark a set of prefixes as having had at least one applicable RUN line fully
+        processed. This is used to filter out function bodies that don't have
+        outputs for all RUN lines.
+        """
+        self._processed_prefixes.update(prefixes)
+
+    def get_failed_prefixes(self):
+        # This returns the list of those prefixes that failed to match any function,
+        # because there were conflicting bodies produced by different RUN lines, in
+        # all instances of the prefix.
+        for prefix in self._func_dict:
+            if self._func_dict[prefix] and (
+                not [
+                    fct
+                    for fct in self._func_dict[prefix]
+                    if self._func_dict[prefix][fct] is not None
+                ]
+            ):
+                yield prefix
 
 
 ##### Generator of LLVM IR CHECK lines
 
-SCRUB_IR_COMMENT_RE = re.compile(r'\s*;.*')
+SCRUB_IR_COMMENT_RE = re.compile(r"\s*;.*")
 
 # TODO: We should also derive check lines for global, debug, loop declarations, etc..
 
+
 class NamelessValue:
-  def __init__(self, check_prefix, check_key, ir_prefix, ir_regexp,
-               global_ir_rhs_regexp, *, is_before_functions=False, is_number=False,
-               replace_number_with_counter=False):
-    self.check_prefix = check_prefix
-    self.check_key = check_key
-    self.ir_prefix = ir_prefix
-    self.ir_regexp = ir_regexp
-    self.global_ir_rhs_regexp = global_ir_rhs_regexp
-    self.is_before_functions = is_before_functions
-    self.is_number = is_number
-    # Some variable numbers (e.g. MCINST1234) will change based on unrelated
-    # modifications to LLVM, replace those with an incrementing counter.
-    self.replace_number_with_counter = replace_number_with_counter
-    self.variable_mapping = {}
-
-  # Return true if this kind of IR value is "local", basically if it matches '%{{.*}}'.
-  def is_local_def_ir_value_match(self, match):
-    return self.ir_prefix == '%'
-
-  # Return true if this kind of IR value is "global", basically if it matches '#{{.*}}'.
-  def is_global_scope_ir_value_match(self, match):
-    return self.global_ir_rhs_regexp is not None
-
-  # Return the IR prefix and check prefix we use for this kind or IR value,
-  # e.g., (%, TMP) for locals.
-  def get_ir_prefix_from_ir_value_match(self, match):
-    return self.ir_prefix, self.check_prefix
-
-  # Return the IR regexp we use for this kind or IR value, e.g., [\w.-]+? for locals
-  def get_ir_regex_from_ir_value_re_match(self, match):
-    # for backwards compatibility we check locals with '.*'
-    if self.is_local_def_ir_value_match(match):
-      return '.*'
-    return self.ir_regexp
-
-  # Create a FileCheck variable name based on an IR name.
-  def get_value_name(self, var: str, check_prefix: str):
-    var = var.replace('!', '')
-    if self.replace_number_with_counter:
-      assert var.isdigit(), var
-      replacement = self.variable_mapping.get(var, None)
-      if replacement is None:
-        # Replace variable with an incrementing counter
-        replacement = str(len(self.variable_mapping) + 1)
-        self.variable_mapping[var] = replacement
-      var = replacement
-    # This is a nameless value, prepend check_prefix.
-    if var.isdigit():
-      var = check_prefix + var
-    else:
-      # This is a named value that clashes with the check_prefix, prepend with
-      # _prefix_filecheck_ir_name, if it has been defined.
-      if may_clash_with_default_check_prefix_name(check_prefix, var) and _prefix_filecheck_ir_name:
-        var = _prefix_filecheck_ir_name + var
-    var = var.replace('.', '_')
-    var = var.replace('-', '_')
-    return var.upper()
-
-  # Create a FileCheck variable from regex.
-  def get_value_definition(self, var, match):
-    # for backwards compatibility we check locals with '.*'
-    varname = self.get_value_name(var, self.check_prefix)
-    prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
-    if self.is_number:
-      regex = ''  # always capture a number in the default format
-      capture_start = '[[#'
-    else:
-      regex = self.get_ir_regex_from_ir_value_re_match(match)
-      capture_start = '[['
-    if self.is_local_def_ir_value_match(match):
-      return capture_start + varname + ':' + prefix + regex + ']]'
-    return prefix + capture_start + varname + ':' + regex + ']]'
-
-  # Use a FileCheck variable.
-  def get_value_use(self, var, match, var_prefix=None):
-    if var_prefix is None:
-      var_prefix = self.check_prefix
-    capture_start = '[[#' if self.is_number else '[['
-    if self.is_local_def_ir_value_match(match):
-      return capture_start + self.get_value_name(var, var_prefix) + ']]'
-    prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
-    return prefix + capture_start + self.get_value_name(var, var_prefix) + ']]'
+    def __init__(
+        self,
+        check_prefix,
+        check_key,
+        ir_prefix,
+        ir_regexp,
+        global_ir_rhs_regexp,
+        *,
+        is_before_functions=False,
+        is_number=False,
+        replace_number_with_counter=False
+    ):
+        self.check_prefix = check_prefix
+        self.check_key = check_key
+        self.ir_prefix = ir_prefix
+        self.ir_regexp = ir_regexp
+        self.global_ir_rhs_regexp = global_ir_rhs_regexp
+        self.is_before_functions = is_before_functions
+        self.is_number = is_number
+        # Some variable numbers (e.g. MCINST1234) will change based on unrelated
+        # modifications to LLVM, replace those with an incrementing counter.
+        self.replace_number_with_counter = replace_number_with_counter
+        self.variable_mapping = {}
+
+    # Return true if this kind of IR value is "local", basically if it matches '%{{.*}}'.
+    def is_local_def_ir_value_match(self, match):
+        return self.ir_prefix == "%"
+
+    # Return true if this kind of IR value is "global", basically if it matches '#{{.*}}'.
+    def is_global_scope_ir_value_match(self, match):
+        return self.global_ir_rhs_regexp is not None
+
+    # Return the IR prefix and check prefix we use for this kind or IR value,
+    # e.g., (%, TMP) for locals.
+    def get_ir_prefix_from_ir_value_match(self, match):
+        return self.ir_prefix, self.check_prefix
+
+    # Return the IR regexp we use for this kind or IR value, e.g., [\w.-]+? for locals
+    def get_ir_regex_from_ir_value_re_match(self, match):
+        # for backwards compatibility we check locals with '.*'
+        if self.is_local_def_ir_value_match(match):
+            return ".*"
+        return self.ir_regexp
+
+    # Create a FileCheck variable name based on an IR name.
+    def get_value_name(self, var: str, check_prefix: str):
+        var = var.replace("!", "")
+        if self.replace_number_with_counter:
+            assert var.isdigit(), var
+            replacement = self.variable_mapping.get(var, None)
+            if replacement is None:
+                # Replace variable with an incrementing counter
+                replacement = str(len(self.variable_mapping) + 1)
+                self.variable_mapping[var] = replacement
+            var = replacement
+        # This is a nameless value, prepend check_prefix.
+        if var.isdigit():
+            var = check_prefix + var
+        else:
+            # This is a named value that clashes with the check_prefix, prepend with
+            # _prefix_filecheck_ir_name, if it has been defined.
+            if (
+                may_clash_with_default_check_prefix_name(check_prefix, var)
+                and _prefix_filecheck_ir_name
+            ):
+                var = _prefix_filecheck_ir_name + var
+        var = var.replace(".", "_")
+        var = var.replace("-", "_")
+        return var.upper()
+
+    # Create a FileCheck variable from regex.
+    def get_value_definition(self, var, match):
+        # for backwards compatibility we check locals with '.*'
+        varname = self.get_value_name(var, self.check_prefix)
+        prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
+        if self.is_number:
+            regex = ""  # always capture a number in the default format
+            capture_start = "[[#"
+        else:
+            regex = self.get_ir_regex_from_ir_value_re_match(match)
+            capture_start = "[["
+        if self.is_local_def_ir_value_match(match):
+            return capture_start + varname + ":" + prefix + regex + "]]"
+        return prefix + capture_start + varname + ":" + regex + "]]"
+
+    # Use a FileCheck variable.
+    def get_value_use(self, var, match, var_prefix=None):
+        if var_prefix is None:
+            var_prefix = self.check_prefix
+        capture_start = "[[#" if self.is_number else "[["
+        if self.is_local_def_ir_value_match(match):
+            return capture_start + self.get_value_name(var, var_prefix) + "]]"
+        prefix = self.get_ir_prefix_from_ir_value_match(match)[0]
+        return prefix + capture_start + self.get_value_name(var, var_prefix) + "]]"
+
 
 # Description of the different "unnamed" values we match in the IR, e.g.,
 # (local) ssa values, (debug) metadata, etc.
 ir_nameless_values = [
     #            check_prefix   check_key  ir_prefix           ir_regexp                global_ir_rhs_regexp
-    NamelessValue(r'TMP'        , '%' , r'%'                   , r'[\w$.-]+?'           , None                 ) ,
-    NamelessValue(r'ATTR'       , '#' , r'#'                   , r'[0-9]+'              , None                 ) ,
-    NamelessValue(r'ATTR'       , '#' , r'attributes #'        , r'[0-9]+'              , r'{[^}]*}'           ) ,
-    NamelessValue(r'GLOB'       , '@' , r'@'                   , r'[0-9]+'              , None                 ) ,
-    NamelessValue(r'GLOB'       , '@' , r'@'                   , r'[a-zA-Z0-9_$"\\.-]+' , r'.+'                , is_before_functions=True)  ,
-    NamelessValue(r'DBG'        , '!' , r'!dbg '               , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'DIASSIGNID' , '!' , r'!DIAssignID '        , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'PROF'       , '!' , r'!prof '              , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'TBAA'       , '!' , r'!tbaa '              , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'TBAA_STRUCT', '!' , r'!tbaa.struct '       , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'RNG'        , '!' , r'!range '             , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'LOOP'       , '!' , r'!llvm.loop '         , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'META'       , '!' , r'metadata '           , r'![0-9]+'             , None                 ) ,
-    NamelessValue(r'META'       , '!' , r''                    , r'![0-9]+'             , r'(?:distinct |)!.*' ) ,
-    NamelessValue(r'ACC_GRP'    , '!' , r'!llvm.access.group ' , r'![0-9]+'             , None                 ) ,
+    NamelessValue(r"TMP", "%", r"%", r"[\w$.-]+?", None),
+    NamelessValue(r"ATTR", "#", r"#", r"[0-9]+", None),
+    NamelessValue(r"ATTR", "#", r"attributes #", r"[0-9]+", r"{[^}]*}"),
+    NamelessValue(r"GLOB", "@", r"@", r"[0-9]+", None),
+    NamelessValue(
+        r"GLOB", "@", r"@", r'[a-zA-Z0-9_$"\\.-]+', r".+", is_before_functions=True
+    ),
+    NamelessValue(r"DBG", "!", r"!dbg ", r"![0-9]+", None),
+    NamelessValue(r"DIASSIGNID", "!", r"!DIAssignID ", r"![0-9]+", None),
+    NamelessValue(r"PROF", "!", r"!prof ", r"![0-9]+", None),
+    NamelessValue(r"TBAA", "!", r"!tbaa ", r"![0-9]+", None),
+    NamelessValue(r"TBAA_STRUCT", "!", r"!tbaa.struct ", r"![0-9]+", None),
+    NamelessValue(r"RNG", "!", r"!range ", r"![0-9]+", None),
+    NamelessValue(r"LOOP", "!", r"!llvm.loop ", r"![0-9]+", None),
+    NamelessValue(r"META", "!", r"metadata ", r"![0-9]+", None),
+    NamelessValue(r"META", "!", r"", r"![0-9]+", r"(?:distinct |)!.*"),
+    NamelessValue(r"ACC_GRP", "!", r"!llvm.access.group ", r"![0-9]+", None),
 ]
 
 asm_nameless_values = [
-    NamelessValue(r'MCINST'     , 'Inst#' , '<MCInst #'        , r'\d+'                 , r'.+', is_number=True, replace_number_with_counter=True),
-    NamelessValue(r'MCREG'      , 'Reg:'  , '<MCOperand Reg:'  , r'\d+'                 , r'.+', is_number=True, replace_number_with_counter=True),
+    NamelessValue(
+        r"MCINST",
+        "Inst#",
+        "<MCInst #",
+        r"\d+",
+        r".+",
+        is_number=True,
+        replace_number_with_counter=True,
+    ),
+    NamelessValue(
+        r"MCREG",
+        "Reg:",
+        "<MCOperand Reg:",
+        r"\d+",
+        r".+",
+        is_number=True,
+        replace_number_with_counter=True,
+    ),
 ]
 
+
 def createOrRegexp(old, new):
-  if not old:
-    return new
-  if not new:
-    return old
-  return old + '|' + new
+    if not old:
+        return new
+    if not new:
+        return old
+    return old + "|" + new
+
 
 def createPrefixMatch(prefix_str, prefix_re):
-  return '(?:' + prefix_str + '(' + prefix_re + '))'
+    return "(?:" + prefix_str + "(" + prefix_re + "))"
+
 
 # Build the regexp that matches an "IR value". This can be a local variable,
 # argument, global, or metadata, anything that is "named". It is important that
 # the PREFIX and SUFFIX below only contain a single group, if that changes
 # other locations will need adjustment as well.
-IR_VALUE_REGEXP_PREFIX = r'(\s*)'
-IR_VALUE_REGEXP_STRING = r''
+IR_VALUE_REGEXP_PREFIX = r"(\s*)"
+IR_VALUE_REGEXP_STRING = r""
 for nameless_value in ir_nameless_values:
-  match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
-  if nameless_value.global_ir_rhs_regexp is not None:
-    match = '^' + match
-  IR_VALUE_REGEXP_STRING = createOrRegexp(IR_VALUE_REGEXP_STRING, match)
-IR_VALUE_REGEXP_SUFFIX = r'([,\s\(\)]|\Z)'
-IR_VALUE_RE = re.compile(IR_VALUE_REGEXP_PREFIX + r'(' + IR_VALUE_REGEXP_STRING + r')' + IR_VALUE_REGEXP_SUFFIX)
+    match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
+    if nameless_value.global_ir_rhs_regexp is not None:
+        match = "^" + match
+    IR_VALUE_REGEXP_STRING = createOrRegexp(IR_VALUE_REGEXP_STRING, match)
+IR_VALUE_REGEXP_SUFFIX = r"([,\s\(\)]|\Z)"
+IR_VALUE_RE = re.compile(
+    IR_VALUE_REGEXP_PREFIX
+    + r"("
+    + IR_VALUE_REGEXP_STRING
+    + r")"
+    + IR_VALUE_REGEXP_SUFFIX
+)
 
 # Build the regexp that matches an "ASM value" (currently only for --asm-show-inst comments).
-ASM_VALUE_REGEXP_STRING = ''
+ASM_VALUE_REGEXP_STRING = ""
 for nameless_value in asm_nameless_values:
-  match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
-  ASM_VALUE_REGEXP_STRING = createOrRegexp(ASM_VALUE_REGEXP_STRING, match)
-ASM_VALUE_REGEXP_SUFFIX = r'([>\s]|\Z)'
-ASM_VALUE_RE = re.compile(r'((?:#|//)\s*)' + '(' + ASM_VALUE_REGEXP_STRING + ')' + ASM_VALUE_REGEXP_SUFFIX)
+    match = createPrefixMatch(nameless_value.ir_prefix, nameless_value.ir_regexp)
+    ASM_VALUE_REGEXP_STRING = createOrRegexp(ASM_VALUE_REGEXP_STRING, match)
+ASM_VALUE_REGEXP_SUFFIX = r"([>\s]|\Z)"
+ASM_VALUE_RE = re.compile(
+    r"((?:#|//)\s*)" + "(" + ASM_VALUE_REGEXP_STRING + ")" + ASM_VALUE_REGEXP_SUFFIX
+)
 
 # The entire match is group 0, the prefix has one group (=1), the entire
 # IR_VALUE_REGEXP_STRING is one group (=2), and then the nameless values start.
@@ -825,488 +1061,665 @@ def createPrefixMatch(prefix_str, prefix_re):
 # Check a match for IR_VALUE_RE and inspect it to determine if it was a local
 # value, %..., global @..., debug number !dbg !..., etc. See the PREFIXES above.
 def get_idx_from_ir_value_match(match):
-  for i in range(first_nameless_group_in_ir_value_match, match.lastindex):
-    if match.group(i) is not None:
-      return i - first_nameless_group_in_ir_value_match
-  error("Unable to identify the kind of IR value from the match!")
-  return 0
+    for i in range(first_nameless_group_in_ir_value_match, match.lastindex):
+        if match.group(i) is not None:
+            return i - first_nameless_group_in_ir_value_match
+    error("Unable to identify the kind of IR value from the match!")
+    return 0
+
 
 # See get_idx_from_ir_value_match
 def get_name_from_ir_value_match(match):
-  return match.group(get_idx_from_ir_value_match(match) + first_nameless_group_in_ir_value_match)
+    return match.group(
+        get_idx_from_ir_value_match(match) + first_nameless_group_in_ir_value_match
+    )
+
 
 def get_nameless_value_from_match(match, nameless_values) -> NamelessValue:
-  return nameless_values[get_idx_from_ir_value_match(match)]
+    return nameless_values[get_idx_from_ir_value_match(match)]
+
 
 # Return true if var clashes with the scripted FileCheck check_prefix.
 def may_clash_with_default_check_prefix_name(check_prefix, var):
-  return check_prefix and re.match(r'^' + check_prefix + r'[0-9]+?$', var, re.IGNORECASE)
-
-def generalize_check_lines_common(lines, is_analyze, vars_seen,
-                                  global_vars_seen, nameless_values,
-                                  nameless_value_regex, is_asm):
-  # This gets called for each match that occurs in
-  # a line. We transform variables we haven't seen
-  # into defs, and variables we have seen into uses.
-  def transform_line_vars(match):
-    var = get_name_from_ir_value_match(match)
-    nameless_value = get_nameless_value_from_match(match, nameless_values)
-    if may_clash_with_default_check_prefix_name(nameless_value.check_prefix, var):
-      warn("Change IR value name '%s' or use --prefix-filecheck-ir-name to prevent possible conflict"
-           " with scripted FileCheck name." % (var,))
-    key = (var, nameless_value.check_key)
-    is_local_def = nameless_value.is_local_def_ir_value_match(match)
-    if is_local_def and key in vars_seen:
-      rv = nameless_value.get_value_use(var, match)
-    elif not is_local_def and key in global_vars_seen:
-      # We could have seen a different prefix for the global variables first,
-      # ensure we use that one instead of the prefix for the current match.
-      rv = nameless_value.get_value_use(var, match, global_vars_seen[key])
-    else:
-      if is_local_def:
-        vars_seen.add(key)
-      else:
-        global_vars_seen[key] = nameless_value.check_prefix
-      rv = nameless_value.get_value_definition(var, match)
-    # re.sub replaces the entire regex match
-    # with whatever you return, so we have
-    # to make sure to hand it back everything
-    # including the commas and spaces.
-    return match.group(1) + rv + match.group(match.lastindex)
-
-  lines_with_def = []
-
-  for i, line in enumerate(lines):
-    if not is_asm:
-      # An IR variable named '%.' matches the FileCheck regex string.
-      line = line.replace('%.', '%dot')
-      for regex in _global_hex_value_regex:
-        if re.match('^@' + regex + ' = ', line):
-          line = re.sub(r'\bi([0-9]+) ([0-9]+)',
-              lambda m : 'i' + m.group(1) + ' [[#' + hex(int(m.group(2))) + ']]',
-              line)
-          break
-      # Ignore any comments, since the check lines will too.
-      scrubbed_line = SCRUB_IR_COMMENT_RE.sub(r'', line)
-      lines[i] = scrubbed_line
-    if is_asm or not is_analyze:
-      # It can happen that two matches are back-to-back and for some reason sub
-      # will not replace both of them. For now we work around this by
-      # substituting until there is no more match.
-      changed = True
-      while changed:
-        (lines[i], changed) = nameless_value_regex.subn(transform_line_vars,
-                                                        lines[i], count=1)
-  return lines
+    return check_prefix and re.match(
+        r"^" + check_prefix + r"[0-9]+?$", var, re.IGNORECASE
+    )
+
+
+def generalize_check_lines_common(
+    lines,
+    is_analyze,
+    vars_seen,
+    global_vars_seen,
+    nameless_values,
+    nameless_value_regex,
+    is_asm,
+):
+    # This gets called for each match that occurs in
+    # a line. We transform variables we haven't seen
+    # into defs, and variables we have seen into uses.
+    def transform_line_vars(match):
+        var = get_name_from_ir_value_match(match)
+        nameless_value = get_nameless_value_from_match(match, nameless_values)
+        if may_clash_with_default_check_prefix_name(nameless_value.check_prefix, var):
+            warn(
+                "Change IR value name '%s' or use --prefix-filecheck-ir-name to prevent possible conflict"
+                " with scripted FileCheck name." % (var,)
+            )
+        key = (var, nameless_value.check_key)
+        is_local_def = nameless_value.is_local_def_ir_value_match(match)
+        if is_local_def and key in vars_seen:
+            rv = nameless_value.get_value_use(var, match)
+        elif not is_local_def and key in global_vars_seen:
+            # We could have seen a different prefix for the global variables first,
+            # ensure we use that one instead of the prefix for the current match.
+            rv = nameless_value.get_value_use(var, match, global_vars_seen[key])
+        else:
+            if is_local_def:
+                vars_seen.add(key)
+            else:
+                global_vars_seen[key] = nameless_value.check_prefix
+            rv = nameless_value.get_value_definition(var, match)
+        # re.sub replaces the entire regex match
+        # with whatever you return, so we have
+        # to make sure to hand it back everything
+        # including the commas and spaces.
+        return match.group(1) + rv + match.group(match.lastindex)
+
+    lines_with_def = []
+
+    for i, line in enumerate(lines):
+        if not is_asm:
+            # An IR variable named '%.' matches the FileCheck regex string.
+            line = line.replace("%.", "%dot")
+            for regex in _global_hex_value_regex:
+                if re.match("^@" + regex + " = ", line):
+                    line = re.sub(
+                        r"\bi([0-9]+) ([0-9]+)",
+                        lambda m: "i"
+                        + m.group(1)
+                        + " [[#"
+                        + hex(int(m.group(2)))
+                        + "]]",
+                        line,
+                    )
+                    break
+            # Ignore any comments, since the check lines will too.
+            scrubbed_line = SCRUB_IR_COMMENT_RE.sub(r"", line)
+            lines[i] = scrubbed_line
+        if is_asm or not is_analyze:
+            # It can happen that two matches are back-to-back and for some reason sub
+            # will not replace both of them. For now we work around this by
+            # substituting until there is no more match.
+            changed = True
+            while changed:
+                (lines[i], changed) = nameless_value_regex.subn(
+                    transform_line_vars, lines[i], count=1
+                )
+    return lines
+
 
 # Replace IR value defs and uses with FileCheck variables.
 def generalize_check_lines(lines, is_analyze, vars_seen, global_vars_seen):
-  return generalize_check_lines_common(lines, is_analyze, vars_seen,
-                                       global_vars_seen, ir_nameless_values,
-                                       IR_VALUE_RE, False)
+    return generalize_check_lines_common(
+        lines,
+        is_analyze,
+        vars_seen,
+        global_vars_seen,
+        ir_nameless_values,
+        IR_VALUE_RE,
+        False,
+    )
 
-def generalize_asm_check_lines(lines, vars_seen, global_vars_seen):
-  return generalize_check_lines_common(lines, False, vars_seen,
-                                       global_vars_seen, asm_nameless_values,
-                                       ASM_VALUE_RE, True)
-
-def add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name, check_label_format, is_backend, is_analyze, version, global_vars_seen_dict, is_filtered):
-  # prefix_exclusions are prefixes we cannot use to print the function because it doesn't exist in run lines that use these prefixes as well.
-  prefix_exclusions = set()
-  printed_prefixes = []
-  for p in prefix_list:
-    checkprefixes = p[0]
-    # If not all checkprefixes of this run line produced the function we cannot check for it as it does not
-    # exist for this run line. A subset of the check prefixes might know about the function but only because
-    # other run lines created it.
-    if any(map(lambda checkprefix: func_name not in func_dict[checkprefix], checkprefixes)):
-      prefix_exclusions |= set(checkprefixes)
-      continue
-
-  # prefix_exclusions is constructed, we can now emit the output
-  for p in prefix_list:
-    global_vars_seen = {}
-    checkprefixes = p[0]
-    for checkprefix in checkprefixes:
-      if checkprefix in global_vars_seen_dict:
-        global_vars_seen.update(global_vars_seen_dict[checkprefix])
-      else:
-        global_vars_seen_dict[checkprefix] = {}
-      if checkprefix in printed_prefixes:
-        break
-
-      # Check if the prefix is excluded.
-      if checkprefix in prefix_exclusions:
-        continue
-
-      # If we do not have output for this prefix we skip it.
-      if not func_dict[checkprefix][func_name]:
-        continue
-
-      # Add some space between different check prefixes, but not after the last
-      # check line (before the test code).
-      if is_backend:
-        if len(printed_prefixes) != 0:
-          output_lines.append(comment_marker)
-
-      if checkprefix not in global_vars_seen_dict:
-        global_vars_seen_dict[checkprefix] = {}
-
-      global_vars_seen_before = [key for key in global_vars_seen.keys()]
-
-      vars_seen = set()
-      printed_prefixes.append(checkprefix)
-      attrs = str(func_dict[checkprefix][func_name].attrs)
-      attrs = '' if attrs == 'None' else attrs
-      if version > 1:
-        funcdef_attrs_and_ret = func_dict[checkprefix][func_name].funcdef_attrs_and_ret
-      else:
-        funcdef_attrs_and_ret = ''
-
-      if attrs:
-        output_lines.append('%s %s: Function Attrs: %s' % (comment_marker, checkprefix, attrs))
-      args_and_sig = str(func_dict[checkprefix][func_name].args_and_sig)
-      if args_and_sig:
-        args_and_sig = generalize_check_lines([args_and_sig], is_analyze, vars_seen, global_vars_seen)[0]
-      func_name_separator = func_dict[checkprefix][func_name].func_name_separator
-      if '[[' in args_and_sig:
-        output_lines.append(check_label_format % (checkprefix, funcdef_attrs_and_ret, func_name, '', func_name_separator))
-        output_lines.append('%s %s-SAME: %s' % (comment_marker, checkprefix, args_and_sig))
-      else:
-        output_lines.append(check_label_format % (checkprefix, funcdef_attrs_and_ret, func_name, args_and_sig, func_name_separator))
-      func_body = str(func_dict[checkprefix][func_name]).splitlines()
-      if not func_body:
-        # We have filtered everything.
-        continue
-
-      # For ASM output, just emit the check lines.
-      if is_backend:
-        body_start = 1
-        if is_filtered:
-          # For filtered output we don't add "-NEXT" so don't add extra spaces
-          # before the first line.
-          body_start = 0
-        else:
-          output_lines.append('%s %s:       %s' % (comment_marker, checkprefix, func_body[0]))
-        func_lines = generalize_asm_check_lines(func_body[body_start:],
-                                                vars_seen, global_vars_seen)
-        for func_line in func_lines:
-          if func_line.strip() == '':
-            output_lines.append('%s %s-EMPTY:' % (comment_marker, checkprefix))
-          else:
-            check_suffix = '-NEXT' if not is_filtered else ''
-            output_lines.append('%s %s%s:  %s' % (comment_marker, checkprefix,
-                                                  check_suffix, func_line))
-        # Remember new global variables we have not seen before
-        for key in global_vars_seen:
-          if key not in global_vars_seen_before:
-            global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
-        break
-
-      # For IR output, change all defs to FileCheck variables, so we're immune
-      # to variable naming fashions.
-      func_body = generalize_check_lines(func_body, is_analyze, vars_seen, global_vars_seen)
-
-      # This could be selectively enabled with an optional invocation argument.
-      # Disabled for now: better to check everything. Be safe rather than sorry.
-
-      # Handle the first line of the function body as a special case because
-      # it's often just noise (a useless asm comment or entry label).
-      #if func_body[0].startswith("#") or func_body[0].startswith("entry:"):
-      #  is_blank_line = True
-      #else:
-      #  output_lines.append('%s %s:       %s' % (comment_marker, checkprefix, func_body[0]))
-      #  is_blank_line = False
-
-      is_blank_line = False
-
-      for func_line in func_body:
-        if func_line.strip() == '':
-          is_blank_line = True
-          continue
-        # Do not waste time checking IR comments.
-        func_line = SCRUB_IR_COMMENT_RE.sub(r'', func_line)
-
-        # Skip blank lines instead of checking them.
-        if is_blank_line:
-          output_lines.append('{} {}:       {}'.format(
-              comment_marker, checkprefix, func_line))
-        else:
-          check_suffix = '-NEXT' if not is_filtered else ''
-          output_lines.append('{} {}{}:  {}'.format(
-              comment_marker, checkprefix, check_suffix, func_line))
-        is_blank_line = False
-
-      # Add space between different check prefixes and also before the first
-      # line of code in the test function.
-      output_lines.append(comment_marker)
-
-      # Remember new global variables we have not seen before
-      for key in global_vars_seen:
-        if key not in global_vars_seen_before:
-          global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
-      break
-  return printed_prefixes
-
-def add_ir_checks(output_lines, comment_marker, prefix_list, func_dict,
-                  func_name, preserve_names, function_sig, version,
-                  global_vars_seen_dict, is_filtered):
-  # Label format is based on IR string.
-  if function_sig and version > 1:
-    function_def_regex = 'define %s'
-  elif function_sig:
-    function_def_regex = 'define {{[^@]+}}%s'
-  else:
-    function_def_regex = '%s'
-  check_label_format = '{} %s-LABEL: {}@%s%s%s'.format(comment_marker, function_def_regex)
-  return add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name,
-                    check_label_format, False, preserve_names, version,
-                    global_vars_seen_dict,
-                    is_filtered)
-
-def add_analyze_checks(output_lines, comment_marker, prefix_list, func_dict, func_name, is_filtered):
-  check_label_format = '{} %s-LABEL: \'%s%s%s%s\''.format(comment_marker)
-  global_vars_seen_dict = {}
-  return add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name,
-                    check_label_format, False, True, 1, global_vars_seen_dict,
-                    is_filtered)
 
-def build_global_values_dictionary(glob_val_dict, raw_tool_output, prefixes):
-  for nameless_value in itertools.chain(ir_nameless_values, asm_nameless_values):
-    if nameless_value.global_ir_rhs_regexp is None:
-      continue
-
-    lhs_re_str = nameless_value.ir_prefix + nameless_value.ir_regexp
-    rhs_re_str = nameless_value.global_ir_rhs_regexp
-
-    global_ir_value_re_str = r'^' + lhs_re_str + r'\s=\s' + rhs_re_str + r'$'
-    global_ir_value_re = re.compile(global_ir_value_re_str, flags=(re.M))
-    lines = []
-    for m in global_ir_value_re.finditer(raw_tool_output):
-      lines.append(m.group(0))
-
-    for prefix in prefixes:
-      if glob_val_dict[prefix] is None:
-        continue
-      if nameless_value.check_prefix in glob_val_dict[prefix]:
-        if lines == glob_val_dict[prefix][nameless_value.check_prefix]:
-          continue
-        if prefix == prefixes[-1]:
-          warn('Found conflicting asm under the same prefix: %r!' % (prefix,))
-        else:
-          glob_val_dict[prefix][nameless_value.check_prefix] = None
-          continue
-      glob_val_dict[prefix][nameless_value.check_prefix] = lines
-
-def add_global_checks(glob_val_dict, comment_marker, prefix_list, output_lines, global_vars_seen_dict, is_analyze, is_before_functions):
-  printed_prefixes = set()
-  for nameless_value in ir_nameless_values:
-    if nameless_value.global_ir_rhs_regexp is None:
-      continue
-    if nameless_value.is_before_functions != is_before_functions:
-      continue
+def generalize_asm_check_lines(lines, vars_seen, global_vars_seen):
+    return generalize_check_lines_common(
+        lines,
+        False,
+        vars_seen,
+        global_vars_seen,
+        asm_nameless_values,
+        ASM_VALUE_RE,
+        True,
+    )
+
+
+def add_checks(
+    output_lines,
+    comment_marker,
+    prefix_list,
+    func_dict,
+    func_name,
+    check_label_format,
+    is_backend,
+    is_analyze,
+    version,
+    global_vars_seen_dict,
+    is_filtered,
+):
+    # prefix_exclusions are prefixes we cannot use to print the function because it doesn't exist in run lines that use these prefixes as well.
+    prefix_exclusions = set()
+    printed_prefixes = []
     for p in prefix_list:
-      global_vars_seen = {}
-      checkprefixes = p[0]
-      if checkprefixes is None:
-        continue
-      for checkprefix in checkprefixes:
-        if checkprefix in global_vars_seen_dict:
-          global_vars_seen.update(global_vars_seen_dict[checkprefix])
-        else:
-          global_vars_seen_dict[checkprefix] = {}
-        if (checkprefix, nameless_value.check_prefix) in printed_prefixes:
-          break
-        if not glob_val_dict[checkprefix]:
-          continue
-        if nameless_value.check_prefix not in glob_val_dict[checkprefix]:
-          continue
-        if not glob_val_dict[checkprefix][nameless_value.check_prefix]:
-          continue
-
-        check_lines = []
-        global_vars_seen_before = [key for key in global_vars_seen.keys()]
-        for line in glob_val_dict[checkprefix][nameless_value.check_prefix]:
-          if _global_value_regex:
-            matched = False
-            for regex in _global_value_regex:
-              if re.match('^@' + regex + ' = ', line):
-                matched = True
+        checkprefixes = p[0]
+        # If not all checkprefixes of this run line produced the function we cannot check for it as it does not
+        # exist for this run line. A subset of the check prefixes might know about the function but only because
+        # other run lines created it.
+        if any(
+            map(
+                lambda checkprefix: func_name not in func_dict[checkprefix],
+                checkprefixes,
+            )
+        ):
+            prefix_exclusions |= set(checkprefixes)
+            continue
+
+    # prefix_exclusions is constructed, we can now emit the output
+    for p in prefix_list:
+        global_vars_seen = {}
+        checkprefixes = p[0]
+        for checkprefix in checkprefixes:
+            if checkprefix in global_vars_seen_dict:
+                global_vars_seen.update(global_vars_seen_dict[checkprefix])
+            else:
+                global_vars_seen_dict[checkprefix] = {}
+            if checkprefix in printed_prefixes:
                 break
-            if not matched:
-              continue
-          tmp = generalize_check_lines([line], is_analyze, set(), global_vars_seen)
-          check_line = '%s %s: %s' % (comment_marker, checkprefix, tmp[0])
-          check_lines.append(check_line)
-        if not check_lines:
-          continue
 
-        output_lines.append(comment_marker + SEPARATOR)
-        for check_line in check_lines:
-          output_lines.append(check_line)
+            # Check if the prefix is excluded.
+            if checkprefix in prefix_exclusions:
+                continue
+
+            # If we do not have output for this prefix we skip it.
+            if not func_dict[checkprefix][func_name]:
+                continue
+
+            # Add some space between different check prefixes, but not after the last
+            # check line (before the test code).
+            if is_backend:
+                if len(printed_prefixes) != 0:
+                    output_lines.append(comment_marker)
+
+            if checkprefix not in global_vars_seen_dict:
+                global_vars_seen_dict[checkprefix] = {}
+
+            global_vars_seen_before = [key for key in global_vars_seen.keys()]
+
+            vars_seen = set()
+            printed_prefixes.append(checkprefix)
+            attrs = str(func_dict[checkprefix][func_name].attrs)
+            attrs = "" if attrs == "None" else attrs
+            if version > 1:
+                funcdef_attrs_and_ret = func_dict[checkprefix][
+                    func_name
+                ].funcdef_attrs_and_ret
+            else:
+                funcdef_attrs_and_ret = ""
+
+            if attrs:
+                output_lines.append(
+                    "%s %s: Function Attrs: %s" % (comment_marker, checkprefix, attrs)
+                )
+            args_and_sig = str(func_dict[checkprefix][func_name].args_and_sig)
+            if args_and_sig:
+                args_and_sig = generalize_check_lines(
+                    [args_and_sig], is_analyze, vars_seen, global_vars_seen
+                )[0]
+            func_name_separator = func_dict[checkprefix][func_name].func_name_separator
+            if "[[" in args_and_sig:
+                output_lines.append(
+                    check_label_format
+                    % (
+                        checkprefix,
+                        funcdef_attrs_and_ret,
+                        func_name,
+                        "",
+                        func_name_separator,
+                    )
+                )
+                output_lines.append(
+                    "%s %s-SAME: %s" % (comment_marker, checkprefix, args_and_sig)
+                )
+            else:
+                output_lines.append(
+                    check_label_format
+                    % (
+                        checkprefix,
+                        funcdef_attrs_and_ret,
+                        func_name,
+                        args_and_sig,
+                        func_name_separator,
+                    )
+                )
+            func_body = str(func_dict[checkprefix][func_name]).splitlines()
+            if not func_body:
+                # We have filtered everything.
+                continue
+
+            # For ASM output, just emit the check lines.
+            if is_backend:
+                body_start = 1
+                if is_filtered:
+                    # For filtered output we don't add "-NEXT" so don't add extra spaces
+                    # before the first line.
+                    body_start = 0
+                else:
+                    output_lines.append(
+                        "%s %s:       %s" % (comment_marker, checkprefix, func_body[0])
+                    )
+                func_lines = generalize_asm_check_lines(
+                    func_body[body_start:], vars_seen, global_vars_seen
+                )
+                for func_line in func_lines:
+                    if func_line.strip() == "":
+                        output_lines.append(
+                            "%s %s-EMPTY:" % (comment_marker, checkprefix)
+                        )
+                    else:
+                        check_suffix = "-NEXT" if not is_filtered else ""
+                        output_lines.append(
+                            "%s %s%s:  %s"
+                            % (comment_marker, checkprefix, check_suffix, func_line)
+                        )
+                # Remember new global variables we have not seen before
+                for key in global_vars_seen:
+                    if key not in global_vars_seen_before:
+                        global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
+                break
 
-        printed_prefixes.add((checkprefix, nameless_value.check_prefix))
+            # For IR output, change all defs to FileCheck variables, so we're immune
+            # to variable naming fashions.
+            func_body = generalize_check_lines(
+                func_body, is_analyze, vars_seen, global_vars_seen
+            )
+
+            # This could be selectively enabled with an optional invocation argument.
+            # Disabled for now: better to check everything. Be safe rather than sorry.
+
+            # Handle the first line of the function body as a special case because
+            # it's often just noise (a useless asm comment or entry label).
+            # if func_body[0].startswith("#") or func_body[0].startswith("entry:"):
+            #  is_blank_line = True
+            # else:
+            #  output_lines.append('%s %s:       %s' % (comment_marker, checkprefix, func_body[0]))
+            #  is_blank_line = False
+
+            is_blank_line = False
+
+            for func_line in func_body:
+                if func_line.strip() == "":
+                    is_blank_line = True
+                    continue
+                # Do not waste time checking IR comments.
+                func_line = SCRUB_IR_COMMENT_RE.sub(r"", func_line)
+
+                # Skip blank lines instead of checking them.
+                if is_blank_line:
+                    output_lines.append(
+                        "{} {}:       {}".format(comment_marker, checkprefix, func_line)
+                    )
+                else:
+                    check_suffix = "-NEXT" if not is_filtered else ""
+                    output_lines.append(
+                        "{} {}{}:  {}".format(
+                            comment_marker, checkprefix, check_suffix, func_line
+                        )
+                    )
+                is_blank_line = False
+
+            # Add space between different check prefixes and also before the first
+            # line of code in the test function.
+            output_lines.append(comment_marker)
+
+            # Remember new global variables we have not seen before
+            for key in global_vars_seen:
+                if key not in global_vars_seen_before:
+                    global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
+            break
+    return printed_prefixes
+
+
+def add_ir_checks(
+    output_lines,
+    comment_marker,
+    prefix_list,
+    func_dict,
+    func_name,
+    preserve_names,
+    function_sig,
+    version,
+    global_vars_seen_dict,
+    is_filtered,
+):
+    # Label format is based on IR string.
+    if function_sig and version > 1:
+        function_def_regex = "define %s"
+    elif function_sig:
+        function_def_regex = "define {{[^@]+}}%s"
+    else:
+        function_def_regex = "%s"
+    check_label_format = "{} %s-LABEL: {}@%s%s%s".format(
+        comment_marker, function_def_regex
+    )
+    return add_checks(
+        output_lines,
+        comment_marker,
+        prefix_list,
+        func_dict,
+        func_name,
+        check_label_format,
+        False,
+        preserve_names,
+        version,
+        global_vars_seen_dict,
+        is_filtered,
+    )
+
+
+def add_analyze_checks(
+    output_lines, comment_marker, prefix_list, func_dict, func_name, is_filtered
+):
+    check_label_format = "{} %s-LABEL: '%s%s%s%s'".format(comment_marker)
+    global_vars_seen_dict = {}
+    return add_checks(
+        output_lines,
+        comment_marker,
+        prefix_list,
+        func_dict,
+        func_name,
+        check_label_format,
+        False,
+        True,
+        1,
+        global_vars_seen_dict,
+        is_filtered,
+    )
 
-        # Remembe new global variables we have not seen before
-        for key in global_vars_seen:
-          if key not in global_vars_seen_before:
-            global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
-        break
 
-  if printed_prefixes:
-    output_lines.append(comment_marker + SEPARATOR)
-  return printed_prefixes
+def build_global_values_dictionary(glob_val_dict, raw_tool_output, prefixes):
+    for nameless_value in itertools.chain(ir_nameless_values, asm_nameless_values):
+        if nameless_value.global_ir_rhs_regexp is None:
+            continue
+
+        lhs_re_str = nameless_value.ir_prefix + nameless_value.ir_regexp
+        rhs_re_str = nameless_value.global_ir_rhs_regexp
+
+        global_ir_value_re_str = r"^" + lhs_re_str + r"\s=\s" + rhs_re_str + r"$"
+        global_ir_value_re = re.compile(global_ir_value_re_str, flags=(re.M))
+        lines = []
+        for m in global_ir_value_re.finditer(raw_tool_output):
+            lines.append(m.group(0))
+
+        for prefix in prefixes:
+            if glob_val_dict[prefix] is None:
+                continue
+            if nameless_value.check_prefix in glob_val_dict[prefix]:
+                if lines == glob_val_dict[prefix][nameless_value.check_prefix]:
+                    continue
+                if prefix == prefixes[-1]:
+                    warn("Found conflicting asm under the same prefix: %r!" % (prefix,))
+                else:
+                    glob_val_dict[prefix][nameless_value.check_prefix] = None
+                    continue
+            glob_val_dict[prefix][nameless_value.check_prefix] = lines
+
+
+def add_global_checks(
+    glob_val_dict,
+    comment_marker,
+    prefix_list,
+    output_lines,
+    global_vars_seen_dict,
+    is_analyze,
+    is_before_functions,
+):
+    printed_prefixes = set()
+    for nameless_value in ir_nameless_values:
+        if nameless_value.global_ir_rhs_regexp is None:
+            continue
+        if nameless_value.is_before_functions != is_before_functions:
+            continue
+        for p in prefix_list:
+            global_vars_seen = {}
+            checkprefixes = p[0]
+            if checkprefixes is None:
+                continue
+            for checkprefix in checkprefixes:
+                if checkprefix in global_vars_seen_dict:
+                    global_vars_seen.update(global_vars_seen_dict[checkprefix])
+                else:
+                    global_vars_seen_dict[checkprefix] = {}
+                if (checkprefix, nameless_value.check_prefix) in printed_prefixes:
+                    break
+                if not glob_val_dict[checkprefix]:
+                    continue
+                if nameless_value.check_prefix not in glob_val_dict[checkprefix]:
+                    continue
+                if not glob_val_dict[checkprefix][nameless_value.check_prefix]:
+                    continue
+
+                check_lines = []
+                global_vars_seen_before = [key for key in global_vars_seen.keys()]
+                for line in glob_val_dict[checkprefix][nameless_value.check_prefix]:
+                    if _global_value_regex:
+                        matched = False
+                        for regex in _global_value_regex:
+                            if re.match("^@" + regex + " = ", line):
+                                matched = True
+                                break
+                        if not matched:
+                            continue
+                    tmp = generalize_check_lines(
+                        [line], is_analyze, set(), global_vars_seen
+                    )
+                    check_line = "%s %s: %s" % (comment_marker, checkprefix, tmp[0])
+                    check_lines.append(check_line)
+                if not check_lines:
+                    continue
+
+                output_lines.append(comment_marker + SEPARATOR)
+                for check_line in check_lines:
+                    output_lines.append(check_line)
+
+                printed_prefixes.add((checkprefix, nameless_value.check_prefix))
+
+                # Remember new global variables we have not seen before
+                for key in global_vars_seen:
+                    if key not in global_vars_seen_before:
+                        global_vars_seen_dict[checkprefix][key] = global_vars_seen[key]
+                break
+
+    if printed_prefixes:
+        output_lines.append(comment_marker + SEPARATOR)
+    return printed_prefixes
 
 
 def check_prefix(prefix):
-  if not PREFIX_RE.match(prefix):
-    hint = ""
-    if ',' in prefix:
-      hint = " Did you mean '--check-prefixes=" + prefix + "'?"
-    warn(("Supplied prefix '%s' is invalid. Prefix must contain only alphanumeric characters, hyphens and underscores." + hint) %
-         (prefix))
+    if not PREFIX_RE.match(prefix):
+        hint = ""
+        if "," in prefix:
+            hint = " Did you mean '--check-prefixes=" + prefix + "'?"
+        warn(
+            (
+                "Supplied prefix '%s' is invalid. Prefix must contain only alphanumeric characters, hyphens and underscores."
+                + hint
+            )
+            % (prefix)
+        )
+
 
 def get_check_prefixes(filecheck_cmd):
-  check_prefixes = [item for m in CHECK_PREFIX_RE.finditer(filecheck_cmd)
-                           for item in m.group(1).split(',')]
-  if not check_prefixes:
-    check_prefixes = ['CHECK']
-  return check_prefixes
+    check_prefixes = [
+        item
+        for m in CHECK_PREFIX_RE.finditer(filecheck_cmd)
+        for item in m.group(1).split(",")
+    ]
+    if not check_prefixes:
+        check_prefixes = ["CHECK"]
+    return check_prefixes
+
 
 def verify_filecheck_prefixes(fc_cmd):
-  fc_cmd_parts = fc_cmd.split()
-  for part in fc_cmd_parts:
-    if "check-prefix=" in part:
-      prefix = part.split('=', 1)[1]
-      check_prefix(prefix)
-    elif "check-prefixes=" in part:
-      prefixes = part.split('=', 1)[1].split(',')
-      for prefix in prefixes:
-        check_prefix(prefix)
-        if prefixes.count(prefix) > 1:
-          warn("Supplied prefix '%s' is not unique in the prefix list." % (prefix,))
+    fc_cmd_parts = fc_cmd.split()
+    for part in fc_cmd_parts:
+        if "check-prefix=" in part:
+            prefix = part.split("=", 1)[1]
+            check_prefix(prefix)
+        elif "check-prefixes=" in part:
+            prefixes = part.split("=", 1)[1].split(",")
+            for prefix in prefixes:
+                check_prefix(prefix)
+                if prefixes.count(prefix) > 1:
+                    warn(
+                        "Supplied prefix '%s' is not unique in the prefix list."
+                        % (prefix,)
+                    )
 
 
 def get_autogennote_suffix(parser, args):
-  autogenerated_note_args = ''
-  for action in parser._actions:
-    if not hasattr(args, action.dest):
-      continue  # Ignore options such as --help that aren't included in args
-    # Ignore parameters such as paths to the binary or the list of tests
-    if action.dest in ('tests', 'update_only', 'tool_binary', 'opt_binary',
-                       'llc_binary', 'clang', 'opt', 'llvm_bin', 'verbose',
-                       'force_update'):
-      continue
-    value = getattr(args, action.dest)
-    if action.const is not None:  # action stores a constant (usually True/False)
-      # Skip actions with different constant values (this happens with boolean
-      # --foo/--no-foo options)
-      if value != action.const:
-        continue
-    if parser.get_default(action.dest) == value:
-      continue  # Don't add default values
-    if action.dest == 'function_signature' and args.version >= 2:
-      continue # Enabled by default in version 2
-    if action.dest == 'filters':
-      # Create a separate option for each filter element.  The value is a list
-      # of Filter objects.
-      for elem in value:
-        opt_name = 'filter-out' if elem.is_filter_out else 'filter'
-        opt_value = elem.pattern()
-        new_arg = '--%s "%s" ' % (opt_name, opt_value.strip('"'))
-        if new_arg not in autogenerated_note_args:
-          autogenerated_note_args += new_arg
-    else:
-      autogenerated_note_args += action.option_strings[0] + ' '
-      if action.const is None:  # action takes a parameter
-        if action.nargs == '+':
-          value = ' '.join(map(lambda v: '"' + v.strip('"') + '"', value))
-        autogenerated_note_args += '%s ' % value
-  if autogenerated_note_args:
-    autogenerated_note_args = ' %s %s' % (UTC_ARGS_KEY, autogenerated_note_args[:-1])
-  return autogenerated_note_args
+    autogenerated_note_args = ""
+    for action in parser._actions:
+        if not hasattr(args, action.dest):
+            continue  # Ignore options such as --help that aren't included in args
+        # Ignore parameters such as paths to the binary or the list of tests
+        if action.dest in (
+            "tests",
+            "update_only",
+            "tool_binary",
+            "opt_binary",
+            "llc_binary",
+            "clang",
+            "opt",
+            "llvm_bin",
+            "verbose",
+            "force_update",
+        ):
+            continue
+        value = getattr(args, action.dest)
+        if action.const is not None:  # action stores a constant (usually True/False)
+            # Skip actions with different constant values (this happens with boolean
+            # --foo/--no-foo options)
+            if value != action.const:
+                continue
+        if parser.get_default(action.dest) == value:
+            continue  # Don't add default values
+        if action.dest == "function_signature" and args.version >= 2:
+            continue  # Enabled by default in version 2
+        if action.dest == "filters":
+            # Create a separate option for each filter element.  The value is a list
+            # of Filter objects.
+            for elem in value:
+                opt_name = "filter-out" if elem.is_filter_out else "filter"
+                opt_value = elem.pattern()
+                new_arg = '--%s "%s" ' % (opt_name, opt_value.strip('"'))
+                if new_arg not in autogenerated_note_args:
+                    autogenerated_note_args += new_arg
+        else:
+            autogenerated_note_args += action.option_strings[0] + " "
+            if action.const is None:  # action takes a parameter
+                if action.nargs == "+":
+                    value = " ".join(map(lambda v: '"' + v.strip('"') + '"', value))
+                autogenerated_note_args += "%s " % value
+    if autogenerated_note_args:
+        autogenerated_note_args = " %s %s" % (
+            UTC_ARGS_KEY,
+            autogenerated_note_args[:-1],
+        )
+    return autogenerated_note_args
 
 
 def check_for_command(line, parser, args, argv, argparse_callback):
-  cmd_m = UTC_ARGS_CMD.match(line)
-  if cmd_m:
-    for option in shlex.split(cmd_m.group('cmd').strip()):
-      if option:
-        argv.append(option)
-    args = parse_args(parser, filter(lambda arg: arg not in args.tests, argv))
-    if argparse_callback is not None:
-      argparse_callback(args)
-  return args, argv
+    cmd_m = UTC_ARGS_CMD.match(line)
+    if cmd_m:
+        for option in shlex.split(cmd_m.group("cmd").strip()):
+            if option:
+                argv.append(option)
+        args = parse_args(parser, filter(lambda arg: arg not in args.tests, argv))
+        if argparse_callback is not None:
+            argparse_callback(args)
+    return args, argv
+
 
 def find_arg_in_test(test_info, get_arg_to_check, arg_string, is_global):
-  result = get_arg_to_check(test_info.args)
-  if not result and is_global:
-    # See if this has been specified via UTC_ARGS.  This is a "global" option
-    # that affects the entire generation of test checks.  If it exists anywhere
-    # in the test, apply it to everything.
-    saw_line = False
-    for line_info in test_info.ro_iterlines():
-      line = line_info.line
-      if not line.startswith(';') and line.strip() != '':
-        saw_line = True
-      result = get_arg_to_check(line_info.args)
-      if result:
-        if warn and saw_line:
-          # We saw the option after already reading some test input lines.
-          # Warn about it.
-          print('WARNING: Found {} in line following test start: '.format(arg_string)
-                + line, file=sys.stderr)
-          print('WARNING: Consider moving {} to top of file'.format(arg_string),
-                file=sys.stderr)
-        break
-  return result
+    result = get_arg_to_check(test_info.args)
+    if not result and is_global:
+        # See if this has been specified via UTC_ARGS.  This is a "global" option
+        # that affects the entire generation of test checks.  If it exists anywhere
+        # in the test, apply it to everything.
+        saw_line = False
+        for line_info in test_info.ro_iterlines():
+            line = line_info.line
+            if not line.startswith(";") and line.strip() != "":
+                saw_line = True
+            result = get_arg_to_check(line_info.args)
+            if result:
+                if warn and saw_line:
+                    # We saw the option after already reading some test input lines.
+                    # Warn about it.
+                    print(
+                        "WARNING: Found {} in line following test start: ".format(
+                            arg_string
+                        )
+                        + line,
+                        file=sys.stderr,
+                    )
+                    print(
+                        "WARNING: Consider moving {} to top of file".format(arg_string),
+                        file=sys.stderr,
+                    )
+                break
+    return result
+
 
 def dump_input_lines(output_lines, test_info, prefix_set, comment_string):
-  for input_line_info in test_info.iterlines(output_lines):
-    line = input_line_info.line
-    args = input_line_info.args
-    if line.strip() == comment_string:
-      continue
-    if line.strip() == comment_string + SEPARATOR:
-      continue
-    if line.lstrip().startswith(comment_string):
-      m = CHECK_RE.match(line)
-      if m and m.group(1) in prefix_set:
-        continue
-    output_lines.append(line.rstrip('\n'))
-
-def add_checks_at_end(output_lines, prefix_list, func_order,
-                      comment_string, check_generator):
-  added = set()
-  generated_prefixes = set()
-  for prefix in prefix_list:
-    prefixes = prefix[0]
-    tool_args = prefix[1]
-    for prefix in prefixes:
-      for func in func_order[prefix]:
-        # The func order can contain the same functions multiple times.
-        # If we see one again we are done.
-        if (func, prefix) in added:
-          continue
-        if added:
-          output_lines.append(comment_string)
-
-        # The add_*_checks routines expect a run list whose items are
-        # tuples that have a list of prefixes as their first element and
-        # tool command args string as their second element.  They output
-        # checks for each prefix in the list of prefixes.  By doing so, it
-        # implicitly assumes that for each function every run line will
-        # generate something for that function.  That is not the case for
-        # generated functions as some run lines might not generate them
-        # (e.g. -fopenmp vs. no -fopenmp).
-        #
-        # Therefore, pass just the prefix we're interested in.  This has
-        # the effect of generating all of the checks for functions of a
-        # single prefix before moving on to the next prefix.  So checks
-        # are ordered by prefix instead of by function as in "normal"
-        # mode.
-        for generated_prefix in check_generator(output_lines,
-                        [([prefix], tool_args)], func):
-          added.add((func, generated_prefix))
-          generated_prefixes.add(generated_prefix)
-  return generated_prefixes
+    for input_line_info in test_info.iterlines(output_lines):
+        line = input_line_info.line
+        args = input_line_info.args
+        if line.strip() == comment_string:
+            continue
+        if line.strip() == comment_string + SEPARATOR:
+            continue
+        if line.lstrip().startswith(comment_string):
+            m = CHECK_RE.match(line)
+            if m and m.group(1) in prefix_set:
+                continue
+        output_lines.append(line.rstrip("\n"))
+
+
+def add_checks_at_end(
+    output_lines, prefix_list, func_order, comment_string, check_generator
+):
+    added = set()
+    generated_prefixes = set()
+    for prefix in prefix_list:
+        prefixes = prefix[0]
+        tool_args = prefix[1]
+        for prefix in prefixes:
+            for func in func_order[prefix]:
+                # The func order can contain the same functions multiple times.
+                # If we see one again we are done.
+                if (func, prefix) in added:
+                    continue
+                if added:
+                    output_lines.append(comment_string)
+
+                # The add_*_checks routines expect a run list whose items are
+                # tuples that have a list of prefixes as their first element and
+                # tool command args string as their second element.  They output
+                # checks for each prefix in the list of prefixes.  By doing so, it
+                # implicitly assumes that for each function every run line will
+                # generate something for that function.  That is not the case for
+                # generated functions as some run lines might not generate them
+                # (e.g. -fopenmp vs. no -fopenmp).
+                #
+                # Therefore, pass just the prefix we're interested in.  This has
+                # the effect of generating all of the checks for functions of a
+                # single prefix before moving on to the next prefix.  So checks
+                # are ordered by prefix instead of by function as in "normal"
+                # mode.
+                for generated_prefix in check_generator(
+                    output_lines, [([prefix], tool_args)], func
+                ):
+                    added.add((func, generated_prefix))
+                    generated_prefixes.add(generated_prefix)
+    return generated_prefixes

diff --git a/llvm/utils/UpdateTestChecks/isel.py b/llvm/utils/UpdateTestChecks/isel.py
index c10a55540efdd..bdb68e5815a33 100644
--- a/llvm/utils/UpdateTestChecks/isel.py
+++ b/llvm/utils/UpdateTestChecks/isel.py
@@ -3,10 +3,12 @@
 import sys
 
 if sys.version_info[0] > 2:
-  class string:
-    expandtabs = str.expandtabs
+
+    class string:
+        expandtabs = str.expandtabs
+
 else:
-  import string
+    import string
 
 # Support of isel debug checks
 # RegEx: this is where the magic happens.
@@ -15,43 +17,64 @@ class string:
 
 # TODO: add function prefix
 ISEL_FUNCTION_DEFAULT_RE = re.compile(
-     r'Selected[\s]*selection[\s]*DAG:[\s]*%bb.0[\s]*\'(?P<func>.*?):[^\']*\'*\n'
-     r'(?P<body>.*?)\n'
-     r'Total[\s]*amount[\s]*of[\s]*phi[\s]*nodes[\s]*to[\s]*update:[\s]*[0-9]+',
-     flags=(re.M | re.S))
+    r"Selected[\s]*selection[\s]*DAG:[\s]*%bb.0[\s]*\'(?P<func>.*?):[^\']*\'*\n"
+    r"(?P<body>.*?)\n"
+    r"Total[\s]*amount[\s]*of[\s]*phi[\s]*nodes[\s]*to[\s]*update:[\s]*[0-9]+",
+    flags=(re.M | re.S),
+)
+
 
 def scrub_isel_default(isel, args):
-  # Scrub runs of whitespace out of the iSel debug output, but leave the leading
-  # whitespace in place.
-  isel = common.SCRUB_WHITESPACE_RE.sub(r' ', isel)
-  # Expand the tabs used for indentation.
-  isel = string.expandtabs(isel, 2)
-  # Strip trailing whitespace.
-  isel = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', isel)
-  return isel
+    # Scrub runs of whitespace out of the iSel debug output, but leave the leading
+    # whitespace in place.
+    isel = common.SCRUB_WHITESPACE_RE.sub(r" ", isel)
+    # Expand the tabs used for indentation.
+    isel = string.expandtabs(isel, 2)
+    # Strip trailing whitespace.
+    isel = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r"", isel)
+    return isel
+
 
 def get_run_handler(triple):
-  target_handlers = {
-  }
-  handler = None
-  best_prefix = ''
-  for prefix, s in target_handlers.items():
-    if triple.startswith(prefix) and len(prefix) > len(best_prefix):
-      handler = s
-      best_prefix = prefix
+    target_handlers = {}
+    handler = None
+    best_prefix = ""
+    for prefix, s in target_handlers.items():
+        if triple.startswith(prefix) and len(prefix) > len(best_prefix):
+            handler = s
+            best_prefix = prefix
+
+    if handler is None:
+        common.debug("Using default handler.")
+        handler = (scrub_isel_default, ISEL_FUNCTION_DEFAULT_RE)
 
-  if handler is None:
-    common.debug('Using default handler.')
-    handler = (scrub_isel_default, ISEL_FUNCTION_DEFAULT_RE)
+    return handler
 
-  return handler
 
 ##### Generator of iSel CHECK lines
 
-def add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name,
-               global_vars_seen_dict, is_filtered):
-  # Label format is based on iSel string.
-  check_label_format = '{} %s-LABEL: %s%s%s%s'.format(comment_marker)
-  return common.add_checks(output_lines, comment_marker, prefix_list, func_dict,
-                           func_name, check_label_format, True, False, 1,
-                           global_vars_seen_dict, is_filtered=is_filtered)
+
+def add_checks(
+    output_lines,
+    comment_marker,
+    prefix_list,
+    func_dict,
+    func_name,
+    global_vars_seen_dict,
+    is_filtered,
+):
+    # Label format is based on iSel string.
+    check_label_format = "{} %s-LABEL: %s%s%s%s".format(comment_marker)
+    return common.add_checks(
+        output_lines,
+        comment_marker,
+        prefix_list,
+        func_dict,
+        func_name,
+        check_label_format,
+        True,
+        False,
+        1,
+        global_vars_seen_dict,
+        is_filtered=is_filtered,
+    )

diff --git a/llvm/utils/abtest.py b/llvm/utils/abtest.py
index 4ca3ca4f730e9..a799c8764b290 100755
--- a/llvm/utils/abtest.py
+++ b/llvm/utils/abtest.py
@@ -57,11 +57,7 @@
 
 
 def find(dir, file_filter=None):
-    files = [
-        walkdir[0]+"/"+file
-        for walkdir in os.walk(dir)
-        for file in walkdir[2]
-    ]
+    files = [walkdir[0] + "/" + file for walkdir in os.walk(dir) for file in walkdir[2]]
     if file_filter is not None:
         files = filter(files, file_filter)
     return sorted(files)
@@ -147,8 +143,10 @@ def test_partition(partition, upcoming_partition):
         picks = dict(all_a)
         for x in partition:
             picks[x] = choice_map[x][1]
-        announce_test("checking %s [<=%d remaining]" %
-                      (format_namelist(partition), max_remaining_steps))
+        announce_test(
+            "checking %s [<=%d remaining]"
+            % (format_namelist(partition), max_remaining_steps)
+        )
         res = perform_test(picks)
         if res is True:
             known_good.update(partition)
@@ -184,7 +182,7 @@ def extract_functions(file):
         if marker != -1:
             if in_function is not None:
                 warn("Missing end of function %s" % (in_function,))
-            funcname = line[marker + 19:-1]
+            funcname = line[marker + 19 : -1]
             in_function = funcname
             text = line
             continue
@@ -210,7 +208,7 @@ def replace_functions(source, dest, replacements):
         if marker != -1:
             if in_function is not None:
                 warn("Missing end of function %s" % (in_function,))
-            funcname = line[marker + 19:-1]
+            funcname = line[marker + 19 : -1]
             in_function = funcname
             replacement = replacements.get(in_function)
             if replacement is not None:
@@ -229,7 +227,10 @@ def replace_functions(source, dest, replacements):
 
 
 def testrun(files):
-    linkline = "%s %s" % (LINKTEST, " ".join(files),)
+    linkline = "%s %s" % (
+        LINKTEST,
+        " ".join(files),
+    )
     res = subprocess.call(linkline, shell=True)
     if res != 0:
         announce_result(FAILED + ": '%s' exitcode != 0" % LINKTEST)
@@ -244,12 +245,13 @@ def prepare_files(gooddir, baddir, rspfile):
     files_b = []
 
     if rspfile is not None:
+
         def get_basename(name):
             # remove prefix
             if name.startswith(gooddir):
-                return name[len(gooddir):]
+                return name[len(gooddir) :]
             if name.startswith(baddir):
-                return name[len(baddir):]
+                return name[len(baddir) :]
             assert False, ""
 
         with open(rspfile, "r") as rf:
@@ -269,15 +271,13 @@ def get_basename(name):
     for name in files_b:
         basename = get_basename(name)
         if basename not in basenames_a:
-            warn("There is no corresponding file to '%s' in %s" %
-                 (name, gooddir))
+            warn("There is no corresponding file to '%s' in %s" % (name, gooddir))
     choices = []
     skipped = []
     for name in files_a:
         basename = get_basename(name)
         if basename not in basenames_b:
-            warn("There is no corresponding file to '%s' in %s" %
-                 (name, baddir))
+            warn("There is no corresponding file to '%s' in %s" % (name, baddir))
 
         file_a = gooddir + "/" + basename
         file_b = baddir + "/" + basename
@@ -307,8 +307,7 @@ def perform_test(picks):
         # If response file is used, create a temporary response file for the
         # picked files.
         if rspfile is not None:
-            with tempfile.NamedTemporaryFile('w', suffix='.rsp',
-                                             delete=False) as tf:
+            with tempfile.NamedTemporaryFile("w", suffix=".rsp", delete=False) as tf:
                 tf.write(" ".join(files))
                 tf.flush()
             ret = testrun([tf.name])
@@ -346,7 +345,7 @@ def prepare_functions(to_check, gooddir, goodfile, badfile):
     if len(skipped) > 0:
         info("Skipped (same content): %s" % format_namelist(skipped))
 
-    combined_file = '/tmp/combined2.s'
+    combined_file = "/tmp/combined2.s"
     files = []
     found_good_file = False
     for c in files_good:
@@ -362,21 +361,21 @@ def perform_test(picks):
             assert x == functions_a_map[name] or x == functions_b_map[name]
         replace_functions(goodfile, combined_file, picks)
         return testrun(files)
+
     return perform_test, choices
 
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('--a', dest='dir_a', default='before')
-    parser.add_argument('--b', dest='dir_b', default='after')
-    parser.add_argument('--rsp', default=None)
-    parser.add_argument('--test', default='./link_test')
-    parser.add_argument('--insane', help='Skip sanity check',
-                        action='store_true')
-    parser.add_argument('--seq',
-                        help='Check sequentially instead of bisection',
-                        action='store_true')
-    parser.add_argument('file', metavar='file', nargs='?')
+    parser.add_argument("--a", dest="dir_a", default="before")
+    parser.add_argument("--b", dest="dir_b", default="after")
+    parser.add_argument("--rsp", default=None)
+    parser.add_argument("--test", default="./link_test")
+    parser.add_argument("--insane", help="Skip sanity check", action="store_true")
+    parser.add_argument(
+        "--seq", help="Check sequentially instead of bisection", action="store_true"
+    )
+    parser.add_argument("file", metavar="file", nargs="?")
     config = parser.parse_args()
 
     gooddir = config.dir_a
@@ -391,8 +390,9 @@ def main():
     if config.file is not None:
         goodfile = gooddir + "/" + config.file
         badfile = baddir + "/" + config.file
-        perform_test, choices = prepare_functions(config.file, gooddir,
-                                                  goodfile, badfile)
+        perform_test, choices = prepare_functions(
+            config.file, gooddir, goodfile, badfile
+        )
     else:
         perform_test, choices = prepare_files(gooddir, baddir, rspfile)
 
@@ -423,5 +423,5 @@ def main():
         stderr.write("Could not identify failing parts?!?")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff --git a/llvm/utils/add_argument_names.py b/llvm/utils/add_argument_names.py
index 38dde25997941..2860dd4b90bcb 100755
--- a/llvm/utils/add_argument_names.py
+++ b/llvm/utils/add_argument_names.py
@@ -1,55 +1,59 @@
 #!/usr/bin/env python3
 import re, sys
 
+
 def fix_string(s):
-    TYPE = re.compile('\s*(i[0-9]+|float|double|x86_fp80|fp128|ppc_fp128|\[\[.*?\]\]|\[2 x \[\[[A-Z_0-9]+\]\]\]|<.*?>|{.*?}|\[[0-9]+ x .*?\]|%["a-z:A-Z0-9._]+({{.*?}})?|%{{.*?}}|{{.*?}}|\[\[.*?\]\])(\s*(\*|addrspace\(.*?\)|dereferenceable\(.*?\)|byval\(.*?\)|sret|zeroext|inreg|returned|signext|nocapture|align \d+|swiftself|swifterror|readonly|noalias|inalloca|nocapture))*\s*')
+    TYPE = re.compile(
+        '\s*(i[0-9]+|float|double|x86_fp80|fp128|ppc_fp128|\[\[.*?\]\]|\[2 x \[\[[A-Z_0-9]+\]\]\]|<.*?>|{.*?}|\[[0-9]+ x .*?\]|%["a-z:A-Z0-9._]+({{.*?}})?|%{{.*?}}|{{.*?}}|\[\[.*?\]\])(\s*(\*|addrspace\(.*?\)|dereferenceable\(.*?\)|byval\(.*?\)|sret|zeroext|inreg|returned|signext|nocapture|align \d+|swiftself|swifterror|readonly|noalias|inalloca|nocapture))*\s*'
+    )
 
     counter = 0
-    if 'i32{{.*}}' in s:
+    if "i32{{.*}}" in s:
         counter = 1
 
-    at_pos = s.find('@')
+    at_pos = s.find("@")
     if at_pos == -1:
         at_pos = 0
 
-    annoying_pos = s.find('{{[^(]+}}')
+    annoying_pos = s.find("{{[^(]+}}")
     if annoying_pos != -1:
         at_pos = annoying_pos + 9
 
-    paren_pos = s.find('(', at_pos)
+    paren_pos = s.find("(", at_pos)
     if paren_pos == -1:
         return s
 
-    res = s[:paren_pos+1]
-    s = s[paren_pos+1:]
+    res = s[: paren_pos + 1]
+    s = s[paren_pos + 1 :]
 
     m = TYPE.match(s)
     while m:
         res += m.group()
-        s = s[m.end():]
-        if s.startswith(',') or s.startswith(')'):
-            res += f' %{counter}'
+        s = s[m.end() :]
+        if s.startswith(",") or s.startswith(")"):
+            res += f" %{counter}"
             counter += 1
 
-        next_arg = s.find(',')
+        next_arg = s.find(",")
         if next_arg == -1:
             break
 
-        res += s[:next_arg+1]
-        s = s[next_arg+1:]
+        res += s[: next_arg + 1]
+        s = s[next_arg + 1 :]
         m = TYPE.match(s)
 
-    return res+s
+    return res + s
+
 
 def process_file(contents):
-    PREFIX = re.compile(r'check-prefix(es)?(=|\s+)([a-zA-Z0-9,]+)')
-    check_prefixes = ['CHECK']
-    result = ''
-    for line in contents.split('\n'):
-        if 'FileCheck' in line:
+    PREFIX = re.compile(r"check-prefix(es)?(=|\s+)([a-zA-Z0-9,]+)")
+    check_prefixes = ["CHECK"]
+    result = ""
+    for line in contents.split("\n"):
+        if "FileCheck" in line:
             m = PREFIX.search(line)
             if m:
-                check_prefixes.extend(m.group(3).split(','))
+                check_prefixes.extend(m.group(3).split(","))
 
         found_check = False
         for prefix in check_prefixes:
@@ -57,26 +61,28 @@ def process_file(contents):
                 found_check = True
                 break
 
-        if not found_check or 'define' not in line:
-            result += line + '\n'
+        if not found_check or "define" not in line:
+            result += line + "\n"
             continue
 
         # We have a check for a function definition. Number the args.
         line = fix_string(line)
-        result += line + '\n'
+        result += line + "\n"
     return result
 
+
 def main():
-    print(f'Processing {sys.argv[1]}')
+    print(f"Processing {sys.argv[1]}")
     f = open(sys.argv[1])
     content = f.read()
     f.close()
 
     content = process_file(content)
 
-    f = open(sys.argv[1], 'w')
+    f = open(sys.argv[1], "w")
     f.write(content)
     f.close()
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

diff --git a/llvm/utils/bugpoint_gisel_reducer.py b/llvm/utils/bugpoint_gisel_reducer.py
index 4c366efbba497..116ec792e921d 100755
--- a/llvm/utils/bugpoint_gisel_reducer.py
+++ b/llvm/utils/bugpoint_gisel_reducer.py
@@ -24,40 +24,38 @@ def log(msg):
 
 
 def hr():
-    log('-' * 50)
+    log("-" * 50)
 
 
 def log_err(msg):
-    print('ERROR: {}'.format(msg), file=sys.stderr)
+    print("ERROR: {}".format(msg), file=sys.stderr)
 
 
 def check_path(path):
     if not os.path.exists(path):
-        log_err('{} does not exist.'.format(path))
+        log_err("{} does not exist.".format(path))
         raise
     return path
 
 
 def check_bin(build_dir, bin_name):
-    file_name = '{}/bin/{}'.format(build_dir, bin_name)
+    file_name = "{}/bin/{}".format(build_dir, bin_name)
     return check_path(file_name)
 
 
 def run_llc(llc, irfile):
-    pr = subprocess.Popen([llc,
-                           '-o',
-                           '-',
-                           '-global-isel',
-                           '-pass-remarks-missed=gisel',
-                           irfile],
-                          stdout=subprocess.PIPE,
-                          stderr=subprocess.PIPE)
+    pr = subprocess.Popen(
+        [llc, "-o", "-", "-global-isel", "-pass-remarks-missed=gisel", irfile],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
     out, err = pr.communicate()
     res = pr.wait()
     if res == 0:
         return 0
     re_err = re.compile(
-        r'LLVM ERROR: ([a-z\s]+):.*(G_INTRINSIC[_A-Z]* <intrinsic:@[a-zA-Z0-9\.]+>|G_[A-Z_]+)')
+        r"LLVM ERROR: ([a-z\s]+):.*(G_INTRINSIC[_A-Z]* <intrinsic:@[a-zA-Z0-9\.]+>|G_[A-Z_]+)"
+    )
     match = re_err.match(err)
     if not match:
         return 0
@@ -66,13 +64,18 @@ def run_llc(llc, irfile):
 
 
 def run_bugpoint(bugpoint_bin, llc_bin, opt_bin, tmp, ir_file):
-    compileCmd = '-compile-command={} -c {} {}'.format(
-        os.path.realpath(__file__), llc_bin, tmp)
-    pr = subprocess.Popen([bugpoint_bin,
-                           '-compile-custom',
-                           compileCmd,
-                           '-opt-command={}'.format(opt_bin),
-                           ir_file])
+    compileCmd = "-compile-command={} -c {} {}".format(
+        os.path.realpath(__file__), llc_bin, tmp
+    )
+    pr = subprocess.Popen(
+        [
+            bugpoint_bin,
+            "-compile-custom",
+            compileCmd,
+            "-opt-command={}".format(opt_bin),
+            ir_file,
+        ]
+    )
     res = pr.wait()
     if res != 0:
         log_err("Unable to reduce the test.")
@@ -83,13 +86,13 @@ def run_bugpoint_check():
     path_to_llc = sys.argv[2]
     path_to_err = sys.argv[3]
     path_to_ir = sys.argv[4]
-    with open(path_to_err, 'r') as f:
+    with open(path_to_err, "r") as f:
         err = f.read()
         res = run_llc(path_to_llc, path_to_ir)
         if res == 0:
             return 0
-        log('GlobalISed failed, {}: {}'.format(res[0], res[1]))
-        if res != err.split(';'):
+        log("GlobalISed failed, {}: {}".format(res[0], res[1]))
+        if res != err.split(";"):
             return 0
         else:
             return 1
@@ -97,50 +100,53 @@ def run_bugpoint_check():
 
 def main():
     # Check if this is called by bugpoint.
-    if len(sys.argv) == 5 and sys.argv[1] == '-c':
+    if len(sys.argv) == 5 and sys.argv[1] == "-c":
         sys.exit(run_bugpoint_check())
 
     # Parse arguments.
     parser = argparse.ArgumentParser(
-        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
-    parser.add_argument('BuildDir', help="Path to LLVM build directory")
-    parser.add_argument('IRFile', help="Path to the input IR file")
+        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument("BuildDir", help="Path to LLVM build directory")
+    parser.add_argument("IRFile", help="Path to the input IR file")
     args = parser.parse_args()
 
     # Check if the binaries exist.
     build_dir = check_path(args.BuildDir)
     ir_file = check_path(args.IRFile)
-    llc_bin = check_bin(build_dir, 'llc')
-    opt_bin = check_bin(build_dir, 'opt')
-    bugpoint_bin = check_bin(build_dir, 'bugpoint')
+    llc_bin = check_bin(build_dir, "llc")
+    opt_bin = check_bin(build_dir, "opt")
+    bugpoint_bin = check_bin(build_dir, "bugpoint")
 
     # Run llc to see if GlobalISel fails.
-    log('Running llc...')
+    log("Running llc...")
     res = run_llc(llc_bin, ir_file)
     if res == 0:
         log_err("Expected failure")
         raise
     hr()
-    log('GlobalISel failed, {}: {}.'.format(res[0], res[1]))
+    log("GlobalISel failed, {}: {}.".format(res[0], res[1]))
     tmp = tempfile.NamedTemporaryFile()
-    log('Writing error to {} for bugpoint.'.format(tmp.name))
-    tmp.write(';'.join(res))
+    log("Writing error to {} for bugpoint.".format(tmp.name))
+    tmp.write(";".join(res))
     tmp.flush()
     hr()
 
     # Run bugpoint.
-    log('Running bugpoint...')
+    log("Running bugpoint...")
     run_bugpoint(bugpoint_bin, llc_bin, opt_bin, tmp.name, ir_file)
     hr()
-    log('Done!')
+    log("Done!")
     hr()
-    output_file = 'bugpoint-reduced-simplified.bc'
-    log('Run llvm-dis to disassemble the output:')
-    log('$ {}/bin/llvm-dis -o - {}'.format(build_dir, output_file))
-    log('Run llc to reproduce the problem:')
-    log('$ {}/bin/llc -o - -global-isel '
-        '-pass-remarks-missed=gisel {}'.format(build_dir, output_file))
+    output_file = "bugpoint-reduced-simplified.bc"
+    log("Run llvm-dis to disassemble the output:")
+    log("$ {}/bin/llvm-dis -o - {}".format(build_dir, output_file))
+    log("Run llc to reproduce the problem:")
+    log(
+        "$ {}/bin/llc -o - -global-isel "
+        "-pass-remarks-missed=gisel {}".format(build_dir, output_file)
+    )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff --git a/llvm/utils/check_ninja_deps.py b/llvm/utils/check_ninja_deps.py
index d19c470d21204..e6b5b1ce008f7 100755
--- a/llvm/utils/check_ninja_deps.py
+++ b/llvm/utils/check_ninja_deps.py
@@ -58,6 +58,7 @@
 import subprocess
 import pygraphviz
 
+
 def toposort(g):
     """Topologically sort a graph.
 
@@ -88,7 +89,8 @@ def toposort(g):
                 # If that counter reaches zero, w is ready to output.
                 ready.add(w)
 
-def ancestors(g, translate = lambda x: x):
+
+def ancestors(g, translate=lambda x: x):
     """Form the set of ancestors for each vertex of a graph.
 
     The input g is a pygraphviz graph object representing a DAG. The function
@@ -107,7 +109,7 @@ def ancestors(g, translate = lambda x: x):
         vm = translate(v)
 
         # Make up a[v], based on a[predecessors of v].
-        a[v] = {vm} # include v itself
+        a[v] = {vm}  # include v itself
         for w in g.in_neighbors(v):
             a[v].update(a[w])
 
@@ -115,14 +117,16 @@ def ancestors(g, translate = lambda x: x):
         # doesn't get the trivial dependency of v on itself.
         yield vm, a[v].difference({vm})
 
+
 def main():
     parser = argparse.ArgumentParser(
-        description='Find missing formal dependencies on generated include '
-        'files in a build.ninja file.')
-    parser.add_argument("-C", "--build-dir",
-                        help="Build directory (default cwd)")
-    parser.add_argument("-f", "--build-file",
-                        help="Build directory (default build.ninja)")
+        description="Find missing formal dependencies on generated include "
+        "files in a build.ninja file."
+    )
+    parser.add_argument("-C", "--build-dir", help="Build directory (default cwd)")
+    parser.add_argument(
+        "-f", "--build-file", help="Build directory (default build.ninja)"
+    )
     args = parser.parse_args()
 
     errs = 0
@@ -134,8 +138,9 @@ def main():
         ninja_prefix.extend(["-f", args.build_file])
 
     # Get the formal dependency graph and decode it using pygraphviz.
-    g = pygraphviz.AGraph(subprocess.check_output(
-        ninja_prefix + ["-t", "graph"]).decode("UTF-8"))
+    g = pygraphviz.AGraph(
+        subprocess.check_output(ninja_prefix + ["-t", "graph"]).decode("UTF-8")
+    )
 
     # Helper function to ask for the label of a vertex, which is where ninja's
     # Graphviz output keeps the actual file name of the target.
@@ -153,8 +158,11 @@ def main():
     # Fetch the cached dependency data and check it against our formal ancestry
     # data.
     currtarget = None
-    for line in (subprocess.check_output(ninja_prefix + ["-t", "deps"])
-                 .decode("UTF-8").splitlines()):
+    for line in (
+        subprocess.check_output(ninja_prefix + ["-t", "deps"])
+        .decode("UTF-8")
+        .splitlines()
+    ):
         # ninja -t deps output consists of stanzas of the following form,
         # separated by a blank line:
         #
@@ -176,10 +184,15 @@ def main():
             # cache is not cleared when build.ninja changes, so it can contain
             # stale data from targets that existed only in past builds in the
             # same directory.
-            if (dep in targets and currtarget in deps and
-                dep not in deps[currtarget]):
-                print("error:", currtarget, "requires", dep,
-                      "but has no dependency on it", file=sys.stderr)
+            if dep in targets and currtarget in deps and dep not in deps[currtarget]:
+                print(
+                    "error:",
+                    currtarget,
+                    "requires",
+                    dep,
+                    "but has no dependency on it",
+                    file=sys.stderr,
+                )
                 errs += 1
         elif ":" in line:
             currtarget = line.split(":", 1)[0]
@@ -187,5 +200,6 @@ def main():
     if errs:
         sys.exit("{:d} errors found".format(errs))
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

diff --git a/llvm/utils/chunk-print-before-all.py b/llvm/utils/chunk-print-before-all.py
index b5756c59c2719..fe0eaaea1c20d 100755
--- a/llvm/utils/chunk-print-before-all.py
+++ b/llvm/utils/chunk-print-before-all.py
@@ -1,9 +1,9 @@
 #!/usr/bin/env python
 
-# Given a -print-before-all and/or -print-after-all -print-module-scope log from 
-# an opt invocation, chunk it into a series of individual IR files, one for each 
-# pass invocation. If the log ends with an obvious stack trace, try to split off 
-# a separate "crashinfo.txt" file leaving only the valid input IR in the last 
+# Given a -print-before-all and/or -print-after-all -print-module-scope log from
+# an opt invocation, chunk it into a series of individual IR files, one for each
+# pass invocation. If the log ends with an obvious stack trace, try to split off
+# a separate "crashinfo.txt" file leaving only the valid input IR in the last
 # chunk. Files are written to current working directory.
 
 import sys
@@ -14,8 +14,9 @@
 # This function gets the pass name from the following line:
 # *** IR Dump Before/After PASS_NAME... ***
 def get_pass_name(line, prefix):
-    short_line = line[line.find(prefix) + len(prefix) + 1:]
-    return re.split(' |<', short_line)[0]
+    short_line = line[line.find(prefix) + len(prefix) + 1 :]
+    return re.split(" |<", short_line)[0]
+
 
 def print_chunk(lines, prefix, pass_name):
     global chunk_id
@@ -25,6 +26,7 @@ def print_chunk(lines, prefix, pass_name):
     with open(fname, "w") as f:
         f.writelines(lines)
 
+
 is_dump = False
 cur = []
 for line in sys.stdin:

diff --git a/llvm/utils/collect_and_build_with_pgo.py b/llvm/utils/collect_and_build_with_pgo.py
index 0851b91b0283c..4c30dc876411e 100755
--- a/llvm/utils/collect_and_build_with_pgo.py
+++ b/llvm/utils/collect_and_build_with_pgo.py
@@ -36,7 +36,7 @@
 # in to build more things, if you'd like.
 def _run_benchmark(env, out_dir, include_debug_info):
     """The 'benchmark' we run to generate profile data."""
-    target_dir = env.output_subdir('instrumentation_run')
+    target_dir = env.output_subdir("instrumentation_run")
 
     # `check-llvm` and `check-clang` are cheap ways to increase coverage. The
     # former lets us touch on the non-x86 backends a bit if configured, and the
@@ -44,34 +44,34 @@ def _run_benchmark(env, out_dir, include_debug_info):
     # paths a fair amount, though the `if (stuff_is_broken) { diag() ... }`
     # branches should still heavily be weighted in the not-taken direction,
     # since we built all of LLVM/etc).
-    _build_things_in(env, out_dir, what=['check-llvm', 'check-clang'])
+    _build_things_in(env, out_dir, what=["check-llvm", "check-clang"])
 
     # Building tblgen gets us coverage; don't skip it. (out_dir may also not
     # have them anyway, but that's less of an issue)
-    cmake = _get_cmake_invocation_for_bootstrap_from(
-        env, out_dir, skip_tablegens=False)
+    cmake = _get_cmake_invocation_for_bootstrap_from(env, out_dir, skip_tablegens=False)
 
     if include_debug_info:
-        cmake.add_flag('CMAKE_BUILD_TYPE', 'RelWithDebInfo')
+        cmake.add_flag("CMAKE_BUILD_TYPE", "RelWithDebInfo")
 
     _run_fresh_cmake(env, cmake, target_dir)
 
     # Just build all the things. The more data we have, the better.
-    _build_things_in(env, target_dir, what=['all'])
+    _build_things_in(env, target_dir, what=["all"])
+
 
 ### Script
 
 
 class CmakeInvocation:
-    _cflags = ['CMAKE_C_FLAGS', 'CMAKE_CXX_FLAGS']
+    _cflags = ["CMAKE_C_FLAGS", "CMAKE_CXX_FLAGS"]
     _ldflags = [
-        'CMAKE_EXE_LINKER_FLAGS',
-        'CMAKE_MODULE_LINKER_FLAGS',
-        'CMAKE_SHARED_LINKER_FLAGS',
+        "CMAKE_EXE_LINKER_FLAGS",
+        "CMAKE_MODULE_LINKER_FLAGS",
+        "CMAKE_SHARED_LINKER_FLAGS",
     ]
 
     def __init__(self, cmake, maker, cmake_dir):
-        self._prefix = [cmake, '-G', maker, cmake_dir]
+        self._prefix = [cmake, "-G", maker, cmake_dir]
 
         # Map of str -> (list|str).
         self._flags = {}
@@ -92,7 +92,7 @@ def add_flag(self, key, value, allow_overwrites=True):
             return
 
         if not allow_overwrites:
-            raise ValueError('Invalid overwrite of %s requested' % key)
+            raise ValueError("Invalid overwrite of %s requested" % key)
 
         self._flags[key] = value
 
@@ -115,18 +115,17 @@ def to_args(self):
                 # nothing to add, don't.
                 if not value:
                     continue
-                value = ' '.join(value)
+                value = " ".join(value)
 
-            arg = '-D' + key
-            if value != '':
-                arg += '=' + value
+            arg = "-D" + key
+            if value != "":
+                arg += "=" + value
             args.append(arg)
         return args
 
 
 class Env:
-    def __init__(self, llvm_dir, use_make, output_dir, default_cmake_args,
-                 dry_run):
+    def __init__(self, llvm_dir, use_make, output_dir, default_cmake_args, dry_run):
         self.llvm_dir = llvm_dir
         self.use_make = use_make
         self.output_dir = output_dir
@@ -137,35 +136,30 @@ def get_default_cmake_args_kv(self):
         return self.default_cmake_args.items()
 
     def get_cmake_maker(self):
-        return 'Ninja' if not self.use_make else 'Unix Makefiles'
+        return "Ninja" if not self.use_make else "Unix Makefiles"
 
     def get_make_command(self):
         if self.use_make:
-            return ['make', '-j{}'.format(multiprocessing.cpu_count())]
-        return ['ninja']
+            return ["make", "-j{}".format(multiprocessing.cpu_count())]
+        return ["ninja"]
 
     def output_subdir(self, name):
         return os.path.join(self.output_dir, name)
 
     def has_llvm_subproject(self, name):
-        if name == 'compiler-rt':
-            subdir = '../compiler-rt'
-        elif name == 'clang':
-            subdir = '../clang'
+        if name == "compiler-rt":
+            subdir = "../compiler-rt"
+        elif name == "clang":
+            subdir = "../clang"
         else:
-            raise ValueError('Unknown subproject: %s' % name)
+            raise ValueError("Unknown subproject: %s" % name)
 
         return os.path.isdir(os.path.join(self.llvm_dir, subdir))
 
     # Note that we don't allow capturing stdout/stderr. This works quite nicely
     # with dry_run.
-    def run_command(self,
-                    cmd,
-                    cwd=None,
-                    check=False,
-                    silent_unless_error=False):
-        print(
-            'Running `%s` in %s' % (cmd, shlex.quote(cwd or os.getcwd())))
+    def run_command(self, cmd, cwd=None, check=False, silent_unless_error=False):
+        print("Running `%s` in %s" % (cmd, shlex.quote(cwd or os.getcwd())))
 
         if self.dry_run:
             return
@@ -178,11 +172,8 @@ def run_command(self,
         # Don't use subprocess.run because it's >= py3.5 only, and it's not too
         # much extra effort to get what it gives us anyway.
         popen = subprocess.Popen(
-            cmd,
-            stdin=subprocess.DEVNULL,
-            stdout=stdout,
-            stderr=stderr,
-            cwd=cwd)
+            cmd, stdin=subprocess.DEVNULL, stdout=stdout, stderr=stderr, cwd=cwd
+        )
         stdout, _ = popen.communicate()
         return_code = popen.wait(timeout=0)
 
@@ -190,32 +181,33 @@ def run_command(self,
             return
 
         if silent_unless_error:
-            print(stdout.decode('utf-8', 'ignore'))
+            print(stdout.decode("utf-8", "ignore"))
 
         if check:
             raise subprocess.CalledProcessError(
-                returncode=return_code, cmd=cmd, output=stdout, stderr=None)
+                returncode=return_code, cmd=cmd, output=stdout, stderr=None
+            )
 
 
 def _get_default_cmake_invocation(env):
     inv = CmakeInvocation(
-        cmake='cmake', maker=env.get_cmake_maker(), cmake_dir=env.llvm_dir)
+        cmake="cmake", maker=env.get_cmake_maker(), cmake_dir=env.llvm_dir
+    )
     for key, value in env.get_default_cmake_args_kv():
         inv.add_new_flag(key, value)
     return inv
 
 
-def _get_cmake_invocation_for_bootstrap_from(env, out_dir,
-                                             skip_tablegens=True):
-    clang = os.path.join(out_dir, 'bin', 'clang')
+def _get_cmake_invocation_for_bootstrap_from(env, out_dir, skip_tablegens=True):
+    clang = os.path.join(out_dir, "bin", "clang")
     cmake = _get_default_cmake_invocation(env)
-    cmake.add_new_flag('CMAKE_C_COMPILER', clang)
-    cmake.add_new_flag('CMAKE_CXX_COMPILER', clang + '++')
+    cmake.add_new_flag("CMAKE_C_COMPILER", clang)
+    cmake.add_new_flag("CMAKE_CXX_COMPILER", clang + "++")
 
     # We often get no value out of building new tblgens; the previous build
     # should have them. It's still correct to build them, just slower.
     def add_tablegen(key, binary):
-        path = os.path.join(out_dir, 'bin', binary)
+        path = os.path.join(out_dir, "bin", binary)
 
         # Check that this exists, since the user's allowed to specify their own
         # stage1 directory (which is generally where we'll source everything
@@ -224,8 +216,8 @@ def add_tablegen(key, binary):
             cmake.add_new_flag(key, path)
 
     if skip_tablegens:
-        add_tablegen('LLVM_TABLEGEN', 'llvm-tblgen')
-        add_tablegen('CLANG_TABLEGEN', 'clang-tblgen')
+        add_tablegen("LLVM_TABLEGEN", "llvm-tblgen")
+        add_tablegen("CLANG_TABLEGEN", "clang-tblgen")
 
     return cmake
 
@@ -245,146 +237,160 @@ def _run_fresh_cmake(env, cmake, target_dir):
         os.makedirs(target_dir, mode=0o755)
 
     cmake_args = cmake.to_args()
-    env.run_command(
-        cmake_args, cwd=target_dir, check=True, silent_unless_error=True)
+    env.run_command(cmake_args, cwd=target_dir, check=True, silent_unless_error=True)
 
 
 def _build_stage1_clang(env):
-    target_dir = env.output_subdir('stage1')
+    target_dir = env.output_subdir("stage1")
     cmake = _get_default_cmake_invocation(env)
     _run_fresh_cmake(env, cmake, target_dir)
-    _build_things_in(env, target_dir, what=['clang', 'llvm-profdata', 'profile'])
+    _build_things_in(env, target_dir, what=["clang", "llvm-profdata", "profile"])
     return target_dir
 
 
-def _generate_instrumented_clang_profile(env, stage1_dir, profile_dir,
-                                         output_file):
-    llvm_profdata = os.path.join(stage1_dir, 'bin', 'llvm-profdata')
+def _generate_instrumented_clang_profile(env, stage1_dir, profile_dir, output_file):
+    llvm_profdata = os.path.join(stage1_dir, "bin", "llvm-profdata")
     if env.dry_run:
-        profiles = [os.path.join(profile_dir, '*.profraw')]
+        profiles = [os.path.join(profile_dir, "*.profraw")]
     else:
         profiles = [
-            os.path.join(profile_dir, f) for f in os.listdir(profile_dir)
-            if f.endswith('.profraw')
+            os.path.join(profile_dir, f)
+            for f in os.listdir(profile_dir)
+            if f.endswith(".profraw")
         ]
-    cmd = [llvm_profdata, 'merge', '-output=' + output_file] + profiles
+    cmd = [llvm_profdata, "merge", "-output=" + output_file] + profiles
     env.run_command(cmd, check=True)
 
 
 def _build_instrumented_clang(env, stage1_dir):
     assert os.path.isabs(stage1_dir)
 
-    target_dir = os.path.join(env.output_dir, 'instrumented')
+    target_dir = os.path.join(env.output_dir, "instrumented")
     cmake = _get_cmake_invocation_for_bootstrap_from(env, stage1_dir)
-    cmake.add_new_flag('LLVM_BUILD_INSTRUMENTED', 'IR')
+    cmake.add_new_flag("LLVM_BUILD_INSTRUMENTED", "IR")
 
     # libcxx's configure step messes with our link order: we'll link
     # libclang_rt.profile after libgcc, and the former requires atexit from the
     # latter. So, configure checks fail.
     #
     # Since we don't need libcxx or compiler-rt anyway, just disable them.
-    cmake.add_new_flag('LLVM_BUILD_RUNTIME', 'No')
+    cmake.add_new_flag("LLVM_BUILD_RUNTIME", "No")
 
     _run_fresh_cmake(env, cmake, target_dir)
-    _build_things_in(env, target_dir, what=['clang', 'lld'])
+    _build_things_in(env, target_dir, what=["clang", "lld"])
 
-    profiles_dir = os.path.join(target_dir, 'profiles')
+    profiles_dir = os.path.join(target_dir, "profiles")
     return target_dir, profiles_dir
 
 
 def _build_optimized_clang(env, stage1_dir, profdata_file):
     if not env.dry_run and not os.path.exists(profdata_file):
-        raise ValueError('Looks like the profdata file at %s doesn\'t exist' %
-                         profdata_file)
+        raise ValueError(
+            "Looks like the profdata file at %s doesn't exist" % profdata_file
+        )
 
-    target_dir = os.path.join(env.output_dir, 'optimized')
+    target_dir = os.path.join(env.output_dir, "optimized")
     cmake = _get_cmake_invocation_for_bootstrap_from(env, stage1_dir)
-    cmake.add_new_flag('LLVM_PROFDATA_FILE', os.path.abspath(profdata_file))
+    cmake.add_new_flag("LLVM_PROFDATA_FILE", os.path.abspath(profdata_file))
 
     # We'll get complaints about hash mismatches in `main` in tools/etc. Ignore
     # it.
-    cmake.add_cflags(['-Wno-backend-plugin'])
+    cmake.add_cflags(["-Wno-backend-plugin"])
     _run_fresh_cmake(env, cmake, target_dir)
-    _build_things_in(env, target_dir, what=['clang'])
+    _build_things_in(env, target_dir, what=["clang"])
     return target_dir
 
 
-Args = collections.namedtuple('Args', [
-    'do_optimized_build',
-    'include_debug_info',
-    'profile_location',
-    'stage1_dir',
-])
+Args = collections.namedtuple(
+    "Args",
+    [
+        "do_optimized_build",
+        "include_debug_info",
+        "profile_location",
+        "stage1_dir",
+    ],
+)
 
 
 def _parse_args():
     parser = argparse.ArgumentParser(
-        description='Builds LLVM and Clang with instrumentation, collects '
-        'instrumentation profiles for them, and (optionally) builds things '
-        'with these PGO profiles. By default, it\'s assumed that you\'re '
-        'running this from your LLVM root, and all build artifacts will be '
-        'saved to $PWD/out.')
+        description="Builds LLVM and Clang with instrumentation, collects "
+        "instrumentation profiles for them, and (optionally) builds things "
+        "with these PGO profiles. By default, it's assumed that you're "
+        "running this from your LLVM root, and all build artifacts will be "
+        "saved to $PWD/out."
+    )
     parser.add_argument(
-        '--cmake-extra-arg',
-        action='append',
+        "--cmake-extra-arg",
+        action="append",
         default=[],
-        help='an extra arg to pass to all cmake invocations. Note that this '
-        'is interpreted as a -D argument, e.g. --cmake-extra-arg FOO=BAR will '
-        'be passed as -DFOO=BAR. This may be specified multiple times.')
+        help="an extra arg to pass to all cmake invocations. Note that this "
+        "is interpreted as a -D argument, e.g. --cmake-extra-arg FOO=BAR will "
+        "be passed as -DFOO=BAR. This may be specified multiple times.",
+    )
     parser.add_argument(
-        '--dry-run',
-        action='store_true',
-        help='print commands instead of running them')
+        "--dry-run", action="store_true", help="print commands instead of running them"
+    )
     parser.add_argument(
-        '--llvm-dir',
-        default='.',
-        help='directory containing an LLVM checkout (default: $PWD)')
+        "--llvm-dir",
+        default=".",
+        help="directory containing an LLVM checkout (default: $PWD)",
+    )
     parser.add_argument(
-        '--no-optimized-build',
-        action='store_true',
-        help='disable the final, PGO-optimized build')
+        "--no-optimized-build",
+        action="store_true",
+        help="disable the final, PGO-optimized build",
+    )
     parser.add_argument(
-        '--out-dir',
-        help='directory to write artifacts to (default: $llvm_dir/out)')
+        "--out-dir", help="directory to write artifacts to (default: $llvm_dir/out)"
+    )
     parser.add_argument(
-        '--profile-output',
-        help='where to output the profile (default is $out/pgo_profile.prof)')
+        "--profile-output",
+        help="where to output the profile (default is $out/pgo_profile.prof)",
+    )
     parser.add_argument(
-        '--stage1-dir',
-        help='instead of having an initial build of everything, use the given '
-        'directory. It is expected that this directory will have clang, '
-        'llvm-profdata, and the appropriate libclang_rt.profile already built')
+        "--stage1-dir",
+        help="instead of having an initial build of everything, use the given "
+        "directory. It is expected that this directory will have clang, "
+        "llvm-profdata, and the appropriate libclang_rt.profile already built",
+    )
     parser.add_argument(
-        '--use-debug-info-in-benchmark',
-        action='store_true',
-        help='use a regular build instead of RelWithDebInfo in the benchmark. '
-        'This increases benchmark execution time and disk space requirements, '
-        'but gives more coverage over debuginfo bits in LLVM and clang.')
+        "--use-debug-info-in-benchmark",
+        action="store_true",
+        help="use a regular build instead of RelWithDebInfo in the benchmark. "
+        "This increases benchmark execution time and disk space requirements, "
+        "but gives more coverage over debuginfo bits in LLVM and clang.",
+    )
     parser.add_argument(
-        '--use-make',
-        action='store_true',
-        default=shutil.which('ninja') is None,
-        help='use Makefiles instead of ninja')
+        "--use-make",
+        action="store_true",
+        default=shutil.which("ninja") is None,
+        help="use Makefiles instead of ninja",
+    )
 
     args = parser.parse_args()
 
     llvm_dir = os.path.abspath(args.llvm_dir)
     if args.out_dir is None:
-        output_dir = os.path.join(llvm_dir, 'out')
+        output_dir = os.path.join(llvm_dir, "out")
     else:
         output_dir = os.path.abspath(args.out_dir)
 
-    extra_args = {'CMAKE_BUILD_TYPE': 'Release',
-                  'LLVM_ENABLE_PROJECTS': 'clang;compiler-rt;lld'}
+    extra_args = {
+        "CMAKE_BUILD_TYPE": "Release",
+        "LLVM_ENABLE_PROJECTS": "clang;compiler-rt;lld",
+    }
     for arg in args.cmake_extra_arg:
-        if arg.startswith('-D'):
+        if arg.startswith("-D"):
             arg = arg[2:]
-        elif arg.startswith('-'):
-            raise ValueError('Unknown not- -D arg encountered; you may need '
-                             'to tweak the source...')
-        split = arg.split('=', 1)
+        elif arg.startswith("-"):
+            raise ValueError(
+                "Unknown not- -D arg encountered; you may need "
+                "to tweak the source..."
+            )
+        split = arg.split("=", 1)
         if len(split) == 1:
-            key, val = split[0], ''
+            key, val = split[0], ""
         else:
             key, val = split
         extra_args[key] = val
@@ -400,7 +406,7 @@ def _parse_args():
     if args.profile_output is not None:
         profile_location = args.profile_output
     else:
-        profile_location = os.path.join(env.output_dir, 'pgo_profile.prof')
+        profile_location = os.path.join(env.output_dir, "pgo_profile.prof")
 
     result_args = Args(
         do_optimized_build=not args.no_optimized_build,
@@ -419,26 +425,26 @@ def _looks_like_llvm_dir(directory):
 
     contents = set(os.listdir(directory))
     expected_contents = [
-        'CODE_OWNERS.TXT',
-        'cmake',
-        'docs',
-        'include',
-        'utils',
+        "CODE_OWNERS.TXT",
+        "cmake",
+        "docs",
+        "include",
+        "utils",
     ]
 
     if not all(c in contents for c in expected_contents):
         return False
 
     try:
-        include_listing = os.listdir(os.path.join(directory, 'include'))
+        include_listing = os.listdir(os.path.join(directory, "include"))
     except NotADirectoryError:
         return False
 
-    return 'llvm' in include_listing
+    return "llvm" in include_listing
 
 
 def _die(*args, **kwargs):
-    kwargs['file'] = sys.stderr
+    kwargs["file"] = sys.stderr
     print(*args, **kwargs)
     sys.exit(1)
 
@@ -447,37 +453,36 @@ def _main():
     env, args = _parse_args()
 
     if not _looks_like_llvm_dir(env.llvm_dir):
-        _die('Looks like %s isn\'t an LLVM directory; please see --help' %
-             env.llvm_dir)
-    if not env.has_llvm_subproject('clang'):
-        _die('Need a clang checkout at tools/clang')
-    if not env.has_llvm_subproject('compiler-rt'):
-        _die('Need a compiler-rt checkout at projects/compiler-rt')
+        _die("Looks like %s isn't an LLVM directory; please see --help" % env.llvm_dir)
+    if not env.has_llvm_subproject("clang"):
+        _die("Need a clang checkout at tools/clang")
+    if not env.has_llvm_subproject("compiler-rt"):
+        _die("Need a compiler-rt checkout at projects/compiler-rt")
 
     def status(*args):
         print(*args, file=sys.stderr)
 
     if args.stage1_dir is None:
-        status('*** Building stage1 clang...')
+        status("*** Building stage1 clang...")
         stage1_out = _build_stage1_clang(env)
     else:
         stage1_out = args.stage1_dir
 
-    status('*** Building instrumented clang...')
+    status("*** Building instrumented clang...")
     instrumented_out, profile_dir = _build_instrumented_clang(env, stage1_out)
-    status('*** Running profdata benchmarks...')
+    status("*** Running profdata benchmarks...")
     _run_benchmark(env, instrumented_out, args.include_debug_info)
-    status('*** Generating profile...')
-    _generate_instrumented_clang_profile(env, stage1_out, profile_dir,
-                                         args.profile_location)
+    status("*** Generating profile...")
+    _generate_instrumented_clang_profile(
+        env, stage1_out, profile_dir, args.profile_location
+    )
 
-    print('Final profile:', args.profile_location)
+    print("Final profile:", args.profile_location)
     if args.do_optimized_build:
-        status('*** Building PGO-optimized binaries...')
-        optimized_out = _build_optimized_clang(env, stage1_out,
-                                               args.profile_location)
-        print('Final build directory:', optimized_out)
+        status("*** Building PGO-optimized binaries...")
+        optimized_out = _build_optimized_clang(env, stage1_out, args.profile_location)
+        print("Final build directory:", optimized_out)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     _main()

diff  --git a/llvm/utils/convert-constraint-log-to-z3.py b/llvm/utils/convert-constraint-log-to-z3.py
index 77b0a3d95b6d4..a3c33f2ef4599 100755
--- a/llvm/utils/convert-constraint-log-to-z3.py
+++ b/llvm/utils/convert-constraint-log-to-z3.py
@@ -34,21 +34,23 @@
 
 def main():
     parser = argparse.ArgumentParser(
-        description='Convert constraint log to script to verify using Z3.')
-    parser.add_argument('log_file', metavar='log', type=str,
-                        help='constraint-system log file')
+        description="Convert constraint log to script to verify using Z3."
+    )
+    parser.add_argument(
+        "log_file", metavar="log", type=str, help="constraint-system log file"
+    )
     args = parser.parse_args()
 
-    content = ''
-    with open(args.log_file, 'rt') as f:
+    content = ""
+    with open(args.log_file, "rt") as f:
         content = f.read()
 
-    groups = content.split('---')
-    var_re = re.compile('x\d+')
+    groups = content.split("---")
+    var_re = re.compile("x\d+")
 
-    print('from z3 import *')
+    print("from z3 import *")
     for group in groups:
-        constraints = [g.strip() for g in group.split('\n') if g.strip() != '']
+        constraints = [g.strip() for g in group.split("\n") if g.strip() != ""]
         variables = set()
         for c in constraints[:-1]:
             for m in var_re.finditer(c):
@@ -57,13 +59,13 @@ def main():
             continue
         for v in variables:
             print('{} = Int("{}")'.format(v, v))
-        print('s = Solver()')
+        print("s = Solver()")
         for c in constraints[:-1]:
-            print('s.add({})'.format(c))
+            print("s.add({})".format(c))
         expected = constraints[-1].strip()
-        print('assert(s.check() == {})'.format(expected))
+        print("assert(s.check() == {})".format(expected))
     print('print("all checks passed")')
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff  --git a/llvm/utils/create_ladder_graph.py b/llvm/utils/create_ladder_graph.py
index a5946ff24af5a..f11aaf39bfa96 100755
--- a/llvm/utils/create_ladder_graph.py
+++ b/llvm/utils/create_ladder_graph.py
@@ -13,33 +13,37 @@
 from __future__ import print_function
 
 import argparse
+
+
 def main():
-  parser = argparse.ArgumentParser(description=__doc__)
-  parser.add_argument('rungs', type=int,
-                      help="Number of ladder rungs. Must be a multiple of 2")
-  args = parser.parse_args()
-  if (args.rungs % 2) != 0:
-    print("Rungs must be a multiple of 2")
-    return
-  print("int ladder(int *foo, int *bar, int x) {")
-  rung1 = range(0, args.rungs, 2)
-  rung2 = range(1, args.rungs, 2)
-  for i in rung1:
-    print("rung1%d:" % i)
-    print("*foo = x++;")
-    if i != rung1[-1]:
-      print("if (*bar) goto rung1%d;" % (i+2))
-      print("else goto rung2%d;" % (i+1))
-    else:
-      print("goto rung2%d;" % (i+1))
-  for i in rung2:
-    print("rung2%d:" % i)
-    print("*foo = x++;")
-    if i != rung2[-1]:
-      print("goto rung2%d;" % (i+2))
-    else:
-      print("return *foo;")
-  print("}")
-
-if __name__ == '__main__':
-  main()
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        "rungs", type=int, help="Number of ladder rungs. Must be a multiple of 2"
+    )
+    args = parser.parse_args()
+    if (args.rungs % 2) != 0:
+        print("Rungs must be a multiple of 2")
+        return
+    print("int ladder(int *foo, int *bar, int x) {")
+    rung1 = range(0, args.rungs, 2)
+    rung2 = range(1, args.rungs, 2)
+    for i in rung1:
+        print("rung1%d:" % i)
+        print("*foo = x++;")
+        if i != rung1[-1]:
+            print("if (*bar) goto rung1%d;" % (i + 2))
+            print("else goto rung2%d;" % (i + 1))
+        else:
+            print("goto rung2%d;" % (i + 1))
+    for i in rung2:
+        print("rung2%d:" % i)
+        print("*foo = x++;")
+        if i != rung2[-1]:
+            print("goto rung2%d;" % (i + 2))
+        else:
+            print("return *foo;")
+    print("}")
+
+
+if __name__ == "__main__":
+    main()

diff  --git a/llvm/utils/demangle_tree.py b/llvm/utils/demangle_tree.py
index 00de72b2a18d7..9d9603e63b832 100644
--- a/llvm/utils/demangle_tree.py
+++ b/llvm/utils/demangle_tree.py
@@ -18,21 +18,23 @@
 
 args = None
 
+
 def parse_line(line):
-    question = line.find('?')
+    question = line.find("?")
     if question == -1:
         return None, None
 
-    open_paren = line.find('(', question)
+    open_paren = line.find("(", question)
     if open_paren == -1:
         return None, None
-    close_paren = line.rfind(')', open_paren)
+    close_paren = line.rfind(")", open_paren)
     if open_paren == -1:
         return None, None
-    mangled = line[question : open_paren]
-    demangled = line[open_paren+1 : close_paren]
+    mangled = line[question:open_paren]
+    demangled = line[open_paren + 1 : close_paren]
     return mangled.strip(), demangled.strip()
 
+
 class Result(object):
     def __init__(self):
         self.crashed = []
@@ -41,6 +43,7 @@ def __init__(self):
         self.errors = set()
         self.nfiles = 0
 
+
 class MapContext(object):
     def __init__(self):
         self.rincomplete = None
@@ -48,18 +51,19 @@ def __init__(self):
         self.pending_objs = []
         self.npending = 0
 
+
 def process_file(path, objdump):
     r = Result()
     r.file = path
 
-    popen_args = [objdump, '-t', '-demangle', path]
+    popen_args = [objdump, "-t", "-demangle", path]
     p = subprocess.Popen(popen_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stdout, stderr = p.communicate()
     if p.returncode != 0:
         r.crashed = [r.file]
         return r
 
-    output = stdout.decode('utf-8')
+    output = stdout.decode("utf-8")
 
     for line in output.splitlines():
         mangled, demangled = parse_line(line)
@@ -70,15 +74,25 @@ def process_file(path, objdump):
             r.errors.add(mangled)
     return r
 
+
 def add_results(r1, r2):
     r1.crashed.extend(r2.crashed)
     r1.errors.update(r2.errors)
     r1.nsymbols += r2.nsymbols
     r1.nfiles += r2.nfiles
 
+
 def print_result_row(directory, result):
-    print("[{0} files, {1} crashes, {2} errors, {3} symbols]: '{4}'".format(
-        result.nfiles, len(result.crashed), len(result.errors), result.nsymbols, directory))
+    print(
+        "[{0} files, {1} crashes, {2} errors, {3} symbols]: '{4}'".format(
+            result.nfiles,
+            len(result.crashed),
+            len(result.errors),
+            result.nsymbols,
+            directory,
+        )
+    )
+
 
 def process_one_chunk(pool, chunk_size, objdump, context):
     objs = []
@@ -112,7 +126,7 @@ def process_one_chunk(pool, chunk_size, objdump, context):
 
         re.nfiles += ntaken
 
-    assert(len(objs) == chunk_size or context.npending == 0)
+    assert len(objs) == chunk_size or context.npending == 0
 
     copier = functools.partial(process_file, objdump=objdump)
     mapped_results = list(pool.map(copier, objs))
@@ -134,17 +148,18 @@ def process_one_chunk(pool, chunk_size, objdump, context):
         add_results(context.rcumulative, re)
         print_result_row(c, re)
 
+
 def process_pending_files(pool, chunk_size, objdump, context):
     while context.npending >= chunk_size:
         process_one_chunk(pool, chunk_size, objdump, context)
 
+
 def go():
     global args
 
     obj_dir = args.dir
-    extensions = args.extensions.split(',')
-    extensions = [x if x[0] == '.' else '.' + x for x in extensions]
-
+    extensions = args.extensions.split(",")
+    extensions = [x if x[0] == "." else "." + x for x in extensions]
 
     pool_size = 48
     pool = Pool(processes=pool_size)
@@ -178,7 +193,7 @@ def go():
             # `pool_size` tasks remaining.
             process_pending_files(pool, pool_size, args.objdump, context)
 
-        assert(context.npending < pool_size);
+        assert context.npending < pool_size
         process_one_chunk(pool, pool_size, args.objdump, context)
 
         total = context.rcumulative
@@ -186,43 +201,58 @@ def go():
         nsuccess = total.nsymbols - nfailed
         ncrashed = len(total.crashed)
 
-        if (nfailed > 0):
+        if nfailed > 0:
             print("Failures:")
             for m in sorted(total.errors):
                 print("  " + m)
-        if (ncrashed > 0):
+        if ncrashed > 0:
             print("Crashes:")
             for f in sorted(total.crashed):
                 print("  " + f)
         print("Summary:")
-        spct = float(nsuccess)/float(total.nsymbols)
-        fpct = float(nfailed)/float(total.nsymbols)
-        cpct = float(ncrashed)/float(nfiles)
+        spct = float(nsuccess) / float(total.nsymbols)
+        fpct = float(nfailed) / float(total.nsymbols)
+        cpct = float(ncrashed) / float(nfiles)
         print("Processed {0} object files.".format(nfiles))
-        print("{0}/{1} symbols successfully demangled ({2:.4%})".format(nsuccess, total.nsymbols, spct))
+        print(
+            "{0}/{1} symbols successfully demangled ({2:.4%})".format(
+                nsuccess, total.nsymbols, spct
+            )
+        )
         print("{0} symbols could not be demangled ({1:.4%})".format(nfailed, fpct))
         print("{0} files crashed while demangling ({1:.4%})".format(ncrashed, cpct))
-            
+
     except:
         traceback.print_exc()
 
     pool.close()
     pool.join()
 
-if __name__ == "__main__":
-    def_obj = 'obj' if sys.platform == 'win32' else 'o'
 
-    parser = argparse.ArgumentParser(description='Demangle all symbols in a tree of object files, looking for failures.')
-    parser.add_argument('dir', type=str, help='the root directory at which to start crawling')
-    parser.add_argument('--objdump', type=str, help='path to llvm-objdump.  If not specified ' +
-                        'the tool is located as if by `which llvm-objdump`.')
-    parser.add_argument('--extensions', type=str, default=def_obj,
-                        help='comma separated list of extensions to demangle (e.g. `o,obj`).  ' +
-                        'By default this will be `obj` on Windows and `o` otherwise.')
+if __name__ == "__main__":
+    def_obj = "obj" if sys.platform == "win32" else "o"
+
+    parser = argparse.ArgumentParser(
+        description="Demangle all symbols in a tree of object files, looking for failures."
+    )
+    parser.add_argument(
+        "dir", type=str, help="the root directory at which to start crawling"
+    )
+    parser.add_argument(
+        "--objdump",
+        type=str,
+        help="path to llvm-objdump.  If not specified "
+        + "the tool is located as if by `which llvm-objdump`.",
+    )
+    parser.add_argument(
+        "--extensions",
+        type=str,
+        default=def_obj,
+        help="comma separated list of extensions to demangle (e.g. `o,obj`).  "
+        + "By default this will be `obj` on Windows and `o` otherwise.",
+    )
 
     args = parser.parse_args()
 
-
     multiprocessing.freeze_support()
     go()
-

diff  --git a/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py b/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
index 1cfbf2b56c709..5fa569aec540d 100755
--- a/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
+++ b/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
@@ -16,183 +16,186 @@
 
 
 def main():
-  parser = ArgumentParser()
-  parser.add_argument(
-      "-v", "--verbose", action="store_true", help="enable debug logging")
-  parser.add_argument(
-      "-c",
-      "--check",
-      metavar="reference_file",
-      help="read checksums from reference_file and " +
-      "check they match checksums of llvm_path.")
-  parser.add_argument(
-      "--partial",
-      action="store_true",
-      help="ignore projects from reference_file " +
-      "that are not checked out in llvm_path.")
-  parser.add_argument(
-      "--multi_dir",
-      action="store_true",
-      help="indicates llvm_path contains llvm, checked out " +
-      "into multiple directories, as opposed to a " +
-      "typical single source tree checkout.")
-  parser.add_argument("llvm_path")
-
-  args = parser.parse_args()
-  if args.check is not None:
-    with open(args.check, "r") as f:
-      reference_checksums = ReadLLVMChecksums(f)
-  else:
-    reference_checksums = None
-
-  if args.verbose:
-    logging.basicConfig(level=logging.DEBUG)
-
-  llvm_projects = CreateLLVMProjects(not args.multi_dir)
-  checksums = ComputeLLVMChecksums(args.llvm_path, llvm_projects)
-
-  if reference_checksums is None:
-    WriteLLVMChecksums(checksums, sys.stdout)
-    sys.exit(0)
-
-  if not ValidateChecksums(reference_checksums, checksums, args.partial):
-    sys.stdout.write("Checksums differ.\nNew checksums:\n")
-    WriteLLVMChecksums(checksums, sys.stdout)
-    sys.stdout.write("Reference checksums:\n")
-    WriteLLVMChecksums(reference_checksums, sys.stdout)
-    sys.exit(1)
-  else:
-    sys.stdout.write("Checksums match.")
+    parser = ArgumentParser()
+    parser.add_argument(
+        "-v", "--verbose", action="store_true", help="enable debug logging"
+    )
+    parser.add_argument(
+        "-c",
+        "--check",
+        metavar="reference_file",
+        help="read checksums from reference_file and "
+        + "check they match checksums of llvm_path.",
+    )
+    parser.add_argument(
+        "--partial",
+        action="store_true",
+        help="ignore projects from reference_file "
+        + "that are not checked out in llvm_path.",
+    )
+    parser.add_argument(
+        "--multi_dir",
+        action="store_true",
+        help="indicates llvm_path contains llvm, checked out "
+        + "into multiple directories, as opposed to a "
+        + "typical single source tree checkout.",
+    )
+    parser.add_argument("llvm_path")
+
+    args = parser.parse_args()
+    if args.check is not None:
+        with open(args.check, "r") as f:
+            reference_checksums = ReadLLVMChecksums(f)
+    else:
+        reference_checksums = None
+
+    if args.verbose:
+        logging.basicConfig(level=logging.DEBUG)
+
+    llvm_projects = CreateLLVMProjects(not args.multi_dir)
+    checksums = ComputeLLVMChecksums(args.llvm_path, llvm_projects)
+
+    if reference_checksums is None:
+        WriteLLVMChecksums(checksums, sys.stdout)
+        sys.exit(0)
+
+    if not ValidateChecksums(reference_checksums, checksums, args.partial):
+        sys.stdout.write("Checksums differ.\nNew checksums:\n")
+        WriteLLVMChecksums(checksums, sys.stdout)
+        sys.stdout.write("Reference checksums:\n")
+        WriteLLVMChecksums(reference_checksums, sys.stdout)
+        sys.exit(1)
+    else:
+        sys.stdout.write("Checksums match.")
 
 
 def ComputeLLVMChecksums(root_path, projects):
-  """Compute checksums for LLVM sources checked out using svn.
-
-  Args:
-    root_path: a directory of llvm checkout.
-    projects: a list of LLVMProject instances, which describe checkout paths,
-      relative to root_path.
-
-  Returns:
-    A dict mapping from project name to project checksum.
-  """
-  hash_algo = hashlib.sha256
-
-  def collapse_svn_substitutions(contents):
-    # Replace svn substitutions for $Date$ and $LastChangedDate$.
-    # Unfortunately, these are locale-specific.
-    return SVN_DATES_REGEX.sub("$\1$", contents)
-
-  def read_and_collapse_svn_subsitutions(file_path):
-    with open(file_path, "rb") as f:
-      contents = f.read()
-      new_contents = collapse_svn_substitutions(contents)
-      if contents != new_contents:
-        logging.debug("Replaced svn keyword substitutions in %s", file_path)
-        logging.debug("\n\tBefore\n%s\n\tAfter\n%s", contents, new_contents)
-      return new_contents
-
-  project_checksums = dict()
-  # Hash each project.
-  for proj in projects:
-    project_root = os.path.join(root_path, proj.relpath)
-    if not os.path.exists(project_root):
-      logging.info("Folder %s doesn't exist, skipping project %s", proj.relpath,
-                   proj.name)
-      continue
-
-    files = list()
-
-    def add_file_hash(file_path):
-      if os.path.islink(file_path) and not os.path.exists(file_path):
-        content = os.readlink(file_path)
-      else:
-        content = read_and_collapse_svn_subsitutions(file_path)
-      hasher = hash_algo()
-      hasher.update(content)
-      file_digest = hasher.hexdigest()
-      logging.debug("Checksum %s for file %s", file_digest, file_path)
-      files.append((file_path, file_digest))
-
-    logging.info("Computing checksum for %s", proj.name)
-    WalkProjectFiles(root_path, projects, proj, add_file_hash)
-
-    # Compute final checksum.
-    files.sort(key=lambda x: x[0])
-    hasher = hash_algo()
-    for file_path, file_digest in files:
-      file_path = os.path.relpath(file_path, project_root)
-      hasher.update(file_path)
-      hasher.update(file_digest)
-    project_checksums[proj.name] = hasher.hexdigest()
-  return project_checksums
+    """Compute checksums for LLVM sources checked out using svn.
+
+    Args:
+      root_path: a directory of llvm checkout.
+      projects: a list of LLVMProject instances, which describe checkout paths,
+        relative to root_path.
+
+    Returns:
+      A dict mapping from project name to project checksum.
+    """
+    hash_algo = hashlib.sha256
+
+    def collapse_svn_substitutions(contents):
+        # Replace svn substitutions for $Date$ and $LastChangedDate$.
+        # Unfortunately, these are locale-specific.
+        return SVN_DATES_REGEX.sub("$\1$", contents)
+
+    def read_and_collapse_svn_subsitutions(file_path):
+        with open(file_path, "rb") as f:
+            contents = f.read()
+            new_contents = collapse_svn_substitutions(contents)
+            if contents != new_contents:
+                logging.debug("Replaced svn keyword substitutions in %s", file_path)
+                logging.debug("\n\tBefore\n%s\n\tAfter\n%s", contents, new_contents)
+            return new_contents
+
+    project_checksums = dict()
+    # Hash each project.
+    for proj in projects:
+        project_root = os.path.join(root_path, proj.relpath)
+        if not os.path.exists(project_root):
+            logging.info(
+                "Folder %s doesn't exist, skipping project %s", proj.relpath, proj.name
+            )
+            continue
+
+        files = list()
+
+        def add_file_hash(file_path):
+            if os.path.islink(file_path) and not os.path.exists(file_path):
+                content = os.readlink(file_path)
+            else:
+                content = read_and_collapse_svn_subsitutions(file_path)
+            hasher = hash_algo()
+            hasher.update(content)
+            file_digest = hasher.hexdigest()
+            logging.debug("Checksum %s for file %s", file_digest, file_path)
+            files.append((file_path, file_digest))
+
+        logging.info("Computing checksum for %s", proj.name)
+        WalkProjectFiles(root_path, projects, proj, add_file_hash)
+
+        # Compute final checksum.
+        files.sort(key=lambda x: x[0])
+        hasher = hash_algo()
+        for file_path, file_digest in files:
+            file_path = os.path.relpath(file_path, project_root)
+            hasher.update(file_path)
+            hasher.update(file_digest)
+        project_checksums[proj.name] = hasher.hexdigest()
+    return project_checksums
 
 
 def WriteLLVMChecksums(checksums, f):
-  """Writes checksums to a text file.
+    """Writes checksums to a text file.
 
-  Args:
-    checksums: a dict mapping from project name to project checksum (result of
-      ComputeLLVMChecksums).
-    f: a file object to write into.
-  """
+    Args:
+      checksums: a dict mapping from project name to project checksum (result of
+        ComputeLLVMChecksums).
+      f: a file object to write into.
+    """
 
-  for proj in sorted(checksums.keys()):
-    f.write("{} {}\n".format(checksums[proj], proj))
+    for proj in sorted(checksums.keys()):
+        f.write("{} {}\n".format(checksums[proj], proj))
 
 
 def ReadLLVMChecksums(f):
-  """Reads checksums from a text file, produced by WriteLLVMChecksums.
-
-  Returns:
-    A dict, mapping from project name to project checksum.
-  """
-  checksums = {}
-  while True:
-    line = f.readline()
-    if line == "":
-      break
-    checksum, proj = line.split()
-    checksums[proj] = checksum
-  return checksums
-
-
-def ValidateChecksums(reference_checksums,
-                      new_checksums,
-                      allow_missing_projects=False):
-  """Validates that reference_checksums and new_checksums match.
-
-  Args:
-    reference_checksums: a dict of reference checksums, mapping from a project
-      name to a project checksum.
-    new_checksums: a dict of checksums to be checked, mapping from a project
-      name to a project checksum.
-    allow_missing_projects:
-      When True, reference_checksums may contain more projects than
-        new_checksums. Projects missing from new_checksums are ignored.
-      When False, new_checksums and reference_checksums must contain checksums
-        for the same set of projects. If there is a project in
-        reference_checksums, missing from new_checksums, ValidateChecksums
-        will return False.
-
-  Returns:
-    True, if checksums match with regards to allow_missing_projects flag value.
-    False, otherwise.
-  """
-  if not allow_missing_projects:
-    if len(new_checksums) != len(reference_checksums):
-      return False
-
-  for proj, checksum in new_checksums.items():
-    # We never computed a checksum for this project.
-    if proj not in reference_checksums:
-      return False
-    # Checksum did not match.
-    if reference_checksums[proj] != checksum:
-      return False
-
-  return True
+    """Reads checksums from a text file, produced by WriteLLVMChecksums.
+
+    Returns:
+      A dict, mapping from project name to project checksum.
+    """
+    checksums = {}
+    while True:
+        line = f.readline()
+        if line == "":
+            break
+        checksum, proj = line.split()
+        checksums[proj] = checksum
+    return checksums
+
+
+def ValidateChecksums(reference_checksums, new_checksums, allow_missing_projects=False):
+    """Validates that reference_checksums and new_checksums match.
+
+    Args:
+      reference_checksums: a dict of reference checksums, mapping from a project
+        name to a project checksum.
+      new_checksums: a dict of checksums to be checked, mapping from a project
+        name to a project checksum.
+      allow_missing_projects:
+        When True, reference_checksums may contain more projects than
+          new_checksums. Projects missing from new_checksums are ignored.
+        When False, new_checksums and reference_checksums must contain checksums
+          for the same set of projects. If there is a project in
+          reference_checksums, missing from new_checksums, ValidateChecksums
+          will return False.
+
+    Returns:
+      True, if checksums match with regards to allow_missing_projects flag value.
+      False, otherwise.
+    """
+    if not allow_missing_projects:
+        if len(new_checksums) != len(reference_checksums):
+            return False
+
+    for proj, checksum in new_checksums.items():
+        # We never computed a checksum for this project.
+        if proj not in reference_checksums:
+            return False
+        # Checksum did not match.
+        if reference_checksums[proj] != checksum:
+            return False
+
+    return True
 
 
 if __name__ == "__main__":
-  main()
+    main()

diff  --git a/llvm/utils/docker/scripts/llvm_checksum/project_tree.py b/llvm/utils/docker/scripts/llvm_checksum/project_tree.py
index 17ed475872bd7..337dd449c6c3b 100644
--- a/llvm/utils/docker/scripts/llvm_checksum/project_tree.py
+++ b/llvm/utils/docker/scripts/llvm_checksum/project_tree.py
@@ -11,85 +11,89 @@
 
 
 class LLVMProject(object):
-  """An LLVM project with a descriptive name and a relative checkout path.
-  """
+    """An LLVM project with a descriptive name and a relative checkout path."""
 
-  def __init__(self, name, relpath):
-    self.name = name
-    self.relpath = relpath
+    def __init__(self, name, relpath):
+        self.name = name
+        self.relpath = relpath
 
-  def is_subproject(self, other_project):
-    """ Check if self is checked out as a subdirectory of other_project.
-    """
-    return self.relpath.startswith(other_project.relpath)
+    def is_subproject(self, other_project):
+        """Check if self is checked out as a subdirectory of other_project."""
+        return self.relpath.startswith(other_project.relpath)
 
 
 def WalkProjectFiles(checkout_root, all_projects, project, visitor):
-  """ Walk over all files inside a project without recursing into subprojects, '.git' and '.svn' subfolders.
+    """Walk over all files inside a project without recursing into subprojects, '.git' and '.svn' subfolders.
 
     checkout_root: root of the LLVM checkout.
     all_projects: projects in the LLVM checkout.
     project: a project to walk the files of. Must be inside all_projects.
     visitor: a function called on each visited file.
-  """
-  assert project in all_projects
-
-  ignored_paths = set()
-  for other_project in all_projects:
-    if other_project != project and other_project.is_subproject(project):
-      ignored_paths.add(os.path.join(checkout_root, other_project.relpath))
-
-  def raise_error(err):
-    raise err
-
-  project_root = os.path.join(checkout_root, project.relpath)
-  for root, dirs, files in os.walk(project_root, onerror=raise_error):
-    dirs[:] = [
-        d for d in dirs
-        if d != ".svn" and d != ".git" and
-        os.path.join(root, d) not in ignored_paths
-    ]
-    for f in files:
-      visitor(os.path.join(root, f))
+    """
+    assert project in all_projects
+
+    ignored_paths = set()
+    for other_project in all_projects:
+        if other_project != project and other_project.is_subproject(project):
+            ignored_paths.add(os.path.join(checkout_root, other_project.relpath))
+
+    def raise_error(err):
+        raise err
+
+    project_root = os.path.join(checkout_root, project.relpath)
+    for root, dirs, files in os.walk(project_root, onerror=raise_error):
+        dirs[:] = [
+            d
+            for d in dirs
+            if d != ".svn"
+            and d != ".git"
+            and os.path.join(root, d) not in ignored_paths
+        ]
+        for f in files:
+            visitor(os.path.join(root, f))
 
 
 def CreateLLVMProjects(single_tree_checkout):
-  """Returns a list of LLVMProject instances, describing relative paths of a typical LLVM checkout.
-
-  Args:
-    single_tree_checkout:
-      When True, relative paths for each project points to a typical single
-        source tree checkout.
-      When False, relative paths for each projects points to a separate
-        directory. However, clang-tools-extra is an exception, its relative path
-        will always be 'clang/tools/extra'.
-  """
-  # FIXME: cover all of llvm projects.
-
-  # Projects that reside inside 'projects/' in a single source tree checkout.
-  ORDINARY_PROJECTS = [
-      "compiler-rt", "dragonegg", "libcxx", "libcxxabi", "libunwind",
-      "test-suite"
-  ]
-  # Projects that reside inside 'tools/' in a single source tree checkout.
-  TOOLS_PROJECTS = ["clang", "lld", "lldb"]
-
-  if single_tree_checkout:
-    projects = [LLVMProject("llvm", "")]
-    projects += [
-        LLVMProject(p, os.path.join("projects", p)) for p in ORDINARY_PROJECTS
-    ]
-    projects += [
-        LLVMProject(p, os.path.join("tools", p)) for p in TOOLS_PROJECTS
+    """Returns a list of LLVMProject instances, describing relative paths of a typical LLVM checkout.
+
+    Args:
+      single_tree_checkout:
+        When True, relative paths for each project points to a typical single
+          source tree checkout.
+        When False, relative paths for each projects points to a separate
+          directory. However, clang-tools-extra is an exception, its relative path
+          will always be 'clang/tools/extra'.
+    """
+    # FIXME: cover all of llvm projects.
+
+    # Projects that reside inside 'projects/' in a single source tree checkout.
+    ORDINARY_PROJECTS = [
+        "compiler-rt",
+        "dragonegg",
+        "libcxx",
+        "libcxxabi",
+        "libunwind",
+        "test-suite",
     ]
-    projects.append(
-        LLVMProject("clang-tools-extra",
-                    os.path.join("tools", "clang", "tools", "extra")))
-  else:
-    projects = [LLVMProject("llvm", "llvm")]
-    projects += [LLVMProject(p, p) for p in ORDINARY_PROJECTS]
-    projects += [LLVMProject(p, p) for p in TOOLS_PROJECTS]
-    projects.append(
-        LLVMProject("clang-tools-extra", os.path.join("clang", "tools",
-                                                      "extra")))
-  return projects
+    # Projects that reside inside 'tools/' in a single source tree checkout.
+    TOOLS_PROJECTS = ["clang", "lld", "lldb"]
+
+    if single_tree_checkout:
+        projects = [LLVMProject("llvm", "")]
+        projects += [
+            LLVMProject(p, os.path.join("projects", p)) for p in ORDINARY_PROJECTS
+        ]
+        projects += [LLVMProject(p, os.path.join("tools", p)) for p in TOOLS_PROJECTS]
+        projects.append(
+            LLVMProject(
+                "clang-tools-extra", os.path.join("tools", "clang", "tools", "extra")
+            )
+        )
+    else:
+        projects = [LLVMProject("llvm", "llvm")]
+        projects += [LLVMProject(p, p) for p in ORDINARY_PROJECTS]
+        projects += [LLVMProject(p, p) for p in TOOLS_PROJECTS]
+        projects.append(
+            LLVMProject("clang-tools-extra", os.path.join("clang", "tools", "extra"))
+        )
+    return projects

diff  --git a/llvm/utils/extract-section.py b/llvm/utils/extract-section.py
index c8838b4900163..bb4e252155372 100755
--- a/llvm/utils/extract-section.py
+++ b/llvm/utils/extract-section.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 from __future__ import print_function
-'''
+
+"""
 Helper script to print out the raw content of an ELF section.
 Example usages:
 ```
@@ -13,87 +14,133 @@
 ```
 This is merely a wrapper around `llvm-readobj` that focuses on the binary
 content as well as providing more formatting options.
-'''
+"""
 
 # Unfortunately reading binary from stdin is not so trivial in Python...
 def read_raw_stdin():
     import sys
+
     if sys.version_info >= (3, 0):
         reading_source = sys.stdin.buffer
     else:
         # Windows will always read as string so we need some
         # special handling
-        if sys.platform == 'win32':
+        if sys.platform == "win32":
             import os, msvcrt
+
             msvcrt.setformat(sys.stdin.fileno(), os.O_BINARY)
         reading_source = sys.stdin
     return reading_source.read()
 
+
 def get_raw_section_dump(readobj_path, section_name, input_file):
     import subprocess
-    cmd = [readobj_path, '--elf-output-style=GNU', '--hex-dump={}'.format(section_name),
-            input_file]
+
+    cmd = [
+        readobj_path,
+        "--elf-output-style=GNU",
+        "--hex-dump={}".format(section_name),
+        input_file,
+    ]
     proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
 
-    if input_file == '-':
+    if input_file == "-":
         # From stdin
-        out,_ = proc.communicate(input=read_raw_stdin())
+        out, _ = proc.communicate(input=read_raw_stdin())
     else:
-        out,_ = proc.communicate()
+        out, _ = proc.communicate()
+
+    return out.decode("utf-8") if type(out) is not str else out
 
-    return out.decode('utf-8') if type(out) is not str else out
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     import argparse
+
     # The default '-h' (--help) will conflict with our '-h' (hex) format
     arg_parser = argparse.ArgumentParser(add_help=False)
-    arg_parser.add_argument('--readobj-path', metavar='<executable path>', type=str,
-            help='Path to llvm-readobj')
-    arg_parser.add_argument('--input-file', metavar='<file>', type=str,
-            help='Input object file, or \'-\' to read from stdin')
-    arg_parser.add_argument('section', metavar='<name>', type=str,
-            help='Name of the section to extract')
+    arg_parser.add_argument(
+        "--readobj-path",
+        metavar="<executable path>",
+        type=str,
+        help="Path to llvm-readobj",
+    )
+    arg_parser.add_argument(
+        "--input-file",
+        metavar="<file>",
+        type=str,
+        help="Input object file, or '-' to read from stdin",
+    )
+    arg_parser.add_argument(
+        "section", metavar="<name>", type=str, help="Name of the section to extract"
+    )
     # Output format
     format_group = arg_parser.add_mutually_exclusive_group()
-    format_group.add_argument('-b', dest='format', action='store_const', const='bits',
-            help='Print out in bits')
-    arg_parser.add_argument('--byte-indicator', action='store_true',
-            help='Whether to print a \'.\' every 8 bits in bits printing mode')
-    arg_parser.add_argument('--bits-endian', metavar='<little/big>', type=str,
-            choices=['little', 'big'],
-            help='Print out bits in specified endianness (little or big); defaults to big')
-    format_group.add_argument('-h', dest='format', action='store_const', const='hex',
-            help='Print out in hexadecimal')
-    arg_parser.add_argument('--hex-width', metavar='<# of bytes>', type=int,
-            help='The width (in byte) of every element in hex printing mode')
+    format_group.add_argument(
+        "-b",
+        dest="format",
+        action="store_const",
+        const="bits",
+        help="Print out in bits",
+    )
+    arg_parser.add_argument(
+        "--byte-indicator",
+        action="store_true",
+        help="Whether to print a '.' every 8 bits in bits printing mode",
+    )
+    arg_parser.add_argument(
+        "--bits-endian",
+        metavar="<little/big>",
+        type=str,
+        choices=["little", "big"],
+        help="Print out bits in specified endianness (little or big); defaults to big",
+    )
+    format_group.add_argument(
+        "-h",
+        dest="format",
+        action="store_const",
+        const="hex",
+        help="Print out in hexadecimal",
+    )
+    arg_parser.add_argument(
+        "--hex-width",
+        metavar="<# of bytes>",
+        type=int,
+        help="The width (in byte) of every element in hex printing mode",
+    )
 
-    arg_parser.add_argument('--help', action='help')
-    arg_parser.set_defaults(format='bits', tool_path='llvm-readobj', input_file='-',
-            byte_indicator=False, hex_width=4, bits_endian='big')
+    arg_parser.add_argument("--help", action="help")
+    arg_parser.set_defaults(
+        format="bits",
+        tool_path="llvm-readobj",
+        input_file="-",
+        byte_indicator=False,
+        hex_width=4,
+        bits_endian="big",
+    )
     args = arg_parser.parse_args()
 
     raw_section = get_raw_section_dump(args.tool_path, args.section, args.input_file)
 
     results = []
     for line in raw_section.splitlines(False):
-        if line.startswith('Hex dump'):
+        if line.startswith("Hex dump"):
             continue
-        parts = line.strip().split(' ')[1:]
+        parts = line.strip().split(" ")[1:]
         for part in parts[:4]:
             # exclude any non-hex dump string
             try:
                 val = int(part, 16)
-                if args.format == 'bits':
+                if args.format == "bits":
                     # divided into bytes first
                     offsets = (24, 16, 8, 0)
-                    if args.bits_endian == 'little':
+                    if args.bits_endian == "little":
                         offsets = (0, 8, 16, 24)
                     for byte in [(val >> off) & 0xFF for off in offsets]:
                         for bit in [(byte >> off) & 1 for off in range(7, -1, -1)]:
                             results.append(str(bit))
                         if args.byte_indicator:
-                            results.append('.')
-                elif args.format == 'hex':
+                            results.append(".")
+                elif args.format == "hex":
                     assert args.hex_width <= 4 and args.hex_width > 0
                     width_bits = args.hex_width * 8
                     offsets = [off for off in range(32 - width_bits, -1, -width_bits)]
@@ -103,4 +150,4 @@ def get_raw_section_dump(readobj_path, section_name, input_file):
                         results.append(format_str.format(word))
             except:
                 break
-    print(' '.join(results), end='')
+    print(" ".join(results), end="")

diff  --git a/llvm/utils/extract_symbols.py b/llvm/utils/extract_symbols.py
index a2eabd3ab4f4c..9238828d7ce85 100755
--- a/llvm/utils/extract_symbols.py
+++ b/llvm/utils/extract_symbols.py
@@ -36,10 +36,14 @@ def nm_get_symbols(tool, lib):
     #   that llvm-nm do not demangle by default, but the system nm on AIX does
     #   that, so the behavior may change in the future,
     # '-p' do not waste time sorting the symbols.
-    cmd = [tool,'-P','-g','-Xany','--no-demangle','-p']
-    process = subprocess.Popen(cmd+[lib], bufsize=1,
-                               stdout=subprocess.PIPE, stdin=subprocess.PIPE,
-                               universal_newlines=True)
+    cmd = [tool, "-P", "-g", "-Xany", "--no-demangle", "-p"]
+    process = subprocess.Popen(
+        cmd + [lib],
+        bufsize=1,
+        stdout=subprocess.PIPE,
+        stdin=subprocess.PIPE,
+        universal_newlines=True,
+    )
     process.stdin.close()
     for line in process.stdout:
         # Look for external symbols that are defined in some section
@@ -58,26 +62,29 @@ def nm_get_symbols(tool, lib):
             yield (match.group(1), False)
     process.wait()
 
+
 # Define a function which determines if the target is 32-bit Windows (as that's
 # where calling convention name decoration happens).
 def readobj_is_32bit_windows(tool, lib):
-    output = subprocess.check_output([tool,'--file-header',lib],
-                                     universal_newlines=True)
+    output = subprocess.check_output(
+        [tool, "--file-header", lib], universal_newlines=True
+    )
     for line in output.splitlines():
-        match = re.match('Format: (\S+)', line)
+        match = re.match("Format: (\S+)", line)
         if match:
-            return (match.group(1) == 'COFF-i386')
+            return match.group(1) == "COFF-i386"
     return False
 
+
 # MSVC mangles names to ?<identifier_mangling>@<type_mangling>. By examining the
 # identifier/type mangling we can decide which symbols could possibly be
 # required and which we can discard.
 def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
     # Keep unmangled (i.e. extern "C") names
-    if not '?' in symbol:
+    if not "?" in symbol:
         if calling_convention_decoration:
             # Remove calling convention decoration from names
-            match = re.match('[_@]([^@]+)', symbol)
+            match = re.match("[_@]([^@]+)", symbol)
             if match:
                 symbol = match.group(1)
         # Discard floating point/SIMD constants.
@@ -87,15 +94,15 @@ def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
     # Deleting destructors start with ?_G or ?_E and can be discarded because
     # link.exe gives you a warning telling you they can't be exported if you
     # don't
-    elif symbol.startswith('??_G') or symbol.startswith('??_E'):
+    elif symbol.startswith("??_G") or symbol.startswith("??_E"):
         return None
     # An anonymous namespace is mangled as ?A(maybe hex number)@. Any symbol
     # that mentions an anonymous namespace can be discarded, as the anonymous
     # namespace doesn't exist outside of that translation unit.
-    elif re.search('\?A(0x\w+)?@', symbol):
+    elif re.search("\?A(0x\w+)?@", symbol):
         return None
     # Skip X86GenMnemonicTables functions, they are not exposed from llvm/include/.
-    elif re.match('\?is[A-Z0-9]*@X86@llvm', symbol):
+    elif re.match("\?is[A-Z0-9]*@X86@llvm", symbol):
         return None
     # Keep mangled llvm:: and clang:: function symbols. How we detect these is a
     # bit of a mess and imprecise, but that avoids having to completely demangle
@@ -115,23 +122,24 @@ def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
     #                 ::= .+@ (list of types)
     #                 ::= .*Z (list of types, varargs)
     # <throw-spec> ::= exceptions are not allowed
-    elif re.search('(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$', symbol):
+    elif re.search("(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$", symbol):
         return symbol
     return None
 
+
 # Itanium manglings are of the form _Z<identifier_mangling><type_mangling>. We
 # demangle the identifier mangling to identify symbols that can be safely
 # discarded.
 def should_keep_itanium_symbol(symbol, calling_convention_decoration):
     # Start by removing any calling convention decoration (which we expect to
     # see on all symbols, even mangled C++ symbols)
-    if calling_convention_decoration and symbol.startswith('_'):
+    if calling_convention_decoration and symbol.startswith("_"):
         symbol = symbol[1:]
     # Keep unmangled names
-    if not symbol.startswith('_') and not symbol.startswith('.'):
+    if not symbol.startswith("_") and not symbol.startswith("."):
         return symbol
     # Discard manglings that aren't nested names
-    match = re.match('_Z(T[VTIS])?(N.+)', symbol)
+    match = re.match("_Z(T[VTIS])?(N.+)", symbol)
     if not match:
         return None
     # Demangle the name. If the name is too complex then we don't need to keep
@@ -143,89 +151,93 @@ def should_keep_itanium_symbol(symbol, calling_convention_decoration):
     if not names:
         return symbol
     # Keep llvm:: and clang:: names
-    elif names[0][0] == '4llvm' or names[0][0] == '5clang':
+    elif names[0][0] == "4llvm" or names[0][0] == "5clang":
         return symbol
     # Discard everything else
     else:
         return None
 
+
 # Certain kinds of complex manglings we assume cannot be part of a public
 # interface, and we handle them by raising an exception.
 class TooComplexName(Exception):
     pass
 
+
 # Parse an itanium mangled name from the start of a string and return a
 # (name, rest of string) pair.
 def parse_itanium_name(arg):
     # Check for a normal name
-    match = re.match('(\d+)(.+)', arg)
+    match = re.match("(\d+)(.+)", arg)
     if match:
         n = int(match.group(1))
-        name = match.group(1)+match.group(2)[:n]
+        name = match.group(1) + match.group(2)[:n]
         rest = match.group(2)[n:]
         return name, rest
     # Check for constructor/destructor names
-    match = re.match('([CD][123])(.+)', arg)
+    match = re.match("([CD][123])(.+)", arg)
     if match:
         return match.group(1), match.group(2)
     # Assume that a sequence of characters that doesn't end a nesting is an
     # operator (this is very imprecise, but appears to be good enough)
-    match = re.match('([^E]+)(.+)', arg)
+    match = re.match("([^E]+)(.+)", arg)
     if match:
         return match.group(1), match.group(2)
     # Anything else: we can't handle it
     return None, arg
 
+
 # Parse an itanium mangled template argument list from the start of a string
 # and throw it away, returning the rest of the string.
 def skip_itanium_template(arg):
     # A template argument list starts with I
-    assert arg.startswith('I'), arg
+    assert arg.startswith("I"), arg
     tmp = arg[1:]
     while tmp:
         # Check for names
-        match = re.match('(\d+)(.+)', tmp)
+        match = re.match("(\d+)(.+)", tmp)
         if match:
             n = int(match.group(1))
-            tmp =  match.group(2)[n:]
+            tmp = match.group(2)[n:]
             continue
         # Check for substitutions
-        match = re.match('S[A-Z0-9]*_(.+)', tmp)
+        match = re.match("S[A-Z0-9]*_(.+)", tmp)
         if match:
             tmp = match.group(1)
         # Start of a template
-        elif tmp.startswith('I'):
+        elif tmp.startswith("I"):
             tmp = skip_itanium_template(tmp)
         # Start of a nested name
-        elif tmp.startswith('N'):
+        elif tmp.startswith("N"):
             _, tmp = parse_itanium_nested_name(tmp)
         # Start of an expression: assume that it's too complicated
-        elif tmp.startswith('L') or tmp.startswith('X'):
+        elif tmp.startswith("L") or tmp.startswith("X"):
             raise TooComplexName
         # End of the template
-        elif tmp.startswith('E'):
+        elif tmp.startswith("E"):
             return tmp[1:]
         # Something else: probably a type, skip it
         else:
             tmp = tmp[1:]
     return None
 
+
 # Parse an itanium mangled nested name and transform it into a list of pairs of
 # (name, is_template), returning (list, rest of string).
 def parse_itanium_nested_name(arg):
     # A nested name starts with N
-    assert arg.startswith('N'), arg
+    assert arg.startswith("N"), arg
     ret = []
 
     # Skip past the N, and possibly a substitution
-    match = re.match('NS[A-Z0-9]*_(.+)', arg)
+    match = re.match("NS[A-Z0-9]*_(.+)", arg)
     if match:
         tmp = match.group(1)
     else:
         tmp = arg[1:]
 
     # Skip past CV-qualifiers and ref qualifiers
-    match = re.match('[rVKRO]*(.+)', tmp);
+    match = re.match("[rVKRO]*(.+)", tmp)
     if match:
         tmp = match.group(1)
 
@@ -233,7 +245,7 @@ def parse_itanium_nested_name(arg):
     # nested name
     while tmp:
         # An E ends the nested name
-        if tmp.startswith('E'):
+        if tmp.startswith("E"):
             return ret, tmp[1:]
         # Parse a name
         name_part, tmp = parse_itanium_name(tmp)
@@ -243,7 +255,7 @@ def parse_itanium_nested_name(arg):
         is_template = False
         # If this name is a template record that, then skip the template
         # arguments
-        if tmp.startswith('I'):
+        if tmp.startswith("I"):
             tmp = skip_itanium_template(tmp)
             is_template = True
         # Add the name to the list
@@ -252,33 +264,34 @@ def parse_itanium_nested_name(arg):
     # If we get here then something went wrong
     return None, None
 
+
 # Parse a microsoft mangled symbol and return a list of pairs of
 # (name, is_template). This is very rudimentary and does just enough
 # in order to determine if the first or second component is a template.
 def parse_microsoft_mangling(arg):
     # If the name doesn't start with ? this isn't a mangled name
-    if not arg.startswith('?'):
+    if not arg.startswith("?"):
         return [(arg, False)]
     arg = arg[1:]
     components = []
     while len(arg) > 0:
         # If we see an empty component we've reached the end
-        if arg.startswith('@'):
+        if arg.startswith("@"):
             return components
         # Check for a simple name
-        match = re.match('(\w+)@(.+)', arg)
+        match = re.match("(\w+)@(.+)", arg)
         if match:
             components.append((match.group(1), False))
             arg = match.group(2)
             continue
         # Check for a special function name
-        match = re.match('(\?_?\w)(.+)', arg)
+        match = re.match("(\?_?\w)(.+)", arg)
         if match:
             components.append((match.group(1), False))
             arg = match.group(2)
             continue
         # Check for a template name
-        match = re.match('\?\$(\w+)@[^@]+@(.+)', arg)
+        match = re.match("\?\$(\w+)@[^@]+@(.+)", arg)
         if match:
             components.append((match.group(1), True))
             arg = match.group(2)
@@ -288,6 +301,7 @@ def parse_microsoft_mangling(arg):
         return components
     return components
 
+
 def extract_symbols(arg):
     llvm_nm_path, should_keep_symbol, calling_convention_decoration, lib = arg
     symbol_defs = dict()
@@ -296,18 +310,19 @@ def extract_symbols(arg):
         symbol = should_keep_symbol(symbol, calling_convention_decoration)
         if symbol:
             if is_def:
-                symbol_defs[symbol] = 1 + symbol_defs.setdefault(symbol,0)
+                symbol_defs[symbol] = 1 + symbol_defs.setdefault(symbol, 0)
             else:
                 symbol_refs.add(symbol)
     return (symbol_defs, symbol_refs)
 
+
 def get_template_name(sym, mangling):
     # Parse the mangling into a list of (name, is_template)
     try:
-        if mangling == 'microsoft':
+        if mangling == "microsoft":
             names = parse_microsoft_mangling(sym)
         else:
-            match = re.match('_Z(T[VTIS])?(N.+)', sym)
+            match = re.match("_Z(T[VTIS])?(N.+)", sym)
             if match:
                 names, _ = parse_itanium_nested_name(match.group(2))
             else:
@@ -326,41 +341,62 @@ def get_template_name(sym, mangling):
     # Not a template
     return None
 
+
 def parse_tool_path(parser, tool, val):
     try:
         # Close std streams as we don't want any output and we don't
         # want the process to wait for something on stdin.
-        p = subprocess.Popen([val], stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE,
-                                stdin=subprocess.PIPE,
-                                universal_newlines=True)
+        p = subprocess.Popen(
+            [val],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            stdin=subprocess.PIPE,
+            universal_newlines=True,
+        )
         p.stdout.close()
         p.stderr.close()
         p.stdin.close()
         p.wait()
         return val
     except Exception:
-        parser.error(f'Invalid path for {tool}')
+        parser.error(f"Invalid path for {tool}")
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description='Extract symbols to export from libraries')
-    parser.add_argument('--mangling', choices=['itanium','microsoft'],
-                        required=True, help='expected symbol mangling scheme')
-    parser.add_argument('--nm', metavar='path',
-                        type=lambda x: parse_tool_path(parser, 'nm', x),
-                        help='path to the llvm-nm executable')
-    parser.add_argument('--readobj', metavar='path',
-                        type=lambda x: parse_tool_path(parser, 'readobj', x),
-                        help='path to the llvm-readobj executable')
-    parser.add_argument('libs', metavar='lib', type=str, nargs='+',
-                        help='libraries to extract symbols from')
-    parser.add_argument('-o', metavar='file', type=str, help='output to file')
+        description="Extract symbols to export from libraries"
+    )
+    parser.add_argument(
+        "--mangling",
+        choices=["itanium", "microsoft"],
+        required=True,
+        help="expected symbol mangling scheme",
+    )
+    parser.add_argument(
+        "--nm",
+        metavar="path",
+        type=lambda x: parse_tool_path(parser, "nm", x),
+        help="path to the llvm-nm executable",
+    )
+    parser.add_argument(
+        "--readobj",
+        metavar="path",
+        type=lambda x: parse_tool_path(parser, "readobj", x),
+        help="path to the llvm-readobj executable",
+    )
+    parser.add_argument(
+        "libs",
+        metavar="lib",
+        type=str,
+        nargs="+",
+        help="libraries to extract symbols from",
+    )
+    parser.add_argument("-o", metavar="file", type=str, help="output to file")
     args = parser.parse_args()
 
     # How we determine which symbols to keep and which to discard depends on
     # the mangling scheme
-    if args.mangling == 'microsoft':
+    if args.mangling == "microsoft":
         should_keep_symbol = should_keep_microsoft_symbol
     else:
         should_keep_symbol = should_keep_itanium_symbol
@@ -371,17 +407,17 @@ def parse_tool_path(parser, tool, val):
         # When invoked by cmake the arguments are the cmake target names of the
         # libraries, so we need to add .lib/.a to the end and maybe lib to the
         # start to get the filename. Also allow objects.
-        suffixes = ['.lib','.a','.obj','.o']
+        suffixes = [".lib", ".a", ".obj", ".o"]
         if not any([lib.endswith(s) for s in suffixes]):
             for s in suffixes:
-                if os.path.exists(lib+s):
-                    lib = lib+s
+                if os.path.exists(lib + s):
+                    lib = lib + s
                     break
-                if os.path.exists('lib'+lib+s):
-                    lib = 'lib'+lib+s
+                if os.path.exists("lib" + lib + s):
+                    lib = "lib" + lib + s
                     break
         if not any([lib.endswith(s) for s in suffixes]):
-            print("Don't know what to do with argument "+lib, file=sys.stderr)
+            print("Don't know what to do with argument " + lib, file=sys.stderr)
             exit(1)
         libs.append(lib)
 
@@ -398,7 +434,10 @@ def parse_tool_path(parser, tool, val):
         # use a lambda or local function definition as that doesn't work on
         # windows, so create a list of tuples which duplicates the arguments
         # that are the same in all calls.
-        vals = [(args.nm, should_keep_symbol, calling_convention_decoration, x) for x in libs]
+        vals = [
+            (args.nm, should_keep_symbol, calling_convention_decoration, x)
+            for x in libs
+        ]
         # Do an async map then wait for the result to make sure that
         # KeyboardInterrupt gets caught correctly (see
         # http://bugs.python.org/issue8296)
@@ -415,8 +454,8 @@ def parse_tool_path(parser, tool, val):
     symbol_defs = dict()
     symbol_refs = set()
     for (this_lib_defs, this_lib_refs) in libs_symbols:
-        for k,v in list(this_lib_defs.items()):
-            symbol_defs[k] = v + symbol_defs.setdefault(k,0)
+        for k, v in list(this_lib_defs.items()):
+            symbol_defs[k] = v + symbol_defs.setdefault(k, 0)
         for sym in list(this_lib_refs):
             symbol_refs.add(sym)
 
@@ -434,10 +473,10 @@ def parse_tool_path(parser, tool, val):
     #    is because we need to export any explicitly instantiated templates,
     #    and we expect those to be referenced in some object.
     if args.o:
-        outfile = open(args.o,'w')
+        outfile = open(args.o, "w")
     else:
         outfile = sys.stdout
-    for k,v in list(symbol_defs.items()):
+    for k, v in list(symbol_defs.items()):
         template = get_template_name(k, args.mangling)
         if v == 1 and (not template or template in template_instantiation_refs):
             print(k, file=outfile)

diff  --git a/llvm/utils/extract_vplan.py b/llvm/utils/extract_vplan.py
index a6f217b85176c..cff6f5074d771 100755
--- a/llvm/utils/extract_vplan.py
+++ b/llvm/utils/extract_vplan.py
@@ -13,36 +13,38 @@
 import subprocess
 
 parser = argparse.ArgumentParser()
-parser.add_argument('--png', action='store_true')
+parser.add_argument("--png", action="store_true")
 args = parser.parse_args()
 
-dot = shutil.which('dot')
+dot = shutil.which("dot")
 if args.png and not dot:
     raise RuntimeError("Can't export to PNG without 'dot' in the system")
 
-pattern = re.compile(r"(digraph VPlan {.*?\n})",re.DOTALL)
+pattern = re.compile(r"(digraph VPlan {.*?\n})", re.DOTALL)
 matches = re.findall(pattern, sys.stdin.read())
 
 for vplan in matches:
     m = re.search("graph \[.+(VF=.+,UF.+)", vplan)
     if not m:
         raise ValueError("Can't get the right VPlan name")
-    name = re.sub('[^a-zA-Z0-9]', '', m.group(1))
+    name = re.sub("[^a-zA-Z0-9]", "", m.group(1))
 
     if args.png:
-        filename = 'VPlan' + name + '.png'
+        filename = "VPlan" + name + ".png"
         print("Exporting " + name + " to PNG via dot: " + filename)
-        p = subprocess.Popen([dot, '-Tpng', '-o', filename],
-                              encoding='utf-8',
-                              stdin=subprocess.PIPE,
-                              stdout=subprocess.PIPE,
-                              stderr=subprocess.PIPE)
+        p = subprocess.Popen(
+            [dot, "-Tpng", "-o", filename],
+            encoding="utf-8",
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
         out, err = p.communicate(input=vplan)
         if err:
             raise RuntimeError("Error running dot: " + err)
 
     else:
-        filename = 'VPlan' + name + '.dot'
+        filename = "VPlan" + name + ".dot"
         print("Exporting " + name + " to DOT: " + filename)
-        with open(filename, 'w') as out:
+        with open(filename, "w") as out:
             out.write(vplan)

diff  --git a/llvm/utils/filecheck_lint/filecheck_lint.py b/llvm/utils/filecheck_lint/filecheck_lint.py
index cae4b3396b3d2..dc054ab76a098 100644
--- a/llvm/utils/filecheck_lint/filecheck_lint.py
+++ b/llvm/utils/filecheck_lint/filecheck_lint.py
@@ -34,146 +34,158 @@
 from typing import Generator, Sequence, Tuple
 
 _distance_threshold = 3
-_prefixes = {'CHECK'}
-_suffixes = {'-DAG', '-COUNT', '-EMPTY', '-LABEL', '-NEXT', '-NOT', '-SAME'}
+_prefixes = {"CHECK"}
+_suffixes = {"-DAG", "-COUNT", "-EMPTY", "-LABEL", "-NEXT", "-NOT", "-SAME"}
 # 'NOTE' and 'TODO' are not directives, but are likely to be false positives
 # if encountered and to generate noise as a result. We filter them out also to
 # avoid this.
 _lit_directives = {
-    'RUN',
-    'REQUIRES',
-    'UNSUPPORTED',
-    'XFAIL',
-    'DEFINE',
-    'REDEFINE',
+    "RUN",
+    "REQUIRES",
+    "UNSUPPORTED",
+    "XFAIL",
+    "DEFINE",
+    "REDEFINE",
 }
 # 'COM' and 'RUN' are default comment prefixes for FileCheck.
-_comment_prefixes = {'COM', 'RUN'}
-_ignore = _lit_directives.union(_comment_prefixes).union({'NOTE', 'TODO'})
+_comment_prefixes = {"COM", "RUN"}
+_ignore = _lit_directives.union(_comment_prefixes).union({"NOTE", "TODO"})
 
 
 def levenshtein(s1: str, s2: str) -> int:  # pylint: disable=g-doc-args
-  """Computes the edit distance between two strings.
-
-  Additions, deletions, and substitutions all count as a single operation.
-  """
-  if not s1:
-    return len(s2)
-  if not s2:
-    return len(s1)
-
-  distances = range(len(s2) + 1)
-  for i in range(len(s1)):
-    new_distances = [i + 1]
-    for j in range(len(s2)):
-      cost = min(distances[j] + int(s1[i] != s2[j]), distances[j + 1] + 1,
-                 new_distances[-1] + 1)
-      new_distances.append(cost)
-    distances = new_distances
-  return distances[-1]
+    """Computes the edit distance between two strings.
+
+    Additions, deletions, and substitutions all count as a single operation.
+    """
+    if not s1:
+        return len(s2)
+    if not s2:
+        return len(s1)
+
+    distances = range(len(s2) + 1)
+    for i in range(len(s1)):
+        new_distances = [i + 1]
+        for j in range(len(s2)):
+            cost = min(
+                distances[j] + int(s1[i] != s2[j]),
+                distances[j + 1] + 1,
+                new_distances[-1] + 1,
+            )
+            new_distances.append(cost)
+        distances = new_distances
+    return distances[-1]
 
 
 class FileRange:
-  """Stores the coordinates of a span on a single line within a file.
+    """Stores the coordinates of a span on a single line within a file.
+
+    Attributes:
+      line:         the line number
+      start_column: the (inclusive) column where the span starts
+      end_column:   the (inclusive) column where the span ends
+    """
 
-  Attributes:
-    line:         the line number
-    start_column: the (inclusive) column where the span starts
-    end_column:   the (inclusive) column where the span ends
-  """
-  line: int
-  start_column: int
-  end_column: int
+    line: int
+    start_column: int
+    end_column: int
 
-  def __init__(self, content: str, start_byte: int, end_byte: int):  # pylint: disable=g-doc-args
-    """Derives a span's coordinates based on a string and start/end bytes.
+    def __init__(
+        self, content: str, start_byte: int, end_byte: int
+    ):  # pylint: disable=g-doc-args
+        """Derives a span's coordinates based on a string and start/end bytes.
 
-    `start_byte` and `end_byte` are assumed to be on the same line.
-    """
-    content_before_span = content[:start_byte]
-    self.line = content_before_span.count('\n') + 1
-    self.start_column = start_byte - content_before_span.rfind('\n')
-    self.end_column = self.start_column + (end_byte - start_byte - 1)
+        `start_byte` and `end_byte` are assumed to be on the same line.
+        """
+        content_before_span = content[:start_byte]
+        self.line = content_before_span.count("\n") + 1
+        self.start_column = start_byte - content_before_span.rfind("\n")
+        self.end_column = self.start_column + (end_byte - start_byte - 1)
 
-  def __str__(self) -> str:
-    return f'{self.line}:{self.start_column}-{self.end_column}'
+    def __str__(self) -> str:
+        return f"{self.line}:{self.start_column}-{self.end_column}"
 
 
 class Diagnostic:
-  """Stores information about one typo and a suggested fix.
-
-  Attributes:
-    filepath:   the path to the file in which the typo was found
-    filerange:  the position at which the typo was found in the file
-    typo:       the typo
-    fix:        a suggested fix
-  """
-
-  filepath: pathlib.Path
-  filerange: FileRange
-  typo: str
-  fix: str
-
-  def __init__(
-      self,
-      filepath: pathlib.Path,
-      filerange: FileRange,
-      typo: str,
-      fix: str  # pylint: disable=redefined-outer-name
-  ):
-    self.filepath = filepath
-    self.filerange = filerange
-    self.typo = typo
-    self.fix = fix
-
-  def __str__(self) -> str:
-    return f'{self.filepath}:' + str(self.filerange) + f': {self.summary()}'
-
-  def summary(self) -> str:
-    return (
-        f'Found potentially misspelled directive "{self.typo}". Did you mean '
-        f'"{self.fix}"?')
+    """Stores information about one typo and a suggested fix.
+
+    Attributes:
+      filepath:   the path to the file in which the typo was found
+      filerange:  the position at which the typo was found in the file
+      typo:       the typo
+      fix:        a suggested fix
+    """
+
+    filepath: pathlib.Path
+    filerange: FileRange
+    typo: str
+    fix: str
+
+    def __init__(
+        self,
+        filepath: pathlib.Path,
+        filerange: FileRange,
+        typo: str,
+        fix: str,  # pylint: disable=redefined-outer-name
+    ):
+        self.filepath = filepath
+        self.filerange = filerange
+        self.typo = typo
+        self.fix = fix
+
+    def __str__(self) -> str:
+        return f"{self.filepath}:" + str(self.filerange) + f": {self.summary()}"
+
+    def summary(self) -> str:
+        return (
+            f'Found potentially misspelled directive "{self.typo}". Did you mean '
+            f'"{self.fix}"?'
+        )
 
 
 def find_potential_directives(
-    content: str,) -> Generator[Tuple[FileRange, str], None, None]:
-  """Extracts all the potential FileCheck directives from a string.
+    content: str,
+) -> Generator[Tuple[FileRange, str], None, None]:
+    """Extracts all the potential FileCheck directives from a string.
 
-  What constitutes a potential directive is loosely defined---we err on the side
-  of capturing more strings than is necessary, rather than missing any.
+    What constitutes a potential directive is loosely defined---we err on the side
+    of capturing more strings than is necessary, rather than missing any.
 
-  Args:
-    content: the string in which to look for directives
+    Args:
+      content: the string in which to look for directives
 
-  Yields:
-    Tuples (p, d) where p is the span where the potential directive occurs
-    within the string and d is the potential directive.
-  """
-  directive_pattern = re.compile(
-      r'(?:^|//|;|#)[^\d\w\-_]*([\d\w\-_][\s\d\w\-_]*):', re.MULTILINE)
-  for match in re.finditer(directive_pattern, content):
-    potential_directive, span = match.group(1), match.span(1)
-    yield (FileRange(content, span[0], span[1]), potential_directive)
+    Yields:
+      Tuples (p, d) where p is the span where the potential directive occurs
+      within the string and d is the potential directive.
+    """
+    directive_pattern = re.compile(
+        r"(?:^|//|;|#)[^\d\w\-_]*([\d\w\-_][\s\d\w\-_]*):", re.MULTILINE
+    )
+    for match in re.finditer(directive_pattern, content):
+        potential_directive, span = match.group(1), match.span(1)
+        yield (FileRange(content, span[0], span[1]), potential_directive)
 
 
 # TODO(bchetioui): also parse comment prefixes to ignore.
-def parse_custom_prefixes(content: str) -> Generator[str, None, None]:  # pylint: disable=g-doc-args
-  """Parses custom prefixes defined in the string provided.
+def parse_custom_prefixes(
+    content: str,
+) -> Generator[str, None, None]:  # pylint: disable=g-doc-args
+    """Parses custom prefixes defined in the string provided.
 
-  For example, given the following file content:
-    RUN: something | FileCheck %s -check-prefixes CHECK1,CHECK2
-    RUN: something_else | FileCheck %s -check-prefix 'CHECK3'
+    For example, given the following file content:
+      RUN: something | FileCheck %s -check-prefixes CHECK1,CHECK2
+      RUN: something_else | FileCheck %s -check-prefix 'CHECK3'
 
-  the custom prefixes are CHECK1, CHECK2, and CHECK3.
-  """
-  param_re = r'|'.join([r"'[^']*'", r'"[^"]*"', r'[^\'"\s]+'])
-  for m in re.finditer(r'-check-prefix(?:es)?(?:\s+|=)({})'.format(param_re),
-                       content):
-    prefixes = m.group(1)
-    if prefixes.startswith('\'') or prefixes.startswith('"'):
-      prefixes = prefixes[1:-1]
-    for prefix in prefixes.split(','):
-      yield prefix
+    the custom prefixes are CHECK1, CHECK2, and CHECK3.
+    """
+    param_re = r"|".join([r"'[^']*'", r'"[^"]*"', r'[^\'"\s]+'])
+    for m in re.finditer(
+        r"-check-prefix(?:es)?(?:\s+|=)({})".format(param_re), content
+    ):
+        prefixes = m.group(1)
+        if prefixes.startswith("'") or prefixes.startswith('"'):
+            prefixes = prefixes[1:-1]
+        for prefix in prefixes.split(","):
+            yield prefix
 
 
 def find_directive_typos(
@@ -181,71 +193,79 @@ def find_directive_typos(
     filepath: pathlib.Path,
     threshold: int = 3,
 ) -> Generator[Diagnostic, None, None]:
-  """Detects potential typos in FileCheck directives.
-
-  Args:
-    content: the content of the file
-    filepath: the path to the file to check for typos in directives
-    threshold: the (inclusive) maximum edit distance between a potential
-      directive and an actual directive, such that the potential directive is
-      classified as a typo
-
-  Yields:
-    Diagnostics, in order from the top of the file.
-  """
-  all_prefixes = _prefixes.union(set(parse_custom_prefixes(content)))
-  all_directives = ([
-      f'{prefix}{suffix}'
-      for prefix, suffix in itertools.product(all_prefixes, _suffixes)
-  ] + list(_ignore) + list(all_prefixes))
-
-  def find_best_match(typo):
-    return min(
-        [(threshold + 1, typo)] + [(levenshtein(typo, d), d)
-                                   for d in all_directives
-                                   if abs(len(d) - len(typo)) <= threshold],
-        key=lambda tup: tup[0],
-    )
+    """Detects potential typos in FileCheck directives.
 
-  potential_directives = find_potential_directives(content)
+    Args:
+      content: the content of the file
+      filepath: the path to the file to check for typos in directives
+      threshold: the (inclusive) maximum edit distance between a potential
+        directive and an actual directive, such that the potential directive is
+        classified as a typo
 
-  for filerange, potential_directive in potential_directives:
-    # TODO(bchetioui): match count directives more finely. We skip directives
-    # starting with 'CHECK-COUNT-' for the moment as they require more complex
-    # logic to be handled correctly.
-    if any(
-        potential_directive.startswith(f'{prefix}-COUNT-')
-        for prefix in all_prefixes):
-      continue
-
-    # Ignoring potential typos that will not be matched later due to a too low
-    # threshold, in order to avoid potentially long computation times.
-    if len(potential_directive) > max(map(len, all_directives)) + threshold:
-      continue
+    Yields:
+      Diagnostics, in order from the top of the file.
+    """
+    all_prefixes = _prefixes.union(set(parse_custom_prefixes(content)))
+    all_directives = (
+        [
+            f"{prefix}{suffix}"
+            for prefix, suffix in itertools.product(all_prefixes, _suffixes)
+        ]
+        + list(_ignore)
+        + list(all_prefixes)
+    )
 
-    score, best_match = find_best_match(potential_directive)
-    if score == 0:  # This is an actual directive, ignore.
-      continue
-    elif score <= threshold and best_match not in _ignore:
-      yield Diagnostic(filepath, filerange, potential_directive, best_match)
+    def find_best_match(typo):
+        return min(
+            [(threshold + 1, typo)]
+            + [
+                (levenshtein(typo, d), d)
+                for d in all_directives
+                if abs(len(d) - len(typo)) <= threshold
+            ],
+            key=lambda tup: tup[0],
+        )
+
+    potential_directives = find_potential_directives(content)
+
+    for filerange, potential_directive in potential_directives:
+        # TODO(bchetioui): match count directives more finely. We skip directives
+        # starting with 'CHECK-COUNT-' for the moment as they require more complex
+        # logic to be handled correctly.
+        if any(
+            potential_directive.startswith(f"{prefix}-COUNT-")
+            for prefix in all_prefixes
+        ):
+            continue
+
+        # Ignoring potential typos that will not be matched later due to a too low
+        # threshold, in order to avoid potentially long computation times.
+        if len(potential_directive) > max(map(len, all_directives)) + threshold:
+            continue
+
+        score, best_match = find_best_match(potential_directive)
+        if score == 0:  # This is an actual directive, ignore.
+            continue
+        elif score <= threshold and best_match not in _ignore:
+            yield Diagnostic(filepath, filerange, potential_directive, best_match)
 
 
 def main(argv: Sequence[str]):
-  if len(argv) < 2:
-    print(f'Usage: {argv[0]} path/to/file/1 ... path/to/file/n')
-    exit(1)
-
-  for filepath in argv[1:]:
-    logging.info('Checking %s', filepath)
-    with open(filepath, 'rt') as f:
-      content = f.read()
-    for diagnostic in find_directive_typos(
-        content,
-        pathlib.Path(filepath),
-        threshold=_distance_threshold,
-    ):
-      print(diagnostic)
-
-
-if __name__ == '__main__':
-  main(sys.argv)
+    if len(argv) < 2:
+        print(f"Usage: {argv[0]} path/to/file/1 ... path/to/file/n")
+        exit(1)
+
+    for filepath in argv[1:]:
+        logging.info("Checking %s", filepath)
+        with open(filepath, "rt") as f:
+            content = f.read()
+        for diagnostic in find_directive_typos(
+            content,
+            pathlib.Path(filepath),
+            threshold=_distance_threshold,
+        ):
+            print(diagnostic)
+
+
+if __name__ == "__main__":
+    main(sys.argv)

diff --git a/llvm/utils/filecheck_lint/filecheck_lint_test.py b/llvm/utils/filecheck_lint/filecheck_lint_test.py
index ddb2c0768c4f8..16f381d5b0455 100644
--- a/llvm/utils/filecheck_lint/filecheck_lint_test.py
+++ b/llvm/utils/filecheck_lint/filecheck_lint_test.py
@@ -11,68 +11,72 @@
 
 
 class TestParser(unittest.TestCase):
+    def test_parse_all_additional_prefixes(self):
+        def run(content, expected_prefixes):
+            prefixes = set(fcl.parse_custom_prefixes(content))
+            for prefix in expected_prefixes:
+                self.assertIn(prefix, prefixes)
 
-  def test_parse_all_additional_prefixes(self):
-
-    def run(content, expected_prefixes):
-      prefixes = set(fcl.parse_custom_prefixes(content))
-      for prefix in expected_prefixes:
-        self.assertIn(prefix, prefixes)
-
-    for content, expected_prefixes in [
-        ('-check-prefix=PREFIX', {'PREFIX'}),
-        ('-check-prefix=\'PREFIX\'', {'PREFIX'}),
-        ('-check-prefix="PREFIX"', {'PREFIX'}),
-        ('-check-prefix PREFIX', {'PREFIX'}),
-        ('-check-prefix      PREFIX', {'PREFIX'}),
-        ('-check-prefixes=PREFIX1,PREFIX2', {'PREFIX1', 'PREFIX2'}),
-        ('-check-prefixes PREFIX1,PREFIX2', {'PREFIX1', 'PREFIX2'}),
-        (
-            """-check-prefix=PREFIX1 -check-prefix PREFIX2
+        for content, expected_prefixes in [
+            ("-check-prefix=PREFIX", {"PREFIX"}),
+            ("-check-prefix='PREFIX'", {"PREFIX"}),
+            ('-check-prefix="PREFIX"', {"PREFIX"}),
+            ("-check-prefix PREFIX", {"PREFIX"}),
+            ("-check-prefix      PREFIX", {"PREFIX"}),
+            ("-check-prefixes=PREFIX1,PREFIX2", {"PREFIX1", "PREFIX2"}),
+            ("-check-prefixes PREFIX1,PREFIX2", {"PREFIX1", "PREFIX2"}),
+            (
+                """-check-prefix=PREFIX1 -check-prefix PREFIX2
             -check-prefixes=PREFIX3,PREFIX4 -check-prefix=PREFIX5
             -check-prefixes PREFIX6,PREFIX7 -check-prefixes=PREFIX8',
          """,  # pylint: disable=bad-continuation
-            {f'PREFIX{i}' for i in range(1, 9)}),
-    ]:
-      run(content, expected_prefixes)
+                {f"PREFIX{i}" for i in range(1, 9)},
+            ),
+        ]:
+            run(content, expected_prefixes)
 
-  def test_additional_prefixes_uniquely(self):
-    lines = ['--check-prefix=SOME-PREFIX', '--check-prefix=SOME-PREFIX']
-    prefixes = set(fcl.parse_custom_prefixes('\n'.join(lines)))
-    assert len(prefixes) == 1
+    def test_additional_prefixes_uniquely(self):
+        lines = ["--check-prefix=SOME-PREFIX", "--check-prefix=SOME-PREFIX"]
+        prefixes = set(fcl.parse_custom_prefixes("\n".join(lines)))
+        assert len(prefixes) == 1
 
 
 class TestTypoDetection(unittest.TestCase):
+    def test_find_potential_directives_comment_prefix(self):
+        lines = ["junk; CHCK1:", "junk// CHCK2:", "SOME CHCK3:"]
+        content = "\n".join(lines)
 
-  def test_find_potential_directives_comment_prefix(self):
-    lines = ['junk; CHCK1:', 'junk// CHCK2:', 'SOME CHCK3:']
-    content = '\n'.join(lines)
-
-    results = list(fcl.find_potential_directives(content))
-    assert len(results) == 3
-    pos, match = results[0]
-    assert (pos.line == 1 and
-            pos.start_column == len('junk; ') + 1 and
-            pos.end_column == len(lines[0]) - 1)
-    assert match == 'CHCK1'
+        results = list(fcl.find_potential_directives(content))
+        assert len(results) == 3
+        pos, match = results[0]
+        assert (
+            pos.line == 1
+            and pos.start_column == len("junk; ") + 1
+            and pos.end_column == len(lines[0]) - 1
+        )
+        assert match == "CHCK1"
 
-    pos, match = results[1]
-    assert (pos.line == 2 and
-            pos.start_column == len('junk// ') + 1 and
-            pos.end_column == len(lines[1]) - 1)
-    assert match == 'CHCK2'
+        pos, match = results[1]
+        assert (
+            pos.line == 2
+            and pos.start_column == len("junk// ") + 1
+            and pos.end_column == len(lines[1]) - 1
+        )
+        assert match == "CHCK2"
 
-    pos, match = results[2]
-    assert (pos.line == 3 and
-            pos.start_column == 1 and
-            pos.end_column == len(lines[2]) - 1)
-    assert match == 'SOME CHCK3'
+        pos, match = results[2]
+        assert (
+            pos.line == 3
+            and pos.start_column == 1
+            and pos.end_column == len(lines[2]) - 1
+        )
+        assert match == "SOME CHCK3"
 
-  def test_levenshtein(self):
-    for s1, s2, distance in [
-        ('Levenshtein', 'Levenstin', 2),  # 2 insertions
-        ('Levenshtein', 'Levenstherin', 3),  # 1 insertion, 2 deletions
-        ('Levenshtein', 'Lenvinshtein', 2),  # 1 deletion, 1 substitution
-        ('Levenshtein', 'Levenshtein', 0),  # identical strings
-    ]:
-      assert fcl.levenshtein(s1, s2) == distance
+    def test_levenshtein(self):
+        for s1, s2, distance in [
+            ("Levenshtein", "Levenstin", 2),  # 2 insertions
+            ("Levenshtein", "Levenstherin", 3),  # 1 insertion, 2 deletions
+            ("Levenshtein", "Lenvinshtein", 2),  # 1 deletion, 1 substitution
+            ("Levenshtein", "Levenshtein", 0),  # identical strings
+        ]:
+            assert fcl.levenshtein(s1, s2) == distance

diff --git a/llvm/utils/gdb-scripts/prettyprinters.py b/llvm/utils/gdb-scripts/prettyprinters.py
index 1fdf4fe781c74..1016467fc0974 100644
--- a/llvm/utils/gdb-scripts/prettyprinters.py
+++ b/llvm/utils/gdb-scripts/prettyprinters.py
@@ -5,495 +5,550 @@
 import gdb.printing
 import gdb.types
 
+
 class Iterator:
-  def __iter__(self):
-    return self
+    def __iter__(self):
+        return self
+
+    if sys.version_info.major == 2:
 
-  if sys.version_info.major == 2:
-      def next(self):
-        return self.__next__()
+        def next(self):
+            return self.__next__()
+
+    def children(self):
+        return self
 
-  def children(self):
-    return self
 
 class SmallStringPrinter:
-  """Print an llvm::SmallString object."""
+    """Print an llvm::SmallString object."""
+
+    def __init__(self, val):
+        self.val = val
 
-  def __init__(self, val):
-    self.val = val
+    def to_string(self):
+        data = self.val["BeginX"].cast(gdb.lookup_type("char").pointer())
+        length = self.val["Size"]
+        return data.lazy_string(length=length)
 
-  def to_string(self):
-    data = self.val['BeginX'].cast(gdb.lookup_type('char').pointer())
-    length = self.val['Size']
-    return data.lazy_string(length=length)
+    def display_hint(self):
+        return "string"
 
-  def display_hint (self):
-    return 'string'
 
 class StringRefPrinter:
-  """Print an llvm::StringRef object."""
+    """Print an llvm::StringRef object."""
 
-  def __init__(self, val):
-    self.val = val
+    def __init__(self, val):
+        self.val = val
 
-  def to_string(self):
-    data = self.val['Data']
-    length = self.val['Length']
-    return data.lazy_string(length=length)
+    def to_string(self):
+        data = self.val["Data"]
+        length = self.val["Length"]
+        return data.lazy_string(length=length)
+
+    def display_hint(self):
+        return "string"
 
-  def display_hint(self):
-    return 'string'
 
 class SmallVectorPrinter(Iterator):
-  """Print an llvm::SmallVector object."""
+    """Print an llvm::SmallVector object."""
+
+    def __init__(self, val):
+        self.val = val
+        t = val.type.template_argument(0).pointer()
+        self.begin = val["BeginX"].cast(t)
+        self.size = val["Size"]
+        self.i = 0
 
-  def __init__(self, val):
-    self.val = val
-    t = val.type.template_argument(0).pointer()
-    self.begin = val['BeginX'].cast(t)
-    self.size = val['Size']
-    self.i = 0
+    def __next__(self):
+        if self.i == self.size:
+            raise StopIteration
+        ret = "[{}]".format(self.i), (self.begin + self.i).dereference()
+        self.i += 1
+        return ret
 
-  def __next__(self):
-    if self.i == self.size:
-      raise StopIteration
-    ret = '[{}]'.format(self.i), (self.begin+self.i).dereference()
-    self.i += 1
-    return ret
+    def to_string(self):
+        return "llvm::SmallVector of Size {}, Capacity {}".format(
+            self.size, self.val["Capacity"]
+        )
 
-  def to_string(self):
-    return 'llvm::SmallVector of Size {}, Capacity {}'.format(self.size, self.val['Capacity'])
+    def display_hint(self):
+        return "array"
 
-  def display_hint (self):
-    return 'array'
 
 class ArrayRefPrinter:
-  """Print an llvm::ArrayRef object."""
+    """Print an llvm::ArrayRef object."""
 
-  class _iterator:
-    def __init__(self, begin, end):
-      self.cur = begin
-      self.end = end
-      self.count = 0
+    class _iterator:
+        def __init__(self, begin, end):
+            self.cur = begin
+            self.end = end
+            self.count = 0
 
-    def __iter__(self):
-      return self
+        def __iter__(self):
+            return self
 
-    def __next__(self):
-      if self.cur == self.end:
-        raise StopIteration
-      count = self.count
-      self.count = self.count + 1
-      cur = self.cur
-      self.cur = self.cur + 1
-      return '[%d]' % count, cur.dereference()
+        def __next__(self):
+            if self.cur == self.end:
+                raise StopIteration
+            count = self.count
+            self.count = self.count + 1
+            cur = self.cur
+            self.cur = self.cur + 1
+            return "[%d]" % count, cur.dereference()
 
-    if sys.version_info.major == 2:
-        next = __next__
+        if sys.version_info.major == 2:
+            next = __next__
 
-  def __init__(self, val):
-    self.val = val
+    def __init__(self, val):
+        self.val = val
 
-  def children(self):
-    data = self.val['Data']
-    return self._iterator(data, data + self.val['Length'])
+    def children(self):
+        data = self.val["Data"]
+        return self._iterator(data, data + self.val["Length"])
 
-  def to_string(self):
-    return 'llvm::ArrayRef of length %d' % (self.val['Length'])
+    def to_string(self):
+        return "llvm::ArrayRef of length %d" % (self.val["Length"])
+
+    def display_hint(self):
+        return "array"
 
-  def display_hint (self):
-    return 'array'
 
 class ExpectedPrinter(Iterator):
-  """Print an llvm::Expected object."""
+    """Print an llvm::Expected object."""
 
-  def __init__(self, val):
-    self.val = val
+    def __init__(self, val):
+        self.val = val
 
-  def __next__(self):
-    val = self.val
-    if val is None:
-      raise StopIteration
-    self.val = None
-    if val['HasError']:
-      return ('error', val['ErrorStorage'].address.cast(
-          gdb.lookup_type('llvm::ErrorInfoBase').pointer()).dereference())
-    return ('value', val['TStorage'].address.cast(
-        val.type.template_argument(0).pointer()).dereference())
+    def __next__(self):
+        val = self.val
+        if val is None:
+            raise StopIteration
+        self.val = None
+        if val["HasError"]:
+            return (
+                "error",
+                val["ErrorStorage"]
+                .address.cast(gdb.lookup_type("llvm::ErrorInfoBase").pointer())
+                .dereference(),
+            )
+        return (
+            "value",
+            val["TStorage"]
+            .address.cast(val.type.template_argument(0).pointer())
+            .dereference(),
+        )
+
+    def to_string(self):
+        return "llvm::Expected{}".format(" is error" if self.val["HasError"] else "")
 
-  def to_string(self):
-    return 'llvm::Expected{}'.format(' is error' if self.val['HasError'] else '')
 
 class OptionalPrinter(Iterator):
-  """Print an llvm::Optional object."""
-
-  def __init__(self, val):
-    self.val = val
-
-  def __next__(self):
-    val = self.val
-    if val is None:
-      raise StopIteration
-    self.val = None
-    if not val['Storage']['hasVal']:
-      raise StopIteration
-    return ('value', val['Storage']['val'])
+    """Print an llvm::Optional object."""
 
-  def to_string(self):
-    return 'llvm::Optional{}'.format('' if self.val['Storage']['hasVal'] else ' is not initialized')
-
-class DenseMapPrinter:
-  "Print a DenseMap"
-
-  class _iterator:
-    def __init__(self, key_info_t, begin, end):
-      self.key_info_t = key_info_t
-      self.cur = begin
-      self.end = end
-      self.advancePastEmptyBuckets()
-      self.first = True
-
-    def __iter__(self):
-      return self
-
-    def advancePastEmptyBuckets(self):
-      # disabled until the comments below can be addressed
-      # keeping as notes/posterity/hints for future contributors
-      return
-      n = self.key_info_t.name
-      is_equal = gdb.parse_and_eval(n + '::isEqual')
-      empty = gdb.parse_and_eval(n + '::getEmptyKey()')
-      tombstone = gdb.parse_and_eval(n + '::getTombstoneKey()')
-      # the following is invalid, GDB fails with:
-      #   Python Exception <class 'gdb.error'> Attempt to take address of value
-      #   not located in memory.
-      # because isEqual took parameter (for the unsigned long key I was testing)
-      # by const ref, and GDB
-      # It's also not entirely general - we should be accessing the "getFirst()"
-      # member function, not the 'first' member variable, but I've yet to figure
-      # out how to find/call member functions (especially (const) overloaded
-      # ones) on a gdb.Value.
-      while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
-        self.cur = self.cur + 1
+    def __init__(self, val):
+        self.val = val
 
     def __next__(self):
-      if self.cur == self.end:
-        raise StopIteration
-      cur = self.cur
-      v = cur.dereference()['first' if self.first else 'second']
-      if not self.first:
-        self.cur = self.cur + 1
-        self.advancePastEmptyBuckets()
-        self.first = True
-      else:
-        self.first = False
-      return 'x', v
-
-    if sys.version_info.major == 2:
-        next = __next__
+        val = self.val
+        if val is None:
+            raise StopIteration
+        self.val = None
+        if not val["Storage"]["hasVal"]:
+            raise StopIteration
+        return ("value", val["Storage"]["val"])
 
-  def __init__(self, val):
-    self.val = val
+    def to_string(self):
+        return "llvm::Optional{}".format(
+            "" if self.val["Storage"]["hasVal"] else " is not initialized"
+        )
 
-  def children(self):
-    t = self.val.type.template_argument(3).pointer()
-    begin = self.val['Buckets'].cast(t)
-    end = (begin + self.val['NumBuckets']).cast(t)
-    return self._iterator(self.val.type.template_argument(2), begin, end)
 
-  def to_string(self):
-    return 'llvm::DenseMap with %d elements' % (self.val['NumEntries'])
+class DenseMapPrinter:
+    "Print a DenseMap"
+
+    class _iterator:
+        def __init__(self, key_info_t, begin, end):
+            self.key_info_t = key_info_t
+            self.cur = begin
+            self.end = end
+            self.advancePastEmptyBuckets()
+            self.first = True
+
+        def __iter__(self):
+            return self
+
+        def advancePastEmptyBuckets(self):
+            # disabled until the comments below can be addressed
+            # keeping as notes/posterity/hints for future contributors
+            return
+            n = self.key_info_t.name
+            is_equal = gdb.parse_and_eval(n + "::isEqual")
+            empty = gdb.parse_and_eval(n + "::getEmptyKey()")
+            tombstone = gdb.parse_and_eval(n + "::getTombstoneKey()")
+            # the following is invalid, GDB fails with:
+            #   Python Exception <class 'gdb.error'> Attempt to take address of value
+            #   not located in memory.
+            # because isEqual took parameter (for the unsigned long key I was testing)
+            # by const ref, and GDB
+            # It's also not entirely general - we should be accessing the "getFirst()"
+            # member function, not the 'first' member variable, but I've yet to figure
+            # out how to find/call member functions (especially (const) overloaded
+            # ones) on a gdb.Value.
+            while self.cur != self.end and (
+                is_equal(self.cur.dereference()["first"], empty)
+                or is_equal(self.cur.dereference()["first"], tombstone)
+            ):
+                self.cur = self.cur + 1
+
+        def __next__(self):
+            if self.cur == self.end:
+                raise StopIteration
+            cur = self.cur
+            v = cur.dereference()["first" if self.first else "second"]
+            if not self.first:
+                self.cur = self.cur + 1
+                self.advancePastEmptyBuckets()
+                self.first = True
+            else:
+                self.first = False
+            return "x", v
+
+        if sys.version_info.major == 2:
+            next = __next__
+
+    def __init__(self, val):
+        self.val = val
+
+    def children(self):
+        t = self.val.type.template_argument(3).pointer()
+        begin = self.val["Buckets"].cast(t)
+        end = (begin + self.val["NumBuckets"]).cast(t)
+        return self._iterator(self.val.type.template_argument(2), begin, end)
+
+    def to_string(self):
+        return "llvm::DenseMap with %d elements" % (self.val["NumEntries"])
+
+    def display_hint(self):
+        return "map"
 
-  def display_hint(self):
-    return 'map'
 
 class StringMapPrinter:
-  "Print a StringMap"
+    "Print a StringMap"
 
-  def __init__(self, val):
-    self.val = val
+    def __init__(self, val):
+        self.val = val
 
-  def children(self):
-    it = self.val['TheTable']
-    end = (it + self.val['NumBuckets'])
-    value_ty = self.val.type.template_argument(0)
-    entry_base_ty = gdb.lookup_type('llvm::StringMapEntryBase')
-    tombstone = gdb.parse_and_eval('llvm::StringMapImpl::TombstoneIntVal');
+    def children(self):
+        it = self.val["TheTable"]
+        end = it + self.val["NumBuckets"]
+        value_ty = self.val.type.template_argument(0)
+        entry_base_ty = gdb.lookup_type("llvm::StringMapEntryBase")
+        tombstone = gdb.parse_and_eval("llvm::StringMapImpl::TombstoneIntVal")
 
-    while it != end:
-      it_deref = it.dereference()
-      if it_deref == 0 or it_deref == tombstone:
-        it = it + 1
-        continue
+        while it != end:
+            it_deref = it.dereference()
+            if it_deref == 0 or it_deref == tombstone:
+                it = it + 1
+                continue
 
-      entry_ptr = it_deref.cast(entry_base_ty.pointer())
-      entry = entry_ptr.dereference()
+            entry_ptr = it_deref.cast(entry_base_ty.pointer())
+            entry = entry_ptr.dereference()
 
-      str_len = entry['keyLength']
-      value_ptr = (entry_ptr + 1).cast(value_ty.pointer())
-      str_data = (entry_ptr + 1).cast(gdb.lookup_type('uintptr_t')) + max(value_ty.sizeof, entry_base_ty.alignof)
-      str_data = str_data.cast(gdb.lookup_type('char').const().pointer())
-      string_ref = gdb.Value(struct.pack('PN', int(str_data), int(str_len)), gdb.lookup_type('llvm::StringRef'))
-      yield 'key', string_ref
+            str_len = entry["keyLength"]
+            value_ptr = (entry_ptr + 1).cast(value_ty.pointer())
+            str_data = (entry_ptr + 1).cast(gdb.lookup_type("uintptr_t")) + max(
+                value_ty.sizeof, entry_base_ty.alignof
+            )
+            str_data = str_data.cast(gdb.lookup_type("char").const().pointer())
+            string_ref = gdb.Value(
+                struct.pack("PN", int(str_data), int(str_len)),
+                gdb.lookup_type("llvm::StringRef"),
+            )
+            yield "key", string_ref
 
-      value = value_ptr.dereference()
-      yield 'value', value
+            value = value_ptr.dereference()
+            yield "value", value
 
-      it = it + 1
+            it = it + 1
 
-  def to_string(self):
-    return 'llvm::StringMap with %d elements' % (self.val['NumItems'])
+    def to_string(self):
+        return "llvm::StringMap with %d elements" % (self.val["NumItems"])
+
+    def display_hint(self):
+        return "map"
 
-  def display_hint(self):
-    return 'map'
 
 class TwinePrinter:
-  "Print a Twine"
+    "Print a Twine"
+
+    def __init__(self, val):
+        self._val = val
 
-  def __init__(self, val):
-    self._val = val
+    def display_hint(self):
+        return "string"
 
-  def display_hint(self):
-    return 'string'
+    def string_from_pretty_printer_lookup(self, val):
+        """Lookup the default pretty-printer for val and use it.
 
-  def string_from_pretty_printer_lookup(self, val):
-    '''Lookup the default pretty-printer for val and use it.
+        If no pretty-printer is defined for the type of val, print an error and
+        return a placeholder string."""
 
-    If no pretty-printer is defined for the type of val, print an error and
-    return a placeholder string.'''
+        pp = gdb.default_visualizer(val)
+        if pp:
+            s = pp.to_string()
 
-    pp = gdb.default_visualizer(val)
-    if pp:
-      s = pp.to_string()
+            # The pretty-printer may return a LazyString instead of an actual Python
+            # string.  Convert it to a Python string.  However, GDB doesn't seem to
+            # register the LazyString type, so we can't check
+            # "type(s) == gdb.LazyString".
+            if "LazyString" in type(s).__name__:
+                s = s.value().string()
 
-      # The pretty-printer may return a LazyString instead of an actual Python
-      # string.  Convert it to a Python string.  However, GDB doesn't seem to
-      # register the LazyString type, so we can't check
-      # "type(s) == gdb.LazyString".
-      if 'LazyString' in type(s).__name__:
-        s = s.value().string()
+        else:
+            print(
+                (
+                    "No pretty printer for {} found. The resulting Twine "
+                    + "representation will be incomplete."
+                ).format(val.type.name)
+            )
+            s = "(missing {})".format(val.type.name)
 
-    else:
-      print(('No pretty printer for {} found. The resulting Twine ' +
-             'representation will be incomplete.').format(val.type.name))
-      s = '(missing {})'.format(val.type.name)
+        return s
 
-    return s
+    def is_twine_kind(self, kind, expected):
+        if not kind.endswith(expected):
+            return False
+        # apparently some GDB versions add the NodeKind:: namespace
+        # (happens for me on GDB 7.11)
+        return kind in (
+            "llvm::Twine::" + expected,
+            "llvm::Twine::NodeKind::" + expected,
+        )
 
-  def is_twine_kind(self, kind, expected):
-    if not kind.endswith(expected):
-      return False
-    # apparently some GDB versions add the NodeKind:: namespace
-    # (happens for me on GDB 7.11)
-    return kind in ('llvm::Twine::' + expected,
-                    'llvm::Twine::NodeKind::' + expected)
+    def string_from_child(self, child, kind):
+        """Return the string representation of the Twine::Child child."""
 
-  def string_from_child(self, child, kind):
-    '''Return the string representation of the Twine::Child child.'''
+        if self.is_twine_kind(kind, "EmptyKind") or self.is_twine_kind(
+            kind, "NullKind"
+        ):
+            return ""
 
-    if self.is_twine_kind(kind, 'EmptyKind') or self.is_twine_kind(kind, 'NullKind'):
-      return ''
+        if self.is_twine_kind(kind, "TwineKind"):
+            return self.string_from_twine_object(child["twine"].dereference())
 
-    if self.is_twine_kind(kind, 'TwineKind'):
-      return self.string_from_twine_object(child['twine'].dereference())
+        if self.is_twine_kind(kind, "CStringKind"):
+            return child["cString"].string()
 
-    if self.is_twine_kind(kind, 'CStringKind'):
-      return child['cString'].string()
+        if self.is_twine_kind(kind, "StdStringKind"):
+            val = child["stdString"].dereference()
+            return self.string_from_pretty_printer_lookup(val)
 
-    if self.is_twine_kind(kind, 'StdStringKind'):
-      val = child['stdString'].dereference()
-      return self.string_from_pretty_printer_lookup(val)
+        if self.is_twine_kind(kind, "PtrAndLengthKind"):
+            val = child["ptrAndLength"]
+            data = val["ptr"]
+            length = val["length"]
+            return data.string(length=length)
 
-    if self.is_twine_kind(kind, 'PtrAndLengthKind'):
-      val = child['ptrAndLength']
-      data = val['ptr']
-      length = val['length']
-      return data.string(length=length)
+        if self.is_twine_kind(kind, "CharKind"):
+            return chr(child["character"])
 
-    if self.is_twine_kind(kind, 'CharKind'):
-      return chr(child['character'])
+        if self.is_twine_kind(kind, "DecUIKind"):
+            return str(child["decUI"])
 
-    if self.is_twine_kind(kind, 'DecUIKind'):
-      return str(child['decUI'])
+        if self.is_twine_kind(kind, "DecIKind"):
+            return str(child["decI"])
 
-    if self.is_twine_kind(kind, 'DecIKind'):
-      return str(child['decI'])
+        if self.is_twine_kind(kind, "DecULKind"):
+            return str(child["decUL"].dereference())
 
-    if self.is_twine_kind(kind, 'DecULKind'):
-      return str(child['decUL'].dereference())
+        if self.is_twine_kind(kind, "DecLKind"):
+            return str(child["decL"].dereference())
 
-    if self.is_twine_kind(kind, 'DecLKind'):
-      return str(child['decL'].dereference())
+        if self.is_twine_kind(kind, "DecULLKind"):
+            return str(child["decULL"].dereference())
 
-    if self.is_twine_kind(kind, 'DecULLKind'):
-      return str(child['decULL'].dereference())
+        if self.is_twine_kind(kind, "DecLLKind"):
+            return str(child["decLL"].dereference())
 
-    if self.is_twine_kind(kind, 'DecLLKind'):
-      return str(child['decLL'].dereference())
+        if self.is_twine_kind(kind, "UHexKind"):
+            val = child["uHex"].dereference()
+            return hex(int(val))
 
-    if self.is_twine_kind(kind, 'UHexKind'):
-      val = child['uHex'].dereference()
-      return hex(int(val))
+        print(
+            (
+                "Unhandled NodeKind {} in Twine pretty-printer. The result will be "
+                "incomplete."
+            ).format(kind)
+        )
 
-    print(('Unhandled NodeKind {} in Twine pretty-printer. The result will be '
-           'incomplete.').format(kind))
+        return "(unhandled {})".format(kind)
 
-    return '(unhandled {})'.format(kind)
+    def string_from_twine_object(self, twine):
+        """Return the string representation of the Twine object twine."""
 
-  def string_from_twine_object(self, twine):
-    '''Return the string representation of the Twine object twine.'''
+        lhs = twine["LHS"]
+        rhs = twine["RHS"]
 
-    lhs = twine['LHS']
-    rhs = twine['RHS']
+        lhs_kind = str(twine["LHSKind"])
+        rhs_kind = str(twine["RHSKind"])
 
-    lhs_kind = str(twine['LHSKind'])
-    rhs_kind = str(twine['RHSKind'])
+        lhs_str = self.string_from_child(lhs, lhs_kind)
+        rhs_str = self.string_from_child(rhs, rhs_kind)
 
-    lhs_str = self.string_from_child(lhs, lhs_kind)
-    rhs_str = self.string_from_child(rhs, rhs_kind)
+        return lhs_str + rhs_str
 
-    return lhs_str + rhs_str
+    def to_string(self):
+        return self.string_from_twine_object(self._val)
 
-  def to_string(self):
-    return self.string_from_twine_object(self._val)
+    def display_hint(self):
+        return "string"
 
-  def display_hint(self):
-    return 'string'
 
 def get_pointer_int_pair(val):
-  """Get tuple from llvm::PointerIntPair."""
-  info_name = val.type.template_argument(4).strip_typedefs().name
-  # Note: this throws a gdb.error if the info type is not used (by means of a
-  # call to getPointer() or similar) in the current translation unit.
-  enum_type = gdb.lookup_type(info_name + '::MaskAndShiftConstants')
-  enum_dict = gdb.types.make_enum_dict(enum_type)
-  ptr_mask = enum_dict[info_name + '::PointerBitMask']
-  int_shift = enum_dict[info_name + '::IntShift']
-  int_mask = enum_dict[info_name + '::IntMask']
-  pair_union = val['Value']
-  pointer = (pair_union & ptr_mask)
-  value = ((pair_union >> int_shift) & int_mask)
-  return (pointer, value)
+    """Get tuple from llvm::PointerIntPair."""
+    info_name = val.type.template_argument(4).strip_typedefs().name
+    # Note: this throws a gdb.error if the info type is not used (by means of a
+    # call to getPointer() or similar) in the current translation unit.
+    enum_type = gdb.lookup_type(info_name + "::MaskAndShiftConstants")
+    enum_dict = gdb.types.make_enum_dict(enum_type)
+    ptr_mask = enum_dict[info_name + "::PointerBitMask"]
+    int_shift = enum_dict[info_name + "::IntShift"]
+    int_mask = enum_dict[info_name + "::IntMask"]
+    pair_union = val["Value"]
+    pointer = pair_union & ptr_mask
+    value = (pair_union >> int_shift) & int_mask
+    return (pointer, value)
+
 
 class PointerIntPairPrinter:
-  """Print a PointerIntPair."""
+    """Print a PointerIntPair."""
+
+    def __init__(self, pointer, value):
+        self.pointer = pointer
+        self.value = value
 
-  def __init__(self, pointer, value):
-    self.pointer = pointer
-    self.value = value
+    def children(self):
+        yield ("pointer", self.pointer)
+        yield ("value", self.value)
 
-  def children(self):
-    yield ('pointer', self.pointer)
-    yield ('value', self.value)
+    def to_string(self):
+        return "(%s, %s)" % (self.pointer.type, self.value.type)
 
-  def to_string(self):
-    return '(%s, %s)' % (self.pointer.type, self.value.type)
 
 def make_pointer_int_pair_printer(val):
-  """Factory for an llvm::PointerIntPair printer."""
-  try:
-    pointer, value = get_pointer_int_pair(val)
-  except gdb.error:
-    return None  # If PointerIntPair cannot be analyzed, print as raw value.
-  pointer_type = val.type.template_argument(0)
-  value_type = val.type.template_argument(2)
-  return PointerIntPairPrinter(pointer.cast(pointer_type),
-                               value.cast(value_type))
+    """Factory for an llvm::PointerIntPair printer."""
+    try:
+        pointer, value = get_pointer_int_pair(val)
+    except gdb.error:
+        return None  # If PointerIntPair cannot be analyzed, print as raw value.
+    pointer_type = val.type.template_argument(0)
+    value_type = val.type.template_argument(2)
+    return PointerIntPairPrinter(pointer.cast(pointer_type), value.cast(value_type))
+
 
 class PointerUnionPrinter:
-  """Print a PointerUnion."""
+    """Print a PointerUnion."""
+
+    def __init__(self, pointer):
+        self.pointer = pointer
 
-  def __init__(self, pointer):
-    self.pointer = pointer
+    def children(self):
+        yield ("pointer", self.pointer)
 
-  def children(self):
-    yield ('pointer', self.pointer)
+    def to_string(self):
+        return "Containing %s" % self.pointer.type
 
-  def to_string(self):
-    return "Containing %s" % self.pointer.type
 
 def make_pointer_union_printer(val):
-  """Factory for an llvm::PointerUnion printer."""
-  try:
-    pointer, value = get_pointer_int_pair(val['Val'])
-  except gdb.error:
-    return None  # If PointerIntPair cannot be analyzed, print as raw value.
-  pointer_type = val.type.template_argument(int(value))
-  return PointerUnionPrinter(pointer.cast(pointer_type))
+    """Factory for an llvm::PointerUnion printer."""
+    try:
+        pointer, value = get_pointer_int_pair(val["Val"])
+    except gdb.error:
+        return None  # If PointerIntPair cannot be analyzed, print as raw value.
+    pointer_type = val.type.template_argument(int(value))
+    return PointerUnionPrinter(pointer.cast(pointer_type))
+
 
 class IlistNodePrinter:
-  """Print an llvm::ilist_node object."""
-
-  def __init__(self, val):
-    impl_type = val.type.fields()[0].type
-    base_type = impl_type.fields()[0].type
-    derived_type = val.type.template_argument(0)
-
-    def get_prev_and_sentinel(base):
-      # One of Prev and PrevAndSentinel exists. Depending on #defines used to
-      # compile LLVM, the base_type's template argument is either true of false.
-      if base_type.template_argument(0):
-        return get_pointer_int_pair(base['PrevAndSentinel'])
-      return base['Prev'], None
-
-    # Casts a base_type pointer to the appropriate derived type.
-    def cast_pointer(pointer):
-      sentinel = get_prev_and_sentinel(pointer.dereference())[1]
-      pointer = pointer.cast(impl_type.pointer())
-      if sentinel:
-          return pointer
-      return pointer.cast(derived_type.pointer())
-
-    # Repeated cast becaue val.type's base_type is ambiguous when using tags.
-    base = val.cast(impl_type).cast(base_type)
-    (prev, sentinel) = get_prev_and_sentinel(base)
-    prev = prev.cast(base_type.pointer())
-    self.prev = cast_pointer(prev)
-    self.next = cast_pointer(val['Next'])
-    self.sentinel = sentinel
-
-  def children(self):
-    if self.sentinel:
-      yield 'sentinel', 'yes'
-    yield 'prev', self.prev
-    yield 'next', self.next
+    """Print an llvm::ilist_node object."""
+
+    def __init__(self, val):
+        impl_type = val.type.fields()[0].type
+        base_type = impl_type.fields()[0].type
+        derived_type = val.type.template_argument(0)
+
+        def get_prev_and_sentinel(base):
+            # One of Prev and PrevAndSentinel exists. Depending on #defines used to
+            # compile LLVM, the base_type's template argument is either true or false.
+            if base_type.template_argument(0):
+                return get_pointer_int_pair(base["PrevAndSentinel"])
+            return base["Prev"], None
+
+        # Casts a base_type pointer to the appropriate derived type.
+        def cast_pointer(pointer):
+            sentinel = get_prev_and_sentinel(pointer.dereference())[1]
+            pointer = pointer.cast(impl_type.pointer())
+            if sentinel:
+                return pointer
+            return pointer.cast(derived_type.pointer())
+
+        # Repeated cast because val.type's base_type is ambiguous when using tags.
+        base = val.cast(impl_type).cast(base_type)
+        (prev, sentinel) = get_prev_and_sentinel(base)
+        prev = prev.cast(base_type.pointer())
+        self.prev = cast_pointer(prev)
+        self.next = cast_pointer(val["Next"])
+        self.sentinel = sentinel
+
+    def children(self):
+        if self.sentinel:
+            yield "sentinel", "yes"
+        yield "prev", self.prev
+        yield "next", self.next
+
 
 class IlistPrinter:
-  """Print an llvm::simple_ilist or llvm::iplist object."""
+    """Print an llvm::simple_ilist or llvm::iplist object."""
 
-  def __init__(self, val):
-    self.node_type = val.type.template_argument(0)
-    sentinel = val['Sentinel']
-    # First field is common base type of sentinel and ilist_node.
-    base_type = sentinel.type.fields()[0].type
-    self.sentinel = sentinel.address.cast(base_type.pointer())
+    def __init__(self, val):
+        self.node_type = val.type.template_argument(0)
+        sentinel = val["Sentinel"]
+        # First field is common base type of sentinel and ilist_node.
+        base_type = sentinel.type.fields()[0].type
+        self.sentinel = sentinel.address.cast(base_type.pointer())
 
-  def _pointers(self):
-    pointer = self.sentinel
-    while True:
-      pointer = pointer['Next'].cast(pointer.type)
-      if pointer == self.sentinel:
-        return
-      yield pointer.cast(self.node_type.pointer())
+    def _pointers(self):
+        pointer = self.sentinel
+        while True:
+            pointer = pointer["Next"].cast(pointer.type)
+            if pointer == self.sentinel:
+                return
+            yield pointer.cast(self.node_type.pointer())
 
-  def children(self):
-    for k, v in enumerate(self._pointers()):
-      yield ('[%d]' % k, v.dereference())
+    def children(self):
+        for k, v in enumerate(self._pointers()):
+            yield ("[%d]" % k, v.dereference())
 
 
 pp = gdb.printing.RegexpCollectionPrettyPrinter("LLVMSupport")
-pp.add_printer('llvm::SmallString', '^llvm::SmallString<.*>$', SmallStringPrinter)
-pp.add_printer('llvm::StringRef', '^llvm::StringRef$', StringRefPrinter)
-pp.add_printer('llvm::SmallVectorImpl', '^llvm::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
-pp.add_printer('llvm::ArrayRef', '^llvm::(Mutable)?ArrayRef<.*>$', ArrayRefPrinter)
-pp.add_printer('llvm::Expected', '^llvm::Expected<.*>$', ExpectedPrinter)
-pp.add_printer('llvm::Optional', '^llvm::Optional<.*>$', OptionalPrinter)
-pp.add_printer('llvm::DenseMap', '^llvm::DenseMap<.*>$', DenseMapPrinter)
-pp.add_printer('llvm::StringMap', '^llvm::StringMap<.*>$', StringMapPrinter)
-pp.add_printer('llvm::Twine', '^llvm::Twine$', TwinePrinter)
-pp.add_printer('llvm::PointerIntPair', '^llvm::PointerIntPair<.*>$', make_pointer_int_pair_printer)
-pp.add_printer('llvm::PointerUnion', '^llvm::PointerUnion<.*>$', make_pointer_union_printer)
-pp.add_printer('llvm::ilist_node', '^llvm::ilist_node<.*>$', IlistNodePrinter)
-pp.add_printer('llvm::iplist', '^llvm::iplist<.*>$', IlistPrinter)
-pp.add_printer('llvm::simple_ilist', '^llvm::simple_ilist<.*>$', IlistPrinter)
+pp.add_printer("llvm::SmallString", "^llvm::SmallString<.*>$", SmallStringPrinter)
+pp.add_printer("llvm::StringRef", "^llvm::StringRef$", StringRefPrinter)
+pp.add_printer(
+    "llvm::SmallVectorImpl", "^llvm::SmallVector(Impl)?<.*>$", SmallVectorPrinter
+)
+pp.add_printer("llvm::ArrayRef", "^llvm::(Mutable)?ArrayRef<.*>$", ArrayRefPrinter)
+pp.add_printer("llvm::Expected", "^llvm::Expected<.*>$", ExpectedPrinter)
+pp.add_printer("llvm::Optional", "^llvm::Optional<.*>$", OptionalPrinter)
+pp.add_printer("llvm::DenseMap", "^llvm::DenseMap<.*>$", DenseMapPrinter)
+pp.add_printer("llvm::StringMap", "^llvm::StringMap<.*>$", StringMapPrinter)
+pp.add_printer("llvm::Twine", "^llvm::Twine$", TwinePrinter)
+pp.add_printer(
+    "llvm::PointerIntPair", "^llvm::PointerIntPair<.*>$", make_pointer_int_pair_printer
+)
+pp.add_printer(
+    "llvm::PointerUnion", "^llvm::PointerUnion<.*>$", make_pointer_union_printer
+)
+pp.add_printer("llvm::ilist_node", "^llvm::ilist_node<.*>$", IlistNodePrinter)
+pp.add_printer("llvm::iplist", "^llvm::iplist<.*>$", IlistPrinter)
+pp.add_printer("llvm::simple_ilist", "^llvm::simple_ilist<.*>$", IlistPrinter)
 gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)

diff --git a/llvm/utils/git/github-automation.py b/llvm/utils/git/github-automation.py
index 5d84a7a9d26c0..1790de1152a93 100755
--- a/llvm/utils/git/github-automation.py
+++ b/llvm/utils/git/github-automation.py
@@ -9,7 +9,7 @@
 # ==-------------------------------------------------------------------------==#
 
 import argparse
-from git import Repo # type: ignore
+from git import Repo  # type: ignore
 import github
 import os
 import re
@@ -18,8 +18,7 @@
 import time
 from typing import List, Optional
 
-beginner_comment = \
-"""
+beginner_comment = """
 Hi!
 
 This issue may be a good introductory issue for people new to working on LLVM. If you would like to work on this issue, your first steps are:
@@ -39,54 +38,58 @@
 If you have any further questions about this issue, don't hesitate to ask via a comment on this Github issue.
 """
 
-class IssueSubscriber:
 
+class IssueSubscriber:
     @property
     def team_name(self) -> str:
         return self._team_name
 
-    def __init__(self, token:str, repo:str, issue_number:int, label_name:str):
+    def __init__(self, token: str, repo: str, issue_number: int, label_name: str):
         self.repo = github.Github(token).get_repo(repo)
         self.org = github.Github(token).get_organization(self.repo.organization.login)
         self.issue = self.repo.get_issue(issue_number)
-        self._team_name = 'issue-subscribers-{}'.format(label_name).lower()
+        self._team_name = "issue-subscribers-{}".format(label_name).lower()
 
     def run(self) -> bool:
         for team in self.org.get_teams():
             if self.team_name != team.name.lower():
                 continue
 
-            comment = ''
-            if team.slug == 'issue-subscribers-good-first-issue':
-                comment = '{}\n'.format(beginner_comment)
+            comment = ""
+            if team.slug == "issue-subscribers-good-first-issue":
+                comment = "{}\n".format(beginner_comment)
 
-            comment += '@llvm/{}'.format(team.slug)
+            comment += "@llvm/{}".format(team.slug)
             self.issue.create_comment(comment)
             return True
         return False
 
-def setup_llvmbot_git(git_dir = '.'):
+
+def setup_llvmbot_git(git_dir="."):
     """
     Configure the git repo in `git_dir` with the llvmbot account so
     commits are attributed to llvmbot.
     """
     repo = Repo(git_dir)
     with repo.config_writer() as config:
-        config.set_value('user', 'name', 'llvmbot')
-        config.set_value('user', 'email', 'llvmbot at llvm.org')
+        config.set_value("user", "name", "llvmbot")
+        config.set_value("user", "email", "llvmbot at llvm.org")
+
 
-def phab_api_call(phab_token:str, url:str, args:dict) -> dict:
+def phab_api_call(phab_token: str, url: str, args: dict) -> dict:
     """
     Make an API call to the Phabricator web service and return a dictionary
     containing the json response.
     """
-    data = { "api.token" : phab_token }
+    data = {"api.token": phab_token}
     data.update(args)
-    response = requests.post(url, data = data)
+    response = requests.post(url, data=data)
     return response.json()
 
 
-def phab_login_to_github_login(phab_token:str, repo:github.Repository.Repository, phab_login:str) -> Optional[str]:
+def phab_login_to_github_login(
+    phab_token: str, repo: github.Repository.Repository, phab_login: str
+) -> Optional[str]:
     """
     Tries to translate a Phabricator login to a github login by
     finding a commit made in Phabricator's Differential.
@@ -99,19 +102,21 @@ def phab_login_to_github_login(phab_token:str, repo:github.Repository.Repository
     """
 
     args = {
-        "constraints[authors][0]" : phab_login,
+        "constraints[authors][0]": phab_login,
         # PHID for "LLVM Github Monorepo" repository
-        "constraints[repositories][0]" : "PHID-REPO-f4scjekhnkmh7qilxlcy",
-        "limit" : 1
+        "constraints[repositories][0]": "PHID-REPO-f4scjekhnkmh7qilxlcy",
+        "limit": 1,
     }
     # API documentation: https://reviews.llvm.org/conduit/method/diffusion.commit.search/
-    r = phab_api_call(phab_token, "https://reviews.llvm.org/api/diffusion.commit.search", args)
-    data = r['result']['data']
+    r = phab_api_call(
+        phab_token, "https://reviews.llvm.org/api/diffusion.commit.search", args
+    )
+    data = r["result"]["data"]
     if len(data) == 0:
         # Can't find any commits associated with this user
         return None
 
-    commit_sha = data[0]['fields']['identifier']
+    commit_sha = data[0]["fields"]["identifier"]
     committer = repo.get_commit(commit_sha).committer
     if not committer:
         # This committer had an email address GitHub could not recognize, so
@@ -120,36 +125,39 @@ def phab_login_to_github_login(phab_token:str, repo:github.Repository.Repository
         return None
     return committer.login
 
-def phab_get_commit_approvers(phab_token:str, commit:github.Commit.Commit) -> list:
-    args = { "corpus" : commit.commit.message }
+
+def phab_get_commit_approvers(phab_token: str, commit: github.Commit.Commit) -> list:
+    args = {"corpus": commit.commit.message}
     # API documentation: https://reviews.llvm.org/conduit/method/differential.parsecommitmessage/
-    r = phab_api_call(phab_token, "https://reviews.llvm.org/api/differential.parsecommitmessage", args)
-    review_id = r['result']['revisionIDFieldInfo']['value']
+    r = phab_api_call(
+        phab_token, "https://reviews.llvm.org/api/differential.parsecommitmessage", args
+    )
+    review_id = r["result"]["revisionIDFieldInfo"]["value"]
     if not review_id:
         # No Phabricator revision for this commit
         return []
 
-    args = {
-        'constraints[ids][0]' : review_id,
-        'attachments[reviewers]' : True
-    }
+    args = {"constraints[ids][0]": review_id, "attachments[reviewers]": True}
     # API documentation: https://reviews.llvm.org/conduit/method/differential.revision.search/
-    r = phab_api_call(phab_token, "https://reviews.llvm.org/api/differential.revision.search", args)
-    reviewers = r['result']['data'][0]['attachments']['reviewers']['reviewers']
+    r = phab_api_call(
+        phab_token, "https://reviews.llvm.org/api/differential.revision.search", args
+    )
+    reviewers = r["result"]["data"][0]["attachments"]["reviewers"]["reviewers"]
     accepted = []
     for reviewer in reviewers:
-        if reviewer['status'] != 'accepted':
+        if reviewer["status"] != "accepted":
             continue
-        phid = reviewer['reviewerPHID']
-        args = { 'constraints[phids][0]' : phid }
+        phid = reviewer["reviewerPHID"]
+        args = {"constraints[phids][0]": phid}
         # API documentation: https://reviews.llvm.org/conduit/method/user.search/
         r = phab_api_call(phab_token, "https://reviews.llvm.org/api/user.search", args)
-        accepted.append(r['result']['data'][0]['fields']['username'])
+        accepted.append(r["result"]["data"][0]["fields"]["username"])
     return accepted
 
+
 class ReleaseWorkflow:
 
-    CHERRY_PICK_FAILED_LABEL = 'release:cherry-pick-failed'
+    CHERRY_PICK_FAILED_LABEL = "release:cherry-pick-failed"
 
     """
     This class implements the sub-commands for the release-workflow command.
@@ -161,9 +169,16 @@ class ReleaseWorkflow:
     based on the text in stdin.
     """
 
-    def __init__(self, token:str, repo:str, issue_number:int,
-                       branch_repo_name:str, branch_repo_token:str,
-                       llvm_project_dir:str, phab_token:str) -> None:
+    def __init__(
+        self,
+        token: str,
+        repo: str,
+        issue_number: int,
+        branch_repo_name: str,
+        branch_repo_token: str,
+        llvm_project_dir: str,
+        phab_token: str,
+    ) -> None:
         self._token = token
         self._repo_name = repo
         self._issue_number = issue_number
@@ -213,11 +228,13 @@ def issue(self) -> github.Issue.Issue:
 
     @property
     def push_url(self) -> str:
-        return 'https://{}@github.com/{}'.format(self.branch_repo_token, self.branch_repo_name)
+        return "https://{}@github.com/{}".format(
+            self.branch_repo_token, self.branch_repo_name
+        )
 
     @property
     def branch_name(self) -> str:
-        return 'issue{}'.format(self.issue_number)
+        return "issue{}".format(self.issue_number)
 
     @property
     def release_branch_for_issue(self) -> Optional[str]:
@@ -225,7 +242,7 @@ def release_branch_for_issue(self) -> Optional[str]:
         milestone = issue.milestone
         if milestone is None:
             return None
-        m = re.search('branch: (.+)',milestone.description)
+        m = re.search("branch: (.+)", milestone.description)
         if m:
             return m.group(1)
         return None
@@ -234,10 +251,14 @@ def print_release_branch(self) -> None:
         print(self.release_branch_for_issue)
 
     def issue_notify_branch(self) -> None:
-        self.issue.create_comment('/branch {}/{}'.format(self.branch_repo_name, self.branch_name))
+        self.issue.create_comment(
+            "/branch {}/{}".format(self.branch_repo_name, self.branch_name)
+        )
 
-    def issue_notify_pull_request(self, pull:github.PullRequest.PullRequest) -> None:
-        self.issue.create_comment('/pull-request {}#{}'.format(self.branch_repo_name, pull.number))
+    def issue_notify_pull_request(self, pull: github.PullRequest.PullRequest) -> None:
+        self.issue.create_comment(
+            "/pull-request {}#{}".format(self.branch_repo_name, pull.number)
+        )
 
     def make_ignore_comment(self, comment: str) -> str:
         """
@@ -246,20 +267,28 @@ def make_ignore_comment(self, comment: str) -> str:
 
         :param str comment: The comment to ignore
         """
-        return "<!--IGNORE-->\n"+comment
+        return "<!--IGNORE-->\n" + comment
 
-    def issue_notify_no_milestone(self, comment:List[str]) -> None:
-        message = "{}\n\nError: Command failed due to missing milestone.".format(''.join(['>' + line for line in comment]))
+    def issue_notify_no_milestone(self, comment: List[str]) -> None:
+        message = "{}\n\nError: Command failed due to missing milestone.".format(
+            "".join([">" + line for line in comment])
+        )
         self.issue.create_comment(self.make_ignore_comment(message))
 
     @property
     def action_url(self) -> str:
-        if os.getenv('CI'):
-            return 'https://github.com/{}/actions/runs/{}'.format(os.getenv('GITHUB_REPOSITORY'), os.getenv('GITHUB_RUN_ID'))
+        if os.getenv("CI"):
+            return "https://github.com/{}/actions/runs/{}".format(
+                os.getenv("GITHUB_REPOSITORY"), os.getenv("GITHUB_RUN_ID")
+            )
         return ""
 
-    def issue_notify_cherry_pick_failure(self, commit:str) -> github.IssueComment.IssueComment:
-        message = self.make_ignore_comment("Failed to cherry-pick: {}\n\n".format(commit))
+    def issue_notify_cherry_pick_failure(
+        self, commit: str
+    ) -> github.IssueComment.IssueComment:
+        message = self.make_ignore_comment(
+            "Failed to cherry-pick: {}\n\n".format(commit)
+        )
         action_url = self.action_url
         if action_url:
             message += action_url + "\n\n"
@@ -269,7 +298,9 @@ def issue_notify_cherry_pick_failure(self, commit:str) -> github.IssueComment.Is
         issue.add_to_labels(self.CHERRY_PICK_FAILED_LABEL)
         return comment
 
-    def issue_notify_pull_request_failure(self, branch:str) -> github.IssueComment.IssueComment:
+    def issue_notify_pull_request_failure(
+        self, branch: str
+    ) -> github.IssueComment.IssueComment:
         message = "Failed to create pull request for {} ".format(branch)
         message += self.action_url
         return self.issue.create_comment(message)
@@ -278,7 +309,7 @@ def issue_remove_cherry_pick_failed_label(self):
         if self.CHERRY_PICK_FAILED_LABEL in [l.name for l in self.issue.labels]:
             self.issue.remove_from_labels(self.CHERRY_PICK_FAILED_LABEL)
 
-    def pr_request_review(self, pr:github.PullRequest.PullRequest):
+    def pr_request_review(self, pr: github.PullRequest.PullRequest):
         """
         This function will try to find the best reviewers for `commits` and
         then add a comment requesting review of the backport and assign the
@@ -297,11 +328,12 @@ def pr_request_review(self, pr:github.PullRequest.PullRequest):
                 reviewers.append(login)
         if len(reviewers):
             message = "{} What do you think about merging this PR to the release branch?".format(
-                    " ".join(["@" + r for r in reviewers]))
+                " ".join(["@" + r for r in reviewers])
+            )
             pr.create_issue_comment(message)
             pr.add_to_assignees(*reviewers)
 
-    def create_branch(self, commits:List[str]) -> bool:
+    def create_branch(self, commits: List[str]) -> bool:
         """
         This function attempts to backport `commits` into the branch associated
         with `self.issue_number`.
@@ -312,31 +344,33 @@ def create_branch(self, commits:List[str]) -> bool:
         :param list commits: List of commits to cherry-pick.
 
         """
-        print('cherry-picking', commits)
+        print("cherry-picking", commits)
         branch_name = self.branch_name
         local_repo = Repo(self.llvm_project_dir)
         local_repo.git.checkout(self.release_branch_for_issue)
 
         for c in commits:
             try:
-                local_repo.git.cherry_pick('-x', c)
+                local_repo.git.cherry_pick("-x", c)
             except Exception as e:
                 self.issue_notify_cherry_pick_failure(c)
                 raise e
 
         push_url = self.push_url
-        print('Pushing to {} {}'.format(push_url, branch_name))
-        local_repo.git.push(push_url, 'HEAD:{}'.format(branch_name), force=True)
+        print("Pushing to {} {}".format(push_url, branch_name))
+        local_repo.git.push(push_url, "HEAD:{}".format(branch_name), force=True)
 
         self.issue_notify_branch()
         self.issue_remove_cherry_pick_failed_label()
         return True
 
-    def check_if_pull_request_exists(self, repo:github.Repository.Repository, head:str) -> bool:
+    def check_if_pull_request_exists(
+        self, repo: github.Repository.Repository, head: str
+    ) -> bool:
         pulls = repo.get_pulls(head=head)
         return pulls.totalCount != 0
 
-    def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
+    def create_pull_request(self, owner: str, repo_name: str, branch: str) -> bool:
         """
         reate a pull request in `self.branch_repo_name`.  The base branch of the
         pull request will be chosen based on the the milestone attached to
@@ -347,7 +381,7 @@ def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
         https://docs.github.com/en/get-started/quickstart/github-glossary#compare-branch
         """
         repo = github.Github(self.token).get_repo(self.branch_repo_name)
-        issue_ref = '{}#{}'.format(self.repo_name, self.issue_number)
+        issue_ref = "{}#{}".format(self.repo_name, self.issue_number)
         pull = None
         release_branch_for_issue = self.release_branch_for_issue
         if release_branch_for_issue is None:
@@ -357,13 +391,17 @@ def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
             # If the target repo is not a fork of llvm-project, we need to copy
             # the branch into the target repo.  GitHub only supports cross-repo pull
             # requests on forked repos.
-            head_branch = f'{owner}-{branch}'
+            head_branch = f"{owner}-{branch}"
             local_repo = Repo(self.llvm_project_dir)
             push_done = False
-            for _ in range(0,5):
+            for _ in range(0, 5):
                 try:
-                    local_repo.git.fetch(f'https://github.com/{owner}/{repo_name}', f'{branch}:{branch}')
-                    local_repo.git.push(self.push_url, f'{branch}:{head_branch}', force=True)
+                    local_repo.git.fetch(
+                        f"https://github.com/{owner}/{repo_name}", f"{branch}:{branch}"
+                    )
+                    local_repo.git.push(
+                        self.push_url, f"{branch}:{head_branch}", force=True
+                    )
                     push_done = True
                     break
                 except Exception as e:
@@ -379,11 +417,13 @@ def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
             print("PR already exists...")
             return True
         try:
-            pull = repo.create_pull(title=f"PR for {issue_ref}",
-                                    body='resolves {}'.format(issue_ref),
-                                    base=release_branch_for_issue,
-                                    head=head,
-                                    maintainer_can_modify=False)
+            pull = repo.create_pull(
+                title=f"PR for {issue_ref}",
+                body="resolves {}".format(issue_ref),
+                base=release_branch_for_issue,
+                head=head,
+                maintainer_can_modify=False,
+            )
 
             try:
                 if self.phab_token:
@@ -404,7 +444,6 @@ def create_pull_request(self, owner:str, repo_name:str, branch:str) -> bool:
         # TODO(tstellar): Do you really want to always return True?
         return True
 
-
     def execute_command(self) -> bool:
         """
         This function reads lines from STDIN and executes the first command
@@ -420,11 +459,11 @@ def execute_command(self) -> bool:
             command = m.group(1)
             args = m.group(2)
 
-            if command == 'cherry-pick':
+            if command == "cherry-pick":
                 return self.create_branch(args.split())
 
-            if command == 'branch':
-                m = re.match('([^/]+)/([^/]+)/(.+)', args)
+            if command == "branch":
+                m = re.match("([^/]+)/([^/]+)/(.+)", args)
                 if m:
                     owner = m.group(1)
                     repo = m.group(2)
@@ -435,45 +474,85 @@ def execute_command(self) -> bool:
         print(sys.stdin.readlines())
         return False
 
+
 parser = argparse.ArgumentParser()
-parser.add_argument('--token', type=str, required=True, help='GitHub authentiation token')
-parser.add_argument('--repo', type=str, default=os.getenv('GITHUB_REPOSITORY', 'llvm/llvm-project'),
-                    help='The GitHub repository that we are working with in the form of <owner>/<repo> (e.g. llvm/llvm-project)')
-subparsers = parser.add_subparsers(dest='command')
-
-issue_subscriber_parser = subparsers.add_parser('issue-subscriber')
-issue_subscriber_parser.add_argument('--label-name', type=str, required=True)
-issue_subscriber_parser.add_argument('--issue-number', type=int, required=True)
-
-release_workflow_parser = subparsers.add_parser('release-workflow')
-release_workflow_parser.add_argument('--llvm-project-dir', type=str, default='.', help='directory containing the llvm-project checout')
-release_workflow_parser.add_argument('--issue-number', type=int, required=True, help='The issue number to update')
-release_workflow_parser.add_argument('--phab-token', type=str, help='Phabricator conduit API token. See https://reviews.llvm.org/settings/user/<USER>/page/apitokens/')
-release_workflow_parser.add_argument('--branch-repo-token', type=str,
-                                     help='GitHub authentication token to use for the repository where new branches will be pushed. Defaults to TOKEN.')
-release_workflow_parser.add_argument('--branch-repo', type=str, default='llvm/llvm-project-release-prs',
-                                     help='The name of the repo where new branches will be pushed (e.g. llvm/llvm-project)')
-release_workflow_parser.add_argument('sub_command', type=str, choices=['print-release-branch', 'auto'],
-                                     help='Print to stdout the name of the release branch ISSUE_NUMBER should be backported to')
-
-llvmbot_git_config_parser = subparsers.add_parser('setup-llvmbot-git', help='Set the default user and email for the git repo in LLVM_PROJECT_DIR to llvmbot')
+parser.add_argument(
+    "--token", type=str, required=True, help="GitHub authentication token"
+)
+parser.add_argument(
+    "--repo",
+    type=str,
+    default=os.getenv("GITHUB_REPOSITORY", "llvm/llvm-project"),
+    help="The GitHub repository that we are working with in the form of <owner>/<repo> (e.g. llvm/llvm-project)",
+)
+subparsers = parser.add_subparsers(dest="command")
+
+issue_subscriber_parser = subparsers.add_parser("issue-subscriber")
+issue_subscriber_parser.add_argument("--label-name", type=str, required=True)
+issue_subscriber_parser.add_argument("--issue-number", type=int, required=True)
+
+release_workflow_parser = subparsers.add_parser("release-workflow")
+release_workflow_parser.add_argument(
+    "--llvm-project-dir",
+    type=str,
+    default=".",
+    help="directory containing the llvm-project checkout",
+)
+release_workflow_parser.add_argument(
+    "--issue-number", type=int, required=True, help="The issue number to update"
+)
+release_workflow_parser.add_argument(
+    "--phab-token",
+    type=str,
+    help="Phabricator conduit API token. See https://reviews.llvm.org/settings/user/<USER>/page/apitokens/",
+)
+release_workflow_parser.add_argument(
+    "--branch-repo-token",
+    type=str,
+    help="GitHub authentication token to use for the repository where new branches will be pushed. Defaults to TOKEN.",
+)
+release_workflow_parser.add_argument(
+    "--branch-repo",
+    type=str,
+    default="llvm/llvm-project-release-prs",
+    help="The name of the repo where new branches will be pushed (e.g. llvm/llvm-project)",
+)
+release_workflow_parser.add_argument(
+    "sub_command",
+    type=str,
+    choices=["print-release-branch", "auto"],
+    help="Print to stdout the name of the release branch ISSUE_NUMBER should be backported to",
+)
+
+llvmbot_git_config_parser = subparsers.add_parser(
+    "setup-llvmbot-git",
+    help="Set the default user and email for the git repo in LLVM_PROJECT_DIR to llvmbot",
+)
 
 args = parser.parse_args()
 
-if args.command == 'issue-subscriber':
-    issue_subscriber = IssueSubscriber(args.token, args.repo, args.issue_number, args.label_name)
+if args.command == "issue-subscriber":
+    issue_subscriber = IssueSubscriber(
+        args.token, args.repo, args.issue_number, args.label_name
+    )
     issue_subscriber.run()
-elif args.command == 'release-workflow':
-    release_workflow = ReleaseWorkflow(args.token, args.repo, args.issue_number,
-                                       args.branch_repo, args.branch_repo_token,
-                                       args.llvm_project_dir, args.phab_token)
+elif args.command == "release-workflow":
+    release_workflow = ReleaseWorkflow(
+        args.token,
+        args.repo,
+        args.issue_number,
+        args.branch_repo,
+        args.branch_repo_token,
+        args.llvm_project_dir,
+        args.phab_token,
+    )
     if not release_workflow.release_branch_for_issue:
         release_workflow.issue_notify_no_milestone(sys.stdin.readlines())
         sys.exit(1)
-    if args.sub_command == 'print-release-branch':
+    if args.sub_command == "print-release-branch":
         release_workflow.print_release_branch()
     else:
         if not release_workflow.execute_command():
             sys.exit(1)
-elif args.command == 'setup-llvmbot-git':
+elif args.command == "setup-llvmbot-git":
     setup_llvmbot_git()

diff  --git a/llvm/utils/git/pre-push.py b/llvm/utils/git/pre-push.py
index e50a913709634..d7ae3767d2923 100755
--- a/llvm/utils/git/pre-push.py
+++ b/llvm/utils/git/pre-push.py
@@ -37,7 +37,7 @@
 VERBOSE = False
 QUIET = False
 dev_null_fd = None
-z40 = '0000000000000000000000000000000000000000'
+z40 = "0000000000000000000000000000000000000000"
 
 
 def eprint(*args, **kwargs):
@@ -63,29 +63,37 @@ def die(msg):
 
 def ask_confirm(prompt):
     while True:
-        query = input('%s (y/N): ' % (prompt))
-        if query.lower() not in ['y', 'n', '']:
-           print('Expect y or n!')
-           continue
-        return query.lower() == 'y'
+        query = input("%s (y/N): " % (prompt))
+        if query.lower() not in ["y", "n", ""]:
+            print("Expect y or n!")
+            continue
+        return query.lower() == "y"
 
 
 def get_dev_null():
     """Lazily create a /dev/null fd for use in shell()"""
     global dev_null_fd
     if dev_null_fd is None:
-        dev_null_fd = open(os.devnull, 'w')
+        dev_null_fd = open(os.devnull, "w")
     return dev_null_fd
 
 
-def shell(cmd, strip=True, cwd=None, stdin=None, die_on_failure=True,
-          ignore_errors=False, text=True, print_raw_stderr=False):
+def shell(
+    cmd,
+    strip=True,
+    cwd=None,
+    stdin=None,
+    die_on_failure=True,
+    ignore_errors=False,
+    text=True,
+    print_raw_stderr=False,
+):
     # Escape args when logging for easy repro.
     quoted_cmd = [quote(arg) for arg in cmd]
-    cwd_msg = ''
+    cwd_msg = ""
     if cwd:
-      cwd_msg = ' in %s' % cwd
-    log_verbose('Running%s: %s' % (cwd_msg, ' '.join(quoted_cmd)))
+        cwd_msg = " in %s" % cwd
+    log_verbose("Running%s: %s" % (cwd_msg, " ".join(quoted_cmd)))
 
     err_pipe = subprocess.PIPE
     if ignore_errors:
@@ -93,29 +101,34 @@ def shell(cmd, strip=True, cwd=None, stdin=None, die_on_failure=True,
         err_pipe = get_dev_null()
 
     start = time.time()
-    p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=err_pipe,
-                         stdin=subprocess.PIPE,
-                         universal_newlines=text)
+    p = subprocess.Popen(
+        cmd,
+        cwd=cwd,
+        stdout=subprocess.PIPE,
+        stderr=err_pipe,
+        stdin=subprocess.PIPE,
+        universal_newlines=text,
+    )
     stdout, stderr = p.communicate(input=stdin)
     elapsed = time.time() - start
 
-    log_verbose('Command took %0.1fs' % elapsed)
+    log_verbose("Command took %0.1fs" % elapsed)
 
     if p.returncode == 0 or ignore_errors:
         if stderr and not ignore_errors:
             if not print_raw_stderr:
-                eprint('`%s` printed to stderr:' % ' '.join(quoted_cmd))
+                eprint("`%s` printed to stderr:" % " ".join(quoted_cmd))
             eprint(stderr.rstrip())
         if strip:
             if text:
-                stdout = stdout.rstrip('\r\n')
+                stdout = stdout.rstrip("\r\n")
             else:
-                stdout = stdout.rstrip(b'\r\n')
+                stdout = stdout.rstrip(b"\r\n")
         if VERBOSE:
             for l in stdout.splitlines():
-                log_verbose('STDOUT: %s' % l)
+                log_verbose("STDOUT: %s" % l)
         return stdout
-    err_msg = '`%s` returned %s' % (' '.join(quoted_cmd), p.returncode)
+    err_msg = "`%s` returned %s" % (" ".join(quoted_cmd), p.returncode)
     eprint(err_msg)
     if stderr:
         eprint(stderr.rstrip())
@@ -125,40 +138,47 @@ def shell(cmd, strip=True, cwd=None, stdin=None, die_on_failure=True,
 
 
 def git(*cmd, **kwargs):
-    return shell(['git'] + list(cmd), **kwargs)
+    return shell(["git"] + list(cmd), **kwargs)
 
 
 def get_revs_to_push(range):
-    commits = git('rev-list', range).splitlines()
+    commits = git("rev-list", range).splitlines()
     # Reverse the order so we print the oldest commit first
     commits.reverse()
     return commits
 
 
 def handle_push(args, local_ref, local_sha, remote_ref, remote_sha):
-    '''Check a single push request (which can include multiple revisions)'''
-    log_verbose('Handle push, reproduce with '
-                '`echo %s %s %s %s | pre-push.py %s %s'
-                 % (local_ref, local_sha, remote_ref, remote_sha, args.remote,
-                    args.url))
+    """Check a single push request (which can include multiple revisions)"""
+    log_verbose(
+        "Handle push, reproduce with "
+        "`echo %s %s %s %s | pre-push.py %s %s"
+        % (local_ref, local_sha, remote_ref, remote_sha, args.remote, args.url)
+    )
     # Handle request to delete
     if local_sha == z40:
-        if not ask_confirm('Are you sure you want to delete "%s" on remote "%s"?' % (remote_ref, args.url)):
+        if not ask_confirm(
+            'Are you sure you want to delete "%s" on remote "%s"?'
+            % (remote_ref, args.url)
+        ):
             die("Aborting")
         return
 
     # Push a new branch
     if remote_sha == z40:
-      if not ask_confirm('Are you sure you want to push a new branch/tag "%s" on remote "%s"?' % (remote_ref, args.url)):
-        die("Aborting")
-      range=local_sha
-      return
+        if not ask_confirm(
+            'Are you sure you want to push a new branch/tag "%s" on remote "%s"?'
+            % (remote_ref, args.url)
+        ):
+            die("Aborting")
+        range = local_sha
+        return
     else:
-      # Update to existing branch, examine new commits
-      range='%s..%s' % (remote_sha, local_sha)
-      # Check that the remote commit exists, otherwise let git proceed
-      if "commit" not in git('cat-file','-t', remote_sha, ignore_errors=True):
-          return
+        # Update to existing branch, examine new commits
+        range = "%s..%s" % (remote_sha, local_sha)
+        # Check that the remote commit exists, otherwise let git proceed
+        if "commit" not in git("cat-file", "-t", remote_sha, ignore_errors=True):
+            return
 
     revs = get_revs_to_push(range)
     if not revs:
@@ -168,51 +188,57 @@ def handle_push(args, local_ref, local_sha, remote_ref, remote_sha):
     # Print the revision about to be pushed commits
     print('Pushing to "%s" on remote "%s"' % (remote_ref, args.url))
     for sha in revs:
-      print(' - ' + git('show', '--oneline', '--quiet', sha))
+        print(" - " + git("show", "--oneline", "--quiet", sha))
 
     if len(revs) > 1:
-      if not ask_confirm('Are you sure you want to push %d commits?' % len(revs)):
-          die('Aborting')
-
+        if not ask_confirm("Are you sure you want to push %d commits?" % len(revs)):
+            die("Aborting")
 
     for sha in revs:
-      msg = git('log', '--format=%B', '-n1', sha)
-      if 'Differential Revision' not in msg:
-          continue
-      for line in msg.splitlines():
-          for tag in ['Summary', 'Reviewers', 'Subscribers', 'Tags']:
-            if line.startswith(tag + ':'):
-              eprint('Please remove arcanist tags from the commit message (found "%s" tag in %s)' % (tag, sha[:12]))
-              if len(revs) == 1:
-                  eprint('Try running: llvm/utils/git/arcfilter.sh')
-              die('Aborting (force push by adding "--no-verify")')
+        msg = git("log", "--format=%B", "-n1", sha)
+        if "Differential Revision" not in msg:
+            continue
+        for line in msg.splitlines():
+            for tag in ["Summary", "Reviewers", "Subscribers", "Tags"]:
+                if line.startswith(tag + ":"):
+                    eprint(
+                        'Please remove arcanist tags from the commit message (found "%s" tag in %s)'
+                        % (tag, sha[:12])
+                    )
+                    if len(revs) == 1:
+                        eprint("Try running: llvm/utils/git/arcfilter.sh")
+                    die('Aborting (force push by adding "--no-verify")')
 
     return
 
 
-if __name__ == '__main__':
-    if not shutil.which('git'):
-        die('error: cannot find git command')
+if __name__ == "__main__":
+    if not shutil.which("git"):
+        die("error: cannot find git command")
 
     argv = sys.argv[1:]
     p = argparse.ArgumentParser(
-        prog='pre-push', formatter_class=argparse.RawDescriptionHelpFormatter,
-        description=__doc__)
+        prog="pre-push",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        description=__doc__,
+    )
     verbosity_group = p.add_mutually_exclusive_group()
-    verbosity_group.add_argument('-q', '--quiet', action='store_true',
-                                 help='print less information')
-    verbosity_group.add_argument('-v', '--verbose', action='store_true',
-                                 help='print more information')
+    verbosity_group.add_argument(
+        "-q", "--quiet", action="store_true", help="print less information"
+    )
+    verbosity_group.add_argument(
+        "-v", "--verbose", action="store_true", help="print more information"
+    )
 
-    p.add_argument('remote', type=str, help='Name of the remote')
-    p.add_argument('url', type=str, help='URL for the remote')
+    p.add_argument("remote", type=str, help="Name of the remote")
+    p.add_argument("url", type=str, help="URL for the remote")
 
     args = p.parse_args(argv)
     VERBOSE = args.verbose
     QUIET = args.quiet
 
     lines = sys.stdin.readlines()
-    sys.stdin = open('/dev/tty', 'r')
+    sys.stdin = open("/dev/tty", "r")
     for line in lines:
-      local_ref, local_sha, remote_ref, remote_sha = line.split()
-      handle_push(args, local_ref, local_sha, remote_ref, remote_sha)
+        local_ref, local_sha, remote_ref, remote_sha = line.split()
+        handle_push(args, local_ref, local_sha, remote_ref, remote_sha)

diff  --git a/llvm/utils/gn/build/run_built_binary.py b/llvm/utils/gn/build/run_built_binary.py
index faac5654873b9..abe606f022b58 100755
--- a/llvm/utils/gn/build/run_built_binary.py
+++ b/llvm/utils/gn/build/run_built_binary.py
@@ -5,4 +5,4 @@
 import sys
 
 # Prefix with ./ to run built binary, not arbitrary stuff from PATH.
-sys.exit(subprocess.call(['./' + sys.argv[1]] + sys.argv[2:]))
+sys.exit(subprocess.call(["./" + sys.argv[1]] + sys.argv[2:]))

diff  --git a/llvm/utils/gn/build/symbol_exports.py b/llvm/utils/gn/build/symbol_exports.py
index 5126e7071e70d..379a999d4c778 100755
--- a/llvm/utils/gn/build/symbol_exports.py
+++ b/llvm/utils/gn/build/symbol_exports.py
@@ -16,29 +16,30 @@
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('--format', required=True,
-                        choices=('linux','mac','win'))
-    parser.add_argument('source')
-    parser.add_argument('output')
+    parser.add_argument("--format", required=True, choices=("linux", "mac", "win"))
+    parser.add_argument("source")
+    parser.add_argument("output")
     args = parser.parse_args()
 
     symbols = open(args.source).readlines()
 
-    if args.format == 'linux':
-        output_lines = (['LLVM_0 {\n',
-                         '  global:\n',] +
-                        ['    %s;\n' % s.rstrip() for s in symbols] +
-                        ['  local:\n',
-                         '    *;\n',
-                         '};\n'])
-    elif args.format == 'mac':
-        output_lines = ['_' + s for s in symbols]
+    if args.format == "linux":
+        output_lines = (
+            [
+                "LLVM_0 {\n",
+                "  global:\n",
+            ]
+            + ["    %s;\n" % s.rstrip() for s in symbols]
+            + ["  local:\n", "    *;\n", "};\n"]
+        )
+    elif args.format == "mac":
+        output_lines = ["_" + s for s in symbols]
     else:
-        assert args.format == 'win'
-        output_lines = ['EXPORTS\n'] + ['  ' + s for s in symbols]
+        assert args.format == "win"
+        output_lines = ["EXPORTS\n"] + ["  " + s for s in symbols]
 
-    open(args.output, 'w').writelines(output_lines)
+    open(args.output, "w").writelines(output_lines)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/build/symlink_or_copy.py b/llvm/utils/gn/build/symlink_or_copy.py
index d5fbf32c1a6de..cbc559a6778f9 100755
--- a/llvm/utils/gn/build/symlink_or_copy.py
+++ b/llvm/utils/gn/build/symlink_or_copy.py
@@ -13,15 +13,16 @@
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('--stamp', required=True,
-                        help='name of a file whose mtime is updated on run')
-    parser.add_argument('source')
-    parser.add_argument('output')
+    parser.add_argument(
+        "--stamp", required=True, help="name of a file whose mtime is updated on run"
+    )
+    parser.add_argument("source")
+    parser.add_argument("output")
     args = parser.parse_args()
 
     # FIXME: This should not check the host platform but the target platform
     # (which needs to be passed in as an arg), for cross builds.
-    if sys.platform != 'win32':
+    if sys.platform != "win32":
         try:
             os.makedirs(os.path.dirname(args.output))
         except OSError as e:
@@ -37,12 +38,13 @@ def main():
                 raise
     else:
         import shutil
+
         output = args.output + ".exe"
         source = args.source + ".exe"
         shutil.copyfile(os.path.join(os.path.dirname(output), source), output)
 
-    open(args.stamp, 'w') # Update mtime on stamp file.
+    open(args.stamp, "w")  # Update mtime on stamp file.
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/build/sync_source_lists_from_cmake.py b/llvm/utils/gn/build/sync_source_lists_from_cmake.py
index fe7e8ed10bcf5..6b48ca7de869f 100755
--- a/llvm/utils/gn/build/sync_source_lists_from_cmake.py
+++ b/llvm/utils/gn/build/sync_source_lists_from_cmake.py
@@ -25,77 +25,87 @@ def patch_gn_file(gn_file, add, remove):
     with open(gn_file) as f:
         gn_contents = f.read()
     if add:
-        srcs_tok = 'sources = ['
+        srcs_tok = "sources = ["
         tokloc = gn_contents.find(srcs_tok)
-        while gn_contents.startswith('sources = []', tokloc):
+        while gn_contents.startswith("sources = []", tokloc):
             tokloc = gn_contents.find(srcs_tok, tokloc + 1)
-        if tokloc == -1: raise ValueError(gn_file + ': No source list')
+        if tokloc == -1:
+            raise ValueError(gn_file + ": No source list")
         if gn_contents.find(srcs_tok, tokloc + 1) != -1:
-            raise ValueError(gn_file + ': Multiple source lists')
-        if gn_contents.find('# NOSORT', 0, tokloc) != -1:
-            raise ValueError(gn_file + ': Found # NOSORT, needs manual merge')
+            raise ValueError(gn_file + ": Multiple source lists")
+        if gn_contents.find("# NOSORT", 0, tokloc) != -1:
+            raise ValueError(gn_file + ": Found # NOSORT, needs manual merge")
         tokloc += len(srcs_tok)
         for a in add:
-            gn_contents = (gn_contents[:tokloc] + ('"%s",' % a) +
-                           gn_contents[tokloc:])
+            gn_contents = gn_contents[:tokloc] + ('"%s",' % a) + gn_contents[tokloc:]
     for r in remove:
-        gn_contents = gn_contents.replace('"%s",' % r, '')
-    with open(gn_file, 'w') as f:
+        gn_contents = gn_contents.replace('"%s",' % r, "")
+    with open(gn_file, "w") as f:
         f.write(gn_contents)
 
     # Run `gn format`.
-    gn = os.path.join(os.path.dirname(__file__), '..', 'gn.py')
-    subprocess.check_call([sys.executable, gn, 'format', '-q', gn_file])
+    gn = os.path.join(os.path.dirname(__file__), "..", "gn.py")
+    subprocess.check_call([sys.executable, gn, "format", "-q", gn_file])
 
 
 def sync_source_lists(write):
     # Use shell=True on Windows in case git is a bat file.
-    def git(args): subprocess.check_call(['git'] + args, shell=os.name == 'nt')
+    def git(args):
+        subprocess.check_call(["git"] + args, shell=os.name == "nt")
+
     def git_out(args):
-        return subprocess.check_output(['git'] + args, shell=os.name == 'nt',
-                                       universal_newlines=True)
-    gn_files = git_out(['ls-files', '*BUILD.gn']).splitlines()
+        return subprocess.check_output(
+            ["git"] + args, shell=os.name == "nt", universal_newlines=True
+        )
+
+    gn_files = git_out(["ls-files", "*BUILD.gn"]).splitlines()
 
     # Matches e.g. |   "foo.cpp",|, captures |foo| in group 1.
     gn_cpp_re = re.compile(r'^\s*"([^$"]+\.(?:cpp|c|h|S))",$', re.MULTILINE)
     # Matches e.g. |   bar_sources = [ "foo.cpp" ]|, captures |foo| in group 1.
     gn_cpp_re2 = re.compile(
-        r'^\s*(?:.*_)?sources \+?= \[ "([^$"]+\.(?:cpp|c|h|S))" ]$',
-        re.MULTILINE)
+        r'^\s*(?:.*_)?sources \+?= \[ "([^$"]+\.(?:cpp|c|h|S))" ]$', re.MULTILINE
+    )
     # Matches e.g. |   foo.cpp|, captures |foo| in group 1.
-    cmake_cpp_re = re.compile(r'^\s*([A-Za-z_0-9./-]+\.(?:cpp|c|h|S))$',
-                              re.MULTILINE)
+    cmake_cpp_re = re.compile(r"^\s*([A-Za-z_0-9./-]+\.(?:cpp|c|h|S))$", re.MULTILINE)
 
     changes_by_rev = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
 
     def find_gitrev(touched_line, in_file):
         # re.escape() escapes e.g. '-', which works in practice but has
         # undefined behavior according to the POSIX extended regex spec.
-        posix_re_escape = lambda s: re.sub(r'([.[{()\\*+?|^$])', r'\\\1', s)
-        cmd = ['log', '--format=%h', '-1', '--pickaxe-regex',
-               # `\<` / `\>` cause issues on Windows (and is a GNU extension).
-               # `\b` is a GNU extension and stopped working in Apple Git-143
-               # (Xcode 13.3).
-               # `[:space:]` is over 10x faster than `^[:alnum:]` and hopefully
-               # good enough.
-               r'-S[[:space:]]%s[[:space:]]' % posix_re_escape(touched_line),
-               in_file]
+        posix_re_escape = lambda s: re.sub(r"([.[{()\\*+?|^$])", r"\\\1", s)
+        cmd = [
+            "log",
+            "--format=%h",
+            "-1",
+            "--pickaxe-regex",
+            # `\<` / `\>` cause issues on Windows (and is a GNU extension).
+            # `\b` is a GNU extension and stopped working in Apple Git-143
+            # (Xcode 13.3).
+            # `[:space:]` is over 10x faster than `^[:alnum:]` and hopefully
+            # good enough.
+            r"-S[[:space:]]%s[[:space:]]" % posix_re_escape(touched_line),
+            in_file,
+        ]
         return git_out(cmd).rstrip()
 
     # Collect changes to gn files, grouped by revision.
     for gn_file in gn_files:
         # The CMakeLists.txt for llvm/utils/gn/secondary/foo/BUILD.gn is
         # at foo/CMakeLists.txt.
-        strip_prefix = 'llvm/utils/gn/secondary/'
+        strip_prefix = "llvm/utils/gn/secondary/"
         if not gn_file.startswith(strip_prefix):
             continue
         cmake_file = os.path.join(
-                os.path.dirname(gn_file[len(strip_prefix):]), 'CMakeLists.txt')
+            os.path.dirname(gn_file[len(strip_prefix) :]), "CMakeLists.txt"
+        )
         if not os.path.exists(cmake_file):
             continue
 
         def get_sources(source_re, text):
             return set([m.group(1) for m in source_re.finditer(text)])
+
         gn_cpp = get_sources(gn_cpp_re, open(gn_file).read())
         gn_cpp |= get_sources(gn_cpp_re2, open(gn_file).read())
         cmake_cpp = get_sources(cmake_cpp_re, open(cmake_file).read())
@@ -107,28 +117,28 @@ def by_rev(files, key):
             for f in files:
                 rev = find_gitrev(f, cmake_file)
                 changes_by_rev[rev][gn_file][key].append(f)
-        by_rev(sorted(cmake_cpp - gn_cpp), 'add')
-        by_rev(sorted(gn_cpp - cmake_cpp), 'remove')
+
+        by_rev(sorted(cmake_cpp - gn_cpp), "add")
+        by_rev(sorted(gn_cpp - cmake_cpp), "remove")
 
     # Output necessary changes grouped by revision.
     for rev in sorted(changes_by_rev):
-        print('[gn build] Port {0} -- https://reviews.llvm.org/rG{0}'
-            .format(rev))
+        print("[gn build] Port {0} -- https://reviews.llvm.org/rG{0}".format(rev))
         for gn_file, data in sorted(changes_by_rev[rev].items()):
-            add = data.get('add', [])
-            remove = data.get('remove', [])
+            add = data.get("add", [])
+            remove = data.get("remove", [])
             if write:
                 patch_gn_file(gn_file, add, remove)
-                git(['add', gn_file])
+                git(["add", gn_file])
             else:
-                print('  ' + gn_file)
+                print("  " + gn_file)
                 if add:
-                    print('   add:\n' + '\n'.join('    "%s",' % a for a in add))
+                    print("   add:\n" + "\n".join('    "%s",' % a for a in add))
                 if remove:
-                    print('   remove:\n    ' + '\n    '.join(remove))
+                    print("   remove:\n    " + "\n    ".join(remove))
                 print()
         if write:
-            git(['commit', '-m', '[gn build] Port %s' % rev])
+            git(["commit", "-m", "[gn build] Port %s" % rev])
         else:
             print()
 
@@ -137,31 +147,33 @@ def by_rev(files, key):
 
 def sync_unittests():
     # Matches e.g. |add_llvm_unittest_with_input_files|.
-    unittest_re = re.compile(r'^add_\S+_unittest', re.MULTILINE)
+    unittest_re = re.compile(r"^add_\S+_unittest", re.MULTILINE)
 
-    checked = [ 'bolt', 'clang', 'clang-tools-extra', 'lld', 'llvm' ]
+    checked = ["bolt", "clang", "clang-tools-extra", "lld", "llvm"]
     changed = False
     for c in checked:
-        for root, _, _ in os.walk(os.path.join(c, 'unittests')):
-            cmake_file = os.path.join(root, 'CMakeLists.txt')
+        for root, _, _ in os.walk(os.path.join(c, "unittests")):
+            cmake_file = os.path.join(root, "CMakeLists.txt")
             if not os.path.exists(cmake_file):
                 continue
             if not unittest_re.search(open(cmake_file).read()):
                 continue  # Skip CMake files that just add subdirectories.
-            gn_file = os.path.join('llvm/utils/gn/secondary', root, 'BUILD.gn')
+            gn_file = os.path.join("llvm/utils/gn/secondary", root, "BUILD.gn")
             if not os.path.exists(gn_file):
                 changed = True
-                print('missing GN file %s for unittest CMake file %s' %
-                      (gn_file, cmake_file))
+                print(
+                    "missing GN file %s for unittest CMake file %s"
+                    % (gn_file, cmake_file)
+                )
     return changed
 
 
 def main():
-    src = sync_source_lists(len(sys.argv) > 1 and sys.argv[1] == '--write')
+    src = sync_source_lists(len(sys.argv) > 1 and sys.argv[1] == "--write")
     tests = sync_unittests()
     if src or tests:
         sys.exit(1)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff  --git a/llvm/utils/gn/build/write_cmake_config.py b/llvm/utils/gn/build/write_cmake_config.py
index ff69079bbef76..a14df6b2cdeab 100755
--- a/llvm/utils/gn/build/write_cmake_config.py
+++ b/llvm/utils/gn/build/write_cmake_config.py
@@ -40,70 +40,72 @@
 
 def main():
     parser = argparse.ArgumentParser(
-                 epilog=__doc__,
-                 formatter_class=argparse.RawDescriptionHelpFormatter)
-    parser.add_argument('input', help='input file')
-    parser.add_argument('values', nargs='*', help='several KEY=VALUE pairs')
-    parser.add_argument('-o', '--output', required=True,
-                        help='output file')
+        epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument("input", help="input file")
+    parser.add_argument("values", nargs="*", help="several KEY=VALUE pairs")
+    parser.add_argument("-o", "--output", required=True, help="output file")
     args = parser.parse_args()
 
     values = {}
     for value in args.values:
-        key, val = value.split('=', 1)
+        key, val = value.split("=", 1)
         if key in values:
             print('duplicate key "%s" in args' % key, file=sys.stderr)
             return 1
-        values[key] = val.replace('\\n', '\n')
+        values[key] = val.replace("\\n", "\n")
     unused_values = set(values.keys())
 
     # Matches e.g. '${FOO}' or '@FOO@' and captures FOO in group 1 or 2.
-    var_re = re.compile(r'\$\{([^}]*)\}|@([^@]*)@')
+    var_re = re.compile(r"\$\{([^}]*)\}|@([^@]*)@")
 
     with open(args.input) as f:
         in_lines = f.readlines()
     out_lines = []
     for in_line in in_lines:
+
         def repl(m):
             key = m.group(1) or m.group(2)
             unused_values.discard(key)
             return values[key]
+
         in_line = var_re.sub(repl, in_line)
-        if in_line.startswith('#cmakedefine01 '):
+        if in_line.startswith("#cmakedefine01 "):
             _, var = in_line.split()
-            if values[var] == '0':
+            if values[var] == "0":
                 print('error: "%s=0" used with #cmakedefine01 %s' % (var, var))
                 print("       '0' evaluates as truthy with #cmakedefine01")
                 print('       use "%s=" instead' % var)
                 return 1
-            in_line = '#define %s %d\n' % (var, 1 if values[var] else 0)
+            in_line = "#define %s %d\n" % (var, 1 if values[var] else 0)
             unused_values.discard(var)
-        elif in_line.startswith('#cmakedefine '):
+        elif in_line.startswith("#cmakedefine "):
             _, var = in_line.split(None, 1)
             try:
                 var, val = var.split(None, 1)
-                in_line = '#define %s %s' % (var, val)  # val ends in \n.
+                in_line = "#define %s %s" % (var, val)  # val ends in \n.
             except:
                 var = var.rstrip()
-                in_line = '#define %s\n' % var
+                in_line = "#define %s\n" % var
             if not values[var]:
-                in_line = '/* #undef %s */\n' % var
+                in_line = "/* #undef %s */\n" % var
             unused_values.discard(var)
         out_lines.append(in_line)
 
     if unused_values:
-        print('unused values args:', file=sys.stderr)
-        print('    ' + '\n    '.join(unused_values), file=sys.stderr)
+        print("unused values args:", file=sys.stderr)
+        print("    " + "\n    ".join(unused_values), file=sys.stderr)
         return 1
 
-    output = ''.join(out_lines)
+    output = "".join(out_lines)
 
     leftovers = var_re.findall(output)
     if leftovers:
         print(
-            'unprocessed values:\n',
-            '\n'.join([x[0] or x[1] for x in leftovers]),
-            file=sys.stderr)
+            "unprocessed values:\n",
+            "\n".join([x[0] or x[1] for x in leftovers]),
+            file=sys.stderr,
+        )
         return 1
 
     def read(filename):
@@ -111,10 +113,10 @@ def read(filename):
             return f.read()
 
     if not os.path.exists(args.output) or read(args.output) != output:
-        with open(args.output, 'w') as f:
+        with open(args.output, "w") as f:
             f.write(output)
         os.chmod(args.output, os.stat(args.input).st_mode & 0o777)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/build/write_file.py b/llvm/utils/gn/build/write_file.py
index 96545b8ec1b4c..112164f208e54 100644
--- a/llvm/utils/gn/build/write_file.py
+++ b/llvm/utils/gn/build/write_file.py
@@ -8,14 +8,14 @@
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('filepath')
-    parser.add_argument('content')
+    parser.add_argument("filepath")
+    parser.add_argument("content")
 
     args = parser.parse_args()
 
-    with open(args.filepath, 'w') as f:
+    with open(args.filepath, "w") as f:
         f.write(args.content)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/build/write_library_dependencies.py b/llvm/utils/gn/build/write_library_dependencies.py
index d9fcc326df7f9..ba1a8a0157314 100644
--- a/llvm/utils/gn/build/write_library_dependencies.py
+++ b/llvm/utils/gn/build/write_library_dependencies.py
@@ -97,12 +97,12 @@
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-o', '--output', required=True, help='output file')
+    parser.add_argument("-o", "--output", required=True, help="output file")
     args = parser.parse_args()
 
-    with open(args.output, 'w') as f:
+    with open(args.output, "w") as f:
         f.write(OUTPUT)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/build/write_vcsrevision.py b/llvm/utils/gn/build/write_vcsrevision.py
index 6398b94df76bb..afd6aae60f6d7 100755
--- a/llvm/utils/gn/build/write_vcsrevision.py
+++ b/llvm/utils/gn/build/write_vcsrevision.py
@@ -24,59 +24,87 @@ def which(program):
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('-d', '--depfile',
-                        help='if set, writes a depfile that causes this script '
-                             'to re-run each time the current revision changes')
-    parser.add_argument('--write-git-rev', action='store_true',
-                        help='if set, writes git revision, else writes #undef')
-    parser.add_argument('--name', action='append',
-                        help='if set, writes a depfile that causes this script '
-                             'to re-run each time the current revision changes')
-    parser.add_argument('vcs_header', help='path to the output file to write')
+    parser.add_argument(
+        "-d",
+        "--depfile",
+        help="if set, writes a depfile that causes this script "
+        "to re-run each time the current revision changes",
+    )
+    parser.add_argument(
+        "--write-git-rev",
+        action="store_true",
+        help="if set, writes git revision, else writes #undef",
+    )
+    parser.add_argument(
+        "--name",
+        action="append",
+        help="if set, writes a depfile that causes this script "
+        "to re-run each time the current revision changes",
+    )
+    parser.add_argument("vcs_header", help="path to the output file to write")
     args = parser.parse_args()
 
-    vcsrevision_contents = ''
+    vcsrevision_contents = ""
     if args.write_git_rev:
-        git, use_shell = which('git'), False
-        if not git: git = which('git.exe')
-        if not git: git, use_shell = which('git.bat'), True
-        git_dir = subprocess.check_output(
-                [git, 'rev-parse', '--git-dir'],
-                cwd=LLVM_DIR, shell=use_shell).decode().strip()
+        git, use_shell = which("git"), False
+        if not git:
+            git = which("git.exe")
+        if not git:
+            git, use_shell = which("git.bat"), True
+        git_dir = (
+            subprocess.check_output(
+                [git, "rev-parse", "--git-dir"], cwd=LLVM_DIR, shell=use_shell
+            )
+            .decode()
+            .strip()
+        )
         if not os.path.isdir(git_dir):
             print('.git dir not found at "%s"' % git_dir, file=sys.stderr)
             return 1
 
-        rev = subprocess.check_output(
-                [git, 'rev-parse', '--short', 'HEAD'],
-                cwd=git_dir, shell=use_shell).decode().strip()
-        url = subprocess.check_output(
-                [git, 'remote', 'get-url', 'origin'],
-                cwd=git_dir, shell=use_shell).decode().strip()
+        rev = (
+            subprocess.check_output(
+                [git, "rev-parse", "--short", "HEAD"], cwd=git_dir, shell=use_shell
+            )
+            .decode()
+            .strip()
+        )
+        url = (
+            subprocess.check_output(
+                [git, "remote", "get-url", "origin"], cwd=git_dir, shell=use_shell
+            )
+            .decode()
+            .strip()
+        )
         for name in args.name:
             vcsrevision_contents += '#define %s_REVISION "%s"\n' % (name, rev)
             vcsrevision_contents += '#define %s_REPOSITORY "%s"\n' % (name, url)
     else:
         for name in args.name:
-            vcsrevision_contents += '#undef %s_REVISION\n' % name
-            vcsrevision_contents += '#undef %s_REPOSITORY\n' % name
+            vcsrevision_contents += "#undef %s_REVISION\n" % name
+            vcsrevision_contents += "#undef %s_REPOSITORY\n" % name
 
     # If the output already exists and is identical to what we'd write,
     # return to not perturb the existing file's timestamp.
-    if os.path.exists(args.vcs_header) and \
-            open(args.vcs_header).read() == vcsrevision_contents:
+    if (
+        os.path.exists(args.vcs_header)
+        and open(args.vcs_header).read() == vcsrevision_contents
+    ):
         return 0
 
     # http://neugierig.org/software/blog/2014/11/binary-revisions.html
     if args.depfile:
         build_dir = os.getcwd()
-        with open(args.depfile, 'w') as depfile:
-            depfile.write('%s: %s\n' % (
-                args.vcs_header,
-                os.path.relpath(os.path.join(git_dir, 'logs', 'HEAD'),
-                                build_dir)))
-    open(args.vcs_header, 'w').write(vcsrevision_contents)
+        with open(args.depfile, "w") as depfile:
+            depfile.write(
+                "%s: %s\n"
+                % (
+                    args.vcs_header,
+                    os.path.relpath(os.path.join(git_dir, "logs", "HEAD"), build_dir),
+                )
+            )
+    open(args.vcs_header, "w").write(vcsrevision_contents)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/get.py b/llvm/utils/gn/get.py
index 0d08f5f60a229..d32685acf550c 100755
--- a/llvm/utils/gn/get.py
+++ b/llvm/utils/gn/get.py
@@ -10,53 +10,56 @@
 
 def download_and_unpack(url, output_dir, gn):
     """Download an archive from url and extract gn from it into output_dir."""
-    print('downloading %s ...' % url, end='')
+    print("downloading %s ..." % url, end="")
     sys.stdout.flush()
     data = urllib.request.urlopen(url).read()
-    print(' done')
+    print(" done")
     zipfile.ZipFile(io.BytesIO(data)).extract(gn, path=output_dir)
 
 
 def set_executable_bit(path):
     mode = os.stat(path).st_mode
-    mode |= (mode & 0o444) >> 2 # Copy R bits to X.
-    os.chmod(path, mode) # No-op on Windows.
+    mode |= (mode & 0o444) >> 2  # Copy R bits to X.
+    os.chmod(path, mode)  # No-op on Windows.
 
 
 def get_platform():
     import platform
-    if sys.platform == 'darwin':
-        return 'mac-amd64' if platform.machine() != 'arm64' else 'mac-arm64'
-    if platform.machine() not in ('AMD64', 'x86_64'):
+
+    if sys.platform == "darwin":
+        return "mac-amd64" if platform.machine() != "arm64" else "mac-arm64"
+    if platform.machine() not in ("AMD64", "x86_64"):
         return None
-    if sys.platform.startswith('linux'):
-        return 'linux-amd64'
-    if sys.platform == 'win32':
-        return 'windows-amd64'
+    if sys.platform.startswith("linux"):
+        return "linux-amd64"
+    if sys.platform == "win32":
+        return "windows-amd64"
 
 
 def main():
     platform = get_platform()
     if not platform:
-        print('no prebuilt binary for', sys.platform)
-        print('build it yourself with:')
-        print('  rm -rf /tmp/gn &&')
-        print('  pushd /tmp && git clone https://gn.googlesource.com/gn &&')
-        print('  cd gn && build/gen.py && ninja -C out gn && popd &&')
-        print('  cp /tmp/gn/out/gn somewhere/on/PATH')
+        print("no prebuilt binary for", sys.platform)
+        print("build it yourself with:")
+        print("  rm -rf /tmp/gn &&")
+        print("  pushd /tmp && git clone https://gn.googlesource.com/gn &&")
+        print("  cd gn && build/gen.py && ninja -C out gn && popd &&")
+        print("  cp /tmp/gn/out/gn somewhere/on/PATH")
         return 1
-    dirname = os.path.join(os.path.dirname(__file__), 'bin', platform)
+    dirname = os.path.join(os.path.dirname(__file__), "bin", platform)
     if not os.path.exists(dirname):
         os.makedirs(dirname)
 
-    url = 'https://chrome-infra-packages.appspot.com/dl/gn/gn/%s/+/latest'
-    gn = 'gn' + ('.exe' if sys.platform == 'win32' else '')
-    if platform == 'mac-arm64': # For https://openradar.appspot.com/FB8914243
-        try: os.remove(os.path.join(dirname, gn))
-        except OSError: pass
+    url = "https://chrome-infra-packages.appspot.com/dl/gn/gn/%s/+/latest"
+    gn = "gn" + (".exe" if sys.platform == "win32" else "")
+    if platform == "mac-arm64":  # For https://openradar.appspot.com/FB8914243
+        try:
+            os.remove(os.path.join(dirname, gn))
+        except OSError:
+            pass
     download_and_unpack(url % platform, dirname, gn)
     set_executable_bit(os.path.join(dirname, gn))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/gn.py b/llvm/utils/gn/gn.py
index b2936f88bf3fa..290c6941bceea 100755
--- a/llvm/utils/gn/gn.py
+++ b/llvm/utils/gn/gn.py
@@ -12,57 +12,64 @@
 
 
 THIS_DIR = os.path.dirname(__file__)
-ROOT_DIR = os.path.join(THIS_DIR, '..', '..', '..')
+ROOT_DIR = os.path.join(THIS_DIR, "..", "..", "..")
 
 
 def get_platform():
     import platform
-    if sys.platform == 'darwin':
-        return 'mac-amd64' if platform.machine() != 'arm64' else 'mac-arm64'
-    if platform.machine() not in ('AMD64', 'x86_64'):
+
+    if sys.platform == "darwin":
+        return "mac-amd64" if platform.machine() != "arm64" else "mac-arm64"
+    if platform.machine() not in ("AMD64", "x86_64"):
         return None
-    if sys.platform.startswith('linux'):
-        return 'linux-amd64'
-    if sys.platform == 'win32':
-        return 'windows-amd64'
+    if sys.platform.startswith("linux"):
+        return "linux-amd64"
+    if sys.platform == "win32":
+        return "windows-amd64"
 
 
 def print_no_gn(mention_get):
-    print('gn binary not found in PATH')
+    print("gn binary not found in PATH")
     if mention_get:
-        print('run llvm/utils/gn/get.py to download a binary and try again, or')
-    print('follow https://gn.googlesource.com/gn/#getting-started')
+        print("run llvm/utils/gn/get.py to download a binary and try again, or")
+    print("follow https://gn.googlesource.com/gn/#getting-started")
     return 1
 
 
 def main():
     # Find real gn executable.
-    gn = 'gn'
-    if subprocess.call('gn --version', stdout=open(os.devnull, 'w'),
-                                       stderr=subprocess.STDOUT,
-                                       shell=True) != 0:
+    gn = "gn"
+    if (
+        subprocess.call(
+            "gn --version",
+            stdout=open(os.devnull, "w"),
+            stderr=subprocess.STDOUT,
+            shell=True,
+        )
+        != 0
+    ):
         # Not on path. See if get.py downloaded a prebuilt binary and run that
         # if it's there, or suggest to run get.py if it isn't.
         platform = get_platform()
         if not platform:
             return print_no_gn(mention_get=False)
-        gn = os.path.join(os.path.dirname(__file__), 'bin', platform, 'gn')
-        if not os.path.exists(gn + ('.exe' if sys.platform == 'win32' else '')):
+        gn = os.path.join(os.path.dirname(__file__), "bin", platform, "gn")
+        if not os.path.exists(gn + (".exe" if sys.platform == "win32" else "")):
             return print_no_gn(mention_get=True)
 
     # Compute --dotfile= and --root= args to add.
     extra_args = []
-    gn_main_arg = next((x for x in sys.argv[1:] if not x.startswith('-')), None)
-    if gn_main_arg != 'help':  # `gn help` gets confused by the switches.
+    gn_main_arg = next((x for x in sys.argv[1:] if not x.startswith("-")), None)
+    if gn_main_arg != "help":  # `gn help` gets confused by the switches.
         cwd = os.getcwd()
-        dotfile = os.path.relpath(os.path.join(THIS_DIR, '.gn'), cwd)
+        dotfile = os.path.relpath(os.path.join(THIS_DIR, ".gn"), cwd)
         root = os.path.relpath(ROOT_DIR, cwd)
-        extra_args = [ '--dotfile=' + dotfile, '--root=' + root ]
+        extra_args = ["--dotfile=" + dotfile, "--root=" + root]
 
     # Run GN command with --dotfile= and --root= added.
     cmd = [gn] + extra_args + sys.argv[1:]
     sys.exit(subprocess.call(cmd))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff  --git a/llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py b/llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py
index 3b162a9123b5b..7a584b1b5dcbb 100755
--- a/llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py
+++ b/llvm/utils/gn/secondary/libcxx/utils/gen_link_script.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python3
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
 
 """
 Generate a linker script that links libc++ to the proper ABI library.
@@ -20,31 +20,31 @@
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument("--input", help="Path to libc++ library", required=True)
-    parser.add_argument("--output", help="Path to libc++ linker script",
-                        required=True)
-    parser.add_argument("libraries", nargs="+",
-                        help="List of libraries libc++ depends on")
+    parser.add_argument("--output", help="Path to libc++ linker script", required=True)
+    parser.add_argument(
+        "libraries", nargs="+", help="List of libraries libc++ depends on"
+    )
     args = parser.parse_args()
 
     # Use the relative path for the libc++ library.
     libcxx = os.path.relpath(args.input, os.path.dirname(args.output))
 
     # Prepare the list of public libraries to link.
-    public_libs = ['-l%s' % l for l in args.libraries]
+    public_libs = ["-l%s" % l for l in args.libraries]
 
     # Generate the linker script contents.
-    contents = "INPUT(%s)" % ' '.join([libcxx] + public_libs)
+    contents = "INPUT(%s)" % " ".join([libcxx] + public_libs)
 
     # Remove the existing libc++ symlink if it exists.
     if os.path.islink(args.output):
         os.unlink(args.output)
 
     # Replace it with the linker script.
-    with open(args.output, 'w') as f:
+    with open(args.output, "w") as f:
         f.write(contents + "\n")
 
     return 0
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py b/llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py
index c31f35ce87416..b33517a2d21a2 100755
--- a/llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py
+++ b/llvm/utils/gn/secondary/llvm/include/llvm/Support/write_extension_def.py
@@ -7,16 +7,16 @@
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('exts', nargs='*', help='list of supported extensions')
-    parser.add_argument('-o', '--output', required=True, help='output file')
+    parser.add_argument("exts", nargs="*", help="list of supported extensions")
+    parser.add_argument("-o", "--output", required=True, help="output file")
     args = parser.parse_args()
 
-    output = ''.join(['HANDLE_EXTENSION(%s)\n' % ext for ext in args.exts])
-    output += '#undef HANDLE_EXTENSION\n'
+    output = "".join(["HANDLE_EXTENSION(%s)\n" % ext for ext in args.exts])
+    output += "#undef HANDLE_EXTENSION\n"
 
     if not os.path.exists(args.output) or open(args.output).read() != output:
-        open(args.output, 'w').write(output)
+        open(args.output, "w").write(output)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py b/llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py
index 68a341c18bd84..5f0c91cea689f 100644
--- a/llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py
+++ b/llvm/utils/gn/secondary/llvm/tools/llvm-config/write_extension_dependencies.py
@@ -9,8 +9,7 @@
 
 def main():
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument('-o', '--output', required=True,
-                        help='output file')
+    parser.add_argument("-o", "--output", required=True, help="output file")
     args = parser.parse_args()
 
     source = """\
@@ -21,8 +20,8 @@ def main():
 };
 std::array<ExtensionDescriptor, 0>  AvailableExtensions{};
 """
-    open(args.output, 'w').write(source)
+    open(args.output, "w").write(source)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())

diff  --git a/llvm/utils/indirect_calls.py b/llvm/utils/indirect_calls.py
index e460ff7d08362..2bdabc8c4d74f 100755
--- a/llvm/utils/indirect_calls.py
+++ b/llvm/utils/indirect_calls.py
@@ -18,26 +18,29 @@
 import subprocess
 import optparse
 
-# Look for indirect calls/jmps in a binary. re: (call|jmp).*\* 
+# Look for indirect calls/jmps in a binary. re: (call|jmp).*\*
 def look_for_indirect(file):
-    args = ['llvm-objdump']
+    args = ["llvm-objdump"]
     args.extend(["-d"])
     args.extend([file])
 
-    p = subprocess.Popen(args=args, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
-    (stdout,stderr) = p.communicate()
+    p = subprocess.Popen(
+        args=args, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE
+    )
+    (stdout, stderr) = p.communicate()
 
     function = ""
     for line in stdout.splitlines():
-        if line.startswith(' ') == False:
+        if line.startswith(" ") == False:
             function = line
-        result = re.search('(call|jmp).*\*', line)
+        result = re.search("(call|jmp).*\*", line)
         if result != None:
             # TODO: Perhaps use cxxfilt to demangle functions?
             print(function)
             print(line)
     return
 
+
 def main(args):
     # No options currently other than the binary.
     parser = optparse.OptionParser("%prog [options] <binary>")
@@ -46,5 +49,6 @@ def main(args):
         parser.error("invalid number of arguments: %s" % len(args))
     look_for_indirect(args[1])
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main(sys.argv)

diff  --git a/llvm/utils/lint/common_lint.py b/llvm/utils/lint/common_lint.py
index 641048b7ee5b4..1bf1695659d88 100644
--- a/llvm/utils/lint/common_lint.py
+++ b/llvm/utils/lint/common_lint.py
@@ -5,94 +5,101 @@
 from __future__ import print_function
 import re
 
+
 def VerifyLineLength(filename, lines, max_length):
-  """Checks to make sure the file has no lines with lines exceeding the length
-  limit.
-
-  Args:
-    filename: the file under consideration as string
-    lines: contents of the file as string array
-    max_length: maximum acceptable line length as number
-
-  Returns:
-    A list of tuples with format [(filename, line number, msg), ...] with any
-    violations found.
-  """
-  lint = []
-  line_num = 1
-  for line in lines:
-    length = len(line.rstrip('\n'))
-    if length > max_length:
-      lint.append((filename, line_num,
-                   'Line exceeds %d chars (%d)' % (max_length, length)))
-    line_num += 1
-  return lint
+    """Checks to make sure the file has no lines with lines exceeding the length
+    limit.
+
+    Args:
+      filename: the file under consideration as string
+      lines: contents of the file as string array
+      max_length: maximum acceptable line length as number
+
+    Returns:
+      A list of tuples with format [(filename, line number, msg), ...] with any
+      violations found.
+    """
+    lint = []
+    line_num = 1
+    for line in lines:
+        length = len(line.rstrip("\n"))
+        if length > max_length:
+            lint.append(
+                (
+                    filename,
+                    line_num,
+                    "Line exceeds %d chars (%d)" % (max_length, length),
+                )
+            )
+        line_num += 1
+    return lint
+
 
 def VerifyTabs(filename, lines):
-  """Checks to make sure the file has no tab characters.
-
-  Args:
-    filename: the file under consideration as string
-    lines: contents of the file as string array
-
-  Returns:
-    A list of tuples with format [(line_number, msg), ...] with any violations
-    found.
-  """
-  lint = []
-  tab_re = re.compile(r'\t')
-  line_num = 1
-  for line in lines:
-    if tab_re.match(line.rstrip('\n')):
-      lint.append((filename, line_num, 'Tab found instead of whitespace'))
-    line_num += 1
-  return lint
+    """Checks to make sure the file has no tab characters.
+
+    Args:
+      filename: the file under consideration as string
+      lines: contents of the file as string array
+
+    Returns:
+      A list of tuples with format [(line_number, msg), ...] with any violations
+      found.
+    """
+    lint = []
+    tab_re = re.compile(r"\t")
+    line_num = 1
+    for line in lines:
+        if tab_re.match(line.rstrip("\n")):
+            lint.append((filename, line_num, "Tab found instead of whitespace"))
+        line_num += 1
+    return lint
 
 
 def VerifyTrailingWhitespace(filename, lines):
-  """Checks to make sure the file has no lines with trailing whitespace.
-
-  Args:
-    filename: the file under consideration as string
-    lines: contents of the file as string array
-
-  Returns:
-    A list of tuples with format [(filename, line number, msg), ...] with any
-    violations found.
-  """
-  lint = []
-  trailing_whitespace_re = re.compile(r'\s+$')
-  line_num = 1
-  for line in lines:
-    if trailing_whitespace_re.match(line.rstrip('\n')):
-      lint.append((filename, line_num, 'Trailing whitespace'))
-    line_num += 1
-  return lint
+    """Checks to make sure the file has no lines with trailing whitespace.
+
+    Args:
+      filename: the file under consideration as string
+      lines: contents of the file as string array
+
+    Returns:
+      A list of tuples with format [(filename, line number, msg), ...] with any
+      violations found.
+    """
+    lint = []
+    trailing_whitespace_re = re.compile(r"\s+$")
+    line_num = 1
+    for line in lines:
+        if trailing_whitespace_re.match(line.rstrip("\n")):
+            lint.append((filename, line_num, "Trailing whitespace"))
+        line_num += 1
+    return lint
 
 
 class BaseLint:
-  def RunOnFile(filename, lines):
-    raise Exception('RunOnFile() unimplemented')
+    def RunOnFile(filename, lines):
+        raise Exception("RunOnFile() unimplemented")
 
 
 def RunLintOverAllFiles(linter, filenames):
-  """Runs linter over the contents of all files.
-
-  Args:
-    lint: subclass of BaseLint, implementing RunOnFile()
-    filenames: list of all files whose contents will be linted
-
-  Returns:
-    A list of tuples with format [(filename, line number, msg), ...] with any
-    violations found.
-  """
-  lint = []
-  for filename in filenames:
-    file = open(filename, 'r')
-    if not file:
-      print('Cound not open %s' % filename)
-      continue
-    lines = file.readlines()
-    lint.extend(linter.RunOnFile(filename, lines))
-
-  return lint
+    """Runs linter over the contents of all files.
+
+    Args:
+      lint: subclass of BaseLint, implementing RunOnFile()
+      filenames: list of all files whose contents will be linted
+
+    Returns:
+      A list of tuples with format [(filename, line number, msg), ...] with any
+      violations found.
+    """
+    lint = []
+    for filename in filenames:
+        file = open(filename, "r")
+        if not file:
+            print("Cound not open %s" % filename)
+            continue
+        lines = file.readlines()
+        lint.extend(linter.RunOnFile(filename, lines))
+
+    return lint

diff  --git a/llvm/utils/lint/cpp_lint.py b/llvm/utils/lint/cpp_lint.py
index 4ef457e83daa5..3734ba456626f 100755
--- a/llvm/utils/lint/cpp_lint.py
+++ b/llvm/utils/lint/cpp_lint.py
@@ -11,85 +11,102 @@
 import re
 import sys
 
+
 def VerifyIncludes(filename, lines):
-  """Makes sure the #includes are in proper order and no disallows files are
-  #included.
-
-  Args:
-    filename: the file under consideration as string
-    lines: contents of the file as string array
-  """
-  lint = []
-
-  include_gtest_re = re.compile(r'^#include "gtest/(.*)"')
-  include_llvm_re = re.compile(r'^#include "llvm/(.*)"')
-  include_support_re = re.compile(r'^#include "(Support/.*)"')
-  include_config_re = re.compile(r'^#include "(Config/.*)"')
-  include_system_re = re.compile(r'^#include <(.*)>')
-
-  DISALLOWED_SYSTEM_HEADERS = ['iostream']
-
-  line_num = 1
-  prev_config_header = None
-  prev_system_header = None
-  for line in lines:
-    # TODO: implement private headers
-    # TODO: implement gtest headers
-    # TODO: implement top-level llvm/* headers
-    # TODO: implement llvm/Support/* headers
-
-    # Process Config/* headers
-    config_header = include_config_re.match(line)
-    if config_header:
-      curr_config_header = config_header.group(1)
-      if prev_config_header:
-        if prev_config_header > curr_config_header:
-          lint.append((filename, line_num,
-                       'Config headers not in order: "%s" before "%s"' % (
-                         prev_config_header, curr_config_header)))
-
-    # Process system headers
-    system_header = include_system_re.match(line)
-    if system_header:
-      curr_system_header = system_header.group(1)
-
-      # Is it disallowed?
-      if curr_system_header in DISALLOWED_SYSTEM_HEADERS:
-        lint.append((filename, line_num,
-                     'Disallowed system header: <%s>' % curr_system_header))
-      elif prev_system_header:
-        # Make sure system headers are alphabetized amongst themselves
-        if prev_system_header > curr_system_header:
-          lint.append((filename, line_num,
-                       'System headers not in order: <%s> before <%s>' % (
-                         prev_system_header, curr_system_header)))
-
-      prev_system_header = curr_system_header
-
-    line_num += 1
-
-  return lint
+    """Makes sure the #includes are in proper order and no disallows files are
+    #included.
 
+    Args:
+      filename: the file under consideration as string
+      lines: contents of the file as string array
+    """
+    lint = []
 
-class CppLint(common_lint.BaseLint):
-  MAX_LINE_LENGTH = 80
+    include_gtest_re = re.compile(r'^#include "gtest/(.*)"')
+    include_llvm_re = re.compile(r'^#include "llvm/(.*)"')
+    include_support_re = re.compile(r'^#include "(Support/.*)"')
+    include_config_re = re.compile(r'^#include "(Config/.*)"')
+    include_system_re = re.compile(r"^#include <(.*)>")
+
+    DISALLOWED_SYSTEM_HEADERS = ["iostream"]
+
+    line_num = 1
+    prev_config_header = None
+    prev_system_header = None
+    for line in lines:
+        # TODO: implement private headers
+        # TODO: implement gtest headers
+        # TODO: implement top-level llvm/* headers
+        # TODO: implement llvm/Support/* headers
+
+        # Process Config/* headers
+        config_header = include_config_re.match(line)
+        if config_header:
+            curr_config_header = config_header.group(1)
+            if prev_config_header:
+                if prev_config_header > curr_config_header:
+                    lint.append(
+                        (
+                            filename,
+                            line_num,
+                            'Config headers not in order: "%s" before "%s"'
+                            % (prev_config_header, curr_config_header),
+                        )
+                    )
+
+        # Process system headers
+        system_header = include_system_re.match(line)
+        if system_header:
+            curr_system_header = system_header.group(1)
+
+            # Is it disallowed?
+            if curr_system_header in DISALLOWED_SYSTEM_HEADERS:
+                lint.append(
+                    (
+                        filename,
+                        line_num,
+                        "Disallowed system header: <%s>" % curr_system_header,
+                    )
+                )
+            elif prev_system_header:
+                # Make sure system headers are alphabetized amongst themselves
+                if prev_system_header > curr_system_header:
+                    lint.append(
+                        (
+                            filename,
+                            line_num,
+                            "System headers not in order: <%s> before <%s>"
+                            % (prev_system_header, curr_system_header),
+                        )
+                    )
+
+            prev_system_header = curr_system_header
+
+        line_num += 1
 
-  def RunOnFile(self, filename, lines):
-    lint = []
-    lint.extend(VerifyIncludes(filename, lines))
-    lint.extend(common_lint.VerifyLineLength(filename, lines,
-                                             CppLint.MAX_LINE_LENGTH))
-    lint.extend(common_lint.VerifyTabs(filename, lines))
-    lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
     return lint
 
 
+class CppLint(common_lint.BaseLint):
+    MAX_LINE_LENGTH = 80
+
+    def RunOnFile(self, filename, lines):
+        lint = []
+        lint.extend(VerifyIncludes(filename, lines))
+        lint.extend(
+            common_lint.VerifyLineLength(filename, lines, CppLint.MAX_LINE_LENGTH)
+        )
+        lint.extend(common_lint.VerifyTabs(filename, lines))
+        lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
+        return lint
+
+
 def CppLintMain(filenames):
-  all_lint = common_lint.RunLintOverAllFiles(CppLint(), filenames)
-  for lint in all_lint:
-    print('%s:%d:%s' % (lint[0], lint[1], lint[2]))
-  return 0
+    all_lint = common_lint.RunLintOverAllFiles(CppLint(), filenames)
+    for lint in all_lint:
+        print("%s:%d:%s" % (lint[0], lint[1], lint[2]))
+    return 0
 
 
-if __name__ == '__main__':
-  sys.exit(CppLintMain(sys.argv[1:]))
+if __name__ == "__main__":
+    sys.exit(CppLintMain(sys.argv[1:]))

diff  --git a/llvm/utils/lint/generic_lint.py b/llvm/utils/lint/generic_lint.py
index 06218d7ea3c19..18e6e67e29a6e 100755
--- a/llvm/utils/lint/generic_lint.py
+++ b/llvm/utils/lint/generic_lint.py
@@ -6,19 +6,19 @@
 import common_lint
 import sys
 
+
 class GenericCodeLint(common_lint.BaseLint):
-  MAX_LINE_LENGTH = 80
+    MAX_LINE_LENGTH = 80
 
-  def RunOnFile(self, filename, lines):
-    common_lint.VerifyLineLength(filename, lines,
-                                 GenericCodeLint.MAX_LINE_LENGTH)
-    common_lint.VerifyTrailingWhitespace(filename, lines)
+    def RunOnFile(self, filename, lines):
+        common_lint.VerifyLineLength(filename, lines, GenericCodeLint.MAX_LINE_LENGTH)
+        common_lint.VerifyTrailingWhitespace(filename, lines)
 
 
 def GenericCodeLintMain(filenames):
-  common_lint.RunLintOverAllFiles(GenericCodeLint(), filenames)
-  return 0
+    common_lint.RunLintOverAllFiles(GenericCodeLint(), filenames)
+    return 0
 
 
-if __name__ == '__main__':
-  sys.exit(GenericCodeLintMain(sys.argv[1:]))
+if __name__ == "__main__":
+    sys.exit(GenericCodeLintMain(sys.argv[1:]))

diff  --git a/llvm/utils/lit/examples/many-tests/lit.cfg b/llvm/utils/lit/examples/many-tests/lit.cfg
index 3477fef01ef98..da01c935c8e66 100644
--- a/llvm/utils/lit/examples/many-tests/lit.cfg
+++ b/llvm/utils/lit/examples/many-tests/lit.cfg
@@ -2,6 +2,7 @@
 
 import sys
 import os
+
 sys.path.insert(0, os.path.dirname(__file__))
 import ManyTests
 

diff  --git a/llvm/utils/lit/lit.py b/llvm/utils/lit/lit.py
index 381228ea39877..5c1c953e8bc75 100755
--- a/llvm/utils/lit/lit.py
+++ b/llvm/utils/lit/lit.py
@@ -2,5 +2,5 @@
 
 from lit.main import main
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff  --git a/llvm/utils/lit/lit/BooleanExpression.py b/llvm/utils/lit/lit/BooleanExpression.py
index ba8453d60e98a..9b9573d2f3f14 100644
--- a/llvm/utils/lit/lit/BooleanExpression.py
+++ b/llvm/utils/lit/lit/BooleanExpression.py
@@ -1,5 +1,6 @@
 import re
 
+
 class BooleanExpression:
     # A simple evaluator of boolean expressions.
     #
@@ -30,14 +31,14 @@ def evaluate(string, variables):
             parser = BooleanExpression(string, set(variables))
             return parser.parseAll()
         except ValueError as e:
-            raise ValueError(str(e) + ('\nin expression: %r' % string))
+            raise ValueError(str(e) + ("\nin expression: %r" % string))
 
     #####
 
     def __init__(self, string, variables):
         self.tokens = BooleanExpression.tokenize(string)
         self.variables = variables
-        self.variables.add('true')
+        self.variables.add("true")
         self.value = None
         self.token = None
 
@@ -45,7 +46,9 @@ def __init__(self, string, variables):
     END = object()
 
     # Tokenization pattern.
-    Pattern = re.compile(r'\A\s*([()]|&&|\|\||!|(?:[-+=._a-zA-Z0-9]+|\{\{.+?\}\})+)\s*(.*)\Z')
+    Pattern = re.compile(
+        r"\A\s*([()]|&&|\|\||!|(?:[-+=._a-zA-Z0-9]+|\{\{.+?\}\})+)\s*(.*)\Z"
+    )
 
     @staticmethod
     def tokenize(string):
@@ -53,7 +56,7 @@ def tokenize(string):
             m = re.match(BooleanExpression.Pattern, string)
             if m is None:
                 if string == "":
-                    yield BooleanExpression.END;
+                    yield BooleanExpression.END
                     return
                 else:
                     raise ValueError("couldn't parse text: %r" % string)
@@ -64,7 +67,7 @@ def tokenize(string):
 
     def quote(self, token):
         if token is BooleanExpression.END:
-            return '<end of expression>'
+            return "<end of expression>"
         else:
             return repr(token)
 
@@ -80,22 +83,29 @@ def expect(self, t):
             if self.token != BooleanExpression.END:
                 self.token = next(self.tokens)
         else:
-            raise ValueError("expected: %s\nhave: %s" %
-                             (self.quote(t), self.quote(self.token)))
+            raise ValueError(
+                "expected: %s\nhave: %s" % (self.quote(t), self.quote(self.token))
+            )
 
     @staticmethod
     def isMatchExpression(token):
-        if (token is BooleanExpression.END or token == '&&' or token == '||' or
-            token == '!' or token == '(' or token == ')'):
+        if (
+            token is BooleanExpression.END
+            or token == "&&"
+            or token == "||"
+            or token == "!"
+            or token == "("
+            or token == ")"
+        ):
             return False
         return True
 
     def parseMATCH(self):
-        regex = ''
-        for part in filter(None, re.split(r'(\{\{.+?\}\})', self.token)):
-            if part.startswith('{{'):
-                assert part.endswith('}}')
-                regex += '(?:{})'.format(part[2:-2])
+        regex = ""
+        for part in filter(None, re.split(r"(\{\{.+?\}\})", self.token)):
+            if part.startswith("{{"):
+                assert part.endswith("}}")
+                regex += "(?:{})".format(part[2:-2])
             else:
                 regex += re.escape(part)
         regex = re.compile(regex)
@@ -103,21 +113,23 @@ def parseMATCH(self):
         self.token = next(self.tokens)
 
     def parseNOT(self):
-        if self.accept('!'):
+        if self.accept("!"):
             self.parseNOT()
             self.value = not self.value
-        elif self.accept('('):
+        elif self.accept("("):
             self.parseOR()
-            self.expect(')')
+            self.expect(")")
         elif not BooleanExpression.isMatchExpression(self.token):
-            raise ValueError("expected: '!', '(', '{{', or identifier\nhave: %s" %
-                             self.quote(self.token))
+            raise ValueError(
+                "expected: '!', '(', '{{', or identifier\nhave: %s"
+                % self.quote(self.token)
+            )
         else:
             self.parseMATCH()
 
     def parseAND(self):
         self.parseNOT()
-        while self.accept('&&'):
+        while self.accept("&&"):
             left = self.value
             self.parseNOT()
             right = self.value
@@ -127,7 +139,7 @@ def parseAND(self):
 
     def parseOR(self):
         self.parseAND()
-        while self.accept('||'):
+        while self.accept("||"):
             left = self.value
             self.parseAND()
             right = self.value
@@ -147,61 +159,90 @@ def parseAll(self):
 
 import unittest
 
+
 class TestBooleanExpression(unittest.TestCase):
     def test_variables(self):
-        variables = {'its-true', 'false-lol-true', 'under_score',
-                     'e=quals', 'd1g1ts'}
-        self.assertTrue(BooleanExpression.evaluate('true', variables))
-        self.assertTrue(BooleanExpression.evaluate('its-true', variables))
-        self.assertTrue(BooleanExpression.evaluate('false-lol-true', variables))
-        self.assertTrue(BooleanExpression.evaluate('under_score', variables))
-        self.assertTrue(BooleanExpression.evaluate('e=quals', variables))
-        self.assertTrue(BooleanExpression.evaluate('d1g1ts', variables))
-        self.assertTrue(BooleanExpression.evaluate('{{its.+}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('{{false-[lo]+-true}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('{{(true|false)-lol-(true|false)}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('d1g{{[0-9]}}ts', variables))
-        self.assertTrue(BooleanExpression.evaluate('d1g{{[0-9]}}t{{[a-z]}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('{{d}}1g{{[0-9]}}t{{[a-z]}}', variables))
-        self.assertTrue(BooleanExpression.evaluate('d1{{(g|1)+}}ts', variables))
-
-        self.assertFalse(BooleanExpression.evaluate('false', variables))
-        self.assertFalse(BooleanExpression.evaluate('True', variables))
-        self.assertFalse(BooleanExpression.evaluate('true-ish', variables))
-        self.assertFalse(BooleanExpression.evaluate('not_true', variables))
-        self.assertFalse(BooleanExpression.evaluate('tru', variables))
-        self.assertFalse(BooleanExpression.evaluate('{{its-true.+}}', variables))
+        variables = {"its-true", "false-lol-true", "under_score", "e=quals", "d1g1ts"}
+        self.assertTrue(BooleanExpression.evaluate("true", variables))
+        self.assertTrue(BooleanExpression.evaluate("its-true", variables))
+        self.assertTrue(BooleanExpression.evaluate("false-lol-true", variables))
+        self.assertTrue(BooleanExpression.evaluate("under_score", variables))
+        self.assertTrue(BooleanExpression.evaluate("e=quals", variables))
+        self.assertTrue(BooleanExpression.evaluate("d1g1ts", variables))
+        self.assertTrue(BooleanExpression.evaluate("{{its.+}}", variables))
+        self.assertTrue(BooleanExpression.evaluate("{{false-[lo]+-true}}", variables))
+        self.assertTrue(
+            BooleanExpression.evaluate("{{(true|false)-lol-(true|false)}}", variables)
+        )
+        self.assertTrue(BooleanExpression.evaluate("d1g{{[0-9]}}ts", variables))
+        self.assertTrue(BooleanExpression.evaluate("d1g{{[0-9]}}t{{[a-z]}}", variables))
+        self.assertTrue(
+            BooleanExpression.evaluate("{{d}}1g{{[0-9]}}t{{[a-z]}}", variables)
+        )
+        self.assertTrue(BooleanExpression.evaluate("d1{{(g|1)+}}ts", variables))
+
+        self.assertFalse(BooleanExpression.evaluate("false", variables))
+        self.assertFalse(BooleanExpression.evaluate("True", variables))
+        self.assertFalse(BooleanExpression.evaluate("true-ish", variables))
+        self.assertFalse(BooleanExpression.evaluate("not_true", variables))
+        self.assertFalse(BooleanExpression.evaluate("tru", variables))
+        self.assertFalse(BooleanExpression.evaluate("{{its-true.+}}", variables))
 
     def test_matching(self):
-        expr1 = 'linux && (target={{aarch64-.+}} || target={{x86_64-.+}})'
-        self.assertTrue(BooleanExpression.evaluate(expr1, {'linux', 'target=x86_64-unknown-linux-gnu'}))
-        self.assertFalse(BooleanExpression.evaluate(expr1, {'linux', 'target=i386-unknown-linux-gnu'}))
-
-        expr2 = 'use_system_cxx_lib && target={{.+}}-apple-macosx10.{{9|10|11|12}} && !no-exceptions'
-        self.assertTrue(BooleanExpression.evaluate(expr2, {'use_system_cxx_lib', 'target=arm64-apple-macosx10.12'}))
-        self.assertFalse(BooleanExpression.evaluate(expr2, {'use_system_cxx_lib', 'target=arm64-apple-macosx10.12', 'no-exceptions'}))
-        self.assertFalse(BooleanExpression.evaluate(expr2, {'use_system_cxx_lib', 'target=arm64-apple-macosx10.15'}))
+        expr1 = "linux && (target={{aarch64-.+}} || target={{x86_64-.+}})"
+        self.assertTrue(
+            BooleanExpression.evaluate(
+                expr1, {"linux", "target=x86_64-unknown-linux-gnu"}
+            )
+        )
+        self.assertFalse(
+            BooleanExpression.evaluate(
+                expr1, {"linux", "target=i386-unknown-linux-gnu"}
+            )
+        )
+
+        expr2 = "use_system_cxx_lib && target={{.+}}-apple-macosx10.{{9|10|11|12}} && !no-exceptions"
+        self.assertTrue(
+            BooleanExpression.evaluate(
+                expr2, {"use_system_cxx_lib", "target=arm64-apple-macosx10.12"}
+            )
+        )
+        self.assertFalse(
+            BooleanExpression.evaluate(
+                expr2,
+                {
+                    "use_system_cxx_lib",
+                    "target=arm64-apple-macosx10.12",
+                    "no-exceptions",
+                },
+            )
+        )
+        self.assertFalse(
+            BooleanExpression.evaluate(
+                expr2, {"use_system_cxx_lib", "target=arm64-apple-macosx10.15"}
+            )
+        )
 
     def test_operators(self):
-        self.assertTrue(BooleanExpression.evaluate('true || true', {}))
-        self.assertTrue(BooleanExpression.evaluate('true || false', {}))
-        self.assertTrue(BooleanExpression.evaluate('false || true', {}))
-        self.assertFalse(BooleanExpression.evaluate('false || false', {}))
-
-        self.assertTrue(BooleanExpression.evaluate('true && true', {}))
-        self.assertFalse(BooleanExpression.evaluate('true && false', {}))
-        self.assertFalse(BooleanExpression.evaluate('false && true', {}))
-        self.assertFalse(BooleanExpression.evaluate('false && false', {}))
-
-        self.assertFalse(BooleanExpression.evaluate('!true', {}))
-        self.assertTrue(BooleanExpression.evaluate('!false', {}))
-
-        self.assertTrue(BooleanExpression.evaluate('   ((!((false) ))   ) ', {}))
-        self.assertTrue(BooleanExpression.evaluate('true && (true && (true))', {}))
-        self.assertTrue(BooleanExpression.evaluate('!false && !false && !! !false', {}))
-        self.assertTrue(BooleanExpression.evaluate('false && false || true', {}))
-        self.assertTrue(BooleanExpression.evaluate('(false && false) || true', {}))
-        self.assertFalse(BooleanExpression.evaluate('false && (false || true)', {}))
+        self.assertTrue(BooleanExpression.evaluate("true || true", {}))
+        self.assertTrue(BooleanExpression.evaluate("true || false", {}))
+        self.assertTrue(BooleanExpression.evaluate("false || true", {}))
+        self.assertFalse(BooleanExpression.evaluate("false || false", {}))
+
+        self.assertTrue(BooleanExpression.evaluate("true && true", {}))
+        self.assertFalse(BooleanExpression.evaluate("true && false", {}))
+        self.assertFalse(BooleanExpression.evaluate("false && true", {}))
+        self.assertFalse(BooleanExpression.evaluate("false && false", {}))
+
+        self.assertFalse(BooleanExpression.evaluate("!true", {}))
+        self.assertTrue(BooleanExpression.evaluate("!false", {}))
+
+        self.assertTrue(BooleanExpression.evaluate("   ((!((false) ))   ) ", {}))
+        self.assertTrue(BooleanExpression.evaluate("true && (true && (true))", {}))
+        self.assertTrue(BooleanExpression.evaluate("!false && !false && !! !false", {}))
+        self.assertTrue(BooleanExpression.evaluate("false && false || true", {}))
+        self.assertTrue(BooleanExpression.evaluate("(false && false) || true", {}))
+        self.assertFalse(BooleanExpression.evaluate("false && (false || true)", {}))
 
     # Evaluate boolean expression `expr`.
     # Fail if it does not throw a ValueError containing the text `error`.
@@ -211,74 +252,99 @@ def checkException(self, expr, error):
             self.fail("expression %r didn't cause an exception" % expr)
         except ValueError as e:
             if -1 == str(e).find(error):
-                self.fail(("expression %r caused the wrong ValueError\n" +
-                           "actual error was:\n%s\n" +
-                           "expected error was:\n%s\n") % (expr, e, error))
+                self.fail(
+                    (
+                        "expression %r caused the wrong ValueError\n"
+                        + "actual error was:\n%s\n"
+                        + "expected error was:\n%s\n"
+                    )
+                    % (expr, e, error)
+                )
         except BaseException as e:
-            self.fail(("expression %r caused the wrong exception; actual " +
-                      "exception was: \n%r") % (expr, e))
+            self.fail(
+                (
+                    "expression %r caused the wrong exception; actual "
+                    + "exception was: \n%r"
+                )
+                % (expr, e)
+            )
 
     def test_errors(self):
-        self.checkException("ba#d",
-                            "couldn't parse text: '#d'\n" +
-                            "in expression: 'ba#d'")
-
-        self.checkException("true and true",
-                            "expected: <end of expression>\n" +
-                            "have: 'and'\n" +
-                            "in expression: 'true and true'")
-
-        self.checkException("|| true",
-                            "expected: '!', '(', '{{', or identifier\n" +
-                            "have: '||'\n" +
-                            "in expression: '|| true'")
-
-        self.checkException("true &&",
-                            "expected: '!', '(', '{{', or identifier\n" +
-                            "have: <end of expression>\n" +
-                            "in expression: 'true &&'")
-
-        self.checkException("",
-                            "expected: '!', '(', '{{', or identifier\n" +
-                            "have: <end of expression>\n" +
-                            "in expression: ''")
-
-        self.checkException("*",
-                            "couldn't parse text: '*'\n" +
-                            "in expression: '*'")
-
-        self.checkException("no wait stop",
-                            "expected: <end of expression>\n" +
-                            "have: 'wait'\n" +
-                            "in expression: 'no wait stop'")
-
-        self.checkException("no-$-please",
-                            "couldn't parse text: '$-please'\n" +
-                            "in expression: 'no-$-please'")
-
-        self.checkException("(((true && true) || true)",
-                            "expected: ')'\n" +
-                            "have: <end of expression>\n" +
-                            "in expression: '(((true && true) || true)'")
-
-        self.checkException("true (true)",
-                            "expected: <end of expression>\n" +
-                            "have: '('\n" +
-                            "in expression: 'true (true)'")
-
-        self.checkException("( )",
-                            "expected: '!', '(', '{{', or identifier\n" +
-                            "have: ')'\n" +
-                            "in expression: '( )'")
-
-        self.checkException("abc{{def",
-                            "couldn't parse text: '{{def'\n" +
-                            "in expression: 'abc{{def'")
-
-        self.checkException("{{}}",
-                            "couldn't parse text: '{{}}'\n" +
-                            "in expression: '{{}}'")
-
-
-if __name__ == '__main__':
+        self.checkException(
+            "ba#d", "couldn't parse text: '#d'\n" + "in expression: 'ba#d'"
+        )
+
+        self.checkException(
+            "true and true",
+            "expected: <end of expression>\n"
+            + "have: 'and'\n"
+            + "in expression: 'true and true'",
+        )
+
+        self.checkException(
+            "|| true",
+            "expected: '!', '(', '{{', or identifier\n"
+            + "have: '||'\n"
+            + "in expression: '|| true'",
+        )
+
+        self.checkException(
+            "true &&",
+            "expected: '!', '(', '{{', or identifier\n"
+            + "have: <end of expression>\n"
+            + "in expression: 'true &&'",
+        )
+
+        self.checkException(
+            "",
+            "expected: '!', '(', '{{', or identifier\n"
+            + "have: <end of expression>\n"
+            + "in expression: ''",
+        )
+
+        self.checkException("*", "couldn't parse text: '*'\n" + "in expression: '*'")
+
+        self.checkException(
+            "no wait stop",
+            "expected: <end of expression>\n"
+            + "have: 'wait'\n"
+            + "in expression: 'no wait stop'",
+        )
+
+        self.checkException(
+            "no-$-please",
+            "couldn't parse text: '$-please'\n" + "in expression: 'no-$-please'",
+        )
+
+        self.checkException(
+            "(((true && true) || true)",
+            "expected: ')'\n"
+            + "have: <end of expression>\n"
+            + "in expression: '(((true && true) || true)'",
+        )
+
+        self.checkException(
+            "true (true)",
+            "expected: <end of expression>\n"
+            + "have: '('\n"
+            + "in expression: 'true (true)'",
+        )
+
+        self.checkException(
+            "( )",
+            "expected: '!', '(', '{{', or identifier\n"
+            + "have: ')'\n"
+            + "in expression: '( )'",
+        )
+
+        self.checkException(
+            "abc{{def", "couldn't parse text: '{{def'\n" + "in expression: 'abc{{def'"
+        )
+
+        self.checkException(
+            "{{}}", "couldn't parse text: '{{}}'\n" + "in expression: '{{}}'"
+        )
+
+
+if __name__ == "__main__":
     unittest.main()

diff  --git a/llvm/utils/lit/lit/LitConfig.py b/llvm/utils/lit/lit/LitConfig.py
index d6287d06034f3..fcd6825f1d6da 100644
--- a/llvm/utils/lit/lit/LitConfig.py
+++ b/llvm/utils/lit/lit/LitConfig.py
@@ -20,13 +20,24 @@ class LitConfig(object):
     easily.
     """
 
-    def __init__(self, progname, path, quiet,
-                 useValgrind, valgrindLeakCheck, valgrindArgs,
-                 noExecute, debug, isWindows, order,
-                 params, config_prefix = None,
-                 maxIndividualTestTime = 0,
-                 parallelism_groups = {},
-                 echo_all_commands = False):
+    def __init__(
+        self,
+        progname,
+        path,
+        quiet,
+        useValgrind,
+        valgrindLeakCheck,
+        valgrindArgs,
+        noExecute,
+        debug,
+        isWindows,
+        order,
+        params,
+        config_prefix=None,
+        maxIndividualTestTime=0,
+        parallelism_groups={},
+        echo_all_commands=False,
+    ):
         # The name of the test runner.
         self.progname = progname
         # The items to add to the PATH environment variable.
@@ -43,25 +54,34 @@ def __init__(self, progname, path, quiet,
         self.bashPath = None
 
         # Configuration files to look for when discovering test suites.
-        self.config_prefix = config_prefix or 'lit'
-        self.suffixes = ['cfg.py', 'cfg']
-        self.config_names = ['%s.%s' % (self.config_prefix,x) for x in self.suffixes]
-        self.site_config_names = ['%s.site.%s' % (self.config_prefix,x) for x in self.suffixes]
-        self.local_config_names = ['%s.local.%s' % (self.config_prefix,x) for x in self.suffixes]
+        self.config_prefix = config_prefix or "lit"
+        self.suffixes = ["cfg.py", "cfg"]
+        self.config_names = ["%s.%s" % (self.config_prefix, x) for x in self.suffixes]
+        self.site_config_names = [
+            "%s.site.%s" % (self.config_prefix, x) for x in self.suffixes
+        ]
+        self.local_config_names = [
+            "%s.local.%s" % (self.config_prefix, x) for x in self.suffixes
+        ]
 
         self.numErrors = 0
         self.numWarnings = 0
 
         self.valgrindArgs = []
         if self.useValgrind:
-            self.valgrindArgs = ['valgrind', '-q', '--run-libc-freeres=no',
-                                 '--tool=memcheck', '--trace-children=yes',
-                                 '--error-exitcode=123']
+            self.valgrindArgs = [
+                "valgrind",
+                "-q",
+                "--run-libc-freeres=no",
+                "--tool=memcheck",
+                "--trace-children=yes",
+                "--error-exitcode=123",
+            ]
             if self.valgrindLeakCheck:
-                self.valgrindArgs.append('--leak-check=full')
+                self.valgrindArgs.append("--leak-check=full")
             else:
                 # The default is 'summary'.
-                self.valgrindArgs.append('--leak-check=no')
+                self.valgrindArgs.append("--leak-check=no")
             self.valgrindArgs.extend(self.valgrindUserArgs)
 
         self.maxIndividualTestTime = maxIndividualTestTime
@@ -71,32 +91,32 @@ def __init__(self, progname, path, quiet,
     @property
     def maxIndividualTestTime(self):
         """
-            Interface for getting maximum time to spend executing
-            a single test
+        Interface for getting maximum time to spend executing
+        a single test
         """
         return self._maxIndividualTestTime
 
     @property
     def maxIndividualTestTimeIsSupported(self):
         """
-            Returns a tuple (<supported> , <error message>)
-            where
-            `<supported>` is True if setting maxIndividualTestTime is supported
-                on the current host, returns False otherwise.
-            `<error message>` is an empty string if `<supported>` is True,
-                otherwise is contains a string describing why setting
-                maxIndividualTestTime is not supported.
+        Returns a tuple (<supported> , <error message>)
+        where
+        `<supported>` is True if setting maxIndividualTestTime is supported
+            on the current host, returns False otherwise.
+        `<error message>` is an empty string if `<supported>` is True,
+            otherwise is contains a string describing why setting
+            maxIndividualTestTime is not supported.
         """
         return lit.util.killProcessAndChildrenIsSupported()
 
     @maxIndividualTestTime.setter
     def maxIndividualTestTime(self, value):
         """
-            Interface for setting maximum time to spend executing
-            a single test
+        Interface for setting maximum time to spend executing
+        a single test
         """
         if not isinstance(value, int):
-            self.fatal('maxIndividualTestTime must set to a value of type int.')
+            self.fatal("maxIndividualTestTime must set to a value of type int.")
         self._maxIndividualTestTime = value
         if self.maxIndividualTestTime > 0:
             # The current implementation needs psutil on some platforms to set
@@ -104,16 +124,15 @@ def maxIndividualTestTime(self, value):
             # See lit.util.killProcessAndChildren()
             supported, errormsg = self.maxIndividualTestTimeIsSupported
             if not supported:
-                self.fatal('Setting a timeout per test not supported. ' +
-                           errormsg)
+                self.fatal("Setting a timeout per test not supported. " + errormsg)
         elif self.maxIndividualTestTime < 0:
-            self.fatal('The timeout per test must be >= 0 seconds')
+            self.fatal("The timeout per test must be >= 0 seconds")
 
     def load_config(self, config, path):
         """load_config(config, path) - Load a config object from an alternate
         path."""
         if self.debug:
-            self.note('load_config from %r' % path)
+            self.note("load_config from %r" % path)
         config.load_from_path(path, self)
         return config
 
@@ -122,28 +141,32 @@ def getBashPath(self):
         if self.bashPath is not None:
             return self.bashPath
 
-        self.bashPath = lit.util.which('bash', os.pathsep.join(self.path))
+        self.bashPath = lit.util.which("bash", os.pathsep.join(self.path))
         if self.bashPath is None:
-            self.bashPath = lit.util.which('bash')
+            self.bashPath = lit.util.which("bash")
 
         if self.bashPath is None:
-            self.bashPath = ''
+            self.bashPath = ""
 
         # Check whether the found version of bash is able to cope with paths in
         # the host path format. If not, don't return it as it can't be used to
         # run scripts. For example, WSL's bash.exe requires '/mnt/c/foo' rather
         # than 'C:\\foo' or 'C:/foo'.
         if self.isWindows and self.bashPath:
-            command = [self.bashPath, '-c',
-                       '[[ -f "%s" ]]' % self.bashPath.replace('\\', '\\\\')]
+            command = [
+                self.bashPath,
+                "-c",
+                '[[ -f "%s" ]]' % self.bashPath.replace("\\", "\\\\"),
+            ]
             _, _, exitCode = lit.util.executeCommand(command)
             if exitCode:
-                self.note('bash command failed: %s' % (
-                    ' '.join('"%s"' % c for c in command)))
-                self.bashPath = ''
+                self.note(
+                    "bash command failed: %s" % (" ".join('"%s"' % c for c in command))
+                )
+                self.bashPath = ""
 
         if not self.bashPath:
-            self.warning('Unable to find a usable version of bash.')
+            self.warning("Unable to find a usable version of bash.")
 
         return self.bashPath
 
@@ -155,9 +178,9 @@ def getToolsPath(self, dir, paths, tools):
             dir = lit.util.whichTools(tools, paths)
 
         # bash
-        self.bashPath = lit.util.which('bash', dir)
+        self.bashPath = lit.util.which("bash", dir)
         if self.bashPath is None:
-            self.bashPath = ''
+            self.bashPath = ""
 
         return dir
 
@@ -168,8 +191,9 @@ def _write_message(self, kind, message):
         f = f.f_back.f_back
         file = os.path.abspath(inspect.getsourcefile(f))
         line = inspect.getlineno(f)
-        sys.stderr.write('%s: %s:%d: %s: %s\n' % (self.progname, file, line,
-                                                  kind, message))
+        sys.stderr.write(
+            "%s: %s:%d: %s: %s\n" % (self.progname, file, line, kind, message)
+        )
         if self.isWindows:
             # In a git bash terminal, the writes to sys.stderr aren't visible
             # on screen immediately. Flush them here to avoid broken/misoredered
@@ -179,25 +203,26 @@ def _write_message(self, kind, message):
     def substitute(self, string):
         """substitute - Interpolate params into a string"""
         try:
-          return string % self.params
+            return string % self.params
         except KeyError as e:
-          key, = e.args
-          self.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (
-              key,key))
+            (key,) = e.args
+            self.fatal(
+                "unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)
+            )
 
     def note(self, message):
         if not self.quiet:
-            self._write_message('note', message)
+            self._write_message("note", message)
 
     def warning(self, message):
         if not self.quiet:
-            self._write_message('warning', message)
+            self._write_message("warning", message)
         self.numWarnings += 1
 
     def error(self, message):
-        self._write_message('error', message)
+        self._write_message("error", message)
         self.numErrors += 1
 
     def fatal(self, message):
-        self._write_message('fatal', message)
+        self._write_message("fatal", message)
         sys.exit(2)

diff  --git a/llvm/utils/lit/lit/LitTestCase.py b/llvm/utils/lit/lit/LitTestCase.py
index 2e9b64953bd15..d44b76a0a0415 100644
--- a/llvm/utils/lit/lit/LitTestCase.py
+++ b/llvm/utils/lit/lit/LitTestCase.py
@@ -39,11 +39,12 @@ def runTest(self):
 
 def load_test_suite(inputs):
     import platform
-    windows = platform.system() == 'Windows'
+
+    windows = platform.system() == "Windows"
 
     # Create the global config object.
     lit_config = lit.LitConfig.LitConfig(
-        progname='lit',
+        progname="lit",
         path=[],
         quiet=False,
         useValgrind=False,
@@ -52,8 +53,9 @@ def load_test_suite(inputs):
         noExecute=False,
         debug=False,
         isWindows=windows,
-        order='smart',
-        params={})
+        order="smart",
+        params={},
+    )
 
     # Perform test discovery.
     tests = lit.discovery.find_tests_for_inputs(lit_config, inputs, False)

diff  --git a/llvm/utils/lit/lit/ProgressBar.py b/llvm/utils/lit/lit/ProgressBar.py
index fd721db780b5c..382b8f2e52540 100644
--- a/llvm/utils/lit/lit/ProgressBar.py
+++ b/llvm/utils/lit/lit/ProgressBar.py
@@ -5,15 +5,17 @@
 
 import sys, re, time
 
+
 def to_bytes(str):
     # Encode to UTF-8 to get binary data.
-    return str.encode('utf-8')
+    return str.encode("utf-8")
+
 
 class TerminalController:
     """
     A class that can be used to portably generate formatted output to
-    a terminal.  
-    
+    a terminal.
+
     `TerminalController` defines a set of instance variables whose
     values are initialized to the control sequence necessary to
     perform a given action.  These can be simply included in normal
@@ -43,41 +45,42 @@ class TerminalController:
     Finally, if the width and height of the terminal are known, then
     they will be stored in the `COLS` and `LINES` attributes.
     """
+
     # Cursor movement:
-    BOL = ''             #: Move the cursor to the beginning of the line
-    UP = ''              #: Move the cursor up one line
-    DOWN = ''            #: Move the cursor down one line
-    LEFT = ''            #: Move the cursor left one char
-    RIGHT = ''           #: Move the cursor right one char
+    BOL = ""  #: Move the cursor to the beginning of the line
+    UP = ""  #: Move the cursor up one line
+    DOWN = ""  #: Move the cursor down one line
+    LEFT = ""  #: Move the cursor left one char
+    RIGHT = ""  #: Move the cursor right one char
 
     # Deletion:
-    CLEAR_SCREEN = ''    #: Clear the screen and move to home position
-    CLEAR_EOL = ''       #: Clear to the end of the line.
-    CLEAR_BOL = ''       #: Clear to the beginning of the line.
-    CLEAR_EOS = ''       #: Clear to the end of the screen
+    CLEAR_SCREEN = ""  #: Clear the screen and move to home position
+    CLEAR_EOL = ""  #: Clear to the end of the line.
+    CLEAR_BOL = ""  #: Clear to the beginning of the line.
+    CLEAR_EOS = ""  #: Clear to the end of the screen
 
     # Output modes:
-    BOLD = ''            #: Turn on bold mode
-    BLINK = ''           #: Turn on blink mode
-    DIM = ''             #: Turn on half-bright mode
-    REVERSE = ''         #: Turn on reverse-video mode
-    NORMAL = ''          #: Turn off all modes
+    BOLD = ""  #: Turn on bold mode
+    BLINK = ""  #: Turn on blink mode
+    DIM = ""  #: Turn on half-bright mode
+    REVERSE = ""  #: Turn on reverse-video mode
+    NORMAL = ""  #: Turn off all modes
 
     # Cursor display:
-    HIDE_CURSOR = ''     #: Make the cursor invisible
-    SHOW_CURSOR = ''     #: Make the cursor visible
+    HIDE_CURSOR = ""  #: Make the cursor invisible
+    SHOW_CURSOR = ""  #: Make the cursor visible
 
     # Terminal size:
-    COLS = None          #: Width of the terminal (None for unknown)
-    LINES = None         #: Height of the terminal (None for unknown)
+    COLS = None  #: Width of the terminal (None for unknown)
+    LINES = None  #: Height of the terminal (None for unknown)
 
     # Foreground colors:
-    BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
-    
+    BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ""
+
     # Background colors:
-    BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
-    BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
-    
+    BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ""
+    BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ""
+
     _STRING_CAPABILITIES = """
     BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
     CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
@@ -95,60 +98,67 @@ def __init__(self, term_stream=sys.stdout):
         assumed to be a dumb terminal (i.e., have no capabilities).
         """
         # Curses isn't available on all platforms
-        try: import curses
-        except: return
+        try:
+            import curses
+        except:
+            return
 
         # If the stream isn't a tty, then assume it has no capabilities.
-        if not term_stream.isatty(): return
+        if not term_stream.isatty():
+            return
 
         # Check the terminal type.  If we fail, then assume that the
         # terminal has no capabilities.
-        try: curses.setupterm()
-        except: return
+        try:
+            curses.setupterm()
+        except:
+            return
 
         # Look up numeric capabilities.
-        self.COLS = curses.tigetnum('cols')
-        self.LINES = curses.tigetnum('lines')
-        self.XN = curses.tigetflag('xenl')
-        
+        self.COLS = curses.tigetnum("cols")
+        self.LINES = curses.tigetnum("lines")
+        self.XN = curses.tigetflag("xenl")
+
         # Look up string capabilities.
         for capability in self._STRING_CAPABILITIES:
-            (attrib, cap_name) = capability.split('=')
-            setattr(self, attrib, self._tigetstr(cap_name) or '')
+            (attrib, cap_name) = capability.split("=")
+            setattr(self, attrib, self._tigetstr(cap_name) or "")
 
         # Colors
-        set_fg = self._tigetstr('setf')
+        set_fg = self._tigetstr("setf")
         if set_fg:
-            for i,color in zip(range(len(self._COLORS)), self._COLORS):
+            for i, color in zip(range(len(self._COLORS)), self._COLORS):
                 setattr(self, color, self._tparm(set_fg, i))
-        set_fg_ansi = self._tigetstr('setaf')
+        set_fg_ansi = self._tigetstr("setaf")
         if set_fg_ansi:
-            for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
+            for i, color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                 setattr(self, color, self._tparm(set_fg_ansi, i))
-        set_bg = self._tigetstr('setb')
+        set_bg = self._tigetstr("setb")
         if set_bg:
-            for i,color in zip(range(len(self._COLORS)), self._COLORS):
-                setattr(self, 'BG_'+color, self._tparm(set_bg, i))
-        set_bg_ansi = self._tigetstr('setab')
+            for i, color in zip(range(len(self._COLORS)), self._COLORS):
+                setattr(self, "BG_" + color, self._tparm(set_bg, i))
+        set_bg_ansi = self._tigetstr("setab")
         if set_bg_ansi:
-            for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
-                setattr(self, 'BG_'+color, self._tparm(set_bg_ansi, i))
+            for i, color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
+                setattr(self, "BG_" + color, self._tparm(set_bg_ansi, i))
 
     def _tparm(self, arg, index):
         import curses
-        return curses.tparm(to_bytes(arg), index).decode('utf-8') or ''
+
+        return curses.tparm(to_bytes(arg), index).decode("utf-8") or ""
 
     def _tigetstr(self, cap_name):
         # String capabilities can include "delays" of the form "$<2>".
         # For any modern terminal, we should be able to just ignore
         # these, so strip them out.
         import curses
+
         cap = curses.tigetstr(cap_name)
         if cap is None:
-            cap = ''
+            cap = ""
         else:
-            cap = cap.decode('utf-8')
-        return re.sub(r'\$<\d+>[/*]?', '', cap)
+            cap = cap.decode("utf-8")
+        return re.sub(r"\$<\d+>[/*]?", "", cap)
 
     def render(self, template):
         """
@@ -156,17 +166,21 @@ def render(self, template):
         the corresponding terminal control string (if it's defined) or
         '' (if it's not).
         """
-        return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
+        return re.sub(r"\$\$|\${\w+}", self._render_sub, template)
 
     def _render_sub(self, match):
         s = match.group()
-        if s == '$$': return s
-        else: return getattr(self, s[2:-1])
+        if s == "$$":
+            return s
+        else:
+            return getattr(self, s[2:-1])
+
 
 #######################################################################
 # Example use case: progress bar
 #######################################################################
 
+
 class SimpleProgressBar:
     """
     A simple progress bar which doesn't need any terminal support.
@@ -184,33 +198,34 @@ def update(self, percent, message):
             sys.stdout.write(self.header)
             self.atIndex = 0
 
-        next = int(percent*50)
+        next = int(percent * 50)
         if next == self.atIndex:
             return
 
         for i in range(self.atIndex, next):
             idx = i % 5
             if idx == 0:
-                sys.stdout.write('%2d' % (i*2))
+                sys.stdout.write("%2d" % (i * 2))
             elif idx == 1:
-                pass # Skip second char
+                pass  # Skip second char
             elif idx < 4:
-                sys.stdout.write('.')
+                sys.stdout.write(".")
             else:
-                sys.stdout.write(' ')
+                sys.stdout.write(" ")
         sys.stdout.flush()
         self.atIndex = next
 
     def clear(self, interrupted):
         if self.atIndex is not None and not interrupted:
-            sys.stdout.write('\n')
+            sys.stdout.write("\n")
             sys.stdout.flush()
             self.atIndex = None
 
+
 class ProgressBar:
     """
     A 3-line progress bar, which looks like::
-    
+
                                 Header
         20% [===========----------------------------------]
                            progress message
@@ -218,26 +233,29 @@ class ProgressBar:
     The progress bar is colored, if the terminal supports color
     output; and adjusts to the width of the terminal.
     """
-    BAR = '%s${%s}[${BOLD}%s%s${NORMAL}${%s}]${NORMAL}%s'
-    HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
-        
+
+    BAR = "%s${%s}[${BOLD}%s%s${NORMAL}${%s}]${NORMAL}%s"
+    HEADER = "${BOLD}${CYAN}%s${NORMAL}\n\n"
+
     def __init__(self, term, header, useETA=True):
         self.term = term
         if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
-            raise ValueError("Terminal isn't capable enough -- you "
-                             "should use a simpler progress dispaly.")
-        self.BOL = self.term.BOL # BoL from col#79
-        self.XNL = "\n" # Newline from col#79
+            raise ValueError(
+                "Terminal isn't capable enough -- you "
+                "should use a simpler progress dispaly."
+            )
+        self.BOL = self.term.BOL  # BoL from col#79
+        self.XNL = "\n"  # Newline from col#79
         if self.term.COLS:
             self.width = self.term.COLS
             if not self.term.XN:
                 self.BOL = self.term.UP + self.term.BOL
-                self.XNL = "" # Cursor must be fed to the next line
+                self.XNL = ""  # Cursor must be fed to the next line
         else:
             self.width = 75
-        self.barColor = 'GREEN'
+        self.barColor = "GREEN"
         self.header = self.term.render(self.HEADER % header.center(self.width))
-        self.cleared = 1 #: true if we haven't drawn the bar yet.
+        self.cleared = 1  #: true if we haven't drawn the bar yet.
         self.useETA = useETA
         if self.useETA:
             self.startTime = time.time()
@@ -247,51 +265,62 @@ def update(self, percent, message):
         if self.cleared:
             sys.stdout.write(self.header)
             self.cleared = 0
-        prefix = '%3d%% ' % (percent*100,)
-        suffix = ''
+        prefix = "%3d%% " % (percent * 100,)
+        suffix = ""
         if self.useETA:
             elapsed = time.time() - self.startTime
-            if percent > .0001 and elapsed > 1:
+            if percent > 0.0001 and elapsed > 1:
                 total = elapsed / percent
                 eta = total - elapsed
-                h = eta//3600.
-                m = (eta//60) % 60
+                h = eta // 3600.0
+                m = (eta // 60) % 60
                 s = eta % 60
-                suffix = ' ETA: %02d:%02d:%02d'%(h,m,s)
+                suffix = " ETA: %02d:%02d:%02d" % (h, m, s)
         barWidth = self.width - len(prefix) - len(suffix) - 2
-        n = int(barWidth*percent)
+        n = int(barWidth * percent)
         if len(message) < self.width:
-            message = message + ' '*(self.width - len(message))
+            message = message + " " * (self.width - len(message))
         else:
-            message = '... ' + message[-(self.width-4):]
+            message = "... " + message[-(self.width - 4) :]
         bc = self.barColor
-        bar = self.BAR % (prefix, bc, '='*n, '-'*(barWidth-n), bc, suffix)
+        bar = self.BAR % (prefix, bc, "=" * n, "-" * (barWidth - n), bc, suffix)
         bar = self.term.render(bar)
         sys.stdout.write(
-            self.BOL + self.term.UP + self.term.CLEAR_EOL +
-            bar +
-            self.XNL +
-            self.term.CLEAR_EOL + message)
+            self.BOL
+            + self.term.UP
+            + self.term.CLEAR_EOL
+            + bar
+            + self.XNL
+            + self.term.CLEAR_EOL
+            + message
+        )
         if not self.term.XN:
             sys.stdout.flush()
 
     def clear(self, interrupted):
         if not self.cleared:
-            sys.stdout.write(self.BOL + self.term.CLEAR_EOL +
-                             self.term.UP + self.term.CLEAR_EOL +
-                             self.term.UP + self.term.CLEAR_EOL)
+            sys.stdout.write(
+                self.BOL
+                + self.term.CLEAR_EOL
+                + self.term.UP
+                + self.term.CLEAR_EOL
+                + self.term.UP
+                + self.term.CLEAR_EOL
+            )
             if interrupted:  # ^C creates extra line. Gobble it up!
                 sys.stdout.write(self.term.UP + self.term.CLEAR_EOL)
-                sys.stdout.write('^C')
+                sys.stdout.write("^C")
             sys.stdout.flush()
             self.cleared = 1
 
+
 def test():
     tc = TerminalController()
-    p = ProgressBar(tc, 'Tests')
+    p = ProgressBar(tc, "Tests")
     for i in range(101):
-        p.update(i/100., str(i))        
-        time.sleep(.3)
+        p.update(i / 100.0, str(i))
+        time.sleep(0.3)
+
 
-if __name__=='__main__':
+if __name__ == "__main__":
     test()

diff  --git a/llvm/utils/lit/lit/ShCommands.py b/llvm/utils/lit/lit/ShCommands.py
index 01e91c55da989..68655a41d7934 100644
--- a/llvm/utils/lit/lit/ShCommands.py
+++ b/llvm/utils/lit/lit/ShCommands.py
@@ -4,30 +4,30 @@ def __init__(self, args, redirects):
         self.redirects = list(redirects)
 
     def __repr__(self):
-        return 'Command(%r, %r)' % (self.args, self.redirects)
+        return "Command(%r, %r)" % (self.args, self.redirects)
 
     def __eq__(self, other):
         if not isinstance(other, Command):
             return False
 
-        return ((self.args, self.redirects) ==
-                (other.args, other.redirects))
+        return (self.args, self.redirects) == (other.args, other.redirects)
 
     def toShell(self, file):
         for arg in self.args:
             if "'" not in arg:
                 quoted = "'%s'" % arg
-            elif '"' not in arg and '$' not in arg:
+            elif '"' not in arg and "$" not in arg:
                 quoted = '"%s"' % arg
             else:
-                raise NotImplementedError('Unable to quote %r' % arg)
+                raise NotImplementedError("Unable to quote %r" % arg)
             file.write(quoted)
 
             # For debugging / validation.
             import ShUtil
+
             dequoted = list(ShUtil.ShLexer(quoted).lex())
             if dequoted != [arg]:
-                raise NotImplementedError('Unable to quote %r' % arg)
+                raise NotImplementedError("Unable to quote %r" % arg)
 
         for r in self.redirects:
             if len(r[0]) == 1:
@@ -35,6 +35,7 @@ def toShell(self, file):
             else:
                 file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
 
+
 class GlobItem:
     def __init__(self, pattern):
         self.pattern = pattern
@@ -46,18 +47,20 @@ def __eq__(self, other):
         if not isinstance(other, Command):
             return False
 
-        return (self.pattern == other.pattern)
+        return self.pattern == other.pattern
 
     def resolve(self, cwd):
         import glob
         import os
+
         if os.path.isabs(self.pattern):
-           abspath = self.pattern
+            abspath = self.pattern
         else:
             abspath = os.path.join(cwd, self.pattern)
         results = glob.glob(abspath)
         return [self.pattern] if len(results) == 0 else results
 
+
 class Pipeline:
     def __init__(self, commands, negate=False, pipe_err=False):
         self.commands = commands
@@ -65,44 +68,46 @@ def __init__(self, commands, negate=False, pipe_err=False):
         self.pipe_err = pipe_err
 
     def __repr__(self):
-        return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
-                                         self.pipe_err)
+        return "Pipeline(%r, %r, %r)" % (self.commands, self.negate, self.pipe_err)
 
     def __eq__(self, other):
         if not isinstance(other, Pipeline):
             return False
 
-        return ((self.commands, self.negate, self.pipe_err) ==
-                (other.commands, other.negate, self.pipe_err))
+        return (self.commands, self.negate, self.pipe_err) == (
+            other.commands,
+            other.negate,
+            self.pipe_err,
+        )
 
     def toShell(self, file, pipefail=False):
         if pipefail != self.pipe_err:
             raise ValueError('Inconsistent "pipefail" attribute!')
         if self.negate:
-            file.write('! ')
+            file.write("! ")
         for cmd in self.commands:
             cmd.toShell(file)
             if cmd is not self.commands[-1]:
-                file.write('|\n  ')
+                file.write("|\n  ")
+
 
 class Seq:
     def __init__(self, lhs, op, rhs):
-        assert op in (';', '&', '||', '&&')
+        assert op in (";", "&", "||", "&&")
         self.op = op
         self.lhs = lhs
         self.rhs = rhs
 
     def __repr__(self):
-        return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
+        return "Seq(%r, %r, %r)" % (self.lhs, self.op, self.rhs)
 
     def __eq__(self, other):
         if not isinstance(other, Seq):
             return False
 
-        return ((self.lhs, self.op, self.rhs) ==
-                (other.lhs, other.op, other.rhs))
+        return (self.lhs, self.op, self.rhs) == (other.lhs, other.op, other.rhs)
 
     def toShell(self, file, pipefail=False):
         self.lhs.toShell(file, pipefail)
-        file.write(' %s\n' % self.op)
+        file.write(" %s\n" % self.op)
         self.rhs.toShell(file, pipefail)

diff  --git a/llvm/utils/lit/lit/ShUtil.py b/llvm/utils/lit/lit/ShUtil.py
index 00ec8ab004936..fa13167cad1be 100644
--- a/llvm/utils/lit/lit/ShUtil.py
+++ b/llvm/utils/lit/lit/ShUtil.py
@@ -4,8 +4,9 @@
 import lit.util
 from lit.ShCommands import Command, GlobItem, Pipeline, Seq
 
+
 class ShLexer:
-    def __init__(self, data, win32Escapes = False):
+    def __init__(self, data, win32Escapes=False):
         self.data = data
         self.pos = 0
         self.end = len(data)
@@ -22,7 +23,7 @@ def look(self):
     def maybe_eat(self, c):
         """
         maybe_eat(c) - Consume the character c if it is the next character,
-        returning True if a character was consumed. """
+        returning True if a character was consumed."""
         if self.data[self.pos] == c:
             self.pos += 1
             return True
@@ -30,18 +31,24 @@ def maybe_eat(self, c):
 
     def lex_arg_fast(self, c):
         # Get the leading whitespace free section.
-        chunk = self.data[self.pos - 1:].split(None, 1)[0]
-        
+        chunk = self.data[self.pos - 1 :].split(None, 1)[0]
+
         # If it has special characters, the fast path failed.
-        if ('|' in chunk or '&' in chunk or 
-            '<' in chunk or '>' in chunk or
-            "'" in chunk or '"' in chunk or
-            ';' in chunk or '\\' in chunk):
+        if (
+            "|" in chunk
+            or "&" in chunk
+            or "<" in chunk
+            or ">" in chunk
+            or "'" in chunk
+            or '"' in chunk
+            or ";" in chunk
+            or "\\" in chunk
+        ):
             return None
-        
+
         self.pos = self.pos - 1 + len(chunk)
-        return GlobItem(chunk) if '*' in chunk or '?' in chunk else chunk
-        
+        return GlobItem(chunk) if "*" in chunk or "?" in chunk else chunk
+
     def lex_arg_slow(self, c):
         if c in "'\"":
             str = self.lex_arg_quoted(c)
@@ -53,7 +60,7 @@ def lex_arg_slow(self, c):
             c = self.look()
             if c.isspace() or c in "|&;":
                 break
-            elif c in '><':
+            elif c in "><":
                 # This is an annoying case; we treat '2>' as a single token so
                 # we don't have to track whitespace tokens.
 
@@ -66,22 +73,23 @@ def lex_arg_slow(self, c):
                 num = int(str)
                 tok = self.lex_one_token()
                 assert isinstance(tok, tuple) and len(tok) == 1
-                return (tok[0], num)                    
+                return (tok[0], num)
             elif c == '"' or c == "'":
                 self.eat()
                 quoted_arg = self.lex_arg_quoted(c)
-                if '*' in quoted_arg or '?' in quoted_arg:
+                if "*" in quoted_arg or "?" in quoted_arg:
                     quoted_glob_char = True
                 str += quoted_arg
-            elif not self.win32Escapes and c == '\\':
+            elif not self.win32Escapes and c == "\\":
                 # Outside of a string, '\\' escapes everything.
                 self.eat()
                 if self.pos == self.end:
                     lit.util.warning(
-                        "escape at end of quoted argument in: %r" % self.data)
+                        "escape at end of quoted argument in: %r" % self.data
+                    )
                     return str
                 str += self.eat()
-            elif c in '*?':
+            elif c in "*?":
                 unquoted_glob_char = True
                 str += self.eat()
             else:
@@ -102,30 +110,31 @@ def lex_arg_slow(self, c):
         return GlobItem(str) if unquoted_glob_char else str
 
     def lex_arg_quoted(self, delim):
-        str = ''
+        str = ""
         while self.pos != self.end:
             c = self.eat()
             if c == delim:
                 return str
-            elif c == '\\' and delim == '"':
+            elif c == "\\" and delim == '"':
                 # Inside a '"' quoted string, '\\' only escapes the quote
                 # character and backslash, otherwise it is preserved.
                 if self.pos == self.end:
                     lit.util.warning(
-                        "escape at end of quoted argument in: %r" % self.data)
+                        "escape at end of quoted argument in: %r" % self.data
+                    )
                     return str
                 c = self.eat()
-                if c == '"': # 
+                if c == '"':  #
                     str += '"'
-                elif c == '\\':
-                    str += '\\'
+                elif c == "\\":
+                    str += "\\"
                 else:
-                    str += '\\' + c
+                    str += "\\" + c
             else:
                 str += c
         lit.util.warning("missing quote character in %r" % self.data)
         return str
-    
+
     def lex_arg_checked(self, c):
         pos = self.pos
         res = self.lex_arg_fast(c)
@@ -135,44 +144,42 @@ def lex_arg_checked(self, c):
         reference = self.lex_arg_slow(c)
         if res is not None:
             if res != reference:
-                raise ValueError("Fast path failure: %r != %r" % (
-                        res, reference))
+                raise ValueError("Fast path failure: %r != %r" % (res, reference))
             if self.pos != end:
-                raise ValueError("Fast path failure: %r != %r" % (
-                        self.pos, end))
+                raise ValueError("Fast path failure: %r != %r" % (self.pos, end))
         return reference
-        
+
     def lex_arg(self, c):
         return self.lex_arg_fast(c) or self.lex_arg_slow(c)
-        
+
     def lex_one_token(self):
         """
-        lex_one_token - Lex a single 'sh' token. """
+        lex_one_token - Lex a single 'sh' token."""
 
         c = self.eat()
-        if c == ';':
+        if c == ";":
             return (c,)
-        if c == '|':
-            if self.maybe_eat('|'):
-                return ('||',)
+        if c == "|":
+            if self.maybe_eat("|"):
+                return ("||",)
             return (c,)
-        if c == '&':
-            if self.maybe_eat('&'):
-                return ('&&',)
-            if self.maybe_eat('>'): 
-                return ('&>',)
+        if c == "&":
+            if self.maybe_eat("&"):
+                return ("&&",)
+            if self.maybe_eat(">"):
+                return ("&>",)
             return (c,)
-        if c == '>':
-            if self.maybe_eat('&'):
-                return ('>&',)
-            if self.maybe_eat('>'):
-                return ('>>',)
+        if c == ">":
+            if self.maybe_eat("&"):
+                return (">&",)
+            if self.maybe_eat(">"):
+                return (">>",)
             return (c,)
-        if c == '<':
-            if self.maybe_eat('&'):
-                return ('<&',)
-            if self.maybe_eat('>'):
-                return ('<<',)
+        if c == "<":
+            if self.maybe_eat("&"):
+                return ("<&",)
+            if self.maybe_eat(">"):
+                return ("<<",)
             return (c,)
 
         return self.lex_arg(c)
@@ -184,32 +191,34 @@ def lex(self):
             else:
                 yield self.lex_one_token()
 
+
 ###
- 
+
+
 class ShParser:
-    def __init__(self, data, win32Escapes = False, pipefail = False):
+    def __init__(self, data, win32Escapes=False, pipefail=False):
         self.data = data
         self.pipefail = pipefail
-        self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
-    
+        self.tokens = ShLexer(data, win32Escapes=win32Escapes).lex()
+
     def lex(self):
         for item in self.tokens:
             return item
         return None
-    
+
     def look(self):
         token = self.lex()
         if token is not None:
             self.tokens = itertools.chain([token], self.tokens)
         return token
-    
+
     def parse_command(self):
         tok = self.lex()
         if not tok:
             raise ValueError("empty command!")
         if isinstance(tok, tuple):
             raise ValueError("syntax error near unexpected token %r" % tok[0])
-        
+
         args = [tok]
         redirects = []
         while 1:
@@ -226,9 +235,9 @@ def parse_command(self):
 
             # Otherwise see if it is a terminator.
             assert isinstance(tok, tuple)
-            if tok[0] in ('|',';','&','||','&&'):
+            if tok[0] in ("|", ";", "&", "||", "&&"):
                 break
-            
+
             # Otherwise it must be a redirection.
             op = self.lex()
             arg = self.lex()
@@ -242,11 +251,11 @@ def parse_pipeline(self):
         negate = False
 
         commands = [self.parse_command()]
-        while self.look() == ('|',):
+        while self.look() == ("|",):
             self.lex()
             commands.append(self.parse_command())
         return Pipeline(commands, negate, self.pipefail)
-            
+
     def parse(self):
         lhs = self.parse_pipeline()
 
@@ -255,11 +264,9 @@ def parse(self):
             assert isinstance(operator, tuple) and len(operator) == 1
 
             if not self.look():
-                raise ValueError(
-                    "missing argument to operator %r" % operator[0])
-            
+                raise ValueError("missing argument to operator %r" % operator[0])
+
             # FIXME: Operator precedence!!
             lhs = Seq(lhs, operator[0], self.parse_pipeline())
 
         return lhs
-

diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
index 6c72359440b9c..051062706f856 100644
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -7,6 +7,7 @@
 
 # Test result codes.
 
+
 class ResultCode(object):
     """Test result codes."""
 
@@ -37,26 +38,26 @@ def __init__(self, name, label, isFailure):
         ResultCode._all_codes.append(self)
 
     def __repr__(self):
-        return '%s%r' % (self.__class__.__name__,
-                         (self.name, self.isFailure))
+        return "%s%r" % (self.__class__.__name__, (self.name, self.isFailure))
 
 
 # Successes
-EXCLUDED    = ResultCode('EXCLUDED',    'Excluded', False)
-SKIPPED     = ResultCode('SKIPPED',     'Skipped', False)
-UNSUPPORTED = ResultCode('UNSUPPORTED', 'Unsupported', False)
-PASS        = ResultCode('PASS',        'Passed', False)
-FLAKYPASS   = ResultCode('FLAKYPASS',   'Passed With Retry', False)
-XFAIL       = ResultCode('XFAIL',       'Expectedly Failed', False)
+EXCLUDED = ResultCode("EXCLUDED", "Excluded", False)
+SKIPPED = ResultCode("SKIPPED", "Skipped", False)
+UNSUPPORTED = ResultCode("UNSUPPORTED", "Unsupported", False)
+PASS = ResultCode("PASS", "Passed", False)
+FLAKYPASS = ResultCode("FLAKYPASS", "Passed With Retry", False)
+XFAIL = ResultCode("XFAIL", "Expectedly Failed", False)
 # Failures
-UNRESOLVED  = ResultCode('UNRESOLVED',  'Unresolved', True)
-TIMEOUT     = ResultCode('TIMEOUT',     'Timed Out', True)
-FAIL        = ResultCode('FAIL',        'Failed', True)
-XPASS       = ResultCode('XPASS',       'Unexpectedly Passed', True)
+UNRESOLVED = ResultCode("UNRESOLVED", "Unresolved", True)
+TIMEOUT = ResultCode("TIMEOUT", "Timed Out", True)
+FAIL = ResultCode("FAIL", "Failed", True)
+XPASS = ResultCode("XPASS", "Unexpectedly Passed", True)
 
 
 # Test metric values.
 
+
 class MetricValue(object):
     def format(self):
         """
@@ -76,6 +77,7 @@ def todata(self):
         """
         raise RuntimeError("abstract method")
 
+
 class IntMetricValue(MetricValue):
     def __init__(self, value):
         self.value = value
@@ -86,21 +88,24 @@ def format(self):
     def todata(self):
         return self.value
 
+
 class RealMetricValue(MetricValue):
     def __init__(self, value):
         self.value = value
 
     def format(self):
-        return '%.4f' % self.value
+        return "%.4f" % self.value
 
     def todata(self):
         return self.value
 
+
 class JSONMetricValue(MetricValue):
     """
-        JSONMetricValue is used for types that are representable in the output
-        but that are otherwise uninterpreted.
+    JSONMetricValue is used for types that are representable in the output
+    but that are otherwise uninterpreted.
     """
+
     def __init__(self, value):
         # Ensure the value is a serializable by trying to encode it.
         # WARNING: The value may change before it is encoded again, and may
@@ -119,6 +124,7 @@ def format(self):
     def todata(self):
         return self.value
 
+
 def toMetricValue(value):
     if isinstance(value, MetricValue):
         return value
@@ -141,10 +147,11 @@ def toMetricValue(value):
 
 # Test results.
 
+
 class Result(object):
     """Wrapper for the results of executing an individual test."""
 
-    def __init__(self, code, output='', elapsed=None):
+    def __init__(self, code, output="", elapsed=None):
         # The result code.
         self.code = code
         # The test output.
@@ -169,8 +176,7 @@ def addMetric(self, name, value):
         Each value must be an instance of a MetricValue subclass.
         """
         if name in self.metrics:
-            raise ValueError("result already includes metrics for %r" % (
-                    name,))
+            raise ValueError("result already includes metrics for %r" % (name,))
         if not isinstance(value, MetricValue):
             raise TypeError("unexpected metric value: %r" % (value,))
         self.metrics[name] = value
@@ -186,8 +192,7 @@ def addMicroResult(self, name, microResult):
         Each micro-test result must be an instance of the Result class.
         """
         if name in self.microResults:
-            raise ValueError("Result already includes microResult for %r" % (
-                   name,))
+            raise ValueError("Result already includes microResult for %r" % (name,))
         if not isinstance(microResult, Result):
             raise TypeError("unexpected MicroResult value %r" % (microResult,))
         self.microResults[name] = microResult
@@ -195,6 +200,7 @@ def addMicroResult(self, name, microResult):
 
 # Test classes.
 
+
 class TestSuite:
     """TestSuite - Information on a group of tests.
 
@@ -216,10 +222,13 @@ def getSourcePath(self, components):
     def getExecPath(self, components):
         return os.path.join(self.exec_root, *components)
 
+
 class Test:
     """Test - Information on a single test instance."""
 
-    def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_file = None):
+    def __init__(
+        self, suite, path_in_suite, config, file_path=None, gtest_json_file=None
+    ):
         self.suite = suite
         self.path_in_suite = path_in_suite
         self.config = config
@@ -247,7 +256,7 @@ def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_fi
 
         # An optional number of retries allowed before the test finally succeeds.
         # The test is run at most once plus the number of retries specified here.
-        self.allowed_retries = getattr(config, 'test_retry_attempts', 0)
+        self.allowed_retries = getattr(config, "test_retry_attempts", 0)
 
         # The test result, once complete.
         self.result = None
@@ -258,12 +267,11 @@ def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_fi
         # The previous test elapsed time, if applicable.
         self.previous_elapsed = 0.0
 
-        if suite.test_times and '/'.join(path_in_suite) in suite.test_times:
-            time = suite.test_times['/'.join(path_in_suite)]
+        if suite.test_times and "/".join(path_in_suite) in suite.test_times:
+            time = suite.test_times["/".join(path_in_suite)]
             self.previous_elapsed = abs(time)
             self.previous_failure = time < 0
 
-
     def setResult(self, result):
         assert self.result is None, "result already set"
         assert isinstance(result, Result), "unexpected result type"
@@ -288,7 +296,7 @@ def isFailure(self):
         return self.result.code.isFailure
 
     def getFullName(self):
-        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)
+        return self.suite.config.name + " :: " + "/".join(self.path_in_suite)
 
     def getFilePath(self):
         if self.file_path:
@@ -313,14 +321,14 @@ def isExpectedToFail(self):
         """
 
         if self.xfail_not:
-          return False
+            return False
 
         features = self.config.available_features
 
         # Check if any of the xfails match an available feature.
         for item in self.xfails:
             # If this is the wildcard, it always fails.
-            if item == '*':
+            if item == "*":
                 return True
 
             # If this is a True expression of features, it fails.
@@ -328,7 +336,7 @@ def isExpectedToFail(self):
                 if BooleanExpression.evaluate(item, features):
                     return True
             except ValueError as e:
-                raise ValueError('Error in XFAIL list:\n%s' % str(e))
+                raise ValueError("Error in XFAIL list:\n%s" % str(e))
 
         return False
 
@@ -352,8 +360,11 @@ def isWithinFeatureLimits(self):
             return False
 
         # Check the requirements after removing the limiting features (#2)
-        featuresMinusLimits = [f for f in self.config.available_features
-                               if not f in self.config.limit_to_features]
+        featuresMinusLimits = [
+            f
+            for f in self.config.available_features
+            if not f in self.config.limit_to_features
+        ]
         if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
             return False
 
@@ -361,10 +372,13 @@ def isWithinFeatureLimits(self):
 
     def getMissingRequiredFeaturesFromList(self, features):
         try:
-            return [item for item in self.requires
-                    if not BooleanExpression.evaluate(item, features)]
+            return [
+                item
+                for item in self.requires
+                if not BooleanExpression.evaluate(item, features)
+            ]
         except ValueError as e:
-            raise ValueError('Error in REQUIRES list:\n%s' % str(e))
+            raise ValueError("Error in REQUIRES list:\n%s" % str(e))
 
     def getMissingRequiredFeatures(self):
         """
@@ -389,10 +403,13 @@ def getUnsupportedFeatures(self):
         features = self.config.available_features
 
         try:
-            return [item for item in self.unsupported
-                    if BooleanExpression.evaluate(item, features)]
+            return [
+                item
+                for item in self.unsupported
+                if BooleanExpression.evaluate(item, features)
+            ]
         except ValueError as e:
-            raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))
+            raise ValueError("Error in UNSUPPORTED list:\n%s" % str(e))
 
     def getUsedFeatures(self):
         """
@@ -402,14 +419,18 @@ def getUsedFeatures(self):
         REQUIRES annotations for this test.
         """
         import lit.TestRunner
-        parsed = lit.TestRunner._parseKeywords(self.getSourcePath(), require_script=False)
-        feature_keywords = ('UNSUPPORTED:', 'REQUIRES:', 'XFAIL:')
+
+        parsed = lit.TestRunner._parseKeywords(
+            self.getSourcePath(), require_script=False
+        )
+        feature_keywords = ("UNSUPPORTED:", "REQUIRES:", "XFAIL:")
         boolean_expressions = itertools.chain.from_iterable(
             parsed[k] or [] for k in feature_keywords
         )
         tokens = itertools.chain.from_iterable(
-            BooleanExpression.tokenize(expr) for expr in
-                boolean_expressions if expr != '*'
+            BooleanExpression.tokenize(expr)
+            for expr in boolean_expressions
+            if expr != "*"
         )
         matchExpressions = set(filter(BooleanExpression.isMatchExpression, tokens))
         return matchExpressions

diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py
index f3499bb122d82..24670610e3a57 100644
--- a/llvm/utils/lit/lit/TestRunner.py
+++ b/llvm/utils/lit/lit/TestRunner.py
@@ -13,6 +13,7 @@
 import threading
 
 import io
+
 try:
     from StringIO import StringIO
 except ImportError:
@@ -25,12 +26,14 @@
 from lit.util import to_bytes, to_string, to_unicode
 from lit.BooleanExpression import BooleanExpression
 
+
 class InternalShellError(Exception):
     def __init__(self, command, message):
         self.command = command
         self.message = message
 
-kIsWindows = platform.system() == 'Windows'
+
+kIsWindows = platform.system() == "Windows"
 
 # Don't use close_fds on Windows.
 kUseCloseFDs = not kIsWindows
@@ -51,7 +54,8 @@ def __init__(self, command, message):
 #
 # COMMAND that follows %dbg(ARG) is also captured. COMMAND can be
 # empty as a result of conditinal substitution.
-kPdbgRegex = '%dbg\\(([^)\'"]*)\\)(.*)'
+kPdbgRegex = "%dbg\\(([^)'\"]*)\\)(.*)"
+
 
 class ShellEnvironment(object):
 
@@ -72,13 +76,15 @@ def change_dir(self, newdir):
         else:
             self.cwd = os.path.realpath(os.path.join(self.cwd, newdir))
 
+
 class TimeoutHelper(object):
     """
-        Object used to helper manage enforcing a timeout in
-        _executeShCmd(). It is passed through recursive calls
-        to collect processes that have been executed so that when
-        the timeout happens they can be killed.
+    Object used to helper manage enforcing a timeout in
+    _executeShCmd(). It is passed through recursive calls
+    to collect processes that have been executed so that when
+    the timeout happens they can be killed.
     """
+
     def __init__(self, timeout):
         self.timeout = timeout
         self._procs = []
@@ -135,24 +141,26 @@ def timeoutReached(self):
 
     def _kill(self):
         """
-            This method may be called multiple times as we might get unlucky
-            and be in the middle of creating a new process in _executeShCmd()
-            which won't yet be in ``self._procs``. By locking here and in
-            addProcess() we should be able to kill processes launched after
-            the initial call to _kill()
+        This method may be called multiple times as we might get unlucky
+        and be in the middle of creating a new process in _executeShCmd()
+        which won't yet be in ``self._procs``. By locking here and in
+        addProcess() we should be able to kill processes launched after
+        the initial call to _kill()
         """
         with self._lock:
             for p in self._procs:
                 lit.util.killProcessAndChildren(p.pid)
             # Empty the list and note that we've done a pass over the list
-            self._procs = [] # Python2 doesn't have list.clear()
+            self._procs = []  # Python2 doesn't have list.clear()
             self._doneKillPass = True
 
+
 class ShellCommandResult(object):
     """Captures the result of an individual command."""
 
-    def __init__(self, command, stdout, stderr, exitCode, timeoutReached,
-                 outputFiles = []):
+    def __init__(
+        self, command, stdout, stderr, exitCode, timeoutReached, outputFiles=[]
+    ):
         self.command = command
         self.stdout = stdout
         self.stderr = stderr
@@ -160,10 +168,11 @@ def __init__(self, command, stdout, stderr, exitCode, timeoutReached,
         self.timeoutReached = timeoutReached
         self.outputFiles = list(outputFiles)
 
+
 def executeShCmd(cmd, shenv, results, timeout=0):
     """
-        Wrapper around _executeShCmd that handles
-        timeout
+    Wrapper around _executeShCmd that handles
+    timeout
     """
     # Use the helper even when no timeout is required to make
     # other code simpler (i.e. avoid bunch of ``!= None`` checks)
@@ -174,21 +183,24 @@ def executeShCmd(cmd, shenv, results, timeout=0):
     timeoutHelper.cancel()
     timeoutInfo = None
     if timeoutHelper.timeoutReached():
-        timeoutInfo = 'Reached timeout of {} seconds'.format(timeout)
+        timeoutInfo = "Reached timeout of {} seconds".format(timeout)
 
     return (finalExitCode, timeoutInfo)
 
+
 def expand_glob(arg, cwd):
     if isinstance(arg, GlobItem):
         return sorted(arg.resolve(cwd))
     return [arg]
 
+
 def expand_glob_expressions(args, cwd):
     result = [args[0]]
     for arg in args[1:]:
         result.extend(expand_glob(arg, cwd))
     return result
 
+
 def quote_windows_command(seq):
     """
     Reimplement Python's private subprocess.list2cmdline for MSys compatibility
@@ -218,20 +230,27 @@ def quote_windows_command(seq):
 
         # Add a space to separate this argument from the others
         if result:
-            result.append(' ')
+            result.append(" ")
 
         # This logic differs from upstream list2cmdline.
-        needquote = (" " in arg) or ("\t" in arg) or ("\"" in arg) or ("[" in arg) or (";" in arg) or not arg
+        needquote = (
+            (" " in arg)
+            or ("\t" in arg)
+            or ('"' in arg)
+            or ("[" in arg)
+            or (";" in arg)
+            or not arg
+        )
         if needquote:
             result.append('"')
 
         for c in arg:
-            if c == '\\':
+            if c == "\\":
                 # Don't know if we need to double yet.
                 bs_buf.append(c)
             elif c == '"':
                 # Double backslashes.
-                result.append('\\' * len(bs_buf)*2)
+                result.append("\\" * len(bs_buf) * 2)
                 bs_buf = []
                 result.append('\\"')
             else:
@@ -249,7 +268,8 @@ def quote_windows_command(seq):
             result.extend(bs_buf)
             result.append('"')
 
-    return ''.join(result)
+    return "".join(result)
+
 
 # args are from 'export' or 'env' command.
 # Skips the command, and parses its arguments.
@@ -262,7 +282,7 @@ def updateEnv(env, args):
         # Support for the -u flag (unsetting) for env command
         # e.g., env -u FOO -u BAR will remove both FOO and BAR
         # from the environment.
-        if arg == '-u':
+        if arg == "-u":
             unset_next_env_var = True
             continue
         if unset_next_env_var:
@@ -272,14 +292,15 @@ def updateEnv(env, args):
             continue
 
         # Partition the string into KEY=VALUE.
-        key, eq, val = arg.partition('=')
+        key, eq, val = arg.partition("=")
         # Stop if there was no equals.
-        if eq == '':
+        if eq == "":
             arg_idx_next = arg_idx + 1
             break
         env.env[key] = val
     return args[arg_idx_next:]
 
+
 def executeBuiltinCd(cmd, shenv):
     """executeBuiltinCd - Change the current directory."""
     if len(cmd.args) != 2:
@@ -290,6 +311,7 @@ def executeBuiltinCd(cmd, shenv):
     # following Popen calls will fail instead.
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def executeBuiltinPushd(cmd, shenv):
     """executeBuiltinPushd - Change the current dir and save the old."""
     if len(cmd.args) != 2:
@@ -298,6 +320,7 @@ def executeBuiltinPushd(cmd, shenv):
     shenv.change_dir(cmd.args[1])
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def executeBuiltinPopd(cmd, shenv):
     """executeBuiltinPopd - Restore a previously saved working directory."""
     if len(cmd.args) != 1:
@@ -307,6 +330,7 @@ def executeBuiltinPopd(cmd, shenv):
     shenv.cwd = shenv.dirStack.pop()
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def executeBuiltinExport(cmd, shenv):
     """executeBuiltinExport - Set an environment variable."""
     if len(cmd.args) != 2:
@@ -314,19 +338,20 @@ def executeBuiltinExport(cmd, shenv):
     updateEnv(shenv, cmd.args)
     return ShellCommandResult(cmd, "", "", 0, False)
 
+
 def executeBuiltinEcho(cmd, shenv):
     """Interpret a redirected echo command"""
     opened_files = []
-    stdin, stdout, stderr = processRedirects(cmd, subprocess.PIPE, shenv,
-                                             opened_files)
+    stdin, stdout, stderr = processRedirects(cmd, subprocess.PIPE, shenv, opened_files)
     if stdin != subprocess.PIPE or stderr != subprocess.PIPE:
         raise InternalShellError(
-                cmd, "stdin and stderr redirects not supported for echo")
+            cmd, "stdin and stderr redirects not supported for echo"
+        )
 
     # Some tests have un-redirected echo commands to help debug test failures.
     # Buffer our output and return it to the caller.
     is_redirected = True
-    encode = lambda x : x
+    encode = lambda x: x
     if stdout == subprocess.PIPE:
         is_redirected = False
         stdout = StringIO()
@@ -337,7 +362,7 @@ def executeBuiltinEcho(cmd, shenv):
         # When we open as binary, however, this also means that we have to write
         # 'bytes' objects to stdout instead of 'str' objects.
         encode = lit.util.to_bytes
-        stdout = open(stdout.name, stdout.mode + 'b')
+        stdout = open(stdout.name, stdout.mode + "b")
         opened_files.append((None, None, stdout, None))
 
     # Implement echo flags. We only support -e and -n, and not yet in
@@ -346,12 +371,12 @@ def executeBuiltinEcho(cmd, shenv):
     args = cmd.args[1:]
     interpret_escapes = False
     write_newline = True
-    while len(args) >= 1 and args[0] in ('-e', '-n'):
+    while len(args) >= 1 and args[0] in ("-e", "-n"):
         flag = args[0]
         args = args[1:]
-        if flag == '-e':
+        if flag == "-e":
             interpret_escapes = True
-        elif flag == '-n':
+        elif flag == "-n":
             write_newline = False
 
     def maybeUnescape(arg):
@@ -359,16 +384,16 @@ def maybeUnescape(arg):
             return arg
 
         arg = lit.util.to_bytes(arg)
-        codec = 'string_escape' if sys.version_info < (3,0) else 'unicode_escape'
+        codec = "string_escape" if sys.version_info < (3, 0) else "unicode_escape"
         return arg.decode(codec)
 
     if args:
         for arg in args[:-1]:
             stdout.write(encode(maybeUnescape(arg)))
-            stdout.write(encode(' '))
+            stdout.write(encode(" "))
         stdout.write(encode(maybeUnescape(args[-1])))
     if write_newline:
-        stdout.write(encode('\n'))
+        stdout.write(encode("\n"))
 
     for (name, mode, f, path) in opened_files:
         f.close()
@@ -376,11 +401,12 @@ def maybeUnescape(arg):
     output = "" if is_redirected else stdout.getvalue()
     return ShellCommandResult(cmd, output, "", 0, False)
 
+
 def executeBuiltinMkdir(cmd, cmd_shenv):
     """executeBuiltinMkdir - Create new directories."""
     args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
     try:
-        opts, args = getopt.gnu_getopt(args, 'p')
+        opts, args = getopt.gnu_getopt(args, "p")
     except getopt.GetoptError as err:
         raise InternalShellError(cmd, "Unsupported: 'mkdir':  %s" % str(err))
 
@@ -412,6 +438,7 @@ def executeBuiltinMkdir(cmd, cmd_shenv):
                 exitCode = 1
     return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
 
+
 def executeBuiltinRm(cmd, cmd_shenv):
     """executeBuiltinRm - Removes (deletes) files or directories."""
     args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
@@ -436,7 +463,7 @@ def executeBuiltinRm(cmd, cmd_shenv):
     def on_rm_error(func, path, exc_info):
         # path contains the path of the file that couldn't be removed
         # let's just assume that it's read-only and remove it.
-        os.chmod(path, stat.S_IMODE( os.stat(path).st_mode) | stat.S_IWRITE)
+        os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | stat.S_IWRITE)
         os.remove(path)
 
     stderr = StringIO()
@@ -454,7 +481,7 @@ def on_rm_error(func, path, exc_info):
                 if not recursive:
                     stderr.write("Error: %s is a directory\n" % path)
                     exitCode = 1
-                if platform.system() == 'Windows':
+                if platform.system() == "Windows":
                     # NOTE: use ctypes to access `SHFileOperationsW` on Windows to
                     # use the NT style path to get access to long file paths which
                     # cannot be removed otherwise.
@@ -465,14 +492,14 @@ def on_rm_error(func, path, exc_info):
 
                     class SHFILEOPSTRUCTW(Structure):
                         _fields_ = [
-                                ('hWnd', HWND),
-                                ('wFunc', UINT),
-