[clang] dd3c26a - [NFC][Py Reformat] Reformat python files in clang and clang-tools-extra

Tobias Hieta via cfe-commits cfe-commits at lists.llvm.org
Mon May 22 23:30:04 PDT 2023


Author: Tobias Hieta
Date: 2023-05-23T08:29:52+02:00
New Revision: dd3c26a045c081620375a878159f536758baba6e

URL: https://github.com/llvm/llvm-project/commit/dd3c26a045c081620375a878159f536758baba6e
DIFF: https://github.com/llvm/llvm-project/commit/dd3c26a045c081620375a878159f536758baba6e.diff

LOG: [NFC][Py Reformat] Reformat python files in clang and clang-tools-extra

This commit is part of an ongoing series that reformats our
Python code.

Reformatting is done with `black`.
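
As a sketch, a tree-wide reformat like this one could be
reproduced with an invocation along these lines (the black
version and flags used for this series are not recorded in
this message):

    # assumed invocation; version/flags not stated here
    black clang/ clang-tools-extra/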

If you end up having problems merging this commit because you
have made local changes to a Python file, the best way to handle
that is to run `git checkout --ours <yourfile>` and then
reformat it with `black`.
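
For example, assuming a conflicted file at the hypothetical
path path/to/yourfile.py, the recovery steps would be:

    # hypothetical path shown for illustration
    git checkout --ours path/to/yourfile.py
    black path/to/yourfile.py
    git add path/to/yourfile.py

(the final `git add` marks the conflict as resolved).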

If you run into any problems, post about it on Discourse and
we will try to help.

RFC Thread below:

https://discourse.llvm.org/t/rfc-document-and-standardize-python-code-style

Reviewed By: MatzeB

Differential Revision: https://reviews.llvm.org/D150761

Added: 
    

Modified: 
    clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py
    clang-tools-extra/clang-include-fixer/tool/clang-include-fixer.py
    clang-tools-extra/clang-tidy/add_new_check.py
    clang-tools-extra/clang-tidy/rename_check.py
    clang-tools-extra/clang-tidy/tool/clang-tidy-diff.py
    clang-tools-extra/clang-tidy/tool/run-clang-tidy.py
    clang-tools-extra/clangd/TidyFastChecks.py
    clang-tools-extra/clangd/quality/CompletionModelCodegen.py
    clang-tools-extra/clangd/test/lit.cfg.py
    clang-tools-extra/clangd/test/lit.local.cfg
    clang-tools-extra/clangd/test/remote-index/pipeline_helper.py
    clang-tools-extra/clangd/unittests/lit.cfg.py
    clang-tools-extra/docs/clang-tidy/checks/gen-static-analyzer-docs.py
    clang-tools-extra/docs/conf.py
    clang-tools-extra/include-cleaner/test/Unit/lit.cfg.py
    clang-tools-extra/include-cleaner/test/lit.cfg.py
    clang-tools-extra/pseudo/test/Unit/lit.cfg.py
    clang-tools-extra/pseudo/test/lit.cfg.py
    clang-tools-extra/pseudo/test/lit.local.cfg
    clang-tools-extra/test/Unit/lit.cfg.py
    clang-tools-extra/test/clang-tidy/check_clang_tidy.py
    clang-tools-extra/test/lit.cfg.py
    clang/bindings/python/clang/__init__.py
    clang/bindings/python/clang/cindex.py
    clang/bindings/python/clang/enumerations.py
    clang/bindings/python/examples/cindex/cindex-dump.py
    clang/bindings/python/examples/cindex/cindex-includes.py
    clang/bindings/python/tests/cindex/test_access_specifiers.py
    clang/bindings/python/tests/cindex/test_cdb.py
    clang/bindings/python/tests/cindex/test_code_completion.py
    clang/bindings/python/tests/cindex/test_comment.py
    clang/bindings/python/tests/cindex/test_cursor.py
    clang/bindings/python/tests/cindex/test_cursor_kind.py
    clang/bindings/python/tests/cindex/test_diagnostics.py
    clang/bindings/python/tests/cindex/test_exception_specification_kind.py
    clang/bindings/python/tests/cindex/test_file.py
    clang/bindings/python/tests/cindex/test_index.py
    clang/bindings/python/tests/cindex/test_linkage.py
    clang/bindings/python/tests/cindex/test_location.py
    clang/bindings/python/tests/cindex/test_tls_kind.py
    clang/bindings/python/tests/cindex/test_token_kind.py
    clang/bindings/python/tests/cindex/test_tokens.py
    clang/bindings/python/tests/cindex/test_translation_unit.py
    clang/bindings/python/tests/cindex/test_type.py
    clang/bindings/python/tests/cindex/util.py
    clang/docs/analyzer/conf.py
    clang/docs/conf.py
    clang/docs/tools/dump_ast_matchers.py
    clang/docs/tools/dump_format_help.py
    clang/docs/tools/dump_format_style.py
    clang/docs/tools/generate_formatted_state.py
    clang/lib/Tooling/DumpTool/generate_cxx_src_locs.py
    clang/test/AST/gen_ast_dump_json_test.py
    clang/test/Analysis/analyzer_test.py
    clang/test/Analysis/check-analyzer-fixit.py
    clang/test/Analysis/exploded-graph-rewriter/lit.local.cfg
    clang/test/Analysis/lit.local.cfg
    clang/test/Analysis/scan-build/lit.local.cfg
    clang/test/CodeGen/builtins-nvptx-mma.py
    clang/test/CodeGenHIP/lit.local.cfg
    clang/test/CodeGenHLSL/lit.local.cfg
    clang/test/CodeGenObjC/lit.local.cfg
    clang/test/CodeGenObjCXX/lit.local.cfg
    clang/test/Driver/XRay/lit.local.cfg
    clang/test/Driver/ftime-trace-sections.py
    clang/test/Driver/lit.local.cfg
    clang/test/Format/lit.local.cfg
    clang/test/Frontend/lit.local.cfg
    clang/test/Headers/lit.local.cfg
    clang/test/Index/skip-parsed-bodies/lit.local.cfg
    clang/test/Interpreter/lit.local.cfg
    clang/test/LibClang/lit.local.cfg
    clang/test/OpenMP/lit.local.cfg
    clang/test/ParserHLSL/lit.local.cfg
    clang/test/Sema/lit.local.cfg
    clang/test/SemaCUDA/lit.local.cfg
    clang/test/SemaHLSL/lit.local.cfg
    clang/test/SemaObjCXX/lit.local.cfg
    clang/test/SemaOpenCL/lit.local.cfg
    clang/test/TableGen/lit.local.cfg
    clang/test/Unit/lit.cfg.py
    clang/test/lit.cfg.py
    clang/test/utils/update_cc_test_checks/lit.local.cfg
    clang/tools/clang-format/clang-format-diff.py
    clang/tools/clang-format/clang-format-sublime.py
    clang/tools/clang-format/clang-format.py
    clang/tools/clang-rename/clang-rename.py
    clang/tools/include-mapping/cppreference_parser.py
    clang/tools/include-mapping/gen_std.py
    clang/tools/include-mapping/test.py
    clang/tools/libclang/linker-script-to-export-list.py
    clang/tools/scan-build-py/lib/libear/__init__.py
    clang/tools/scan-build-py/lib/libscanbuild/__init__.py
    clang/tools/scan-build-py/lib/libscanbuild/analyze.py
    clang/tools/scan-build-py/lib/libscanbuild/arguments.py
    clang/tools/scan-build-py/lib/libscanbuild/clang.py
    clang/tools/scan-build-py/lib/libscanbuild/compilation.py
    clang/tools/scan-build-py/lib/libscanbuild/intercept.py
    clang/tools/scan-build-py/lib/libscanbuild/report.py
    clang/tools/scan-build-py/lib/libscanbuild/shell.py
    clang/tools/scan-build-py/tests/__init__.py
    clang/tools/scan-build-py/tests/functional/cases/__init__.py
    clang/tools/scan-build-py/tests/functional/cases/test_create_cdb.py
    clang/tools/scan-build-py/tests/functional/cases/test_exec_anatomy.py
    clang/tools/scan-build-py/tests/functional/cases/test_from_cdb.py
    clang/tools/scan-build-py/tests/functional/cases/test_from_cmd.py
    clang/tools/scan-build-py/tests/unit/test_analyze.py
    clang/tools/scan-build-py/tests/unit/test_clang.py
    clang/tools/scan-build-py/tests/unit/test_compilation.py
    clang/tools/scan-build-py/tests/unit/test_intercept.py
    clang/tools/scan-build-py/tests/unit/test_libear.py
    clang/tools/scan-build-py/tests/unit/test_report.py
    clang/tools/scan-build-py/tests/unit/test_shell.py
    clang/tools/scan-view/share/Reporter.py
    clang/tools/scan-view/share/ScanView.py
    clang/tools/scan-view/share/startfile.py
    clang/utils/ABITest/ABITestGen.py
    clang/utils/ABITest/Enumeration.py
    clang/utils/ABITest/TypeGen.py
    clang/utils/CIndex/completion_logger_server.py
    clang/utils/ClangDataFormat.py
    clang/utils/TestUtils/deep-stack.py
    clang/utils/analyzer/CmpRuns.py
    clang/utils/analyzer/ProjectMap.py
    clang/utils/analyzer/SATest.py
    clang/utils/analyzer/SATestAdd.py
    clang/utils/analyzer/SATestBenchmark.py
    clang/utils/analyzer/SATestBuild.py
    clang/utils/analyzer/SATestUpdateDiffs.py
    clang/utils/analyzer/SATestUtils.py
    clang/utils/analyzer/SumTimerInfo.py
    clang/utils/analyzer/entrypoint.py
    clang/utils/analyzer/exploded-graph-rewriter.py
    clang/utils/bundle_resources.py
    clang/utils/check_cfc/check_cfc.py
    clang/utils/check_cfc/obj_diff.py
    clang/utils/check_cfc/setup.py
    clang/utils/check_cfc/test_check_cfc.py
    clang/utils/clangdiag.py
    clang/utils/convert_arm_neon.py
    clang/utils/creduce-clang-crash.py
    clang/utils/modfuzz.py
    clang/utils/module-deps-to-rsp.py
    clang/utils/perf-training/perf-helper.py
    clang/utils/token-delta.py
    clang/www/builtins.py

Removed: 
    


################################################################################
diff --git a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py
index a0fa64592e62b..471dbf8c110b4 100755
--- a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py
+++ b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/run-find-all-symbols.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python
 #
-#=- run-find-all-symbols.py - Parallel find-all-symbols runner -*- python  -*-=#
+# =- run-find-all-symbols.py - Parallel find-all-symbols runner -*- python  -*-=#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 """
 Parallel find-all-symbols runner
@@ -35,92 +35,99 @@
 
 
 def find_compilation_database(path):
-  """Adjusts the directory until a compilation database is found."""
-  result = './'
-  while not os.path.isfile(os.path.join(result, path)):
-    if os.path.realpath(result) == '/':
-      print('Error: could not find compilation database.')
-      sys.exit(1)
-    result += '../'
-  return os.path.realpath(result)
+    """Adjusts the directory until a compilation database is found."""
+    result = "./"
+    while not os.path.isfile(os.path.join(result, path)):
+        if os.path.realpath(result) == "/":
+            print("Error: could not find compilation database.")
+            sys.exit(1)
+        result += "../"
+    return os.path.realpath(result)
 
 
 def MergeSymbols(directory, args):
-  """Merge all symbol files (yaml) in a given directory into a single file."""
-  invocation = [args.binary, '-merge-dir='+directory, args.saving_path]
-  subprocess.call(invocation)
-  print('Merge is finished. Saving results in ' + args.saving_path)
+    """Merge all symbol files (yaml) in a given directory into a single file."""
+    invocation = [args.binary, "-merge-dir=" + directory, args.saving_path]
+    subprocess.call(invocation)
+    print("Merge is finished. Saving results in " + args.saving_path)
 
 
 def run_find_all_symbols(args, tmpdir, build_path, queue):
-  """Takes filenames out of queue and runs find-all-symbols on them."""
-  while True:
-    name = queue.get()
-    invocation = [args.binary, name, '-output-dir='+tmpdir, '-p='+build_path]
-    sys.stdout.write(' '.join(invocation) + '\n')
-    subprocess.call(invocation)
-    queue.task_done()
+    """Takes filenames out of queue and runs find-all-symbols on them."""
+    while True:
+        name = queue.get()
+        invocation = [args.binary, name, "-output-dir=" + tmpdir, "-p=" + build_path]
+        sys.stdout.write(" ".join(invocation) + "\n")
+        subprocess.call(invocation)
+        queue.task_done()
 
 
 def main():
-  parser = argparse.ArgumentParser(description='Runs find-all-symbols over all'
-                                   'files in a compilation database.')
-  parser.add_argument('-binary', metavar='PATH',
-                      default='./bin/find-all-symbols',
-                      help='path to find-all-symbols binary')
-  parser.add_argument('-j', type=int, default=0,
-                      help='number of instances to be run in parallel.')
-  parser.add_argument('-p', dest='build_path',
-                      help='path used to read a compilation database.')
-  parser.add_argument('-saving-path', default='./find_all_symbols_db.yaml',
-                      help='result saving path')
-  args = parser.parse_args()
-
-  db_path = 'compile_commands.json'
-
-  if args.build_path is not None:
-    build_path = args.build_path
-  else:
-    build_path = find_compilation_database(db_path)
-
-  tmpdir = tempfile.mkdtemp()
-
-  # Load the database and extract all files.
-  database = json.load(open(os.path.join(build_path, db_path)))
-  files = [entry['file'] for entry in database]
-
-  # Filter out .rc files on Windows. CMake includes them for some reason.
-  files = [f for f in files if not f.endswith('.rc')]
-
-  max_task = args.j
-  if max_task == 0:
-    max_task = multiprocessing.cpu_count()
-
-  try:
-    # Spin up a bunch of tidy-launching threads.
-    queue = Queue.Queue(max_task)
-    for _ in range(max_task):
-      t = threading.Thread(target=run_find_all_symbols,
-                           args=(args, tmpdir, build_path, queue))
-      t.daemon = True
-      t.start()
-
-    # Fill the queue with files.
-    for name in files:
-      queue.put(name)
-
-    # Wait for all threads to be done.
-    queue.join()
-
-    MergeSymbols(tmpdir, args)
-
-
-  except KeyboardInterrupt:
-    # This is a sad hack. Unfortunately subprocess goes
-    # bonkers with ctrl-c and we start forking merrily.
-    print('\nCtrl-C detected, goodbye.')
-    os.kill(0, 9)
-
-
-if __name__ == '__main__':
-  main()
+    parser = argparse.ArgumentParser(
+        description="Runs find-all-symbols over all" "files in a compilation database."
+    )
+    parser.add_argument(
+        "-binary",
+        metavar="PATH",
+        default="./bin/find-all-symbols",
+        help="path to find-all-symbols binary",
+    )
+    parser.add_argument(
+        "-j", type=int, default=0, help="number of instances to be run in parallel."
+    )
+    parser.add_argument(
+        "-p", dest="build_path", help="path used to read a compilation database."
+    )
+    parser.add_argument(
+        "-saving-path", default="./find_all_symbols_db.yaml", help="result saving path"
+    )
+    args = parser.parse_args()
+
+    db_path = "compile_commands.json"
+
+    if args.build_path is not None:
+        build_path = args.build_path
+    else:
+        build_path = find_compilation_database(db_path)
+
+    tmpdir = tempfile.mkdtemp()
+
+    # Load the database and extract all files.
+    database = json.load(open(os.path.join(build_path, db_path)))
+    files = [entry["file"] for entry in database]
+
+    # Filter out .rc files on Windows. CMake includes them for some reason.
+    files = [f for f in files if not f.endswith(".rc")]
+
+    max_task = args.j
+    if max_task == 0:
+        max_task = multiprocessing.cpu_count()
+
+    try:
+        # Spin up a bunch of tidy-launching threads.
+        queue = Queue.Queue(max_task)
+        for _ in range(max_task):
+            t = threading.Thread(
+                target=run_find_all_symbols, args=(args, tmpdir, build_path, queue)
+            )
+            t.daemon = True
+            t.start()
+
+        # Fill the queue with files.
+        for name in files:
+            queue.put(name)
+
+        # Wait for all threads to be done.
+        queue.join()
+
+        MergeSymbols(tmpdir, args)
+
+    except KeyboardInterrupt:
+        # This is a sad hack. Unfortunately subprocess goes
+        # bonkers with ctrl-c and we start forking merrily.
+        print("\nCtrl-C detected, goodbye.")
+        os.kill(0, 9)
+
+
+if __name__ == "__main__":
+    main()

diff --git a/clang-tools-extra/clang-include-fixer/tool/clang-include-fixer.py b/clang-tools-extra/clang-include-fixer/tool/clang-include-fixer.py
index 62bd07365a99d..da254d5a83887 100644
--- a/clang-tools-extra/clang-include-fixer/tool/clang-include-fixer.py
+++ b/clang-tools-extra/clang-include-fixer/tool/clang-include-fixer.py
@@ -28,192 +28,216 @@
 # set g:clang_include_fixer_path to the path to clang-include-fixer if it is not
 # on the path.
 # Change this to the full path if clang-include-fixer is not on the path.
-binary = 'clang-include-fixer'
+binary = "clang-include-fixer"
 if vim.eval('exists("g:clang_include_fixer_path")') == "1":
-  binary = vim.eval('g:clang_include_fixer_path')
+    binary = vim.eval("g:clang_include_fixer_path")
 
 maximum_suggested_headers = 3
 if vim.eval('exists("g:clang_include_fixer_maximum_suggested_headers")') == "1":
-  maximum_suggested_headers = max(
-      1,
-      vim.eval('g:clang_include_fixer_maximum_suggested_headers'))
+    maximum_suggested_headers = max(
+        1, vim.eval("g:clang_include_fixer_maximum_suggested_headers")
+    )
 
 increment_num = 5
 if vim.eval('exists("g:clang_include_fixer_increment_num")') == "1":
-  increment_num = max(
-      1,
-      vim.eval('g:clang_include_fixer_increment_num'))
+    increment_num = max(1, vim.eval("g:clang_include_fixer_increment_num"))
 
 jump_to_include = False
 if vim.eval('exists("g:clang_include_fixer_jump_to_include")') == "1":
-  jump_to_include = vim.eval('g:clang_include_fixer_jump_to_include') != "0"
+    jump_to_include = vim.eval("g:clang_include_fixer_jump_to_include") != "0"
 
 query_mode = False
 if vim.eval('exists("g:clang_include_fixer_query_mode")') == "1":
-  query_mode = vim.eval('g:clang_include_fixer_query_mode') != "0"
+    query_mode = vim.eval("g:clang_include_fixer_query_mode") != "0"
 
 
 def GetUserSelection(message, headers, maximum_suggested_headers):
-  eval_message = message + '\n'
-  for idx, header in enumerate(headers[0:maximum_suggested_headers]):
-    eval_message += "({0}). {1}\n".format(idx + 1, header)
-  eval_message += "Enter (q) to quit;"
-  if maximum_suggested_headers < len(headers):
-    eval_message += " (m) to show {0} more candidates.".format(
-        min(increment_num, len(headers) - maximum_suggested_headers))
-
-  eval_message += "\nSelect (default 1): "
-  res = vim.eval("input('{0}')".format(eval_message))
-  if res == '':
-    # choose the top ranked header by default
-    idx = 1
-  elif res == 'q':
-    raise Exception('   Insertion cancelled...')
-  elif res == 'm':
-    return GetUserSelection(message,
-                            headers, maximum_suggested_headers + increment_num)
-  else:
-    try:
-      idx = int(res)
-      if idx <= 0 or idx > len(headers):
-        raise Exception()
-    except Exception:
-      # Show a new prompt on invalid option instead of aborting so that users
-      # don't need to wait for another clang-include-fixer run.
-      print("Invalid option: {}".format(res), file=sys.stderr)
-      return GetUserSelection(message, headers, maximum_suggested_headers)
-  return headers[idx - 1]
+    eval_message = message + "\n"
+    for idx, header in enumerate(headers[0:maximum_suggested_headers]):
+        eval_message += "({0}). {1}\n".format(idx + 1, header)
+    eval_message += "Enter (q) to quit;"
+    if maximum_suggested_headers < len(headers):
+        eval_message += " (m) to show {0} more candidates.".format(
+            min(increment_num, len(headers) - maximum_suggested_headers)
+        )
+
+    eval_message += "\nSelect (default 1): "
+    res = vim.eval("input('{0}')".format(eval_message))
+    if res == "":
+        # choose the top ranked header by default
+        idx = 1
+    elif res == "q":
+        raise Exception("   Insertion cancelled...")
+    elif res == "m":
+        return GetUserSelection(
+            message, headers, maximum_suggested_headers + increment_num
+        )
+    else:
+        try:
+            idx = int(res)
+            if idx <= 0 or idx > len(headers):
+                raise Exception()
+        except Exception:
+            # Show a new prompt on invalid option instead of aborting so that users
+            # don't need to wait for another clang-include-fixer run.
+            print("Invalid option: {}".format(res), file=sys.stderr)
+            return GetUserSelection(message, headers, maximum_suggested_headers)
+    return headers[idx - 1]
 
 
 def execute(command, text):
-  # Avoid flashing a cmd prompt on Windows.
-  startupinfo = None
-  if sys.platform.startswith('win32'):
-    startupinfo = subprocess.STARTUPINFO()
-    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
-    startupinfo.wShowWindow = subprocess.SW_HIDE
-
-  p = subprocess.Popen(command,
-                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                       stdin=subprocess.PIPE, startupinfo=startupinfo)
-  return p.communicate(input=text.encode('utf-8'))
+    # Avoid flashing a cmd prompt on Windows.
+    startupinfo = None
+    if sys.platform.startswith("win32"):
+        startupinfo = subprocess.STARTUPINFO()
+        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+        startupinfo.wShowWindow = subprocess.SW_HIDE
+
+    p = subprocess.Popen(
+        command,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        stdin=subprocess.PIPE,
+        startupinfo=startupinfo,
+    )
+    return p.communicate(input=text.encode("utf-8"))
 
 
 def InsertHeaderToVimBuffer(header, text):
-  command = [binary, "-stdin", "-insert-header=" + json.dumps(header),
-             vim.current.buffer.name]
-  stdout, stderr = execute(command, text)
-  if stderr:
-    raise Exception(stderr)
-  if stdout:
-    lines = stdout.splitlines()
-    sequence = difflib.SequenceMatcher(None, vim.current.buffer, lines)
-    line_num = None
-    for op in reversed(sequence.get_opcodes()):
-      if op[0] != 'equal':
-        vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
-      if op[0] == 'insert':
-        # line_num in vim is 1-based.
-        line_num = op[1] + 1
-
-    if jump_to_include and line_num:
-      vim.current.window.cursor = (line_num, 0)
+    command = [
+        binary,
+        "-stdin",
+        "-insert-header=" + json.dumps(header),
+        vim.current.buffer.name,
+    ]
+    stdout, stderr = execute(command, text)
+    if stderr:
+        raise Exception(stderr)
+    if stdout:
+        lines = stdout.splitlines()
+        sequence = difflib.SequenceMatcher(None, vim.current.buffer, lines)
+        line_num = None
+        for op in reversed(sequence.get_opcodes()):
+            if op[0] != "equal":
+                vim.current.buffer[op[1] : op[2]] = lines[op[3] : op[4]]
+            if op[0] == "insert":
+                # line_num in vim is 1-based.
+                line_num = op[1] + 1
+
+        if jump_to_include and line_num:
+            vim.current.window.cursor = (line_num, 0)
 
 
 # The vim internal implementation (expand("cword"/"cWORD")) doesn't support
 # our use case very well, we re-implement our own one.
 def get_symbol_under_cursor():
-  line = vim.eval("line(\".\")")
-  # column number in vim is 1-based.
-  col = int(vim.eval("col(\".\")")) - 1
-  line_text = vim.eval("getline({0})".format(line))
-  if len(line_text) == 0: return ""
-  symbol_pos_begin = col
-  p = re.compile('[a-zA-Z0-9:_]')
-  while symbol_pos_begin >= 0 and p.match(line_text[symbol_pos_begin]):
-    symbol_pos_begin -= 1
-
-  symbol_pos_end = col
-  while symbol_pos_end < len(line_text) and p.match(line_text[symbol_pos_end]):
-    symbol_pos_end += 1
-  return line_text[symbol_pos_begin+1:symbol_pos_end]
+    line = vim.eval('line(".")')
+    # column number in vim is 1-based.
+    col = int(vim.eval('col(".")')) - 1
+    line_text = vim.eval("getline({0})".format(line))
+    if len(line_text) == 0:
+        return ""
+    symbol_pos_begin = col
+    p = re.compile("[a-zA-Z0-9:_]")
+    while symbol_pos_begin >= 0 and p.match(line_text[symbol_pos_begin]):
+        symbol_pos_begin -= 1
+
+    symbol_pos_end = col
+    while symbol_pos_end < len(line_text) and p.match(line_text[symbol_pos_end]):
+        symbol_pos_end += 1
+    return line_text[symbol_pos_begin + 1 : symbol_pos_end]
 
 
 def main():
-  parser = argparse.ArgumentParser(
-      description='Vim integration for clang-include-fixer')
-  parser.add_argument('-db', default='yaml',
-                      help='clang-include-fixer input format.')
-  parser.add_argument('-input', default='',
-                      help='String to initialize the database.')
-  # Don't throw exception when parsing unknown arguments to make the script
-  # work in neovim.
-  # Neovim (at least v0.2.1) somehow mangles the sys.argv in a weird way: it
-  # will pass additional arguments (e.g. "-c script_host.py") to sys.argv,
-  # which makes the script fail.
-  args, _ = parser.parse_known_args()
-
-  # Get the current text.
-  buf = vim.current.buffer
-  text = '\n'.join(buf)
-
-  if query_mode:
-    symbol = get_symbol_under_cursor()
-    if len(symbol) == 0:
-      print("Skip querying empty symbol.")
-      return
-    command = [binary, "-stdin", "-query-symbol="+get_symbol_under_cursor(),
-               "-db=" + args.db, "-input=" + args.input,
-               vim.current.buffer.name]
-  else:
-    # Run command to get all headers.
-    command = [binary, "-stdin", "-output-headers", "-db=" + args.db,
-               "-input=" + args.input, vim.current.buffer.name]
-  stdout, stderr = execute(command, text)
-  if stderr:
-    print("Error while running clang-include-fixer: {}".format(stderr),
-          file=sys.stderr)
-    return
+    parser = argparse.ArgumentParser(
+        description="Vim integration for clang-include-fixer"
+    )
+    parser.add_argument("-db", default="yaml", help="clang-include-fixer input format.")
+    parser.add_argument("-input", default="", help="String to initialize the database.")
+    # Don't throw exception when parsing unknown arguments to make the script
+    # work in neovim.
+    # Neovim (at least v0.2.1) somehow mangles the sys.argv in a weird way: it
+    # will pass additional arguments (e.g. "-c script_host.py") to sys.argv,
+    # which makes the script fail.
+    args, _ = parser.parse_known_args()
+
+    # Get the current text.
+    buf = vim.current.buffer
+    text = "\n".join(buf)
+
+    if query_mode:
+        symbol = get_symbol_under_cursor()
+        if len(symbol) == 0:
+            print("Skip querying empty symbol.")
+            return
+        command = [
+            binary,
+            "-stdin",
+            "-query-symbol=" + get_symbol_under_cursor(),
+            "-db=" + args.db,
+            "-input=" + args.input,
+            vim.current.buffer.name,
+        ]
+    else:
+        # Run command to get all headers.
+        command = [
+            binary,
+            "-stdin",
+            "-output-headers",
+            "-db=" + args.db,
+            "-input=" + args.input,
+            vim.current.buffer.name,
+        ]
+    stdout, stderr = execute(command, text)
+    if stderr:
+        print(
+            "Error while running clang-include-fixer: {}".format(stderr),
+            file=sys.stderr,
+        )
+        return
+
+    include_fixer_context = json.loads(stdout)
+    query_symbol_infos = include_fixer_context["QuerySymbolInfos"]
+    if not query_symbol_infos:
+        print("The file is fine, no need to add a header.")
+        return
+    symbol = query_symbol_infos[0]["RawIdentifier"]
+    # The header_infos is already sorted by clang-include-fixer.
+    header_infos = include_fixer_context["HeaderInfos"]
+    # Deduplicate headers while keeping the order, so that the same header would
+    # not be suggested twice.
+    unique_headers = []
+    seen = set()
+    for header_info in header_infos:
+        header = header_info["Header"]
+        if header not in seen:
+            seen.add(header)
+            unique_headers.append(header)
+
+    if not unique_headers:
+        print("Couldn't find a header for {0}.".format(symbol))
+        return
 
-  include_fixer_context = json.loads(stdout)
-  query_symbol_infos = include_fixer_context["QuerySymbolInfos"]
-  if not query_symbol_infos:
-    print("The file is fine, no need to add a header.")
-    return
-  symbol = query_symbol_infos[0]["RawIdentifier"]
-  # The header_infos is already sorted by clang-include-fixer.
-  header_infos = include_fixer_context["HeaderInfos"]
-  # Deduplicate headers while keeping the order, so that the same header would
-  # not be suggested twice.
-  unique_headers = []
-  seen = set()
-  for header_info in header_infos:
-    header = header_info["Header"]
-    if header not in seen:
-      seen.add(header)
-      unique_headers.append(header)
-
-  if not unique_headers:
-    print("Couldn't find a header for {0}.".format(symbol))
+    try:
+        selected = unique_headers[0]
+        inserted_header_infos = header_infos
+        if len(unique_headers) > 1:
+            selected = GetUserSelection(
+                "choose a header file for {0}.".format(symbol),
+                unique_headers,
+                maximum_suggested_headers,
+            )
+            inserted_header_infos = [
+                header for header in header_infos if header["Header"] == selected
+            ]
+        include_fixer_context["HeaderInfos"] = inserted_header_infos
+
+        InsertHeaderToVimBuffer(include_fixer_context, text)
+        print("Added #include {0} for {1}.".format(selected, symbol))
+    except Exception as error:
+        print(error, file=sys.stderr)
     return
 
-  try:
-    selected = unique_headers[0]
-    inserted_header_infos = header_infos
-    if len(unique_headers) > 1:
-      selected = GetUserSelection(
-          "choose a header file for {0}.".format(symbol),
-          unique_headers, maximum_suggested_headers)
-      inserted_header_infos = [
-        header for header in header_infos if header["Header"] == selected]
-    include_fixer_context["HeaderInfos"] = inserted_header_infos
-
-    InsertHeaderToVimBuffer(include_fixer_context, text)
-    print("Added #include {0} for {1}.".format(selected, symbol))
-  except Exception as error:
-    print(error, file=sys.stderr)
-  return
-
-
-if __name__ == '__main__':
-  main()
+
+if __name__ == "__main__":
+    main()

diff --git a/clang-tools-extra/clang-tidy/add_new_check.py b/clang-tools-extra/clang-tidy/add_new_check.py
index 98e96553ca8ac..c642000eba228 100755
--- a/clang-tools-extra/clang-tidy/add_new_check.py
+++ b/clang-tools-extra/clang-tidy/add_new_check.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 #
-#===- add_new_check.py - clang-tidy check generator ---------*- python -*--===#
+# ===- add_new_check.py - clang-tidy check generator ---------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 from __future__ import print_function
 from __future__ import unicode_literals
@@ -20,51 +20,57 @@
 # Adapts the module's CMakelist file. Returns 'True' if it could add a new
 # entry and 'False' if the entry already existed.
 def adapt_cmake(module_path, check_name_camel):
-  filename = os.path.join(module_path, 'CMakeLists.txt')
-
-  # The documentation files are encoded using UTF-8, however on Windows the
-  # default encoding might be different (e.g. CP-1252). To make sure UTF-8 is
-  # always used, use `io.open(filename, mode, encoding='utf8')` for reading and
-  # writing files here and elsewhere.
-  with io.open(filename, 'r', encoding='utf8') as f:
-    lines = f.readlines()
-
-  cpp_file = check_name_camel + '.cpp'
-
-  # Figure out whether this check already exists.
-  for line in lines:
-    if line.strip() == cpp_file:
-      return False
-
-  print('Updating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8', newline='\n') as f:
-    cpp_found = False
-    file_added = False
+    filename = os.path.join(module_path, "CMakeLists.txt")
+
+    # The documentation files are encoded using UTF-8, however on Windows the
+    # default encoding might be different (e.g. CP-1252). To make sure UTF-8 is
+    # always used, use `io.open(filename, mode, encoding='utf8')` for reading and
+    # writing files here and elsewhere.
+    with io.open(filename, "r", encoding="utf8") as f:
+        lines = f.readlines()
+
+    cpp_file = check_name_camel + ".cpp"
+
+    # Figure out whether this check already exists.
     for line in lines:
-      cpp_line = line.strip().endswith('.cpp')
-      if (not file_added) and (cpp_line or cpp_found):
-        cpp_found = True
-        if (line.strip() > cpp_file) or (not cpp_line):
-          f.write('  ' + cpp_file + '\n')
-          file_added = True
-      f.write(line)
+        if line.strip() == cpp_file:
+            return False
+
+    print("Updating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8", newline="\n") as f:
+        cpp_found = False
+        file_added = False
+        for line in lines:
+            cpp_line = line.strip().endswith(".cpp")
+            if (not file_added) and (cpp_line or cpp_found):
+                cpp_found = True
+                if (line.strip() > cpp_file) or (not cpp_line):
+                    f.write("  " + cpp_file + "\n")
+                    file_added = True
+            f.write(line)
 
-  return True
+    return True
 
 
 # Adds a header for the new check.
 def write_header(module_path, module, namespace, check_name, check_name_camel):
-  filename = os.path.join(module_path, check_name_camel) + '.h'
-  print('Creating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8', newline='\n') as f:
-    header_guard = ('LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_' + module.upper() + '_'
-                    + check_name_camel.upper() + '_H')
-    f.write('//===--- ')
-    f.write(os.path.basename(filename))
-    f.write(' - clang-tidy ')
-    f.write('-' * max(0, 42 - len(os.path.basename(filename))))
-    f.write('*- C++ -*-===//')
-    f.write("""
+    filename = os.path.join(module_path, check_name_camel) + ".h"
+    print("Creating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8", newline="\n") as f:
+        header_guard = (
+            "LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_"
+            + module.upper()
+            + "_"
+            + check_name_camel.upper()
+            + "_H"
+        )
+        f.write("//===--- ")
+        f.write(os.path.basename(filename))
+        f.write(" - clang-tidy ")
+        f.write("-" * max(0, 42 - len(os.path.basename(filename))))
+        f.write("*- C++ -*-===//")
+        f.write(
+            """
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -94,24 +100,29 @@ class %(check_name_camel)s : public ClangTidyCheck {
 } // namespace clang::tidy::%(namespace)s
 
 #endif // %(header_guard)s
-""" % {'header_guard': header_guard,
-       'check_name_camel': check_name_camel,
-       'check_name': check_name,
-       'module': module,
-       'namespace': namespace})
+"""
+            % {
+                "header_guard": header_guard,
+                "check_name_camel": check_name_camel,
+                "check_name": check_name,
+                "module": module,
+                "namespace": namespace,
+            }
+        )
 
 
 # Adds the implementation of the new check.
 def write_implementation(module_path, module, namespace, check_name_camel):
-  filename = os.path.join(module_path, check_name_camel) + '.cpp'
-  print('Creating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8', newline='\n') as f:
-    f.write('//===--- ')
-    f.write(os.path.basename(filename))
-    f.write(' - clang-tidy ')
-    f.write('-' * max(0, 51 - len(os.path.basename(filename))))
-    f.write('-===//')
-    f.write("""
+    filename = os.path.join(module_path, check_name_camel) + ".cpp"
+    print("Creating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8", newline="\n") as f:
+        f.write("//===--- ")
+        f.write(os.path.basename(filename))
+        f.write(" - clang-tidy ")
+        f.write("-" * max(0, 51 - len(os.path.basename(filename))))
+        f.write("-===//")
+        f.write(
+            """
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -144,138 +155,162 @@ def write_implementation(module_path, module, namespace, check_name_camel):
 }
 
 } // namespace clang::tidy::%(namespace)s
-""" % {'check_name': check_name_camel,
-       'module': module,
-       'namespace': namespace})
+"""
+            % {"check_name": check_name_camel, "module": module, "namespace": namespace}
+        )
 
 
 # Returns the source filename that implements the module.
 def get_module_filename(module_path, module):
-  modulecpp = list(filter(
-      lambda p: p.lower() == module.lower() + 'tidymodule.cpp',
-      os.listdir(module_path)))[0]
-  return os.path.join(module_path, modulecpp)
+    modulecpp = list(
+        filter(
+            lambda p: p.lower() == module.lower() + "tidymodule.cpp",
+            os.listdir(module_path),
+        )
+    )[0]
+    return os.path.join(module_path, modulecpp)
 
 
 # Modifies the module to include the new check.
 def adapt_module(module_path, module, check_name, check_name_camel):
-  filename = get_module_filename(module_path, module)
-  with io.open(filename, 'r', encoding='utf8') as f:
-    lines = f.readlines()
-
-  print('Updating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8', newline='\n') as f:
-    header_added = False
-    header_found = False
-    check_added = False
-    check_fq_name = module + '-' + check_name
-    check_decl = ('    CheckFactories.registerCheck<' + check_name_camel +
-                  '>(\n        "' + check_fq_name + '");\n')
-
-    lines = iter(lines)
-    try:
-      while True:
-        line = next(lines)
-        if not header_added:
-          match = re.search('#include "(.*)"', line)
-          if match:
-            header_found = True
-            if match.group(1) > check_name_camel:
-              header_added = True
-              f.write('#include "' + check_name_camel + '.h"\n')
-          elif header_found:
-            header_added = True
-            f.write('#include "' + check_name_camel + '.h"\n')
-
-        if not check_added:
-          if line.strip() == '}':
-            check_added = True
-            f.write(check_decl)
-          else:
-            match = re.search('registerCheck<(.*)> *\( *(?:"([^"]*)")?', line)
-            prev_line = None
-            if match:
-              current_check_name = match.group(2)
-              if current_check_name is None:
-                # If we didn't find the check name on this line, look on the
-                # next one.
-                prev_line = line
+    filename = get_module_filename(module_path, module)
+    with io.open(filename, "r", encoding="utf8") as f:
+        lines = f.readlines()
+
+    print("Updating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8", newline="\n") as f:
+        header_added = False
+        header_found = False
+        check_added = False
+        check_fq_name = module + "-" + check_name
+        check_decl = (
+            "    CheckFactories.registerCheck<"
+            + check_name_camel
+            + '>(\n        "'
+            + check_fq_name
+            + '");\n'
+        )
+
+        lines = iter(lines)
+        try:
+            while True:
                 line = next(lines)
-                match = re.search(' *"([^"]*)"', line)
-                if match:
-                  current_check_name = match.group(1)
-              if current_check_name > check_fq_name:
-                check_added = True
-                f.write(check_decl)
-              if prev_line:
-                f.write(prev_line)
-        f.write(line)
-    except StopIteration:
-      pass
+                if not header_added:
+                    match = re.search('#include "(.*)"', line)
+                    if match:
+                        header_found = True
+                        if match.group(1) > check_name_camel:
+                            header_added = True
+                            f.write('#include "' + check_name_camel + '.h"\n')
+                    elif header_found:
+                        header_added = True
+                        f.write('#include "' + check_name_camel + '.h"\n')
+
+                if not check_added:
+                    if line.strip() == "}":
+                        check_added = True
+                        f.write(check_decl)
+                    else:
+                        match = re.search(
+                            'registerCheck<(.*)> *\( *(?:"([^"]*)")?', line
+                        )
+                        prev_line = None
+                        if match:
+                            current_check_name = match.group(2)
+                            if current_check_name is None:
+                                # If we didn't find the check name on this line, look on the
+                                # next one.
+                                prev_line = line
+                                line = next(lines)
+                                match = re.search(' *"([^"]*)"', line)
+                                if match:
+                                    current_check_name = match.group(1)
+                            if current_check_name > check_fq_name:
+                                check_added = True
+                                f.write(check_decl)
+                            if prev_line:
+                                f.write(prev_line)
+                f.write(line)
+        except StopIteration:
+            pass
 
 
 # Adds a release notes entry.
 def add_release_notes(module_path, module, check_name):
-  check_name_dashes = module + '-' + check_name
-  filename = os.path.normpath(os.path.join(module_path,
-                                           '../../docs/ReleaseNotes.rst'))
-  with io.open(filename, 'r', encoding='utf8') as f:
-    lines = f.readlines()
-
-  lineMatcher = re.compile('New checks')
-  nextSectionMatcher = re.compile('New check aliases')
-  checkMatcher = re.compile('- New :doc:`(.*)')
-
-  print('Updating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8', newline='\n') as f:
-    note_added = False
-    header_found = False
-    add_note_here = False
+    check_name_dashes = module + "-" + check_name
+    filename = os.path.normpath(
+        os.path.join(module_path, "../../docs/ReleaseNotes.rst")
+    )
+    with io.open(filename, "r", encoding="utf8") as f:
+        lines = f.readlines()
+
+    lineMatcher = re.compile("New checks")
+    nextSectionMatcher = re.compile("New check aliases")
+    checkMatcher = re.compile("- New :doc:`(.*)")
+
+    print("Updating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8", newline="\n") as f:
+        note_added = False
+        header_found = False
+        add_note_here = False
+
+        for line in lines:
+            if not note_added:
+                match = lineMatcher.match(line)
+                match_next = nextSectionMatcher.match(line)
+                match_check = checkMatcher.match(line)
+                if match_check:
+                    last_check = match_check.group(1)
+                    if last_check > check_name_dashes:
+                        add_note_here = True
+
+                if match_next:
+                    add_note_here = True
 
-    for line in lines:
-      if not note_added:
-        match = lineMatcher.match(line)
-        match_next = nextSectionMatcher.match(line)
-        match_check = checkMatcher.match(line)
-        if match_check:
-          last_check = match_check.group(1)
-          if last_check > check_name_dashes:
-            add_note_here = True
-
-        if match_next:
-          add_note_here = True
-
-        if match:
-          header_found = True
-          f.write(line)
-          continue
-
-        if line.startswith('^^^^'):
-          f.write(line)
-          continue
-
-        if header_found and add_note_here:
-          if not line.startswith('^^^^'):
-            f.write("""- New :doc:`%s
+                if match:
+                    header_found = True
+                    f.write(line)
+                    continue
+
+                if line.startswith("^^^^"):
+                    f.write(line)
+                    continue
+
+                if header_found and add_note_here:
+                    if not line.startswith("^^^^"):
+                        f.write(
+                            """- New :doc:`%s
   <clang-tidy/checks/%s/%s>` check.
 
   FIXME: add release notes.
 
-""" % (check_name_dashes, module, check_name))
-            note_added = True
+"""
+                            % (check_name_dashes, module, check_name)
+                        )
+                        note_added = True
 
-      f.write(line)
+            f.write(line)
 
 
 # Adds a test for the check.
 def write_test(module_path, module, check_name, test_extension):
-  check_name_dashes = module + '-' + check_name
-  filename = os.path.normpath(os.path.join(
-    module_path, '..', '..', 'test', 'clang-tidy', 'checkers',
-    module, check_name + '.' + test_extension))
-  print('Creating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8', newline='\n') as f:
-    f.write("""// RUN: %%check_clang_tidy %%s %(check_name_dashes)s %%t
+    check_name_dashes = module + "-" + check_name
+    filename = os.path.normpath(
+        os.path.join(
+            module_path,
+            "..",
+            "..",
+            "test",
+            "clang-tidy",
+            "checkers",
+            module,
+            check_name + "." + test_extension,
+        )
+    )
+    print("Creating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8", newline="\n") as f:
+        f.write(
+            """// RUN: %%check_clang_tidy %%s %(check_name_dashes)s %%t
 
 // FIXME: Add something that triggers the check here.
 void f();
@@ -289,298 +324,327 @@ def write_test(module_path, module, check_name, test_extension):
 
 // FIXME: Add something that doesn't trigger the check here.
 void awesome_f2();
-""" % {'check_name_dashes': check_name_dashes})
+"""
+            % {"check_name_dashes": check_name_dashes}
+        )
 
 
 def get_actual_filename(dirname, filename):
-  if not os.path.isdir(dirname):
-    return ''
-  name = os.path.join(dirname, filename)
-  if (os.path.isfile(name)):
-    return name
-  caselessname = filename.lower()
-  for file in os.listdir(dirname):
-    if (file.lower() == caselessname):
-      return os.path.join(dirname, file)
-  return ''
+    if not os.path.isdir(dirname):
+        return ""
+    name = os.path.join(dirname, filename)
+    if os.path.isfile(name):
+        return name
+    caselessname = filename.lower()
+    for file in os.listdir(dirname):
+        if file.lower() == caselessname:
+            return os.path.join(dirname, file)
+    return ""
 
 
 # Recreates the list of checks in the docs/clang-tidy/checks directory.
 def update_checks_list(clang_tidy_path):
-  docs_dir = os.path.join(clang_tidy_path, '../docs/clang-tidy/checks')
-  filename = os.path.normpath(os.path.join(docs_dir, 'list.rst'))
-  # Read the content of the current list.rst file
-  with io.open(filename, 'r', encoding='utf8') as f:
-    lines = f.readlines()
-  # Get all existing docs
-  doc_files = []
-  for subdir in filter(lambda s: os.path.isdir(os.path.join(docs_dir, s)), os.listdir(docs_dir)):
-    for file in filter(lambda s: s.endswith('.rst'), os.listdir(os.path.join(docs_dir, subdir))):
-      doc_files.append([subdir, file])
-  doc_files.sort()
-
-  # We couldn't find the source file from the check name, so try to find the
-  # class name that corresponds to the check in the module file.
-  def filename_from_module(module_name, check_name):
-    module_path = os.path.join(clang_tidy_path, module_name)
-    if not os.path.isdir(module_path):
-      return ''
-    module_file = get_module_filename(module_path, module_name)
-    if not os.path.isfile(module_file):
-      return ''
-    with io.open(module_file, 'r') as f:
-      code = f.read()
-      full_check_name = module_name + '-' + check_name
-      name_pos = code.find('"' + full_check_name + '"')
-      if name_pos == -1:
-        return ''
-      stmt_end_pos = code.find(';', name_pos)
-      if stmt_end_pos == -1:
-        return ''
-      stmt_start_pos = code.rfind(';', 0, name_pos)
-      if stmt_start_pos == -1:
-        stmt_start_pos = code.rfind('{', 0, name_pos)
-      if stmt_start_pos == -1:
-        return ''
-      stmt = code[stmt_start_pos+1:stmt_end_pos]
-      matches = re.search('registerCheck<([^>:]*)>\(\s*"([^"]*)"\s*\)', stmt)
-      if matches and matches[2] == full_check_name:
-        class_name = matches[1]
-        if '::' in class_name:
-          parts = class_name.split('::')
-          class_name = parts[-1]
-          class_path = os.path.join(clang_tidy_path, module_name, '..', *parts[0:-1])
+    docs_dir = os.path.join(clang_tidy_path, "../docs/clang-tidy/checks")
+    filename = os.path.normpath(os.path.join(docs_dir, "list.rst"))
+    # Read the content of the current list.rst file
+    with io.open(filename, "r", encoding="utf8") as f:
+        lines = f.readlines()
+    # Get all existing docs
+    doc_files = []
+    for subdir in filter(
+        lambda s: os.path.isdir(os.path.join(docs_dir, s)), os.listdir(docs_dir)
+    ):
+        for file in filter(
+            lambda s: s.endswith(".rst"), os.listdir(os.path.join(docs_dir, subdir))
+        ):
+            doc_files.append([subdir, file])
+    doc_files.sort()
+
+    # We couldn't find the source file from the check name, so try to find the
+    # class name that corresponds to the check in the module file.
+    def filename_from_module(module_name, check_name):
+        module_path = os.path.join(clang_tidy_path, module_name)
+        if not os.path.isdir(module_path):
+            return ""
+        module_file = get_module_filename(module_path, module_name)
+        if not os.path.isfile(module_file):
+            return ""
+        with io.open(module_file, "r") as f:
+            code = f.read()
+            full_check_name = module_name + "-" + check_name
+            name_pos = code.find('"' + full_check_name + '"')
+            if name_pos == -1:
+                return ""
+            stmt_end_pos = code.find(";", name_pos)
+            if stmt_end_pos == -1:
+                return ""
+            stmt_start_pos = code.rfind(";", 0, name_pos)
+            if stmt_start_pos == -1:
+                stmt_start_pos = code.rfind("{", 0, name_pos)
+            if stmt_start_pos == -1:
+                return ""
+            stmt = code[stmt_start_pos + 1 : stmt_end_pos]
+            matches = re.search('registerCheck<([^>:]*)>\(\s*"([^"]*)"\s*\)', stmt)
+            if matches and matches[2] == full_check_name:
+                class_name = matches[1]
+                if "::" in class_name:
+                    parts = class_name.split("::")
+                    class_name = parts[-1]
+                    class_path = os.path.join(
+                        clang_tidy_path, module_name, "..", *parts[0:-1]
+                    )
+                else:
+                    class_path = os.path.join(clang_tidy_path, module_name)
+                return get_actual_filename(class_path, class_name + ".cpp")
+
+        return ""
+
+    # Examine code looking for a c'tor definition to get the base class name.
+    def get_base_class(code, check_file):
+        check_class_name = os.path.splitext(os.path.basename(check_file))[0]
+        ctor_pattern = check_class_name + "\([^:]*\)\s*:\s*([A-Z][A-Za-z0-9]*Check)\("
+        matches = re.search("\s+" + check_class_name + "::" + ctor_pattern, code)
+
+        # The constructor might be inline in the header.
+        if not matches:
+            header_file = os.path.splitext(check_file)[0] + ".h"
+            if not os.path.isfile(header_file):
+                return ""
+            with io.open(header_file, encoding="utf8") as f:
+                code = f.read()
+            matches = re.search(" " + ctor_pattern, code)
+
+        if matches and matches[1] != "ClangTidyCheck":
+            return matches[1]
+        return ""
+
+    # Some simple heuristics to figure out if a check has an autofix or not.
+    def has_fixits(code):
+        for needle in [
+            "FixItHint",
+            "ReplacementText",
+            "fixit",
+            "TransformerClangTidyCheck",
+        ]:
+            if needle in code:
+                return True
+        return False
+
+    # Try to figure out of the check supports fixits.
+    def has_auto_fix(check_name):
+        dirname, _, check_name = check_name.partition("-")
+
+        check_file = get_actual_filename(
+            os.path.join(clang_tidy_path, dirname),
+            get_camel_check_name(check_name) + ".cpp",
+        )
+        if not os.path.isfile(check_file):
+            # Some older checks don't end with 'Check.cpp'
+            check_file = get_actual_filename(
+                os.path.join(clang_tidy_path, dirname),
+                get_camel_name(check_name) + ".cpp",
+            )
+            if not os.path.isfile(check_file):
+                # Some checks aren't in a file based on the check name.
+                check_file = filename_from_module(dirname, check_name)
+                if not check_file or not os.path.isfile(check_file):
+                    return ""
+
+        with io.open(check_file, encoding="utf8") as f:
+            code = f.read()
+            if has_fixits(code):
+                return ' "Yes"'
+
+        base_class = get_base_class(code, check_file)
+        if base_class:
+            base_file = os.path.join(clang_tidy_path, dirname, base_class + ".cpp")
+            if os.path.isfile(base_file):
+                with io.open(base_file, encoding="utf8") as f:
+                    code = f.read()
+                    if has_fixits(code):
+                        return ' "Yes"'
+
+        return ""
+
+    def process_doc(doc_file):
+        check_name = doc_file[0] + "-" + doc_file[1].replace(".rst", "")
+
+        with io.open(os.path.join(docs_dir, *doc_file), "r", encoding="utf8") as doc:
+            content = doc.read()
+            match = re.search(".*:orphan:.*", content)
+
+            if match:
+                # Orphan page, don't list it.
+                return "", ""
+
+            match = re.search(".*:http-equiv=refresh: \d+;URL=(.*).html(.*)", content)
+            # Is it a redirect?
+            return check_name, match
+
+    def format_link(doc_file):
+        check_name, match = process_doc(doc_file)
+        if not match and check_name:
+            return "   `%(check_name)s <%(module)s/%(check)s.html>`_,%(autofix)s\n" % {
+                "check_name": check_name,
+                "module": doc_file[0],
+                "check": doc_file[1].replace(".rst", ""),
+                "autofix": has_auto_fix(check_name),
+            }
         else:
-          class_path = os.path.join(clang_tidy_path, module_name)
-        return get_actual_filename(class_path, class_name + '.cpp')
-
-    return ''
-
-  # Examine code looking for a c'tor definition to get the base class name.
-  def get_base_class(code, check_file):
-    check_class_name = os.path.splitext(os.path.basename(check_file))[0]
-    ctor_pattern = check_class_name + '\([^:]*\)\s*:\s*([A-Z][A-Za-z0-9]*Check)\('
-    matches = re.search('\s+' + check_class_name + '::' + ctor_pattern, code)
-
-    # The constructor might be inline in the header.
-    if not matches:
-      header_file = os.path.splitext(check_file)[0] + '.h'
-      if not os.path.isfile(header_file):
-        return ''
-      with io.open(header_file, encoding='utf8') as f:
-        code = f.read()
-      matches = re.search(' ' + ctor_pattern, code)
-
-    if matches and matches[1] != 'ClangTidyCheck':
-      return matches[1]
-    return ''
-
-  # Some simple heuristics to figure out if a check has an autofix or not.
-  def has_fixits(code):
-    for needle in ['FixItHint', 'ReplacementText', 'fixit',
-                   'TransformerClangTidyCheck']:
-      if needle in code:
-        return True
-    return False
-
-  # Try to figure out of the check supports fixits.
-  def has_auto_fix(check_name):
-    dirname, _, check_name = check_name.partition('-')
-
-    check_file = get_actual_filename(os.path.join(clang_tidy_path, dirname),
-                                       get_camel_check_name(check_name) + '.cpp')
-    if not os.path.isfile(check_file):
-      # Some older checks don't end with 'Check.cpp'
-      check_file = get_actual_filename(os.path.join(clang_tidy_path, dirname),
-                                         get_camel_name(check_name) + '.cpp')
-      if not os.path.isfile(check_file):
-        # Some checks aren't in a file based on the check name.
-        check_file = filename_from_module(dirname, check_name)
-        if not check_file or not os.path.isfile(check_file):
-          return ''
-
-    with io.open(check_file, encoding='utf8') as f:
-      code = f.read()
-      if has_fixits(code):
-        return ' "Yes"'
-
-    base_class = get_base_class(code, check_file)
-    if base_class:
-      base_file = os.path.join(clang_tidy_path, dirname, base_class + '.cpp')
-      if os.path.isfile(base_file):
-        with io.open(base_file, encoding='utf8') as f:
-          code = f.read()
-          if has_fixits(code):
-            return ' "Yes"'
-
-    return ''
-
-  def process_doc(doc_file):
-    check_name = doc_file[0] + '-' + doc_file[1].replace('.rst', '')
-
-    with io.open(os.path.join(docs_dir, *doc_file), 'r', encoding='utf8') as doc:
-      content = doc.read()
-      match = re.search('.*:orphan:.*', content)
-
-      if match:
-        # Orphan page, don't list it.
-        return '', ''
-
-      match = re.search('.*:http-equiv=refresh: \d+;URL=(.*).html(.*)',
-                        content)
-      # Is it a redirect?
-      return check_name, match
-
-  def format_link(doc_file):
-    check_name, match = process_doc(doc_file)
-    if not match and check_name:
-      return '   `%(check_name)s <%(module)s/%(check)s.html>`_,%(autofix)s\n' % {
-        'check_name': check_name,
-        'module': doc_file[0],
-        'check': doc_file[1].replace('.rst', ''),
-        'autofix': has_auto_fix(check_name)
-      }
-    else:
-      return ''
-
-  def format_link_alias(doc_file):
-    check_name, match = process_doc(doc_file)
-    if match and check_name:
-      module = doc_file[0]
-      check_file = doc_file[1].replace('.rst', '')
-      if match.group(1) == 'https://clang.llvm.org/docs/analyzer/checkers':
-        title = 'Clang Static Analyzer ' + check_file
-        # Preserve the anchor in checkers.html from group 2.
-        target = match.group(1) + '.html' + match.group(2)
-        autofix = ''
-      else:
-        redirect_parts = re.search('^\.\./([^/]*)/([^/]*)$', match.group(1))
-        title = redirect_parts[1] + '-' + redirect_parts[2]
-        target = redirect_parts[1] + '/' + redirect_parts[2] + '.html'
-        autofix = has_auto_fix(title)
-
-      # The checker is just a redirect.
-      return '   `%(check_name)s <%(module)s/%(check_file)s.html>`_, `%(title)s <%(target)s>`_,%(autofix)s\n' % {
-        'check_name': check_name,
-        'module': module,
-        'check_file': check_file,
-        'target': target,
-        'title': title,
-        'autofix': autofix
-      }
-    return ''
-
-  checks = map(format_link, doc_files)
-  checks_alias = map(format_link_alias, doc_files)
-
-  print('Updating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8', newline='\n') as f:
-    for line in lines:
-      f.write(line)
-      if line.strip() == '.. csv-table::':
-        # We dump the checkers
-        f.write('   :header: "Name", "Offers fixes"\n\n')
-        f.writelines(checks)
-        # and the aliases
-        f.write('\n\n')
-        f.write('.. csv-table:: Aliases..\n')
-        f.write('   :header: "Name", "Redirect", "Offers fixes"\n\n')
-        f.writelines(checks_alias)
-        break
+            return ""
+
+    def format_link_alias(doc_file):
+        check_name, match = process_doc(doc_file)
+        if match and check_name:
+            module = doc_file[0]
+            check_file = doc_file[1].replace(".rst", "")
+            if match.group(1) == "https://clang.llvm.org/docs/analyzer/checkers":
+                title = "Clang Static Analyzer " + check_file
+                # Preserve the anchor in checkers.html from group 2.
+                target = match.group(1) + ".html" + match.group(2)
+                autofix = ""
+            else:
+                redirect_parts = re.search("^\.\./([^/]*)/([^/]*)$", match.group(1))
+                title = redirect_parts[1] + "-" + redirect_parts[2]
+                target = redirect_parts[1] + "/" + redirect_parts[2] + ".html"
+                autofix = has_auto_fix(title)
+
+            # The checker is just a redirect.
+            return (
+                "   `%(check_name)s <%(module)s/%(check_file)s.html>`_, `%(title)s <%(target)s>`_,%(autofix)s\n"
+                % {
+                    "check_name": check_name,
+                    "module": module,
+                    "check_file": check_file,
+                    "target": target,
+                    "title": title,
+                    "autofix": autofix,
+                }
+            )
+        return ""
+
+    checks = map(format_link, doc_files)
+    checks_alias = map(format_link_alias, doc_files)
+
+    print("Updating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8", newline="\n") as f:
+        for line in lines:
+            f.write(line)
+            if line.strip() == ".. csv-table::":
+                # We dump the checkers
+                f.write('   :header: "Name", "Offers fixes"\n\n')
+                f.writelines(checks)
+                # and the aliases
+                f.write("\n\n")
+                f.write(".. csv-table:: Aliases..\n")
+                f.write('   :header: "Name", "Redirect", "Offers fixes"\n\n')
+                f.writelines(checks_alias)
+                break
 
 
 # Adds a documentation for the check.
 def write_docs(module_path, module, check_name):
-  check_name_dashes = module + '-' + check_name
-  filename = os.path.normpath(os.path.join(
-      module_path, '../../docs/clang-tidy/checks/', module, check_name + '.rst'))
-  print('Creating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8', newline='\n') as f:
-    f.write(""".. title:: clang-tidy - %(check_name_dashes)s
+    check_name_dashes = module + "-" + check_name
+    filename = os.path.normpath(
+        os.path.join(
+            module_path, "../../docs/clang-tidy/checks/", module, check_name + ".rst"
+        )
+    )
+    print("Creating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8", newline="\n") as f:
+        f.write(
+            """.. title:: clang-tidy - %(check_name_dashes)s
 
 %(check_name_dashes)s
 %(underline)s
 
 FIXME: Describe what patterns does the check detect and why. Give examples.
-""" % {'check_name_dashes': check_name_dashes,
-       'underline': '=' * len(check_name_dashes)})
+"""
+            % {
+                "check_name_dashes": check_name_dashes,
+                "underline": "=" * len(check_name_dashes),
+            }
+        )
 
 
 def get_camel_name(check_name):
-  return ''.join(map(lambda elem: elem.capitalize(),
-                     check_name.split('-')))
+    return "".join(map(lambda elem: elem.capitalize(), check_name.split("-")))
 
 
 def get_camel_check_name(check_name):
-  return get_camel_name(check_name) + 'Check'
+    return get_camel_name(check_name) + "Check"
 
 
 def main():
-  language_to_extension = {
-      'c': 'c',
-      'c++': 'cpp',
-      'objc': 'm',
-      'objc++': 'mm',
-  }
-  parser = argparse.ArgumentParser()
-  parser.add_argument(
-      '--update-docs',
-      action='store_true',
-      help='just update the list of documentation files, then exit')
-  parser.add_argument(
-      '--language',
-      help='language to use for new check (defaults to c++)',
-      choices=language_to_extension.keys(),
-      default='c++',
-      metavar='LANG')
-  parser.add_argument(
-      'module',
-      nargs='?',
-      help='module directory under which to place the new tidy check (e.g., misc)')
-  parser.add_argument(
-      'check',
-      nargs='?',
-      help='name of new tidy check to add (e.g. foo-do-the-stuff)')
-  args = parser.parse_args()
-
-  if args.update_docs:
-    update_checks_list(os.path.dirname(sys.argv[0]))
-    return
-
-  if not args.module or not args.check:
-    print('Module and check must be specified.')
-    parser.print_usage()
-    return
-
-  module = args.module
-  check_name = args.check
-  check_name_camel = get_camel_check_name(check_name)
-  if check_name.startswith(module):
-    print('Check name "%s" must not start with the module "%s". Exiting.' % (
-        check_name, module))
-    return
-  clang_tidy_path = os.path.dirname(sys.argv[0])
-  module_path = os.path.join(clang_tidy_path, module)
-
-  if not adapt_cmake(module_path, check_name_camel):
-    return
-
-  # Map module names to namespace names that don't conflict with widely used top-level namespaces.
-  if module == 'llvm':
-    namespace = module + '_check'
-  else:
-    namespace = module
-
-  write_header(module_path, module, namespace, check_name, check_name_camel)
-  write_implementation(module_path, module, namespace, check_name_camel)
-  adapt_module(module_path, module, check_name, check_name_camel)
-  add_release_notes(module_path, module, check_name)
-  test_extension = language_to_extension.get(args.language)
-  write_test(module_path, module, check_name, test_extension)
-  write_docs(module_path, module, check_name)
-  update_checks_list(clang_tidy_path)
-  print('Done. Now it\'s your turn!')
-
-
-if __name__ == '__main__':
-  main()
+    language_to_extension = {
+        "c": "c",
+        "c++": "cpp",
+        "objc": "m",
+        "objc++": "mm",
+    }
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--update-docs",
+        action="store_true",
+        help="just update the list of documentation files, then exit",
+    )
+    parser.add_argument(
+        "--language",
+        help="language to use for new check (defaults to c++)",
+        choices=language_to_extension.keys(),
+        default="c++",
+        metavar="LANG",
+    )
+    parser.add_argument(
+        "module",
+        nargs="?",
+        help="module directory under which to place the new tidy check (e.g., misc)",
+    )
+    parser.add_argument(
+        "check", nargs="?", help="name of new tidy check to add (e.g. foo-do-the-stuff)"
+    )
+    args = parser.parse_args()
+
+    if args.update_docs:
+        update_checks_list(os.path.dirname(sys.argv[0]))
+        return
+
+    if not args.module or not args.check:
+        print("Module and check must be specified.")
+        parser.print_usage()
+        return
+
+    module = args.module
+    check_name = args.check
+    check_name_camel = get_camel_check_name(check_name)
+    if check_name.startswith(module):
+        print(
+            'Check name "%s" must not start with the module "%s". Exiting.'
+            % (check_name, module)
+        )
+        return
+    clang_tidy_path = os.path.dirname(sys.argv[0])
+    module_path = os.path.join(clang_tidy_path, module)
+
+    if not adapt_cmake(module_path, check_name_camel):
+        return
+
+    # Map module names to namespace names that don't conflict with widely used top-level namespaces.
+    if module == "llvm":
+        namespace = module + "_check"
+    else:
+        namespace = module
+
+    write_header(module_path, module, namespace, check_name, check_name_camel)
+    write_implementation(module_path, module, namespace, check_name_camel)
+    adapt_module(module_path, module, check_name, check_name_camel)
+    add_release_notes(module_path, module, check_name)
+    test_extension = language_to_extension.get(args.language)
+    write_test(module_path, module, check_name, test_extension)
+    write_docs(module_path, module, check_name)
+    update_checks_list(clang_tidy_path)
+    print("Done. Now it's your turn!")
+
+
+if __name__ == "__main__":
+    main()

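A note on the naming helpers reformatted above: get_camel_name and
get_camel_check_name derive the check's class name from its dashed name, and
the reformat does not change that behavior. A minimal standalone sketch, using
a hypothetical check name purely for illustration:

    # Hypothetical input; mirrors get_camel_name/get_camel_check_name above.
    check_name = "foo-do-the-stuff"
    camel = "".join(part.capitalize() for part in check_name.split("-"))
    print(camel)            # FooDoTheStuff
    print(camel + "Check")  # FooDoTheStuffCheck
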
diff --git a/clang-tools-extra/clang-tidy/rename_check.py b/clang-tools-extra/clang-tidy/rename_check.py
index f56776fdb7291..bf9c886699cb2 100755
--- a/clang-tools-extra/clang-tidy/rename_check.py
+++ b/clang-tools-extra/clang-tidy/rename_check.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 #
-#===- rename_check.py - clang-tidy check renamer ------------*- python -*--===#
+# ===- rename_check.py - clang-tidy check renamer ------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 from __future__ import unicode_literals
 
@@ -16,321 +16,387 @@
 import os
 import re
 
+
 def replaceInFileRegex(fileName, sFrom, sTo):
-  if sFrom == sTo:
-    return
+    if sFrom == sTo:
+        return
 
-  # The documentation files are encoded using UTF-8, however on Windows the
-  # default encoding might be different (e.g. CP-1252). To make sure UTF-8 is
-  # always used, use `io.open(filename, mode, encoding='utf8')` for reading and
-  # writing files here and elsewhere.
-  txt = None
-  with io.open(fileName, 'r', encoding='utf8') as f:
-    txt = f.read()
+    # The documentation files are encoded using UTF-8, however on Windows the
+    # default encoding might be different (e.g. CP-1252). To make sure UTF-8 is
+    # always used, use `io.open(filename, mode, encoding='utf8')` for reading and
+    # writing files here and elsewhere.
+    txt = None
+    with io.open(fileName, "r", encoding="utf8") as f:
+        txt = f.read()
 
-  txt = re.sub(sFrom, sTo, txt)
-  print("Replacing '%s' -> '%s' in '%s'..." % (sFrom, sTo, fileName))
-  with io.open(fileName, 'w', encoding='utf8') as f:
-    f.write(txt)
+    txt = re.sub(sFrom, sTo, txt)
+    print("Replacing '%s' -> '%s' in '%s'..." % (sFrom, sTo, fileName))
+    with io.open(fileName, "w", encoding="utf8") as f:
+        f.write(txt)
 
 
 def replaceInFile(fileName, sFrom, sTo):
-  if sFrom == sTo:
-    return
-  txt = None
-  with io.open(fileName, 'r', encoding='utf8') as f:
-    txt = f.read()
+    if sFrom == sTo:
+        return
+    txt = None
+    with io.open(fileName, "r", encoding="utf8") as f:
+        txt = f.read()
 
-  if sFrom not in txt:
-    return
+    if sFrom not in txt:
+        return
 
-  txt = txt.replace(sFrom, sTo)
-  print("Replacing '%s' -> '%s' in '%s'..." % (sFrom, sTo, fileName))
-  with io.open(fileName, 'w', encoding='utf8') as f:
-    f.write(txt)
+    txt = txt.replace(sFrom, sTo)
+    print("Replacing '%s' -> '%s' in '%s'..." % (sFrom, sTo, fileName))
+    with io.open(fileName, "w", encoding="utf8") as f:
+        f.write(txt)
 
 
 def generateCommentLineHeader(filename):
-  return ''.join(['//===--- ',
-                  os.path.basename(filename),
-                  ' - clang-tidy ',
-                  '-' * max(0, 42 - len(os.path.basename(filename))),
-                  '*- C++ -*-===//'])
+    return "".join(
+        [
+            "//===--- ",
+            os.path.basename(filename),
+            " - clang-tidy ",
+            "-" * max(0, 42 - len(os.path.basename(filename))),
+            "*- C++ -*-===//",
+        ]
+    )
 
 
 def generateCommentLineSource(filename):
-  return ''.join(['//===--- ',
-                  os.path.basename(filename),
-                  ' - clang-tidy',
-                  '-' * max(0, 52 - len(os.path.basename(filename))),
-                  '-===//'])
+    return "".join(
+        [
+            "//===--- ",
+            os.path.basename(filename),
+            " - clang-tidy",
+            "-" * max(0, 52 - len(os.path.basename(filename))),
+            "-===//",
+        ]
+    )
 
 
 def fileRename(fileName, sFrom, sTo):
-  if sFrom not in fileName or sFrom == sTo:
-    return fileName
-  newFileName = fileName.replace(sFrom, sTo)
-  print("Renaming '%s' -> '%s'..." % (fileName, newFileName))
-  os.rename(fileName, newFileName)
-  return newFileName
+    if sFrom not in fileName or sFrom == sTo:
+        return fileName
+    newFileName = fileName.replace(sFrom, sTo)
+    print("Renaming '%s' -> '%s'..." % (fileName, newFileName))
+    os.rename(fileName, newFileName)
+    return newFileName
 
 
 def deleteMatchingLines(fileName, pattern):
-  lines = None
-  with io.open(fileName, 'r', encoding='utf8') as f:
-    lines = f.readlines()
+    lines = None
+    with io.open(fileName, "r", encoding="utf8") as f:
+        lines = f.readlines()
 
-  not_matching_lines = [l for l in lines if not re.search(pattern, l)]
-  if len(not_matching_lines) == len(lines):
-    return False
+    not_matching_lines = [l for l in lines if not re.search(pattern, l)]
+    if len(not_matching_lines) == len(lines):
+        return False
 
-  print("Removing lines matching '%s' in '%s'..." % (pattern, fileName))
-  print('  ' + '  '.join([l for l in lines if re.search(pattern, l)]))
-  with io.open(fileName, 'w', encoding='utf8') as f:
-    f.writelines(not_matching_lines)
+    print("Removing lines matching '%s' in '%s'..." % (pattern, fileName))
+    print("  " + "  ".join([l for l in lines if re.search(pattern, l)]))
+    with io.open(fileName, "w", encoding="utf8") as f:
+        f.writelines(not_matching_lines)
 
-  return True
+    return True
 
 
 def getListOfFiles(clang_tidy_path):
-  files = glob.glob(os.path.join(clang_tidy_path, '**'), recursive=True)
-  files += [os.path.normpath(os.path.join(clang_tidy_path,
-                                           '../docs/ReleaseNotes.rst'))]
-  files += glob.glob(os.path.join(clang_tidy_path, '..', 'test',
-                                  'clang-tidy', 'checkers', '**'), recursive=True)
-  files += glob.glob(os.path.join(clang_tidy_path, '..', 'docs',
-                                  'clang-tidy', 'checks', '*.rst'))
-  files += glob.glob(os.path.join(clang_tidy_path, '..', 'docs',
-                                  'clang-tidy', 'checks', "*", "*.rst"), recursive=True)
-  return [filename for filename in files if os.path.isfile(filename)]
+    files = glob.glob(os.path.join(clang_tidy_path, "**"), recursive=True)
+    files += [
+        os.path.normpath(os.path.join(clang_tidy_path, "../docs/ReleaseNotes.rst"))
+    ]
+    files += glob.glob(
+        os.path.join(clang_tidy_path, "..", "test", "clang-tidy", "checkers", "**"),
+        recursive=True,
+    )
+    files += glob.glob(
+        os.path.join(clang_tidy_path, "..", "docs", "clang-tidy", "checks", "*.rst")
+    )
+    files += glob.glob(
+        os.path.join(
+            clang_tidy_path, "..", "docs", "clang-tidy", "checks", "*", "*.rst"
+        ),
+        recursive=True,
+    )
+    return [filename for filename in files if os.path.isfile(filename)]
 
 
 # Adapts the module's CMakeLists file. Returns 'True' if it could add a new
 # entry and 'False' if the entry already existed.
 def adapt_cmake(module_path, check_name_camel):
-  filename = os.path.join(module_path, 'CMakeLists.txt')
-  with io.open(filename, 'r', encoding='utf8') as f:
-    lines = f.readlines()
-
-  cpp_file = check_name_camel + '.cpp'
+    filename = os.path.join(module_path, "CMakeLists.txt")
+    with io.open(filename, "r", encoding="utf8") as f:
+        lines = f.readlines()
 
-  # Figure out whether this check already exists.
-  for line in lines:
-    if line.strip() == cpp_file:
-      return False
+    cpp_file = check_name_camel + ".cpp"
 
-  print('Updating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8') as f:
-    cpp_found = False
-    file_added = False
+    # Figure out whether this check already exists.
     for line in lines:
-      cpp_line = line.strip().endswith('.cpp')
-      if (not file_added) and (cpp_line or cpp_found):
-        cpp_found = True
-        if (line.strip() > cpp_file) or (not cpp_line):
-          f.write('  ' + cpp_file + '\n')
-          file_added = True
-      f.write(line)
+        if line.strip() == cpp_file:
+            return False
+
+    print("Updating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8") as f:
+        cpp_found = False
+        file_added = False
+        for line in lines:
+            cpp_line = line.strip().endswith(".cpp")
+            if (not file_added) and (cpp_line or cpp_found):
+                cpp_found = True
+                if (line.strip() > cpp_file) or (not cpp_line):
+                    f.write("  " + cpp_file + "\n")
+                    file_added = True
+            f.write(line)
+
+    return True
 
-  return True
 
 # Modifies the module to include the new check.
 def adapt_module(module_path, module, check_name, check_name_camel):
-  modulecpp = next(iter(filter(
-      lambda p: p.lower() == module.lower() + 'tidymodule.cpp',
-      os.listdir(module_path))))
-  filename = os.path.join(module_path, modulecpp)
-  with io.open(filename, 'r', encoding='utf8') as f:
-    lines = f.readlines()
-
-  print('Updating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8') as f:
-    header_added = False
-    header_found = False
-    check_added = False
-    check_decl = ('    CheckFactories.registerCheck<' + check_name_camel +
-                  '>(\n        "' + check_name + '");\n')
-
-    for line in lines:
-      if not header_added:
-        match = re.search('#include "(.*)"', line)
-        if match:
-          header_found = True
-          if match.group(1) > check_name_camel:
-            header_added = True
-            f.write('#include "' + check_name_camel + '.h"\n')
-        elif header_found:
-          header_added = True
-          f.write('#include "' + check_name_camel + '.h"\n')
-
-      if not check_added:
-        if line.strip() == '}':
-          check_added = True
-          f.write(check_decl)
-        else:
-          match = re.search('registerCheck<(.*)>', line)
-          if match and match.group(1) > check_name_camel:
-            check_added = True
-            f.write(check_decl)
-      f.write(line)
+    modulecpp = next(
+        iter(
+            filter(
+                lambda p: p.lower() == module.lower() + "tidymodule.cpp",
+                os.listdir(module_path),
+            )
+        )
+    )
+    filename = os.path.join(module_path, modulecpp)
+    with io.open(filename, "r", encoding="utf8") as f:
+        lines = f.readlines()
+
+    print("Updating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8") as f:
+        header_added = False
+        header_found = False
+        check_added = False
+        check_decl = (
+            "    CheckFactories.registerCheck<"
+            + check_name_camel
+            + '>(\n        "'
+            + check_name
+            + '");\n'
+        )
+
+        for line in lines:
+            if not header_added:
+                match = re.search('#include "(.*)"', line)
+                if match:
+                    header_found = True
+                    if match.group(1) > check_name_camel:
+                        header_added = True
+                        f.write('#include "' + check_name_camel + '.h"\n')
+                elif header_found:
+                    header_added = True
+                    f.write('#include "' + check_name_camel + '.h"\n')
+
+            if not check_added:
+                if line.strip() == "}":
+                    check_added = True
+                    f.write(check_decl)
+                else:
+                    match = re.search("registerCheck<(.*)>", line)
+                    if match and match.group(1) > check_name_camel:
+                        check_added = True
+                        f.write(check_decl)
+            f.write(line)
 
 
 # Adds a release notes entry.
 def add_release_notes(clang_tidy_path, old_check_name, new_check_name):
-  filename = os.path.normpath(os.path.join(clang_tidy_path,
-                                           '../docs/ReleaseNotes.rst'))
-  with io.open(filename, 'r', encoding='utf8') as f:
-    lines = f.readlines()
-
-  lineMatcher = re.compile('Renamed checks')
-  nextSectionMatcher = re.compile('Improvements to include-fixer')
-  checkMatcher = re.compile('- The \'(.*)')
-
-  print('Updating %s...' % filename)
-  with io.open(filename, 'w', encoding='utf8') as f:
-    note_added = False
-    header_found = False
-    add_note_here = False
-
-    for line in lines:
-      if not note_added:
-        match = lineMatcher.match(line)
-        match_next = nextSectionMatcher.match(line)
-        match_check = checkMatcher.match(line)
-        if match_check:
-          last_check = match_check.group(1)
-          if last_check > old_check_name:
-            add_note_here = True
-
-        if match_next:
-          add_note_here = True
-
-        if match:
-          header_found = True
-          f.write(line)
-          continue
-
-        if line.startswith('^^^^'):
-          f.write(line)
-          continue
-
-        if header_found and add_note_here:
-          if not line.startswith('^^^^'):
-            f.write("""- The '%s' check was renamed to :doc:`%s
+    filename = os.path.normpath(
+        os.path.join(clang_tidy_path, "../docs/ReleaseNotes.rst")
+    )
+    with io.open(filename, "r", encoding="utf8") as f:
+        lines = f.readlines()
+
+    lineMatcher = re.compile("Renamed checks")
+    nextSectionMatcher = re.compile("Improvements to include-fixer")
+    checkMatcher = re.compile("- The '(.*)")
+
+    print("Updating %s..." % filename)
+    with io.open(filename, "w", encoding="utf8") as f:
+        note_added = False
+        header_found = False
+        add_note_here = False
+
+        for line in lines:
+            if not note_added:
+                match = lineMatcher.match(line)
+                match_next = nextSectionMatcher.match(line)
+                match_check = checkMatcher.match(line)
+                if match_check:
+                    last_check = match_check.group(1)
+                    if last_check > old_check_name:
+                        add_note_here = True
+
+                if match_next:
+                    add_note_here = True
+
+                if match:
+                    header_found = True
+                    f.write(line)
+                    continue
+
+                if line.startswith("^^^^"):
+                    f.write(line)
+                    continue
+
+                if header_found and add_note_here:
+                    if not line.startswith("^^^^"):
+                        f.write(
+                            """- The '%s' check was renamed to :doc:`%s
   <clang-tidy/checks/%s/%s>`
 
-                    """ % (old_check_name, new_check_name,
-                           new_check_name.split('-', 1)[0],
-                           '-'.join(new_check_name.split('-')[1:])))
-            note_added = True
+                    """
+                            % (
+                                old_check_name,
+                                new_check_name,
+                                new_check_name.split("-", 1)[0],
+                                "-".join(new_check_name.split("-")[1:]),
+                            )
+                        )
+                        note_added = True
+
+            f.write(line)
 
-      f.write(line)
 
 def main():
-  parser = argparse.ArgumentParser(description='Rename clang-tidy check.')
-  parser.add_argument('old_check_name', type=str,
-                      help='Old check name.')
-  parser.add_argument('new_check_name', type=str,
-                      help='New check name.')
-  parser.add_argument('--check_class_name', type=str,
-                      help='Old name of the class implementing the check.')
-  args = parser.parse_args()
-
-  old_module = args.old_check_name.split('-')[0]
-  new_module = args.new_check_name.split('-')[0]
-  old_name = '-'.join(args.old_check_name.split('-')[1:])
-  new_name  = '-'.join(args.new_check_name.split('-')[1:])
-
-  if args.check_class_name:
-    check_name_camel = args.check_class_name
-  else:
-    check_name_camel = (''.join(map(lambda elem: elem.capitalize(),
-                                    old_name.split('-'))) +
-                        'Check')
-
-  new_check_name_camel = (''.join(map(lambda elem: elem.capitalize(),
-                                      new_name.split('-'))) +
-                          'Check')
-
-  clang_tidy_path = os.path.dirname(__file__)
-
-  header_guard_variants = [
-      (args.old_check_name.replace('-', '_')).upper() + '_CHECK',
-      (old_module + '_' + check_name_camel).upper(),
-      (old_module + '_' + new_check_name_camel).upper(),
-      args.old_check_name.replace('-', '_').upper()]
-  header_guard_new = (new_module + '_' + new_check_name_camel).upper()
-
-  old_module_path = os.path.join(clang_tidy_path, old_module)
-  new_module_path = os.path.join(clang_tidy_path, new_module)
-
-  if (old_module != new_module):
-    # Remove the check from the old module.
-    cmake_lists = os.path.join(old_module_path, 'CMakeLists.txt')
-    check_found = deleteMatchingLines(cmake_lists, '\\b' + check_name_camel)
-    if not check_found:
-      print("Check name '%s' not found in %s. Exiting." %
-            (check_name_camel, cmake_lists))
-      return 1
-
-    modulecpp = next(iter(filter(
-        lambda p: p.lower() == old_module.lower() + 'tidymodule.cpp',
-        os.listdir(old_module_path))))
-    deleteMatchingLines(os.path.join(old_module_path, modulecpp),
-                      '\\b' + check_name_camel + '|\\b' + args.old_check_name)
-
-  for filename in getListOfFiles(clang_tidy_path):
-    originalName = filename
-    filename = fileRename(filename, old_module + "/" + old_name, new_module + "/" + new_name)
-    filename = fileRename(filename, args.old_check_name, args.new_check_name)
-    filename = fileRename(filename, check_name_camel, new_check_name_camel)
-    replaceInFile(filename, generateCommentLineHeader(originalName),
-                  generateCommentLineHeader(filename))
-    replaceInFile(filename, generateCommentLineSource(originalName),
-                  generateCommentLineSource(filename))
-    for header_guard in header_guard_variants:
-      replaceInFile(filename, header_guard, header_guard_new)
-
-    if new_module + '/'+ new_name + '.rst' in filename:
-      replaceInFile(
-          filename,
-          args.old_check_name + '\n' + '=' * len(args.old_check_name) + '\n',
-          args.new_check_name + '\n' + '=' * len(args.new_check_name) + '\n')
-
-    replaceInFile(filename, args.old_check_name, args.new_check_name)
-    replaceInFile(filename, old_module + '::' + check_name_camel,
-                  new_module + '::' + new_check_name_camel)
-    replaceInFile(filename, old_module + '/' + check_name_camel,
-                  new_module + '/' + new_check_name_camel)
-    replaceInFile(filename, old_module + '/' + old_name,
-                  new_module + '/' + new_name)
-    replaceInFile(filename, check_name_camel, new_check_name_camel)
-
-  if old_module != new_module or new_module == 'llvm':
-    if new_module == 'llvm':
-      new_namespace = new_module + '_check'
+    parser = argparse.ArgumentParser(description="Rename clang-tidy check.")
+    parser.add_argument("old_check_name", type=str, help="Old check name.")
+    parser.add_argument("new_check_name", type=str, help="New check name.")
+    parser.add_argument(
+        "--check_class_name",
+        type=str,
+        help="Old name of the class implementing the check.",
+    )
+    args = parser.parse_args()
+
+    old_module = args.old_check_name.split("-")[0]
+    new_module = args.new_check_name.split("-")[0]
+    old_name = "-".join(args.old_check_name.split("-")[1:])
+    new_name = "-".join(args.new_check_name.split("-")[1:])
+
+    if args.check_class_name:
+        check_name_camel = args.check_class_name
     else:
-      new_namespace = new_module
-    check_implementation_files = glob.glob(
-        os.path.join(old_module_path, new_check_name_camel + '*'))
-    for filename in check_implementation_files:
-      # Move check implementation to the directory of the new module.
-      filename = fileRename(filename, old_module_path, new_module_path)
-      replaceInFileRegex(filename,
-                         'namespace clang::tidy::' + old_module + '[^ \n]*',
-                         'namespace clang::tidy::' + new_namespace)
-
-  if old_module != new_module:
-
-      # Add check to the new module.
-      adapt_cmake(new_module_path, new_check_name_camel)
-      adapt_module(new_module_path, new_module, args.new_check_name,
-                   new_check_name_camel)
-
-  os.system(os.path.join(clang_tidy_path, 'add_new_check.py')
-            + ' --update-docs')
-  add_release_notes(clang_tidy_path, args.old_check_name, args.new_check_name)
-
-
-if __name__ == '__main__':
-  main()
+        check_name_camel = (
+            "".join(map(lambda elem: elem.capitalize(), old_name.split("-"))) + "Check"
+        )
+
+    new_check_name_camel = (
+        "".join(map(lambda elem: elem.capitalize(), new_name.split("-"))) + "Check"
+    )
+
+    clang_tidy_path = os.path.dirname(__file__)
+
+    header_guard_variants = [
+        (args.old_check_name.replace("-", "_")).upper() + "_CHECK",
+        (old_module + "_" + check_name_camel).upper(),
+        (old_module + "_" + new_check_name_camel).upper(),
+        args.old_check_name.replace("-", "_").upper(),
+    ]
+    header_guard_new = (new_module + "_" + new_check_name_camel).upper()
+
+    old_module_path = os.path.join(clang_tidy_path, old_module)
+    new_module_path = os.path.join(clang_tidy_path, new_module)
+
+    if old_module != new_module:
+        # Remove the check from the old module.
+        cmake_lists = os.path.join(old_module_path, "CMakeLists.txt")
+        check_found = deleteMatchingLines(cmake_lists, "\\b" + check_name_camel)
+        if not check_found:
+            print(
+                "Check name '%s' not found in %s. Exiting."
+                % (check_name_camel, cmake_lists)
+            )
+            return 1
+
+        modulecpp = next(
+            iter(
+                filter(
+                    lambda p: p.lower() == old_module.lower() + "tidymodule.cpp",
+                    os.listdir(old_module_path),
+                )
+            )
+        )
+        deleteMatchingLines(
+            os.path.join(old_module_path, modulecpp),
+            "\\b" + check_name_camel + "|\\b" + args.old_check_name,
+        )
+
+    for filename in getListOfFiles(clang_tidy_path):
+        originalName = filename
+        filename = fileRename(
+            filename, old_module + "/" + old_name, new_module + "/" + new_name
+        )
+        filename = fileRename(filename, args.old_check_name, args.new_check_name)
+        filename = fileRename(filename, check_name_camel, new_check_name_camel)
+        replaceInFile(
+            filename,
+            generateCommentLineHeader(originalName),
+            generateCommentLineHeader(filename),
+        )
+        replaceInFile(
+            filename,
+            generateCommentLineSource(originalName),
+            generateCommentLineSource(filename),
+        )
+        for header_guard in header_guard_variants:
+            replaceInFile(filename, header_guard, header_guard_new)
+
+        if new_module + "/" + new_name + ".rst" in filename:
+            replaceInFile(
+                filename,
+                args.old_check_name + "\n" + "=" * len(args.old_check_name) + "\n",
+                args.new_check_name + "\n" + "=" * len(args.new_check_name) + "\n",
+            )
+
+        replaceInFile(filename, args.old_check_name, args.new_check_name)
+        replaceInFile(
+            filename,
+            old_module + "::" + check_name_camel,
+            new_module + "::" + new_check_name_camel,
+        )
+        replaceInFile(
+            filename,
+            old_module + "/" + check_name_camel,
+            new_module + "/" + new_check_name_camel,
+        )
+        replaceInFile(
+            filename, old_module + "/" + old_name, new_module + "/" + new_name
+        )
+        replaceInFile(filename, check_name_camel, new_check_name_camel)
+
+    if old_module != new_module or new_module == "llvm":
+        if new_module == "llvm":
+            new_namespace = new_module + "_check"
+        else:
+            new_namespace = new_module
+        check_implementation_files = glob.glob(
+            os.path.join(old_module_path, new_check_name_camel + "*")
+        )
+        for filename in check_implementation_files:
+            # Move check implementation to the directory of the new module.
+            filename = fileRename(filename, old_module_path, new_module_path)
+            replaceInFileRegex(
+                filename,
+                "namespace clang::tidy::" + old_module + "[^ \n]*",
+                "namespace clang::tidy::" + new_namespace,
+            )
+
+    if old_module != new_module:
+
+        # Add check to the new module.
+        adapt_cmake(new_module_path, new_check_name_camel)
+        adapt_module(
+            new_module_path, new_module, args.new_check_name, new_check_name_camel
+        )
+
+    os.system(os.path.join(clang_tidy_path, "add_new_check.py") + " --update-docs")
+    add_release_notes(clang_tidy_path, args.old_check_name, args.new_check_name)
+
+
+if __name__ == "__main__":
+    main()

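For context on the header-guard handling in main() above: several historical
guard spellings are derived from the old check name so each can be rewritten
to the single new guard. A worked sketch with hypothetical names (not from the
patch), mirroring a subset of header_guard_variants:

    # Hypothetical names for illustration.
    old_check_name = "misc-foo"
    old_module, check_name_camel = "misc", "FooCheck"
    variants = [
        old_check_name.replace("-", "_").upper() + "_CHECK",  # MISC_FOO_CHECK
        (old_module + "_" + check_name_camel).upper(),        # MISC_FOOCHECK
        old_check_name.replace("-", "_").upper(),             # MISC_FOO
    ]
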
diff --git a/clang-tools-extra/clang-tidy/tool/clang-tidy-diff.py b/clang-tools-extra/clang-tidy/tool/clang-tidy-diff.py
index a26d2144b7f97..45c28d963b849 100755
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-diff.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-diff.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python
 #
-#===- clang-tidy-diff.py - ClangTidy Diff Checker -----------*- python -*--===#
+# ===- clang-tidy-diff.py - ClangTidy Diff Checker -----------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 r"""
 ClangTidy Diff Checker
@@ -37,11 +37,11 @@
 import traceback
 
 try:
-  import yaml
+    import yaml
 except ImportError:
-  yaml = None
+    yaml = None
 
-is_py2 = sys.version[0] == '2'
+is_py2 = sys.version[0] == "2"
 
 if is_py2:
     import Queue as queue
@@ -50,230 +50,271 @@
 
 
 def run_tidy(task_queue, lock, timeout):
-  watchdog = None
-  while True:
-    command = task_queue.get()
-    try:
-      proc = subprocess.Popen(command,
-                              stdout=subprocess.PIPE,
-                              stderr=subprocess.PIPE)
-
-      if timeout is not None:
-        watchdog = threading.Timer(timeout, proc.kill)
-        watchdog.start()
-
-      stdout, stderr = proc.communicate()
-
-      with lock:
-        sys.stdout.write(stdout.decode('utf-8') + '\n')
-        sys.stdout.flush()
-        if stderr:
-          sys.stderr.write(stderr.decode('utf-8') + '\n')
-          sys.stderr.flush()
-    except Exception as e:
-      with lock:
-        sys.stderr.write('Failed: ' + str(e) + ': '.join(command) + '\n')
-    finally:
-      with lock:
-        if not (timeout is None or watchdog is None):
-          if not watchdog.is_alive():
-              sys.stderr.write('Terminated by timeout: ' +
-                               ' '.join(command) + '\n')
-          watchdog.cancel()
-      task_queue.task_done()
+    watchdog = None
+    while True:
+        command = task_queue.get()
+        try:
+            proc = subprocess.Popen(
+                command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+            )
+
+            if timeout is not None:
+                watchdog = threading.Timer(timeout, proc.kill)
+                watchdog.start()
+
+            stdout, stderr = proc.communicate()
+
+            with lock:
+                sys.stdout.write(stdout.decode("utf-8") + "\n")
+                sys.stdout.flush()
+                if stderr:
+                    sys.stderr.write(stderr.decode("utf-8") + "\n")
+                    sys.stderr.flush()
+        except Exception as e:
+            with lock:
+                sys.stderr.write("Failed: " + str(e) + ": ".join(command) + "\n")
+        finally:
+            with lock:
+                if not (timeout is None or watchdog is None):
+                    if not watchdog.is_alive():
+                        sys.stderr.write(
+                            "Terminated by timeout: " + " ".join(command) + "\n"
+                        )
+                    watchdog.cancel()
+            task_queue.task_done()
 
 
 def start_workers(max_tasks, tidy_caller, task_queue, lock, timeout):
-  for _ in range(max_tasks):
-    t = threading.Thread(target=tidy_caller, args=(task_queue, lock, timeout))
-    t.daemon = True
-    t.start()
+    for _ in range(max_tasks):
+        t = threading.Thread(target=tidy_caller, args=(task_queue, lock, timeout))
+        t.daemon = True
+        t.start()
 
 
 def merge_replacement_files(tmpdir, mergefile):
-  """Merge all replacement files in a directory into a single file"""
-  # The fixes suggested by clang-tidy >= 4.0.0 are given under
-  # the top level key 'Diagnostics' in the output yaml files
-  mergekey = "Diagnostics"
-  merged = []
-  for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
-    content = yaml.safe_load(open(replacefile, 'r'))
-    if not content:
-      continue # Skip empty files.
-    merged.extend(content.get(mergekey, []))
-
-  if merged:
-    # MainSourceFile: The key is required by the definition inside
-    # include/clang/Tooling/ReplacementsYaml.h, but the value
-    # is actually never used inside clang-apply-replacements,
-    # so we set it to '' here.
-    output = {'MainSourceFile': '', mergekey: merged}
-    with open(mergefile, 'w') as out:
-      yaml.safe_dump(output, out)
-  else:
-    # Empty the file:
-    open(mergefile, 'w').close()
+    """Merge all replacement files in a directory into a single file"""
+    # The fixes suggested by clang-tidy >= 4.0.0 are given under
+    # the top level key 'Diagnostics' in the output yaml files
+    mergekey = "Diagnostics"
+    merged = []
+    for replacefile in glob.iglob(os.path.join(tmpdir, "*.yaml")):
+        content = yaml.safe_load(open(replacefile, "r"))
+        if not content:
+            continue  # Skip empty files.
+        merged.extend(content.get(mergekey, []))
+
+    if merged:
+        # MainSourceFile: The key is required by the definition inside
+        # include/clang/Tooling/ReplacementsYaml.h, but the value
+        # is actually never used inside clang-apply-replacements,
+        # so we set it to '' here.
+        output = {"MainSourceFile": "", mergekey: merged}
+        with open(mergefile, "w") as out:
+            yaml.safe_dump(output, out)
+    else:
+        # Empty the file:
+        open(mergefile, "w").close()
 
 
 def main():
-  parser = argparse.ArgumentParser(description=
-                                   'Run clang-tidy against changed files, and '
-                                   'output diagnostics only for modified '
-                                   'lines.')
-  parser.add_argument('-clang-tidy-binary', metavar='PATH',
-                      default='clang-tidy',
-                      help='path to clang-tidy binary')
-  parser.add_argument('-p', metavar='NUM', default=0,
-                      help='strip the smallest prefix containing P slashes')
-  parser.add_argument('-regex', metavar='PATTERN', default=None,
-                      help='custom pattern selecting file paths to check '
-                      '(case sensitive, overrides -iregex)')
-  parser.add_argument('-iregex', metavar='PATTERN', default=
-                      r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc)',
-                      help='custom pattern selecting file paths to check '
-                      '(case insensitive, overridden by -regex)')
-  parser.add_argument('-j', type=int, default=1,
-                      help='number of tidy instances to be run in parallel.')
-  parser.add_argument('-timeout', type=int, default=None,
-                      help='timeout per each file in seconds.')
-  parser.add_argument('-fix', action='store_true', default=False,
-                      help='apply suggested fixes')
-  parser.add_argument('-checks',
-                      help='checks filter, when not specified, use clang-tidy '
-                      'default',
-                      default='')
-  parser.add_argument('-use-color', action='store_true',
-                      help='Use colors in output')
-  parser.add_argument('-path', dest='build_path',
-                      help='Path used to read a compile command database.')
-  if yaml:
-    parser.add_argument('-export-fixes', metavar='FILE', dest='export_fixes',
-                        help='Create a yaml file to store suggested fixes in, '
-                        'which can be applied with clang-apply-replacements.')
-  parser.add_argument('-extra-arg', dest='extra_arg',
-                      action='append', default=[],
-                      help='Additional argument to append to the compiler '
-                      'command line.')
-  parser.add_argument('-extra-arg-before', dest='extra_arg_before',
-                      action='append', default=[],
-                      help='Additional argument to prepend to the compiler '
-                      'command line.')
-  parser.add_argument('-quiet', action='store_true', default=False,
-                      help='Run clang-tidy in quiet mode')
-  parser.add_argument('-load', dest='plugins',
-                      action='append', default=[],
-                      help='Load the specified plugin in clang-tidy.')
-
-  clang_tidy_args = []
-  argv = sys.argv[1:]
-  if '--' in argv:
-    clang_tidy_args.extend(argv[argv.index('--'):])
-    argv = argv[:argv.index('--')]
-
-  args = parser.parse_args(argv)
-
-  # Extract changed lines for each file.
-  filename = None
-  lines_by_file = {}
-  for line in sys.stdin:
-    match = re.search('^\+\+\+\ \"?(.*?/){%s}([^ \t\n\"]*)' % args.p, line)
-    if match:
-      filename = match.group(2)
-    if filename is None:
-      continue
-
-    if args.regex is not None:
-      if not re.match('^%s$' % args.regex, filename):
-        continue
-    else:
-      if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
-        continue
-
-    match = re.search('^@@.*\+(\d+)(,(\d+))?', line)
-    if match:
-      start_line = int(match.group(1))
-      line_count = 1
-      if match.group(3):
-        line_count = int(match.group(3))
-      if line_count == 0:
-        continue
-      end_line = start_line + line_count - 1
-      lines_by_file.setdefault(filename, []).append([start_line, end_line])
-
-  if not any(lines_by_file):
-    print("No relevant changes found.")
-    sys.exit(0)
-
-  max_task_count = args.j
-  if max_task_count == 0:
-      max_task_count = multiprocessing.cpu_count()
-  max_task_count = min(len(lines_by_file), max_task_count)
-
-  tmpdir = None
-  if yaml and args.export_fixes:
-    tmpdir = tempfile.mkdtemp()
-
-  # Tasks for clang-tidy.
-  task_queue = queue.Queue(max_task_count)
-  # A lock for console output.
-  lock = threading.Lock()
-
-  # Run a pool of clang-tidy workers.
-  start_workers(max_task_count, run_tidy, task_queue, lock, args.timeout)
-
-  # Form the common args list.
-  common_clang_tidy_args = []
-  if args.fix:
-    common_clang_tidy_args.append('-fix')
-  if args.checks != '':
-    common_clang_tidy_args.append('-checks=' + args.checks)
-  if args.quiet:
-    common_clang_tidy_args.append('-quiet')
-  if args.build_path is not None:
-    common_clang_tidy_args.append('-p=%s' % args.build_path)
-  if args.use_color:
-    common_clang_tidy_args.append('--use-color')
-  for arg in args.extra_arg:
-    common_clang_tidy_args.append('-extra-arg=%s' % arg)
-  for arg in args.extra_arg_before:
-    common_clang_tidy_args.append('-extra-arg-before=%s' % arg)
-  for plugin in args.plugins:
-    common_clang_tidy_args.append('-load=%s' % plugin)
-
-  for name in lines_by_file:
-    line_filter_json = json.dumps(
-      [{"name": name, "lines": lines_by_file[name]}],
-      separators=(',', ':'))
-
-    # Run clang-tidy on files containing changes.
-    command = [args.clang_tidy_binary]
-    command.append('-line-filter=' + line_filter_json)
+    parser = argparse.ArgumentParser(
+        description="Run clang-tidy against changed files, and "
+        "output diagnostics only for modified "
+        "lines."
+    )
+    parser.add_argument(
+        "-clang-tidy-binary",
+        metavar="PATH",
+        default="clang-tidy",
+        help="path to clang-tidy binary",
+    )
+    parser.add_argument(
+        "-p",
+        metavar="NUM",
+        default=0,
+        help="strip the smallest prefix containing P slashes",
+    )
+    parser.add_argument(
+        "-regex",
+        metavar="PATTERN",
+        default=None,
+        help="custom pattern selecting file paths to check "
+        "(case sensitive, overrides -iregex)",
+    )
+    parser.add_argument(
+        "-iregex",
+        metavar="PATTERN",
+        default=r".*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc)",
+        help="custom pattern selecting file paths to check "
+        "(case insensitive, overridden by -regex)",
+    )
+    parser.add_argument(
+        "-j",
+        type=int,
+        default=1,
+        help="number of tidy instances to be run in parallel.",
+    )
+    parser.add_argument(
+        "-timeout", type=int, default=None, help="timeout per each file in seconds."
+    )
+    parser.add_argument(
+        "-fix", action="store_true", default=False, help="apply suggested fixes"
+    )
+    parser.add_argument(
+        "-checks",
+        help="checks filter, when not specified, use clang-tidy " "default",
+        default="",
+    )
+    parser.add_argument("-use-color", action="store_true", help="Use colors in output")
+    parser.add_argument(
+        "-path", dest="build_path", help="Path used to read a compile command database."
+    )
+    if yaml:
+        parser.add_argument(
+            "-export-fixes",
+            metavar="FILE",
+            dest="export_fixes",
+            help="Create a yaml file to store suggested fixes in, "
+            "which can be applied with clang-apply-replacements.",
+        )
+    parser.add_argument(
+        "-extra-arg",
+        dest="extra_arg",
+        action="append",
+        default=[],
+        help="Additional argument to append to the compiler " "command line.",
+    )
+    parser.add_argument(
+        "-extra-arg-before",
+        dest="extra_arg_before",
+        action="append",
+        default=[],
+        help="Additional argument to prepend to the compiler " "command line.",
+    )
+    parser.add_argument(
+        "-quiet",
+        action="store_true",
+        default=False,
+        help="Run clang-tidy in quiet mode",
+    )
+    parser.add_argument(
+        "-load",
+        dest="plugins",
+        action="append",
+        default=[],
+        help="Load the specified plugin in clang-tidy.",
+    )
+
+    clang_tidy_args = []
+    argv = sys.argv[1:]
+    if "--" in argv:
+        clang_tidy_args.extend(argv[argv.index("--") :])
+        argv = argv[: argv.index("--")]
+
+    args = parser.parse_args(argv)
+
+    # Extract changed lines for each file.
+    filename = None
+    lines_by_file = {}
+    for line in sys.stdin:
+        match = re.search('^\+\+\+\ "?(.*?/){%s}([^ \t\n"]*)' % args.p, line)
+        if match:
+            filename = match.group(2)
+        if filename is None:
+            continue
+
+        if args.regex is not None:
+            if not re.match("^%s$" % args.regex, filename):
+                continue
+        else:
+            if not re.match("^%s$" % args.iregex, filename, re.IGNORECASE):
+                continue
+
+        match = re.search("^@@.*\+(\d+)(,(\d+))?", line)
+        if match:
+            start_line = int(match.group(1))
+            line_count = 1
+            if match.group(3):
+                line_count = int(match.group(3))
+            if line_count == 0:
+                continue
+            end_line = start_line + line_count - 1
+            lines_by_file.setdefault(filename, []).append([start_line, end_line])
+
+    if not any(lines_by_file):
+        print("No relevant changes found.")
+        sys.exit(0)
+
+    max_task_count = args.j
+    if max_task_count == 0:
+        max_task_count = multiprocessing.cpu_count()
+    max_task_count = min(len(lines_by_file), max_task_count)
+
+    tmpdir = None
+    if yaml and args.export_fixes:
+        tmpdir = tempfile.mkdtemp()
+
+    # Tasks for clang-tidy.
+    task_queue = queue.Queue(max_task_count)
+    # A lock for console output.
+    lock = threading.Lock()
+
+    # Run a pool of clang-tidy workers.
+    start_workers(max_task_count, run_tidy, task_queue, lock, args.timeout)
+
+    # Form the common args list.
+    common_clang_tidy_args = []
+    if args.fix:
+        common_clang_tidy_args.append("-fix")
+    if args.checks != "":
+        common_clang_tidy_args.append("-checks=" + args.checks)
+    if args.quiet:
+        common_clang_tidy_args.append("-quiet")
+    if args.build_path is not None:
+        common_clang_tidy_args.append("-p=%s" % args.build_path)
+    if args.use_color:
+        common_clang_tidy_args.append("--use-color")
+    for arg in args.extra_arg:
+        common_clang_tidy_args.append("-extra-arg=%s" % arg)
+    for arg in args.extra_arg_before:
+        common_clang_tidy_args.append("-extra-arg-before=%s" % arg)
+    for plugin in args.plugins:
+        common_clang_tidy_args.append("-load=%s" % plugin)
+
+    for name in lines_by_file:
+        line_filter_json = json.dumps(
+            [{"name": name, "lines": lines_by_file[name]}], separators=(",", ":")
+        )
+
+        # Run clang-tidy on files containing changes.
+        command = [args.clang_tidy_binary]
+        command.append("-line-filter=" + line_filter_json)
+        if yaml and args.export_fixes:
+            # Get a temporary file. We immediately close the handle so clang-tidy can
+            # overwrite it.
+            (handle, tmp_name) = tempfile.mkstemp(suffix=".yaml", dir=tmpdir)
+            os.close(handle)
+            command.append("-export-fixes=" + tmp_name)
+        command.extend(common_clang_tidy_args)
+        command.append(name)
+        command.extend(clang_tidy_args)
+
+        task_queue.put(command)
+
+    # Wait for all threads to be done.
+    task_queue.join()
+
     if yaml and args.export_fixes:
-      # Get a temporary file. We immediately close the handle so clang-tidy can
-      # overwrite it.
-      (handle, tmp_name) = tempfile.mkstemp(suffix='.yaml', dir=tmpdir)
-      os.close(handle)
-      command.append('-export-fixes=' + tmp_name)
-    command.extend(common_clang_tidy_args)
-    command.append(name)
-    command.extend(clang_tidy_args)
-
-    task_queue.put(command)
-
-  # Wait for all threads to be done.
-  task_queue.join()
-
-  if yaml and args.export_fixes:
-    print('Writing fixes to ' + args.export_fixes + ' ...')
-    try:
-      merge_replacement_files(tmpdir, args.export_fixes)
-    except:
-      sys.stderr.write('Error exporting fixes.\n')
-      traceback.print_exc()
-
-  if tmpdir:
-    shutil.rmtree(tmpdir)
-
-
-if __name__ == '__main__':
-  main()
+        print("Writing fixes to " + args.export_fixes + " ...")
+        try:
+            merge_replacement_files(tmpdir, args.export_fixes)
+        except:
+            sys.stderr.write("Error exporting fixes.\n")
+            traceback.print_exc()
+
+    if tmpdir:
+        shutil.rmtree(tmpdir)
+
+
+if __name__ == "__main__":
+    main()

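For context on the -line-filter argument assembled in the loop above: each
changed file maps to a list of [start, end] line ranges, serialized compactly
by json.dumps. A runnable sketch with a hypothetical file and ranges:

    import json

    # Hypothetical changed ranges for one file.
    lines_by_file = {"foo.cpp": [[10, 12], [40, 40]]}
    name = "foo.cpp"
    line_filter = json.dumps(
        [{"name": name, "lines": lines_by_file[name]}], separators=(",", ":")
    )
    print(line_filter)  # [{"name":"foo.cpp","lines":[[10,12],[40,40]]}]
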
diff --git a/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py b/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py
index 30e29182908d7..53dc851149fd7 100755
--- a/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py
+++ b/clang-tools-extra/clang-tidy/tool/run-clang-tidy.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 #
-#===- run-clang-tidy.py - Parallel clang-tidy runner --------*- python -*--===#
+# ===- run-clang-tidy.py - Parallel clang-tidy runner --------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 # FIXME: Integrate with clang-tidy-diff.py
 
 
@@ -51,347 +51,443 @@
 import traceback
 
 try:
-  import yaml
+    import yaml
 except ImportError:
-  yaml = None
+    yaml = None
 
 
 def strtobool(val):
-  """Convert a string representation of truth to a bool following LLVM's CLI argument parsing."""
+    """Convert a string representation of truth to a bool following LLVM's CLI argument parsing."""
 
-  val = val.lower()
-  if val in ['', 'true', '1']:
-    return True
-  elif val in ['false', '0']:
-    return False
+    val = val.lower()
+    if val in ["", "true", "1"]:
+        return True
+    elif val in ["false", "0"]:
+        return False
 
-  # Return ArgumentTypeError so that argparse does not substitute its own error message
-  raise argparse.ArgumentTypeError(
-    "'{}' is invalid value for boolean argument! Try 0 or 1.".format(val)
-  )
+    # Return ArgumentTypeError so that argparse does not substitute its own error message
+    raise argparse.ArgumentTypeError(
+        "'{}' is invalid value for boolean argument! Try 0 or 1.".format(val)
+    )
 
 
 def find_compilation_database(path):
-  """Adjusts the directory until a compilation database is found."""
-  result = os.path.realpath('./')
-  while not os.path.isfile(os.path.join(result, path)):
-    parent = os.path.dirname(result)
-    if result == parent:
-      print('Error: could not find compilation database.')
-      sys.exit(1)
-    result = parent
-  return result
+    """Adjusts the directory until a compilation database is found."""
+    result = os.path.realpath("./")
+    while not os.path.isfile(os.path.join(result, path)):
+        parent = os.path.dirname(result)
+        if result == parent:
+            print("Error: could not find compilation database.")
+            sys.exit(1)
+        result = parent
+    return result
 
 
 def make_absolute(f, directory):
-  if os.path.isabs(f):
-    return f
-  return os.path.normpath(os.path.join(directory, f))
-
-
-def get_tidy_invocation(f, clang_tidy_binary, checks, tmpdir, build_path,
-                        header_filter, allow_enabling_alpha_checkers,
-                        extra_arg, extra_arg_before, quiet, config_file_path,
-                        config, line_filter, use_color, plugins):
-  """Gets a command line for clang-tidy."""
-  start = [clang_tidy_binary]
-  if allow_enabling_alpha_checkers:
-    start.append('-allow-enabling-analyzer-alpha-checkers')
-  if header_filter is not None:
-    start.append('-header-filter=' + header_filter)
-  if line_filter is not None:
-    start.append('-line-filter=' + line_filter)
-  if use_color is not None:
-    if use_color:
-      start.append('--use-color')
-    else:
-      start.append('--use-color=false')
-  if checks:
-    start.append('-checks=' + checks)
-  if tmpdir is not None:
-    start.append('-export-fixes')
-    # Get a temporary file. We immediately close the handle so clang-tidy can
-    # overwrite it.
-    (handle, name) = tempfile.mkstemp(suffix='.yaml', dir=tmpdir)
-    os.close(handle)
-    start.append(name)
-  for arg in extra_arg:
-      start.append('-extra-arg=%s' % arg)
-  for arg in extra_arg_before:
-      start.append('-extra-arg-before=%s' % arg)
-  start.append('-p=' + build_path)
-  if quiet:
-      start.append('-quiet')
-  if config_file_path:
-      start.append('--config-file=' + config_file_path)
-  elif config:
-      start.append('-config=' + config)
-  for plugin in plugins:
-      start.append('-load=' + plugin)
-  start.append(f)
-  return start
+    if os.path.isabs(f):
+        return f
+    return os.path.normpath(os.path.join(directory, f))
+
+
+def get_tidy_invocation(
+    f,
+    clang_tidy_binary,
+    checks,
+    tmpdir,
+    build_path,
+    header_filter,
+    allow_enabling_alpha_checkers,
+    extra_arg,
+    extra_arg_before,
+    quiet,
+    config_file_path,
+    config,
+    line_filter,
+    use_color,
+    plugins,
+):
+    """Gets a command line for clang-tidy."""
+    start = [clang_tidy_binary]
+    if allow_enabling_alpha_checkers:
+        start.append("-allow-enabling-analyzer-alpha-checkers")
+    if header_filter is not None:
+        start.append("-header-filter=" + header_filter)
+    if line_filter is not None:
+        start.append("-line-filter=" + line_filter)
+    if use_color is not None:
+        if use_color:
+            start.append("--use-color")
+        else:
+            start.append("--use-color=false")
+    if checks:
+        start.append("-checks=" + checks)
+    if tmpdir is not None:
+        start.append("-export-fixes")
+        # Get a temporary file. We immediately close the handle so clang-tidy can
+        # overwrite it.
+        (handle, name) = tempfile.mkstemp(suffix=".yaml", dir=tmpdir)
+        os.close(handle)
+        start.append(name)
+    for arg in extra_arg:
+        start.append("-extra-arg=%s" % arg)
+    for arg in extra_arg_before:
+        start.append("-extra-arg-before=%s" % arg)
+    start.append("-p=" + build_path)
+    if quiet:
+        start.append("-quiet")
+    if config_file_path:
+        start.append("--config-file=" + config_file_path)
+    elif config:
+        start.append("-config=" + config)
+    for plugin in plugins:
+        start.append("-load=" + plugin)
+    start.append(f)
+    return start
 
 
 def merge_replacement_files(tmpdir, mergefile):
-  """Merge all replacement files in a directory into a single file"""
-  # The fixes suggested by clang-tidy >= 4.0.0 are given under
-  # the top level key 'Diagnostics' in the output yaml files
-  mergekey = "Diagnostics"
-  merged=[]
-  for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
-    content = yaml.safe_load(open(replacefile, 'r'))
-    if not content:
-      continue # Skip empty files.
-    merged.extend(content.get(mergekey, []))
-
-  if merged:
-    # MainSourceFile: The key is required by the definition inside
-    # include/clang/Tooling/ReplacementsYaml.h, but the value
-    # is actually never used inside clang-apply-replacements,
-    # so we set it to '' here.
-    output = {'MainSourceFile': '', mergekey: merged}
-    with open(mergefile, 'w') as out:
-      yaml.safe_dump(output, out)
-  else:
-    # Empty the file:
-    open(mergefile, 'w').close()
+    """Merge all replacement files in a directory into a single file"""
+    # The fixes suggested by clang-tidy >= 4.0.0 are given under
+    # the top level key 'Diagnostics' in the output yaml files
+    mergekey = "Diagnostics"
+    merged = []
+    for replacefile in glob.iglob(os.path.join(tmpdir, "*.yaml")):
+        content = yaml.safe_load(open(replacefile, "r"))
+        if not content:
+            continue  # Skip empty files.
+        merged.extend(content.get(mergekey, []))
+
+    if merged:
+        # MainSourceFile: The key is required by the definition inside
+        # include/clang/Tooling/ReplacementsYaml.h, but the value
+        # is actually never used inside clang-apply-replacements,
+        # so we set it to '' here.
+        output = {"MainSourceFile": "", mergekey: merged}
+        with open(mergefile, "w") as out:
+            yaml.safe_dump(output, out)
+    else:
+        # Empty the file:
+        open(mergefile, "w").close()
 
 
 def find_binary(arg, name, build_path):
-  """Get the path for a binary or exit"""
-  if arg:
-    if shutil.which(arg):
-      return arg
+    """Get the path for a binary or exit"""
+    if arg:
+        if shutil.which(arg):
+            return arg
+        else:
+            raise SystemExit(
+                "error: passed binary '{}' was not found or is not executable".format(
+                    arg
+                )
+            )
+
+    built_path = os.path.join(build_path, "bin", name)
+    binary = shutil.which(name) or shutil.which(built_path)
+    if binary:
+        return binary
     else:
-      raise SystemExit(
-        "error: passed binary '{}' was not found or is not executable"
-        .format(arg))
-
-  built_path = os.path.join(build_path, "bin", name)
-  binary = shutil.which(name) or shutil.which(built_path)
-  if binary:
-    return binary
-  else:
-    raise SystemExit(
-      "error: failed to find {} in $PATH or at {}"
-      .format(name, built_path))
+        raise SystemExit(
+            "error: failed to find {} in $PATH or at {}".format(name, built_path)
+        )
 
 
 def apply_fixes(args, clang_apply_replacements_binary, tmpdir):
-  """Calls clang-apply-fixes on a given directory."""
-  invocation = [clang_apply_replacements_binary]
-  invocation.append('-ignore-insert-conflict')
-  if args.format:
-    invocation.append('-format')
-  if args.style:
-    invocation.append('-style=' + args.style)
-  invocation.append(tmpdir)
-  subprocess.call(invocation)
-
-
-def run_tidy(args, clang_tidy_binary, tmpdir, build_path, queue, lock,
-             failed_files):
-  """Takes filenames out of queue and runs clang-tidy on them."""
-  while True:
-    name = queue.get()
-    invocation = get_tidy_invocation(name, clang_tidy_binary, args.checks,
-                                     tmpdir, build_path, args.header_filter,
-                                     args.allow_enabling_alpha_checkers,
-                                     args.extra_arg, args.extra_arg_before,
-                                     args.quiet, args.config_file, args.config,
-                                     args.line_filter, args.use_color,
-                                     args.plugins)
-
-    proc = subprocess.Popen(invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    output, err = proc.communicate()
-    if proc.returncode != 0:
-      if proc.returncode < 0:
-        msg = "%s: terminated by signal %d\n" % (name, -proc.returncode)
-        err += msg.encode('utf-8')
-      failed_files.append(name)
-    with lock:
-      sys.stdout.write(' '.join(invocation) + '\n' + output.decode('utf-8'))
-      if len(err) > 0:
-        sys.stdout.flush()
-        sys.stderr.write(err.decode('utf-8'))
-    queue.task_done()
+    """Calls clang-apply-fixes on a given directory."""
+    invocation = [clang_apply_replacements_binary]
+    invocation.append("-ignore-insert-conflict")
+    if args.format:
+        invocation.append("-format")
+    if args.style:
+        invocation.append("-style=" + args.style)
+    invocation.append(tmpdir)
+    subprocess.call(invocation)
+
+
+def run_tidy(args, clang_tidy_binary, tmpdir, build_path, queue, lock, failed_files):
+    """Takes filenames out of queue and runs clang-tidy on them."""
+    while True:
+        name = queue.get()
+        invocation = get_tidy_invocation(
+            name,
+            clang_tidy_binary,
+            args.checks,
+            tmpdir,
+            build_path,
+            args.header_filter,
+            args.allow_enabling_alpha_checkers,
+            args.extra_arg,
+            args.extra_arg_before,
+            args.quiet,
+            args.config_file,
+            args.config,
+            args.line_filter,
+            args.use_color,
+            args.plugins,
+        )
+
+        proc = subprocess.Popen(
+            invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        )
+        output, err = proc.communicate()
+        if proc.returncode != 0:
+            if proc.returncode < 0:
+                msg = "%s: terminated by signal %d\n" % (name, -proc.returncode)
+                err += msg.encode("utf-8")
+            failed_files.append(name)
+        with lock:
+            sys.stdout.write(" ".join(invocation) + "\n" + output.decode("utf-8"))
+            if len(err) > 0:
+                sys.stdout.flush()
+                sys.stderr.write(err.decode("utf-8"))
+        queue.task_done()
 
 
 def main():
-  parser = argparse.ArgumentParser(description='Runs clang-tidy over all files '
-                                   'in a compilation database. Requires '
-                                   'clang-tidy and clang-apply-replacements in '
-                                   '$PATH or in your build directory.')
-  parser.add_argument('-allow-enabling-alpha-checkers',
-                      action='store_true', help='allow alpha checkers from '
-                                                'clang-analyzer.')
-  parser.add_argument('-clang-tidy-binary', metavar='PATH',
-                      help='path to clang-tidy binary')
-  parser.add_argument('-clang-apply-replacements-binary', metavar='PATH',
-                      help='path to clang-apply-replacements binary')
-  parser.add_argument('-checks', default=None,
-                      help='checks filter, when not specified, use clang-tidy '
-                      'default')
-  config_group = parser.add_mutually_exclusive_group()
-  config_group.add_argument('-config', default=None,
-                      help='Specifies a configuration in YAML/JSON format: '
-                      '  -config="{Checks: \'*\', '
-                      '                       CheckOptions: {x: y}}" '
-                      'When the value is empty, clang-tidy will '
-                      'attempt to find a file named .clang-tidy for '
-                      'each source file in its parent directories.')
-  config_group.add_argument('-config-file', default=None,
-                      help='Specify the path of .clang-tidy or custom config '
-                      'file: e.g. -config-file=/some/path/myTidyConfigFile. '
-                      'This option internally works exactly the same way as '
-                      '-config option after reading specified config file. '
-                      'Use either -config-file or -config, not both.')
-  parser.add_argument('-header-filter', default=None,
-                      help='regular expression matching the names of the '
-                      'headers to output diagnostics from. Diagnostics from '
-                      'the main file of each translation unit are always '
-                      'displayed.')
-  parser.add_argument('-line-filter', default=None,
-                      help='List of files with line ranges to filter the '
-                      'warnings.')
-  if yaml:
-    parser.add_argument('-export-fixes', metavar='filename', dest='export_fixes',
-                        help='Create a yaml file to store suggested fixes in, '
-                        'which can be applied with clang-apply-replacements.')
-  parser.add_argument('-j', type=int, default=0,
-                      help='number of tidy instances to be run in parallel.')
-  parser.add_argument('files', nargs='*', default=['.*'],
-                      help='files to be processed (regex on path)')
-  parser.add_argument('-fix', action='store_true', help='apply fix-its')
-  parser.add_argument('-format', action='store_true', help='Reformat code '
-                      'after applying fixes')
-  parser.add_argument('-style', default='file', help='The style of reformat '
-                      'code after applying fixes')
-  parser.add_argument('-use-color', type=strtobool, nargs='?', const=True,
-                      help='Use colors in diagnostics, overriding clang-tidy\'s'
-                      ' default behavior. This option overrides the \'UseColor'
-                      '\' option in .clang-tidy file, if any.')
-  parser.add_argument('-p', dest='build_path',
-                      help='Path used to read a compile command database.')
-  parser.add_argument('-extra-arg', dest='extra_arg',
-                      action='append', default=[],
-                      help='Additional argument to append to the compiler '
-                      'command line.')
-  parser.add_argument('-extra-arg-before', dest='extra_arg_before',
-                      action='append', default=[],
-                      help='Additional argument to prepend to the compiler '
-                      'command line.')
-  parser.add_argument('-quiet', action='store_true',
-                      help='Run clang-tidy in quiet mode')
-  parser.add_argument('-load', dest='plugins',
-                      action='append', default=[],
-                      help='Load the specified plugin in clang-tidy.')
-  args = parser.parse_args()
-
-  db_path = 'compile_commands.json'
-
-  if args.build_path is not None:
-    build_path = args.build_path
-  else:
-    # Find our database
-    build_path = find_compilation_database(db_path)
-
-  clang_tidy_binary = find_binary(args.clang_tidy_binary, "clang-tidy",
-                                  build_path)
-
-  tmpdir = None
-  if args.fix:
-    clang_apply_replacements_binary = find_binary(
-      args.clang_apply_replacements_binary, "clang-apply-replacements",
-      build_path)
-    tmpdir = tempfile.mkdtemp()
-
-  try:
-    invocation = get_tidy_invocation("", clang_tidy_binary, args.checks,
-                                     None, build_path, args.header_filter,
-                                     args.allow_enabling_alpha_checkers,
-                                     args.extra_arg, args.extra_arg_before,
-                                     args.quiet, args.config_file, args.config,
-                                     args.line_filter, args.use_color,
-                                     args.plugins)
-    invocation.append('-list-checks')
-    invocation.append('-')
-    if args.quiet:
-      # Even with -quiet we still want to check if we can call clang-tidy.
-      with open(os.devnull, 'w') as dev_null:
-        subprocess.check_call(invocation, stdout=dev_null)
+    parser = argparse.ArgumentParser(
+        description="Runs clang-tidy over all files "
+        "in a compilation database. Requires "
+        "clang-tidy and clang-apply-replacements in "
+        "$PATH or in your build directory."
+    )
+    parser.add_argument(
+        "-allow-enabling-alpha-checkers",
+        action="store_true",
+        help="allow alpha checkers from " "clang-analyzer.",
+    )
+    parser.add_argument(
+        "-clang-tidy-binary", metavar="PATH", help="path to clang-tidy binary"
+    )
+    parser.add_argument(
+        "-clang-apply-replacements-binary",
+        metavar="PATH",
+        help="path to clang-apply-replacements binary",
+    )
+    parser.add_argument(
+        "-checks",
+        default=None,
+        help="checks filter, when not specified, use clang-tidy " "default",
+    )
+    config_group = parser.add_mutually_exclusive_group()
+    config_group.add_argument(
+        "-config",
+        default=None,
+        help="Specifies a configuration in YAML/JSON format: "
+        "  -config=\"{Checks: '*', "
+        '                       CheckOptions: {x: y}}" '
+        "When the value is empty, clang-tidy will "
+        "attempt to find a file named .clang-tidy for "
+        "each source file in its parent directories.",
+    )
+    config_group.add_argument(
+        "-config-file",
+        default=None,
+        help="Specify the path of .clang-tidy or custom config "
+        "file: e.g. -config-file=/some/path/myTidyConfigFile. "
+        "This option internally works exactly the same way as "
+        "-config option after reading specified config file. "
+        "Use either -config-file or -config, not both.",
+    )
+    parser.add_argument(
+        "-header-filter",
+        default=None,
+        help="regular expression matching the names of the "
+        "headers to output diagnostics from. Diagnostics from "
+        "the main file of each translation unit are always "
+        "displayed.",
+    )
+    parser.add_argument(
+        "-line-filter",
+        default=None,
+        help="List of files with line ranges to filter the" "warnings.",
+    )
+    if yaml:
+        parser.add_argument(
+            "-export-fixes",
+            metavar="filename",
+            dest="export_fixes",
+            help="Create a yaml file to store suggested fixes in, "
+            "which can be applied with clang-apply-replacements.",
+        )
+    parser.add_argument(
+        "-j",
+        type=int,
+        default=0,
+        help="number of tidy instances to be run in parallel.",
+    )
+    parser.add_argument(
+        "files", nargs="*", default=[".*"], help="files to be processed (regex on path)"
+    )
+    parser.add_argument("-fix", action="store_true", help="apply fix-its")
+    parser.add_argument(
+        "-format", action="store_true", help="Reformat code " "after applying fixes"
+    )
+    parser.add_argument(
+        "-style",
+        default="file",
+        help="The style of reformat " "code after applying fixes",
+    )
+    parser.add_argument(
+        "-use-color",
+        type=strtobool,
+        nargs="?",
+        const=True,
+        help="Use colors in diagnostics, overriding clang-tidy's"
+        " default behavior. This option overrides the 'UseColor"
+        "' option in .clang-tidy file, if any.",
+    )
+    parser.add_argument(
+        "-p", dest="build_path", help="Path used to read a compile command database."
+    )
+    parser.add_argument(
+        "-extra-arg",
+        dest="extra_arg",
+        action="append",
+        default=[],
+        help="Additional argument to append to the compiler " "command line.",
+    )
+    parser.add_argument(
+        "-extra-arg-before",
+        dest="extra_arg_before",
+        action="append",
+        default=[],
+        help="Additional argument to prepend to the compiler " "command line.",
+    )
+    parser.add_argument(
+        "-quiet", action="store_true", help="Run clang-tidy in quiet mode"
+    )
+    parser.add_argument(
+        "-load",
+        dest="plugins",
+        action="append",
+        default=[],
+        help="Load the specified plugin in clang-tidy.",
+    )
+    args = parser.parse_args()
+
+    db_path = "compile_commands.json"
+
+    if args.build_path is not None:
+        build_path = args.build_path
     else:
-      subprocess.check_call(invocation)
-  except:
-    print("Unable to run clang-tidy.", file=sys.stderr)
-    sys.exit(1)
-
-  # Load the database and extract all files.
-  database = json.load(open(os.path.join(build_path, db_path)))
-  files = set([make_absolute(entry['file'], entry['directory'])
-           for entry in database])
-
-  max_task = args.j
-  if max_task == 0:
-    max_task = multiprocessing.cpu_count()
-
-  # Build up a big regexy filter from all command line arguments.
-  file_name_re = re.compile('|'.join(args.files))
-
-  return_code = 0
-  try:
-    # Spin up a bunch of tidy-launching threads.
-    task_queue = queue.Queue(max_task)
-    # List of files with a non-zero return code.
-    failed_files = []
-    lock = threading.Lock()
-    for _ in range(max_task):
-      t = threading.Thread(target=run_tidy,
-                           args=(args, clang_tidy_binary, tmpdir, build_path,
-                                 task_queue, lock, failed_files))
-      t.daemon = True
-      t.start()
-
-    # Fill the queue with files.
-    for name in files:
-      if file_name_re.search(name):
-        task_queue.put(name)
-
-    # Wait for all threads to be done.
-    task_queue.join()
-    if len(failed_files):
-      return_code = 1
-
-  except KeyboardInterrupt:
-    # This is a sad hack. Unfortunately subprocess goes
-    # bonkers with ctrl-c and we start forking merrily.
-    print('\nCtrl-C detected, goodbye.')
-    if tmpdir:
-      shutil.rmtree(tmpdir)
-    os.kill(0, 9)
+        # Find our database
+        build_path = find_compilation_database(db_path)
+
+    clang_tidy_binary = find_binary(args.clang_tidy_binary, "clang-tidy", build_path)
+
+    tmpdir = None
+    if args.fix:
+        clang_apply_replacements_binary = find_binary(
+            args.clang_apply_replacements_binary, "clang-apply-replacements", build_path
+        )
+        tmpdir = tempfile.mkdtemp()
 
-  if yaml and args.export_fixes:
-    print('Writing fixes to ' + args.export_fixes + ' ...')
     try:
-      merge_replacement_files(tmpdir, args.export_fixes)
+        invocation = get_tidy_invocation(
+            "",
+            clang_tidy_binary,
+            args.checks,
+            None,
+            build_path,
+            args.header_filter,
+            args.allow_enabling_alpha_checkers,
+            args.extra_arg,
+            args.extra_arg_before,
+            args.quiet,
+            args.config_file,
+            args.config,
+            args.line_filter,
+            args.use_color,
+            args.plugins,
+        )
+        invocation.append("-list-checks")
+        invocation.append("-")
+        if args.quiet:
+            # Even with -quiet we still want to check if we can call clang-tidy.
+            with open(os.devnull, "w") as dev_null:
+                subprocess.check_call(invocation, stdout=dev_null)
+        else:
+            subprocess.check_call(invocation)
     except:
-      print('Error exporting fixes.\n', file=sys.stderr)
-      traceback.print_exc()
-      return_code=1
+        print("Unable to run clang-tidy.", file=sys.stderr)
+        sys.exit(1)
 
-  if args.fix:
-    print('Applying fixes ...')
+    # Load the database and extract all files.
+    database = json.load(open(os.path.join(build_path, db_path)))
+    files = set(
+        [make_absolute(entry["file"], entry["directory"]) for entry in database]
+    )
+
+    max_task = args.j
+    if max_task == 0:
+        max_task = multiprocessing.cpu_count()
+
+    # Build up a big regexy filter from all command line arguments.
+    file_name_re = re.compile("|".join(args.files))
+
+    return_code = 0
     try:
-      apply_fixes(args, clang_apply_replacements_binary, tmpdir)
-    except:
-      print('Error applying fixes.\n', file=sys.stderr)
-      traceback.print_exc()
-      return_code = 1
+        # Spin up a bunch of tidy-launching threads.
+        task_queue = queue.Queue(max_task)
+        # List of files with a non-zero return code.
+        failed_files = []
+        lock = threading.Lock()
+        for _ in range(max_task):
+            t = threading.Thread(
+                target=run_tidy,
+                args=(
+                    args,
+                    clang_tidy_binary,
+                    tmpdir,
+                    build_path,
+                    task_queue,
+                    lock,
+                    failed_files,
+                ),
+            )
+            t.daemon = True
+            t.start()
+
+        # Fill the queue with files.
+        for name in files:
+            if file_name_re.search(name):
+                task_queue.put(name)
+
+        # Wait for all threads to be done.
+        task_queue.join()
+        if len(failed_files):
+            return_code = 1
+
+    except KeyboardInterrupt:
+        # This is a sad hack. Unfortunately subprocess goes
+        # bonkers with ctrl-c and we start forking merrily.
+        print("\nCtrl-C detected, goodbye.")
+        if tmpdir:
+            shutil.rmtree(tmpdir)
+        os.kill(0, 9)
+
+    if yaml and args.export_fixes:
+        print("Writing fixes to " + args.export_fixes + " ...")
+        try:
+            merge_replacement_files(tmpdir, args.export_fixes)
+        except:
+            print("Error exporting fixes.\n", file=sys.stderr)
+            traceback.print_exc()
+            return_code = 1
+
+    if args.fix:
+        print("Applying fixes ...")
+        try:
+            apply_fixes(args, clang_apply_replacements_binary, tmpdir)
+        except:
+            print("Error applying fixes.\n", file=sys.stderr)
+            traceback.print_exc()
+            return_code = 1
 
-  if tmpdir:
-    shutil.rmtree(tmpdir)
-  sys.exit(return_code)
+    if tmpdir:
+        shutil.rmtree(tmpdir)
+    sys.exit(return_code)
 
 
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()

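run-clang-tidy.py's parallelism, visible in the hunk above, is the standard-library producer/consumer pattern: a bounded queue.Queue, daemon worker threads, and task_done()/join() to detect completion. A stripped-down sketch of the same pattern, with a trivial stand-in for the real clang-tidy invocation:

    import queue
    import threading

    def worker(task_queue, lock, results):
        while True:
            item = task_queue.get()
            with lock:                 # Serialize shared output, as run_tidy does.
                results.append(item.upper())
            task_queue.task_done()     # Exactly one task_done() per get().

    task_queue = queue.Queue(4)        # Bounded, like queue.Queue(max_task).
    lock = threading.Lock()
    results = []
    for _ in range(4):
        t = threading.Thread(target=worker, args=(task_queue, lock, results))
        t.daemon = True                # Workers exit when the main thread does.
        t.start()

    for name in ["a.cpp", "b.cpp", "c.cpp"]:
        task_queue.put(name)
    task_queue.join()                  # Returns once every put() is task_done().
    print(sorted(results))             # ['A.CPP', 'B.CPP', 'C.CPP']
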
diff --git a/clang-tools-extra/clangd/TidyFastChecks.py b/clang-tools-extra/clangd/TidyFastChecks.py
index 2132a960793da..b544551e6e041 100755
--- a/clang-tools-extra/clangd/TidyFastChecks.py
+++ b/clang-tools-extra/clangd/TidyFastChecks.py
@@ -21,59 +21,78 @@
 # Checks faster than FAST_THRESHOLD are fast, slower than SLOW_THRESHOLD slow.
 # If a check is in between, we stick with our previous decision. This avoids
 # enabling/disabling checks between releases due to random measurement jitter.
-FAST_THRESHOLD = 8 # percent
+FAST_THRESHOLD = 8  # percent
 SLOW_THRESHOLD = 15
 
 parser = argparse.ArgumentParser()
-parser.add_argument('--target', help='X-macro output file. '
-    'If it exists, existing contents will be used for hysteresis',
-    default='clang-tools-extra/clangd/TidyFastChecks.inc')
-parser.add_argument('--source', help='Source file to benchmark tidy checks',
-    default='clang/lib/Sema/Sema.cpp')
-parser.add_argument('--clangd', help='clangd binary to invoke',
-    default='build/bin/clangd')
-parser.add_argument('--checks', help='check glob to run', default='*')
-parser.add_argument('--verbose', help='log clangd output', action='store_true')
+parser.add_argument(
+    "--target",
+    help="X-macro output file. "
+    "If it exists, existing contents will be used for hysteresis",
+    default="clang-tools-extra/clangd/TidyFastChecks.inc",
+)
+parser.add_argument(
+    "--source",
+    help="Source file to benchmark tidy checks",
+    default="clang/lib/Sema/Sema.cpp",
+)
+parser.add_argument(
+    "--clangd", help="clangd binary to invoke", default="build/bin/clangd"
+)
+parser.add_argument("--checks", help="check glob to run", default="*")
+parser.add_argument("--verbose", help="log clangd output", action="store_true")
 args = parser.parse_args()
 
 # Use the preprocessor to extract the list of previously-fast checks.
 def read_old_fast(path):
-    text = subprocess.check_output(["cpp",
-        "-P",            # Omit GNU line markers
-        "-nostdinc",     # Don't include stdc-predef.h
-        "-DFAST(C,T)=C", # Print fast checks only
-        path])
+    text = subprocess.check_output(
+        [
+            "cpp",
+            "-P",  # Omit GNU line markers
+            "-nostdinc",  # Don't include stdc-predef.h
+            "-DFAST(C,T)=C",  # Print fast checks only
+            path,
+        ]
+    )
     for line in text.splitlines():
         if line.strip():
-            yield line.strip().decode('utf-8')
+            yield line.strip().decode("utf-8")
+
+
 old_fast = list(read_old_fast(args.target)) if os.path.exists(args.target) else []
 print(f"Old fast checks: {old_fast}", file=sys.stderr)
 
 # Runs clangd --check --check-tidy-time.
 # Yields (check, percent-overhead) pairs.
 def measure():
-    process = subprocess.Popen([args.clangd,
-        "--check=" + args.source,
-        "--check-locations=0", # Skip useless slow steps.
-        "--check-tidy-time=" + args.checks],
-        stderr=subprocess.PIPE)
+    process = subprocess.Popen(
+        [
+            args.clangd,
+            "--check=" + args.source,
+            "--check-locations=0",  # Skip useless slow steps.
+            "--check-tidy-time=" + args.checks,
+        ],
+        stderr=subprocess.PIPE,
+    )
     recording = False
     for line in iter(process.stderr.readline, b""):
         if args.verbose:
             print("clangd> ", line, file=sys.stderr)
         if not recording:
-            if b'Timing AST build with individual clang-tidy checks' in line:  
-               recording = True
+            if b"Timing AST build with individual clang-tidy checks" in line:
+                recording = True
             continue
-        if b'Finished individual clang-tidy checks' in line:
+        if b"Finished individual clang-tidy checks" in line:
             return
-        match = re.search(rb'(\S+) = (\S+)%', line)
+        match = re.search(rb"(\S+) = (\S+)%", line)
         if match:
-            yield (match.group(1).decode('utf-8'), float(match.group(2)))
+            yield (match.group(1).decode("utf-8"), float(match.group(2)))
+
 
-with open(args.target, 'w', buffering=1) as target:
+with open(args.target, "w", buffering=1) as target:
     # Produce an includable X-macros fragment with our decisions.
-    print(f"""// This file is generated, do not edit it directly!
+    print(
+        f"""// This file is generated, do not edit it directly!
 // Deltas are percentage regression in parsing {args.source}
 #ifndef FAST
 #define FAST(CHECK, DELTA)
@@ -81,7 +100,9 @@ def measure():
 #ifndef SLOW
 #define SLOW(CHECK, DELTA)
 #endif
-""", file=target)
+""",
+        file=target,
+    )
 
     for check, time in measure():
         threshold = SLOW_THRESHOLD if check in old_fast else FAST_THRESHOLD
@@ -89,7 +110,10 @@ def measure():
         print(f"{decision} {check} {time}% <= {threshold}%", file=sys.stderr)
         print(f"{decision}({check}, {time})", file=target)
 
-    print("""
+    print(
+        """
 #undef FAST
 #undef SLOW
-""", file=target)
+""",
+        file=target,
+    )

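The thresholds at the top of TidyFastChecks.py implement hysteresis: a check already listed as FAST is only demoted when it exceeds SLOW_THRESHOLD, while any other check must come in under FAST_THRESHOLD to be promoted, so measurement jitter inside the 8-15% band never flips a decision between releases. The rule, isolated from the script (check names and timings here are invented):

    FAST_THRESHOLD = 8   # percent
    SLOW_THRESHOLD = 15

    def classify(check, time, old_fast):
        # Previously-fast checks get the lenient bar; everything else the strict one.
        threshold = SLOW_THRESHOLD if check in old_fast else FAST_THRESHOLD
        return "FAST" if time <= threshold else "SLOW"

    old_fast = {"bugprone-example"}
    print(classify("bugprone-example", 12.0, old_fast))  # FAST: 12 <= 15
    print(classify("misc-example", 12.0, old_fast))      # SLOW: 12 > 8
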
diff --git a/clang-tools-extra/clangd/quality/CompletionModelCodegen.py b/clang-tools-extra/clangd/quality/CompletionModelCodegen.py
index 4f2356fceacb0..bcafaa2f441c8 100644
--- a/clang-tools-extra/clangd/quality/CompletionModelCodegen.py
+++ b/clang-tools-extra/clangd/quality/CompletionModelCodegen.py
@@ -28,19 +28,18 @@ def ns_begin(self):
 
     def ns_end(self):
         """Returns snippet for closing namespace declarations."""
-        close_ns = [
-            "} // namespace %s" % ns for ns in reversed(self.ns)]
+        close_ns = ["} // namespace %s" % ns for ns in reversed(self.ns)]
         return "\n".join(close_ns)
 
 
 def header_guard(filename):
-    '''Returns the header guard for the generated header.'''
+    """Returns the header guard for the generated header."""
     return "GENERATED_DECISION_FOREST_MODEL_%s_H" % filename.upper()
 
 
 def boost_node(n, label, next_label):
     """Returns code snippet for a leaf/boost node."""
-    return "%s: return %sf;" % (label, n['score'])
+    return "%s: return %sf;" % (label, n["score"])
 
 
 def if_greater_node(n, label, next_label):
@@ -51,7 +50,12 @@ def if_greater_node(n, label, next_label):
     Control falls through if condition is evaluated to false."""
     threshold = n["threshold"]
     return "%s: if (E.get%s() >= %s /*%s*/) goto %s;" % (
-        label, n['feature'], order_encode(threshold), threshold, next_label)
+        label,
+        n["feature"],
+        order_encode(threshold),
+        threshold,
+        next_label,
+    )
 
 
 def if_member_node(n, label, next_label):
@@ -59,21 +63,24 @@ def if_member_node(n, label, next_label):
     Jumps to true_label if the Example feature (ENUM) is present in the set of enum values
     described in the node.
     Control falls through if condition is evaluated to false."""
-    members = '|'.join([
-        "BIT(%s_type::%s)" % (n['feature'], member)
-        for member in n["set"]
-    ])
+    members = "|".join(
+        ["BIT(%s_type::%s)" % (n["feature"], member) for member in n["set"]]
+    )
     return "%s: if (E.get%s() & (%s)) goto %s;" % (
-        label, n['feature'], members, next_label)
+        label,
+        n["feature"],
+        members,
+        next_label,
+    )
 
 
 def node(n, label, next_label):
     """Returns code snippet for the node."""
     return {
-        'boost': boost_node,
-        'if_greater': if_greater_node,
-        'if_member': if_member_node,
-    }[n['operation']](n, label, next_label)
+        "boost": boost_node,
+        "if_greater": if_greater_node,
+        "if_member": if_member_node,
+    }[n["operation"]](n, label, next_label)
 
 
 def tree(t, tree_num, node_num):
@@ -97,18 +104,16 @@ def tree(t, tree_num, node_num):
         code.append(node(t, label=label, next_label="t%d" % (tree_num + 1)))
         return code, 1
 
-    false_code, false_size = tree(
-        t['else'], tree_num=tree_num, node_num=node_num+1)
+    false_code, false_size = tree(t["else"], tree_num=tree_num, node_num=node_num + 1)
 
-    true_node_num = node_num+false_size+1
+    true_node_num = node_num + false_size + 1
     true_label = "t%d_n%d" % (tree_num, true_node_num)
 
-    true_code, true_size = tree(
-        t['then'], tree_num=tree_num, node_num=true_node_num)
+    true_code, true_size = tree(t["then"], tree_num=tree_num, node_num=true_node_num)
 
     code.append(node(t, label=label, next_label=true_label))
 
-    return code+false_code+true_code, 1+false_size+true_size
+    return code + false_code + true_code, 1 + false_size + true_size
 
 
 def gen_header_code(features_json, cpp_class, filename):
@@ -127,23 +132,23 @@ class can be used to represent a code completion candidate.
         if f["kind"] == "NUMBER":
             # Floats are order-encoded to integers for faster comparison.
             setters.append(
-                "void set%s(float V) { %s = OrderEncode(V); }" % (
-                    feature, feature))
+                "void set%s(float V) { %s = OrderEncode(V); }" % (feature, feature)
+            )
         elif f["kind"] == "ENUM":
             setters.append(
-                "void set%s(unsigned V) { %s = 1LL << V; }" % (feature, feature))
+                "void set%s(unsigned V) { %s = 1LL << V; }" % (feature, feature)
+            )
         else:
             raise ValueError("Unhandled feature type.", f["kind"])
 
     # Class members represent all the features of the Example.
     class_members = [
-        "uint%d_t %s = 0;"
-        % (64 if f["kind"] == "ENUM" else 32, f['name'])
+        "uint%d_t %s = 0;" % (64 if f["kind"] == "ENUM" else 32, f["name"])
         for f in features_json
     ]
     getters = [
         "LLVM_ATTRIBUTE_ALWAYS_INLINE uint%d_t get%s() const { return %s; }"
-        % (64 if f["kind"] == "ENUM" else 32, f['name'], f['name'])
+        % (64 if f["kind"] == "ENUM" else 32, f["name"], f["name"])
         for f in features_json
     ]
     nline = "\n  "
@@ -173,25 +178,32 @@ class %s {
 float Evaluate(const %s&);
 %s
 #endif // %s
-""" % (guard, guard, cpp_class.ns_begin(), cpp_class.name,
+""" % (
+        guard,
+        guard,
+        cpp_class.ns_begin(),
+        cpp_class.name,
         nline.join(setters),
         nline.join(getters),
         nline.join(class_members),
-        cpp_class.name, cpp_class.ns_end(), guard)
+        cpp_class.name,
+        cpp_class.ns_end(),
+        guard,
+    )
 
 
 def order_encode(v):
-    i = struct.unpack('<I', struct.pack('<f', v))[0]
+    i = struct.unpack("<I", struct.pack("<f", v))[0]
     TopBit = 1 << 31
     # IEEE 754 floats compare like sign-magnitude integers.
-    if (i & TopBit):  # Negative float
+    if i & TopBit:  # Negative float
         return (1 << 32) - i  # low half of integers, order reversed.
     return TopBit + i  # top half of integers
 
 
 def evaluate_func(forest_json, cpp_class):
     """Generates evaluation functions for each tree and combines them in
-    `float Evaluate(const {Example}&)` function. This function can be 
+    `float Evaluate(const {Example}&)` function. This function can be
     used to score an Example."""
 
     code = ""
@@ -200,10 +212,13 @@ def evaluate_func(forest_json, cpp_class):
     code += "namespace {\n"
     tree_num = 0
     for tree_json in forest_json:
-        code += "LLVM_ATTRIBUTE_NOINLINE float EvaluateTree%d(const %s& E) {\n" % (tree_num, cpp_class.name)
-        code += "  " + \
-            "\n  ".join(
-                tree(tree_json, tree_num=tree_num, node_num=0)[0]) + "\n"
+        code += "LLVM_ATTRIBUTE_NOINLINE float EvaluateTree%d(const %s& E) {\n" % (
+            tree_num,
+            cpp_class.name,
+        )
+        code += (
+            "  " + "\n  ".join(tree(tree_json, tree_num=tree_num, node_num=0)[0]) + "\n"
+        )
         code += "}\n\n"
         tree_num += 1
     code += "} // namespace\n\n"
@@ -224,23 +239,20 @@ def gen_cpp_code(forest_json, features_json, filename, cpp_class):
     """Generates code for the .cpp file."""
     # Headers
     # Required by OrderEncode(float F).
-    angled_include = [
-        '#include <%s>' % h
-        for h in ["cstring", "limits"]
-    ]
+    angled_include = ["#include <%s>" % h for h in ["cstring", "limits"]]
 
     # Include generated header.
-    qouted_headers = {filename + '.h', 'llvm/ADT/bit.h'}
+    qouted_headers = {filename + ".h", "llvm/ADT/bit.h"}
     # Headers required by ENUM features used by the model.
-    qouted_headers |= {f["header"]
-                       for f in features_json if f["kind"] == "ENUM"}
+    qouted_headers |= {f["header"] for f in features_json if f["kind"] == "ENUM"}
     quoted_include = ['#include "%s"' % h for h in sorted(qouted_headers)]
 
     # using-decl for ENUM features.
-    using_decls = "\n".join("using %s_type = %s;" % (
-        feature['name'], feature['type'])
+    using_decls = "\n".join(
+        "using %s_type = %s;" % (feature["name"], feature["type"])
         for feature in features_json
-        if feature["kind"] == "ENUM")
+        if feature["kind"] == "ENUM"
+    )
     nl = "\n"
     return """%s
 
@@ -267,19 +279,25 @@ def gen_cpp_code(forest_json, features_json, filename, cpp_class):
 
 %s
 %s
-""" % (nl.join(angled_include), nl.join(quoted_include), cpp_class.ns_begin(),
-       using_decls, cpp_class.name, evaluate_func(forest_json, cpp_class),
-       cpp_class.ns_end())
+""" % (
+        nl.join(angled_include),
+        nl.join(quoted_include),
+        cpp_class.ns_begin(),
+        using_decls,
+        cpp_class.name,
+        evaluate_func(forest_json, cpp_class),
+        cpp_class.ns_end(),
+    )
 
 
 def main():
-    parser = argparse.ArgumentParser('DecisionForestCodegen')
-    parser.add_argument('--filename', help='output file name.')
-    parser.add_argument('--output_dir', help='output directory.')
-    parser.add_argument('--model', help='path to model directory.')
+    parser = argparse.ArgumentParser("DecisionForestCodegen")
+    parser.add_argument("--filename", help="output file name.")
+    parser.add_argument("--output_dir", help="output directory.")
+    parser.add_argument("--model", help="path to model directory.")
     parser.add_argument(
-        '--cpp_class',
-        help='The name of the class (which may be a namespace-qualified) created in generated header.'
+        "--cpp_class",
+        help="The name of the class (which may be a namespace-qualified) created in generated header.",
     )
     ns = parser.parse_args()
 
@@ -298,19 +316,23 @@ def main():
     with open(model_file) as m:
         forest_json = json.load(m)
 
-    with open(cpp_file, 'w+t') as output_cc:
+    with open(cpp_file, "w+t") as output_cc:
         output_cc.write(
-            gen_cpp_code(forest_json=forest_json,
-                         features_json=features_json,
-                         filename=filename,
-                         cpp_class=cpp_class))
-
-    with open(header_file, 'w+t') as output_h:
-        output_h.write(gen_header_code(
-            features_json=features_json,
-            cpp_class=cpp_class,
-            filename=filename))
-
-
-if __name__ == '__main__':
+            gen_cpp_code(
+                forest_json=forest_json,
+                features_json=features_json,
+                filename=filename,
+                cpp_class=cpp_class,
+            )
+        )
+
+    with open(header_file, "w+t") as output_h:
+        output_h.write(
+            gen_header_code(
+                features_json=features_json, cpp_class=cpp_class, filename=filename
+            )
+        )
+
+
+if __name__ == "__main__":
     main()

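The order_encode() change above is purely cosmetic, but the function rewards a close look: it maps a float to a uint32 whose unsigned ordering matches the float ordering, which is what lets the generated C++ compare features with a plain integer >=. IEEE 754 bit patterns compare like sign-magnitude integers, so negative floats are reversed into the low half of the integer range and non-negative floats are offset into the top half. A self-contained check of that property:

    import struct

    def order_encode(v):
        i = struct.unpack("<I", struct.pack("<f", v))[0]
        TopBit = 1 << 31
        if i & TopBit:             # Negative float: reverse into the low half.
            return (1 << 32) - i
        return TopBit + i          # Non-negative: shift into the top half.

    xs = [-2.5, -1.0, 0.0, 1.0, 3.5]
    codes = [order_encode(x) for x in xs]
    assert codes == sorted(codes), "encoding must preserve float order"
    print([hex(c) for c in codes])
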
diff --git a/clang-tools-extra/clangd/test/lit.cfg.py b/clang-tools-extra/clangd/test/lit.cfg.py
index 5292eaf2e99f4..489726bc9429b 100644
--- a/clang-tools-extra/clangd/test/lit.cfg.py
+++ b/clang-tools-extra/clangd/test/lit.cfg.py
@@ -4,9 +4,9 @@
 lit.llvm.llvm_config.use_clang([], [], required=False)
 lit.llvm.llvm_config.use_default_substitutions()
 
-config.name = 'Clangd'
-config.suffixes = ['.test']
-config.excludes = ['Inputs']
+config.name = "Clangd"
+config.suffixes = [".test"]
+config.excludes = ["Inputs"]
 config.test_format = lit.formats.ShTest(not lit.llvm.llvm_config.use_lit_shell)
 config.test_source_root = config.clangd_source_dir + "/test"
 config.test_exec_root = config.clangd_binary_dir + "/test"
@@ -15,27 +15,27 @@
 # Used to enable tests based on the required targets. Can be queried with e.g.
 #    REQUIRES: x86-registered-target
 def calculate_arch_features(arch_string):
-  return [arch.lower() + '-registered-target' for arch in arch_string.split()]
+    return [arch.lower() + "-registered-target" for arch in arch_string.split()]
 
 
-lit.llvm.llvm_config.feature_config([('--targets-built',
-                                      calculate_arch_features)])
+lit.llvm.llvm_config.feature_config([("--targets-built", calculate_arch_features)])
 
 # Clangd-specific lit environment.
-config.substitutions.append(('%clangd-benchmark-dir',
-                             config.clangd_binary_dir + "/benchmarks"))
+config.substitutions.append(
+    ("%clangd-benchmark-dir", config.clangd_binary_dir + "/benchmarks")
+)
 
 if config.clangd_build_xpc:
-  config.available_features.add('clangd-xpc-support')
+    config.available_features.add("clangd-xpc-support")
 
 if config.clangd_enable_remote:
-  config.available_features.add('clangd-remote-index')
+    config.available_features.add("clangd-remote-index")
 
 if config.clangd_tidy_checks:
-  config.available_features.add('clangd-tidy-checks')
+    config.available_features.add("clangd-tidy-checks")
 
 if config.have_zlib:
-  config.available_features.add('zlib')
+    config.available_features.add("zlib")
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

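calculate_arch_features above turns the space-separated target list reported by the --targets-built query into lit feature names, one per registered target. Given the hypothetical output "X86 AArch64":

    def calculate_arch_features(arch_string):
        return [arch.lower() + "-registered-target" for arch in arch_string.split()]

    print(calculate_arch_features("X86 AArch64"))
    # ['x86-registered-target', 'aarch64-registered-target']
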
diff --git a/clang-tools-extra/clangd/test/lit.local.cfg b/clang-tools-extra/clangd/test/lit.local.cfg
index ef338c755372e..08582b6b967ce 100644
--- a/clang-tools-extra/clangd/test/lit.local.cfg
+++ b/clang-tools-extra/clangd/test/lit.local.cfg
@@ -1,6 +1,7 @@
 import re
+
 # We rely on the default -std being derived from the filetype.
 # PS4 sets a different -std, and many tests break.
 # FIXME: make our tests less brittle instead.
-if re.match(r'.*-scei-ps4', config.target_triple):
-  config.unsupported = True
+if re.match(r".*-scei-ps4", config.target_triple):
+    config.unsupported = True

diff --git a/clang-tools-extra/clangd/test/remote-index/pipeline_helper.py b/clang-tools-extra/clangd/test/remote-index/pipeline_helper.py
index 75ff5ff2ae164..afdac985221a7 100644
--- a/clang-tools-extra/clangd/test/remote-index/pipeline_helper.py
+++ b/clang-tools-extra/clangd/test/remote-index/pipeline_helper.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python
 #
-#===- pipeline_helper.py - Remote Index pipeline Helper *- python -------*--===#
+# ===- pipeline_helper.py - Remote Index pipeline Helper *- python -------*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 import argparse
 import os
@@ -18,83 +18,100 @@
 
 
 def kill_process_after_delay(server_process):
-  time.sleep(10)
-  if server_process.poll() is None:
-    server_process.kill()
+    time.sleep(10)
+    if server_process.poll() is None:
+        server_process.kill()
 
 
 def main():
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--input-file-name', required=True)
-  parser.add_argument('--project-root', required=True)
-  parser.add_argument('--index-file', required=True)
-  parser.add_argument('--server-arg', action='append', default=[])
-  parser.add_argument('--server-log', nargs='?', type=argparse.FileType('wb'), default=os.devnull)
-
-  args = parser.parse_args()
-
-  # Grab an available port.
-  with socket() as s:
-    s.bind(('localhost', 0))
-    server_address = 'localhost:' + str(s.getsockname()[1])
-
-  print('Initializing clangd-index-server...', file=sys.stderr)
-  index_server_process = subprocess.Popen([
-      'clangd-index-server', '--server-address=' + server_address,
-      args.index_file, args.project_root
-  ] + args.server_arg,
-                                          stderr=subprocess.PIPE)
-
-  # This will kill index_server_process if it hangs without printing init
-  # message.
-  shutdown_thread = threading.Thread(
-      target=kill_process_after_delay, args=(index_server_process,))
-  shutdown_thread.daemon = True
-  shutdown_thread.start()
-
-  # Wait for the server to warm-up.
-  found_init_message = False
-  while index_server_process.poll() is None:
-    line = index_server_process.stderr.readline()
-    args.server_log.write(line)
-    args.server_log.flush()
-    if b'Server listening' in line:
-      print('Server initialization complete.', file=sys.stderr)
-      found_init_message = True
-      break
-
-  if not found_init_message:
-    print('Server initialization failed. Shutting down.', file=sys.stderr)
-    sys.exit(1)
-
-  print('Running clangd-index-server-monitor...', file=sys.stderr)
-  index_server_monitor_process = subprocess.Popen([
-      'clangd-index-server-monitor', server_address,
-  ], stderr=subprocess.PIPE)
-
-  index_server_monitor_process.wait()
-
-  in_file = open(args.input_file_name)
-
-  print('Starting clangd...', file=sys.stderr)
-  clangd_process = subprocess.Popen([
-      'clangd', '--remote-index-address=' + server_address,
-      '--project-root=' + args.project_root, '--lit-test', '--sync'
-  ],
-                                    stdin=in_file)
-  clangd_process.wait()
-  print(
-      'Clangd executed successfully, shutting down child processes.',
-      file=sys.stderr)
-  index_server_process.kill()
-  for line in index_server_process.stderr:
-    args.server_log.write(line)
-    args.server_log.flush()
-
-  for line in index_server_monitor_process.stderr:
-    args.server_log.write(line)
-    args.server_log.flush()
-
-
-if __name__ == '__main__':
-  main()
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--input-file-name", required=True)
+    parser.add_argument("--project-root", required=True)
+    parser.add_argument("--index-file", required=True)
+    parser.add_argument("--server-arg", action="append", default=[])
+    parser.add_argument(
+        "--server-log", nargs="?", type=argparse.FileType("wb"), default=os.devnull
+    )
+
+    args = parser.parse_args()
+
+    # Grab an available port.
+    with socket() as s:
+        s.bind(("localhost", 0))
+        server_address = "localhost:" + str(s.getsockname()[1])
+
+    print("Initializing clangd-index-server...", file=sys.stderr)
+    index_server_process = subprocess.Popen(
+        [
+            "clangd-index-server",
+            "--server-address=" + server_address,
+            args.index_file,
+            args.project_root,
+        ]
+        + args.server_arg,
+        stderr=subprocess.PIPE,
+    )
+
+    # This will kill index_server_process if it hangs without printing init
+    # message.
+    shutdown_thread = threading.Thread(
+        target=kill_process_after_delay, args=(index_server_process,)
+    )
+    shutdown_thread.daemon = True
+    shutdown_thread.start()
+
+    # Wait for the server to warm-up.
+    found_init_message = False
+    while index_server_process.poll() is None:
+        line = index_server_process.stderr.readline()
+        args.server_log.write(line)
+        args.server_log.flush()
+        if b"Server listening" in line:
+            print("Server initialization complete.", file=sys.stderr)
+            found_init_message = True
+            break
+
+    if not found_init_message:
+        print("Server initialization failed. Shutting down.", file=sys.stderr)
+        sys.exit(1)
+
+    print("Running clangd-index-server-monitor...", file=sys.stderr)
+    index_server_monitor_process = subprocess.Popen(
+        [
+            "clangd-index-server-monitor",
+            server_address,
+        ],
+        stderr=subprocess.PIPE,
+    )
+
+    index_server_monitor_process.wait()
+
+    in_file = open(args.input_file_name)
+
+    print("Staring clangd...", file=sys.stderr)
+    clangd_process = subprocess.Popen(
+        [
+            "clangd",
+            "--remote-index-address=" + server_address,
+            "--project-root=" + args.project_root,
+            "--lit-test",
+            "--sync",
+        ],
+        stdin=in_file,
+    )
+    clangd_process.wait()
+    print(
+        "Clangd executed successfully, shutting down child processes.", file=sys.stderr
+    )
+    index_server_process.kill()
+    for line in index_server_process.stderr:
+        args.server_log.write(line)
+        args.server_log.flush()
+
+    for line in index_server_monitor_process.stderr:
+        args.server_log.write(line)
+        args.server_log.flush()
+
+
+if __name__ == "__main__":
+    main()

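pipeline_helper.py reserves a port by binding to port 0 so the OS picks a free one, then closes the socket before handing the address to clangd-index-server. The idiom is inherently racy (another process could grab the port between the close and the server's bind), which is acceptable in a test helper. A standalone version of the same trick:

    from socket import socket

    with socket() as s:              # Defaults to AF_INET / SOCK_STREAM.
        s.bind(("localhost", 0))     # Port 0: the kernel assigns a free port.
        port = s.getsockname()[1]
    # The socket is closed here, so the port is only reserved best-effort.
    server_address = "localhost:" + str(port)
    print(server_address)
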
diff --git a/clang-tools-extra/clangd/unittests/lit.cfg.py b/clang-tools-extra/clangd/unittests/lit.cfg.py
index 1db30ca7c7511..48ee5f5d5ab92 100644
--- a/clang-tools-extra/clangd/unittests/lit.cfg.py
+++ b/clang-tools-extra/clangd/unittests/lit.cfg.py
@@ -1,21 +1,23 @@
 import lit.formats
+
 config.name = "Clangd Unit Tests"
-config.test_format = lit.formats.GoogleTest('.', 'Tests')
+config.test_format = lit.formats.GoogleTest(".", "Tests")
 config.test_source_root = config.clangd_binary_dir + "/unittests"
 config.test_exec_root = config.clangd_binary_dir + "/unittests"
 
 # Point the dynamic loader at dynamic libraries in 'lib'.
 # FIXME: it seems every project has a copy of this logic. Move it somewhere.
 import platform
-if platform.system() == 'Darwin':
-    shlibpath_var = 'DYLD_LIBRARY_PATH'
-elif platform.system() == 'Windows':
-    shlibpath_var = 'PATH'
+
+if platform.system() == "Darwin":
+    shlibpath_var = "DYLD_LIBRARY_PATH"
+elif platform.system() == "Windows":
+    shlibpath_var = "PATH"
 else:
-    shlibpath_var = 'LD_LIBRARY_PATH'
-config.environment[shlibpath_var] = os.path.pathsep.join((
-    "@SHLIBDIR@", "@LLVM_LIBS_DIR@",
-    config.environment.get(shlibpath_var,'')))
+    shlibpath_var = "LD_LIBRARY_PATH"
+config.environment[shlibpath_var] = os.path.pathsep.join(
+    ("@SHLIBDIR@", "@LLVM_LIBS_DIR@", config.environment.get(shlibpath_var, ""))
+)
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

diff --git a/clang-tools-extra/docs/clang-tidy/checks/gen-static-analyzer-docs.py b/clang-tools-extra/docs/clang-tidy/checks/gen-static-analyzer-docs.py
index 51956d15f15e9..1e54cd53bc455 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/gen-static-analyzer-docs.py
+++ b/clang-tools-extra/docs/clang-tidy/checks/gen-static-analyzer-docs.py
@@ -10,49 +10,64 @@
 import re
 
 """Get path of script so files are always in correct directory"""
-__location__ = os.path.realpath(
-        os.path.join(os.getcwd(), os.path.dirname(__file__)))
+__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
 
 """Get dict of checker related info and parse for full check names
 
 Returns:
   checkers: dict of checker info
 """
+
+
 def get_checkers(checkers_td_directory):
-  p = subprocess.Popen(["llvm-tblgen", "--dump-json", "-I",
-                           checkers_td_directory, checkers_td_directory+"Checkers.td"],
-                           stdout=subprocess.PIPE)
-  table_entries = json.loads(p.communicate()[0])
-  documentable_checkers = []
-  checkers = table_entries["!instanceof"]["Checker"]
-  packages = table_entries["!instanceof"]["Package"]
-
-  for checker_ in checkers:
-    checker = table_entries[checker_]
-    checker_name = checker["CheckerName"]
-    package_ = checker["ParentPackage"]["def"]
-    package = table_entries[package_]
-    package_name = package["PackageName"]
-    checker_package_prefix = package_name
-    parent_package_ = package["ParentPackage"]
-    hidden = (checker["Hidden"] != 0) or (package["Hidden"] != 0)
-
-    while(parent_package_ != None):
-      parent_package = table_entries[parent_package_["def"]]
-      checker_package_prefix = parent_package["PackageName"] + "." + checker_package_prefix
-      hidden = hidden or parent_package["Hidden"] != 0
-      parent_package_ = parent_package["ParentPackage"]
-
-    full_package_name = "clang-analyzer-" + checker_package_prefix + "." + checker_name
-    anchor_url = re.sub("\.", "-", checker_package_prefix + "." + checker_name).lower()
-
-    if(not hidden and "alpha" not in full_package_name.lower()):
-      checker["FullPackageName"] = full_package_name
-      checker["AnchorUrl"] = anchor_url
-      documentable_checkers.append(checker)
-
-  documentable_checkers.sort(key=lambda x: x["FullPackageName"])
-  return documentable_checkers
+    p = subprocess.Popen(
+        [
+            "llvm-tblgen",
+            "--dump-json",
+            "-I",
+            checkers_td_directory,
+            checkers_td_directory + "Checkers.td",
+        ],
+        stdout=subprocess.PIPE,
+    )
+    table_entries = json.loads(p.communicate()[0])
+    documentable_checkers = []
+    checkers = table_entries["!instanceof"]["Checker"]
+    packages = table_entries["!instanceof"]["Package"]
+
+    for checker_ in checkers:
+        checker = table_entries[checker_]
+        checker_name = checker["CheckerName"]
+        package_ = checker["ParentPackage"]["def"]
+        package = table_entries[package_]
+        package_name = package["PackageName"]
+        checker_package_prefix = package_name
+        parent_package_ = package["ParentPackage"]
+        hidden = (checker["Hidden"] != 0) or (package["Hidden"] != 0)
+
+        while parent_package_ != None:
+            parent_package = table_entries[parent_package_["def"]]
+            checker_package_prefix = (
+                parent_package["PackageName"] + "." + checker_package_prefix
+            )
+            hidden = hidden or parent_package["Hidden"] != 0
+            parent_package_ = parent_package["ParentPackage"]
+
+        full_package_name = (
+            "clang-analyzer-" + checker_package_prefix + "." + checker_name
+        )
+        anchor_url = re.sub(
+            "\.", "-", checker_package_prefix + "." + checker_name
+        ).lower()
+
+        if not hidden and "alpha" not in full_package_name.lower():
+            checker["FullPackageName"] = full_package_name
+            checker["AnchorUrl"] = anchor_url
+            documentable_checkers.append(checker)
+
+    documentable_checkers.sort(key=lambda x: x["FullPackageName"])
+    return documentable_checkers
+
 
 """Generate documentation for checker
 
@@ -61,88 +76,108 @@ def get_checkers(checkers_td_directory):
   only_help_text: Generate documentation based off the checker description.
     Used when there is no other documentation to link to.
 """
+
+
 def generate_documentation(checker, only_help_text=False):
-  with open(os.path.join(__location__, checker["FullPackageName"]+".rst"),"w") as f:
-    f.write(".. title:: clang-tidy - %s\n" % checker["FullPackageName"])
-    if(not only_help_text):
-      f.write(".. meta::\n")
-      f.write("   :http-equiv=refresh: 5;URL=https://clang.llvm.org/docs/analyzer/checkers.html#%s\n" % checker["AnchorUrl"])
-    f.write("\n")
-    f.write("%s\n" % checker["FullPackageName"])
-    f.write("=" * len(checker["FullPackageName"]) + "\n")
-    f.write("\n")
-    if(only_help_text):
-      f.write("%s\n" % checker["HelpText"])
-    else:
-      f.write("The %s check is an alias, please see\n" % checker["FullPackageName"])
-      f.write("`Clang Static Analyzer Available Checkers <https://clang.llvm.org/docs/analyzer/checkers.html#%s>`_\n" % checker["AnchorUrl"])
-      f.write("for more information.\n")
-    f.close()
+    with open(
+        os.path.join(__location__, checker["FullPackageName"] + ".rst"), "w"
+    ) as f:
+        f.write(".. title:: clang-tidy - %s\n" % checker["FullPackageName"])
+        if not only_help_text:
+            f.write(".. meta::\n")
+            f.write(
+                "   :http-equiv=refresh: 5;URL=https://clang.llvm.org/docs/analyzer/checkers.html#%s\n"
+                % checker["AnchorUrl"]
+            )
+        f.write("\n")
+        f.write("%s\n" % checker["FullPackageName"])
+        f.write("=" * len(checker["FullPackageName"]) + "\n")
+        f.write("\n")
+        if only_help_text:
+            f.write("%s\n" % checker["HelpText"])
+        else:
+            f.write(
+                "The %s check is an alias, please see\n" % checker["FullPackageName"]
+            )
+            f.write(
+                "`Clang Static Analyzer Available Checkers <https://clang.llvm.org/docs/analyzer/checkers.html#%s>`_\n"
+                % checker["AnchorUrl"]
+            )
+            f.write("for more information.\n")
+        f.close()
+
 
 """Update list.rst to include the new checks
 
 Args:
   checkers: dict acquired from get_checkers()
 """
+
+
 def update_documentation_list(checkers):
-  with open(os.path.join(__location__, "list.rst"), "r+") as f:
-    f_text = f.read()
-    header, check_text= f_text.split(".. toctree::\n")
-    checks = check_text.split("\n")
-    for checker in checkers:
-      if(("   %s" % checker["FullPackageName"]) not in checks):
-        checks.append("   %s" % checker["FullPackageName"])
-    checks.sort()
+    with open(os.path.join(__location__, "list.rst"), "r+") as f:
+        f_text = f.read()
+        header, check_text = f_text.split(".. toctree::\n")
+        checks = check_text.split("\n")
+        for checker in checkers:
+            if ("   %s" % checker["FullPackageName"]) not in checks:
+                checks.append("   %s" % checker["FullPackageName"])
+        checks.sort()
+
+        # Overwrite file with new data
+        f.seek(0)
+        f.write(header)
+        f.write(".. toctree::")
+        for check in checks:
+            f.write("%s\n" % check)
+        f.close()
+
 
-    #Overwrite file with new data
-    f.seek(0)
-    f.write(header)
-    f.write(".. toctree::")
-    for check in checks:
-      f.write("%s\n" % check)
-    f.close()
+default_path_monorepo = "../../../../clang/include/clang/StaticAnalyzer/Checkers/"
+default_path_in_tree = "../../../../../include/clang/StaticAnalyzer/Checkers/"
 
-default_path_monorepo = '../../../../clang/include/clang/StaticAnalyzer/Checkers/'
-default_path_in_tree = '../../../../../include/clang/StaticAnalyzer/Checkers/'
 
 def parse_arguments():
-  """Set up and parse command-line arguments
-  Returns:
-    file_path: Path to Checkers.td"""
-  usage = """Parse Checkers.td to generate documentation for static analyzer checks"""
-  parse = argparse.ArgumentParser(description=usage)
+    """Set up and parse command-line arguments
+    Returns:
+      file_path: Path to Checkers.td"""
+    usage = """Parse Checkers.td to generate documentation for static analyzer checks"""
+    parse = argparse.ArgumentParser(description=usage)
 
-  file_path_help = ("""Path to Checkers directory
+    file_path_help = """Path to Checkers directory
                     defaults to ../../../../clang/include/clang/StaticAnalyzer/Checkers/ if it exists
-                    then to ../../../../../include/clang/StaticAnalyzer/Checkers/""")
+                    then to ../../../../../include/clang/StaticAnalyzer/Checkers/"""
 
-  default_path=None
-  if(os.path.exists(default_path_monorepo)):
-    default_path = default_path_monorepo
-  elif(os.path.exists(default_path_in_tree)):
-    default_path = default_path_in_tree
+    default_path = None
+    if os.path.exists(default_path_monorepo):
+        default_path = default_path_monorepo
+    elif os.path.exists(default_path_in_tree):
+        default_path = default_path_in_tree
 
-  parse.add_argument("file", type=str, help=file_path_help, nargs='?', default=default_path)
-  args = parse.parse_args()
+    parse.add_argument(
+        "file", type=str, help=file_path_help, nargs="?", default=default_path
+    )
+    args = parse.parse_args()
 
-  if(args.file is None):
-    print("Could not find Checkers directory. Please see -h")
-    exit(1)
+    if args.file is None:
+        print("Could not find Checkers directory. Please see -h")
+        exit(1)
 
-  return args.file
+    return args.file
 
 
 def main():
-  file_path = parse_arguments()
-  checkers = get_checkers(file_path)
-  for checker in checkers:
-    #No documentation nor alpha documentation
-    if(checker["Documentation"][1] == 0 and checker["Documentation"][0] == 0):
-      generate_documentation(checker, True)
-    else:
-      generate_documentation(checker)
-    print("Generated documentation for: %s" % checker["FullPackageName"])
-  update_documentation_list(checkers)
-
-if __name__ == '__main__':
-  main()
+    file_path = parse_arguments()
+    checkers = get_checkers(file_path)
+    for checker in checkers:
+        # No documentation nor alpha documentation
+        if checker["Documentation"][1] == 0 and checker["Documentation"][0] == 0:
+            generate_documentation(checker, True)
+        else:
+            generate_documentation(checker)
+        print("Generated documentation for: %s" % checker["FullPackageName"])
+    update_documentation_list(checkers)
+
+
+if __name__ == "__main__":
+    main()

diff --git a/clang-tools-extra/docs/conf.py b/clang-tools-extra/docs/conf.py
index bf8dd05cb6f8d..feb7a1da63643 100644
--- a/clang-tools-extra/docs/conf.py
+++ b/clang-tools-extra/docs/conf.py
@@ -17,66 +17,66 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
+extensions = ["sphinx.ext.todo", "sphinx.ext.mathjax"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'Extra Clang Tools'
-copyright = u'2007-%d, The Clang Team' % date.today().year
+project = "Extra Clang Tools"
+copyright = "2007-%d, The Clang Team" % date.today().year
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 in_progress_title = "(In-Progress) " if tags.has("PreRelease") else ""
 
@@ -88,121 +88,124 @@
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'haiku'
+html_theme = "haiku"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'ExtraClangToolsdoc'
+htmlhelp_basename = "ExtraClangToolsdoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'ExtraClangTools.tex', u'Extra Clang Tools Documentation',
-   u'The Clang Team', 'manual'),
+    (
+        "index",
+        "ExtraClangTools.tex",
+        "Extra Clang Tools Documentation",
+        "The Clang Team",
+        "manual",
+    ),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
@@ -210,12 +213,17 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'extraclangtools', u'Extra Clang Tools Documentation',
-     [u'The Clang Team'], 1)
+    (
+        "index",
+        "extraclangtools",
+        "Extra Clang Tools Documentation",
+        ["The Clang Team"],
+        1,
+    )
 ]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -224,16 +232,22 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('index', 'ExtraClangTools', u'Extra Clang Tools Documentation',
-   u'The Clang Team', 'ExtraClangTools', 'One line description of project.',
-   'Miscellaneous'),
+    (
+        "index",
+        "ExtraClangTools",
+        "Extra Clang Tools Documentation",
+        "The Clang Team",
+        "ExtraClangTools",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'

diff --git a/clang-tools-extra/include-cleaner/test/Unit/lit.cfg.py b/clang-tools-extra/include-cleaner/test/Unit/lit.cfg.py
index 1b2910db9d47d..0963351abe3b1 100644
--- a/clang-tools-extra/include-cleaner/test/Unit/lit.cfg.py
+++ b/clang-tools-extra/include-cleaner/test/Unit/lit.cfg.py
@@ -1,21 +1,23 @@
 import lit.formats
+
 config.name = "clangIncludeCleaner Unit Tests"
-config.test_format = lit.formats.GoogleTest('.', 'Tests')
+config.test_format = lit.formats.GoogleTest(".", "Tests")
 config.test_source_root = config.clang_include_cleaner_binary_dir + "/unittests"
 config.test_exec_root = config.clang_include_cleaner_binary_dir + "/unittests"
 
 # Point the dynamic loader at dynamic libraries in 'lib'.
 # FIXME: it seems every project has a copy of this logic. Move it somewhere.
 import platform
-if platform.system() == 'Darwin':
-    shlibpath_var = 'DYLD_LIBRARY_PATH'
-elif platform.system() == 'Windows':
-    shlibpath_var = 'PATH'
+
+if platform.system() == "Darwin":
+    shlibpath_var = "DYLD_LIBRARY_PATH"
+elif platform.system() == "Windows":
+    shlibpath_var = "PATH"
 else:
-    shlibpath_var = 'LD_LIBRARY_PATH'
-config.environment[shlibpath_var] = os.path.pathsep.join((
-    "@SHLIBDIR@", "@LLVM_LIBS_DIR@",
-    config.environment.get(shlibpath_var,'')))
+    shlibpath_var = "LD_LIBRARY_PATH"
+config.environment[shlibpath_var] = os.path.pathsep.join(
+    ("@SHLIBDIR@", "@LLVM_LIBS_DIR@", config.environment.get(shlibpath_var, ""))
+)
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

diff --git a/clang-tools-extra/include-cleaner/test/lit.cfg.py b/clang-tools-extra/include-cleaner/test/lit.cfg.py
index aa5bbd69f33b2..fc6725054366e 100644
--- a/clang-tools-extra/include-cleaner/test/lit.cfg.py
+++ b/clang-tools-extra/include-cleaner/test/lit.cfg.py
@@ -3,17 +3,16 @@
 lit.llvm.initialize(lit_config, config)
 lit.llvm.llvm_config.use_default_substitutions()
 
-config.name = 'ClangIncludeCleaner'
-config.suffixes = ['.test', '.c', '.cpp']
-config.excludes = ['Inputs']
+config.name = "ClangIncludeCleaner"
+config.suffixes = [".test", ".c", ".cpp"]
+config.excludes = ["Inputs"]
 config.test_format = lit.formats.ShTest(not lit.llvm.llvm_config.use_lit_shell)
 config.test_source_root = config.clang_include_cleaner_source_dir + "/test"
 config.test_exec_root = config.clang_include_cleaner_binary_dir + "/test"
 
-config.environment['PATH'] = os.path.pathsep.join((
-        config.clang_tools_dir,
-        config.llvm_tools_dir,
-        config.environment['PATH']))
+config.environment["PATH"] = os.path.pathsep.join(
+    (config.clang_tools_dir, config.llvm_tools_dir, config.environment["PATH"])
+)
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

diff --git a/clang-tools-extra/pseudo/test/Unit/lit.cfg.py b/clang-tools-extra/pseudo/test/Unit/lit.cfg.py
index b76d7dd0ee127..000a8a772c31b 100644
--- a/clang-tools-extra/pseudo/test/Unit/lit.cfg.py
+++ b/clang-tools-extra/pseudo/test/Unit/lit.cfg.py
@@ -1,21 +1,23 @@
 import lit.formats
+
 config.name = "clangPseudo Unit Tests"
-config.test_format = lit.formats.GoogleTest('.', 'Tests')
+config.test_format = lit.formats.GoogleTest(".", "Tests")
 config.test_source_root = config.clang_pseudo_binary_dir + "/unittests"
 config.test_exec_root = config.clang_pseudo_binary_dir + "/unittests"
 
 # Point the dynamic loader at dynamic libraries in 'lib'.
 # FIXME: it seems every project has a copy of this logic. Move it somewhere.
 import platform
-if platform.system() == 'Darwin':
-    shlibpath_var = 'DYLD_LIBRARY_PATH'
-elif platform.system() == 'Windows':
-    shlibpath_var = 'PATH'
+
+if platform.system() == "Darwin":
+    shlibpath_var = "DYLD_LIBRARY_PATH"
+elif platform.system() == "Windows":
+    shlibpath_var = "PATH"
 else:
-    shlibpath_var = 'LD_LIBRARY_PATH'
-config.environment[shlibpath_var] = os.path.pathsep.join((
-    "@SHLIBDIR@", "@LLVM_LIBS_DIR@",
-    config.environment.get(shlibpath_var,'')))
+    shlibpath_var = "LD_LIBRARY_PATH"
+config.environment[shlibpath_var] = os.path.pathsep.join(
+    ("@SHLIBDIR@", "@LLVM_LIBS_DIR@", config.environment.get(shlibpath_var, ""))
+)
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

diff --git a/clang-tools-extra/pseudo/test/lit.cfg.py b/clang-tools-extra/pseudo/test/lit.cfg.py
index 01b1b7014fec1..2ba1558b2ed7d 100644
--- a/clang-tools-extra/pseudo/test/lit.cfg.py
+++ b/clang-tools-extra/pseudo/test/lit.cfg.py
@@ -3,17 +3,16 @@
 lit.llvm.initialize(lit_config, config)
 lit.llvm.llvm_config.use_default_substitutions()
 
-config.name = 'ClangPseudo'
-config.suffixes = ['.test', '.c', '.cpp']
-config.excludes = ['Inputs']
+config.name = "ClangPseudo"
+config.suffixes = [".test", ".c", ".cpp"]
+config.excludes = ["Inputs"]
 config.test_format = lit.formats.ShTest(not lit.llvm.llvm_config.use_lit_shell)
 config.test_source_root = config.clang_pseudo_source_dir + "/test"
 config.test_exec_root = config.clang_pseudo_binary_dir + "/test"
 
-config.environment['PATH'] = os.path.pathsep.join((
-        config.clang_tools_dir,
-        config.llvm_tools_dir,
-        config.environment['PATH']))
+config.environment["PATH"] = os.path.pathsep.join(
+    (config.clang_tools_dir, config.llvm_tools_dir, config.environment["PATH"])
+)
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

diff --git a/clang-tools-extra/pseudo/test/lit.local.cfg b/clang-tools-extra/pseudo/test/lit.local.cfg
index b5969c8874698..53079a0b538ae 100644
--- a/clang-tools-extra/pseudo/test/lit.local.cfg
+++ b/clang-tools-extra/pseudo/test/lit.local.cfg
@@ -1,2 +1,2 @@
-cxx_bnf_file = os.path.join(config.clang_pseudo_source_dir, 'lib', 'cxx', 'cxx.bnf')
-config.substitutions.append(('%cxx-bnf-file', cxx_bnf_file))
+cxx_bnf_file = os.path.join(config.clang_pseudo_source_dir, "lib", "cxx", "cxx.bnf")
+config.substitutions.append(("%cxx-bnf-file", cxx_bnf_file))

diff --git a/clang-tools-extra/test/Unit/lit.cfg.py b/clang-tools-extra/test/Unit/lit.cfg.py
index 1150b72431e0c..b7376a02c89e1 100644
--- a/clang-tools-extra/test/Unit/lit.cfg.py
+++ b/clang-tools-extra/test/Unit/lit.cfg.py
@@ -5,7 +5,7 @@
 import lit.formats
 
 config.name = "Extra Tools Unit Tests"
-config.suffixes = [] # Seems not to matter for google tests?
+config.suffixes = []  # Seems not to matter for google tests?
 
 # Test Source and Exec root dirs both point to the same directory where google
 # test binaries are built.
@@ -17,21 +17,22 @@
 # a special value for GoogleTest indicating that it should look through the
 # entire testsuite recursively for tests (alternatively, one could provide a
 # ;-separated list of subdirectories).
-config.test_format = lit.formats.GoogleTest('.', 'Tests')
+config.test_format = lit.formats.GoogleTest(".", "Tests")
 
-if platform.system() == 'Darwin':
-    shlibpath_var = 'DYLD_LIBRARY_PATH'
-elif platform.system() == 'Windows':
-    shlibpath_var = 'PATH'
+if platform.system() == "Darwin":
+    shlibpath_var = "DYLD_LIBRARY_PATH"
+elif platform.system() == "Windows":
+    shlibpath_var = "PATH"
 else:
-    shlibpath_var = 'LD_LIBRARY_PATH'
+    shlibpath_var = "LD_LIBRARY_PATH"
 
 # Point the dynamic loader at dynamic libraries in 'lib'.
-shlibpath = os.path.pathsep.join((config.shlibdir, config.llvm_libs_dir,
-                                 config.environment.get(shlibpath_var,'')))
+shlibpath = os.path.pathsep.join(
+    (config.shlibdir, config.llvm_libs_dir, config.environment.get(shlibpath_var, ""))
+)
 
 # Win32 seeks DLLs along %PATH%.
-if sys.platform in ['win32', 'cygwin'] and os.path.isdir(config.shlibdir):
+if sys.platform in ["win32", "cygwin"] and os.path.isdir(config.shlibdir):
     shlibpath = os.path.pathsep.join((config.shlibdir, shlibpath))
 
 config.environment[shlibpath_var] = shlibpath

diff --git a/clang-tools-extra/test/clang-tidy/check_clang_tidy.py b/clang-tools-extra/test/clang-tidy/check_clang_tidy.py
index f2d9f40fe568c..a78996a0fce3b 100755
--- a/clang-tools-extra/test/clang-tidy/check_clang_tidy.py
+++ b/clang-tools-extra/test/clang-tidy/check_clang_tidy.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 #
-#===- check_clang_tidy.py - ClangTidy Test Helper ------------*- python -*--===#
+# ===- check_clang_tidy.py - ClangTidy Test Helper ------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 r"""
 ClangTidy Test Helper
@@ -40,228 +40,275 @@
 
 
 def write_file(file_name, text):
-  with open(file_name, 'w', encoding='utf-8') as f:
-    f.write(text)
-    f.truncate()
+    with open(file_name, "w", encoding="utf-8") as f:
+        f.write(text)
+        f.truncate()
 
 
 def try_run(args, raise_error=True):
-  try:
-    process_output = \
-      subprocess.check_output(args, stderr=subprocess.STDOUT).decode(errors='ignore')
-  except subprocess.CalledProcessError as e:
-    process_output = e.output.decode(errors='ignore')
-    print('%s failed:\n%s' % (' '.join(args), process_output))
-    if raise_error:
-      raise
-  return process_output
+    try:
+        process_output = subprocess.check_output(args, stderr=subprocess.STDOUT).decode(
+            errors="ignore"
+        )
+    except subprocess.CalledProcessError as e:
+        process_output = e.output.decode(errors="ignore")
+        print("%s failed:\n%s" % (" ".join(args), process_output))
+        if raise_error:
+            raise
+    return process_output
 
 
 # This class represents the appearance of a message prefix in a file.
 class MessagePrefix:
-  def __init__(self, label):
-    self.has_message = False
-    self.prefixes = []
-    self.label = label
+    def __init__(self, label):
+        self.has_message = False
+        self.prefixes = []
+        self.label = label
 
-  def check(self, file_check_suffix, input_text):
-    self.prefix = self.label + file_check_suffix
-    self.has_message = self.prefix in input_text
-    if self.has_message:
-      self.prefixes.append(self.prefix)
-    return self.has_message
+    def check(self, file_check_suffix, input_text):
+        self.prefix = self.label + file_check_suffix
+        self.has_message = self.prefix in input_text
+        if self.has_message:
+            self.prefixes.append(self.prefix)
+        return self.has_message
 
 
 class CheckRunner:
-  def __init__(self, args, extra_args):
-    self.resource_dir = args.resource_dir
-    self.assume_file_name = args.assume_filename
-    self.input_file_name = args.input_file_name
-    self.check_name = args.check_name
-    self.temp_file_name = args.temp_file_name
-    self.original_file_name = self.temp_file_name + ".orig"
-    self.expect_clang_tidy_error = args.expect_clang_tidy_error
-    self.std = args.std
-    self.check_suffix = args.check_suffix
-    self.input_text = ''
-    self.has_check_fixes = False
-    self.has_check_messages = False
-    self.has_check_notes = False
-    self.fixes = MessagePrefix('CHECK-FIXES')
-    self.messages = MessagePrefix('CHECK-MESSAGES')
-    self.notes = MessagePrefix('CHECK-NOTES')
-
-    file_name_with_extension = self.assume_file_name or self.input_file_name
-    _, extension = os.path.splitext(file_name_with_extension)
-    if extension not in ['.c', '.hpp', '.m', '.mm']:
-      extension = '.cpp'
-    self.temp_file_name = self.temp_file_name + extension
-
-    self.clang_extra_args = []
-    self.clang_tidy_extra_args = extra_args
-    if '--' in extra_args:
-      i = self.clang_tidy_extra_args.index('--')
-      self.clang_extra_args = self.clang_tidy_extra_args[i + 1:]
-      self.clang_tidy_extra_args = self.clang_tidy_extra_args[:i]
-
-    # If the test does not specify a config style, force an empty one; otherwise
-    # auto-detection logic can discover a ".clang-tidy" file that is not related to
-    # the test.
-    if not any([
-        re.match('^-?-config(-file)?=', arg)
-        for arg in self.clang_tidy_extra_args]):
-      self.clang_tidy_extra_args.append('--config={}')
-
-    if extension in ['.m', '.mm']:
-      self.clang_extra_args = ['-fobjc-abi-version=2', '-fobjc-arc', '-fblocks'] + \
-          self.clang_extra_args
-
-    if extension in ['.cpp', '.hpp', '.mm']:
-      self.clang_extra_args.append('-std=' + self.std)
-
-    # Tests should not rely on STL being available, and instead provide mock
-    # implementations of relevant APIs.
-    self.clang_extra_args.append('-nostdinc++')
-
-    if self.resource_dir is not None:
-      self.clang_extra_args.append('-resource-dir=%s' % self.resource_dir)
-
-  def read_input(self):
-    with open(self.input_file_name, 'r', encoding='utf-8') as input_file:
-      self.input_text = input_file.read()
-
-  def get_prefixes(self):
-    for suffix in self.check_suffix:
-      if suffix and not re.match('^[A-Z0-9\\-]+$', suffix):
-        sys.exit('Only A..Z, 0..9 and "-" are allowed in check suffixes list,'
-                 + ' but "%s" was given' % suffix)
-
-      file_check_suffix = ('-' + suffix) if suffix else ''
-
-      has_check_fix = self.fixes.check(file_check_suffix, self.input_text)
-      self.has_check_fixes = self.has_check_fixes or has_check_fix
-
-      has_check_message = self.messages.check(file_check_suffix, self.input_text)
-      self.has_check_messages = self.has_check_messages or has_check_message
-
-      has_check_note = self.notes.check(file_check_suffix, self.input_text)
-      self.has_check_notes = self.has_check_notes or has_check_note
-
-      if has_check_note and has_check_message:
-        sys.exit('Please use either %s or %s but not both' %
-          (self.notes.prefix, self.messages.prefix))
-
-      if not has_check_fix and not has_check_message and not has_check_note:
-        sys.exit('%s, %s or %s not found in the input' %
-          (self.fixes.prefix, self.messages.prefix, self.notes.prefix))
-
-    assert self.has_check_fixes or self.has_check_messages or self.has_check_notes
-
-  def prepare_test_inputs(self):
-    # Remove the contents of the CHECK lines to avoid CHECKs matching on
-    # themselves.  We need to keep the comments to preserve line numbers while
-    # avoiding empty lines which could potentially trigger formatting-related
-    # checks.
-    cleaned_test = re.sub('// *CHECK-[A-Z0-9\\-]*:[^\r\n]*', '//', self.input_text)
-    write_file(self.temp_file_name, cleaned_test)
-    write_file(self.original_file_name, cleaned_test)
-
-  def run_clang_tidy(self):
-    args = ['clang-tidy', self.temp_file_name, '-fix', '--checks=-*,' + self.check_name] + \
-        self.clang_tidy_extra_args + ['--'] + self.clang_extra_args
-    if self.expect_clang_tidy_error:
-      args.insert(0, 'not')
-    print('Running ' + repr(args) + '...')
-    clang_tidy_output = try_run(args)
-    print('------------------------ clang-tidy output -----------------------')
-    print(clang_tidy_output.encode(sys.stdout.encoding, errors="replace").decode(sys.stdout.encoding))
-    print('------------------------------------------------------------------')
-
-    diff_output = try_run(['diff', '-u', self.original_file_name, self.temp_file_name], False)
-    print('------------------------------ Fixes -----------------------------')
-    print(diff_output)
-    print('------------------------------------------------------------------')
-    return clang_tidy_output
-
-  def check_fixes(self):
-    if self.has_check_fixes:
-      try_run(['FileCheck', '-input-file=' + self.temp_file_name, self.input_file_name,
-              '-check-prefixes=' + ','.join(self.fixes.prefixes),
-              '-strict-whitespace'])
-
-  def check_messages(self, clang_tidy_output):
-    if self.has_check_messages:
-      messages_file = self.temp_file_name + '.msg'
-      write_file(messages_file, clang_tidy_output)
-      try_run(['FileCheck', '-input-file=' + messages_file, self.input_file_name,
-             '-check-prefixes=' + ','.join(self.messages.prefixes),
-             '-implicit-check-not={{warning|error}}:'])
-
-  def check_notes(self, clang_tidy_output):
-    if self.has_check_notes:
-      notes_file = self.temp_file_name + '.notes'
-      filtered_output = [line for line in clang_tidy_output.splitlines()
-                         if not ("note: FIX-IT applied" in line)]
-      write_file(notes_file, '\n'.join(filtered_output))
-      try_run(['FileCheck', '-input-file=' + notes_file, self.input_file_name,
-             '-check-prefixes=' + ','.join(self.notes.prefixes),
-             '-implicit-check-not={{note|warning|error}}:'])
-
-  def run(self):
-    self.read_input()
-    self.get_prefixes()
-    self.prepare_test_inputs()
-    clang_tidy_output = self.run_clang_tidy()
-    self.check_fixes()
-    self.check_messages(clang_tidy_output)
-    self.check_notes(clang_tidy_output)
+    def __init__(self, args, extra_args):
+        self.resource_dir = args.resource_dir
+        self.assume_file_name = args.assume_filename
+        self.input_file_name = args.input_file_name
+        self.check_name = args.check_name
+        self.temp_file_name = args.temp_file_name
+        self.original_file_name = self.temp_file_name + ".orig"
+        self.expect_clang_tidy_error = args.expect_clang_tidy_error
+        self.std = args.std
+        self.check_suffix = args.check_suffix
+        self.input_text = ""
+        self.has_check_fixes = False
+        self.has_check_messages = False
+        self.has_check_notes = False
+        self.fixes = MessagePrefix("CHECK-FIXES")
+        self.messages = MessagePrefix("CHECK-MESSAGES")
+        self.notes = MessagePrefix("CHECK-NOTES")
+
+        file_name_with_extension = self.assume_file_name or self.input_file_name
+        _, extension = os.path.splitext(file_name_with_extension)
+        if extension not in [".c", ".hpp", ".m", ".mm"]:
+            extension = ".cpp"
+        self.temp_file_name = self.temp_file_name + extension
+
+        self.clang_extra_args = []
+        self.clang_tidy_extra_args = extra_args
+        if "--" in extra_args:
+            i = self.clang_tidy_extra_args.index("--")
+            self.clang_extra_args = self.clang_tidy_extra_args[i + 1 :]
+            self.clang_tidy_extra_args = self.clang_tidy_extra_args[:i]
+
+        # If the test does not specify a config style, force an empty one; otherwise
+        # auto-detection logic can discover a ".clang-tidy" file that is not related to
+        # the test.
+        if not any(
+            [re.match("^-?-config(-file)?=", arg) for arg in self.clang_tidy_extra_args]
+        ):
+            self.clang_tidy_extra_args.append("--config={}")
+
+        if extension in [".m", ".mm"]:
+            self.clang_extra_args = [
+                "-fobjc-abi-version=2",
+                "-fobjc-arc",
+                "-fblocks",
+            ] + self.clang_extra_args
+
+        if extension in [".cpp", ".hpp", ".mm"]:
+            self.clang_extra_args.append("-std=" + self.std)
+
+        # Tests should not rely on STL being available, and instead provide mock
+        # implementations of relevant APIs.
+        self.clang_extra_args.append("-nostdinc++")
+
+        if self.resource_dir is not None:
+            self.clang_extra_args.append("-resource-dir=%s" % self.resource_dir)
+
+    def read_input(self):
+        with open(self.input_file_name, "r", encoding="utf-8") as input_file:
+            self.input_text = input_file.read()
+
+    def get_prefixes(self):
+        for suffix in self.check_suffix:
+            if suffix and not re.match("^[A-Z0-9\\-]+$", suffix):
+                sys.exit(
+                    'Only A..Z, 0..9 and "-" are allowed in check suffixes list,'
+                    + ' but "%s" was given' % suffix
+                )
+
+            file_check_suffix = ("-" + suffix) if suffix else ""
+
+            has_check_fix = self.fixes.check(file_check_suffix, self.input_text)
+            self.has_check_fixes = self.has_check_fixes or has_check_fix
+
+            has_check_message = self.messages.check(file_check_suffix, self.input_text)
+            self.has_check_messages = self.has_check_messages or has_check_message
+
+            has_check_note = self.notes.check(file_check_suffix, self.input_text)
+            self.has_check_notes = self.has_check_notes or has_check_note
+
+            if has_check_note and has_check_message:
+                sys.exit(
+                    "Please use either %s or %s but not both"
+                    % (self.notes.prefix, self.messages.prefix)
+                )
+
+            if not has_check_fix and not has_check_message and not has_check_note:
+                sys.exit(
+                    "%s, %s or %s not found in the input"
+                    % (self.fixes.prefix, self.messages.prefix, self.notes.prefix)
+                )
+
+        assert self.has_check_fixes or self.has_check_messages or self.has_check_notes
+
+    def prepare_test_inputs(self):
+        # Remove the contents of the CHECK lines to avoid CHECKs matching on
+        # themselves.  We need to keep the comments to preserve line numbers while
+        # avoiding empty lines which could potentially trigger formatting-related
+        # checks.
+        cleaned_test = re.sub("// *CHECK-[A-Z0-9\\-]*:[^\r\n]*", "//", self.input_text)
+        write_file(self.temp_file_name, cleaned_test)
+        write_file(self.original_file_name, cleaned_test)
+
+    def run_clang_tidy(self):
+        args = (
+            [
+                "clang-tidy",
+                self.temp_file_name,
+                "-fix",
+                "--checks=-*," + self.check_name,
+            ]
+            + self.clang_tidy_extra_args
+            + ["--"]
+            + self.clang_extra_args
+        )
+        if self.expect_clang_tidy_error:
+            args.insert(0, "not")
+        print("Running " + repr(args) + "...")
+        clang_tidy_output = try_run(args)
+        print("------------------------ clang-tidy output -----------------------")
+        print(
+            clang_tidy_output.encode(sys.stdout.encoding, errors="replace").decode(
+                sys.stdout.encoding
+            )
+        )
+        print("------------------------------------------------------------------")
+
+        diff_output = try_run(
+            ["diff", "-u", self.original_file_name, self.temp_file_name], False
+        )
+        print("------------------------------ Fixes -----------------------------")
+        print(diff_output)
+        print("------------------------------------------------------------------")
+        return clang_tidy_output
+
+    def check_fixes(self):
+        if self.has_check_fixes:
+            try_run(
+                [
+                    "FileCheck",
+                    "-input-file=" + self.temp_file_name,
+                    self.input_file_name,
+                    "-check-prefixes=" + ",".join(self.fixes.prefixes),
+                    "-strict-whitespace",
+                ]
+            )
+
+    def check_messages(self, clang_tidy_output):
+        if self.has_check_messages:
+            messages_file = self.temp_file_name + ".msg"
+            write_file(messages_file, clang_tidy_output)
+            try_run(
+                [
+                    "FileCheck",
+                    "-input-file=" + messages_file,
+                    self.input_file_name,
+                    "-check-prefixes=" + ",".join(self.messages.prefixes),
+                    "-implicit-check-not={{warning|error}}:",
+                ]
+            )
+
+    def check_notes(self, clang_tidy_output):
+        if self.has_check_notes:
+            notes_file = self.temp_file_name + ".notes"
+            filtered_output = [
+                line
+                for line in clang_tidy_output.splitlines()
+                if not ("note: FIX-IT applied" in line)
+            ]
+            write_file(notes_file, "\n".join(filtered_output))
+            try_run(
+                [
+                    "FileCheck",
+                    "-input-file=" + notes_file,
+                    self.input_file_name,
+                    "-check-prefixes=" + ",".join(self.notes.prefixes),
+                    "-implicit-check-not={{note|warning|error}}:",
+                ]
+            )
+
+    def run(self):
+        self.read_input()
+        self.get_prefixes()
+        self.prepare_test_inputs()
+        clang_tidy_output = self.run_clang_tidy()
+        self.check_fixes()
+        self.check_messages(clang_tidy_output)
+        self.check_notes(clang_tidy_output)
 
 
 def expand_std(std):
-  if std == 'c++98-or-later':
-    return ['c++98', 'c++11', 'c++14', 'c++17', 'c++20']
-  if std == 'c++11-or-later':
-    return ['c++11', 'c++14', 'c++17', 'c++20']
-  if std == 'c++14-or-later':
-    return ['c++14', 'c++17', 'c++20']
-  if std == 'c++17-or-later':
-    return ['c++17', 'c++20']
-  if std == 'c++20-or-later':
-    return ['c++20']
-  return [std]
+    if std == "c++98-or-later":
+        return ["c++98", "c++11", "c++14", "c++17", "c++20"]
+    if std == "c++11-or-later":
+        return ["c++11", "c++14", "c++17", "c++20"]
+    if std == "c++14-or-later":
+        return ["c++14", "c++17", "c++20"]
+    if std == "c++17-or-later":
+        return ["c++17", "c++20"]
+    if std == "c++20-or-later":
+        return ["c++20"]
+    return [std]
 
 
 def csv(string):
-  return string.split(',')
+    return string.split(",")
 
 
 def parse_arguments():
-  parser = argparse.ArgumentParser()
-  parser.add_argument('-expect-clang-tidy-error', action='store_true')
-  parser.add_argument('-resource-dir')
-  parser.add_argument('-assume-filename')
-  parser.add_argument('input_file_name')
-  parser.add_argument('check_name')
-  parser.add_argument('temp_file_name')
-  parser.add_argument(
-    '-check-suffix',
-    '-check-suffixes',
-    default=[''],
-    type=csv,
-    help='comma-separated list of FileCheck suffixes')
-  parser.add_argument('-std', type=csv, default=['c++11-or-later'])
-  return parser.parse_known_args()
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-expect-clang-tidy-error", action="store_true")
+    parser.add_argument("-resource-dir")
+    parser.add_argument("-assume-filename")
+    parser.add_argument("input_file_name")
+    parser.add_argument("check_name")
+    parser.add_argument("temp_file_name")
+    parser.add_argument(
+        "-check-suffix",
+        "-check-suffixes",
+        default=[""],
+        type=csv,
+        help="comma-separated list of FileCheck suffixes",
+    )
+    parser.add_argument("-std", type=csv, default=["c++11-or-later"])
+    return parser.parse_known_args()
 
 
 def main():
-  args, extra_args = parse_arguments()
+    args, extra_args = parse_arguments()
 
-  abbreviated_stds = args.std
-  for abbreviated_std in abbreviated_stds:
-    for std in expand_std(abbreviated_std):
-      args.std = std
-      CheckRunner(args, extra_args).run()
+    abbreviated_stds = args.std
+    for abbreviated_std in abbreviated_stds:
+        for std in expand_std(abbreviated_std):
+            args.std = std
+            CheckRunner(args, extra_args).run()
 
 
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()

diff --git a/clang-tools-extra/test/lit.cfg.py b/clang-tools-extra/test/lit.cfg.py
index 871596ac8585b..9f64fd3d2ffa2 100644
--- a/clang-tools-extra/test/lit.cfg.py
+++ b/clang-tools-extra/test/lit.cfg.py
@@ -10,55 +10,70 @@
 # Configuration file for the 'lit' test runner.
 
 # name: The name of this test suite.
-config.name = 'Clang Tools'
+config.name = "Clang Tools"
 
 # testFormat: The test format to use to interpret tests.
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.c', '.cpp', '.hpp', '.m', '.mm', '.cu', '.ll', '.cl', '.s',
-  '.modularize', '.module-map-checker', '.test']
+config.suffixes = [
+    ".c",
+    ".cpp",
+    ".hpp",
+    ".m",
+    ".mm",
+    ".cu",
+    ".ll",
+    ".cl",
+    ".s",
+    ".modularize",
+    ".module-map-checker",
+    ".test",
+]
 
 # Test-time dependencies located in directories called 'Inputs' are excluded
 # from test suites; there won't be any lit tests within them.
-config.excludes = ['Inputs']
+config.excludes = ["Inputs"]
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = os.path.dirname(__file__)
 
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.clang_tools_binary_dir, 'test')
+config.test_exec_root = os.path.join(config.clang_tools_binary_dir, "test")
 
 # Tools need the same environment setup as clang (we don't need clang itself).
-llvm_config.use_clang(required = False)
+llvm_config.use_clang(required=False)
 
 if config.clang_tidy_staticanalyzer:
-    config.available_features.add('static-analyzer')
+    config.available_features.add("static-analyzer")
 
 python_exec = shlex.quote(config.python_executable)
 check_clang_tidy = os.path.join(
-    config.test_source_root, "clang-tidy", "check_clang_tidy.py")
+    config.test_source_root, "clang-tidy", "check_clang_tidy.py"
+)
 config.substitutions.append(
-    ('%check_clang_tidy',
-     '%s %s' % (python_exec, check_clang_tidy)) )
+    ("%check_clang_tidy", "%s %s" % (python_exec, check_clang_tidy))
+)
 clang_tidy_diff = os.path.join(
-    config.test_source_root, "..", "clang-tidy", "tool", "clang-tidy-diff.py")
+    config.test_source_root, "..", "clang-tidy", "tool", "clang-tidy-diff.py"
+)
 config.substitutions.append(
-    ('%clang_tidy_diff',
-     '%s %s' % (python_exec, clang_tidy_diff)) )
+    ("%clang_tidy_diff", "%s %s" % (python_exec, clang_tidy_diff))
+)
+)
 run_clang_tidy = os.path.join(
-    config.test_source_root, "..", "clang-tidy", "tool", "run-clang-tidy.py")
+    config.test_source_root, "..", "clang-tidy", "tool", "run-clang-tidy.py"
+)
 config.substitutions.append(
-    ('%run_clang_tidy',
-     '%s %s' % (python_exec, run_clang_tidy)) )
+    ("%run_clang_tidy", "%s %s" % (python_exec, run_clang_tidy))
+)
 clang_tidy_headers = os.path.join(
-    config.test_source_root, "clang-tidy", "checkers", "Inputs", "Headers")
-config.substitutions.append(
-    ("%clang_tidy_headers", clang_tidy_headers) )
+    config.test_source_root, "clang-tidy", "checkers", "Inputs", "Headers"
+)
+config.substitutions.append(("%clang_tidy_headers", clang_tidy_headers))
 
 # Plugins (loadable modules)
 if config.has_plugins and config.llvm_plugin_ext:
-    config.available_features.add('plugins')
+    config.available_features.add("plugins")
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

diff --git a/clang/bindings/python/clang/__init__.py b/clang/bindings/python/clang/__init__.py
index 14944b63e6dac..8d0d1729e4a13 100644
--- a/clang/bindings/python/clang/__init__.py
+++ b/clang/bindings/python/clang/__init__.py
@@ -1,10 +1,10 @@
-#===- __init__.py - Clang Python Bindings --------------------*- python -*--===#
+# ===- __init__.py - Clang Python Bindings --------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 r"""
 Clang Library Bindings
@@ -19,5 +19,4 @@
     Bindings for the Clang indexing library.
 """
 
-__all__ = ['cindex']
-
+__all__ = ["cindex"]

diff --git a/clang/bindings/python/clang/cindex.py b/clang/bindings/python/clang/cindex.py
index 6d33650a71671..ff386d2094a0b 100644
--- a/clang/bindings/python/clang/cindex.py
+++ b/clang/bindings/python/clang/cindex.py
@@ -1,10 +1,10 @@
-#===- cindex.py - Python Indexing Library Bindings -----------*- python -*--===#
+# ===- cindex.py - Python Indexing Library Bindings -----------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 r"""
 Clang Indexing Library Bindings
@@ -68,10 +68,10 @@
 
 import os
 import sys
+
 if sys.version_info[0] == 3:
     # Python 3 strings are unicode, translate them to/from utf8 for C-interop.
     class c_interop_string(c_char_p):
-
         def __init__(self, p=None):
             if p is None:
                 p = ""
@@ -97,7 +97,9 @@ def from_param(cls, param):
             if param is None:
                 # Support passing null to C functions expecting char arrays
                 return None
-            raise TypeError("Cannot convert '{}' to '{}'".format(type(param).__name__, cls.__name__))
+            raise TypeError(
+                "Cannot convert '{}' to '{}'".format(type(param).__name__, cls.__name__)
+            )
 
         @staticmethod
         def to_python_string(x, *args):
@@ -106,7 +108,7 @@ def to_python_string(x, *args):
     def b(x):
         if isinstance(x, bytes):
             return x
-        return x.encode('utf8')
+        return x.encode("utf8")
 
 elif sys.version_info[0] == 2:
     # Python 2 strings are utf8 byte strings, no translation is needed for
@@ -121,6 +123,7 @@ def _to_python_string(x, *args):
     def b(x):
         return x
 
+
 # Importing ABC-s directly from collections is deprecated since Python 3.7,
 # will stop working in Python 3.8.
 # See: https://docs.python.org/dev/whatsnew/3.7.html#id3
@@ -135,9 +138,11 @@ def b(x):
 try:
     fspath = os.fspath
 except AttributeError:
+
     def fspath(x):
         return x
 
+
 # ctypes doesn't implicitly convert c_void_p to the appropriate wrapper
 # object. This is a problem, because it means that from_parameter will see an
 # integer and pass the wrong value on platforms where int != void*. Work around
@@ -148,6 +153,7 @@ def fspath(x):
 
 ### Exception Classes ###
 
+
 class TranslationUnitLoadError(Exception):
     """Represents an error that occurred when loading a TranslationUnit.
 
@@ -156,8 +162,10 @@ class TranslationUnitLoadError(Exception):
 
     FIXME: Make libclang expose additional error information in this scenario.
     """
+
     pass
 
+
 class TranslationUnitSaveError(Exception):
     """Represents an error that occurred when saving a TranslationUnit.
 
@@ -181,15 +189,19 @@ def __init__(self, enumeration, message):
         assert isinstance(enumeration, int)
 
         if enumeration < 1 or enumeration > 3:
-            raise Exception("Encountered undefined TranslationUnit save error "
-                            "constant: %d. Please file a bug to have this "
-                            "value supported." % enumeration)
+            raise Exception(
+                "Encountered undefined TranslationUnit save error "
+                "constant: %d. Please file a bug to have this "
+                "value supported." % enumeration
+            )
 
         self.save_error = enumeration
-        Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
+        Exception.__init__(self, "Error %d: %s" % (enumeration, message))
+
 
 ### Structures and Utility Classes ###
 
+
 class CachedProperty(object):
     """Decorator that lazy-loads the value of a property.
 
@@ -233,14 +245,16 @@ class SourceLocation(Structure):
     """
     A SourceLocation represents a particular location within a source file.
     """
+
     _fields_ = [("ptr_data", c_void_p * 2), ("int_data", c_uint)]
     _data = None
 
     def _get_instantiation(self):
         if self._data is None:
             f, l, c, o = c_object_p(), c_uint(), c_uint(), c_uint()
-            conf.lib.clang_getInstantiationLocation(self, byref(f), byref(l),
-                    byref(c), byref(o))
+            conf.lib.clang_getInstantiationLocation(
+                self, byref(f), byref(l), byref(c), byref(o)
+            )
             if f:
                 f = File(f)
             else:
@@ -303,17 +317,23 @@ def __repr__(self):
         else:
             filename = None
         return "<SourceLocation file %r, line %r, column %r>" % (
-            filename, self.line, self.column)
+            filename,
+            self.line,
+            self.column,
+        )
+
 
 class SourceRange(Structure):
     """
     A SourceRange describes a range of source locations within the source
     code.
     """
+
     _fields_ = [
         ("ptr_data", c_void_p * 2),
         ("begin_int_data", c_uint),
-        ("end_int_data", c_uint)]
+        ("end_int_data", c_uint),
+    ]
 
     # FIXME: Eliminate this and make normal constructor? Requires hiding ctypes
     # object.
@@ -349,8 +369,10 @@ def __contains__(self, other):
             return False
         if other.file is None and self.start.file is None:
             pass
-        elif ( self.start.file.name != other.file.name or
-               other.file.name != self.end.file.name):
+        elif (
+            self.start.file.name != other.file.name
+            or other.file.name != self.end.file.name
+        ):
             # same file name
             return False
         # same file, in between lines
@@ -369,6 +391,7 @@ def __contains__(self, other):
     def __repr__(self):
         return "<SourceRange start %r, end %r>" % (self.start, self.end)
 
+
 class Diagnostic(object):
     """
     A Diagnostic is a single instance of a Clang diagnostic. It includes the
@@ -377,18 +400,18 @@ class Diagnostic(object):
     """
 
     Ignored = 0
-    Note    = 1
+    Note = 1
     Warning = 2
-    Error   = 3
-    Fatal   = 4
+    Error = 3
+    Fatal = 4
 
     DisplaySourceLocation = 0x01
-    DisplayColumn         = 0x02
-    DisplaySourceRanges   = 0x04
-    DisplayOption         = 0x08
-    DisplayCategoryId     = 0x10
-    DisplayCategoryName   = 0x20
-    _FormatOptionsMask    = 0x3f
+    DisplayColumn = 0x02
+    DisplaySourceRanges = 0x04
+    DisplayOption = 0x08
+    DisplayCategoryId = 0x10
+    DisplayCategoryName = 0x20
+    _FormatOptionsMask = 0x3F
 
     def __init__(self, ptr):
         self.ptr = ptr
@@ -418,7 +441,7 @@ def __len__(self):
                 return int(conf.lib.clang_getDiagnosticNumRanges(self.diag))
 
             def __getitem__(self, key):
-                if (key >= len(self)):
+                if key >= len(self):
                     raise IndexError
                 return conf.lib.clang_getDiagnosticRange(self.diag, key)
 
@@ -435,8 +458,7 @@ def __len__(self):
 
             def __getitem__(self, key):
                 range = SourceRange()
-                value = conf.lib.clang_getDiagnosticFixIt(self.diag, key,
-                        byref(range))
+                value = conf.lib.clang_getDiagnosticFixIt(self.diag, key, byref(range))
                 if len(value) == 0:
                     raise IndexError
 
@@ -493,18 +515,22 @@ def format(self, options=None):
         if options is None:
             options = conf.lib.clang_defaultDiagnosticDisplayOptions()
         if options & ~Diagnostic._FormatOptionsMask:
-            raise ValueError('Invalid format options')
+            raise ValueError("Invalid format options")
         return conf.lib.clang_formatDiagnostic(self, options)
 
     def __repr__(self):
         return "<Diagnostic severity %r, location %r, spelling %r>" % (
-            self.severity, self.location, self.spelling)
+            self.severity,
+            self.location,
+            self.spelling,
+        )
 
     def __str__(self):
         return self.format()
 
     def from_param(self):
-      return self.ptr
+        return self.ptr
+
 
 class FixIt(object):
     """
@@ -520,6 +546,7 @@ def __init__(self, range, value):
     def __repr__(self):
         return "<FixIt range %r, value %r>" % (self.range, self.value)
 
+
 class TokenGroup(object):
     """Helper class to facilitate token management.
 
@@ -534,6 +561,7 @@ class TokenGroup(object):
 
     You should not instantiate this class outside of this module.
     """
+
     def __init__(self, tu, memory, count):
         self._tu = tu
         self._memory = memory
@@ -552,8 +580,7 @@ def get_tokens(tu, extent):
         tokens_memory = POINTER(Token)()
         tokens_count = c_uint()
 
-        conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),
-                byref(tokens_count))
+        conf.lib.clang_tokenize(tu, extent, byref(tokens_memory), byref(tokens_count))
 
         count = int(tokens_count.value)
 
@@ -575,10 +602,11 @@ def get_tokens(tu, extent):
 
             yield token
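
(A sketch of driving TokenGroup indirectly through TranslationUnit.get_tokens,
with the same hypothetical "sample.c":)

    from clang.cindex import Index, TokenKind

    index = Index.create()
    tu = index.parse("sample.c")
    for token in tu.get_tokens(extent=tu.cursor.extent):
        if token.kind == TokenKind.IDENTIFIER:
            print(token.spelling, token.location.line)
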
 
+
 class TokenKind(object):
     """Describes a specific type of a Token."""
 
-    _value_map = {} # int -> TokenKind
+    _value_map = {}  # int -> TokenKind
 
     def __init__(self, value, name):
         """Create a new TokenKind instance from a numeric value and a name."""
@@ -586,7 +614,7 @@ def __init__(self, value, name):
         self.name = name
 
     def __repr__(self):
-        return 'TokenKind.%s' % (self.name,)
+        return "TokenKind.%s" % (self.name,)
 
     @staticmethod
     def from_value(value):
@@ -594,7 +622,7 @@ def from_value(value):
         result = TokenKind._value_map.get(value, None)
 
         if result is None:
-            raise ValueError('Unknown TokenKind: %d' % value)
+            raise ValueError("Unknown TokenKind: %d" % value)
 
         return result
 
@@ -606,12 +634,13 @@ def register(value, name):
         package.
         """
         if value in TokenKind._value_map:
-            raise ValueError('TokenKind already registered: %d' % value)
+            raise ValueError("TokenKind already registered: %d" % value)
 
         kind = TokenKind(value, name)
         TokenKind._value_map[value] = kind
         setattr(TokenKind, name, kind)
 
+
 ### Cursor Kinds ###
 class BaseEnumeration(object):
     """
@@ -629,13 +658,13 @@ def __init__(self, value):
         if value >= len(self.__class__._kinds):
             self.__class__._kinds += [None] * (value - len(self.__class__._kinds) + 1)
         if self.__class__._kinds[value] is not None:
-            raise ValueError('{0} value {1} already loaded'.format(
-                str(self.__class__), value))
+            raise ValueError(
+                "{0} value {1} already loaded".format(str(self.__class__), value)
+            )
         self.value = value
         self.__class__._kinds[value] = self
         self.__class__._name_map = None
 
-
     def from_param(self):
         return self.value
 
@@ -652,11 +681,14 @@ def name(self):
     @classmethod
     def from_id(cls, id):
         if id >= len(cls._kinds) or cls._kinds[id] is None:
-            raise ValueError('Unknown template argument kind %d' % id)
+            raise ValueError("Unknown template argument kind %d" % id)
         return cls._kinds[id]
 
     def __repr__(self):
-        return '%s.%s' % (self.__class__, self.name,)
+        return "%s.%s" % (
+            self.__class__,
+            self.name,
+        )
 
 
 class CursorKind(BaseEnumeration):
@@ -710,7 +742,8 @@ def is_unexposed(self):
         return conf.lib.clang_isUnexposed(self)
 
     def __repr__(self):
-        return 'CursorKind.%s' % (self.name,)
+        return "CursorKind.%s" % (self.name,)
+
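
(A sketch of how these enumeration singletons behave, assuming libclang is
loadable for the is_declaration() call:)

    from clang.cindex import CursorKind

    kind = CursorKind.FUNCTION_DECL
    print(kind.is_declaration())                   # True
    print(CursorKind.from_id(kind.value) is kind)  # True: one instance per value
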
 
 ###
 # Declaration Kinds
@@ -1385,6 +1418,7 @@ class TemplateArgumentKind(BaseEnumeration):
     _kinds = []
     _name_map = None
 
+
 TemplateArgumentKind.NULL = TemplateArgumentKind(0)
 TemplateArgumentKind.TYPE = TemplateArgumentKind(1)
 TemplateArgumentKind.DECLARATION = TemplateArgumentKind(2)
@@ -1403,7 +1437,8 @@ class ExceptionSpecificationKind(BaseEnumeration):
     _name_map = None
 
     def __repr__(self):
-        return 'ExceptionSpecificationKind.{}'.format(self.name)
+        return "ExceptionSpecificationKind.{}".format(self.name)
+
 
 ExceptionSpecificationKind.NONE = ExceptionSpecificationKind(0)
 ExceptionSpecificationKind.DYNAMIC_NONE = ExceptionSpecificationKind(1)
@@ -1417,11 +1452,13 @@ def __repr__(self):
 
 ### Cursors ###
 
+
 class Cursor(Structure):
     """
     The Cursor class represents a reference to an element within the AST. It
     acts as a kind of iterator.
     """
+
     _fields_ = [("_kind_id", c_int), ("xdata", c_int), ("data", c_void_p * 3)]
 
     @staticmethod
@@ -1453,23 +1490,19 @@ def is_const_method(self):
         return conf.lib.clang_CXXMethod_isConst(self)
 
     def is_converting_constructor(self):
-        """Returns True if the cursor refers to a C++ converting constructor.
-        """
+        """Returns True if the cursor refers to a C++ converting constructor."""
         return conf.lib.clang_CXXConstructor_isConvertingConstructor(self)
 
     def is_copy_constructor(self):
-        """Returns True if the cursor refers to a C++ copy constructor.
-        """
+        """Returns True if the cursor refers to a C++ copy constructor."""
         return conf.lib.clang_CXXConstructor_isCopyConstructor(self)
 
     def is_default_constructor(self):
-        """Returns True if the cursor refers to a C++ default constructor.
-        """
+        """Returns True if the cursor refers to a C++ default constructor."""
         return conf.lib.clang_CXXConstructor_isDefaultConstructor(self)
 
     def is_move_constructor(self):
-        """Returns True if the cursor refers to a C++ move constructor.
-        """
+        """Returns True if the cursor refers to a C++ move constructor."""
         return conf.lib.clang_CXXConstructor_isMoveConstructor(self)
 
     def is_default_method(self):
@@ -1610,8 +1643,7 @@ def is_abstract_record(self):
         return conf.lib.clang_CXXRecord_isAbstract(self)
 
     def is_scoped_enum(self):
-        """Returns True if the cursor refers to a scoped enum declaration.
-        """
+        """Returns True if the cursor refers to a scoped enum declaration."""
         return conf.lib.clang_EnumDecl_isScoped(self)
 
     def get_definition(self):
@@ -1649,7 +1681,7 @@ def kind(self):
     @property
     def spelling(self):
         """Return the spelling of the entity pointed at by the cursor."""
-        if not hasattr(self, '_spelling'):
+        if not hasattr(self, "_spelling"):
             self._spelling = conf.lib.clang_getCursorSpelling(self)
 
         return self._spelling
@@ -1663,7 +1695,7 @@ def displayname(self):
         cursor, such as the parameters of a function or template or the
         arguments of a class template specialization.
         """
-        if not hasattr(self, '_displayname'):
+        if not hasattr(self, "_displayname"):
             self._displayname = conf.lib.clang_getCursorDisplayName(self)
 
         return self._displayname
@@ -1671,7 +1703,7 @@ def displayname(self):
     @property
     def mangled_name(self):
         """Return the mangled name for the entity referenced by this cursor."""
-        if not hasattr(self, '_mangled_name'):
+        if not hasattr(self, "_mangled_name"):
             self._mangled_name = conf.lib.clang_Cursor_getMangling(self)
 
         return self._mangled_name
@@ -1682,7 +1714,7 @@ def location(self):
         Return the source location (the starting character) of the entity
         pointed at by the cursor.
         """
-        if not hasattr(self, '_loc'):
+        if not hasattr(self, "_loc"):
             self._loc = conf.lib.clang_getCursorLocation(self)
 
         return self._loc
@@ -1690,7 +1722,7 @@ def location(self):
     @property
     def linkage(self):
         """Return the linkage of this cursor."""
-        if not hasattr(self, '_linkage'):
+        if not hasattr(self, "_linkage"):
             self._linkage = conf.lib.clang_getCursorLinkage(self)
 
         return LinkageKind.from_id(self._linkage)
@@ -1698,7 +1730,7 @@ def linkage(self):
     @property
     def tls_kind(self):
         """Return the thread-local storage (TLS) kind of this cursor."""
-        if not hasattr(self, '_tls_kind'):
+        if not hasattr(self, "_tls_kind"):
             self._tls_kind = conf.lib.clang_getCursorTLSKind(self)
 
         return TLSKind.from_id(self._tls_kind)
@@ -1709,7 +1741,7 @@ def extent(self):
         Return the source range (the range of text) occupied by the entity
         pointed at by the cursor.
         """
-        if not hasattr(self, '_extent'):
+        if not hasattr(self, "_extent"):
             self._extent = conf.lib.clang_getCursorExtent(self)
 
         return self._extent
@@ -1720,7 +1752,7 @@ def storage_class(self):
         Retrieves the storage class (if any) of the entity pointed at by the
         cursor.
         """
-        if not hasattr(self, '_storage_class'):
+        if not hasattr(self, "_storage_class"):
             self._storage_class = conf.lib.clang_Cursor_getStorageClass(self)
 
         return StorageClass.from_id(self._storage_class)
@@ -1730,7 +1762,7 @@ def availability(self):
         """
         Retrieves the availability of the entity pointed at by the cursor.
         """
-        if not hasattr(self, '_availability'):
+        if not hasattr(self, "_availability"):
             self._availability = conf.lib.clang_getCursorAvailability(self)
 
         return AvailabilityKind.from_id(self._availability)
@@ -1741,7 +1773,7 @@ def access_specifier(self):
         Retrieves the access specifier (if any) of the entity pointed at by the
         cursor.
         """
-        if not hasattr(self, '_access_specifier'):
+        if not hasattr(self, "_access_specifier"):
             self._access_specifier = conf.lib.clang_getCXXAccessSpecifier(self)
 
         return AccessSpecifier.from_id(self._access_specifier)
@@ -1751,7 +1783,7 @@ def type(self):
         """
         Retrieve the Type (if any) of the entity pointed at by the cursor.
         """
-        if not hasattr(self, '_type'):
+        if not hasattr(self, "_type"):
             self._type = conf.lib.clang_getCursorType(self)
 
         return self._type
@@ -1765,7 +1797,7 @@ def canonical(self):
         declarations for the same class, the canonical cursor for the forward
         declarations will be identical.
         """
-        if not hasattr(self, '_canonical'):
+        if not hasattr(self, "_canonical"):
             self._canonical = conf.lib.clang_getCanonicalCursor(self)
 
         return self._canonical
@@ -1773,20 +1805,22 @@ def canonical(self):
     @property
     def result_type(self):
         """Retrieve the Type of the result for this Cursor."""
-        if not hasattr(self, '_result_type'):
+        if not hasattr(self, "_result_type"):
             self._result_type = conf.lib.clang_getCursorResultType(self)
 
         return self._result_type
 
     @property
     def exception_specification_kind(self):
-        '''
+        """
         Retrieve the exception specification kind, which is one of the values
         from the ExceptionSpecificationKind enumeration.
-        '''
-        if not hasattr(self, '_exception_specification_kind'):
+        """
+        if not hasattr(self, "_exception_specification_kind"):
             exc_kind = conf.lib.clang_getCursorExceptionSpecificationType(self)
-            self._exception_specification_kind = ExceptionSpecificationKind.from_id(exc_kind)
+            self._exception_specification_kind = ExceptionSpecificationKind.from_id(
+                exc_kind
+            )
 
         return self._exception_specification_kind
 
@@ -1797,10 +1831,9 @@ def underlying_typedef_type(self):
         Returns a Type for the typedef this cursor is a declaration for. If
         the current cursor is not a typedef, this raises.
         """
-        if not hasattr(self, '_underlying_type'):
+        if not hasattr(self, "_underlying_type"):
             assert self.kind.is_declaration()
-            self._underlying_type = \
-              conf.lib.clang_getTypedefDeclUnderlyingType(self)
+            self._underlying_type = conf.lib.clang_getTypedefDeclUnderlyingType(self)
 
         return self._underlying_type
 
@@ -1811,7 +1844,7 @@ def enum_type(self):
         Returns a Type corresponding to an integer. If the cursor is not for an
         enum, this raises.
         """
-        if not hasattr(self, '_enum_type'):
+        if not hasattr(self, "_enum_type"):
             assert self.kind == CursorKind.ENUM_DECL
             self._enum_type = conf.lib.clang_getEnumDeclIntegerType(self)
 
@@ -1820,24 +1853,25 @@ def enum_type(self):
     @property
     def enum_value(self):
         """Return the value of an enum constant."""
-        if not hasattr(self, '_enum_value'):
+        if not hasattr(self, "_enum_value"):
             assert self.kind == CursorKind.ENUM_CONSTANT_DECL
             # Figure out the underlying type of the enum to know if it
             # is a signed or unsigned quantity.
             underlying_type = self.type
             if underlying_type.kind == TypeKind.ENUM:
                 underlying_type = underlying_type.get_declaration().enum_type
-            if underlying_type.kind in (TypeKind.CHAR_U,
-                                        TypeKind.UCHAR,
-                                        TypeKind.CHAR16,
-                                        TypeKind.CHAR32,
-                                        TypeKind.USHORT,
-                                        TypeKind.UINT,
-                                        TypeKind.ULONG,
-                                        TypeKind.ULONGLONG,
-                                        TypeKind.UINT128):
-                self._enum_value = \
-                  conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
+            if underlying_type.kind in (
+                TypeKind.CHAR_U,
+                TypeKind.UCHAR,
+                TypeKind.CHAR16,
+                TypeKind.CHAR32,
+                TypeKind.USHORT,
+                TypeKind.UINT,
+                TypeKind.ULONG,
+                TypeKind.ULONGLONG,
+                TypeKind.UINT128,
+            ):
+                self._enum_value = conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
             else:
                 self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)
         return self._enum_value
@@ -1845,16 +1879,15 @@ def enum_value(self):
     @property
     def objc_type_encoding(self):
         """Return the Objective-C type encoding as a str."""
-        if not hasattr(self, '_objc_type_encoding'):
-            self._objc_type_encoding = \
-              conf.lib.clang_getDeclObjCTypeEncoding(self)
+        if not hasattr(self, "_objc_type_encoding"):
+            self._objc_type_encoding = conf.lib.clang_getDeclObjCTypeEncoding(self)
 
         return self._objc_type_encoding
 
     @property
     def hash(self):
         """Returns a hash of the cursor as an int."""
-        if not hasattr(self, '_hash'):
+        if not hasattr(self, "_hash"):
             self._hash = conf.lib.clang_hashCursor(self)
 
         return self._hash
@@ -1862,7 +1895,7 @@ def hash(self):
     @property
     def semantic_parent(self):
         """Return the semantic parent for this cursor."""
-        if not hasattr(self, '_semantic_parent'):
+        if not hasattr(self, "_semantic_parent"):
             self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self)
 
         return self._semantic_parent
@@ -1870,7 +1903,7 @@ def semantic_parent(self):
     @property
     def lexical_parent(self):
         """Return the lexical parent for this cursor."""
-        if not hasattr(self, '_lexical_parent'):
+        if not hasattr(self, "_lexical_parent"):
             self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self)
 
         return self._lexical_parent
@@ -1888,7 +1921,7 @@ def referenced(self):
         For a cursor that is a reference, returns a cursor
         representing the entity that it references.
         """
-        if not hasattr(self, '_referenced'):
+        if not hasattr(self, "_referenced"):
             self._referenced = conf.lib.clang_getCursorReferenced(self)
 
         return self._referenced
@@ -1942,10 +1975,10 @@ def visitor(child, parent, children):
             # Create reference to TU so it isn't GC'd before Cursor.
             child._tu = self._tu
             children.append(child)
-            return 1 # continue
+            return 1  # continue
+
         children = []
-        conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),
-            children)
+        conf.lib.clang_visitChildren(self, callbacks["cursor_visit"](visitor), children)
         return iter(children)
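
(A sketch of a recursive AST dump built on get_children(), with a hypothetical
"sample.c":)

    from clang.cindex import Index

    def dump(node, depth=0):
        print("  " * depth + str(node.kind), node.spelling)
        for child in node.get_children():
            dump(child, depth + 1)

    index = Index.create()
    dump(index.parse("sample.c").cursor)
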
 
     def walk_preorder(self):
@@ -2005,7 +2038,7 @@ def from_result(res, fn, args):
                 tu = arg
                 break
 
-            if hasattr(arg, 'translation_unit'):
+            if hasattr(arg, "translation_unit"):
                 tu = arg.translation_unit
                 break
 
@@ -2023,6 +2056,7 @@ def from_cursor_result(res, fn, args):
         res._tu = args[0]._tu
         return res
 
+
 class StorageClass(object):
     """
     Describes the storage class of a declaration
@@ -2036,7 +2070,7 @@ def __init__(self, value):
         if value >= len(StorageClass._kinds):
             StorageClass._kinds += [None] * (value - len(StorageClass._kinds) + 1)
         if StorageClass._kinds[value] is not None:
-            raise ValueError('StorageClass already loaded')
+            raise ValueError("StorageClass already loaded")
         self.value = value
         StorageClass._kinds[value] = self
         StorageClass._name_map = None
@@ -2049,19 +2083,20 @@ def name(self):
         """Get the enumeration name of this storage class."""
         if self._name_map is None:
             self._name_map = {}
-            for key,value in StorageClass.__dict__.items():
-                if isinstance(value,StorageClass):
+            for key, value in StorageClass.__dict__.items():
+                if isinstance(value, StorageClass):
                     self._name_map[value] = key
         return self._name_map[self]
 
     @staticmethod
     def from_id(id):
         if id >= len(StorageClass._kinds) or not StorageClass._kinds[id]:
-            raise ValueError('Unknown storage class %d' % id)
+            raise ValueError("Unknown storage class %d" % id)
         return StorageClass._kinds[id]
 
     def __repr__(self):
-        return 'StorageClass.%s' % (self.name,)
+        return "StorageClass.%s" % (self.name,)
+
 
 StorageClass.INVALID = StorageClass(0)
 StorageClass.NONE = StorageClass(1)
@@ -2074,6 +2109,7 @@ def __repr__(self):
 
 ### Availability Kinds ###
 
+
 class AvailabilityKind(BaseEnumeration):
     """
     Describes the availability of an entity.
@@ -2084,7 +2120,8 @@ class AvailabilityKind(BaseEnumeration):
     _name_map = None
 
     def __repr__(self):
-        return 'AvailabilityKind.%s' % (self.name,)
+        return "AvailabilityKind.%s" % (self.name,)
+
 
 AvailabilityKind.AVAILABLE = AvailabilityKind(0)
 AvailabilityKind.DEPRECATED = AvailabilityKind(1)
@@ -2093,6 +2130,7 @@ def __repr__(self):
 
 ### C++ access specifiers ###
 
+
 class AccessSpecifier(BaseEnumeration):
     """
     Describes the access of a C++ class member
@@ -2106,7 +2144,8 @@ def from_param(self):
         return self.value
 
     def __repr__(self):
-        return 'AccessSpecifier.%s' % (self.name,)
+        return "AccessSpecifier.%s" % (self.name,)
+
 
 AccessSpecifier.INVALID = AccessSpecifier(0)
 AccessSpecifier.PUBLIC = AccessSpecifier(1)
@@ -2116,6 +2155,7 @@ def __repr__(self):
 
 ### Type Kinds ###
 
+
 class TypeKind(BaseEnumeration):
     """
     Describes the kind of type.
@@ -2131,7 +2171,8 @@ def spelling(self):
         return conf.lib.clang_getTypeKindSpelling(self.value)
 
     def __repr__(self):
-        return 'TypeKind.%s' % (self.name,)
+        return "TypeKind.%s" % (self.name,)
+
 
 TypeKind.INVALID = TypeKind(0)
 TypeKind.UNEXPOSED = TypeKind(1)
@@ -2231,6 +2272,7 @@ def __repr__(self):
 TypeKind.EXTVECTOR = TypeKind(176)
 TypeKind.ATOMIC = TypeKind(177)
 
+
 class RefQualifierKind(BaseEnumeration):
     """Describes a specific ref-qualifier of a type."""
 
@@ -2242,12 +2284,14 @@ def from_param(self):
         return self.value
 
     def __repr__(self):
-        return 'RefQualifierKind.%s' % (self.name,)
+        return "RefQualifierKind.%s" % (self.name,)
+
 
 RefQualifierKind.NONE = RefQualifierKind(0)
 RefQualifierKind.LVALUE = RefQualifierKind(1)
 RefQualifierKind.RVALUE = RefQualifierKind(2)
 
+
 class LinkageKind(BaseEnumeration):
     """Describes the kind of linkage of a cursor."""
 
@@ -2259,7 +2303,8 @@ def from_param(self):
         return self.value
 
     def __repr__(self):
-        return 'LinkageKind.%s' % (self.name,)
+        return "LinkageKind.%s" % (self.name,)
+
 
 LinkageKind.INVALID = LinkageKind(0)
 LinkageKind.NO_LINKAGE = LinkageKind(1)
@@ -2267,6 +2312,7 @@ def __repr__(self):
 LinkageKind.UNIQUE_EXTERNAL = LinkageKind(3)
 LinkageKind.EXTERNAL = LinkageKind(4)
 
+
 class TLSKind(BaseEnumeration):
     """Describes the kind of thread-local storage (TLS) of a cursor."""
 
@@ -2278,16 +2324,19 @@ def from_param(self):
         return self.value
 
     def __repr__(self):
-        return 'TLSKind.%s' % (self.name,)
+        return "TLSKind.%s" % (self.name,)
+
 
 TLSKind.NONE = TLSKind(0)
 TLSKind.DYNAMIC = TLSKind(1)
 TLSKind.STATIC = TLSKind(2)
 
+
 class Type(Structure):
     """
     The type of an element in the abstract syntax tree.
     """
+
     _fields_ = [("_kind_id", c_int), ("data", c_void_p * 2)]
 
     @property
@@ -2301,6 +2350,7 @@ def argument_types(self):
         The returned object is iterable and indexable. Each item in the
         container is a Type instance.
         """
+
         class ArgumentsIterator(collections_abc.Sequence):
             def __init__(self, parent):
                 self.parent = parent
@@ -2321,8 +2371,10 @@ def __getitem__(self, key):
                     raise IndexError("Only non-negative indexes are accepted.")
 
                 if key >= len(self):
-                    raise IndexError("Index greater than container length: "
-                                     "%d > %d" % ( key, len(self) ))
+                    raise IndexError(
+                        "Index greater than container length: "
+                        "%d > %d" % (key, len(self))
+                    )
 
                 result = conf.lib.clang_getArgType(self.parent, key)
                 if result.kind == TypeKind.INVALID:
@@ -2342,7 +2394,7 @@ def element_type(self):
         """
         result = conf.lib.clang_getElementType(self)
         if result.kind == TypeKind.INVALID:
-            raise Exception('Element type not available on this type.')
+            raise Exception("Element type not available on this type.")
 
         return result
 
@@ -2356,7 +2408,7 @@ def element_count(self):
         """
         result = conf.lib.clang_getNumElements(self)
         if result < 0:
-            raise Exception('Type does not have elements.')
+            raise Exception("Type does not have elements.")
 
         return result
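
(A sketch of the element and argument-type accessors, parsing from an in-memory
buffer; the name "t.c" is only a label for the unsaved buffer:)

    from clang.cindex import CursorKind, Index, TypeKind

    index = Index.create()
    tu = index.parse("t.c", unsaved_files=[("t.c", "int a[4]; int f(int, char);")])
    for c in tu.cursor.get_children():
        if c.type.kind == TypeKind.CONSTANTARRAY:
            print(c.spelling, c.type.element_type.kind, c.type.element_count)
        if c.kind == CursorKind.FUNCTION_DECL:
            print([t.kind for t in c.type.argument_types()])
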
 
@@ -2373,7 +2425,7 @@ def from_result(res, fn, args):
 
         tu = None
         for arg in args:
-            if hasattr(arg, 'translation_unit'):
+            if hasattr(arg, "translation_unit"):
                 tu = arg.translation_unit
                 break
 
@@ -2504,8 +2556,7 @@ def get_ref_qualifier(self):
         """
         Retrieve the ref-qualifier of the type.
         """
-        return RefQualifierKind.from_id(
-                conf.lib.clang_Type_getCXXRefQualifier(self))
+        return RefQualifierKind.from_id(conf.lib.clang_Type_getCXXRefQualifier(self))
 
     def get_fields(self):
         """Return an iterator for accessing the fields of this type."""
@@ -2516,10 +2567,12 @@ def visitor(field, children):
             # Create reference to TU so it isn't GC'd before Cursor.
             field._tu = self._tu
             fields.append(field)
-            return 1 # continue
+            return 1  # continue
+
         fields = []
-        conf.lib.clang_Type_visitFields(self,
-                            callbacks['fields_visit'](visitor), fields)
+        conf.lib.clang_Type_visitFields(
+            self, callbacks["fields_visit"](visitor), fields
+        )
         return iter(fields)
 
     def get_exception_specification_kind(self):
@@ -2528,7 +2581,8 @@ def get_exception_specification_kind(self):
         the ExceptionSpecificationKind enumeration.
         """
         return ExceptionSpecificationKind.from_id(
-                conf.lib.clang.getExceptionSpecificationType(self))
+            conf.lib.clang.getExceptionSpecificationType(self)
+        )
 
     @property
     def spelling(self):
@@ -2544,17 +2598,20 @@ def __eq__(self, other):
     def __ne__(self, other):
         return not self.__eq__(other)
 
+
 ## CIndex Objects ##
 
 # CIndex objects (derived from ClangObject) are essentially lightweight
 # wrappers attached to some underlying object, which is exposed via CIndex as
 # a void*.
 
+
 class ClangObject(object):
     """
     A helper for Clang objects. This class helps act as an intermediary for
     the ctypes library and the Clang CIndex library.
     """
+
     def __init__(self, obj):
         assert isinstance(obj, c_object_p) and obj
         self.obj = self._as_parameter_ = obj
@@ -2565,35 +2622,38 @@ def from_param(self):
 
 class _CXUnsavedFile(Structure):
     """Helper for passing unsaved file arguments."""
-    _fields_ = [("name", c_char_p), ("contents", c_char_p), ('length', c_ulong)]
+
+    _fields_ = [("name", c_char_p), ("contents", c_char_p), ("length", c_ulong)]
+
 
 # Function calls through the Python interface are rather slow. Fortunately,
 # for most symbols, we do not need to perform a function call. Their spelling
 # never changes and is consequently provided by this spelling cache.
 SpellingCache = {
-            # 0: CompletionChunk.Kind("Optional"),
-            # 1: CompletionChunk.Kind("TypedText"),
-            # 2: CompletionChunk.Kind("Text"),
-            # 3: CompletionChunk.Kind("Placeholder"),
-            # 4: CompletionChunk.Kind("Informative"),
-            # 5 : CompletionChunk.Kind("CurrentParameter"),
-            6: '(',   # CompletionChunk.Kind("LeftParen"),
-            7: ')',   # CompletionChunk.Kind("RightParen"),
-            8: '[',   # CompletionChunk.Kind("LeftBracket"),
-            9: ']',   # CompletionChunk.Kind("RightBracket"),
-            10: '{',  # CompletionChunk.Kind("LeftBrace"),
-            11: '}',  # CompletionChunk.Kind("RightBrace"),
-            12: '<',  # CompletionChunk.Kind("LeftAngle"),
-            13: '>',  # CompletionChunk.Kind("RightAngle"),
-            14: ', ', # CompletionChunk.Kind("Comma"),
-            # 15: CompletionChunk.Kind("ResultType"),
-            16: ':',  # CompletionChunk.Kind("Colon"),
-            17: ';',  # CompletionChunk.Kind("SemiColon"),
-            18: '=',  # CompletionChunk.Kind("Equal"),
-            19: ' ',  # CompletionChunk.Kind("HorizontalSpace"),
-            # 20: CompletionChunk.Kind("VerticalSpace")
+    # 0: CompletionChunk.Kind("Optional"),
+    # 1: CompletionChunk.Kind("TypedText"),
+    # 2: CompletionChunk.Kind("Text"),
+    # 3: CompletionChunk.Kind("Placeholder"),
+    # 4: CompletionChunk.Kind("Informative"),
+    # 5 : CompletionChunk.Kind("CurrentParameter"),
+    6: "(",  # CompletionChunk.Kind("LeftParen"),
+    7: ")",  # CompletionChunk.Kind("RightParen"),
+    8: "[",  # CompletionChunk.Kind("LeftBracket"),
+    9: "]",  # CompletionChunk.Kind("RightBracket"),
+    10: "{",  # CompletionChunk.Kind("LeftBrace"),
+    11: "}",  # CompletionChunk.Kind("RightBrace"),
+    12: "<",  # CompletionChunk.Kind("LeftAngle"),
+    13: ">",  # CompletionChunk.Kind("RightAngle"),
+    14: ", ",  # CompletionChunk.Kind("Comma"),
+    # 15: CompletionChunk.Kind("ResultType"),
+    16: ":",  # CompletionChunk.Kind("Colon"),
+    17: ";",  # CompletionChunk.Kind("SemiColon"),
+    18: "=",  # CompletionChunk.Kind("Equal"),
+    19: " ",  # CompletionChunk.Kind("HorizontalSpace"),
+    # 20: CompletionChunk.Kind("VerticalSpace")
 }
 
+
 class CompletionChunk(object):
     class Kind(object):
         def __init__(self, name):
@@ -2616,7 +2676,7 @@ def __repr__(self):
     @CachedProperty
     def spelling(self):
         if self.__kindNumber in SpellingCache:
-                return SpellingCache[self.__kindNumber]
+            return SpellingCache[self.__kindNumber]
         return conf.lib.clang_getCompletionChunkText(self.cs, self.key)
 
     # We do not use @CachedProperty here, as the manual implementation is
@@ -2625,8 +2685,9 @@ def spelling(self):
     @property
     def __kindNumber(self):
         if self.__kindNumberCache == -1:
-            self.__kindNumberCache = \
-                conf.lib.clang_getCompletionChunkKind(self.cs, self.key)
+            self.__kindNumberCache = conf.lib.clang_getCompletionChunkKind(
+                self.cs, self.key
+            )
         return self.__kindNumberCache
 
     @CachedProperty
@@ -2635,51 +2696,53 @@ def kind(self):
 
     @CachedProperty
     def string(self):
-        res = conf.lib.clang_getCompletionChunkCompletionString(self.cs,
-                                                                self.key)
+        res = conf.lib.clang_getCompletionChunkCompletionString(self.cs, self.key)
 
-        if (res):
-          return CompletionString(res)
+        if res:
+            return CompletionString(res)
         else:
-          None
+            return None
 
     def isKindOptional(self):
-      return self.__kindNumber == 0
+        return self.__kindNumber == 0
 
     def isKindTypedText(self):
-      return self.__kindNumber == 1
+        return self.__kindNumber == 1
 
     def isKindPlaceHolder(self):
-      return self.__kindNumber == 3
+        return self.__kindNumber == 3
 
     def isKindInformative(self):
-      return self.__kindNumber == 4
+        return self.__kindNumber == 4
 
     def isKindResultType(self):
-      return self.__kindNumber == 15
+        return self.__kindNumber == 15
+
 
 completionChunkKindMap = {
-            0: CompletionChunk.Kind("Optional"),
-            1: CompletionChunk.Kind("TypedText"),
-            2: CompletionChunk.Kind("Text"),
-            3: CompletionChunk.Kind("Placeholder"),
-            4: CompletionChunk.Kind("Informative"),
-            5: CompletionChunk.Kind("CurrentParameter"),
-            6: CompletionChunk.Kind("LeftParen"),
-            7: CompletionChunk.Kind("RightParen"),
-            8: CompletionChunk.Kind("LeftBracket"),
-            9: CompletionChunk.Kind("RightBracket"),
-            10: CompletionChunk.Kind("LeftBrace"),
-            11: CompletionChunk.Kind("RightBrace"),
-            12: CompletionChunk.Kind("LeftAngle"),
-            13: CompletionChunk.Kind("RightAngle"),
-            14: CompletionChunk.Kind("Comma"),
-            15: CompletionChunk.Kind("ResultType"),
-            16: CompletionChunk.Kind("Colon"),
-            17: CompletionChunk.Kind("SemiColon"),
-            18: CompletionChunk.Kind("Equal"),
-            19: CompletionChunk.Kind("HorizontalSpace"),
-            20: CompletionChunk.Kind("VerticalSpace")}
+    0: CompletionChunk.Kind("Optional"),
+    1: CompletionChunk.Kind("TypedText"),
+    2: CompletionChunk.Kind("Text"),
+    3: CompletionChunk.Kind("Placeholder"),
+    4: CompletionChunk.Kind("Informative"),
+    5: CompletionChunk.Kind("CurrentParameter"),
+    6: CompletionChunk.Kind("LeftParen"),
+    7: CompletionChunk.Kind("RightParen"),
+    8: CompletionChunk.Kind("LeftBracket"),
+    9: CompletionChunk.Kind("RightBracket"),
+    10: CompletionChunk.Kind("LeftBrace"),
+    11: CompletionChunk.Kind("RightBrace"),
+    12: CompletionChunk.Kind("LeftAngle"),
+    13: CompletionChunk.Kind("RightAngle"),
+    14: CompletionChunk.Kind("Comma"),
+    15: CompletionChunk.Kind("ResultType"),
+    16: CompletionChunk.Kind("Colon"),
+    17: CompletionChunk.Kind("SemiColon"),
+    18: CompletionChunk.Kind("Equal"),
+    19: CompletionChunk.Kind("HorizontalSpace"),
+    20: CompletionChunk.Kind("VerticalSpace"),
+}
+
 
 class CompletionString(ClangObject):
     class Availability(object):
@@ -2720,19 +2783,27 @@ def briefComment(self):
         return _CXString()
 
     def __repr__(self):
-        return " | ".join([str(a) for a in self]) \
-               + " || Priority: " + str(self.priority) \
-               + " || Availability: " + str(self.availability) \
-               + " || Brief comment: " + str(self.briefComment)
+        return (
+            " | ".join([str(a) for a in self])
+            + " || Priority: "
+            + str(self.priority)
+            + " || Availability: "
+            + str(self.availability)
+            + " || Brief comment: "
+            + str(self.briefComment)
+        )
+
 
 availabilityKinds = {
-            0: CompletionChunk.Kind("Available"),
-            1: CompletionChunk.Kind("Deprecated"),
-            2: CompletionChunk.Kind("NotAvailable"),
-            3: CompletionChunk.Kind("NotAccessible")}
+    0: CompletionChunk.Kind("Available"),
+    1: CompletionChunk.Kind("Deprecated"),
+    2: CompletionChunk.Kind("NotAvailable"),
+    3: CompletionChunk.Kind("NotAccessible"),
+}
+
 
 class CodeCompletionResult(Structure):
-    _fields_ = [('cursorKind', c_int), ('completionString', c_object_p)]
+    _fields_ = [("cursorKind", c_int), ("completionString", c_object_p)]
 
     def __repr__(self):
         return str(CompletionString(self.completionString))
@@ -2745,9 +2816,9 @@ def kind(self):
     def string(self):
         return CompletionString(self.completionString)
 
+
 class CCRStructure(Structure):
-    _fields_ = [('results', POINTER(CodeCompletionResult)),
-                ('numResults', c_int)]
+    _fields_ = [("results", POINTER(CodeCompletionResult)), ("numResults", c_int)]
 
     def __len__(self):
         return self.numResults
@@ -2758,6 +2829,7 @@ def __getitem__(self, key):
 
         return self.results[key]
 
+
 class CodeCompletionResults(ClangObject):
     def __init__(self, ptr):
         assert isinstance(ptr, POINTER(CCRStructure)) and ptr
@@ -2777,11 +2849,10 @@ def results(self):
     def diagnostics(self):
         class DiagnosticsItr(object):
             def __init__(self, ccr):
-                self.ccr= ccr
+                self.ccr = ccr
 
             def __len__(self):
-                return int(\
-                  conf.lib.clang_codeCompleteGetNumDiagnostics(self.ccr))
+                return int(conf.lib.clang_codeCompleteGetNumDiagnostics(self.ccr))
 
             def __getitem__(self, key):
                 return conf.lib.clang_codeCompleteGetDiagnostic(self.ccr, key)
@@ -2812,7 +2883,7 @@ def read(self, path):
         """Load a TranslationUnit from the given AST file."""
         return TranslationUnit.from_ast_file(path, self)
 
-    def parse(self, path, args=None, unsaved_files=None, options = 0):
+    def parse(self, path, args=None, unsaved_files=None, options=0):
         """Load the translation unit from the given source code file by running
         clang and generating the AST before loading. Additional command line
         parameters can be passed to clang via the args parameter.
@@ -2825,8 +2896,8 @@ def parse(self, path, args=None, unsaved_files=None, options = 0):
         If an error was encountered during parsing, a TranslationUnitLoadError
         will be raised.
         """
-        return TranslationUnit.from_source(path, args, unsaved_files, options,
-                                           self)
+        return TranslationUnit.from_source(path, args, unsaved_files, options, self)
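
(A sketch of the usual entry point, assuming libclang is discoverable; the
paths and flags are hypothetical:)

    from clang.cindex import Index

    index = Index.create()
    tu = index.parse("sample.c", args=["-std=c11", "-Iinclude"])
    print(tu.spelling)
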
+
 
 class TranslationUnit(ClangObject):
     """Represents a source code translation unit.
@@ -2868,8 +2939,9 @@ class TranslationUnit(ClangObject):
     PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION = 128
 
     @classmethod
-    def from_source(cls, filename, args=None, unsaved_files=None, options=0,
-                    index=None):
+    def from_source(
+        cls, filename, args=None, unsaved_files=None, options=0, index=None
+    ):
         """Create a TranslationUnit by parsing source.
 
         This is capable of processing source code both from files on the
@@ -2933,11 +3005,15 @@ def from_source(cls, filename, args=None, unsaved_files=None, options=0,
                 unsaved_array[i].contents = contents
                 unsaved_array[i].length = len(contents)
 
-        ptr = conf.lib.clang_parseTranslationUnit(index,
-                                    fspath(filename) if filename is not None else None,
-                                    args_array,
-                                    len(args), unsaved_array,
-                                    len(unsaved_files), options)
+        ptr = conf.lib.clang_parseTranslationUnit(
+            index,
+            fspath(filename) if filename is not None else None,
+            args_array,
+            len(args),
+            unsaved_array,
+            len(unsaved_files),
+            options,
+        )
 
         if not ptr:
             raise TranslationUnitLoadError("Error parsing translation unit.")
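
(A sketch of parsing purely from memory with from_source; the file name is only
a label for the unsaved buffer:)

    from clang.cindex import TranslationUnit

    tu = TranslationUnit.from_source(
        "main.c",
        unsaved_files=[("main.c", "int main(void) { return 0; }")],
        options=TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD,
    )
    print([c.spelling for c in tu.cursor.get_children()])
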
@@ -2999,6 +3075,7 @@ def get_includes(self):
         recursively iterate over header files included through precompiled
         headers.
         """
+
         def visitor(fobj, lptr, depth, includes):
             if depth > 0:
                 loc = lptr.contents
@@ -3006,8 +3083,9 @@ def visitor(fobj, lptr, depth, includes):
 
         # Automatically adapt CIndex/ctype pointers to python objects
         includes = []
-        conf.lib.clang_getInclusions(self,
-                callbacks['translation_unit_includes'](visitor), includes)
+        conf.lib.clang_getInclusions(
+            self, callbacks["translation_unit_includes"](visitor), includes
+        )
 
         return iter(includes)
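
(A sketch listing direct inclusions, assuming a hypothetical "sample.c" that
contains #include lines:)

    from clang.cindex import Index

    index = Index.create()
    tu = index.parse("sample.c")
    for inc in tu.get_includes():
        if inc.depth == 1:
            print(inc.source.name, "->", inc.include.name)
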
 
@@ -3050,20 +3128,21 @@ def get_extent(self, filename, locations):
         f = self.get_file(filename)
 
         if len(locations) < 2:
-            raise Exception('Must pass object with at least 2 elements')
+            raise Exception("Must pass object with at least 2 elements")
 
         start_location, end_location = locations
 
-        if hasattr(start_location, '__len__'):
-            start_location = SourceLocation.from_position(self, f,
-                start_location[0], start_location[1])
+        if hasattr(start_location, "__len__"):
+            start_location = SourceLocation.from_position(
+                self, f, start_location[0], start_location[1]
+            )
         elif isinstance(start_location, int):
-            start_location = SourceLocation.from_offset(self, f,
-                start_location)
+            start_location = SourceLocation.from_offset(self, f, start_location)
 
-        if hasattr(end_location, '__len__'):
-            end_location = SourceLocation.from_position(self, f,
-                end_location[0], end_location[1])
+        if hasattr(end_location, "__len__"):
+            end_location = SourceLocation.from_position(
+                self, f, end_location[0], end_location[1]
+            )
         elif isinstance(end_location, int):
             end_location = SourceLocation.from_offset(self, f, end_location)
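
(A sketch of both accepted forms of `locations`, with the same hypothetical
"sample.c":)

    from clang.cindex import Index

    tu = Index.create().parse("sample.c")
    r1 = tu.get_extent("sample.c", ((1, 1), (1, 8)))  # (line, column) pairs
    r2 = tu.get_extent("sample.c", (0, 7))            # byte offsets
    print(r1, r2)
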
 
@@ -3077,6 +3156,7 @@ def diagnostics(self):
         """
         Return an iterable (and indexable) object containing the diagnostics.
         """
+
         class DiagIterator(object):
             def __init__(self, tu):
                 self.tu = tu
@@ -3107,15 +3187,16 @@ def reparse(self, unsaved_files=None, options=0):
         unsaved_files_array = 0
         if len(unsaved_files):
             unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
-            for i,(name,contents) in enumerate(unsaved_files):
+            for i, (name, contents) in enumerate(unsaved_files):
                 if hasattr(contents, "read"):
                     contents = contents.read()
                 contents = b(contents)
                 unsaved_files_array[i].name = b(fspath(name))
                 unsaved_files_array[i].contents = contents
                 unsaved_files_array[i].length = len(contents)
-        ptr = conf.lib.clang_reparseTranslationUnit(self, len(unsaved_files),
-                unsaved_files_array, options)
+        ptr = conf.lib.clang_reparseTranslationUnit(
+            self, len(unsaved_files), unsaved_files_array, options
+        )
 
     def save(self, filename):
         """Saves the TranslationUnit to a file.
@@ -3133,15 +3214,22 @@ def save(self, filename):
         filename -- The path to save the translation unit to (str or PathLike).
         """
         options = conf.lib.clang_defaultSaveOptions(self)
-        result = int(conf.lib.clang_saveTranslationUnit(self, fspath(filename),
-                                                        options))
+        result = int(
+            conf.lib.clang_saveTranslationUnit(self, fspath(filename), options)
+        )
         if result != 0:
-            raise TranslationUnitSaveError(result,
-                'Error saving TranslationUnit.')
-
-    def codeComplete(self, path, line, column, unsaved_files=None,
-                     include_macros=False, include_code_patterns=False,
-                     include_brief_comments=False):
+            raise TranslationUnitSaveError(result, "Error saving TranslationUnit.")
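
(A sketch of the save/load round trip, with hypothetical paths:)

    from clang.cindex import Index

    index = Index.create()
    tu = index.parse("sample.c")
    tu.save("sample.ast")
    print(index.read("sample.ast").spelling)  # reloads via from_ast_file
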
+
+    def codeComplete(
+        self,
+        path,
+        line,
+        column,
+        unsaved_files=None,
+        include_macros=False,
+        include_code_patterns=False,
+        include_brief_comments=False,
+    ):
         """
         Code complete in this translation unit.
 
@@ -3167,15 +3255,22 @@ def codeComplete(self, path, line, column, unsaved_files=None,
         unsaved_files_array = 0
         if len(unsaved_files):
             unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
-            for i,(name,contents) in enumerate(unsaved_files):
+            for i, (name, contents) in enumerate(unsaved_files):
                 if hasattr(contents, "read"):
                     contents = contents.read()
                 contents = b(contents)
                 unsaved_files_array[i].name = b(fspath(name))
                 unsaved_files_array[i].contents = contents
                 unsaved_files_array[i].length = len(contents)
-        ptr = conf.lib.clang_codeCompleteAt(self, fspath(path), line, column,
-                unsaved_files_array, len(unsaved_files), options)
+        ptr = conf.lib.clang_codeCompleteAt(
+            self,
+            fspath(path),
+            line,
+            column,
+            unsaved_files_array,
+            len(unsaved_files),
+            options,
+        )
         if ptr:
             return CodeCompletionResults(ptr)
         return None
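
(A sketch of requesting completions after a member access. The (line, column)
pair below is hand-counted for this hypothetical buffer and is exactly the kind
of detail to double-check:)

    from clang.cindex import TranslationUnit

    src = "struct S { int x; };\nvoid f(struct S s) { s. }\n"
    tu = TranslationUnit.from_source("cc.c", unsaved_files=[("cc.c", src)])
    results = tu.codeComplete("cc.c", 2, 24, unsaved_files=[("cc.c", src)])
    if results is not None:
        for r in results.results:
            print(r)
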
@@ -3193,6 +3288,7 @@ def get_tokens(self, locations=None, extent=None):
 
         return TokenGroup.get_tokens(self, extent)
 
+
 class File(ClangObject):
     """
     The File class represents a particular source file that is part of a
@@ -3229,6 +3325,7 @@ def from_result(res, fn, args):
         res._tu = args[0]._tu
         return res
 
+
 class FileInclusion(object):
     """
     The FileInclusion class represents the inclusion of one source file by
@@ -3249,6 +3346,7 @@ def is_input_file(self):
         """True if the included file is the input file."""
         return self.depth == 0
 
+
 class CompilationDatabaseError(Exception):
     """Represents an error that occurred when working with a CompilationDatabase
 
@@ -3267,15 +3365,19 @@ def __init__(self, enumeration, message):
         assert isinstance(enumeration, int)
 
         if enumeration > 1:
-            raise Exception("Encountered undefined CompilationDatabase error "
-                            "constant: %d. Please file a bug to have this "
-                            "value supported." % enumeration)
+            raise Exception(
+                "Encountered undefined CompilationDatabase error "
+                "constant: %d. Please file a bug to have this "
+                "value supported." % enumeration
+            )
 
         self.cdb_error = enumeration
-        Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
+        Exception.__init__(self, "Error %d: %s" % (enumeration, message))
+
 
 class CompileCommand(object):
     """Represents the compile command used to build a file"""
+
     def __init__(self, cmd, ccmds):
         self.cmd = cmd
         # Keep a reference to the originating CompileCommands
@@ -3304,11 +3406,13 @@ def arguments(self):
         for i in range(length):
             yield conf.lib.clang_CompileCommand_getArg(self.cmd, i)
 
+
 class CompileCommands(object):
     """
     CompileCommands is an iterable object containing all the CompileCommand
     objects that can be used to build a specific file.
     """
+
     def __init__(self, ccmds):
         self.ccmds = ccmds
 
@@ -3330,6 +3434,7 @@ def from_result(res, fn, args):
             return None
         return CompileCommands(res)
 
+
 class CompilationDatabase(ClangObject):
     """
     The CompilationDatabase is a wrapper class around
@@ -3344,8 +3449,7 @@ def __del__(self):
     @staticmethod
     def from_result(res, fn, args):
         if not res:
-            raise CompilationDatabaseError(0,
-                                           "CompilationDatabase loading failed")
+            raise CompilationDatabaseError(0, "CompilationDatabase loading failed")
         return CompilationDatabase(res)
 
     @staticmethod
@@ -3353,11 +3457,13 @@ def fromDirectory(buildDir):
         """Builds a CompilationDatabase from the database found in buildDir"""
         errorCode = c_uint()
         try:
-            cdb = conf.lib.clang_CompilationDatabase_fromDirectory(fspath(buildDir),
-                byref(errorCode))
+            cdb = conf.lib.clang_CompilationDatabase_fromDirectory(
+                fspath(buildDir), byref(errorCode)
+            )
         except CompilationDatabaseError as e:
-            raise CompilationDatabaseError(int(errorCode.value),
-                                           "CompilationDatabase loading failed")
+            raise CompilationDatabaseError(
+                int(errorCode.value), "CompilationDatabase loading failed"
+            )
         return cdb
 
     def getCompileCommands(self, filename):
@@ -3365,8 +3471,9 @@ def getCompileCommands(self, filename):
         Get an iterable object providing all the CompileCommands available to
         build filename. Returns None if filename is not found in the database.
         """
-        return conf.lib.clang_CompilationDatabase_getCompileCommands(self,
-                                                                     fspath(filename))
+        return conf.lib.clang_CompilationDatabase_getCompileCommands(
+            self, fspath(filename)
+        )
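
(A sketch of querying a compile_commands.json database; "build" and the source
path are hypothetical:)

    from clang.cindex import CompilationDatabase

    cdb = CompilationDatabase.fromDirectory("build")
    cmds = cdb.getCompileCommands("/path/to/file.c")
    if cmds is not None:
        for cmd in cmds:
            print(cmd.directory, list(cmd.arguments))
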
 
     def getAllCompileCommands(self):
         """
@@ -3385,10 +3492,8 @@ class Token(Structure):
     Tokens are obtained from parsed TranslationUnit instances. You currently
     can't create tokens manually.
     """
-    _fields_ = [
-        ('int_data', c_uint * 4),
-        ('ptr_data', c_void_p)
-    ]
+
+    _fields_ = [("int_data", c_uint * 4), ("ptr_data", c_void_p)]
 
     @property
     def spelling(self):
@@ -3423,763 +3528,295 @@ def cursor(self):
 
         return cursor
 
+
 # Now comes the plumbing to hook up the C library.
 
 # Register callback types in common container.
-callbacks['translation_unit_includes'] = CFUNCTYPE(None, c_object_p,
-        POINTER(SourceLocation), c_uint, py_object)
-callbacks['cursor_visit'] = CFUNCTYPE(c_int, Cursor, Cursor, py_object)
-callbacks['fields_visit'] = CFUNCTYPE(c_int, Cursor, py_object)
+callbacks["translation_unit_includes"] = CFUNCTYPE(
+    None, c_object_p, POINTER(SourceLocation), c_uint, py_object
+)
+callbacks["cursor_visit"] = CFUNCTYPE(c_int, Cursor, Cursor, py_object)
+callbacks["fields_visit"] = CFUNCTYPE(c_int, Cursor, py_object)
 
 # Functions, in strictly alphabetical order.
 functionList = [
-  ("clang_annotateTokens",
-   [TranslationUnit, POINTER(Token), c_uint, POINTER(Cursor)]),
-
-  ("clang_CompilationDatabase_dispose",
-   [c_object_p]),
-
-  ("clang_CompilationDatabase_fromDirectory",
-   [c_interop_string, POINTER(c_uint)],
-   c_object_p,
-   CompilationDatabase.from_result),
-
-  ("clang_CompilationDatabase_getAllCompileCommands",
-   [c_object_p],
-   c_object_p,
-   CompileCommands.from_result),
-
-  ("clang_CompilationDatabase_getCompileCommands",
-   [c_object_p, c_interop_string],
-   c_object_p,
-   CompileCommands.from_result),
-
-  ("clang_CompileCommands_dispose",
-   [c_object_p]),
-
-  ("clang_CompileCommands_getCommand",
-   [c_object_p, c_uint],
-   c_object_p),
-
-  ("clang_CompileCommands_getSize",
-   [c_object_p],
-   c_uint),
-
-  ("clang_CompileCommand_getArg",
-   [c_object_p, c_uint],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_CompileCommand_getDirectory",
-   [c_object_p],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_CompileCommand_getFilename",
-   [c_object_p],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_CompileCommand_getNumArgs",
-   [c_object_p],
-   c_uint),
-
-  ("clang_codeCompleteAt",
-   [TranslationUnit, c_interop_string, c_int, c_int, c_void_p, c_int, c_int],
-   POINTER(CCRStructure)),
-
-  ("clang_codeCompleteGetDiagnostic",
-   [CodeCompletionResults, c_int],
-   Diagnostic),
-
-  ("clang_codeCompleteGetNumDiagnostics",
-   [CodeCompletionResults],
-   c_int),
-
-  ("clang_createIndex",
-   [c_int, c_int],
-   c_object_p),
-
-  ("clang_createTranslationUnit",
-   [Index, c_interop_string],
-   c_object_p),
-
-  ("clang_CXXConstructor_isConvertingConstructor",
-   [Cursor],
-   bool),
-
-  ("clang_CXXConstructor_isCopyConstructor",
-   [Cursor],
-   bool),
-
-  ("clang_CXXConstructor_isDefaultConstructor",
-   [Cursor],
-   bool),
-
-  ("clang_CXXConstructor_isMoveConstructor",
-   [Cursor],
-   bool),
-
-  ("clang_CXXField_isMutable",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isConst",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isDefaulted",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isDeleted",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isCopyAssignmentOperator",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isMoveAssignmentOperator",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isExplicit",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isPureVirtual",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isStatic",
-   [Cursor],
-   bool),
-
-  ("clang_CXXMethod_isVirtual",
-   [Cursor],
-   bool),
-
-  ("clang_CXXRecord_isAbstract",
-   [Cursor],
-   bool),
-
-  ("clang_EnumDecl_isScoped",
-   [Cursor],
-   bool),
-
-  ("clang_defaultDiagnosticDisplayOptions",
-   [],
-   c_uint),
-
-  ("clang_defaultSaveOptions",
-   [TranslationUnit],
-   c_uint),
-
-  ("clang_disposeCodeCompleteResults",
-   [CodeCompletionResults]),
-
-# ("clang_disposeCXTUResourceUsage",
-#  [CXTUResourceUsage]),
-
-  ("clang_disposeDiagnostic",
-   [Diagnostic]),
-
-  ("clang_disposeIndex",
-   [Index]),
-
-  ("clang_disposeString",
-   [_CXString]),
-
-  ("clang_disposeTokens",
-   [TranslationUnit, POINTER(Token), c_uint]),
-
-  ("clang_disposeTranslationUnit",
-   [TranslationUnit]),
-
-  ("clang_equalCursors",
-   [Cursor, Cursor],
-   bool),
-
-  ("clang_equalLocations",
-   [SourceLocation, SourceLocation],
-   bool),
-
-  ("clang_equalRanges",
-   [SourceRange, SourceRange],
-   bool),
-
-  ("clang_equalTypes",
-   [Type, Type],
-   bool),
-
-  ("clang_formatDiagnostic",
-   [Diagnostic, c_uint],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getArgType",
-   [Type, c_uint],
-   Type,
-   Type.from_result),
-
-  ("clang_getArrayElementType",
-   [Type],
-   Type,
-   Type.from_result),
-
-  ("clang_getArraySize",
-   [Type],
-   c_longlong),
-
-  ("clang_getFieldDeclBitWidth",
-   [Cursor],
-   c_int),
-
-  ("clang_getCanonicalCursor",
-   [Cursor],
-   Cursor,
-   Cursor.from_cursor_result),
-
-  ("clang_getCanonicalType",
-   [Type],
-   Type,
-   Type.from_result),
-
-  ("clang_getChildDiagnostics",
-   [Diagnostic],
-   c_object_p),
-
-  ("clang_getCompletionAvailability",
-   [c_void_p],
-   c_int),
-
-  ("clang_getCompletionBriefComment",
-   [c_void_p],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getCompletionChunkCompletionString",
-   [c_void_p, c_int],
-   c_object_p),
-
-  ("clang_getCompletionChunkKind",
-   [c_void_p, c_int],
-   c_int),
-
-  ("clang_getCompletionChunkText",
-   [c_void_p, c_int],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getCompletionPriority",
-   [c_void_p],
-   c_int),
-
-  ("clang_getCString",
-   [_CXString],
-   c_interop_string,
-   c_interop_string.to_python_string),
-
-  ("clang_getCursor",
-   [TranslationUnit, SourceLocation],
-   Cursor),
-
-  ("clang_getCursorAvailability",
-   [Cursor],
-   c_int),
-
-  ("clang_getCursorDefinition",
-   [Cursor],
-   Cursor,
-   Cursor.from_result),
-
-  ("clang_getCursorDisplayName",
-   [Cursor],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getCursorExtent",
-   [Cursor],
-   SourceRange),
-
-  ("clang_getCursorLexicalParent",
-   [Cursor],
-   Cursor,
-   Cursor.from_cursor_result),
-
-  ("clang_getCursorLocation",
-   [Cursor],
-   SourceLocation),
-
-  ("clang_getCursorReferenced",
-   [Cursor],
-   Cursor,
-   Cursor.from_result),
-
-  ("clang_getCursorReferenceNameRange",
-   [Cursor, c_uint, c_uint],
-   SourceRange),
-
-  ("clang_getCursorResultType",
-   [Cursor],
-   Type,
-   Type.from_result),
-
-  ("clang_getCursorSemanticParent",
-   [Cursor],
-   Cursor,
-   Cursor.from_cursor_result),
-
-  ("clang_getCursorSpelling",
-   [Cursor],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getCursorType",
-   [Cursor],
-   Type,
-   Type.from_result),
-
-  ("clang_getCursorUSR",
-   [Cursor],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_Cursor_getMangling",
-   [Cursor],
-   _CXString,
-   _CXString.from_result),
-
-# ("clang_getCXTUResourceUsage",
-#  [TranslationUnit],
-#  CXTUResourceUsage),
-
-  ("clang_getCXXAccessSpecifier",
-   [Cursor],
-   c_uint),
-
-  ("clang_getDeclObjCTypeEncoding",
-   [Cursor],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getDiagnostic",
-   [c_object_p, c_uint],
-   c_object_p),
-
-  ("clang_getDiagnosticCategory",
-   [Diagnostic],
-   c_uint),
-
-  ("clang_getDiagnosticCategoryText",
-   [Diagnostic],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getDiagnosticFixIt",
-   [Diagnostic, c_uint, POINTER(SourceRange)],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getDiagnosticInSet",
-   [c_object_p, c_uint],
-   c_object_p),
-
-  ("clang_getDiagnosticLocation",
-   [Diagnostic],
-   SourceLocation),
-
-  ("clang_getDiagnosticNumFixIts",
-   [Diagnostic],
-   c_uint),
-
-  ("clang_getDiagnosticNumRanges",
-   [Diagnostic],
-   c_uint),
-
-  ("clang_getDiagnosticOption",
-   [Diagnostic, POINTER(_CXString)],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getDiagnosticRange",
-   [Diagnostic, c_uint],
-   SourceRange),
-
-  ("clang_getDiagnosticSeverity",
-   [Diagnostic],
-   c_int),
-
-  ("clang_getDiagnosticSpelling",
-   [Diagnostic],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getElementType",
-   [Type],
-   Type,
-   Type.from_result),
-
-  ("clang_getEnumConstantDeclUnsignedValue",
-   [Cursor],
-   c_ulonglong),
-
-  ("clang_getEnumConstantDeclValue",
-   [Cursor],
-   c_longlong),
-
-  ("clang_getEnumDeclIntegerType",
-   [Cursor],
-   Type,
-   Type.from_result),
-
-  ("clang_getFile",
-   [TranslationUnit, c_interop_string],
-   c_object_p),
-
-  ("clang_getFileName",
-   [File],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getFileTime",
-   [File],
-   c_uint),
-
-  ("clang_getIBOutletCollectionType",
-   [Cursor],
-   Type,
-   Type.from_result),
-
-  ("clang_getIncludedFile",
-   [Cursor],
-   c_object_p,
-   File.from_result),
-
-  ("clang_getInclusions",
-   [TranslationUnit, callbacks['translation_unit_includes'], py_object]),
-
-  ("clang_getInstantiationLocation",
-   [SourceLocation, POINTER(c_object_p), POINTER(c_uint), POINTER(c_uint),
-    POINTER(c_uint)]),
-
-  ("clang_getLocation",
-   [TranslationUnit, File, c_uint, c_uint],
-   SourceLocation),
-
-  ("clang_getLocationForOffset",
-   [TranslationUnit, File, c_uint],
-   SourceLocation),
-
-  ("clang_getNullCursor",
-   None,
-   Cursor),
-
-  ("clang_getNumArgTypes",
-   [Type],
-   c_uint),
-
-  ("clang_getNumCompletionChunks",
-   [c_void_p],
-   c_int),
-
-  ("clang_getNumDiagnostics",
-   [c_object_p],
-   c_uint),
-
-  ("clang_getNumDiagnosticsInSet",
-   [c_object_p],
-   c_uint),
-
-  ("clang_getNumElements",
-   [Type],
-   c_longlong),
-
-  ("clang_getNumOverloadedDecls",
-   [Cursor],
-   c_uint),
-
-  ("clang_getOverloadedDecl",
-   [Cursor, c_uint],
-   Cursor,
-   Cursor.from_cursor_result),
-
-  ("clang_getPointeeType",
-   [Type],
-   Type,
-   Type.from_result),
-
-  ("clang_getRange",
-   [SourceLocation, SourceLocation],
-   SourceRange),
-
-  ("clang_getRangeEnd",
-   [SourceRange],
-   SourceLocation),
-
-  ("clang_getRangeStart",
-   [SourceRange],
-   SourceLocation),
-
-  ("clang_getResultType",
-   [Type],
-   Type,
-   Type.from_result),
-
-  ("clang_getSpecializedCursorTemplate",
-   [Cursor],
-   Cursor,
-   Cursor.from_cursor_result),
-
-  ("clang_getTemplateCursorKind",
-   [Cursor],
-   c_uint),
-
-  ("clang_getTokenExtent",
-   [TranslationUnit, Token],
-   SourceRange),
-
-  ("clang_getTokenKind",
-   [Token],
-   c_uint),
-
-  ("clang_getTokenLocation",
-   [TranslationUnit, Token],
-   SourceLocation),
-
-  ("clang_getTokenSpelling",
-   [TranslationUnit, Token],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getTranslationUnitCursor",
-   [TranslationUnit],
-   Cursor,
-   Cursor.from_result),
-
-  ("clang_getTranslationUnitSpelling",
-   [TranslationUnit],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getTUResourceUsageName",
-   [c_uint],
-   c_interop_string,
-   c_interop_string.to_python_string),
-
-  ("clang_getTypeDeclaration",
-   [Type],
-   Cursor,
-   Cursor.from_result),
-
-  ("clang_getTypedefDeclUnderlyingType",
-   [Cursor],
-   Type,
-   Type.from_result),
-
-  ("clang_getTypedefName",
-   [Type],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getTypeKindSpelling",
-   [c_uint],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_getTypeSpelling",
-   [Type],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_hashCursor",
-   [Cursor],
-   c_uint),
-
-  ("clang_isAttribute",
-   [CursorKind],
-   bool),
-
-  ("clang_isConstQualifiedType",
-   [Type],
-   bool),
-
-  ("clang_isCursorDefinition",
-   [Cursor],
-   bool),
-
-  ("clang_isDeclaration",
-   [CursorKind],
-   bool),
-
-  ("clang_isExpression",
-   [CursorKind],
-   bool),
-
-  ("clang_isFileMultipleIncludeGuarded",
-   [TranslationUnit, File],
-   bool),
-
-  ("clang_isFunctionTypeVariadic",
-   [Type],
-   bool),
-
-  ("clang_isInvalid",
-   [CursorKind],
-   bool),
-
-  ("clang_isPODType",
-   [Type],
-   bool),
-
-  ("clang_isPreprocessing",
-   [CursorKind],
-   bool),
-
-  ("clang_isReference",
-   [CursorKind],
-   bool),
-
-  ("clang_isRestrictQualifiedType",
-   [Type],
-   bool),
-
-  ("clang_isStatement",
-   [CursorKind],
-   bool),
-
-  ("clang_isTranslationUnit",
-   [CursorKind],
-   bool),
-
-  ("clang_isUnexposed",
-   [CursorKind],
-   bool),
-
-  ("clang_isVirtualBase",
-   [Cursor],
-   bool),
-
-  ("clang_isVolatileQualifiedType",
-   [Type],
-   bool),
-
-  ("clang_parseTranslationUnit",
-   [Index, c_interop_string, c_void_p, c_int, c_void_p, c_int, c_int],
-   c_object_p),
-
-  ("clang_reparseTranslationUnit",
-   [TranslationUnit, c_int, c_void_p, c_int],
-   c_int),
-
-  ("clang_saveTranslationUnit",
-   [TranslationUnit, c_interop_string, c_uint],
-   c_int),
-
-  ("clang_tokenize",
-   [TranslationUnit, SourceRange, POINTER(POINTER(Token)), POINTER(c_uint)]),
-
-  ("clang_visitChildren",
-   [Cursor, callbacks['cursor_visit'], py_object],
-   c_uint),
-
-  ("clang_Cursor_getNumArguments",
-   [Cursor],
-   c_int),
-
-  ("clang_Cursor_getArgument",
-   [Cursor, c_uint],
-   Cursor,
-   Cursor.from_result),
-
-  ("clang_Cursor_getNumTemplateArguments",
-   [Cursor],
-   c_int),
-
-  ("clang_Cursor_getTemplateArgumentKind",
-   [Cursor, c_uint],
-   TemplateArgumentKind.from_id),
-
-  ("clang_Cursor_getTemplateArgumentType",
-   [Cursor, c_uint],
-   Type,
-   Type.from_result),
-
-  ("clang_Cursor_getTemplateArgumentValue",
-   [Cursor, c_uint],
-   c_longlong),
-
-  ("clang_Cursor_getTemplateArgumentUnsignedValue",
-   [Cursor, c_uint],
-   c_ulonglong),
-
-  ("clang_Cursor_isAnonymous",
-   [Cursor],
-   bool),
-
-  ("clang_Cursor_isBitField",
-   [Cursor],
-   bool),
-
-  ("clang_Cursor_getBriefCommentText",
-   [Cursor],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_Cursor_getRawCommentText",
-   [Cursor],
-   _CXString,
-   _CXString.from_result),
-
-  ("clang_Cursor_getOffsetOfField",
-   [Cursor],
-   c_longlong),
-
-  ("clang_Location_isInSystemHeader",
-   [SourceLocation],
-   bool),
-
-  ("clang_Type_getAlignOf",
-   [Type],
-   c_longlong),
-
-  ("clang_Type_getClassType",
-   [Type],
-   Type,
-   Type.from_result),
-
-  ("clang_Type_getNumTemplateArguments",
-   [Type],
-   c_int),
-
-  ("clang_Type_getTemplateArgumentAsType",
-   [Type, c_uint],
-   Type,
-   Type.from_result),
-
-  ("clang_Type_getOffsetOf",
-   [Type, c_interop_string],
-   c_longlong),
-
-  ("clang_Type_getSizeOf",
-   [Type],
-   c_longlong),
-
-  ("clang_Type_getCXXRefQualifier",
-   [Type],
-   c_uint),
-
-  ("clang_Type_getNamedType",
-   [Type],
-   Type,
-   Type.from_result),
-
-  ("clang_Type_visitFields",
-   [Type, callbacks['fields_visit'], py_object],
-   c_uint),
+    (
+        "clang_annotateTokens",
+        [TranslationUnit, POINTER(Token), c_uint, POINTER(Cursor)],
+    ),
+    ("clang_CompilationDatabase_dispose", [c_object_p]),
+    (
+        "clang_CompilationDatabase_fromDirectory",
+        [c_interop_string, POINTER(c_uint)],
+        c_object_p,
+        CompilationDatabase.from_result,
+    ),
+    (
+        "clang_CompilationDatabase_getAllCompileCommands",
+        [c_object_p],
+        c_object_p,
+        CompileCommands.from_result,
+    ),
+    (
+        "clang_CompilationDatabase_getCompileCommands",
+        [c_object_p, c_interop_string],
+        c_object_p,
+        CompileCommands.from_result,
+    ),
+    ("clang_CompileCommands_dispose", [c_object_p]),
+    ("clang_CompileCommands_getCommand", [c_object_p, c_uint], c_object_p),
+    ("clang_CompileCommands_getSize", [c_object_p], c_uint),
+    (
+        "clang_CompileCommand_getArg",
+        [c_object_p, c_uint],
+        _CXString,
+        _CXString.from_result,
+    ),
+    (
+        "clang_CompileCommand_getDirectory",
+        [c_object_p],
+        _CXString,
+        _CXString.from_result,
+    ),
+    (
+        "clang_CompileCommand_getFilename",
+        [c_object_p],
+        _CXString,
+        _CXString.from_result,
+    ),
+    ("clang_CompileCommand_getNumArgs", [c_object_p], c_uint),
+    (
+        "clang_codeCompleteAt",
+        [TranslationUnit, c_interop_string, c_int, c_int, c_void_p, c_int, c_int],
+        POINTER(CCRStructure),
+    ),
+    ("clang_codeCompleteGetDiagnostic", [CodeCompletionResults, c_int], Diagnostic),
+    ("clang_codeCompleteGetNumDiagnostics", [CodeCompletionResults], c_int),
+    ("clang_createIndex", [c_int, c_int], c_object_p),
+    ("clang_createTranslationUnit", [Index, c_interop_string], c_object_p),
+    ("clang_CXXConstructor_isConvertingConstructor", [Cursor], bool),
+    ("clang_CXXConstructor_isCopyConstructor", [Cursor], bool),
+    ("clang_CXXConstructor_isDefaultConstructor", [Cursor], bool),
+    ("clang_CXXConstructor_isMoveConstructor", [Cursor], bool),
+    ("clang_CXXField_isMutable", [Cursor], bool),
+    ("clang_CXXMethod_isConst", [Cursor], bool),
+    ("clang_CXXMethod_isDefaulted", [Cursor], bool),
+    ("clang_CXXMethod_isDeleted", [Cursor], bool),
+    ("clang_CXXMethod_isCopyAssignmentOperator", [Cursor], bool),
+    ("clang_CXXMethod_isMoveAssignmentOperator", [Cursor], bool),
+    ("clang_CXXMethod_isExplicit", [Cursor], bool),
+    ("clang_CXXMethod_isPureVirtual", [Cursor], bool),
+    ("clang_CXXMethod_isStatic", [Cursor], bool),
+    ("clang_CXXMethod_isVirtual", [Cursor], bool),
+    ("clang_CXXRecord_isAbstract", [Cursor], bool),
+    ("clang_EnumDecl_isScoped", [Cursor], bool),
+    ("clang_defaultDiagnosticDisplayOptions", [], c_uint),
+    ("clang_defaultSaveOptions", [TranslationUnit], c_uint),
+    ("clang_disposeCodeCompleteResults", [CodeCompletionResults]),
+    # ("clang_disposeCXTUResourceUsage",
+    #  [CXTUResourceUsage]),
+    ("clang_disposeDiagnostic", [Diagnostic]),
+    ("clang_disposeIndex", [Index]),
+    ("clang_disposeString", [_CXString]),
+    ("clang_disposeTokens", [TranslationUnit, POINTER(Token), c_uint]),
+    ("clang_disposeTranslationUnit", [TranslationUnit]),
+    ("clang_equalCursors", [Cursor, Cursor], bool),
+    ("clang_equalLocations", [SourceLocation, SourceLocation], bool),
+    ("clang_equalRanges", [SourceRange, SourceRange], bool),
+    ("clang_equalTypes", [Type, Type], bool),
+    ("clang_formatDiagnostic", [Diagnostic, c_uint], _CXString, _CXString.from_result),
+    ("clang_getArgType", [Type, c_uint], Type, Type.from_result),
+    ("clang_getArrayElementType", [Type], Type, Type.from_result),
+    ("clang_getArraySize", [Type], c_longlong),
+    ("clang_getFieldDeclBitWidth", [Cursor], c_int),
+    ("clang_getCanonicalCursor", [Cursor], Cursor, Cursor.from_cursor_result),
+    ("clang_getCanonicalType", [Type], Type, Type.from_result),
+    ("clang_getChildDiagnostics", [Diagnostic], c_object_p),
+    ("clang_getCompletionAvailability", [c_void_p], c_int),
+    ("clang_getCompletionBriefComment", [c_void_p], _CXString, _CXString.from_result),
+    ("clang_getCompletionChunkCompletionString", [c_void_p, c_int], c_object_p),
+    ("clang_getCompletionChunkKind", [c_void_p, c_int], c_int),
+    (
+        "clang_getCompletionChunkText",
+        [c_void_p, c_int],
+        _CXString,
+        _CXString.from_result,
+    ),
+    ("clang_getCompletionPriority", [c_void_p], c_int),
+    (
+        "clang_getCString",
+        [_CXString],
+        c_interop_string,
+        c_interop_string.to_python_string,
+    ),
+    ("clang_getCursor", [TranslationUnit, SourceLocation], Cursor),
+    ("clang_getCursorAvailability", [Cursor], c_int),
+    ("clang_getCursorDefinition", [Cursor], Cursor, Cursor.from_result),
+    ("clang_getCursorDisplayName", [Cursor], _CXString, _CXString.from_result),
+    ("clang_getCursorExtent", [Cursor], SourceRange),
+    ("clang_getCursorLexicalParent", [Cursor], Cursor, Cursor.from_cursor_result),
+    ("clang_getCursorLocation", [Cursor], SourceLocation),
+    ("clang_getCursorReferenced", [Cursor], Cursor, Cursor.from_result),
+    ("clang_getCursorReferenceNameRange", [Cursor, c_uint, c_uint], SourceRange),
+    ("clang_getCursorResultType", [Cursor], Type, Type.from_result),
+    ("clang_getCursorSemanticParent", [Cursor], Cursor, Cursor.from_cursor_result),
+    ("clang_getCursorSpelling", [Cursor], _CXString, _CXString.from_result),
+    ("clang_getCursorType", [Cursor], Type, Type.from_result),
+    ("clang_getCursorUSR", [Cursor], _CXString, _CXString.from_result),
+    ("clang_Cursor_getMangling", [Cursor], _CXString, _CXString.from_result),
+    # ("clang_getCXTUResourceUsage",
+    #  [TranslationUnit],
+    #  CXTUResourceUsage),
+    ("clang_getCXXAccessSpecifier", [Cursor], c_uint),
+    ("clang_getDeclObjCTypeEncoding", [Cursor], _CXString, _CXString.from_result),
+    ("clang_getDiagnostic", [c_object_p, c_uint], c_object_p),
+    ("clang_getDiagnosticCategory", [Diagnostic], c_uint),
+    ("clang_getDiagnosticCategoryText", [Diagnostic], _CXString, _CXString.from_result),
+    (
+        "clang_getDiagnosticFixIt",
+        [Diagnostic, c_uint, POINTER(SourceRange)],
+        _CXString,
+        _CXString.from_result,
+    ),
+    ("clang_getDiagnosticInSet", [c_object_p, c_uint], c_object_p),
+    ("clang_getDiagnosticLocation", [Diagnostic], SourceLocation),
+    ("clang_getDiagnosticNumFixIts", [Diagnostic], c_uint),
+    ("clang_getDiagnosticNumRanges", [Diagnostic], c_uint),
+    (
+        "clang_getDiagnosticOption",
+        [Diagnostic, POINTER(_CXString)],
+        _CXString,
+        _CXString.from_result,
+    ),
+    ("clang_getDiagnosticRange", [Diagnostic, c_uint], SourceRange),
+    ("clang_getDiagnosticSeverity", [Diagnostic], c_int),
+    ("clang_getDiagnosticSpelling", [Diagnostic], _CXString, _CXString.from_result),
+    ("clang_getElementType", [Type], Type, Type.from_result),
+    ("clang_getEnumConstantDeclUnsignedValue", [Cursor], c_ulonglong),
+    ("clang_getEnumConstantDeclValue", [Cursor], c_longlong),
+    ("clang_getEnumDeclIntegerType", [Cursor], Type, Type.from_result),
+    ("clang_getFile", [TranslationUnit, c_interop_string], c_object_p),
+    ("clang_getFileName", [File], _CXString, _CXString.from_result),
+    ("clang_getFileTime", [File], c_uint),
+    ("clang_getIBOutletCollectionType", [Cursor], Type, Type.from_result),
+    ("clang_getIncludedFile", [Cursor], c_object_p, File.from_result),
+    (
+        "clang_getInclusions",
+        [TranslationUnit, callbacks["translation_unit_includes"], py_object],
+    ),
+    (
+        "clang_getInstantiationLocation",
+        [
+            SourceLocation,
+            POINTER(c_object_p),
+            POINTER(c_uint),
+            POINTER(c_uint),
+            POINTER(c_uint),
+        ],
+    ),
+    ("clang_getLocation", [TranslationUnit, File, c_uint, c_uint], SourceLocation),
+    ("clang_getLocationForOffset", [TranslationUnit, File, c_uint], SourceLocation),
+    ("clang_getNullCursor", None, Cursor),
+    ("clang_getNumArgTypes", [Type], c_uint),
+    ("clang_getNumCompletionChunks", [c_void_p], c_int),
+    ("clang_getNumDiagnostics", [c_object_p], c_uint),
+    ("clang_getNumDiagnosticsInSet", [c_object_p], c_uint),
+    ("clang_getNumElements", [Type], c_longlong),
+    ("clang_getNumOverloadedDecls", [Cursor], c_uint),
+    ("clang_getOverloadedDecl", [Cursor, c_uint], Cursor, Cursor.from_cursor_result),
+    ("clang_getPointeeType", [Type], Type, Type.from_result),
+    ("clang_getRange", [SourceLocation, SourceLocation], SourceRange),
+    ("clang_getRangeEnd", [SourceRange], SourceLocation),
+    ("clang_getRangeStart", [SourceRange], SourceLocation),
+    ("clang_getResultType", [Type], Type, Type.from_result),
+    ("clang_getSpecializedCursorTemplate", [Cursor], Cursor, Cursor.from_cursor_result),
+    ("clang_getTemplateCursorKind", [Cursor], c_uint),
+    ("clang_getTokenExtent", [TranslationUnit, Token], SourceRange),
+    ("clang_getTokenKind", [Token], c_uint),
+    ("clang_getTokenLocation", [TranslationUnit, Token], SourceLocation),
+    (
+        "clang_getTokenSpelling",
+        [TranslationUnit, Token],
+        _CXString,
+        _CXString.from_result,
+    ),
+    ("clang_getTranslationUnitCursor", [TranslationUnit], Cursor, Cursor.from_result),
+    (
+        "clang_getTranslationUnitSpelling",
+        [TranslationUnit],
+        _CXString,
+        _CXString.from_result,
+    ),
+    (
+        "clang_getTUResourceUsageName",
+        [c_uint],
+        c_interop_string,
+        c_interop_string.to_python_string,
+    ),
+    ("clang_getTypeDeclaration", [Type], Cursor, Cursor.from_result),
+    ("clang_getTypedefDeclUnderlyingType", [Cursor], Type, Type.from_result),
+    ("clang_getTypedefName", [Type], _CXString, _CXString.from_result),
+    ("clang_getTypeKindSpelling", [c_uint], _CXString, _CXString.from_result),
+    ("clang_getTypeSpelling", [Type], _CXString, _CXString.from_result),
+    ("clang_hashCursor", [Cursor], c_uint),
+    ("clang_isAttribute", [CursorKind], bool),
+    ("clang_isConstQualifiedType", [Type], bool),
+    ("clang_isCursorDefinition", [Cursor], bool),
+    ("clang_isDeclaration", [CursorKind], bool),
+    ("clang_isExpression", [CursorKind], bool),
+    ("clang_isFileMultipleIncludeGuarded", [TranslationUnit, File], bool),
+    ("clang_isFunctionTypeVariadic", [Type], bool),
+    ("clang_isInvalid", [CursorKind], bool),
+    ("clang_isPODType", [Type], bool),
+    ("clang_isPreprocessing", [CursorKind], bool),
+    ("clang_isReference", [CursorKind], bool),
+    ("clang_isRestrictQualifiedType", [Type], bool),
+    ("clang_isStatement", [CursorKind], bool),
+    ("clang_isTranslationUnit", [CursorKind], bool),
+    ("clang_isUnexposed", [CursorKind], bool),
+    ("clang_isVirtualBase", [Cursor], bool),
+    ("clang_isVolatileQualifiedType", [Type], bool),
+    (
+        "clang_parseTranslationUnit",
+        [Index, c_interop_string, c_void_p, c_int, c_void_p, c_int, c_int],
+        c_object_p,
+    ),
+    ("clang_reparseTranslationUnit", [TranslationUnit, c_int, c_void_p, c_int], c_int),
+    ("clang_saveTranslationUnit", [TranslationUnit, c_interop_string, c_uint], c_int),
+    (
+        "clang_tokenize",
+        [TranslationUnit, SourceRange, POINTER(POINTER(Token)), POINTER(c_uint)],
+    ),
+    ("clang_visitChildren", [Cursor, callbacks["cursor_visit"], py_object], c_uint),
+    ("clang_Cursor_getNumArguments", [Cursor], c_int),
+    ("clang_Cursor_getArgument", [Cursor, c_uint], Cursor, Cursor.from_result),
+    ("clang_Cursor_getNumTemplateArguments", [Cursor], c_int),
+    (
+        "clang_Cursor_getTemplateArgumentKind",
+        [Cursor, c_uint],
+        TemplateArgumentKind.from_id,
+    ),
+    ("clang_Cursor_getTemplateArgumentType", [Cursor, c_uint], Type, Type.from_result),
+    ("clang_Cursor_getTemplateArgumentValue", [Cursor, c_uint], c_longlong),
+    ("clang_Cursor_getTemplateArgumentUnsignedValue", [Cursor, c_uint], c_ulonglong),
+    ("clang_Cursor_isAnonymous", [Cursor], bool),
+    ("clang_Cursor_isBitField", [Cursor], bool),
+    ("clang_Cursor_getBriefCommentText", [Cursor], _CXString, _CXString.from_result),
+    ("clang_Cursor_getRawCommentText", [Cursor], _CXString, _CXString.from_result),
+    ("clang_Cursor_getOffsetOfField", [Cursor], c_longlong),
+    ("clang_Location_isInSystemHeader", [SourceLocation], bool),
+    ("clang_Type_getAlignOf", [Type], c_longlong),
+    ("clang_Type_getClassType", [Type], Type, Type.from_result),
+    ("clang_Type_getNumTemplateArguments", [Type], c_int),
+    ("clang_Type_getTemplateArgumentAsType", [Type, c_uint], Type, Type.from_result),
+    ("clang_Type_getOffsetOf", [Type, c_interop_string], c_longlong),
+    ("clang_Type_getSizeOf", [Type], c_longlong),
+    ("clang_Type_getCXXRefQualifier", [Type], c_uint),
+    ("clang_Type_getNamedType", [Type], Type, Type.from_result),
+    ("clang_Type_visitFields", [Type, callbacks["fields_visit"], py_object], c_uint),
 ]
 
+
 class LibclangError(Exception):
     def __init__(self, message):
         self.m = message
@@ -4187,14 +3824,17 @@ def __init__(self, message):
     def __str__(self):
         return self.m
 
+
 def register_function(lib, item, ignore_errors):
     # A function may not exist, if these bindings are used with an older or
     # incompatible version of libclang.so.
     try:
         func = getattr(lib, item[0])
     except AttributeError as e:
-        msg = str(e) + ". Please ensure that your python bindings are "\
-                       "compatible with your libclang.so version."
+        msg = (
+            str(e) + ". Please ensure that your python bindings are "
+            "compatible with your libclang.so version."
+        )
         if ignore_errors:
             return
         raise LibclangError(msg)
@@ -4208,6 +3848,7 @@ def register_function(lib, item, ignore_errors):
     if len(item) == 4:
         func.errcheck = item[3]
 
+
 def register_functions(lib, ignore_errors):
     """Register function prototypes with a libclang library instance.
 
@@ -4221,6 +3862,7 @@ def register(item):
     for f in functionList:
         register(f)
 
+
 class Config(object):
     library_path = None
     library_file = None
@@ -4231,8 +3873,10 @@ class Config(object):
     def set_library_path(path):
         """Set the path in which to search for libclang"""
         if Config.loaded:
-            raise Exception("library path must be set before before using " \
-                            "any other functionalities in libclang.")
+            raise Exception(
+                "library path must be set before before using "
+                "any other functionalities in libclang."
+            )
 
         Config.library_path = fspath(path)
 
@@ -4240,14 +3884,16 @@ def set_library_path(path):
     def set_library_file(filename):
         """Set the exact location of libclang"""
         if Config.loaded:
-            raise Exception("library file must be set before before using " \
-                            "any other functionalities in libclang.")
+            raise Exception(
+                "library file must be set before before using "
+                "any other functionalities in libclang."
+            )
 
         Config.library_file = fspath(filename)
 
     @staticmethod
     def set_compatibility_check(check_status):
-        """ Perform compatibility check when loading libclang
+        """Perform compatibility check when loading libclang
 
         The python bindings are only tested and evaluated with the version of
         libclang they are provided with. To ensure correct behavior a (limited)
@@ -4264,8 +3910,10 @@ def set_compatibility_check(check_status):
         libclang versions.
         """
         if Config.loaded:
-            raise Exception("compatibility_check must be set before before " \
-                            "using any other functionalities in libclang.")
+            raise Exception(
+                "compatibility_check must be set before before "
+                "using any other functionalities in libclang."
+            )
 
         Config.compatibility_check = check_status
 
@@ -4281,17 +3929,18 @@ def get_filename(self):
             return Config.library_file
 
         import platform
+
         name = platform.system()
 
-        if name == 'Darwin':
-            file = 'libclang.dylib'
-        elif name == 'Windows':
-            file = 'libclang.dll'
+        if name == "Darwin":
+            file = "libclang.dylib"
+        elif name == "Windows":
+            file = "libclang.dll"
         else:
-            file = 'libclang.so'
+            file = "libclang.so"
 
         if Config.library_path:
-            file = Config.library_path + '/' + file
+            file = Config.library_path + "/" + file
 
         return file
 
@@ -4299,9 +3948,11 @@ def get_cindex_library(self):
         try:
             library = cdll.LoadLibrary(self.get_filename())
         except OSError as e:
-            msg = str(e) + ". To provide a path to libclang use " \
-                           "Config.set_library_path() or " \
-                           "Config.set_library_file()."
+            msg = (
+                str(e) + ". To provide a path to libclang use "
+                "Config.set_library_path() or "
+                "Config.set_library_file()."
+            )
             raise LibclangError(msg)
 
         return library
@@ -4314,34 +3965,36 @@ def function_exists(self, name):
 
         return True
 
+
 def register_enumerations():
     for name, value in clang.enumerations.TokenKinds:
         TokenKind.register(value, name)
 
+
 conf = Config()
 register_enumerations()
 
 __all__ = [
-    'AvailabilityKind',
-    'Config',
-    'CodeCompletionResults',
-    'CompilationDatabase',
-    'CompileCommands',
-    'CompileCommand',
-    'CursorKind',
-    'Cursor',
-    'Diagnostic',
-    'File',
-    'FixIt',
-    'Index',
-    'LinkageKind',
-    'SourceLocation',
-    'SourceRange',
-    'TLSKind',
-    'TokenKind',
-    'Token',
-    'TranslationUnitLoadError',
-    'TranslationUnit',
-    'TypeKind',
-    'Type',
+    "AvailabilityKind",
+    "Config",
+    "CodeCompletionResults",
+    "CompilationDatabase",
+    "CompileCommands",
+    "CompileCommand",
+    "CursorKind",
+    "Cursor",
+    "Diagnostic",
+    "File",
+    "FixIt",
+    "Index",
+    "LinkageKind",
+    "SourceLocation",
+    "SourceRange",
+    "TLSKind",
+    "TokenKind",
+    "Token",
+    "TranslationUnitLoadError",
+    "TranslationUnit",
+    "TypeKind",
+    "Type",
 ]
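
For readers tracing the reformatted functionList above: each entry is a
(name, argtypes[, restype[, errcheck]]) tuple that register_function applies
to the loaded library through plain ctypes attribute assignments. A minimal
sketch of that mechanism, assuming a locally resolvable libclang.so and using
ctypes.c_void_p as a stand-in for the bindings' private c_object_p:

    from ctypes import CDLL, c_uint, c_void_p

    # One entry, shaped like the functionList tuples above; c_void_p stands
    # in for the bindings' c_object_p.
    item = ("clang_getNumDiagnostics", [c_void_p], c_uint)

    lib = CDLL("libclang.so")  # assumption: Config.get_filename computes the real name
    func = getattr(lib, item[0])  # the AttributeError that register_function catches
    func.argtypes = item[1]       # may be None, e.g. for clang_getNullCursor
    if len(item) >= 3:
        func.restype = item[2]    # ctypes coerces the return value to this type
    if len(item) == 4:
        func.errcheck = item[3]   # post-call hook such as _CXString.from_result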

diff --git a/clang/bindings/python/clang/enumerations.py b/clang/bindings/python/clang/enumerations.py
index 520e1346d3ebe..b1013c7372043 100644
--- a/clang/bindings/python/clang/enumerations.py
+++ b/clang/bindings/python/clang/enumerations.py
@@ -1,10 +1,10 @@
-#===- enumerations.py - Python Enumerations ------------------*- python -*--===#
+# ===- enumerations.py - Python Enumerations ------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 """
 Clang Enumerations
@@ -23,11 +23,11 @@
 # Maps to CXTokenKind. Note that libclang maintains a separate set of token
 # enumerations from the C++ API.
 TokenKinds = [
-    ('PUNCTUATION', 0),
-    ('KEYWORD', 1),
-    ('IDENTIFIER', 2),
-    ('LITERAL', 3),
-    ('COMMENT', 4),
+    ("PUNCTUATION", 0),
+    ("KEYWORD", 1),
+    ("IDENTIFIER", 2),
+    ("LITERAL", 3),
+    ("COMMENT", 4),
 ]
 
-__all__ = ['TokenKinds']
+__all__ = ["TokenKinds"]

diff --git a/clang/bindings/python/examples/cindex/cindex-dump.py b/clang/bindings/python/examples/cindex/cindex-dump.py
index 46073b285cb36..10c3e4ba2dba4 100644
--- a/clang/bindings/python/examples/cindex/cindex-dump.py
+++ b/clang/bindings/python/examples/cindex/cindex-dump.py
@@ -1,26 +1,30 @@
 #!/usr/bin/env python
 
-#===- cindex-dump.py - cindex/Python Source Dump -------------*- python -*--===#
+# ===- cindex-dump.py - cindex/Python Source Dump -------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 """
 A simple command line tool for dumping a source file using the Clang Index
 Library.
 """
 
+
 def get_diag_info(diag):
-    return { 'severity' : diag.severity,
-             'location' : diag.location,
-             'spelling' : diag.spelling,
-             'ranges' : diag.ranges,
-             'fixits' : diag.fixits }
+    return {
+        "severity": diag.severity,
+        "location": diag.location,
+        "spelling": diag.spelling,
+        "ranges": diag.ranges,
+        "fixits": diag.fixits,
+    }
+
 
-def get_cursor_id(cursor, cursor_list = []):
+def get_cursor_id(cursor, cursor_list=[]):
     if not opts.showIDs:
         return None
 
@@ -29,28 +33,31 @@ def get_cursor_id(cursor, cursor_list = []):
 
     # FIXME: This is really slow. It would be nice if the index API exposed
     # something that let us hash cursors.
-    for i,c in enumerate(cursor_list):
+    for i, c in enumerate(cursor_list):
         if cursor == c:
             return i
     cursor_list.append(cursor)
     return len(cursor_list) - 1
 
+
 def get_info(node, depth=0):
     if opts.maxDepth is not None and depth >= opts.maxDepth:
         children = None
     else:
-        children = [get_info(c, depth+1)
-                    for c in node.get_children()]
-    return { 'id' : get_cursor_id(node),
-             'kind' : node.kind,
-             'usr' : node.get_usr(),
-             'spelling' : node.spelling,
-             'location' : node.location,
-             'extent.start' : node.extent.start,
-             'extent.end' : node.extent.end,
-             'is_definition' : node.is_definition(),
-             'definition id' : get_cursor_id(node.get_definition()),
-             'children' : children }
+        children = [get_info(c, depth + 1) for c in node.get_children()]
+    return {
+        "id": get_cursor_id(node),
+        "kind": node.kind,
+        "usr": node.get_usr(),
+        "spelling": node.spelling,
+        "location": node.location,
+        "extent.start": node.extent.start,
+        "extent.end": node.extent.end,
+        "is_definition": node.is_definition(),
+        "definition id": get_cursor_id(node.get_definition()),
+        "children": children,
+    }
+
 
 def main():
     from clang.cindex import Index
@@ -61,26 +68,37 @@ def main():
     global opts
 
     parser = OptionParser("usage: %prog [options] {filename} [clang-args*]")
-    parser.add_option("", "--show-ids", dest="showIDs",
-                      help="Compute cursor IDs (very slow)",
-                      action="store_true", default=False)
-    parser.add_option("", "--max-depth", dest="maxDepth",
-                      help="Limit cursor expansion to depth N",
-                      metavar="N", type=int, default=None)
+    parser.add_option(
+        "",
+        "--show-ids",
+        dest="showIDs",
+        help="Compute cursor IDs (very slow)",
+        action="store_true",
+        default=False,
+    )
+    parser.add_option(
+        "",
+        "--max-depth",
+        dest="maxDepth",
+        help="Limit cursor expansion to depth N",
+        metavar="N",
+        type=int,
+        default=None,
+    )
     parser.disable_interspersed_args()
     (opts, args) = parser.parse_args()
 
     if len(args) == 0:
-        parser.error('invalid number arguments')
+        parser.error("invalid number arguments")
 
     index = Index.create()
     tu = index.parse(None, args)
     if not tu:
         parser.error("unable to load input")
 
-    pprint(('diags', [get_diag_info(d) for d in  tu.diagnostics]))
-    pprint(('nodes', get_info(tu.cursor)))
+    pprint(("diags", [get_diag_info(d) for d in tu.diagnostics]))
+    pprint(("nodes", get_info(tu.cursor)))
 
-if __name__ == '__main__':
-    main()
 
+if __name__ == "__main__":
+    main()

diff --git a/clang/bindings/python/examples/cindex/cindex-includes.py b/clang/bindings/python/examples/cindex/cindex-includes.py
index ec1fbc0c3edb4..4e8bae19005cd 100644
--- a/clang/bindings/python/examples/cindex/cindex-includes.py
+++ b/clang/bindings/python/examples/cindex/cindex-includes.py
@@ -1,18 +1,19 @@
 #!/usr/bin/env python
 
-#===- cindex-includes.py - cindex/Python Inclusion Graph -----*- python -*--===#
+# ===- cindex-includes.py - cindex/Python Inclusion Graph -----*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 """
 A simple command line tool for dumping a Graphviz description (dot) that
 describes include dependencies.
 """
 
+
 def main():
     import sys
     from clang.cindex import Index
@@ -23,7 +24,7 @@ def main():
     parser.disable_interspersed_args()
     (opts, args) = parser.parse_args()
     if len(args) == 0:
-        parser.error('invalid number arguments')
+        parser.error("invalid number arguments")
 
     # FIXME: Add an output file option
     out = sys.stdout
@@ -36,22 +37,22 @@ def main():
     # A helper function for generating the node name.
     def name(f):
         if f:
-            return "\"" + f.name + "\""
+            return '"' + f.name + '"'
 
     # Generate the include graph
     out.write("digraph G {\n")
     for i in tu.get_includes():
-        line = "  ";
+        line = "  "
         if i.is_input_file:
             # Always write the input file as a node just in case it doesn't
             # actually include anything. This would generate a 1 node graph.
             line += name(i.include)
         else:
-            line += '%s->%s' % (name(i.source), name(i.include))
-        line += "\n";
+            line += "%s->%s" % (name(i.source), name(i.include))
+        line += "\n"
         out.write(line)
     out.write("}\n")
 
-if __name__ == '__main__':
-    main()
 
+if __name__ == "__main__":
+    main()
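
The test modules in the remainder of this diff all open with the same
bootstrap: point the bindings at libclang via CLANG_LIBRARY_PATH before using
anything else from clang.cindex. A standalone sketch of that setup, where t.c
and the -std flag are placeholders:

    import os
    from clang.cindex import Config

    # Library configuration must happen before any other cindex call;
    # Config raises otherwise, per the cindex.py hunks above.
    if "CLANG_LIBRARY_PATH" in os.environ:
        Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])

    from clang.cindex import Index

    index = Index.create()
    tu = index.parse("t.c", args=["-std=c11"])  # placeholder file and flags
    print(tu.spelling)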

diff --git a/clang/bindings/python/tests/cindex/test_access_specifiers.py b/clang/bindings/python/tests/cindex/test_access_specifiers.py
index e36424f240aa0..c1cc18ebe6e58 100644
--- a/clang/bindings/python/tests/cindex/test_access_specifiers.py
+++ b/clang/bindings/python/tests/cindex/test_access_specifiers.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import AccessSpecifier
 from clang.cindex import Cursor
@@ -17,7 +18,8 @@ class TestAccessSpecifiers(unittest.TestCase):
     def test_access_specifiers(self):
         """Ensure that C++ access specifiers are available on cursors"""
 
-        tu = get_tu("""
+        tu = get_tu(
+            """
 class test_class {
 public:
   void public_member_function();
@@ -26,7 +28,9 @@ class test_class {
 private:
   void private_member_function();
 };
-""", lang = 'cpp')
+""",
+            lang="cpp",
+        )
 
         test_class = get_cursor(tu, "test_class")
         self.assertEqual(test_class.access_specifier, AccessSpecifier.INVALID)

diff --git a/clang/bindings/python/tests/cindex/test_cdb.py b/clang/bindings/python/tests/cindex/test_cdb.py
index 99bc72143beff..a5cc22796aa2a 100644
--- a/clang/bindings/python/tests/cindex/test_cdb.py
+++ b/clang/bindings/python/tests/cindex/test_cdb.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import CompilationDatabase
 from clang.cindex import CompilationDatabaseError
@@ -15,10 +16,10 @@
 from .util import str_to_path
 
 
-kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
+kInputsDir = os.path.join(os.path.dirname(__file__), "INPUTS")
 
 
-@unittest.skipIf(sys.platform == 'win32', "TODO: Fix these tests on Windows")
+@unittest.skipIf(sys.platform == "win32", "TODO: Fix these tests on Windows")
 class TestCDB(unittest.TestCase):
     def test_create_fail(self):
         """Check we fail loading a database with an assertion"""
@@ -27,7 +28,7 @@ def test_create_fail(self):
         # clang_CompilationDatabase_fromDirectory calls fprintf(stderr, ...)
         # Suppress its output.
         stderr = os.dup(2)
-        with open(os.devnull, 'wb') as null:
+        with open(os.devnull, "wb") as null:
             os.dup2(null.fileno(), 2)
         with self.assertRaises(CompilationDatabaseError) as cm:
             cdb = CompilationDatabase.fromDirectory(path)
@@ -35,8 +36,7 @@ def test_create_fail(self):
         os.close(stderr)
 
         e = cm.exception
-        self.assertEqual(e.cdb_error,
-            CompilationDatabaseError.ERROR_CANNOTLOADDATABASE)
+        self.assertEqual(e.cdb_error, CompilationDatabaseError.ERROR_CANNOTLOADDATABASE)
 
     def test_create(self):
         """Check we can load a compilation database"""
@@ -45,14 +45,16 @@ def test_create(self):
     def test_lookup_succeed(self):
         """Check we get some results if the file exists in the db"""
         cdb = CompilationDatabase.fromDirectory(kInputsDir)
-        cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
+        cmds = cdb.getCompileCommands("/home/john.doe/MyProject/project.cpp")
         self.assertNotEqual(len(cmds), 0)
 
     @skip_if_no_fspath
     def test_lookup_succeed_pathlike(self):
         """Same as test_lookup_succeed, but with PathLikes"""
         cdb = CompilationDatabase.fromDirectory(str_to_path(kInputsDir))
-        cmds = cdb.getCompileCommands(str_to_path('/home/john.doe/MyProject/project.cpp'))
+        cmds = cdb.getCompileCommands(
+            str_to_path("/home/john.doe/MyProject/project.cpp")
+        )
         self.assertNotEqual(len(cmds), 0)
 
     def test_all_compilecommand(self):
@@ -61,71 +63,116 @@ def test_all_compilecommand(self):
         cmds = cdb.getAllCompileCommands()
         self.assertEqual(len(cmds), 3)
         expected = [
-            { 'wd': '/home/john.doe/MyProject',
-              'file': '/home/john.doe/MyProject/project.cpp',
-              'line': ['clang++', '--driver-mode=g++', '-o', 'project.o', '-c',
-                       '/home/john.doe/MyProject/project.cpp']},
-            { 'wd': '/home/john.doe/MyProjectA',
-              'file': '/home/john.doe/MyProject/project2.cpp',
-              'line': ['clang++', '--driver-mode=g++', '-o', 'project2.o', '-c',
-                       '/home/john.doe/MyProject/project2.cpp']},
-            { 'wd': '/home/john.doe/MyProjectB',
-              'file': '/home/john.doe/MyProject/project2.cpp',
-              'line': ['clang++', '--driver-mode=g++', '-DFEATURE=1', '-o',
-                       'project2-feature.o', '-c',
-                       '/home/john.doe/MyProject/project2.cpp']},
-
-            ]
+            {
+                "wd": "/home/john.doe/MyProject",
+                "file": "/home/john.doe/MyProject/project.cpp",
+                "line": [
+                    "clang++",
+                    "--driver-mode=g++",
+                    "-o",
+                    "project.o",
+                    "-c",
+                    "/home/john.doe/MyProject/project.cpp",
+                ],
+            },
+            {
+                "wd": "/home/john.doe/MyProjectA",
+                "file": "/home/john.doe/MyProject/project2.cpp",
+                "line": [
+                    "clang++",
+                    "--driver-mode=g++",
+                    "-o",
+                    "project2.o",
+                    "-c",
+                    "/home/john.doe/MyProject/project2.cpp",
+                ],
+            },
+            {
+                "wd": "/home/john.doe/MyProjectB",
+                "file": "/home/john.doe/MyProject/project2.cpp",
+                "line": [
+                    "clang++",
+                    "--driver-mode=g++",
+                    "-DFEATURE=1",
+                    "-o",
+                    "project2-feature.o",
+                    "-c",
+                    "/home/john.doe/MyProject/project2.cpp",
+                ],
+            },
+        ]
         for i in range(len(cmds)):
-            self.assertEqual(cmds[i].directory, expected[i]['wd'])
-            self.assertEqual(cmds[i].filename, expected[i]['file'])
-            for arg, exp in zip(cmds[i].arguments, expected[i]['line']):
+            self.assertEqual(cmds[i].directory, expected[i]["wd"])
+            self.assertEqual(cmds[i].filename, expected[i]["file"])
+            for arg, exp in zip(cmds[i].arguments, expected[i]["line"]):
                 self.assertEqual(arg, exp)
 
     def test_1_compilecommand(self):
         """Check file with single compile command"""
         cdb = CompilationDatabase.fromDirectory(kInputsDir)
-        file = '/home/john.doe/MyProject/project.cpp'
+        file = "/home/john.doe/MyProject/project.cpp"
         cmds = cdb.getCompileCommands(file)
         self.assertEqual(len(cmds), 1)
         self.assertEqual(cmds[0].directory, os.path.dirname(file))
         self.assertEqual(cmds[0].filename, file)
-        expected = [ 'clang++', '--driver-mode=g++', '-o', 'project.o', '-c',
-                     '/home/john.doe/MyProject/project.cpp']
+        expected = [
+            "clang++",
+            "--driver-mode=g++",
+            "-o",
+            "project.o",
+            "-c",
+            "/home/john.doe/MyProject/project.cpp",
+        ]
         for arg, exp in zip(cmds[0].arguments, expected):
             self.assertEqual(arg, exp)
 
     def test_2_compilecommand(self):
         """Check file with 2 compile commands"""
         cdb = CompilationDatabase.fromDirectory(kInputsDir)
-        cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project2.cpp')
+        cmds = cdb.getCompileCommands("/home/john.doe/MyProject/project2.cpp")
         self.assertEqual(len(cmds), 2)
         expected = [
-            { 'wd': '/home/john.doe/MyProjectA',
-              'line': ['clang++', '--driver-mode=g++', '-o', 'project2.o', '-c',
-                       '/home/john.doe/MyProject/project2.cpp']},
-            { 'wd': '/home/john.doe/MyProjectB',
-              'line': ['clang++', '--driver-mode=g++', '-DFEATURE=1', '-o',
-                       'project2-feature.o', '-c',
-                       '/home/john.doe/MyProject/project2.cpp']}
-            ]
+            {
+                "wd": "/home/john.doe/MyProjectA",
+                "line": [
+                    "clang++",
+                    "--driver-mode=g++",
+                    "-o",
+                    "project2.o",
+                    "-c",
+                    "/home/john.doe/MyProject/project2.cpp",
+                ],
+            },
+            {
+                "wd": "/home/john.doe/MyProjectB",
+                "line": [
+                    "clang++",
+                    "--driver-mode=g++",
+                    "-DFEATURE=1",
+                    "-o",
+                    "project2-feature.o",
+                    "-c",
+                    "/home/john.doe/MyProject/project2.cpp",
+                ],
+            },
+        ]
         for i in range(len(cmds)):
-            self.assertEqual(cmds[i].directory, expected[i]['wd'])
-            for arg, exp in zip(cmds[i].arguments, expected[i]['line']):
+            self.assertEqual(cmds[i].directory, expected[i]["wd"])
+            for arg, exp in zip(cmds[i].arguments, expected[i]["line"]):
                 self.assertEqual(arg, exp)
 
     def test_compilecommand_iterator_stops(self):
         """Check that iterator stops after the correct number of elements"""
         cdb = CompilationDatabase.fromDirectory(kInputsDir)
         count = 0
-        for cmd in cdb.getCompileCommands('/home/john.doe/MyProject/project2.cpp'):
+        for cmd in cdb.getCompileCommands("/home/john.doe/MyProject/project2.cpp"):
             count += 1
             self.assertLessEqual(count, 2)
 
     def test_compilationDB_references(self):
         """Ensure CompilationsCommands are independent of the database"""
         cdb = CompilationDatabase.fromDirectory(kInputsDir)
-        cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
+        cmds = cdb.getCompileCommands("/home/john.doe/MyProject/project.cpp")
         del cdb
         gc.collect()
         workingdir = cmds[0].directory
@@ -133,7 +180,7 @@ def test_compilationDB_references(self):
     def test_compilationCommands_references(self):
         """Ensure CompilationsCommand keeps a reference to CompilationCommands"""
         cdb = CompilationDatabase.fromDirectory(kInputsDir)
-        cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
+        cmds = cdb.getCompileCommands("/home/john.doe/MyProject/project.cpp")
         del cdb
         cmd0 = cmds[0]
         del cmds
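
For reference, the CompilationDatabase surface these tests exercise reduces
to a load-then-query pattern. A sketch, assuming a directory that holds a
compile_commands.json like the tests' INPUTS fixture:

    from clang.cindex import CompilationDatabase

    # Load a directory that contains a compile_commands.json.
    cdb = CompilationDatabase.fromDirectory("tests/cindex/INPUTS")  # assumed path
    for cmd in cdb.getCompileCommands("/home/john.doe/MyProject/project2.cpp"):
        print(cmd.directory, cmd.filename)
        print(list(cmd.arguments))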

diff --git a/clang/bindings/python/tests/cindex/test_code_completion.py b/clang/bindings/python/tests/cindex/test_code_completion.py
index 1603d3dfc17a8..ca52fc6f73e1d 100644
--- a/clang/bindings/python/tests/cindex/test_code_completion.py
+++ b/clang/bindings/python/tests/cindex/test_code_completion.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import TranslationUnit
 
@@ -21,7 +22,10 @@ def check_completion_results(self, cr, expected):
             self.assertIn(c, completions)
 
     def test_code_complete(self):
-        files = [('fake.c', """
+        files = [
+            (
+                "fake.c",
+                """
 /// Aaa.
 int test1;
 
@@ -31,23 +35,34 @@ def test_code_complete(self):
 void f() {
 
 }
-""")]
+""",
+            )
+        ]
 
-        tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files,
-                options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
+        tu = TranslationUnit.from_source(
+            "fake.c",
+            ["-std=c99"],
+            unsaved_files=files,
+            options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION,
+        )
 
-        cr = tu.codeComplete('fake.c', 9, 1, unsaved_files=files, include_brief_comments=True)
+        cr = tu.codeComplete(
+            "fake.c", 9, 1, unsaved_files=files, include_brief_comments=True
+        )
 
         expected = [
-          "{'int', ResultType} | {'test1', TypedText} || Priority: 50 || Availability: Available || Brief comment: Aaa.",
-          "{'void', ResultType} | {'test2', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 50 || Availability: Available || Brief comment: Bbb.",
-          "{'return', TypedText} | {';', SemiColon} || Priority: 40 || Availability: Available || Brief comment: None"
+            "{'int', ResultType} | {'test1', TypedText} || Priority: 50 || Availability: Available || Brief comment: Aaa.",
+            "{'void', ResultType} | {'test2', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 50 || Availability: Available || Brief comment: Bbb.",
+            "{'return', TypedText} | {';', SemiColon} || Priority: 40 || Availability: Available || Brief comment: None",
         ]
         self.check_completion_results(cr, expected)
 
     @skip_if_no_fspath
     def test_code_complete_pathlike(self):
-        files = [(str_to_path('fake.c'), """
+        files = [
+            (
+                str_to_path("fake.c"),
+                """
 /// Aaa.
 int test1;
 
@@ -57,22 +72,37 @@ def test_code_complete_pathlike(self):
 void f() {
 
 }
-""")]
-
-        tu = TranslationUnit.from_source(str_to_path('fake.c'), ['-std=c99'], unsaved_files=files,
-                options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
+""",
+            )
+        ]
 
-        cr = tu.codeComplete(str_to_path('fake.c'), 9, 1, unsaved_files=files, include_brief_comments=True)
+        tu = TranslationUnit.from_source(
+            str_to_path("fake.c"),
+            ["-std=c99"],
+            unsaved_files=files,
+            options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION,
+        )
+
+        cr = tu.codeComplete(
+            str_to_path("fake.c"),
+            9,
+            1,
+            unsaved_files=files,
+            include_brief_comments=True,
+        )
 
         expected = [
-          "{'int', ResultType} | {'test1', TypedText} || Priority: 50 || Availability: Available || Brief comment: Aaa.",
-          "{'void', ResultType} | {'test2', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 50 || Availability: Available || Brief comment: Bbb.",
-          "{'return', TypedText} | {';', SemiColon} || Priority: 40 || Availability: Available || Brief comment: None"
+            "{'int', ResultType} | {'test1', TypedText} || Priority: 50 || Availability: Available || Brief comment: Aaa.",
+            "{'void', ResultType} | {'test2', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 50 || Availability: Available || Brief comment: Bbb.",
+            "{'return', TypedText} | {';', SemiColon} || Priority: 40 || Availability: Available || Brief comment: None",
         ]
         self.check_completion_results(cr, expected)
 
     def test_code_complete_availability(self):
-        files = [('fake.cpp', """
+        files = [
+            (
+                "fake.cpp",
+                """
 class P {
 protected:
   int member;
@@ -87,26 +117,30 @@ class Q : public P {
   x.; // member is inaccessible
   y.; // member is accessible
 }
-""")]
+""",
+            )
+        ]
 
-        tu = TranslationUnit.from_source('fake.cpp', ['-std=c++98'], unsaved_files=files)
+        tu = TranslationUnit.from_source(
+            "fake.cpp", ["-std=c++98"], unsaved_files=files
+        )
 
-        cr = tu.codeComplete('fake.cpp', 12, 5, unsaved_files=files)
+        cr = tu.codeComplete("fake.cpp", 12, 5, unsaved_files=files)
 
         expected = [
-          "{'const', TypedText} || Priority: 50 || Availability: Available || Brief comment: None",
-          "{'volatile', TypedText} || Priority: 50 || Availability: Available || Brief comment: None",
-          "{'operator', TypedText} || Priority: 40 || Availability: Available || Brief comment: None",
-          "{'P', TypedText} || Priority: 50 || Availability: Available || Brief comment: None",
-          "{'Q', TypedText} || Priority: 50 || Availability: Available || Brief comment: None"
+            "{'const', TypedText} || Priority: 50 || Availability: Available || Brief comment: None",
+            "{'volatile', TypedText} || Priority: 50 || Availability: Available || Brief comment: None",
+            "{'operator', TypedText} || Priority: 40 || Availability: Available || Brief comment: None",
+            "{'P', TypedText} || Priority: 50 || Availability: Available || Brief comment: None",
+            "{'Q', TypedText} || Priority: 50 || Availability: Available || Brief comment: None",
         ]
         self.check_completion_results(cr, expected)
 
-        cr = tu.codeComplete('fake.cpp', 13, 5, unsaved_files=files)
+        cr = tu.codeComplete("fake.cpp", 13, 5, unsaved_files=files)
         expected = [
             "{'P', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None",
             "{'P &', ResultType} | {'operator=', TypedText} | {'(', LeftParen} | {'const P &', Placeholder} | {')', RightParen} || Priority: 79 || Availability: Available || Brief comment: None",
             "{'int', ResultType} | {'member', TypedText} || Priority: 35 || Availability: NotAccessible || Brief comment: None",
-            "{'void', ResultType} | {'~P', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 79 || Availability: Available || Brief comment: None"
+            "{'void', ResultType} | {'~P', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 79 || Availability: Available || Brief comment: None",
         ]
         self.check_completion_results(cr, expected)

diff --git a/clang/bindings/python/tests/cindex/test_comment.py b/clang/bindings/python/tests/cindex/test_comment.py
index 73fb617ae1681..0727c6fa35d95 100644
--- a/clang/bindings/python/tests/cindex/test_comment.py
+++ b/clang/bindings/python/tests/cindex/test_comment.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import TranslationUnit
 from tests.cindex.util import get_cursor
@@ -11,7 +12,10 @@
 
 class TestComment(unittest.TestCase):
     def test_comment(self):
-        files = [('fake.c', """
+        files = [
+            (
+                "fake.c",
+                """
 /// Aaa.
 int test1;
 
@@ -22,11 +26,17 @@ def test_comment(self):
 void f() {
 
 }
-""")]
+""",
+            )
+        ]
         # make a comment-aware TU
-        tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files,
-                options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
-        test1 = get_cursor(tu, 'test1')
+        tu = TranslationUnit.from_source(
+            "fake.c",
+            ["-std=c99"],
+            unsaved_files=files,
+            options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION,
+        )
+        test1 = get_cursor(tu, "test1")
         self.assertIsNotNone(test1, "Could not find test1.")
         self.assertTrue(test1.type.is_pod())
         raw = test1.raw_comment
@@ -34,13 +44,13 @@ def test_comment(self):
         self.assertEqual(raw, """/// Aaa.""")
         self.assertEqual(brief, """Aaa.""")
 
-        test2 = get_cursor(tu, 'test2')
+        test2 = get_cursor(tu, "test2")
         raw = test2.raw_comment
         brief = test2.brief_comment
         self.assertEqual(raw, """/// Bbb.\n/// x""")
         self.assertEqual(brief, """Bbb. x""")
 
-        f = get_cursor(tu, 'f')
+        f = get_cursor(tu, "f")
         raw = f.raw_comment
         brief = f.brief_comment
         self.assertIsNone(raw)

diff --git a/clang/bindings/python/tests/cindex/test_cursor.py b/clang/bindings/python/tests/cindex/test_cursor.py
index 74f7445cbc3ef..84cd813941844 100644
--- a/clang/bindings/python/tests/cindex/test_cursor.py
+++ b/clang/bindings/python/tests/cindex/test_cursor.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 import ctypes
 import gc
@@ -53,6 +54,7 @@ class C {
         void foo<-7, float, true>();
     """
 
+
 class TestCursor(unittest.TestCase):
     def test_get_children(self):
         tu = get_tu(kInput)
@@ -66,9 +68,9 @@ def test_get_children(self):
 
         self.assertNotEqual(tu_nodes[0], tu_nodes[1])
         self.assertEqual(tu_nodes[0].kind, CursorKind.STRUCT_DECL)
-        self.assertEqual(tu_nodes[0].spelling, 's0')
+        self.assertEqual(tu_nodes[0].spelling, "s0")
         self.assertEqual(tu_nodes[0].is_definition(), True)
-        self.assertEqual(tu_nodes[0].location.file.name, 't.c')
+        self.assertEqual(tu_nodes[0].location.file.name, "t.c")
         self.assertEqual(tu_nodes[0].location.line, 1)
         self.assertEqual(tu_nodes[0].location.column, 8)
         self.assertGreater(tu_nodes[0].hash, 0)
@@ -77,25 +79,25 @@ def test_get_children(self):
         s0_nodes = list(tu_nodes[0].get_children())
         self.assertEqual(len(s0_nodes), 2)
         self.assertEqual(s0_nodes[0].kind, CursorKind.FIELD_DECL)
-        self.assertEqual(s0_nodes[0].spelling, 'a')
+        self.assertEqual(s0_nodes[0].spelling, "a")
         self.assertEqual(s0_nodes[0].type.kind, TypeKind.INT)
         self.assertEqual(s0_nodes[1].kind, CursorKind.FIELD_DECL)
-        self.assertEqual(s0_nodes[1].spelling, 'b')
+        self.assertEqual(s0_nodes[1].spelling, "b")
         self.assertEqual(s0_nodes[1].type.kind, TypeKind.INT)
 
         self.assertEqual(tu_nodes[1].kind, CursorKind.STRUCT_DECL)
-        self.assertEqual(tu_nodes[1].spelling, 's1')
-        self.assertEqual(tu_nodes[1].displayname, 's1')
+        self.assertEqual(tu_nodes[1].spelling, "s1")
+        self.assertEqual(tu_nodes[1].displayname, "s1")
         self.assertEqual(tu_nodes[1].is_definition(), False)
 
         self.assertEqual(tu_nodes[2].kind, CursorKind.FUNCTION_DECL)
-        self.assertEqual(tu_nodes[2].spelling, 'f0')
-        self.assertEqual(tu_nodes[2].displayname, 'f0(int, int)')
+        self.assertEqual(tu_nodes[2].spelling, "f0")
+        self.assertEqual(tu_nodes[2].displayname, "f0(int, int)")
         self.assertEqual(tu_nodes[2].is_definition(), True)
 
     def test_references(self):
         """Ensure that references to TranslationUnit are kept."""
-        tu = get_tu('int x;')
+        tu = get_tu("int x;")
         cursors = list(tu.cursor.get_children())
         self.assertGreater(len(cursors), 0)
 
@@ -111,12 +113,12 @@ def test_references(self):
         parent = cursor.semantic_parent
 
     def test_canonical(self):
-        source = 'struct X; struct X; struct X { int member; };'
+        source = "struct X; struct X; struct X { int member; };"
         tu = get_tu(source)
 
         cursors = []
         for cursor in tu.cursor.get_children():
-            if cursor.spelling == 'X':
+            if cursor.spelling == "X":
                 cursors.append(cursor)
 
         self.assertEqual(len(cursors), 3)
@@ -124,12 +126,12 @@ def test_canonical(self):
 
     def test_is_const_method(self):
         """Ensure Cursor.is_const_method works."""
-        source = 'class X { void foo() const; void bar(); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { void foo() const; void bar(); };"
+        tu = get_tu(source, lang="cpp")
 
-        cls = get_cursor(tu, 'X')
-        foo = get_cursor(tu, 'foo')
-        bar = get_cursor(tu, 'bar')
+        cls = get_cursor(tu, "X")
+        foo = get_cursor(tu, "foo")
+        bar = get_cursor(tu, "bar")
         self.assertIsNotNone(cls)
         self.assertIsNotNone(foo)
         self.assertIsNotNone(bar)
@@ -139,10 +141,10 @@ def test_is_const_method(self):
 
     def test_is_converting_constructor(self):
         """Ensure Cursor.is_converting_constructor works."""
-        source = 'class X { explicit X(int); X(double); X(); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { explicit X(int); X(double); X(); };"
+        tu = get_tu(source, lang="cpp")
 
-        xs = get_cursors(tu, 'X')
+        xs = get_cursors(tu, "X")
 
         self.assertEqual(len(xs), 4)
         self.assertEqual(xs[0].kind, CursorKind.CLASS_DECL)
@@ -155,13 +157,12 @@ def test_is_converting_constructor(self):
         self.assertTrue(cs[1].is_converting_constructor())
         self.assertFalse(cs[2].is_converting_constructor())
 
-
     def test_is_copy_constructor(self):
         """Ensure Cursor.is_copy_constructor works."""
-        source = 'class X { X(); X(const X&); X(X&&); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { X(); X(const X&); X(X&&); };"
+        tu = get_tu(source, lang="cpp")
 
-        xs = get_cursors(tu, 'X')
+        xs = get_cursors(tu, "X")
         self.assertEqual(xs[0].kind, CursorKind.CLASS_DECL)
         cs = xs[1:]
         self.assertEqual(cs[0].kind, CursorKind.CONSTRUCTOR)
@@ -174,10 +175,10 @@ def test_is_copy_constructor(self):
 
     def test_is_default_constructor(self):
         """Ensure Cursor.is_default_constructor works."""
-        source = 'class X { X(); X(int); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { X(); X(int); };"
+        tu = get_tu(source, lang="cpp")
 
-        xs = get_cursors(tu, 'X')
+        xs = get_cursors(tu, "X")
         self.assertEqual(xs[0].kind, CursorKind.CLASS_DECL)
         cs = xs[1:]
         self.assertEqual(cs[0].kind, CursorKind.CONSTRUCTOR)
@@ -188,10 +189,10 @@ def test_is_default_constructor(self):
 
     def test_is_move_constructor(self):
         """Ensure Cursor.is_move_constructor works."""
-        source = 'class X { X(); X(const X&); X(X&&); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { X(); X(const X&); X(X&&); };"
+        tu = get_tu(source, lang="cpp")
 
-        xs = get_cursors(tu, 'X')
+        xs = get_cursors(tu, "X")
         self.assertEqual(xs[0].kind, CursorKind.CLASS_DECL)
         cs = xs[1:]
         self.assertEqual(cs[0].kind, CursorKind.CONSTRUCTOR)
@@ -204,11 +205,11 @@ def test_is_move_constructor(self):
 
     def test_is_default_method(self):
         """Ensure Cursor.is_default_method works."""
-        source = 'class X { X() = default; }; class Y { Y(); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { X() = default; }; class Y { Y(); };"
+        tu = get_tu(source, lang="cpp")
 
-        xs = get_cursors(tu, 'X')
-        ys = get_cursors(tu, 'Y')
+        xs = get_cursors(tu, "X")
+        ys = get_cursors(tu, "Y")
 
         self.assertEqual(len(xs), 2)
         self.assertEqual(len(ys), 2)
@@ -268,14 +269,22 @@ class Bar {
         self.assertEqual(len(move_assignment_operators_cursors), 8)
         self.assertTrue(len(non_move_assignment_operators_cursors), 7)
 
-        self.assertTrue(all([
-            cursor.is_move_assignment_operator_method()
-            for cursor in move_assignment_operators_cursors
-        ]))
-        self.assertFalse(any([
-            cursor.is_move_assignment_operator_method()
-            for cursor in non_move_assignment_operators_cursors
-        ]))
+        self.assertTrue(
+            all(
+                [
+                    cursor.is_move_assignment_operator_method()
+                    for cursor in move_assignment_operators_cursors
+                ]
+            )
+        )
+        self.assertFalse(
+            any(
+                [
+                    cursor.is_move_assignment_operator_method()
+                    for cursor in non_move_assignment_operators_cursors
+                ]
+            )
+        )
 
     def test_is_explicit_method(self):
         """Ensure Cursor.is_explicit_method works."""
@@ -297,9 +306,7 @@ def test_is_explicit_method(self):
             explicit(false) operator float();
         };
         """
-        tu_with_explicit_methods = get_tu(
-            source_with_explicit_methods, lang="cpp"
-        )
+        tu_with_explicit_methods = get_tu(source_with_explicit_methods, lang="cpp")
         tu_without_explicit_methods = get_tu(
             source_without_explicit_methods, lang="cpp"
         )
@@ -319,23 +326,23 @@ def test_is_explicit_method(self):
         self.assertEqual(len(explicit_methods_cursors), 4)
         self.assertTrue(len(non_explicit_methods_cursors), 4)
 
-        self.assertTrue(all([
-            cursor.is_explicit_method()
-            for cursor in explicit_methods_cursors
-        ]))
-        self.assertFalse(any([
-            cursor.is_explicit_method()
-            for cursor in non_explicit_methods_cursors
-        ]))
+        self.assertTrue(
+            all([cursor.is_explicit_method() for cursor in explicit_methods_cursors])
+        )
+        self.assertFalse(
+            any(
+                [cursor.is_explicit_method() for cursor in non_explicit_methods_cursors]
+            )
+        )
 
     def test_is_mutable_field(self):
         """Ensure Cursor.is_mutable_field works."""
-        source = 'class X { int x_; mutable int y_; };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { int x_; mutable int y_; };"
+        tu = get_tu(source, lang="cpp")
 
-        cls = get_cursor(tu, 'X')
-        x_ = get_cursor(tu, 'x_')
-        y_ = get_cursor(tu, 'y_')
+        cls = get_cursor(tu, "X")
+        x_ = get_cursor(tu, "x_")
+        y_ = get_cursor(tu, "y_")
         self.assertIsNotNone(cls)
         self.assertIsNotNone(x_)
         self.assertIsNotNone(y_)
@@ -346,12 +353,12 @@ def test_is_mutable_field(self):
     def test_is_static_method(self):
         """Ensure Cursor.is_static_method works."""
 
-        source = 'class X { static void foo(); void bar(); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { static void foo(); void bar(); };"
+        tu = get_tu(source, lang="cpp")
 
-        cls = get_cursor(tu, 'X')
-        foo = get_cursor(tu, 'foo')
-        bar = get_cursor(tu, 'bar')
+        cls = get_cursor(tu, "X")
+        foo = get_cursor(tu, "foo")
+        bar = get_cursor(tu, "bar")
         self.assertIsNotNone(cls)
         self.assertIsNotNone(foo)
         self.assertIsNotNone(bar)
@@ -361,12 +368,12 @@ def test_is_static_method(self):
 
     def test_is_pure_virtual_method(self):
         """Ensure Cursor.is_pure_virtual_method works."""
-        source = 'class X { virtual void foo() = 0; virtual void bar(); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { virtual void foo() = 0; virtual void bar(); };"
+        tu = get_tu(source, lang="cpp")
 
-        cls = get_cursor(tu, 'X')
-        foo = get_cursor(tu, 'foo')
-        bar = get_cursor(tu, 'bar')
+        cls = get_cursor(tu, "X")
+        foo = get_cursor(tu, "foo")
+        bar = get_cursor(tu, "bar")
         self.assertIsNotNone(cls)
         self.assertIsNotNone(foo)
         self.assertIsNotNone(bar)
@@ -376,12 +383,12 @@ def test_is_pure_virtual_method(self):
 
     def test_is_virtual_method(self):
         """Ensure Cursor.is_virtual_method works."""
-        source = 'class X { virtual void foo(); void bar(); };'
-        tu = get_tu(source, lang='cpp')
+        source = "class X { virtual void foo(); void bar(); };"
+        tu = get_tu(source, lang="cpp")
 
-        cls = get_cursor(tu, 'X')
-        foo = get_cursor(tu, 'foo')
-        bar = get_cursor(tu, 'bar')
+        cls = get_cursor(tu, "X")
+        foo = get_cursor(tu, "foo")
+        bar = get_cursor(tu, "bar")
         self.assertIsNotNone(cls)
         self.assertIsNotNone(foo)
         self.assertIsNotNone(bar)
@@ -391,23 +398,23 @@ def test_is_virtual_method(self):
 
     def test_is_abstract_record(self):
         """Ensure Cursor.is_abstract_record works."""
-        source = 'struct X { virtual void x() = 0; }; struct Y : X { void x(); };'
-        tu = get_tu(source, lang='cpp')
+        source = "struct X { virtual void x() = 0; }; struct Y : X { void x(); };"
+        tu = get_tu(source, lang="cpp")
 
-        cls = get_cursor(tu, 'X')
+        cls = get_cursor(tu, "X")
         self.assertTrue(cls.is_abstract_record())
 
-        cls = get_cursor(tu, 'Y')
+        cls = get_cursor(tu, "Y")
         self.assertFalse(cls.is_abstract_record())
 
     def test_is_scoped_enum(self):
         """Ensure Cursor.is_scoped_enum works."""
-        source = 'class X {}; enum RegularEnum {}; enum class ScopedEnum {};'
-        tu = get_tu(source, lang='cpp')
+        source = "class X {}; enum RegularEnum {}; enum class ScopedEnum {};"
+        tu = get_tu(source, lang="cpp")
 
-        cls = get_cursor(tu, 'X')
-        regular_enum = get_cursor(tu, 'RegularEnum')
-        scoped_enum = get_cursor(tu, 'ScopedEnum')
+        cls = get_cursor(tu, "X")
+        regular_enum = get_cursor(tu, "RegularEnum")
+        scoped_enum = get_cursor(tu, "ScopedEnum")
         self.assertIsNotNone(cls)
         self.assertIsNotNone(regular_enum)
         self.assertIsNotNone(scoped_enum)
@@ -417,8 +424,8 @@ def test_is_scoped_enum(self):
         self.assertTrue(scoped_enum.is_scoped_enum())
 
     def test_underlying_type(self):
-        tu = get_tu('typedef int foo;')
-        typedef = get_cursor(tu, 'foo')
+        tu = get_tu("typedef int foo;")
+        typedef = get_cursor(tu, "foo")
         self.assertIsNotNone(typedef)
 
         self.assertTrue(typedef.kind.is_declaration())
@@ -426,25 +433,25 @@ def test_underlying_type(self):
         self.assertEqual(underlying.kind, TypeKind.INT)
 
     def test_semantic_parent(self):
-        tu = get_tu(kParentTest, 'cpp')
-        curs = get_cursors(tu, 'f')
-        decl = get_cursor(tu, 'C')
+        tu = get_tu(kParentTest, "cpp")
+        curs = get_cursors(tu, "f")
+        decl = get_cursor(tu, "C")
         self.assertEqual(len(curs), 2)
         self.assertEqual(curs[0].semantic_parent, curs[1].semantic_parent)
         self.assertEqual(curs[0].semantic_parent, decl)
 
     def test_lexical_parent(self):
-        tu = get_tu(kParentTest, 'cpp')
-        curs = get_cursors(tu, 'f')
-        decl = get_cursor(tu, 'C')
+        tu = get_tu(kParentTest, "cpp")
+        curs = get_cursors(tu, "f")
+        decl = get_cursor(tu, "C")
         self.assertEqual(len(curs), 2)
         self.assertNotEqual(curs[0].lexical_parent, curs[1].lexical_parent)
         self.assertEqual(curs[0].lexical_parent, decl)
         self.assertEqual(curs[1].lexical_parent, tu.cursor)
 
     def test_enum_type(self):
-        tu = get_tu('enum TEST { FOO=1, BAR=2 };')
-        enum = get_cursor(tu, 'TEST')
+        tu = get_tu("enum TEST { FOO=1, BAR=2 };")
+        enum = get_cursor(tu, "TEST")
         self.assertIsNotNone(enum)
 
         self.assertEqual(enum.kind, CursorKind.ENUM_DECL)
@@ -452,23 +459,23 @@ def test_enum_type(self):
         self.assertIn(enum_type.kind, (TypeKind.UINT, TypeKind.INT))
 
     def test_enum_type_cpp(self):
-        tu = get_tu('enum TEST : long long { FOO=1, BAR=2 };', lang="cpp")
-        enum = get_cursor(tu, 'TEST')
+        tu = get_tu("enum TEST : long long { FOO=1, BAR=2 };", lang="cpp")
+        enum = get_cursor(tu, "TEST")
         self.assertIsNotNone(enum)
 
         self.assertEqual(enum.kind, CursorKind.ENUM_DECL)
         self.assertEqual(enum.enum_type.kind, TypeKind.LONGLONG)
 
     def test_objc_type_encoding(self):
-        tu = get_tu('int i;', lang='objc')
-        i = get_cursor(tu, 'i')
+        tu = get_tu("int i;", lang="objc")
+        i = get_cursor(tu, "i")
 
         self.assertIsNotNone(i)
-        self.assertEqual(i.objc_type_encoding, 'i')
+        self.assertEqual(i.objc_type_encoding, "i")
 
     def test_enum_values(self):
-        tu = get_tu('enum TEST { SPAM=1, EGG, HAM = EGG * 20};')
-        enum = get_cursor(tu, 'TEST')
+        tu = get_tu("enum TEST { SPAM=1, EGG, HAM = EGG * 20};")
+        enum = get_cursor(tu, "TEST")
         self.assertIsNotNone(enum)
 
         self.assertEqual(enum.kind, CursorKind.ENUM_DECL)
@@ -486,8 +493,10 @@ def test_enum_values(self):
         self.assertEqual(ham.enum_value, 40)
 
     def test_enum_values_cpp(self):
-        tu = get_tu('enum TEST : long long { SPAM = -1, HAM = 0x10000000000};', lang="cpp")
-        enum = get_cursor(tu, 'TEST')
+        tu = get_tu(
+            "enum TEST : long long { SPAM = -1, HAM = 0x10000000000};", lang="cpp"
+        )
+        enum = get_cursor(tu, "TEST")
         self.assertIsNotNone(enum)
 
         self.assertEqual(enum.kind, CursorKind.ENUM_DECL)
@@ -503,9 +512,11 @@ def test_enum_values_cpp(self):
         self.assertEqual(ham.enum_value, 0x10000000000)
 
     def test_annotation_attribute(self):
-        tu = get_tu('int foo (void) __attribute__ ((annotate("here be annotation attribute")));')
+        tu = get_tu(
+            'int foo (void) __attribute__ ((annotate("here be annotation attribute")));'
+        )
 
-        foo = get_cursor(tu, 'foo')
+        foo = get_cursor(tu, "foo")
         self.assertIsNotNone(foo)
 
         for c in foo.get_children():
@@ -518,13 +529,13 @@ def test_annotation_attribute(self):
     def test_annotation_template(self):
         annotation = '__attribute__ ((annotate("annotation")))'
         for source, kind in [
-                ('int foo (T value) %s;', CursorKind.FUNCTION_TEMPLATE),
-                ('class %s foo {};', CursorKind.CLASS_TEMPLATE),
+            ("int foo (T value) %s;", CursorKind.FUNCTION_TEMPLATE),
+            ("class %s foo {};", CursorKind.CLASS_TEMPLATE),
         ]:
-            source = 'template<typename T> ' + (source % annotation)
+            source = "template<typename T> " + (source % annotation)
             tu = get_tu(source, lang="cpp")
 
-            foo = get_cursor(tu, 'foo')
+            foo = get_cursor(tu, "foo")
             self.assertIsNotNone(foo)
             self.assertEqual(foo.kind, kind)
 
@@ -536,8 +547,8 @@ def test_annotation_template(self):
                 self.fail("Couldn't find annotation for {}".format(kind))
 
     def test_result_type(self):
-        tu = get_tu('int foo();')
-        foo = get_cursor(tu, 'foo')
+        tu = get_tu("int foo();")
+        foo = get_cursor(tu, "foo")
 
         self.assertIsNotNone(foo)
         t = foo.result_type
@@ -549,22 +560,22 @@ def test_result_type_objc_method_decl(self):
         -(void)voidMethod;
         @end
         """
-        tu = get_tu(code, lang='objc')
-        cursor = get_cursor(tu, 'voidMethod')
+        tu = get_tu(code, lang="objc")
+        cursor = get_cursor(tu, "voidMethod")
         result_type = cursor.result_type
         self.assertEqual(cursor.kind, CursorKind.OBJC_INSTANCE_METHOD_DECL)
         self.assertEqual(result_type.kind, TypeKind.VOID)
 
     def test_availability(self):
-        tu = get_tu('class A { A(A const&) = delete; };', lang='cpp')
+        tu = get_tu("class A { A(A const&) = delete; };", lang="cpp")
 
         # AvailabilityKind.AVAILABLE
-        cursor = get_cursor(tu, 'A')
+        cursor = get_cursor(tu, "A")
         self.assertEqual(cursor.kind, CursorKind.CLASS_DECL)
         self.assertEqual(cursor.availability, AvailabilityKind.AVAILABLE)
 
         # AvailabilityKind.NOT_AVAILABLE
-        cursors = get_cursors(tu, 'A')
+        cursors = get_cursors(tu, "A")
         for c in cursors:
             if c.kind == CursorKind.CONSTRUCTOR:
                 self.assertEqual(c.availability, AvailabilityKind.NOT_AVAILABLE)
@@ -573,26 +584,26 @@ def test_availability(self):
             self.fail("Could not find cursor for deleted constructor")
 
         # AvailabilityKind.DEPRECATED
-        tu = get_tu('void test() __attribute__((deprecated));', lang='cpp')
-        cursor = get_cursor(tu, 'test')
+        tu = get_tu("void test() __attribute__((deprecated));", lang="cpp")
+        cursor = get_cursor(tu, "test")
         self.assertEqual(cursor.availability, AvailabilityKind.DEPRECATED)
 
         # AvailabilityKind.NOT_ACCESSIBLE is only used in the code completion results
 
     def test_get_tokens(self):
         """Ensure we can map cursors back to tokens."""
-        tu = get_tu('int foo(int i);')
-        foo = get_cursor(tu, 'foo')
+        tu = get_tu("int foo(int i);")
+        foo = get_cursor(tu, "foo")
 
         tokens = list(foo.get_tokens())
         self.assertEqual(len(tokens), 6)
-        self.assertEqual(tokens[0].spelling, 'int')
-        self.assertEqual(tokens[1].spelling, 'foo')
+        self.assertEqual(tokens[0].spelling, "int")
+        self.assertEqual(tokens[1].spelling, "foo")
 
     def test_get_token_cursor(self):
         """Ensure we can map tokens to cursors."""
-        tu = get_tu('class A {}; int foo(A var = A());', lang='cpp')
-        foo = get_cursor(tu, 'foo')
+        tu = get_tu("class A {}; int foo(A var = A());", lang="cpp")
+        foo = get_cursor(tu, "foo")
 
         for cursor in foo.walk_preorder():
             if cursor.kind.is_expression() and not cursor.kind.is_statement():
@@ -602,18 +613,18 @@ def test_get_token_cursor(self):
 
         tokens = list(cursor.get_tokens())
         self.assertEqual(len(tokens), 4, [t.spelling for t in tokens])
-        self.assertEqual(tokens[0].spelling, '=')
-        self.assertEqual(tokens[1].spelling, 'A')
-        self.assertEqual(tokens[2].spelling, '(')
-        self.assertEqual(tokens[3].spelling, ')')
+        self.assertEqual(tokens[0].spelling, "=")
+        self.assertEqual(tokens[1].spelling, "A")
+        self.assertEqual(tokens[2].spelling, "(")
+        self.assertEqual(tokens[3].spelling, ")")
         t_cursor = tokens[1].cursor
         self.assertEqual(t_cursor.kind, CursorKind.TYPE_REF)
-        r_cursor = t_cursor.referenced # should not raise an exception
+        r_cursor = t_cursor.referenced  # should not raise an exception
         self.assertEqual(r_cursor.kind, CursorKind.CLASS_DECL)
 
     def test_get_arguments(self):
-        tu = get_tu('void foo(int i, int j);')
-        foo = get_cursor(tu, 'foo')
+        tu = get_tu("void foo(int i, int j);")
+        foo = get_cursor(tu, "foo")
         arguments = list(foo.get_arguments())
 
         self.assertEqual(len(arguments), 2)
@@ -621,43 +632,49 @@ def test_get_arguments(self):
         self.assertEqual(arguments[1].spelling, "j")
 
     def test_get_num_template_arguments(self):
-        tu = get_tu(kTemplateArgTest, lang='cpp')
-        foos = get_cursors(tu, 'foo')
+        tu = get_tu(kTemplateArgTest, lang="cpp")
+        foos = get_cursors(tu, "foo")
 
         self.assertEqual(foos[1].get_num_template_arguments(), 3)
 
     def test_get_template_argument_kind(self):
-        tu = get_tu(kTemplateArgTest, lang='cpp')
-        foos = get_cursors(tu, 'foo')
+        tu = get_tu(kTemplateArgTest, lang="cpp")
+        foos = get_cursors(tu, "foo")
 
-        self.assertEqual(foos[1].get_template_argument_kind(0), TemplateArgumentKind.INTEGRAL)
-        self.assertEqual(foos[1].get_template_argument_kind(1), TemplateArgumentKind.TYPE)
-        self.assertEqual(foos[1].get_template_argument_kind(2), TemplateArgumentKind.INTEGRAL)
+        self.assertEqual(
+            foos[1].get_template_argument_kind(0), TemplateArgumentKind.INTEGRAL
+        )
+        self.assertEqual(
+            foos[1].get_template_argument_kind(1), TemplateArgumentKind.TYPE
+        )
+        self.assertEqual(
+            foos[1].get_template_argument_kind(2), TemplateArgumentKind.INTEGRAL
+        )
 
     def test_get_template_argument_type(self):
-        tu = get_tu(kTemplateArgTest, lang='cpp')
-        foos = get_cursors(tu, 'foo')
+        tu = get_tu(kTemplateArgTest, lang="cpp")
+        foos = get_cursors(tu, "foo")
 
         self.assertEqual(foos[1].get_template_argument_type(1).kind, TypeKind.FLOAT)
 
     def test_get_template_argument_value(self):
-        tu = get_tu(kTemplateArgTest, lang='cpp')
-        foos = get_cursors(tu, 'foo')
+        tu = get_tu(kTemplateArgTest, lang="cpp")
+        foos = get_cursors(tu, "foo")
 
         self.assertEqual(foos[1].get_template_argument_value(0), -7)
         self.assertEqual(foos[1].get_template_argument_value(2), True)
 
     def test_get_template_argument_unsigned_value(self):
-        tu = get_tu(kTemplateArgTest, lang='cpp')
-        foos = get_cursors(tu, 'foo')
+        tu = get_tu(kTemplateArgTest, lang="cpp")
+        foos = get_cursors(tu, "foo")
 
-        self.assertEqual(foos[1].get_template_argument_unsigned_value(0), 2 ** 32 - 7)
+        self.assertEqual(foos[1].get_template_argument_unsigned_value(0), 2**32 - 7)
         self.assertEqual(foos[1].get_template_argument_unsigned_value(2), True)
 
     def test_referenced(self):
-        tu = get_tu('void foo(); void bar() { foo(); }')
-        foo = get_cursor(tu, 'foo')
-        bar = get_cursor(tu, 'bar')
+        tu = get_tu("void foo(); void bar() { foo(); }")
+        foo = get_cursor(tu, "foo")
+        bar = get_cursor(tu, "bar")
         for c in bar.get_children():
             if c.kind == CursorKind.CALL_EXPR:
                 self.assertEqual(c.referenced.spelling, foo.spelling)
@@ -667,12 +684,14 @@ def test_mangled_name(self):
         kInputForMangling = """\
         int foo(int, int);
         """
-        tu = get_tu(kInputForMangling, lang='cpp')
-        foo = get_cursor(tu, 'foo')
+        tu = get_tu(kInputForMangling, lang="cpp")
+        foo = get_cursor(tu, "foo")
 
         # Since libclang does not link in targets, we cannot pass a triple to it
         # and force the target. To enable this test to pass on all platforms, accept
         # all valid manglings.
         # [c-index-test handles this by running the source through clang, emitting
         #  an AST file and running libclang on that AST file]
-        self.assertIn(foo.mangled_name, ('_Z3fooii', '__Z3fooii', '?foo@@YAHHH', '?foo@@YAHHH@Z'))
+        self.assertIn(
+            foo.mangled_name, ("_Z3fooii", "__Z3fooii", "?foo@@YAHHH", "?foo@@YAHHH@Z")
+        )
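
A condensed sketch of the traversal pattern test_cursor.py relies on;
get_tu and get_cursor are helpers from tests/cindex/util.py, so this
uses the public API directly (the source string is illustrative):

    from clang.cindex import CursorKind, TranslationUnit

    tu = TranslationUnit.from_source(
        "t.c", unsaved_files=[("t.c", "struct s0 { int a; int b; };")]
    )
    for node in tu.cursor.get_children():
        if node.kind == CursorKind.STRUCT_DECL and node.spelling == "s0":
            for field in node.get_children():
                print(field.spelling, field.type.kind)  # a/b -> TypeKind.INT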

diff --git a/clang/bindings/python/tests/cindex/test_cursor_kind.py b/clang/bindings/python/tests/cindex/test_cursor_kind.py
index ed245d593d551..87199dba06ed2 100644
--- a/clang/bindings/python/tests/cindex/test_cursor_kind.py
+++ b/clang/bindings/python/tests/cindex/test_cursor_kind.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import CursorKind
 
@@ -10,7 +11,7 @@
 
 class TestCursorKind(unittest.TestCase):
     def test_name(self):
-        self.assertEqual(CursorKind.UNEXPOSED_DECL.name, 'UNEXPOSED_DECL')
+        self.assertEqual(CursorKind.UNEXPOSED_DECL.name, "UNEXPOSED_DECL")
 
     def test_get_all_kinds(self):
         kinds = CursorKind.get_all_kinds()
@@ -43,16 +44,27 @@ def test_kind_groups(self):
         self.assertFalse(CursorKind.TYPE_REF.is_unexposed())
 
         for k in CursorKind.get_all_kinds():
-            group = [n for n in ('is_declaration', 'is_reference', 'is_expression',
-                                 'is_statement', 'is_invalid', 'is_attribute')
-                     if getattr(k, n)()]
-
-            if k in (   CursorKind.TRANSLATION_UNIT,
-                        CursorKind.MACRO_DEFINITION,
-                        CursorKind.MACRO_INSTANTIATION,
-                        CursorKind.INCLUSION_DIRECTIVE,
-                        CursorKind.PREPROCESSING_DIRECTIVE,
-                        CursorKind.OVERLOAD_CANDIDATE):
+            group = [
+                n
+                for n in (
+                    "is_declaration",
+                    "is_reference",
+                    "is_expression",
+                    "is_statement",
+                    "is_invalid",
+                    "is_attribute",
+                )
+                if getattr(k, n)()
+            ]
+
+            if k in (
+                CursorKind.TRANSLATION_UNIT,
+                CursorKind.MACRO_DEFINITION,
+                CursorKind.MACRO_INSTANTIATION,
+                CursorKind.INCLUSION_DIRECTIVE,
+                CursorKind.PREPROCESSING_DIRECTIVE,
+                CursorKind.OVERLOAD_CANDIDATE,
+            ):
                 self.assertEqual(len(group), 0)
             else:
                 self.assertEqual(len(group), 1)
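
The reflowed predicate loop above can be run on its own; a small sketch
that classifies every kind with the same predicate set:

    from clang.cindex import CursorKind

    for kind in CursorKind.get_all_kinds():
        group = [
            name
            for name in (
                "is_declaration",
                "is_reference",
                "is_expression",
                "is_statement",
                "is_invalid",
                "is_attribute",
            )
            if getattr(kind, name)()
        ]
        # Most kinds land in exactly one group; a handful land in none.
        print(kind.name, group)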

diff --git a/clang/bindings/python/tests/cindex/test_diagnostics.py b/clang/bindings/python/tests/cindex/test_diagnostics.py
index f7e6e18c91da0..57c41baaa2541 100644
--- a/clang/bindings/python/tests/cindex/test_diagnostics.py
+++ b/clang/bindings/python/tests/cindex/test_diagnostics.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import *
 from .util import get_tu
@@ -14,41 +15,42 @@
 
 class TestDiagnostics(unittest.TestCase):
     def test_diagnostic_warning(self):
-        tu = get_tu('int f0() {}\n')
+        tu = get_tu("int f0() {}\n")
         self.assertEqual(len(tu.diagnostics), 1)
         self.assertEqual(tu.diagnostics[0].severity, Diagnostic.Warning)
         self.assertEqual(tu.diagnostics[0].location.line, 1)
         self.assertEqual(tu.diagnostics[0].location.column, 11)
-        self.assertEqual(tu.diagnostics[0].spelling,
-                'non-void function does not return a value')
+        self.assertEqual(
+            tu.diagnostics[0].spelling, "non-void function does not return a value"
+        )
 
     def test_diagnostic_note(self):
         # FIXME: We aren't getting notes here for some reason.
-        tu = get_tu('#define A x\nvoid *A = 1;\n')
+        tu = get_tu("#define A x\nvoid *A = 1;\n")
         self.assertEqual(len(tu.diagnostics), 1)
         self.assertEqual(tu.diagnostics[0].severity, Diagnostic.Error)
         self.assertEqual(tu.diagnostics[0].location.line, 2)
         self.assertEqual(tu.diagnostics[0].location.column, 7)
-        self.assertIn('incompatible', tu.diagnostics[0].spelling)
-#       self.assertEqual(tu.diagnostics[1].severity, Diagnostic.Note)
-#       self.assertEqual(tu.diagnostics[1].location.line, 1)
-#       self.assertEqual(tu.diagnostics[1].location.column, 11)
-#       self.assertEqual(tu.diagnostics[1].spelling, 'instantiated from')
+        self.assertIn("incompatible", tu.diagnostics[0].spelling)
+
+    #       self.assertEqual(tu.diagnostics[1].severity, Diagnostic.Note)
+    #       self.assertEqual(tu.diagnostics[1].location.line, 1)
+    #       self.assertEqual(tu.diagnostics[1].location.column, 11)
+    #       self.assertEqual(tu.diagnostics[1].spelling, 'instantiated from')
 
     def test_diagnostic_fixit(self):
-        tu = get_tu('struct { int f0; } x = { f0 : 1 };')
+        tu = get_tu("struct { int f0; } x = { f0 : 1 };")
         self.assertEqual(len(tu.diagnostics), 1)
         self.assertEqual(tu.diagnostics[0].severity, Diagnostic.Warning)
         self.assertEqual(tu.diagnostics[0].location.line, 1)
         self.assertEqual(tu.diagnostics[0].location.column, 26)
-        self.assertRegex(tu.diagnostics[0].spelling,
-            'use of GNU old-style.*')
+        self.assertRegex(tu.diagnostics[0].spelling, "use of GNU old-style.*")
         self.assertEqual(len(tu.diagnostics[0].fixits), 1)
         self.assertEqual(tu.diagnostics[0].fixits[0].range.start.line, 1)
         self.assertEqual(tu.diagnostics[0].fixits[0].range.start.column, 26)
         self.assertEqual(tu.diagnostics[0].fixits[0].range.end.line, 1)
         self.assertEqual(tu.diagnostics[0].fixits[0].range.end.column, 30)
-        self.assertEqual(tu.diagnostics[0].fixits[0].value, '.f0 = ')
+        self.assertEqual(tu.diagnostics[0].fixits[0].value, ".f0 = ")
 
     def test_diagnostic_range(self):
         tu = get_tu('void f() { int i = "a"; }')
@@ -56,8 +58,7 @@ def test_diagnostic_range(self):
         self.assertEqual(tu.diagnostics[0].severity, Diagnostic.Error)
         self.assertEqual(tu.diagnostics[0].location.line, 1)
         self.assertEqual(tu.diagnostics[0].location.column, 16)
-        self.assertRegex(tu.diagnostics[0].spelling,
-            'incompatible pointer to.*')
+        self.assertRegex(tu.diagnostics[0].spelling, "incompatible pointer to.*")
         self.assertEqual(len(tu.diagnostics[0].fixits), 0)
         self.assertEqual(len(tu.diagnostics[0].ranges), 1)
         self.assertEqual(tu.diagnostics[0].ranges[0].start.line, 1)
@@ -69,7 +70,7 @@ def test_diagnostic_range(self):
 
     def test_diagnostic_category(self):
         """Ensure that category properties work."""
-        tu = get_tu('int f(int i) { return 7; }', all_warnings=True)
+        tu = get_tu("int f(int i) { return 7; }", all_warnings=True)
         self.assertEqual(len(tu.diagnostics), 1)
         d = tu.diagnostics[0]
 
@@ -78,33 +79,35 @@ def test_diagnostic_category(self):
         self.assertEqual(d.location.column, 11)
 
         self.assertEqual(d.category_number, 2)
-        self.assertEqual(d.category_name, 'Semantic Issue')
+        self.assertEqual(d.category_name, "Semantic Issue")
 
     def test_diagnostic_option(self):
         """Ensure that category option properties work."""
-        tu = get_tu('int f(int i) { return 7; }', all_warnings=True)
+        tu = get_tu("int f(int i) { return 7; }", all_warnings=True)
         self.assertEqual(len(tu.diagnostics), 1)
         d = tu.diagnostics[0]
 
-        self.assertEqual(d.option, '-Wunused-parameter')
-        self.assertEqual(d.disable_option, '-Wno-unused-parameter')
+        self.assertEqual(d.option, "-Wunused-parameter")
+        self.assertEqual(d.disable_option, "-Wno-unused-parameter")
 
     def test_diagnostic_children(self):
-        tu = get_tu('void f(int x) {} void g() { f(); }')
+        tu = get_tu("void f(int x) {} void g() { f(); }")
         self.assertEqual(len(tu.diagnostics), 1)
         d = tu.diagnostics[0]
 
         children = d.children
         self.assertEqual(len(children), 1)
         self.assertEqual(children[0].severity, Diagnostic.Note)
-        self.assertRegex(children[0].spelling,
-                '.*declared here')
+        self.assertRegex(children[0].spelling, ".*declared here")
         self.assertEqual(children[0].location.line, 1)
         self.assertEqual(children[0].location.column, 6)
 
     def test_diagnostic_string_repr(self):
-        tu = get_tu('struct MissingSemicolon{}')
+        tu = get_tu("struct MissingSemicolon{}")
         self.assertEqual(len(tu.diagnostics), 1)
         d = tu.diagnostics[0]
 
-        self.assertEqual(repr(d), '<Diagnostic severity 3, location <SourceLocation file \'t.c\', line 1, column 26>, spelling "expected \';\' after struct">')
+        self.assertEqual(
+            repr(d),
+            "<Diagnostic severity 3, location <SourceLocation file 't.c', line 1, column 26>, spelling \"expected ';' after struct\">",
+        )
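
A short sketch of the Diagnostic surface these tests walk (severity,
location, spelling, fixits); the source string is illustrative:

    from clang.cindex import TranslationUnit

    tu = TranslationUnit.from_source(
        "t.c", unsaved_files=[("t.c", "struct { int f0; } x = { f0 : 1 };")]
    )
    for d in tu.diagnostics:
        print(d.severity, d.location.line, d.location.column, d.spelling)
        for fixit in d.fixits:
            print("  fixit:", fixit.value)  # the GNU-designator fix is ".f0 = "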

diff --git a/clang/bindings/python/tests/cindex/test_exception_specification_kind.py b/clang/bindings/python/tests/cindex/test_exception_specification_kind.py
index 6c13f70fb2565..8e2a6b5c50223 100644
--- a/clang/bindings/python/tests/cindex/test_exception_specification_kind.py
+++ b/clang/bindings/python/tests/cindex/test_exception_specification_kind.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 import clang.cindex
 from clang.cindex import ExceptionSpecificationKind
@@ -24,12 +25,12 @@ def test_exception_specification_kind(self):
                     int square2(int x) noexcept;
                     int square3(int x) noexcept(noexcept(x * x));"""
 
-        tu = get_tu(source, lang='cpp', flags=['-std=c++14'])
+        tu = get_tu(source, lang="cpp", flags=["-std=c++14"])
 
         declarations = find_function_declarations(tu.cursor)
         expected = [
-            ('square1', ExceptionSpecificationKind.NONE),
-            ('square2', ExceptionSpecificationKind.BASIC_NOEXCEPT),
-            ('square3', ExceptionSpecificationKind.COMPUTED_NOEXCEPT)
+            ("square1", ExceptionSpecificationKind.NONE),
+            ("square2", ExceptionSpecificationKind.BASIC_NOEXCEPT),
+            ("square3", ExceptionSpecificationKind.COMPUTED_NOEXCEPT),
         ]
         self.assertListEqual(declarations, expected)
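
The find_function_declarations helper is defined elsewhere in this file;
an inline equivalent, sketched with the cursor's
exception_specification_kind property (an assumption based on the
standard cindex API, not shown in this hunk):

    from clang.cindex import CursorKind, TranslationUnit

    source = "int f(int x); int g(int x) noexcept;"
    tu = TranslationUnit.from_source(
        "t.cpp", ["-std=c++14"], unsaved_files=[("t.cpp", source)]
    )
    for node in tu.cursor.walk_preorder():
        if node.kind == CursorKind.FUNCTION_DECL:
            # f -> NONE, g -> BASIC_NOEXCEPT
            print(node.spelling, node.exception_specification_kind)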

diff --git a/clang/bindings/python/tests/cindex/test_file.py b/clang/bindings/python/tests/cindex/test_file.py
index a146fe5c9239c..7024b0cdf11d9 100644
--- a/clang/bindings/python/tests/cindex/test_file.py
+++ b/clang/bindings/python/tests/cindex/test_file.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import Index, File
 
@@ -11,7 +12,7 @@
 class TestFile(unittest.TestCase):
     def test_file(self):
         index = Index.create()
-        tu = index.parse('t.c', unsaved_files = [('t.c', "")])
+        tu = index.parse("t.c", unsaved_files=[("t.c", "")])
         file = File.from_name(tu, "t.c")
         self.assertEqual(str(file), "t.c")
         self.assertEqual(file.name, "t.c")

diff --git a/clang/bindings/python/tests/cindex/test_index.py b/clang/bindings/python/tests/cindex/test_index.py
index 46aafcf222e73..bf29628f5e4e7 100644
--- a/clang/bindings/python/tests/cindex/test_index.py
+++ b/clang/bindings/python/tests/cindex/test_index.py
@@ -1,14 +1,15 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import *
 import os
 import unittest
 
 
-kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
+kInputsDir = os.path.join(os.path.dirname(__file__), "INPUTS")
 
 
 class TestIndex(unittest.TestCase):
@@ -20,7 +21,7 @@ def test_create(self):
     def test_parse(self):
         index = Index.create()
         self.assertIsInstance(index, Index)
-        tu = index.parse(os.path.join(kInputsDir, 'hello.cpp'))
+        tu = index.parse(os.path.join(kInputsDir, "hello.cpp"))
         self.assertIsInstance(tu, TranslationUnit)
-        tu = index.parse(None, ['-c', os.path.join(kInputsDir, 'hello.cpp')])
+        tu = index.parse(None, ["-c", os.path.join(kInputsDir, "hello.cpp")])
         self.assertIsInstance(tu, TranslationUnit)

diff --git a/clang/bindings/python/tests/cindex/test_linkage.py b/clang/bindings/python/tests/cindex/test_linkage.py
index cdd97fc2df0dc..4a8838276fae5 100644
--- a/clang/bindings/python/tests/cindex/test_linkage.py
+++ b/clang/bindings/python/tests/cindex/test_linkage.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import LinkageKind
 from clang.cindex import Cursor
@@ -17,22 +18,25 @@ class TestLinkage(unittest.TestCase):
     def test_linkage(self):
         """Ensure that linkage specifers are available on cursors"""
 
-        tu = get_tu("""
+        tu = get_tu(
+            """
 void foo() { int no_linkage; }
 static int internal;
 namespace { struct unique_external_type {} }
 unique_external_type unique_external;
 extern int external;
-""", lang = 'cpp')
+""",
+            lang="cpp",
+        )
 
-        no_linkage = get_cursor(tu.cursor, 'no_linkage')
+        no_linkage = get_cursor(tu.cursor, "no_linkage")
         self.assertEqual(no_linkage.linkage, LinkageKind.NO_LINKAGE)
 
-        internal = get_cursor(tu.cursor, 'internal')
+        internal = get_cursor(tu.cursor, "internal")
         self.assertEqual(internal.linkage, LinkageKind.INTERNAL)
 
-        unique_external = get_cursor(tu.cursor, 'unique_external')
+        unique_external = get_cursor(tu.cursor, "unique_external")
         self.assertEqual(unique_external.linkage, LinkageKind.UNIQUE_EXTERNAL)
 
-        external = get_cursor(tu.cursor, 'external')
+        external = get_cursor(tu.cursor, "external")
         self.assertEqual(external.linkage, LinkageKind.EXTERNAL)
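
The same linkage check minus the test helpers, as a standalone sketch:

    from clang.cindex import TranslationUnit

    source = "static int internal; extern int external;"
    tu = TranslationUnit.from_source("t.cpp", unsaved_files=[("t.cpp", source)])
    for node in tu.cursor.get_children():
        # Prints LinkageKind.INTERNAL, then LinkageKind.EXTERNAL.
        print(node.spelling, node.linkage)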

diff --git a/clang/bindings/python/tests/cindex/test_location.py b/clang/bindings/python/tests/cindex/test_location.py
index 0d2c69db883cc..e23677a9be3c0 100644
--- a/clang/bindings/python/tests/cindex/test_location.py
+++ b/clang/bindings/python/tests/cindex/test_location.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import Cursor
 from clang.cindex import File
@@ -14,7 +15,7 @@
 import unittest
 
 
-baseInput="int one;\nint two;\n"
+baseInput = "int one;\nint two;\n"
 
 
 class TestLocation(unittest.TestCase):
@@ -25,43 +26,43 @@ def assert_location(self, loc, line, column, offset):
 
     def test_location(self):
         tu = get_tu(baseInput)
-        one = get_cursor(tu, 'one')
-        two = get_cursor(tu, 'two')
+        one = get_cursor(tu, "one")
+        two = get_cursor(tu, "two")
 
         self.assertIsNotNone(one)
         self.assertIsNotNone(two)
 
-        self.assert_location(one.location,line=1,column=5,offset=4)
-        self.assert_location(two.location,line=2,column=5,offset=13)
+        self.assert_location(one.location, line=1, column=5, offset=4)
+        self.assert_location(two.location, line=2, column=5, offset=13)
 
         # adding a linebreak at top should keep columns same
-        tu = get_tu('\n' + baseInput)
-        one = get_cursor(tu, 'one')
-        two = get_cursor(tu, 'two')
+        tu = get_tu("\n" + baseInput)
+        one = get_cursor(tu, "one")
+        two = get_cursor(tu, "two")
 
         self.assertIsNotNone(one)
         self.assertIsNotNone(two)
 
-        self.assert_location(one.location,line=2,column=5,offset=5)
-        self.assert_location(two.location,line=3,column=5,offset=14)
+        self.assert_location(one.location, line=2, column=5, offset=5)
+        self.assert_location(two.location, line=3, column=5, offset=14)
 
         # adding a space should affect column on first line only
-        tu = get_tu(' ' + baseInput)
-        one = get_cursor(tu, 'one')
-        two = get_cursor(tu, 'two')
+        tu = get_tu(" " + baseInput)
+        one = get_cursor(tu, "one")
+        two = get_cursor(tu, "two")
 
-        self.assert_location(one.location,line=1,column=6,offset=5)
-        self.assert_location(two.location,line=2,column=5,offset=14)
+        self.assert_location(one.location, line=1, column=6, offset=5)
+        self.assert_location(two.location, line=2, column=5, offset=14)
 
         # define the expected location ourselves and see if it matches
         # the returned location
         tu = get_tu(baseInput)
 
-        file = File.from_name(tu, 't.c')
+        file = File.from_name(tu, "t.c")
         location = SourceLocation.from_position(tu, file, 1, 5)
         cursor = Cursor.from_location(tu, location)
 
-        one = get_cursor(tu, 'one')
+        one = get_cursor(tu, "one")
         self.assertIsNotNone(one)
         self.assertEqual(one, cursor)
 
@@ -74,7 +75,7 @@ def test_location(self):
         offset_location = SourceLocation.from_offset(tu, file, 5)
         cursor = Cursor.from_location(tu, offset_location)
         verified = False
-        for n in [n for n in tu.cursor.get_children() if n.spelling == 'one']:
+        for n in [n for n in tu.cursor.get_children() if n.spelling == "one"]:
             self.assertEqual(n, cursor)
             verified = True
 
@@ -82,18 +83,22 @@ def test_location(self):
 
     def test_extent(self):
         tu = get_tu(baseInput)
-        one = get_cursor(tu, 'one')
-        two = get_cursor(tu, 'two')
-
-        self.assert_location(one.extent.start,line=1,column=1,offset=0)
-        self.assert_location(one.extent.end,line=1,column=8,offset=7)
-        self.assertEqual(baseInput[one.extent.start.offset:one.extent.end.offset], "int one")
-
-        self.assert_location(two.extent.start,line=2,column=1,offset=9)
-        self.assert_location(two.extent.end,line=2,column=8,offset=16)
-        self.assertEqual(baseInput[two.extent.start.offset:two.extent.end.offset], "int two")
-
-        file = File.from_name(tu, 't.c')
+        one = get_cursor(tu, "one")
+        two = get_cursor(tu, "two")
+
+        self.assert_location(one.extent.start, line=1, column=1, offset=0)
+        self.assert_location(one.extent.end, line=1, column=8, offset=7)
+        self.assertEqual(
+            baseInput[one.extent.start.offset : one.extent.end.offset], "int one"
+        )
+
+        self.assert_location(two.extent.start, line=2, column=1, offset=9)
+        self.assert_location(two.extent.end, line=2, column=8, offset=16)
+        self.assertEqual(
+            baseInput[two.extent.start.offset : two.extent.end.offset], "int two"
+        )
+
+        file = File.from_name(tu, "t.c")
         location1 = SourceLocation.from_position(tu, file, 1, 1)
         location2 = SourceLocation.from_position(tu, file, 1, 8)
 
@@ -106,15 +111,22 @@ def test_extent(self):
         self.assertNotEqual(range1, range3)
 
     def test_is_system_location(self):
-        header = os.path.normpath('./fake/fake.h')
-        tu = TranslationUnit.from_source('fake.c', [f'-isystem{os.path.dirname(header)}'], unsaved_files = [
-                ('fake.c', """
+        header = os.path.normpath("./fake/fake.h")
+        tu = TranslationUnit.from_source(
+            "fake.c",
+            [f"-isystem{os.path.dirname(header)}"],
+            unsaved_files=[
+                (
+                    "fake.c",
+                    """
 #include <fake.h>
 int one;
-"""),
-                (header, "int two();")
-                ])
-        one = get_cursor(tu, 'one')
-        two = get_cursor(tu, 'two')
+""",
+                ),
+                (header, "int two();"),
+            ],
+        )
+        one = get_cursor(tu, "one")
+        two = get_cursor(tu, "two")
         self.assertFalse(one.location.is_in_system_header)
         self.assertTrue(two.location.is_in_system_header)
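
Mapping a (line, column) position back to a cursor, as this test does,
in a standalone sketch ("t.c" matches the buffer name the helpers use):

    from clang.cindex import Cursor, File, SourceLocation, TranslationUnit

    tu = TranslationUnit.from_source(
        "t.c", unsaved_files=[("t.c", "int one;\nint two;\n")]
    )
    f = File.from_name(tu, "t.c")
    loc = SourceLocation.from_position(tu, f, 1, 5)  # line 1, column 5
    print(Cursor.from_location(tu, loc).spelling)    # one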

diff --git a/clang/bindings/python/tests/cindex/test_tls_kind.py b/clang/bindings/python/tests/cindex/test_tls_kind.py
index c828ac83a468d..b8ef74614ab03 100644
--- a/clang/bindings/python/tests/cindex/test_tls_kind.py
+++ b/clang/bindings/python/tests/cindex/test_tls_kind.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import TLSKind
 from clang.cindex import Cursor
@@ -17,38 +18,57 @@ class TestTLSKind(unittest.TestCase):
     def test_tls_kind(self):
         """Ensure that thread-local storage kinds are available on cursors."""
 
-        tu = get_tu("""
+        tu = get_tu(
+            """
 int tls_none;
 thread_local int tls_dynamic;
 _Thread_local int tls_static;
-""", lang = 'cpp')
+""",
+            lang="cpp",
+        )
 
-        tls_none = get_cursor(tu.cursor, 'tls_none')
+        tls_none = get_cursor(tu.cursor, "tls_none")
         self.assertEqual(tls_none.tls_kind, TLSKind.NONE)
 
-        tls_dynamic = get_cursor(tu.cursor, 'tls_dynamic')
+        tls_dynamic = get_cursor(tu.cursor, "tls_dynamic")
         self.assertEqual(tls_dynamic.tls_kind, TLSKind.DYNAMIC)
 
-        tls_static = get_cursor(tu.cursor, 'tls_static')
+        tls_static = get_cursor(tu.cursor, "tls_static")
         self.assertEqual(tls_static.tls_kind, TLSKind.STATIC)
 
         # The following case tests '__declspec(thread)'.  Since it is a Microsoft
         # specific extension, specific flags are required for the parser to pick
         # these up.
-        flags = ['-fms-extensions', '-target', 'x86_64-unknown-windows-win32',
-                 '-fms-compatibility-version=18']
-        tu = get_tu("""
+        flags = [
+            "-fms-extensions",
+            "-target",
+            "x86_64-unknown-windows-win32",
+            "-fms-compatibility-version=18",
+        ]
+        tu = get_tu(
+            """
 __declspec(thread) int tls_declspec_msvc18;
-""", lang = 'cpp', flags=flags)
+""",
+            lang="cpp",
+            flags=flags,
+        )
 
-        tls_declspec_msvc18 = get_cursor(tu.cursor, 'tls_declspec_msvc18')
+        tls_declspec_msvc18 = get_cursor(tu.cursor, "tls_declspec_msvc18")
         self.assertEqual(tls_declspec_msvc18.tls_kind, TLSKind.STATIC)
 
-        flags = ['-fms-extensions', '-target', 'x86_64-unknown-windows-win32',
-                 '-fms-compatibility-version=19']
-        tu = get_tu("""
+        flags = [
+            "-fms-extensions",
+            "-target",
+            "x86_64-unknown-windows-win32",
+            "-fms-compatibility-version=19",
+        ]
+        tu = get_tu(
+            """
 __declspec(thread) int tls_declspec_msvc19;
-""", lang = 'cpp', flags=flags)
+""",
+            lang="cpp",
+            flags=flags,
+        )
 
-        tls_declspec_msvc19 = get_cursor(tu.cursor, 'tls_declspec_msvc19')
+        tls_declspec_msvc19 = get_cursor(tu.cursor, "tls_declspec_msvc19")
         self.assertEqual(tls_declspec_msvc19.tls_kind, TLSKind.DYNAMIC)
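
A trimmed sketch of the TLS query without the MSVC-specific flag handling:

    from clang.cindex import TranslationUnit

    source = "int plain; thread_local int tls;"
    tu = TranslationUnit.from_source(
        "t.cpp", ["-std=c++11"], unsaved_files=[("t.cpp", source)]
    )
    for node in tu.cursor.get_children():
        # plain -> TLSKind.NONE, tls -> TLSKind.DYNAMIC
        print(node.spelling, node.tls_kind)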

diff --git a/clang/bindings/python/tests/cindex/test_token_kind.py b/clang/bindings/python/tests/cindex/test_token_kind.py
index 904e007cafc50..07200abad0909 100644
--- a/clang/bindings/python/tests/cindex/test_token_kind.py
+++ b/clang/bindings/python/tests/cindex/test_token_kind.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import TokenKind
 
@@ -12,16 +13,16 @@ class TestTokenKind(unittest.TestCase):
     def test_constructor(self):
         """Ensure TokenKind constructor works as expected."""
 
-        t = TokenKind(5, 'foo')
+        t = TokenKind(5, "foo")
 
         self.assertEqual(t.value, 5)
-        self.assertEqual(t.name, 'foo')
+        self.assertEqual(t.name, "foo")
 
     def test_bad_register(self):
         """Ensure a duplicate value is rejected for registration."""
 
         with self.assertRaises(ValueError):
-            TokenKind.register(2, 'foo')
+            TokenKind.register(2, "foo")
 
     def test_unknown_value(self):
         """Ensure trying to fetch an unknown value raises."""
@@ -31,7 +32,7 @@ def test_unknown_value(self):
 
     def test_registration(self):
         """Ensure that items registered appear as class attributes."""
-        self.assertTrue(hasattr(TokenKind, 'LITERAL'))
+        self.assertTrue(hasattr(TokenKind, "LITERAL"))
         literal = TokenKind.LITERAL
 
         self.assertIsInstance(literal, TokenKind)
@@ -46,4 +47,4 @@ def test_repr(self):
         """Ensure repr() works."""
 
         r = repr(TokenKind.LITERAL)
-        self.assertEqual(r, 'TokenKind.LITERAL')
+        self.assertEqual(r, "TokenKind.LITERAL")

diff --git a/clang/bindings/python/tests/cindex/test_tokens.py b/clang/bindings/python/tests/cindex/test_tokens.py
index dd6d3a3259edc..2cbf42c4c6cb9 100644
--- a/clang/bindings/python/tests/cindex/test_tokens.py
+++ b/clang/bindings/python/tests/cindex/test_tokens.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from clang.cindex import CursorKind
 from clang.cindex import Index
@@ -17,12 +18,12 @@
 class TestTokens(unittest.TestCase):
     def test_token_to_cursor(self):
         """Ensure we can obtain a Cursor from a Token instance."""
-        tu = get_tu('int i = 5;')
-        r = tu.get_extent('t.c', (0, 9))
+        tu = get_tu("int i = 5;")
+        r = tu.get_extent("t.c", (0, 9))
         tokens = list(tu.get_tokens(extent=r))
 
         self.assertEqual(len(tokens), 4)
-        self.assertEqual(tokens[1].spelling, 'i')
+        self.assertEqual(tokens[1].spelling, "i")
         self.assertEqual(tokens[1].kind, TokenKind.IDENTIFIER)
 
         cursor = tokens[1].cursor
@@ -32,8 +33,8 @@ def test_token_to_cursor(self):
     def test_token_location(self):
         """Ensure Token.location works."""
 
-        tu = get_tu('int foo = 10;')
-        r = tu.get_extent('t.c', (0, 11))
+        tu = get_tu("int foo = 10;")
+        r = tu.get_extent("t.c", (0, 11))
 
         tokens = list(tu.get_tokens(extent=r))
         self.assertEqual(len(tokens), 4)
@@ -46,8 +47,8 @@ def test_token_location(self):
 
     def test_token_extent(self):
         """Ensure Token.extent works."""
-        tu = get_tu('int foo = 10;')
-        r = tu.get_extent('t.c', (0, 11))
+        tu = get_tu("int foo = 10;")
+        r = tu.get_extent("t.c", (0, 11))
 
         tokens = list(tu.get_tokens(extent=r))
         self.assertEqual(len(tokens), 4)
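
Token iteration over an extent, as in test_token_to_cursor, in a
standalone sketch:

    from clang.cindex import TranslationUnit

    tu = TranslationUnit.from_source("t.c", unsaved_files=[("t.c", "int i = 5;")])
    extent = tu.get_extent("t.c", (0, 9))
    for tok in tu.get_tokens(extent=extent):
        print(tok.spelling, tok.kind)  # e.g. "i" TokenKind.IDENTIFIER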

diff --git a/clang/bindings/python/tests/cindex/test_translation_unit.py b/clang/bindings/python/tests/cindex/test_translation_unit.py
index f3e770a936117..ff7213c69dd0f 100644
--- a/clang/bindings/python/tests/cindex/test_translation_unit.py
+++ b/clang/bindings/python/tests/cindex/test_translation_unit.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 from contextlib import contextmanager
 import gc
@@ -25,7 +26,7 @@
 from .util import str_to_path
 
 
-kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
+kInputsDir = os.path.join(os.path.dirname(__file__), "INPUTS")
 
 
 @contextmanager
@@ -52,79 +53,99 @@ def save_tu_pathlike(tu):
 
 class TestTranslationUnit(unittest.TestCase):
     def test_spelling(self):
-        path = os.path.join(kInputsDir, 'hello.cpp')
+        path = os.path.join(kInputsDir, "hello.cpp")
         tu = TranslationUnit.from_source(path)
         self.assertEqual(tu.spelling, path)
 
     def test_cursor(self):
-        path = os.path.join(kInputsDir, 'hello.cpp')
+        path = os.path.join(kInputsDir, "hello.cpp")
         tu = get_tu(path)
         c = tu.cursor
         self.assertIsInstance(c, Cursor)
         self.assertIs(c.kind, CursorKind.TRANSLATION_UNIT)
 
     def test_parse_arguments(self):
-        path = os.path.join(kInputsDir, 'parse_arguments.c')
-        tu = TranslationUnit.from_source(path, ['-DDECL_ONE=hello', '-DDECL_TWO=hi'])
+        path = os.path.join(kInputsDir, "parse_arguments.c")
+        tu = TranslationUnit.from_source(path, ["-DDECL_ONE=hello", "-DDECL_TWO=hi"])
         spellings = [c.spelling for c in tu.cursor.get_children()]
-        self.assertEqual(spellings[-2], 'hello')
-        self.assertEqual(spellings[-1], 'hi')
+        self.assertEqual(spellings[-2], "hello")
+        self.assertEqual(spellings[-1], "hi")
 
     def test_reparse_arguments(self):
-        path = os.path.join(kInputsDir, 'parse_arguments.c')
-        tu = TranslationUnit.from_source(path, ['-DDECL_ONE=hello', '-DDECL_TWO=hi'])
+        path = os.path.join(kInputsDir, "parse_arguments.c")
+        tu = TranslationUnit.from_source(path, ["-DDECL_ONE=hello", "-DDECL_TWO=hi"])
         tu.reparse()
         spellings = [c.spelling for c in tu.cursor.get_children()]
-        self.assertEqual(spellings[-2], 'hello')
-        self.assertEqual(spellings[-1], 'hi')
+        self.assertEqual(spellings[-2], "hello")
+        self.assertEqual(spellings[-1], "hi")
 
     def test_unsaved_files(self):
-        tu = TranslationUnit.from_source('fake.c', ['-I./'], unsaved_files = [
-                ('fake.c', """
+        tu = TranslationUnit.from_source(
+            "fake.c",
+            ["-I./"],
+            unsaved_files=[
+                (
+                    "fake.c",
+                    """
 #include "fake.h"
 int x;
 int SOME_DEFINE;
-"""),
-                ('./fake.h', """
+""",
+                ),
+                (
+                    "./fake.h",
+                    """
 #define SOME_DEFINE y
-""")
-                ])
+""",
+                ),
+            ],
+        )
         spellings = [c.spelling for c in tu.cursor.get_children()]
-        self.assertEqual(spellings[-2], 'x')
-        self.assertEqual(spellings[-1], 'y')
+        self.assertEqual(spellings[-2], "x")
+        self.assertEqual(spellings[-1], "y")
 
     def test_unsaved_files_2(self):
         if sys.version_info.major >= 3:
             from io import StringIO
         else:
             from io import BytesIO as StringIO
-        tu = TranslationUnit.from_source('fake.c', unsaved_files = [
-                ('fake.c', StringIO('int x;'))])
+        tu = TranslationUnit.from_source(
+            "fake.c", unsaved_files=[("fake.c", StringIO("int x;"))]
+        )
         spellings = [c.spelling for c in tu.cursor.get_children()]
-        self.assertEqual(spellings[-1], 'x')
+        self.assertEqual(spellings[-1], "x")
 
     @skip_if_no_fspath
     def test_from_source_accepts_pathlike(self):
-        tu = TranslationUnit.from_source(str_to_path('fake.c'), ['-Iincludes'], unsaved_files = [
-                (str_to_path('fake.c'), """
+        tu = TranslationUnit.from_source(
+            str_to_path("fake.c"),
+            ["-Iincludes"],
+            unsaved_files=[
+                (
+                    str_to_path("fake.c"),
+                    """
 #include "fake.h"
     int x;
     int SOME_DEFINE;
-    """),
-                    (str_to_path('includes/fake.h'), """
+    """,
+                ),
+                (
+                    str_to_path("includes/fake.h"),
+                    """
 #define SOME_DEFINE y
-    """)
-                ])
+    """,
+                ),
+            ],
+        )
         spellings = [c.spelling for c in tu.cursor.get_children()]
-        self.assertEqual(spellings[-2], 'x')
-        self.assertEqual(spellings[-1], 'y')
+        self.assertEqual(spellings[-2], "x")
+        self.assertEqual(spellings[-1], "y")
 
     def assert_normpaths_equal(self, path1, path2):
-        """ Compares two paths for equality after normalizing them with
-            os.path.normpath
+        """Compares two paths for equality after normalizing them with
+        os.path.normpath
         """
-        self.assertEqual(os.path.normpath(path1),
-                         os.path.normpath(path2))
+        self.assertEqual(os.path.normpath(path1), os.path.normpath(path2))
 
     def test_includes(self):
         def eq(expected, actual):
@@ -134,7 +155,7 @@ def eq(expected, actual):
             else:
                 self.assert_normpaths_equal(expected[1], actual.include.name)
 
-        src = os.path.join(kInputsDir, 'include.cpp')
+        src = os.path.join(kInputsDir, "include.cpp")
         h1 = os.path.join(kInputsDir, "header1.h")
         h2 = os.path.join(kInputsDir, "header2.h")
         h3 = os.path.join(kInputsDir, "header3.h")
@@ -145,21 +166,27 @@ def eq(expected, actual):
             eq(i[0], i[1])
 
     def test_inclusion_directive(self):
-        src = os.path.join(kInputsDir, 'include.cpp')
+        src = os.path.join(kInputsDir, "include.cpp")
         h1 = os.path.join(kInputsDir, "header1.h")
         h2 = os.path.join(kInputsDir, "header2.h")
         h3 = os.path.join(kInputsDir, "header3.h")
         inc = [h1, h3, h2, h3, h1]
 
-        tu = TranslationUnit.from_source(src, options=TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD)
-        inclusion_directive_files = [c.get_included_file().name for c in tu.cursor.get_children() if c.kind == CursorKind.INCLUSION_DIRECTIVE]
+        tu = TranslationUnit.from_source(
+            src, options=TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD
+        )
+        inclusion_directive_files = [
+            c.get_included_file().name
+            for c in tu.cursor.get_children()
+            if c.kind == CursorKind.INCLUSION_DIRECTIVE
+        ]
         for i in zip(inc, inclusion_directive_files):
             self.assert_normpaths_equal(i[0], i[1])
 
     def test_save(self):
         """Ensure TranslationUnit.save() works."""
 
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
 
         with save_tu(tu) as path:
             self.assertTrue(os.path.exists(path))
@@ -169,7 +196,7 @@ def test_save(self):
     def test_save_pathlike(self):
         """Ensure TranslationUnit.save() works with PathLike filename."""
 
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
 
         with save_tu_pathlike(tu) as path:
             self.assertTrue(os.path.exists(path))
@@ -178,9 +205,9 @@ def test_save_pathlike(self):
     def test_save_translation_errors(self):
         """Ensure that saving to an invalid directory raises."""
 
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
 
-        path = '/does/not/exist/llvm-test.ast'
+        path = "/does/not/exist/llvm-test.ast"
         self.assertFalse(os.path.exists(os.path.dirname(path)))
 
         with self.assertRaises(TranslationUnitSaveError) as cm:
@@ -192,7 +219,7 @@ def test_save_translation_errors(self):
     def test_load(self):
         """Ensure TranslationUnits can be constructed from saved files."""
 
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
         self.assertEqual(len(tu.diagnostics), 0)
         with save_tu(tu) as path:
             self.assertTrue(os.path.exists(path))
@@ -201,7 +228,7 @@ def test_load(self):
             tu2 = TranslationUnit.from_ast_file(filename=path)
             self.assertEqual(len(tu2.diagnostics), 0)
 
-            foo = get_cursor(tu2, 'foo')
+            foo = get_cursor(tu2, "foo")
             self.assertIsNotNone(foo)
 
             # Just in case there is an open file descriptor somewhere.
@@ -211,20 +238,20 @@ def test_load(self):
     def test_load_pathlike(self):
         """Ensure TranslationUnits can be constructed from saved files -
         PathLike variant."""
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
         self.assertEqual(len(tu.diagnostics), 0)
         with save_tu(tu) as path:
             tu2 = TranslationUnit.from_ast_file(filename=str_to_path(path))
             self.assertEqual(len(tu2.diagnostics), 0)
 
-            foo = get_cursor(tu2, 'foo')
+            foo = get_cursor(tu2, "foo")
             self.assertIsNotNone(foo)
 
             # Just in case there is an open file descriptor somewhere.
             del tu2
 
     def test_index_parse(self):
-        path = os.path.join(kInputsDir, 'hello.cpp')
+        path = os.path.join(kInputsDir, "hello.cpp")
         index = Index.create()
         tu = index.parse(path)
         self.assertIsInstance(tu, TranslationUnit)
@@ -232,97 +259,97 @@ def test_index_parse(self):
     def test_get_file(self):
         """Ensure tu.get_file() works appropriately."""
 
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
 
-        f = tu.get_file('t.c')
+        f = tu.get_file("t.c")
         self.assertIsInstance(f, File)
-        self.assertEqual(f.name, 't.c')
+        self.assertEqual(f.name, "t.c")
 
         with self.assertRaises(Exception):
-            f = tu.get_file('foobar.cpp')
+            f = tu.get_file("foobar.cpp")
 
     @skip_if_no_fspath
     def test_get_file_pathlike(self):
         """Ensure tu.get_file() works appropriately with PathLike filenames."""
 
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
 
-        f = tu.get_file(str_to_path('t.c'))
+        f = tu.get_file(str_to_path("t.c"))
         self.assertIsInstance(f, File)
-        self.assertEqual(f.name, 't.c')
+        self.assertEqual(f.name, "t.c")
 
         with self.assertRaises(Exception):
-            f = tu.get_file(str_to_path('foobar.cpp'))
+            f = tu.get_file(str_to_path("foobar.cpp"))
 
     def test_get_source_location(self):
         """Ensure tu.get_source_location() works."""
 
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
 
-        location = tu.get_location('t.c', 2)
+        location = tu.get_location("t.c", 2)
         self.assertIsInstance(location, SourceLocation)
         self.assertEqual(location.offset, 2)
-        self.assertEqual(location.file.name, 't.c')
+        self.assertEqual(location.file.name, "t.c")
 
-        location = tu.get_location('t.c', (1, 3))
+        location = tu.get_location("t.c", (1, 3))
         self.assertIsInstance(location, SourceLocation)
         self.assertEqual(location.line, 1)
         self.assertEqual(location.column, 3)
-        self.assertEqual(location.file.name, 't.c')
+        self.assertEqual(location.file.name, "t.c")
 
     def test_get_source_range(self):
         """Ensure tu.get_source_range() works."""
 
-        tu = get_tu('int foo();')
+        tu = get_tu("int foo();")
 
-        r = tu.get_extent('t.c', (1,4))
+        r = tu.get_extent("t.c", (1, 4))
         self.assertIsInstance(r, SourceRange)
         self.assertEqual(r.start.offset, 1)
         self.assertEqual(r.end.offset, 4)
-        self.assertEqual(r.start.file.name, 't.c')
-        self.assertEqual(r.end.file.name, 't.c')
+        self.assertEqual(r.start.file.name, "t.c")
+        self.assertEqual(r.end.file.name, "t.c")
 
-        r = tu.get_extent('t.c', ((1,2), (1,3)))
+        r = tu.get_extent("t.c", ((1, 2), (1, 3)))
         self.assertIsInstance(r, SourceRange)
         self.assertEqual(r.start.line, 1)
         self.assertEqual(r.start.column, 2)
         self.assertEqual(r.end.line, 1)
         self.assertEqual(r.end.column, 3)
-        self.assertEqual(r.start.file.name, 't.c')
-        self.assertEqual(r.end.file.name, 't.c')
+        self.assertEqual(r.start.file.name, "t.c")
+        self.assertEqual(r.end.file.name, "t.c")
 
-        start = tu.get_location('t.c', 0)
-        end = tu.get_location('t.c', 5)
+        start = tu.get_location("t.c", 0)
+        end = tu.get_location("t.c", 5)
 
-        r = tu.get_extent('t.c', (start, end))
+        r = tu.get_extent("t.c", (start, end))
         self.assertIsInstance(r, SourceRange)
         self.assertEqual(r.start.offset, 0)
         self.assertEqual(r.end.offset, 5)
-        self.assertEqual(r.start.file.name, 't.c')
-        self.assertEqual(r.end.file.name, 't.c')
+        self.assertEqual(r.start.file.name, "t.c")
+        self.assertEqual(r.end.file.name, "t.c")
 
     def test_get_tokens_gc(self):
         """Ensures get_tokens() works properly with garbage collection."""
 
-        tu = get_tu('int foo();')
-        r = tu.get_extent('t.c', (0, 10))
+        tu = get_tu("int foo();")
+        r = tu.get_extent("t.c", (0, 10))
         tokens = list(tu.get_tokens(extent=r))
 
-        self.assertEqual(tokens[0].spelling, 'int')
+        self.assertEqual(tokens[0].spelling, "int")
         gc.collect()
-        self.assertEqual(tokens[0].spelling, 'int')
+        self.assertEqual(tokens[0].spelling, "int")
 
         del tokens[1]
         gc.collect()
-        self.assertEqual(tokens[0].spelling, 'int')
+        self.assertEqual(tokens[0].spelling, "int")
 
         # May trigger segfault if we don't do our job properly.
         del tokens
         gc.collect()
-        gc.collect() # Just in case.
+        gc.collect()  # Just in case.
 
     def test_fail_from_source(self):
-        path = os.path.join(kInputsDir, 'non-existent.cpp')
+        path = os.path.join(kInputsDir, "non-existent.cpp")
         try:
             tu = TranslationUnit.from_source(path)
         except TranslationUnitLoadError:
@@ -330,7 +357,7 @@ def test_fail_from_source(self):
         self.assertEqual(tu, None)
 
     def test_fail_from_ast_file(self):
-        path = os.path.join(kInputsDir, 'non-existent.ast')
+        path = os.path.join(kInputsDir, "non-existent.ast")
         try:
             tu = TranslationUnit.from_ast_file(path)
         except TranslationUnitLoadError:

diff --git a/clang/bindings/python/tests/cindex/test_type.py b/clang/bindings/python/tests/cindex/test_type.py
index efe9b0f50be88..1dd8db0e3e814 100644
--- a/clang/bindings/python/tests/cindex/test_type.py
+++ b/clang/bindings/python/tests/cindex/test_type.py
@@ -1,7 +1,8 @@
 import os
 from clang.cindex import Config
-if 'CLANG_LIBRARY_PATH' in os.environ:
-    Config.set_library_path(os.environ['CLANG_LIBRARY_PATH'])
+
+if "CLANG_LIBRARY_PATH" in os.environ:
+    Config.set_library_path(os.environ["CLANG_LIBRARY_PATH"])
 
 import gc
 import unittest
@@ -31,7 +32,7 @@
 """
 
 
-constarrayInput="""
+constarrayInput = """
 struct teststruct {
   void *A[2];
 };
@@ -42,81 +43,85 @@ class TestType(unittest.TestCase):
     def test_a_struct(self):
         tu = get_tu(kInput)
 
-        teststruct = get_cursor(tu, 'teststruct')
+        teststruct = get_cursor(tu, "teststruct")
         self.assertIsNotNone(teststruct, "Could not find teststruct.")
         fields = list(teststruct.get_children())
 
         self.assertEqual(fields[0].kind, CursorKind.FIELD_DECL)
         self.assertIsNotNone(fields[0].translation_unit)
-        self.assertEqual(fields[0].spelling, 'a')
+        self.assertEqual(fields[0].spelling, "a")
         self.assertFalse(fields[0].type.is_const_qualified())
         self.assertEqual(fields[0].type.kind, TypeKind.INT)
         self.assertEqual(fields[0].type.get_canonical().kind, TypeKind.INT)
-        self.assertEqual(fields[0].type.get_typedef_name(), '')
+        self.assertEqual(fields[0].type.get_typedef_name(), "")
 
         self.assertEqual(fields[1].kind, CursorKind.FIELD_DECL)
         self.assertIsNotNone(fields[1].translation_unit)
-        self.assertEqual(fields[1].spelling, 'b')
+        self.assertEqual(fields[1].spelling, "b")
         self.assertFalse(fields[1].type.is_const_qualified())
         self.assertEqual(fields[1].type.kind, TypeKind.ELABORATED)
         self.assertEqual(fields[1].type.get_canonical().kind, TypeKind.INT)
-        self.assertEqual(fields[1].type.get_declaration().spelling, 'I')
-        self.assertEqual(fields[1].type.get_typedef_name(), 'I')
+        self.assertEqual(fields[1].type.get_declaration().spelling, "I")
+        self.assertEqual(fields[1].type.get_typedef_name(), "I")
 
         self.assertEqual(fields[2].kind, CursorKind.FIELD_DECL)
         self.assertIsNotNone(fields[2].translation_unit)
-        self.assertEqual(fields[2].spelling, 'c')
+        self.assertEqual(fields[2].spelling, "c")
         self.assertFalse(fields[2].type.is_const_qualified())
         self.assertEqual(fields[2].type.kind, TypeKind.LONG)
         self.assertEqual(fields[2].type.get_canonical().kind, TypeKind.LONG)
-        self.assertEqual(fields[2].type.get_typedef_name(), '')
+        self.assertEqual(fields[2].type.get_typedef_name(), "")
 
         self.assertEqual(fields[3].kind, CursorKind.FIELD_DECL)
         self.assertIsNotNone(fields[3].translation_unit)
-        self.assertEqual(fields[3].spelling, 'd')
+        self.assertEqual(fields[3].spelling, "d")
         self.assertFalse(fields[3].type.is_const_qualified())
         self.assertEqual(fields[3].type.kind, TypeKind.ULONG)
         self.assertEqual(fields[3].type.get_canonical().kind, TypeKind.ULONG)
-        self.assertEqual(fields[3].type.get_typedef_name(), '')
+        self.assertEqual(fields[3].type.get_typedef_name(), "")
 
         self.assertEqual(fields[4].kind, CursorKind.FIELD_DECL)
         self.assertIsNotNone(fields[4].translation_unit)
-        self.assertEqual(fields[4].spelling, 'e')
+        self.assertEqual(fields[4].spelling, "e")
         self.assertFalse(fields[4].type.is_const_qualified())
         self.assertEqual(fields[4].type.kind, TypeKind.LONG)
         self.assertEqual(fields[4].type.get_canonical().kind, TypeKind.LONG)
-        self.assertEqual(fields[4].type.get_typedef_name(), '')
+        self.assertEqual(fields[4].type.get_typedef_name(), "")
 
         self.assertEqual(fields[5].kind, CursorKind.FIELD_DECL)
         self.assertIsNotNone(fields[5].translation_unit)
-        self.assertEqual(fields[5].spelling, 'f')
+        self.assertEqual(fields[5].spelling, "f")
         self.assertTrue(fields[5].type.is_const_qualified())
         self.assertEqual(fields[5].type.kind, TypeKind.INT)
         self.assertEqual(fields[5].type.get_canonical().kind, TypeKind.INT)
-        self.assertEqual(fields[5].type.get_typedef_name(), '')
+        self.assertEqual(fields[5].type.get_typedef_name(), "")
 
         self.assertEqual(fields[6].kind, CursorKind.FIELD_DECL)
         self.assertIsNotNone(fields[6].translation_unit)
-        self.assertEqual(fields[6].spelling, 'g')
+        self.assertEqual(fields[6].spelling, "g")
         self.assertFalse(fields[6].type.is_const_qualified())
         self.assertEqual(fields[6].type.kind, TypeKind.POINTER)
         self.assertEqual(fields[6].type.get_pointee().kind, TypeKind.INT)
-        self.assertEqual(fields[6].type.get_typedef_name(), '')
+        self.assertEqual(fields[6].type.get_typedef_name(), "")
 
         self.assertEqual(fields[7].kind, CursorKind.FIELD_DECL)
         self.assertIsNotNone(fields[7].translation_unit)
-        self.assertEqual(fields[7].spelling, 'h')
+        self.assertEqual(fields[7].spelling, "h")
         self.assertFalse(fields[7].type.is_const_qualified())
         self.assertEqual(fields[7].type.kind, TypeKind.POINTER)
         self.assertEqual(fields[7].type.get_pointee().kind, TypeKind.POINTER)
-        self.assertEqual(fields[7].type.get_pointee().get_pointee().kind, TypeKind.POINTER)
-        self.assertEqual(fields[7].type.get_pointee().get_pointee().get_pointee().kind, TypeKind.INT)
-        self.assertEqual(fields[7].type.get_typedef_name(), '')
+        self.assertEqual(
+            fields[7].type.get_pointee().get_pointee().kind, TypeKind.POINTER
+        )
+        self.assertEqual(
+            fields[7].type.get_pointee().get_pointee().get_pointee().kind, TypeKind.INT
+        )
+        self.assertEqual(fields[7].type.get_typedef_name(), "")
 
     def test_references(self):
         """Ensure that a Type maintains a reference to a TranslationUnit."""
 
-        tu = get_tu('int x;')
+        tu = get_tu("int x;")
         children = list(tu.cursor.get_children())
         self.assertGreater(len(children), 0)
 
@@ -136,10 +141,10 @@ def test_references(self):
     def testConstantArray(self):
         tu = get_tu(constarrayInput)
 
-        teststruct = get_cursor(tu, 'teststruct')
+        teststruct = get_cursor(tu, "teststruct")
         self.assertIsNotNone(teststruct, "Didn't find teststruct??")
         fields = list(teststruct.get_children())
-        self.assertEqual(fields[0].spelling, 'A')
+        self.assertEqual(fields[0].spelling, "A")
         self.assertEqual(fields[0].type.kind, TypeKind.CONSTANTARRAY)
         self.assertIsNotNone(fields[0].type.get_array_element_type())
         self.assertEqual(fields[0].type.get_array_element_type().kind, TypeKind.POINTER)
@@ -147,12 +152,12 @@ def testConstantArray(self):
 
     def test_equal(self):
         """Ensure equivalence operators work on Type."""
-        source = 'int a; int b; void *v;'
+        source = "int a; int b; void *v;"
         tu = get_tu(source)
 
-        a = get_cursor(tu, 'a')
-        b = get_cursor(tu, 'b')
-        v = get_cursor(tu, 'v')
+        a = get_cursor(tu, "a")
+        b = get_cursor(tu, "b")
+        v = get_cursor(tu, "v")
 
         self.assertIsNotNone(a)
         self.assertIsNotNone(b)
@@ -162,15 +167,15 @@ def test_equal(self):
         self.assertNotEqual(a.type, v.type)
 
         self.assertNotEqual(a.type, None)
-        self.assertNotEqual(a.type, 'foo')
+        self.assertNotEqual(a.type, "foo")
 
     def test_type_spelling(self):
         """Ensure Type.spelling works."""
-        tu = get_tu('int c[5]; void f(int i[]); int x; int v[x];')
-        c = get_cursor(tu, 'c')
-        i = get_cursor(tu, 'i')
-        x = get_cursor(tu, 'x')
-        v = get_cursor(tu, 'v')
+        tu = get_tu("int c[5]; void f(int i[]); int x; int v[x];")
+        c = get_cursor(tu, "c")
+        i = get_cursor(tu, "i")
+        x = get_cursor(tu, "x")
+        v = get_cursor(tu, "v")
         self.assertIsNotNone(c)
         self.assertIsNotNone(i)
         self.assertIsNotNone(x)
@@ -182,16 +187,16 @@ def test_type_spelling(self):
 
     def test_typekind_spelling(self):
         """Ensure TypeKind.spelling works."""
-        tu = get_tu('int a;')
-        a = get_cursor(tu, 'a')
+        tu = get_tu("int a;")
+        a = get_cursor(tu, "a")
 
         self.assertIsNotNone(a)
-        self.assertEqual(a.type.kind.spelling, 'Int')
+        self.assertEqual(a.type.kind.spelling, "Int")
 
     def test_function_argument_types(self):
         """Ensure that Type.argument_types() works as expected."""
-        tu = get_tu('void f(int, int);')
-        f = get_cursor(tu, 'f')
+        tu = get_tu("void f(int, int);")
+        f = get_cursor(tu, "f")
         self.assertIsNotNone(f)
 
         args = f.type.argument_types()
@@ -213,20 +218,20 @@ def test_function_argument_types(self):
 
     def test_argument_types_string_key(self):
         """Ensure that non-int keys raise a TypeError."""
-        tu = get_tu('void f(int, int);')
-        f = get_cursor(tu, 'f')
+        tu = get_tu("void f(int, int);")
+        f = get_cursor(tu, "f")
         self.assertIsNotNone(f)
 
         args = f.type.argument_types()
         self.assertEqual(len(args), 2)
 
         with self.assertRaises(TypeError):
-            args['foo']
+            args["foo"]
 
     def test_argument_types_negative_index(self):
         """Ensure that negative indexes on argument_types raise an IndexError."""
-        tu = get_tu('void f(int, int);')
-        f = get_cursor(tu, 'f')
+        tu = get_tu("void f(int, int);")
+        f = get_cursor(tu, "f")
         args = f.type.argument_types()
 
         with self.assertRaises(IndexError):
@@ -234,8 +239,8 @@ def test_argument_types_negative_index(self):
 
     def test_argument_types_overflow_index(self):
         """Ensure that indexes beyond the length of Type.argument_types() raise."""
-        tu = get_tu('void f(int, int);')
-        f = get_cursor(tu, 'f')
+        tu = get_tu("void f(int, int);")
+        f = get_cursor(tu, "f")
         args = f.type.argument_types()
 
         with self.assertRaises(IndexError):
@@ -243,8 +248,8 @@ def test_argument_types_overflow_index(self):
 
     def test_argument_types_invalid_type(self):
         """Ensure that obtaining argument_types on a Type without them raises."""
-        tu = get_tu('int i;')
-        i = get_cursor(tu, 'i')
+        tu = get_tu("int i;")
+        i = get_cursor(tu, "i")
         self.assertIsNotNone(i)
 
         with self.assertRaises(Exception):
@@ -252,9 +257,9 @@ def test_argument_types_invalid_type(self):
 
     def test_is_pod(self):
         """Ensure Type.is_pod() works."""
-        tu = get_tu('int i; void f();')
-        i = get_cursor(tu, 'i')
-        f = get_cursor(tu, 'f')
+        tu = get_tu("int i; void f();")
+        i = get_cursor(tu, "i")
+        f = get_cursor(tu, "f")
 
         self.assertIsNotNone(i)
         self.assertIsNotNone(f)
@@ -265,7 +270,7 @@ def test_is_pod(self):
     def test_function_variadic(self):
         """Ensure Type.is_function_variadic works."""
 
-        source ="""
+        source = """
 #include <stdarg.h>
 
     void foo(int a, ...);
@@ -273,8 +278,8 @@ def test_function_variadic(self):
     """
 
         tu = get_tu(source)
-        foo = get_cursor(tu, 'foo')
-        bar = get_cursor(tu, 'bar')
+        foo = get_cursor(tu, "foo")
+        bar = get_cursor(tu, "bar")
 
         self.assertIsNotNone(foo)
         self.assertIsNotNone(bar)
@@ -285,10 +290,10 @@ def test_function_variadic(self):
 
     def test_element_type(self):
         """Ensure Type.element_type works."""
-        tu = get_tu('int c[5]; void f(int i[]); int x; int v[x];')
-        c = get_cursor(tu, 'c')
-        i = get_cursor(tu, 'i')
-        v = get_cursor(tu, 'v')
+        tu = get_tu("int c[5]; void f(int i[]); int x; int v[x];")
+        c = get_cursor(tu, "c")
+        i = get_cursor(tu, "i")
+        v = get_cursor(tu, "v")
         self.assertIsNotNone(c)
         self.assertIsNotNone(i)
         self.assertIsNotNone(v)
@@ -302,17 +307,17 @@ def test_element_type(self):
 
     def test_invalid_element_type(self):
         """Ensure Type.element_type raises if type doesn't have elements."""
-        tu = get_tu('int i;')
-        i = get_cursor(tu, 'i')
+        tu = get_tu("int i;")
+        i = get_cursor(tu, "i")
         self.assertIsNotNone(i)
         with self.assertRaises(Exception):
             i.element_type
 
     def test_element_count(self):
         """Ensure Type.element_count works."""
-        tu = get_tu('int i[5]; int j;')
-        i = get_cursor(tu, 'i')
-        j = get_cursor(tu, 'j')
+        tu = get_tu("int i[5]; int j;")
+        i = get_cursor(tu, "i")
+        j = get_cursor(tu, "j")
 
         self.assertIsNotNone(i)
         self.assertIsNotNone(j)
@@ -325,10 +330,10 @@ def test_element_count(self):
     def test_is_volatile_qualified(self):
         """Ensure Type.is_volatile_qualified works."""
 
-        tu = get_tu('volatile int i = 4; int j = 2;')
+        tu = get_tu("volatile int i = 4; int j = 2;")
 
-        i = get_cursor(tu, 'i')
-        j = get_cursor(tu, 'j')
+        i = get_cursor(tu, "i")
+        j = get_cursor(tu, "j")
 
         self.assertIsNotNone(i)
         self.assertIsNotNone(j)
@@ -340,10 +345,10 @@ def test_is_volatile_qualified(self):
     def test_is_restrict_qualified(self):
         """Ensure Type.is_restrict_qualified works."""
 
-        tu = get_tu('struct s { void * restrict i; void * j; };')
+        tu = get_tu("struct s { void * restrict i; void * j; };")
 
-        i = get_cursor(tu, 'i')
-        j = get_cursor(tu, 'j')
+        i = get_cursor(tu, "i")
+        j = get_cursor(tu, "j")
 
         self.assertIsNotNone(i)
         self.assertIsNotNone(j)
@@ -356,7 +361,7 @@ def test_record_layout(self):
         """Ensure Cursor.type.get_size, Cursor.type.get_align, and
         Cursor.type.get_offset work."""
 
-        source ="""
+        source = """
     struct a {
         long a1;
         long a2:3;
@@ -364,15 +369,17 @@ def test_record_layout(self):
         long long a4;
     };
     """
-        tries=[(['-target','i386-linux-gnu'],(4,16,0,32,35,64)),
-               (['-target','nvptx64-unknown-unknown'],(8,24,0,64,67,128)),
-               (['-target','i386-pc-win32'],(8,16,0,32,35,64)),
-               (['-target','msp430-none-none'],(2,14,0,32,35,48))]
+        tries = [
+            (["-target", "i386-linux-gnu"], (4, 16, 0, 32, 35, 64)),
+            (["-target", "nvptx64-unknown-unknown"], (8, 24, 0, 64, 67, 128)),
+            (["-target", "i386-pc-win32"], (8, 16, 0, 32, 35, 64)),
+            (["-target", "msp430-none-none"], (2, 14, 0, 32, 35, 48)),
+        ]
         for flags, values in tries:
-            align,total,a1,a2,a3,a4 = values
+            align, total, a1, a2, a3, a4 = values
 
             tu = get_tu(source, flags=flags)
-            teststruct = get_cursor(tu, 'a')
+            teststruct = get_cursor(tu, "a")
             fields = list(teststruct.get_children())
 
             self.assertEqual(teststruct.type.get_align(), align)
@@ -390,7 +397,7 @@ def test_record_layout(self):
 
     def test_offset(self):
         """Ensure Cursor.get_record_field_offset works in anonymous records"""
-        source="""
+        source = """
     struct Test {
       struct {int a;} typeanon;
       struct {
@@ -401,14 +408,16 @@ def test_offset(self):
       };
       int bar;
     };"""
-        tries=[(['-target','i386-linux-gnu'],(4,16,0,32,64,96)),
-               (['-target','nvptx64-unknown-unknown'],(8,24,0,32,64,96)),
-               (['-target','i386-pc-win32'],(8,16,0,32,64,96)),
-               (['-target','msp430-none-none'],(2,14,0,32,64,96))]
+        tries = [
+            (["-target", "i386-linux-gnu"], (4, 16, 0, 32, 64, 96)),
+            (["-target", "nvptx64-unknown-unknown"], (8, 24, 0, 32, 64, 96)),
+            (["-target", "i386-pc-win32"], (8, 16, 0, 32, 64, 96)),
+            (["-target", "msp430-none-none"], (2, 14, 0, 32, 64, 96)),
+        ]
         for flags, values in tries:
-            align,total,f1,bariton,foo,bar = values
+            align, total, f1, bariton, foo, bar = values
             tu = get_tu(source)
-            teststruct = get_cursor(tu, 'Test')
+            teststruct = get_cursor(tu, "Test")
             children = list(teststruct.get_children())
             fields = list(teststruct.type.get_fields())
             self.assertEqual(children[0].kind, CursorKind.STRUCT_DECL)
@@ -426,7 +435,7 @@ def test_decay(self):
         """Ensure decayed types are handled as the original type"""
 
         tu = get_tu("void foo(int a[]);")
-        foo = get_cursor(tu, 'foo')
+        foo = get_cursor(tu, "foo")
         a = foo.type.argument_types()[0]
 
         self.assertEqual(a.kind, TypeKind.INCOMPLETEARRAY)
@@ -435,9 +444,9 @@ def test_decay(self):
 
     def test_addrspace(self):
         """Ensure the address space can be queried"""
-        tu = get_tu('__attribute__((address_space(2))) int testInteger = 3;', 'c')
+        tu = get_tu("__attribute__((address_space(2))) int testInteger = 3;", "c")
 
-        testInteger = get_cursor(tu, 'testInteger')
+        testInteger = get_cursor(tu, "testInteger")
 
         self.assertIsNotNone(testInteger, "Could not find testInteger.")
         self.assertEqual(testInteger.type.get_address_space(), 2)
@@ -452,17 +461,17 @@ class Template {
         Template<Foo> instance;
         int bar;
         """
-        tu = get_tu(source, lang='cpp')
+        tu = get_tu(source, lang="cpp")
 
         # Variable with a template argument.
-        cursor = get_cursor(tu, 'instance')
+        cursor = get_cursor(tu, "instance")
         cursor_type = cursor.type
         self.assertEqual(cursor.kind, CursorKind.VAR_DECL)
-        self.assertEqual(cursor_type.spelling, 'Template<Foo>')
+        self.assertEqual(cursor_type.spelling, "Template<Foo>")
         self.assertEqual(cursor_type.get_num_template_arguments(), 1)
         template_type = cursor_type.get_template_argument_type(0)
-        self.assertEqual(template_type.spelling, 'Foo')
+        self.assertEqual(template_type.spelling, "Foo")
 
         # Variable without a template argument.
-        cursor = get_cursor(tu, 'bar')
+        cursor = get_cursor(tu, "bar")
         self.assertEqual(cursor.get_num_template_arguments(), -1)

diff --git a/clang/bindings/python/tests/cindex/util.py b/clang/bindings/python/tests/cindex/util.py
index 57e17941c5581..8ba3114b35d1e 100644
--- a/clang/bindings/python/tests/cindex/util.py
+++ b/clang/bindings/python/tests/cindex/util.py
@@ -1,7 +1,8 @@
 # This file provides common utility functions for the test suite.
 
 import os
-HAS_FSPATH = hasattr(os, 'fspath')
+
+HAS_FSPATH = hasattr(os, "fspath")
 
 if HAS_FSPATH:
     from pathlib import Path as str_to_path
@@ -13,7 +14,8 @@
 from clang.cindex import Cursor
 from clang.cindex import TranslationUnit
 
-def get_tu(source, lang='c', all_warnings=False, flags=[]):
+
+def get_tu(source, lang="c", all_warnings=False, flags=[]):
     """Obtain a translation unit from source and language.
 
     By default, the translation unit is created from source file "t.<ext>"
@@ -25,20 +27,20 @@ def get_tu(source, lang='c', all_warnings=False, flags=[]):
     all_warnings is a convenience argument to enable all compiler warnings.
     """
     args = list(flags)
-    name = 't.c'
-    if lang == 'cpp':
-        name = 't.cpp'
-        args.append('-std=c++11')
-    elif lang == 'objc':
-        name = 't.m'
-    elif lang != 'c':
-        raise Exception('Unknown language: %s' % lang)
+    name = "t.c"
+    if lang == "cpp":
+        name = "t.cpp"
+        args.append("-std=c++11")
+    elif lang == "objc":
+        name = "t.m"
+    elif lang != "c":
+        raise Exception("Unknown language: %s" % lang)
 
     if all_warnings:
-        args += ['-Wall', '-Wextra']
+        args += ["-Wall", "-Wextra"]
+
+    return TranslationUnit.from_source(name, args, unsaved_files=[(name, source)])
 
-    return TranslationUnit.from_source(name, args, unsaved_files=[(name,
-                                       source)])
 
 def get_cursor(source, spelling):
     """Obtain a cursor from a source object.
@@ -58,6 +60,7 @@ def get_cursor(source, spelling):
 
     return None
 
+
 def get_cursors(source, spelling):
     """Obtain all cursors from a source object with a specific spelling.
 
@@ -78,13 +81,14 @@ def get_cursors(source, spelling):
     return cursors
 
 
-skip_if_no_fspath = unittest.skipUnless(HAS_FSPATH,
-                                        "Requires file system path protocol / Python 3.6+")
+skip_if_no_fspath = unittest.skipUnless(
+    HAS_FSPATH, "Requires file system path protocol / Python 3.6+"
+)
 
 __all__ = [
-    'get_cursor',
-    'get_cursors',
-    'get_tu',
-    'skip_if_no_fspath',
-    'str_to_path',
+    "get_cursor",
+    "get_cursors",
+    "get_tu",
+    "skip_if_no_fspath",
+    "str_to_path",
 ]

diff --git a/clang/docs/analyzer/conf.py b/clang/docs/analyzer/conf.py
index ccd0393c4d3ac..509aa857c4d52 100644
--- a/clang/docs/analyzer/conf.py
+++ b/clang/docs/analyzer/conf.py
@@ -17,103 +17,103 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
+extensions = ["sphinx.ext.todo", "sphinx.ext.mathjax"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'Clang Static Analyzer'
-copyright = u'2013-%d, Analyzer Team' % date.today().year
+project = "Clang Static Analyzer"
+copyright = "2013-%d, Analyzer Team" % date.today().year
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short version.
-version = '17'
+version = "17"
 # The full version, including alpha/beta/rc tags.
-release = '17'
+release = "17"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'haiku'
+html_theme = "haiku"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -122,88 +122,91 @@
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'ClangStaticAnalyzerdoc'
+htmlhelp_basename = "ClangStaticAnalyzerdoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'ClangStaticAnalyzer.tex', u'Clang Static Analyzer Documentation',
-   u'Analyzer Team', 'manual'),
+    (
+        "index",
+        "ClangStaticAnalyzer.tex",
+        "Clang Static Analyzer Documentation",
+        "Analyzer Team",
+        "manual",
+    ),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
@@ -211,12 +214,17 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'clangstaticanalyzer', u'Clang Static Analyzer Documentation',
-     [u'Analyzer Team'], 1)
+    (
+        "index",
+        "clangstaticanalyzer",
+        "Clang Static Analyzer Documentation",
+        ["Analyzer Team"],
+        1,
+    )
 ]
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -225,20 +233,26 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('index', 'ClangStaticAnalyzer', u'Clang Static Analyzer Documentation',
-   u'Analyzer Team', 'ClangStaticAnalyzer', 'One line description of project.',
-   'Miscellaneous'),
+    (
+        "index",
+        "ClangStaticAnalyzer",
+        "Clang Static Analyzer Documentation",
+        "Analyzer Team",
+        "ClangStaticAnalyzer",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
 
 
 # Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
+intersphinx_mapping = {"http://docs.python.org/": None}

diff --git a/clang/docs/conf.py b/clang/docs/conf.py
index 0ad1b2f1dfcaf..de31a5dcd068e 100644
--- a/clang/docs/conf.py
+++ b/clang/docs/conf.py
@@ -18,80 +18,81 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
+extensions = ["sphinx.ext.todo", "sphinx.ext.mathjax"]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
 source_suffix = {
-    '.rst': 'restructuredtext',
+    ".rst": "restructuredtext",
 }
 
 try:
-  import recommonmark
+    import recommonmark
 except ImportError:
-  # manpages do not use any .md sources
-  if not tags.has('builder-man'):
-    raise
+    # manpages do not use any .md sources
+    if not tags.has("builder-man"):
+        raise
 else:
-  import sphinx
-  if sphinx.version_info >= (3, 0):
-    # This requires 0.5 or later.
-    extensions.append('recommonmark')
-  else:
-    source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}
-  source_suffix['.md'] = 'markdown'
+    import sphinx
+
+    if sphinx.version_info >= (3, 0):
+        # This requires 0.5 or later.
+        extensions.append("recommonmark")
+    else:
+        source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+    source_suffix[".md"] = "markdown"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'Clang'
-copyright = u'2007-%d, The Clang Team' % date.today().year
+project = "Clang"
+copyright = "2007-%d, The Clang Team" % date.today().year
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'friendly'
+pygments_style = "friendly"
 
 in_progress_title = "(In-Progress) " if tags.has("PreRelease") else ""
 
@@ -103,31 +104,31 @@
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'haiku'
+html_theme = "haiku"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+# html_logo = None
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -136,88 +137,85 @@
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'Clangdoc'
+htmlhelp_basename = "Clangdoc"
 
 
 # -- Options for LaTeX output --------------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'Clang.tex', u'Clang Documentation',
-   u'The Clang Team', 'manual'),
+    ("index", "Clang.tex", "Clang Documentation", "The Clang Team", "manual"),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
 
 # If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
 
 
 # -- Options for manual page output --------------------------------------------
@@ -229,38 +227,49 @@
 # Automatically derive the list of man pages from the contents of the command
 # guide subdirectory. This was copied from llvm/docs/conf.py.
 basedir = os.path.dirname(__file__)
-man_page_authors = u'Maintained by the Clang / LLVM Team (<http://clang.llvm.org>)'
-command_guide_subpath = 'CommandGuide'
+man_page_authors = "Maintained by the Clang / LLVM Team (<http://clang.llvm.org>)"
+command_guide_subpath = "CommandGuide"
 command_guide_path = os.path.join(basedir, command_guide_subpath)
 for name in os.listdir(command_guide_path):
     # Ignore non-ReST files and the index page.
-    if not name.endswith('.rst') or name in ('index.rst',):
+    if not name.endswith(".rst") or name in ("index.rst",):
         continue
 
     # Otherwise, automatically extract the description.
     file_subpath = os.path.join(command_guide_subpath, name)
     with open(os.path.join(command_guide_path, name)) as f:
-        title = f.readline().rstrip('\n')
-        header = f.readline().rstrip('\n')
+        title = f.readline().rstrip("\n")
+        header = f.readline().rstrip("\n")
 
         if len(header) != len(title):
-            print((
-                "error: invalid header in %r (does not match title)" % (
-                    file_subpath,)), file=sys.stderr)
-        if ' - ' not in title:
-            print((
-                ("error: invalid title in %r "
-                 "(expected '<name> - <description>')") % (
-                    file_subpath,)), file=sys.stderr)
+            print(
+                (
+                    "error: invalid header in %r (does not match title)"
+                    % (file_subpath,)
+                ),
+                file=sys.stderr,
+            )
+        if " - " not in title:
+            print(
+                (
+                    (
+                        "error: invalid title in %r "
+                        "(expected '<name> - <description>')"
+                    )
+                    % (file_subpath,)
+                ),
+                file=sys.stderr,
+            )
 
         # Split the name out of the title.
-        name,description = title.split(' - ', 1)
-        man_pages.append((file_subpath.replace('.rst',''), name,
-                          description, man_page_authors, 1))
+        name, description = title.split(" - ", 1)
+        man_pages.append(
+            (file_subpath.replace(".rst", ""), name, description, man_page_authors, 1)
+        )
 
 
 # If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
 
 
 # -- Options for Texinfo output ------------------------------------------------
@@ -269,16 +278,22 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-  ('index', 'Clang', u'Clang Documentation',
-   u'The Clang Team', 'Clang', 'One line description of project.',
-   'Miscellaneous'),
+    (
+        "index",
+        "Clang",
+        "Clang Documentation",
+        "The Clang Team",
+        "Clang",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
 # Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
 
 # If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
 
 # How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'

diff --git a/clang/docs/tools/dump_ast_matchers.py b/clang/docs/tools/dump_ast_matchers.py
index d3f39ee456758..a2d96aab91294 100755
--- a/clang/docs/tools/dump_ast_matchers.py
+++ b/clang/docs/tools/dump_ast_matchers.py
@@ -5,24 +5,25 @@
 
 import collections
 import re
+
 try:
     from urllib.request import urlopen
 except ImportError:
     from urllib2 import urlopen
 
-CLASS_INDEX_PAGE_URL = 'https://clang.llvm.org/doxygen/classes.html'
+CLASS_INDEX_PAGE_URL = "https://clang.llvm.org/doxygen/classes.html"
 try:
-  CLASS_INDEX_PAGE = urlopen(CLASS_INDEX_PAGE_URL).read().decode('utf-8')
+    CLASS_INDEX_PAGE = urlopen(CLASS_INDEX_PAGE_URL).read().decode("utf-8")
 except Exception as e:
-  raise Exception('Unable to get %s: %s' % (CLASS_INDEX_PAGE_URL, e))
+    raise Exception("Unable to get %s: %s" % (CLASS_INDEX_PAGE_URL, e))
 
-MATCHERS_FILE = '../../include/clang/ASTMatchers/ASTMatchers.h'
+MATCHERS_FILE = "../../include/clang/ASTMatchers/ASTMatchers.h"
 
 # Each matcher is documented in one row of the form:
 #   result | name | argA
 # The subsequent row contains the documentation and is hidden by default,
 # becoming visible via javascript when the user clicks the matcher name.
-TD_TEMPLATE="""
+TD_TEMPLATE = """
 <tr><td>%(result)s</td><td class="name" onclick="toggle('%(id)s')"><a name="%(id)sAnchor">%(name)s</a></td><td>%(args)s</td></tr>
 <tr><td colspan="4" class="doc" id="%(id)s"><pre>%(comment)s</pre></td></tr>
 """
@@ -40,189 +41,221 @@
 # Cache for doxygen urls we have already verified.
 doxygen_probes = {}
 
+
 def esc(text):
-  """Escape any html in the given text."""
-  text = re.sub(r'&', '&amp;', text)
-  text = re.sub(r'<', '&lt;', text)
-  text = re.sub(r'>', '&gt;', text)
-  def link_if_exists(m):
-    """Wrap a likely AST node name in a link to its clang docs.
-
-       We want to do this only if the page exists, in which case it will be
-       referenced from the class index page.
-    """
-    name = m.group(1)
-    url = 'https://clang.llvm.org/doxygen/classclang_1_1%s.html' % name
-    if url not in doxygen_probes:
-      search_str = 'href="classclang_1_1%s.html"' % name
-      doxygen_probes[url] = search_str in CLASS_INDEX_PAGE
-      if not doxygen_probes[url]:
-        print('Did not find %s in class index page' % name)
-    if doxygen_probes[url]:
-      return r'Matcher&lt;<a href="%s">%s</a>&gt;' % (url, name)
-    else:
-      return m.group(0)
-  text = re.sub(
-    r'Matcher&lt;([^\*&]+)&gt;', link_if_exists, text)
-  return text
+    """Escape any html in the given text."""
+    text = re.sub(r"&", "&amp;", text)
+    text = re.sub(r"<", "&lt;", text)
+    text = re.sub(r">", "&gt;", text)
+
+    def link_if_exists(m):
+        """Wrap a likely AST node name in a link to its clang docs.
+
+        We want to do this only if the page exists, in which case it will be
+        referenced from the class index page.
+        """
+        name = m.group(1)
+        url = "https://clang.llvm.org/doxygen/classclang_1_1%s.html" % name
+        if url not in doxygen_probes:
+            search_str = 'href="classclang_1_1%s.html"' % name
+            doxygen_probes[url] = search_str in CLASS_INDEX_PAGE
+            if not doxygen_probes[url]:
+                print("Did not find %s in class index page" % name)
+        if doxygen_probes[url]:
+            return r'Matcher&lt;<a href="%s">%s</a>&gt;' % (url, name)
+        else:
+            return m.group(0)
+
+    text = re.sub(r"Matcher&lt;([^\*&]+)&gt;", link_if_exists, text)
+    return text
+
 
 def extract_result_types(comment):
-  """Extracts a list of result types from the given comment.
-
-     We allow annotations in the comment of the matcher to specify what
-     nodes a matcher can match on. Those comments have the form:
-       Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])
-
-     Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
-     Returns the empty list if no 'Usable as' specification could be
-     parsed.
-  """
-  result_types = []
-  m = re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S)
-  if m:
-    return ['*']
-  while True:
-    m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
-    if not m:
-      if re.search(r'Usable as:\s*$', comment):
-        return result_types
-      else:
-        return None
-    result_types += [m.group(2)]
-    comment = m.group(1)
+    """Extracts a list of result types from the given comment.
+
+    We allow annotations in the comment of the matcher to specify what
+    nodes a matcher can match on. Those comments have the form:
+      Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])
+
+    Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
+    Returns the empty list if no 'Usable as' specification could be
+    parsed.
+    """
+    result_types = []
+    m = re.search(r"Usable as: Any Matcher[\s\n]*$", comment, re.S)
+    if m:
+        return ["*"]
+    while True:
+        m = re.match(r"^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$", comment, re.S)
+        if not m:
+            if re.search(r"Usable as:\s*$", comment):
+                return result_types
+            else:
+                return None
+        result_types += [m.group(2)]
+        comment = m.group(1)
+
 
 def strip_doxygen(comment):
-  """Returns the given comment without \-escaped words."""
-  # If there is only a doxygen keyword in the line, delete the whole line.
-  comment = re.sub(r'^\\[^\s]+\n', r'', comment, flags=re.M)
-  
-  # If there is a doxygen \see command, change the \see prefix into "See also:".
-  # FIXME: it would be better to turn this into a link to the target instead.
-  comment = re.sub(r'\\see', r'See also:', comment)
-  
-  # Delete the doxygen command and the following whitespace.
-  comment = re.sub(r'\\[^\s]+\s+', r'', comment)
-  return comment
+    """Returns the given comment without \-escaped words."""
+    # If there is only a doxygen keyword in the line, delete the whole line.
+    comment = re.sub(r"^\\[^\s]+\n", r"", comment, flags=re.M)
+
+    # If there is a doxygen \see command, change the \see prefix into "See also:".
+    # FIXME: it would be better to turn this into a link to the target instead.
+    comment = re.sub(r"\\see", r"See also:", comment)
+
+    # Delete the doxygen command and the following whitespace.
+    comment = re.sub(r"\\[^\s]+\s+", r"", comment)
+    return comment
+
 
 def unify_arguments(args):
-  """Gets rid of anything the user doesn't care about in the argument list."""
-  args = re.sub(r'internal::', r'', args)
-  args = re.sub(r'extern const\s+(.*)&', r'\1 ', args)
-  args = re.sub(r'&', r' ', args)
-  args = re.sub(r'(^|\s)M\d?(\s)', r'\1Matcher<*>\2', args)
-  args = re.sub(r'BindableMatcher', r'Matcher', args)
-  args = re.sub(r'const Matcher', r'Matcher', args)
-  return args
+    """Gets rid of anything the user doesn't care about in the argument list."""
+    args = re.sub(r"internal::", r"", args)
+    args = re.sub(r"extern const\s+(.*)&", r"\1 ", args)
+    args = re.sub(r"&", r" ", args)
+    args = re.sub(r"(^|\s)M\d?(\s)", r"\1Matcher<*>\2", args)
+    args = re.sub(r"BindableMatcher", r"Matcher", args)
+    args = re.sub(r"const Matcher", r"Matcher", args)
+    return args
+
 
 def unify_type(result_type):
-  """Gets rid of anything the user doesn't care about in the type name."""
-  result_type = re.sub(r'^internal::(Bindable)?Matcher<([a-zA-Z_][a-zA-Z0-9_]*)>$', r'\2', result_type)
-  return result_type
+    """Gets rid of anything the user doesn't care about in the type name."""
+    result_type = re.sub(
+        r"^internal::(Bindable)?Matcher<([a-zA-Z_][a-zA-Z0-9_]*)>$", r"\2", result_type
+    )
+    return result_type
+
 
 def add_matcher(result_type, name, args, comment, is_dyncast=False):
-  """Adds a matcher to one of our categories."""
-  if name == 'id':
-     # FIXME: Figure out whether we want to support the 'id' matcher.
-     return
-  matcher_id = '%s%d' % (name, ids[name])
-  ids[name] += 1
-  args = unify_arguments(args)
-  result_type = unify_type(result_type)
-
-  docs_result_type = esc('Matcher<%s>' % result_type);
-
-  if name == 'mapAnyOf':
-    args = "nodeMatcherFunction..."
-    docs_result_type = "<em>unspecified</em>"
-
-  matcher_html = TD_TEMPLATE % {
-    'result': docs_result_type,
-    'name': name,
-    'args': esc(args),
-    'comment': esc(strip_doxygen(comment)),
-    'id': matcher_id,
-  }
-  if is_dyncast:
-    dict = node_matchers
-    lookup = result_type + name
-  # Use a heuristic to figure out whether a matcher is a narrowing or
-  # traversal matcher. By default, matchers that take other matchers as
-  # arguments (and are not node matchers) do traversal. We specifically
-  # exclude known narrowing matchers that also take other matchers as
-  # arguments.
-  elif ('Matcher<' not in args or
-        name in ['allOf', 'anyOf', 'anything', 'unless', 'mapAnyOf']):
-    dict = narrowing_matchers
-    lookup = result_type + name + esc(args)
-  else:
-    dict = traversal_matchers
-    lookup = result_type + name + esc(args)
-  
-  if dict.get(lookup) is None or len(dict.get(lookup)) < len(matcher_html):
-    dict[lookup] = matcher_html
+    """Adds a matcher to one of our categories."""
+    if name == "id":
+        # FIXME: Figure out whether we want to support the 'id' matcher.
+        return
+    matcher_id = "%s%d" % (name, ids[name])
+    ids[name] += 1
+    args = unify_arguments(args)
+    result_type = unify_type(result_type)
+
+    docs_result_type = esc("Matcher<%s>" % result_type)
+
+    if name == "mapAnyOf":
+        args = "nodeMatcherFunction..."
+        docs_result_type = "<em>unspecified</em>"
+
+    matcher_html = TD_TEMPLATE % {
+        "result": docs_result_type,
+        "name": name,
+        "args": esc(args),
+        "comment": esc(strip_doxygen(comment)),
+        "id": matcher_id,
+    }
+    if is_dyncast:
+        dict = node_matchers
+        lookup = result_type + name
+    # Use a heuristic to figure out whether a matcher is a narrowing or
+    # traversal matcher. By default, matchers that take other matchers as
+    # arguments (and are not node matchers) do traversal. We specifically
+    # exclude known narrowing matchers that also take other matchers as
+    # arguments.
+    elif "Matcher<" not in args or name in [
+        "allOf",
+        "anyOf",
+        "anything",
+        "unless",
+        "mapAnyOf",
+    ]:
+        dict = narrowing_matchers
+        lookup = result_type + name + esc(args)
+    else:
+        dict = traversal_matchers
+        lookup = result_type + name + esc(args)
+
+    if dict.get(lookup) is None or len(dict.get(lookup)) < len(matcher_html):
+        dict[lookup] = matcher_html
+
 
 def act_on_decl(declaration, comment, allowed_types):
-  """Parse the matcher out of the given declaration and comment.
+    """Parse the matcher out of the given declaration and comment.
 
-     If 'allowed_types' is set, it contains a list of node types the matcher
-     can match on, as extracted from the static type asserts in the matcher
-     definition.
-  """
-  if declaration.strip():
+    If 'allowed_types' is set, it contains a list of node types the matcher
+    can match on, as extracted from the static type asserts in the matcher
+    definition.
+    """
+    if declaration.strip():
 
-    if re.match(r'^\s?(#|namespace|using)', declaration): return
+        if re.match(r"^\s?(#|namespace|using)", declaration):
+            return
 
-    # Node matchers are defined by writing:
-    #   VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name;
-    m = re.match(r""".*Variadic(?:DynCast)?AllOfMatcher\s*<
+        # Node matchers are defined by writing:
+        #   VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name;
+        m = re.match(
+            r""".*Variadic(?:DynCast)?AllOfMatcher\s*<
                        \s*([^\s,]+)\s*(?:,
                        \s*([^\s>]+)\s*)?>
-                       \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
-    if m:
-      result, inner, name = m.groups()
-      if not inner:
-        inner = result
-      add_matcher(result, name, 'Matcher<%s>...' % inner,
-                  comment, is_dyncast=True)
-      return
-
-    # Special case of type matchers:
-    #   AstTypeMatcher<ArgumentType> name
-    m = re.match(r""".*AstTypeMatcher\s*<
+                       \s*([^\s;]+)\s*;\s*$""",
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            result, inner, name = m.groups()
+            if not inner:
+                inner = result
+            add_matcher(
+                result, name, "Matcher<%s>..." % inner, comment, is_dyncast=True
+            )
+            return
+
+        # Special case of type matchers:
+        #   AstTypeMatcher<ArgumentType> name
+        m = re.match(
+            r""".*AstTypeMatcher\s*<
                        \s*([^\s>]+)\s*>
-                       \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
-    if m:
-      inner, name = m.groups()
-      add_matcher('Type', name, 'Matcher<%s>...' % inner,
-                  comment, is_dyncast=True)
-      # FIXME: re-enable once we have implemented casting on the TypeLoc
-      # hierarchy.
-      # add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner,
-      #             comment, is_dyncast=True)
-      return
-
-    # Parse the various matcher definition macros.
-    m = re.match(""".*AST_TYPE(LOC)?_TRAVERSE_MATCHER(?:_DECL)?\(
+                       \s*([^\s;]+)\s*;\s*$""",
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            inner, name = m.groups()
+            add_matcher(
+                "Type", name, "Matcher<%s>..." % inner, comment, is_dyncast=True
+            )
+            # FIXME: re-enable once we have implemented casting on the TypeLoc
+            # hierarchy.
+            # add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner,
+            #             comment, is_dyncast=True)
+            return
+
+        # Parse the various matcher definition macros.
+        m = re.match(
+            """.*AST_TYPE(LOC)?_TRAVERSE_MATCHER(?:_DECL)?\(
                        \s*([^\s,]+\s*),
                        \s*(?:[^\s,]+\s*),
                        \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\)
-                     \)\s*;\s*$""", declaration, flags=re.X)
-    if m:
-      loc, name, results = m.groups()[0:3]
-      result_types = [r.strip() for r in results.split(',')]
-
-      comment_result_types = extract_result_types(comment)
-      if (comment_result_types and
-          sorted(result_types) != sorted(comment_result_types)):
-        raise Exception('Inconsistent documentation for: %s' % name)
-      for result_type in result_types:
-        add_matcher(result_type, name, 'Matcher<Type>', comment)
-        # if loc:
-        #   add_matcher('%sLoc' % result_type, '%sLoc' % name, 'Matcher<TypeLoc>',
-        #               comment)
-      return
-
-    m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
+                     \)\s*;\s*$""",
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            loc, name, results = m.groups()[0:3]
+            result_types = [r.strip() for r in results.split(",")]
+
+            comment_result_types = extract_result_types(comment)
+            if comment_result_types and sorted(result_types) != sorted(
+                comment_result_types
+            ):
+                raise Exception("Inconsistent documentation for: %s" % name)
+            for result_type in result_types:
+                add_matcher(result_type, name, "Matcher<Type>", comment)
+                # if loc:
+                #   add_matcher('%sLoc' % result_type, '%sLoc' % name, 'Matcher<TypeLoc>',
+                #               comment)
+            return
+
+        m = re.match(
+            r"""^\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
                           \s*([^\s,]+)\s*,
                           \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\)
                        (?:,\s*([^\s,]+)\s*
@@ -230,45 +263,56 @@ def act_on_decl(declaration, comment, allowed_types):
                        (?:,\s*([^\s,]+)\s*
                           ,\s*([^\s,]+)\s*)?
                        (?:,\s*\d+\s*)?
-                      \)\s*{\s*$""", declaration, flags=re.X)
-
-    if m:
-      p, n, name, results = m.groups()[0:4]
-      args = m.groups()[4:]
-      result_types = [r.strip() for r in results.split(',')]
-      if allowed_types and allowed_types != result_types:
-        raise Exception('Inconsistent documentation for: %s' % name)
-      if n not in ['', '2']:
-        raise Exception('Cannot parse "%s"' % declaration)
-      args = ', '.join('%s %s' % (args[i], args[i+1])
-                       for i in range(0, len(args), 2) if args[i])
-      for result_type in result_types:
-        add_matcher(result_type, name, args, comment)
-      return
-
-    m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER_REGEX(?:_OVERLOAD)?\(
+                      \)\s*{\s*$""",
+            declaration,
+            flags=re.X,
+        )
+
+        if m:
+            p, n, name, results = m.groups()[0:4]
+            args = m.groups()[4:]
+            result_types = [r.strip() for r in results.split(",")]
+            if allowed_types and allowed_types != result_types:
+                raise Exception("Inconsistent documentation for: %s" % name)
+            if n not in ["", "2"]:
+                raise Exception('Cannot parse "%s"' % declaration)
+            args = ", ".join(
+                "%s %s" % (args[i], args[i + 1])
+                for i in range(0, len(args), 2)
+                if args[i]
+            )
+            for result_type in result_types:
+                add_matcher(result_type, name, args, comment)
+            return
+
+        m = re.match(
+            r"""^\s*AST_POLYMORPHIC_MATCHER_REGEX(?:_OVERLOAD)?\(
                           \s*([^\s,]+)\s*,
                           \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\),
                           \s*([^\s,]+)\s*
                        (?:,\s*\d+\s*)?
-                      \)\s*{\s*$""", declaration, flags=re.X)
-
-    if m:
-      name, results, arg_name = m.groups()[0:3]
-      result_types = [r.strip() for r in results.split(',')]
-      if allowed_types and allowed_types != result_types:
-        raise Exception('Inconsistent documentation for: %s' % name)
-      arg = "StringRef %s, Regex::RegexFlags Flags = NoFlags" % arg_name
-      comment += """
+                      \)\s*{\s*$""",
+            declaration,
+            flags=re.X,
+        )
+
+        if m:
+            name, results, arg_name = m.groups()[0:3]
+            result_types = [r.strip() for r in results.split(",")]
+            if allowed_types and allowed_types != result_types:
+                raise Exception("Inconsistent documentation for: %s" % name)
+            arg = "StringRef %s, Regex::RegexFlags Flags = NoFlags" % arg_name
+            comment += """
 If the matcher is used in clang-query, RegexFlags parameter
 should be passed as a quoted string. e.g: "NoFlags".
 Flags can be combined with '|' example \"IgnoreCase | BasicRegex\"
 """
-      for result_type in result_types:
-        add_matcher(result_type, name, arg, comment)
-      return
+            for result_type in result_types:
+                add_matcher(result_type, name, arg, comment)
+            return
 
-    m = re.match(r"""^\s*AST_MATCHER_FUNCTION(_P)?(.?)(?:_OVERLOAD)?\(
+        m = re.match(
+            r"""^\s*AST_MATCHER_FUNCTION(_P)?(.?)(?:_OVERLOAD)?\(
                        (?:\s*([^\s,]+)\s*,)?
                           \s*([^\s,]+)\s*
                        (?:,\s*([^\s,]+)\s*
@@ -276,18 +320,25 @@ def act_on_decl(declaration, comment, allowed_types):
                        (?:,\s*([^\s,]+)\s*
                           ,\s*([^\s,]+)\s*)?
                        (?:,\s*\d+\s*)?
-                      \)\s*{\s*$""", declaration, flags=re.X)
-    if m:
-      p, n, result, name = m.groups()[0:4]
-      args = m.groups()[4:]
-      if n not in ['', '2']:
-        raise Exception('Cannot parse "%s"' % declaration)
-      args = ', '.join('%s %s' % (args[i], args[i+1])
-                       for i in range(0, len(args), 2) if args[i])
-      add_matcher(result, name, args, comment)
-      return
-
-    m = re.match(r"""^\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
+                      \)\s*{\s*$""",
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            p, n, result, name = m.groups()[0:4]
+            args = m.groups()[4:]
+            if n not in ["", "2"]:
+                raise Exception('Cannot parse "%s"' % declaration)
+            args = ", ".join(
+                "%s %s" % (args[i], args[i + 1])
+                for i in range(0, len(args), 2)
+                if args[i]
+            )
+            add_matcher(result, name, args, comment)
+            return
+
+        m = re.match(
+            r"""^\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
                        (?:\s*([^\s,]+)\s*,)?
                           \s*([^\s,]+)\s*
                        (?:,\s*([^,]+)\s*
@@ -295,160 +346,197 @@ def act_on_decl(declaration, comment, allowed_types):
                        (?:,\s*([^\s,]+)\s*
                           ,\s*([^\s,]+)\s*)?
                        (?:,\s*\d+\s*)?
-                      \)\s*{""", declaration, flags=re.X)
-    if m:
-      p, n, result, name = m.groups()[0:4]
-      args = m.groups()[4:]
-      if not result:
-        if not allowed_types:
-          raise Exception('Did not find allowed result types for: %s' % name)
-        result_types = allowed_types
-      else:
-        result_types = [result]
-      if n not in ['', '2']:
-        raise Exception('Cannot parse "%s"' % declaration)
-      args = ', '.join('%s %s' % (args[i], args[i+1])
-                       for i in range(0, len(args), 2) if args[i])
-      for result_type in result_types:
-        add_matcher(result_type, name, args, comment)
-      return
-
-    m = re.match(r"""^\s*AST_MATCHER_REGEX(?:_OVERLOAD)?\(
+                      \)\s*{""",
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            p, n, result, name = m.groups()[0:4]
+            args = m.groups()[4:]
+            if not result:
+                if not allowed_types:
+                    raise Exception("Did not find allowed result types for: %s" % name)
+                result_types = allowed_types
+            else:
+                result_types = [result]
+            if n not in ["", "2"]:
+                raise Exception('Cannot parse "%s"' % declaration)
+            args = ", ".join(
+                "%s %s" % (args[i], args[i + 1])
+                for i in range(0, len(args), 2)
+                if args[i]
+            )
+            for result_type in result_types:
+                add_matcher(result_type, name, args, comment)
+            return
+
+        m = re.match(
+            r"""^\s*AST_MATCHER_REGEX(?:_OVERLOAD)?\(
                        \s*([^\s,]+)\s*,
                        \s*([^\s,]+)\s*,
                        \s*([^\s,]+)\s*
                        (?:,\s*\d+\s*)?
-                      \)\s*{""", declaration, flags=re.X)
-    if m:
-      result, name, arg_name = m.groups()[0:3]
-      if not result:
-        if not allowed_types:
-          raise Exception('Did not find allowed result types for: %s' % name)
-        result_types = allowed_types
-      else:
-        result_types = [result]
-      arg = "StringRef %s, Regex::RegexFlags Flags = NoFlags" % arg_name
-      comment += """
+                      \)\s*{""",
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            result, name, arg_name = m.groups()[0:3]
+            if not result:
+                if not allowed_types:
+                    raise Exception("Did not find allowed result types for: %s" % name)
+                result_types = allowed_types
+            else:
+                result_types = [result]
+            arg = "StringRef %s, Regex::RegexFlags Flags = NoFlags" % arg_name
+            comment += """
 If the matcher is used in clang-query, RegexFlags parameter
 should be passed as a quoted string. e.g: "NoFlags".
 Flags can be combined with '|' example \"IgnoreCase | BasicRegex\"
 """
 
-      for result_type in result_types:
-        add_matcher(result_type, name, arg, comment)
-      return
+            for result_type in result_types:
+                add_matcher(result_type, name, arg, comment)
+            return
 
-    # Parse ArgumentAdapting matchers.
-    m = re.match(
-        r"""^.*ArgumentAdaptingMatcherFunc<.*>\s*
+        # Parse ArgumentAdapting matchers.
+        m = re.match(
+            r"""^.*ArgumentAdaptingMatcherFunc<.*>\s*
               ([a-zA-Z]*);$""",
-        declaration, flags=re.X)
-    if m:
-      name = m.groups()[0]
-      add_matcher('*', name, 'Matcher<*>', comment)
-      return
-
-    # Parse Variadic functions.
-    m = re.match(
-        r"""^.*internal::VariadicFunction\s*<\s*([^,]+),\s*([^,]+),\s*[^>]+>\s*
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            name = m.groups()[0]
+            add_matcher("*", name, "Matcher<*>", comment)
+            return
+
+        # Parse Variadic functions.
+        m = re.match(
+            r"""^.*internal::VariadicFunction\s*<\s*([^,]+),\s*([^,]+),\s*[^>]+>\s*
               ([a-zA-Z]*);$""",
-        declaration, flags=re.X)
-    if m:
-      result, arg, name = m.groups()[:3]
-      add_matcher(result, name, '%s, ..., %s' % (arg, arg), comment)
-      return
-
-    m = re.match(
-        r"""^.*internal::VariadicFunction\s*<\s*
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            result, arg, name = m.groups()[:3]
+            add_matcher(result, name, "%s, ..., %s" % (arg, arg), comment)
+            return
+
+        m = re.match(
+            r"""^.*internal::VariadicFunction\s*<\s*
               internal::PolymorphicMatcher<[\S\s]+
               AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\),\s*(.*);$""",
-        declaration, flags=re.X)
-
-    if m:
-      results, trailing = m.groups()
-      trailing, name = trailing.rsplit(">", 1)
-      name = name.strip()
-      trailing, _ = trailing.rsplit(",", 1)
-      _, arg = trailing.rsplit(",", 1)
-      arg = arg.strip()
-
-      result_types = [r.strip() for r in results.split(',')]
-      for result_type in result_types:
-        add_matcher(result_type, name, '%s, ..., %s' % (arg, arg), comment)
-      return
-      
-
-    # Parse Variadic operator matchers.
-    m = re.match(
-        r"""^.*VariadicOperatorMatcherFunc\s*<\s*([^,]+),\s*([^\s]+)\s*>\s*
+            declaration,
+            flags=re.X,
+        )
+
+        if m:
+            results, trailing = m.groups()
+            trailing, name = trailing.rsplit(">", 1)
+            name = name.strip()
+            trailing, _ = trailing.rsplit(",", 1)
+            _, arg = trailing.rsplit(",", 1)
+            arg = arg.strip()
+
+            result_types = [r.strip() for r in results.split(",")]
+            for result_type in result_types:
+                add_matcher(result_type, name, "%s, ..., %s" % (arg, arg), comment)
+            return
+
+        # Parse Variadic operator matchers.
+        m = re.match(
+            r"""^.*VariadicOperatorMatcherFunc\s*<\s*([^,]+),\s*([^\s]+)\s*>\s*
               ([a-zA-Z]*);$""",
-        declaration, flags=re.X)
-    if m:
-      min_args, max_args, name = m.groups()[:3]
-      if max_args == '1':
-        add_matcher('*', name, 'Matcher<*>', comment)
-        return
-      elif max_args == 'std::numeric_limits<unsigned>::max()':
-        add_matcher('*', name, 'Matcher<*>, ..., Matcher<*>', comment)
-        return
-
-    m = re.match(
-        r"""^.*MapAnyOfMatcher<.*>\s*
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            min_args, max_args, name = m.groups()[:3]
+            if max_args == "1":
+                add_matcher("*", name, "Matcher<*>", comment)
+                return
+            elif max_args == "std::numeric_limits<unsigned>::max()":
+                add_matcher("*", name, "Matcher<*>, ..., Matcher<*>", comment)
+                return
+
+        m = re.match(
+            r"""^.*MapAnyOfMatcher<.*>\s*
               ([a-zA-Z]*);$""",
-        declaration, flags=re.X)
-    if m:
-      name = m.groups()[0]
-      add_matcher('*', name, 'Matcher<*>...Matcher<*>', comment)
-      return
-
-    # Parse free standing matcher functions, like:
-    #   Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) {
-    m = re.match(r"""^\s*(?:template\s+<\s*(?:class|typename)\s+(.+)\s*>\s+)?   
+            declaration,
+            flags=re.X,
+        )
+        if m:
+            name = m.groups()[0]
+            add_matcher("*", name, "Matcher<*>...Matcher<*>", comment)
+            return
+
+        # Parse free standing matcher functions, like:
+        #   Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) {
+        m = re.match(
+            r"""^\s*(?:template\s+<\s*(?:class|typename)\s+(.+)\s*>\s+)?   
                      (.*)\s+
                      ([^\s\(]+)\s*\(
                      (.*)
-                     \)\s*{""", declaration, re.X)
-    if m:
-      template_name, result, name, args = m.groups()
-      if template_name:
-        matcherTemplateArgs = re.findall(r'Matcher<\s*(%s)\s*>' % template_name, args)
-        templateArgs = re.findall(r'(?:^|[\s,<])(%s)(?:$|[\s,>])' % template_name, args)
-        if len(matcherTemplateArgs) < len(templateArgs):
-          # The template name is used naked, so don't replace it with `*` later on
-          template_name = None
-        else :
-          args = re.sub(r'(^|[\s,<])%s($|[\s,>])' % template_name, r'\1*\2', args)
-      args = ', '.join(p.strip() for p in args.split(','))
-      m = re.match(r'(?:^|.*\s+)internal::(?:Bindable)?Matcher<([^>]+)>$', result)
-      if m:
-        result_types = [m.group(1)]
-        if template_name and len(result_types) == 1 and result_types[0] == template_name:
-          result_types = ['*']
-      else:
-        result_types = extract_result_types(comment)
-      if not result_types:
-        if not comment:
-          # Only overloads don't have their own doxygen comments; ignore those.
-          print('Ignoring "%s"' % name)
+                     \)\s*{""",
+            declaration,
+            re.X,
+        )
+        if m:
+            template_name, result, name, args = m.groups()
+            if template_name:
+                matcherTemplateArgs = re.findall(
+                    r"Matcher<\s*(%s)\s*>" % template_name, args
+                )
+                templateArgs = re.findall(
+                    r"(?:^|[\s,<])(%s)(?:$|[\s,>])" % template_name, args
+                )
+                if len(matcherTemplateArgs) < len(templateArgs):
+                    # The template name is used naked, so don't replace it with `*` later on
+                    template_name = None
+                else:
+                    args = re.sub(
+                        r"(^|[\s,<])%s($|[\s,>])" % template_name, r"\1*\2", args
+                    )
+            args = ", ".join(p.strip() for p in args.split(","))
+            m = re.match(r"(?:^|.*\s+)internal::(?:Bindable)?Matcher<([^>]+)>$", result)
+            if m:
+                result_types = [m.group(1)]
+                if (
+                    template_name
+                    and len(result_types) == 1
+                    and result_types[0] == template_name
+                ):
+                    result_types = ["*"]
+            else:
+                result_types = extract_result_types(comment)
+            if not result_types:
+                if not comment:
+                    # Only overloads don't have their own doxygen comments; ignore those.
+                    print('Ignoring "%s"' % name)
+                else:
+                    print('Cannot determine result type for "%s"' % name)
+            else:
+                for result_type in result_types:
+                    add_matcher(result_type, name, args, comment)
         else:
-          print('Cannot determine result type for "%s"' % name)
-      else:
-        for result_type in result_types:
-          add_matcher(result_type, name, args, comment)
-    else:
-      print('*** Unparsable: "' + declaration + '" ***')
+            print('*** Unparsable: "' + declaration + '" ***')
+
 
 def sort_table(matcher_type, matcher_map):
-  """Returns the sorted html table for the given row map."""
-  table = ''
-  for key in sorted(matcher_map.keys()):
-    table += matcher_map[key] + '\n'
-  return ('<!-- START_%(type)s_MATCHERS -->\n' +
-          '%(table)s' + 
-          '<!--END_%(type)s_MATCHERS -->') % {
-    'type': matcher_type,
-    'table': table,
-  }
+    """Returns the sorted html table for the given row map."""
+    table = ""
+    for key in sorted(matcher_map.keys()):
+        table += matcher_map[key] + "\n"
+    return (
+        "<!-- START_%(type)s_MATCHERS -->\n"
+        + "%(table)s"
+        + "<!--END_%(type)s_MATCHERS -->"
+    ) % {
+        "type": matcher_type,
+        "table": table,
+    }
+
 
 # Parse the ast matchers.
 # We alternate between two modes:
@@ -457,51 +545,64 @@ def sort_table(matcher_type, matcher_map):
 #   definition might contain static asserts that specify the result
 #   type.
 # body = False: We parse the comments and declaration of the matcher.
-comment = ''
-declaration = ''
+comment = ""
+declaration = ""
 allowed_types = []
 body = False
 for line in open(MATCHERS_FILE).read().splitlines():
-  if body:
-    if line.strip() and line[0] == '}':
-      if declaration:
-        act_on_decl(declaration, comment, allowed_types)
-        comment = ''
-        declaration = ''
-        allowed_types = []
-      body = False
+    if body:
+        if line.strip() and line[0] == "}":
+            if declaration:
+                act_on_decl(declaration, comment, allowed_types)
+                comment = ""
+                declaration = ""
+                allowed_types = []
+            body = False
+        else:
+            m = re.search(r"is_base_of<([^,]+), NodeType>", line)
+            if m and m.group(1):
+                allowed_types += [m.group(1)]
+        continue
+    if line.strip() and line.lstrip()[0] == "/":
+        comment += re.sub(r"^/+\s?", "", line) + "\n"
     else:
-      m = re.search(r'is_base_of<([^,]+), NodeType>', line)
-      if m and m.group(1):
-        allowed_types += [m.group(1)]
-    continue
-  if line.strip() and line.lstrip()[0] == '/':
-    comment += re.sub(r'^/+\s?', '', line) + '\n'
-  else:
-    declaration += ' ' + line
-    if ((not line.strip()) or 
-        line.rstrip()[-1] == ';' or
-        (line.rstrip()[-1] == '{' and line.rstrip()[-3:] != '= {')):
-      if line.strip() and line.rstrip()[-1] == '{':
-        body = True
-      else:
-        act_on_decl(declaration, comment, allowed_types)
-        comment = ''
-        declaration = ''
-        allowed_types = []
-
-node_matcher_table = sort_table('DECL', node_matchers)
-narrowing_matcher_table = sort_table('NARROWING', narrowing_matchers)
-traversal_matcher_table = sort_table('TRAVERSAL', traversal_matchers)
-
-reference = open('../LibASTMatchersReference.html').read()
-reference = re.sub(r'<!-- START_DECL_MATCHERS.*END_DECL_MATCHERS -->',
-                   node_matcher_table, reference, flags=re.S)
-reference = re.sub(r'<!-- START_NARROWING_MATCHERS.*END_NARROWING_MATCHERS -->',
-                   narrowing_matcher_table, reference, flags=re.S)
-reference = re.sub(r'<!-- START_TRAVERSAL_MATCHERS.*END_TRAVERSAL_MATCHERS -->',
-                   traversal_matcher_table, reference, flags=re.S)
-
-with open('../LibASTMatchersReference.html', 'w', newline='\n') as output:
-  output.write(reference)
-
+        declaration += " " + line
+        if (
+            (not line.strip())
+            or line.rstrip()[-1] == ";"
+            or (line.rstrip()[-1] == "{" and line.rstrip()[-3:] != "= {")
+        ):
+            if line.strip() and line.rstrip()[-1] == "{":
+                body = True
+            else:
+                act_on_decl(declaration, comment, allowed_types)
+                comment = ""
+                declaration = ""
+                allowed_types = []
+
+node_matcher_table = sort_table("DECL", node_matchers)
+narrowing_matcher_table = sort_table("NARROWING", narrowing_matchers)
+traversal_matcher_table = sort_table("TRAVERSAL", traversal_matchers)
+
+reference = open("../LibASTMatchersReference.html").read()
+reference = re.sub(
+    r"<!-- START_DECL_MATCHERS.*END_DECL_MATCHERS -->",
+    node_matcher_table,
+    reference,
+    flags=re.S,
+)
+reference = re.sub(
+    r"<!-- START_NARROWING_MATCHERS.*END_NARROWING_MATCHERS -->",
+    narrowing_matcher_table,
+    reference,
+    flags=re.S,
+)
+reference = re.sub(
+    r"<!-- START_TRAVERSAL_MATCHERS.*END_TRAVERSAL_MATCHERS -->",
+    traversal_matcher_table,
+    reference,
+    flags=re.S,
+)
+
+with open("../LibASTMatchersReference.html", "w", newline="\n") as output:
+    output.write(reference)

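A note on the esc() helper reformatted above: the order of its three substitutions matters, because escaping "&" after "<" and ">" would corrupt the freshly inserted entities. A minimal sketch of just the escaping pass, leaving out the doxygen-link rewriting:

    import re

    def esc(text):
        # "&" must be escaped first; otherwise the "&" inside the "&lt;" and
        # "&gt;" entities produced below would itself become "&amp;".
        text = re.sub(r"&", "&amp;", text)
        text = re.sub(r"<", "&lt;", text)
        text = re.sub(r">", "&gt;", text)
        return text

    assert esc("Matcher<T &>") == "Matcher&lt;T &amp;&gt;"
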
diff  --git a/clang/docs/tools/dump_format_help.py b/clang/docs/tools/dump_format_help.py
index 68869d91056ce..041bee7806280 100755
--- a/clang/docs/tools/dump_format_help.py
+++ b/clang/docs/tools/dump_format_help.py
@@ -7,28 +7,27 @@
 import subprocess
 import sys
 
-CLANG_DIR = os.path.join(os.path.dirname(__file__), '../..')
-DOC_FILE = os.path.join(CLANG_DIR, 'docs/ClangFormat.rst')
+CLANG_DIR = os.path.join(os.path.dirname(__file__), "../..")
+DOC_FILE = os.path.join(CLANG_DIR, "docs/ClangFormat.rst")
 
 
 def substitute(text, tag, contents):
-    replacement = '\n.. START_%s\n\n%s\n\n.. END_%s\n' % (tag, contents, tag)
-    pattern = r'\n\.\. START_%s\n.*\n\.\. END_%s\n' % (tag, tag)
-    return re.sub(pattern, '%s', text, flags=re.S) % replacement
+    replacement = "\n.. START_%s\n\n%s\n\n.. END_%s\n" % (tag, contents, tag)
+    pattern = r"\n\.\. START_%s\n.*\n\.\. END_%s\n" % (tag, tag)
+    return re.sub(pattern, "%s", text, flags=re.S) % replacement
 
 
 def indent(text, columns, indent_first_line=True):
-    indent_str = ' ' * columns
-    s = re.sub(r'\n([^\n])', '\n' + indent_str + '\\1', text, flags=re.S)
-    if not indent_first_line or s.startswith('\n'):
+    indent_str = " " * columns
+    s = re.sub(r"\n([^\n])", "\n" + indent_str + "\\1", text, flags=re.S)
+    if not indent_first_line or s.startswith("\n"):
         return s
     return indent_str + s
 
 
 def get_help_output():
     args = ["clang-format", "--help"]
-    cmd = subprocess.Popen(args, stdout=subprocess.PIPE,
-                           stderr=subprocess.STDOUT)
+    cmd = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     out, _ = cmd.communicate()
     out = out.decode(sys.stdout.encoding)
     return out
@@ -36,12 +35,15 @@ def get_help_output():
 
 def get_help_text():
     out = get_help_output()
-    out = re.sub(r' clang-format\.exe ', ' clang-format ', out)
+    out = re.sub(r" clang-format\.exe ", " clang-format ", out)
 
-    out = '''.. code-block:: console
+    out = (
+        """.. code-block:: console
 
 $ clang-format -help
-''' + out
+"""
+        + out
+    )
     out = indent(out, 2, indent_first_line=False)
     return out
 
@@ -49,7 +51,7 @@ def get_help_text():
 def validate(text, columns):
     for line in text.splitlines():
         if len(line) > columns:
-            print('warning: line too long:\n', line, file=sys.stderr)
+            print("warning: line too long:\n", line, file=sys.stderr)
 
 
 help_text = get_help_text()
@@ -58,7 +60,7 @@ def validate(text, columns):
 with open(DOC_FILE) as f:
     contents = f.read()
 
-contents = substitute(contents, 'FORMAT_HELP', help_text)
+contents = substitute(contents, "FORMAT_HELP", help_text)
 
-with open(DOC_FILE, 'wb') as output:
+with open(DOC_FILE, "wb") as output:
     output.write(contents.encode())

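The substitute() helper above splices regenerated text between ".. START_<TAG>" and ".. END_<TAG>" markers in the .rst file. A minimal sketch of its behavior on a made-up document; the two-step approach (stamp a "%s" placeholder, then %-format the new block in) keeps re.sub from treating backslashes in the generated contents as escape sequences:

    import re

    def substitute(text, tag, contents):
        replacement = "\n.. START_%s\n\n%s\n\n.. END_%s\n" % (tag, contents, tag)
        pattern = r"\n\.\. START_%s\n.*\n\.\. END_%s\n" % (tag, tag)
        return re.sub(pattern, "%s", text, flags=re.S) % replacement

    doc = "Intro.\n.. START_FORMAT_HELP\n\nstale\n\n.. END_FORMAT_HELP\nOutro.\n"
    new = substitute(doc, "FORMAT_HELP", "fresh help text")
    assert ".. START_FORMAT_HELP\n\nfresh help text\n\n.. END_FORMAT_HELP" in new
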
diff  --git a/clang/docs/tools/dump_format_style.py b/clang/docs/tools/dump_format_style.py
index 7b032894ce62c..e531032e26587 100755
--- a/clang/docs/tools/dump_format_style.py
+++ b/clang/docs/tools/dump_format_style.py
@@ -10,354 +10,412 @@
 from io import TextIOWrapper
 from typing import Set
 
-CLANG_DIR = os.path.join(os.path.dirname(__file__), '../..')
-FORMAT_STYLE_FILE = os.path.join(CLANG_DIR, 'include/clang/Format/Format.h')
-INCLUDE_STYLE_FILE = os.path.join(CLANG_DIR, 'include/clang/Tooling/Inclusions/IncludeStyle.h')
-DOC_FILE = os.path.join(CLANG_DIR, 'docs/ClangFormatStyleOptions.rst')
+CLANG_DIR = os.path.join(os.path.dirname(__file__), "../..")
+FORMAT_STYLE_FILE = os.path.join(CLANG_DIR, "include/clang/Format/Format.h")
+INCLUDE_STYLE_FILE = os.path.join(
+    CLANG_DIR, "include/clang/Tooling/Inclusions/IncludeStyle.h"
+)
+DOC_FILE = os.path.join(CLANG_DIR, "docs/ClangFormatStyleOptions.rst")
 
-PLURALS_FILE = os.path.join(os.path.dirname(__file__), 'plurals.txt')
+PLURALS_FILE = os.path.join(os.path.dirname(__file__), "plurals.txt")
 
 plurals: Set[str] = set()
-with open(PLURALS_FILE, 'a+') as f:
-  f.seek(0)
-  plurals = set(f.read().splitlines())
+with open(PLURALS_FILE, "a+") as f:
+    f.seek(0)
+    plurals = set(f.read().splitlines())
+
 
 def substitute(text, tag, contents):
-  replacement = '\n.. START_%s\n\n%s\n\n.. END_%s\n' % (tag, contents, tag)
-  pattern = r'\n\.\. START_%s\n.*\n\.\. END_%s\n' % (tag, tag)
-  return re.sub(pattern, '%s', text, flags=re.S) % replacement
+    replacement = "\n.. START_%s\n\n%s\n\n.. END_%s\n" % (tag, contents, tag)
+    pattern = r"\n\.\. START_%s\n.*\n\.\. END_%s\n" % (tag, tag)
+    return re.sub(pattern, "%s", text, flags=re.S) % replacement
+
 
 def register_plural(singular: str, plural: str):
-  if plural not in plurals:
-    if not hasattr(register_plural, "generated_new_plural"):
-      print('Plural generation: you can use '
-      f'`git checkout -- {os.path.relpath(PLURALS_FILE)}` '
-      'to reemit warnings or `git add` to include new plurals\n')
-    register_plural.generated_new_plural = True
-
-    plurals.add(plural)
-    with open(PLURALS_FILE, 'a') as f:
-      f.write(plural + '\n')
-    cf = inspect.currentframe()
-    lineno = ''
-    if cf and cf.f_back:
-      lineno = ':' + str(cf.f_back.f_lineno)
-    print(f'{__file__}{lineno} check if plural of {singular} is {plural}', file=sys.stderr)
-  return plural
+    if plural not in plurals:
+        if not hasattr(register_plural, "generated_new_plural"):
+            print(
+                "Plural generation: you can use "
+                f"`git checkout -- {os.path.relpath(PLURALS_FILE)}` "
+                "to reemit warnings or `git add` to include new plurals\n"
+            )
+        register_plural.generated_new_plural = True
+
+        plurals.add(plural)
+        with open(PLURALS_FILE, "a") as f:
+            f.write(plural + "\n")
+        cf = inspect.currentframe()
+        lineno = ""
+        if cf and cf.f_back:
+            lineno = ":" + str(cf.f_back.f_lineno)
+        print(
+            f"{__file__}{lineno} check if plural of {singular} is {plural}",
+            file=sys.stderr,
+        )
+    return plural
+
 
 def pluralize(word: str):
-  lword = word.lower()
-  if len(lword) >= 2 and lword[-1] == 'y' and lword[-2] not in 'aeiou':
-    return register_plural(word, word[:-1] + 'ies')
-  elif lword.endswith(('s', 'sh', 'ch', 'x', 'z')):
-    return register_plural(word, word[:-1] + 'es')
-  elif lword.endswith('fe'):
-    return register_plural(word, word[:-2] + 'ves')
-  elif lword.endswith('f') and not lword.endswith('ff'):
-    return register_plural(word, word[:-1] + 'ves')
-  else:
-    return register_plural(word, word + 's')
+    lword = word.lower()
+    if len(lword) >= 2 and lword[-1] == "y" and lword[-2] not in "aeiou":
+        return register_plural(word, word[:-1] + "ies")
+    elif lword.endswith(("s", "sh", "ch", "x", "z")):
+        return register_plural(word, word[:-1] + "es")
+    elif lword.endswith("fe"):
+        return register_plural(word, word[:-2] + "ves")
+    elif lword.endswith("f") and not lword.endswith("ff"):
+        return register_plural(word, word[:-1] + "ves")
+    else:
+        return register_plural(word, word + "s")
 
 
 def to_yaml_type(typestr: str):
-  if typestr == 'bool':
-    return 'Boolean'
-  elif typestr == 'int':
-    return 'Integer'
-  elif typestr == 'unsigned':
-    return 'Unsigned'
-  elif typestr == 'std::string':
-    return 'String'
+    if typestr == "bool":
+        return "Boolean"
+    elif typestr == "int":
+        return "Integer"
+    elif typestr == "unsigned":
+        return "Unsigned"
+    elif typestr == "std::string":
+        return "String"
+
+    match = re.match(r"std::vector<(.*)>$", typestr)
+    if match:
+        return "List of " + pluralize(to_yaml_type(match.group(1)))
 
-  match = re.match(r'std::vector<(.*)>$', typestr)
-  if match:
-    return 'List of ' + pluralize(to_yaml_type(match.group(1)))
+    match = re.match(r"std::optional<(.*)>$", typestr)
+    if match:
+        return to_yaml_type(match.group(1))
 
-  match = re.match(r'std::optional<(.*)>$', typestr)
-  if match:
-    return to_yaml_type(match.group(1))
+    return typestr
 
-  return typestr
 
 def doxygen2rst(text):
-  text = re.sub(r'<tt>\s*(.*?)\s*<\/tt>', r'``\1``', text)
-  text = re.sub(r'\\c ([^ ,;\.]+)', r'``\1``', text)
-  text = re.sub(r'\\\w+ ', '', text)
-  return text
+    text = re.sub(r"<tt>\s*(.*?)\s*<\/tt>", r"``\1``", text)
+    text = re.sub(r"\\c ([^ ,;\.]+)", r"``\1``", text)
+    text = re.sub(r"\\\w+ ", "", text)
+    return text
+
 
 def indent(text, columns, indent_first_line=True):
-  indent_str = ' ' * columns
-  s = re.sub(r'\n([^\n])', '\n' + indent_str + '\\1', text, flags=re.S)
-  if not indent_first_line or s.startswith('\n'):
-    return s
-  return indent_str + s
+    indent_str = " " * columns
+    s = re.sub(r"\n([^\n])", "\n" + indent_str + "\\1", text, flags=re.S)
+    if not indent_first_line or s.startswith("\n"):
+        return s
+    return indent_str + s
+
 
 class Option(object):
-  def __init__(self, name, opt_type, comment, version):
-    self.name = name
-    self.type = opt_type
-    self.comment = comment.strip()
-    self.enum = None
-    self.nested_struct = None
-    self.version = version
-
-  def __str__(self):
-    s = ".. _%s:\n\n**%s** (``%s``) " % (self.name, self.name, to_yaml_type(self.type))
-    if self.version:
-      s += ':versionbadge:`clang-format %s` ' % self.version
-    s += ':ref:`¶ <%s>`\n%s' % (self.name, doxygen2rst(indent(self.comment, 2)))
-    if self.enum and self.enum.values:
-      s += indent('\n\nPossible values:\n\n%s\n' % self.enum, 2)
-    if self.nested_struct:
-      s += indent('\n\nNested configuration flags:\n\n%s\n' %self.nested_struct,
-                  2)
-    return s
+    def __init__(self, name, opt_type, comment, version):
+        self.name = name
+        self.type = opt_type
+        self.comment = comment.strip()
+        self.enum = None
+        self.nested_struct = None
+        self.version = version
+
+    def __str__(self):
+        s = ".. _%s:\n\n**%s** (``%s``) " % (
+            self.name,
+            self.name,
+            to_yaml_type(self.type),
+        )
+        if self.version:
+            s += ":versionbadge:`clang-format %s` " % self.version
+        s += ":ref:`¶ <%s>`\n%s" % (self.name, doxygen2rst(indent(self.comment, 2)))
+        if self.enum and self.enum.values:
+            s += indent("\n\nPossible values:\n\n%s\n" % self.enum, 2)
+        if self.nested_struct:
+            s += indent(
+                "\n\nNested configuration flags:\n\n%s\n" % self.nested_struct, 2
+            )
+        return s
+
 
 class NestedStruct(object):
-  def __init__(self, name, comment):
-    self.name = name
-    self.comment = comment.strip()
-    self.values = []
+    def __init__(self, name, comment):
+        self.name = name
+        self.comment = comment.strip()
+        self.values = []
+
+    def __str__(self):
+        return self.comment + "\n" + "\n".join(map(str, self.values))
 
-  def __str__(self):
-    return self.comment + '\n' + '\n'.join(map(str, self.values))
 
 class NestedField(object):
-  def __init__(self, name, comment):
-    self.name = name
-    self.comment = comment.strip()
+    def __init__(self, name, comment):
+        self.name = name
+        self.comment = comment.strip()
+
+    def __str__(self):
+        return "\n* ``%s`` %s" % (
+            self.name,
+            doxygen2rst(indent(self.comment, 2, indent_first_line=False)),
+        )
 
-  def __str__(self):
-    return '\n* ``%s`` %s' % (
-        self.name,
-        doxygen2rst(indent(self.comment, 2, indent_first_line=False)))
 
 class Enum(object):
-  def __init__(self, name, comment):
-    self.name = name
-    self.comment = comment.strip()
-    self.values = []
+    def __init__(self, name, comment):
+        self.name = name
+        self.comment = comment.strip()
+        self.values = []
+
+    def __str__(self):
+        return "\n".join(map(str, self.values))
 
-  def __str__(self):
-    return '\n'.join(map(str, self.values))
 
 class NestedEnum(object):
-  def __init__(self, name, enumtype, comment, values):
-    self.name = name
-    self.comment = comment
-    self.values = values
-    self.type = enumtype
-
-  def __str__(self):
-    s = '\n* ``%s %s``\n%s' % (to_yaml_type(self.type), self.name,
-                                 doxygen2rst(indent(self.comment, 2)))
-    s += indent('\nPossible values:\n\n', 2)
-    s += indent('\n'.join(map(str, self.values)), 2)
-    return s
+    def __init__(self, name, enumtype, comment, values):
+        self.name = name
+        self.comment = comment
+        self.values = values
+        self.type = enumtype
+
+    def __str__(self):
+        s = "\n* ``%s %s``\n%s" % (
+            to_yaml_type(self.type),
+            self.name,
+            doxygen2rst(indent(self.comment, 2)),
+        )
+        s += indent("\nPossible values:\n\n", 2)
+        s += indent("\n".join(map(str, self.values)), 2)
+        return s
+
 
 class EnumValue(object):
-  def __init__(self, name, comment, config):
-    self.name = name
-    self.comment = comment
-    self.config = config
+    def __init__(self, name, comment, config):
+        self.name = name
+        self.comment = comment
+        self.config = config
 
-  def __str__(self):
-    return '* ``%s`` (in configuration: ``%s``)\n%s' % (
-        self.name,
-        re.sub('.*_', '', self.config),
-        doxygen2rst(indent(self.comment, 2)))
+    def __str__(self):
+        return "* ``%s`` (in configuration: ``%s``)\n%s" % (
+            self.name,
+            re.sub(".*_", "", self.config),
+            doxygen2rst(indent(self.comment, 2)),
+        )
 
 
 class OptionsReader:
-  def __init__(self, header: TextIOWrapper):
-    self.header = header
-    self.in_code_block = False
-    self.code_indent = 0
-    self.lineno = 0
-    self.last_err_lineno = -1
+    def __init__(self, header: TextIOWrapper):
+        self.header = header
+        self.in_code_block = False
+        self.code_indent = 0
+        self.lineno = 0
+        self.last_err_lineno = -1
 
-  def __file_path(self):
-    return os.path.relpath(self.header.name)
+    def __file_path(self):
+        return os.path.relpath(self.header.name)
 
-  def __print_line(self, line: str):
-    print(f'{self.lineno:>6} | {line}', file=sys.stderr)
+    def __print_line(self, line: str):
+        print(f"{self.lineno:>6} | {line}", file=sys.stderr)
 
-  def __warning(self, msg: str, line: str):
-    print(f'{self.__file_path()}:{self.lineno}: warning: {msg}:', file=sys.stderr)
-    self.__print_line(line)
-
-  def __clean_comment_line(self, line: str):
-    match = re.match(r'^/// (?P<indent> +)?\\code(\{.(?P<lang>\w+)\})?$', line)
-    if match:
-      if self.in_code_block:
-        self.__warning('`\\code` in another `\\code`', line)
-      self.in_code_block = True
-      indent_str = match.group('indent')
-      if not indent_str:
-        indent_str = ''
-      self.code_indent = len(indent_str)
-      lang = match.group('lang')
-      if not lang:
-        lang = 'c++'
-      return f'\n{indent_str}.. code-block:: {lang}\n\n'
-
-    endcode_match = re.match(r'^/// +\\endcode$', line)
-    if endcode_match:
-      if not self.in_code_block:
-        self.__warning('no correct `\\code` found before this `\\endcode`', line)
-      self.in_code_block = False
-      return ''
-
-    # check code block indentation
-    if (self.in_code_block and not line == '///' and not
-        line.startswith('///  ' + ' ' * self.code_indent)):
-      if self.last_err_lineno == self.lineno - 1:
+    def __warning(self, msg: str, line: str):
+        print(f"{self.__file_path()}:{self.lineno}: warning: {msg}:", file=sys.stderr)
         self.__print_line(line)
-      else:
-        self.__warning('code block should be indented', line)
-      self.last_err_lineno = self.lineno
 
-    match = re.match(r'^/// \\warning$', line)
-    if match:
-      return '\n.. warning:: \n\n'
-
-    endwarning_match = re.match(r'^/// +\\endwarning$', line)
-    if endwarning_match:
-      return ''
-    return line[4:] + '\n'
-
-  def read_options(self):
-    class State:
-      BeforeStruct, Finished, InStruct, InNestedStruct, InNestedFieldComment, \
-        InFieldComment, InEnum, InEnumMemberComment = range(8)
-    state = State.BeforeStruct
-
-    options = []
-    enums = {}
-    nested_structs = {}
-    comment = ''
-    enum = None
-    nested_struct = None
-    version = None
-
-    for line in self.header:
-      self.lineno += 1
-      line = line.strip()
-      if state == State.BeforeStruct:
-        if line in ('struct FormatStyle {', 'struct IncludeStyle {'):
-          state = State.InStruct
-      elif state == State.InStruct:
-        if line.startswith('///'):
-          state = State.InFieldComment
-          comment = self.__clean_comment_line(line)
-        elif line == '};':
-          state = State.Finished
-          break
-      elif state == State.InFieldComment:
-        if line.startswith(r'/// \version'):
-          match = re.match(r'/// \\version\s*(?P<version>[0-9.]+)*', line)
-          if match:
-            version = match.group('version')
-        elif line.startswith('///'):
-          comment += self.__clean_comment_line(line)
-        elif line.startswith('enum'):
-          state = State.InEnum
-          name = re.sub(r'enum\s+(\w+)\s*(:((\s*\w+)+)\s*)?\{', '\\1', line)
-          enum = Enum(name, comment)
-        elif line.startswith('struct'):
-          state = State.InNestedStruct
-          name = re.sub(r'struct\s+(\w+)\s*\{', '\\1', line)
-          nested_struct = NestedStruct(name, comment)
-        elif line.endswith(';'):
-          prefix = '// '
-          if line.startswith(prefix):
-            line = line[len(prefix):]
-          state = State.InStruct
-          field_type, field_name = re.match(r'([<>:\w(,\s)]+)\s+(\w+);',
-                                            line).groups()
-
-          if not version:
-            self.__warning(f'missing version for {field_name}', line)
-          option = Option(str(field_name), str(field_type), comment, version)
-          options.append(option)
-          version = None
-        else:
-          raise Exception('Invalid format, expected comment, field or enum\n' + line)
-      elif state == State.InNestedStruct:
-        if line.startswith('///'):
-          state = State.InNestedFieldComment
-          comment = self.__clean_comment_line(line)
-        elif line == '};':
-          state = State.InStruct
-          nested_structs[nested_struct.name] = nested_struct
-      elif state == State.InNestedFieldComment:
-        if line.startswith('///'):
-          comment += self.__clean_comment_line(line)
-        else:
-          state = State.InNestedStruct
-          field_type, field_name = re.match(r'([<>:\w(,\s)]+)\s+(\w+);', line).groups()
-          if field_type in enums:
-            nested_struct.values.append(NestedEnum(field_name,
-                                                   field_type,
-                                                   comment,
-                                                   enums[field_type].values))
-          else:
-            nested_struct.values.append(NestedField(field_type + " " + field_name, comment))
-
-      elif state == State.InEnum:
-        if line.startswith('///'):
-          state = State.InEnumMemberComment
-          comment = self.__clean_comment_line(line)
-        elif line == '};':
-          state = State.InStruct
-          enums[enum.name] = enum
-        else:
-          # Enum member without documentation. Must be documented where the enum
-          # is used.
-          pass
-      elif state == State.InEnumMemberComment:
-        if line.startswith('///'):
-          comment += self.__clean_comment_line(line)
-        else:
-          state = State.InEnum
-          val = line.replace(',', '')
-          pos = val.find(" // ")
-          if pos != -1:
-            config = val[pos + 4:]
-            val = val[:pos]
-          else:
-            config = val
-          enum.values.append(EnumValue(val, comment, config))
-    if state != State.Finished:
-      raise Exception('Not finished by the end of file')
-
-    for option in options:
-      if option.type not in ['bool', 'unsigned', 'int', 'std::string',
-                             'std::vector<std::string>',
-                             'std::vector<IncludeCategory>',
-                             'std::vector<RawStringFormat>',
-                             'std::optional<unsigned>']:
-        if option.type in enums:
-          option.enum = enums[option.type]
-        elif option.type in nested_structs:
-          option.nested_struct = nested_structs[option.type]
-        else:
-          raise Exception('Unknown type: %s' % option.type)
-    return options
+    def __clean_comment_line(self, line: str):
+        match = re.match(r"^/// (?P<indent> +)?\\code(\{.(?P<lang>\w+)\})?$", line)
+        if match:
+            if self.in_code_block:
+                self.__warning("`\\code` in another `\\code`", line)
+            self.in_code_block = True
+            indent_str = match.group("indent")
+            if not indent_str:
+                indent_str = ""
+            self.code_indent = len(indent_str)
+            lang = match.group("lang")
+            if not lang:
+                lang = "c++"
+            return f"\n{indent_str}.. code-block:: {lang}\n\n"
+
+        endcode_match = re.match(r"^/// +\\endcode$", line)
+        if endcode_match:
+            if not self.in_code_block:
+                self.__warning(
+                    "no correct `\\code` found before this `\\endcode`", line
+                )
+            self.in_code_block = False
+            return ""
+
+        # check code block indentation
+        if (
+            self.in_code_block
+            and not line == "///"
+            and not line.startswith("///  " + " " * self.code_indent)
+        ):
+            if self.last_err_lineno == self.lineno - 1:
+                self.__print_line(line)
+            else:
+                self.__warning("code block should be indented", line)
+            self.last_err_lineno = self.lineno
+
+        match = re.match(r"^/// \\warning$", line)
+        if match:
+            return "\n.. warning:: \n\n"
+
+        endwarning_match = re.match(r"^/// +\\endwarning$", line)
+        if endwarning_match:
+            return ""
+        return line[4:] + "\n"
+
+    def read_options(self):
+        class State:
+            (
+                BeforeStruct,
+                Finished,
+                InStruct,
+                InNestedStruct,
+                InNestedFieldComment,
+                InFieldComment,
+                InEnum,
+                InEnumMemberComment,
+            ) = range(8)
+
+        state = State.BeforeStruct
+
+        options = []
+        enums = {}
+        nested_structs = {}
+        comment = ""
+        enum = None
+        nested_struct = None
+        version = None
+
+        for line in self.header:
+            self.lineno += 1
+            line = line.strip()
+            if state == State.BeforeStruct:
+                if line in ("struct FormatStyle {", "struct IncludeStyle {"):
+                    state = State.InStruct
+            elif state == State.InStruct:
+                if line.startswith("///"):
+                    state = State.InFieldComment
+                    comment = self.__clean_comment_line(line)
+                elif line == "};":
+                    state = State.Finished
+                    break
+            elif state == State.InFieldComment:
+                if line.startswith(r"/// \version"):
+                    match = re.match(r"/// \\version\s*(?P<version>[0-9.]+)*", line)
+                    if match:
+                        version = match.group("version")
+                elif line.startswith("///"):
+                    comment += self.__clean_comment_line(line)
+                elif line.startswith("enum"):
+                    state = State.InEnum
+                    name = re.sub(r"enum\s+(\w+)\s*(:((\s*\w+)+)\s*)?\{", "\\1", line)
+                    enum = Enum(name, comment)
+                elif line.startswith("struct"):
+                    state = State.InNestedStruct
+                    name = re.sub(r"struct\s+(\w+)\s*\{", "\\1", line)
+                    nested_struct = NestedStruct(name, comment)
+                elif line.endswith(";"):
+                    prefix = "// "
+                    if line.startswith(prefix):
+                        line = line[len(prefix) :]
+                    state = State.InStruct
+                    field_type, field_name = re.match(
+                        r"([<>:\w(,\s)]+)\s+(\w+);", line
+                    ).groups()
+
+                    if not version:
+                        self.__warning(f"missing version for {field_name}", line)
+                    option = Option(str(field_name), str(field_type), comment, version)
+                    options.append(option)
+                    version = None
+                else:
+                    raise Exception(
+                        "Invalid format, expected comment, field or enum\n" + line
+                    )
+            elif state == State.InNestedStruct:
+                if line.startswith("///"):
+                    state = State.InNestedFieldComment
+                    comment = self.__clean_comment_line(line)
+                elif line == "};":
+                    state = State.InStruct
+                    nested_structs[nested_struct.name] = nested_struct
+            elif state == State.InNestedFieldComment:
+                if line.startswith("///"):
+                    comment += self.__clean_comment_line(line)
+                else:
+                    state = State.InNestedStruct
+                    field_type, field_name = re.match(
+                        r"([<>:\w(,\s)]+)\s+(\w+);", line
+                    ).groups()
+                    if field_type in enums:
+                        nested_struct.values.append(
+                            NestedEnum(
+                                field_name,
+                                field_type,
+                                comment,
+                                enums[field_type].values,
+                            )
+                        )
+                    else:
+                        nested_struct.values.append(
+                            NestedField(field_type + " " + field_name, comment)
+                        )
+
+            elif state == State.InEnum:
+                if line.startswith("///"):
+                    state = State.InEnumMemberComment
+                    comment = self.__clean_comment_line(line)
+                elif line == "};":
+                    state = State.InStruct
+                    enums[enum.name] = enum
+                else:
+                    # Enum member without documentation. Must be documented where the enum
+                    # is used.
+                    pass
+            elif state == State.InEnumMemberComment:
+                if line.startswith("///"):
+                    comment += self.__clean_comment_line(line)
+                else:
+                    state = State.InEnum
+                    val = line.replace(",", "")
+                    pos = val.find(" // ")
+                    if pos != -1:
+                        config = val[pos + 4 :]
+                        val = val[:pos]
+                    else:
+                        config = val
+                    enum.values.append(EnumValue(val, comment, config))
+        if state != State.Finished:
+            raise Exception("Not finished by the end of file")
+
+        for option in options:
+            if option.type not in [
+                "bool",
+                "unsigned",
+                "int",
+                "std::string",
+                "std::vector<std::string>",
+                "std::vector<IncludeCategory>",
+                "std::vector<RawStringFormat>",
+                "std::optional<unsigned>",
+            ]:
+                if option.type in enums:
+                    option.enum = enums[option.type]
+                elif option.type in nested_structs:
+                    option.nested_struct = nested_structs[option.type]
+                else:
+                    raise Exception("Unknown type: %s" % option.type)
+        return options
 
 
 with open(FORMAT_STYLE_FILE) as f:
-  opts = OptionsReader(f).read_options()
+    opts = OptionsReader(f).read_options()
 with open(INCLUDE_STYLE_FILE) as f:
-  opts += OptionsReader(f).read_options()
+    opts += OptionsReader(f).read_options()
 
 opts = sorted(opts, key=lambda x: x.name)
-options_text = '\n\n'.join(map(str, opts))
+options_text = "\n\n".join(map(str, opts))
 
 with open(DOC_FILE) as f:
-  contents = f.read()
+    contents = f.read()
 
-contents = substitute(contents, 'FORMAT_STYLE_OPTIONS', options_text)
+contents = substitute(contents, "FORMAT_STYLE_OPTIONS", options_text)
 
-with open(DOC_FILE, 'wb') as output:
-  output.write(contents.encode())
+with open(DOC_FILE, "wb") as output:
+    output.write(contents.encode())
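
A quick orientation for the hunks above: black's output here is almost entirely
mechanical. It enforces 4-space indentation and prefers double-quoted strings,
so most lines in this file change only in whitespace and quote style. A minimal
sketch of those two rules (hypothetical snippet, not taken from the commit;
clean_comment is an invented placeholder):

    # Before black: 2-space indent, single-quoted strings.
    if line.startswith('///'):
      comment = clean_comment(line)

    # After black: 4-space indent, double quotes.
    if line.startswith("///"):
        comment = clean_comment(line)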

diff --git a/clang/docs/tools/generate_formatted_state.py b/clang/docs/tools/generate_formatted_state.py
index 86ccd65487cb4..66cebbf7af33a 100755
--- a/clang/docs/tools/generate_formatted_state.py
+++ b/clang/docs/tools/generate_formatted_state.py
@@ -9,14 +9,17 @@
 
 
 def get_git_revision_short_hash():
-    """ Get the get SHA in short hash form. """
-    return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']
-                                   ).decode(sys.stdout.encoding).strip()
+    """Get the get SHA in short hash form."""
+    return (
+        subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
+        .decode(sys.stdout.encoding)
+        .strip()
+    )
 
 
 def get_style(count, passed):
-    """ Determine if this directory is good based on  the number of clean
-        files vs the number of files in total. """
+    """Determine if this directory is good based on  the number of clean
+    files vs the number of files in total."""
     if passed == count:
         return ":good:"
     if passed != 0:
@@ -24,10 +27,10 @@ def get_style(count, passed):
     return ":none:"
 
 
-TOP_DIR = os.path.join(os.path.dirname(__file__), '../../..')
-CLANG_DIR = os.path.join(os.path.dirname(__file__), '../..')
-DOC_FILE = os.path.join(CLANG_DIR, 'docs/ClangFormattedStatus.rst')
-CLEAN_FILE = os.path.join(CLANG_DIR, 'docs/tools/clang-formatted-files.txt')
+TOP_DIR = os.path.join(os.path.dirname(__file__), "../../..")
+CLANG_DIR = os.path.join(os.path.dirname(__file__), "../..")
+DOC_FILE = os.path.join(CLANG_DIR, "docs/ClangFormattedStatus.rst")
+CLEAN_FILE = os.path.join(CLANG_DIR, "docs/tools/clang-formatted-files.txt")
 
 rootdir = TOP_DIR
 
@@ -75,15 +78,14 @@ def get_style(count, passed):
      - {style2}`{percent}%`
 """
 
-FNULL = open(os.devnull, 'w')
+FNULL = open(os.devnull, "w")
 
 
-with open(DOC_FILE, 'wb') as output:
+with open(DOC_FILE, "wb") as output:
     cleanfiles = open(CLEAN_FILE, "wb")
     sha = get_git_revision_short_hash()
     today = datetime.now().strftime("%B %d, %Y %H:%M:%S")
-    output.write(bytes(RST_PREFIX.format(today=today,
-                                         sha=sha).encode("utf-8")))
+    output.write(bytes(RST_PREFIX.format(today=today, sha=sha).encode("utf-8")))
 
     total_files_count = 0
     total_files_pass = 0
@@ -100,13 +102,14 @@ def get_style(count, passed):
                 git_check = subprocess.Popen(
                     ["git", "ls-files", "--error-unmatch", act_sub_dir],
                     stdout=FNULL,
-                    stderr=FNULL)
+                    stderr=FNULL,
+                )
                 if git_check.wait() != 0:
                     print("Skipping directory: ", act_sub_dir)
                     subdirs.remove(subdir)
 
         path = os.path.relpath(root, TOP_DIR)
-        path = path.replace('\\', '/')
+        path = path.replace("\\", "/")
 
         file_count = 0
         file_pass = 0
@@ -124,8 +127,8 @@ def get_style(count, passed):
             stdout, err = cmd.communicate()
 
             relpath = os.path.relpath(file_path, TOP_DIR)
-            relpath = relpath.replace('\\', '/')
-            if err.decode(sys.stdout.encoding).find(': warning:') > 0:
+            relpath = relpath.replace("\\", "/")
+            if err.decode(sys.stdout.encoding).find(": warning:") > 0:
                 print(relpath, ":", "FAIL")
                 file_fail += 1
             else:
@@ -139,25 +142,39 @@ def get_style(count, passed):
         total_files_fail += file_fail
 
         if file_count > 0:
-            percent = (int(100.0 * (float(file_pass)/float(file_count))))
+            percent = int(100.0 * (float(file_pass) / float(file_count)))
             style = get_style(file_count, file_pass)
-            output.write(bytes(TABLE_ROW.format(path=path,
-                                                count=file_count,
-                                                passes=file_pass,
-                                                fails=file_fail,
-                                                percent=str(percent), style="",
-                                                style2=style).encode("utf-8")))
+            output.write(
+                bytes(
+                    TABLE_ROW.format(
+                        path=path,
+                        count=file_count,
+                        passes=file_pass,
+                        fails=file_fail,
+                        percent=str(percent),
+                        style="",
+                        style2=style,
+                    ).encode("utf-8")
+                )
+            )
             output.flush()
 
             print("----\n")
             print(path, file_count, file_pass, file_fail, percent)
             print("----\n")
 
-    total_percent = (float(total_files_pass)/float(total_files_count))
+    total_percent = float(total_files_pass) / float(total_files_count)
     percent_str = str(int(100.0 * total_percent))
-    output.write(bytes(TABLE_ROW.format(path="Total",
-                                        count=total_files_count,
-                                        passes=total_files_pass,
-                                        fails=total_files_fail,
-                                        percent=percent_str, style=":total:",
-                                        style2=":total:").encode("utf-8")))
+    output.write(
+        bytes(
+            TABLE_ROW.format(
+                path="Total",
+                count=total_files_count,
+                passes=total_files_pass,
+                fails=total_files_fail,
+                percent=percent_str,
+                style=":total:",
+                style2=":total:",
+            ).encode("utf-8")
+        )
+    )
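
The write/format rewrites above show black's central layout rule: a call that
fits within the default 88-column limit stays (or is collapsed back) on one
line; one that does not is exploded so each argument gets its own line, plus a
trailing comma after the last argument, which keeps the exploded shape stable
on later runs. Both directions appear in this file (condensed from the hunks
above):

    # Collapsed: the hand-wrapped call fit on one line after all.
    output.write(bytes(RST_PREFIX.format(today=today, sha=sha).encode("utf-8")))

    # Exploded: too long for one line, so one keyword argument per line
    # and the "magic" trailing comma.
    output.write(
        bytes(
            TABLE_ROW.format(
                path=path,
                count=file_count,
                passes=file_pass,
                fails=file_fail,
            ).encode("utf-8")
        )
    )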

diff --git a/clang/lib/Tooling/DumpTool/generate_cxx_src_locs.py b/clang/lib/Tooling/DumpTool/generate_cxx_src_locs.py
index 771da5d9c9f37..dafb332961ede 100755
--- a/clang/lib/Tooling/DumpTool/generate_cxx_src_locs.py
+++ b/clang/lib/Tooling/DumpTool/generate_cxx_src_locs.py
@@ -8,22 +8,24 @@
 import shutil
 import argparse
 
+
 class Generator(object):
 
-    implementationContent = ''
+    implementationContent = ""
 
-    RefClades = {"DeclarationNameInfo",
+    RefClades = {
+        "DeclarationNameInfo",
         "NestedNameSpecifierLoc",
         "TemplateArgumentLoc",
-        "TypeLoc"}
+        "TypeLoc",
+    }
 
     def __init__(self, templateClasses):
         self.templateClasses = templateClasses
 
     def GeneratePrologue(self):
 
-        self.implementationContent += \
-            """
+        self.implementationContent += """
 /*===- Generated file -------------------------------------------*- C++ -*-===*\
 |*                                                                            *|
 |* Introspection of available AST node SourceLocations                        *|
@@ -63,20 +65,23 @@ def GenerateBaseGetLocationsDeclaration(self, CladeName):
         if CladeName in self.RefClades:
             InstanceDecoration = "&"
 
-        self.implementationContent += \
-            """
+        self.implementationContent += """
 void GetLocationsImpl(SharedLocationCall const& Prefix,
     clang::{0} const {1}Object, SourceLocationMap &Locs,
     SourceRangeMap &Rngs,
     std::vector<clang::TypeLoc> &TypeLocRecursionGuard);
-""".format(CladeName, InstanceDecoration)
+""".format(
+            CladeName, InstanceDecoration
+        )
 
-    def GenerateSrcLocMethod(self,
-            ClassName, ClassData, CreateLocalRecursionGuard):
+    def GenerateSrcLocMethod(self, ClassName, ClassData, CreateLocalRecursionGuard):
 
         NormalClassName = ClassName
-        RecursionGuardParam = ('' if CreateLocalRecursionGuard else \
-            ', std::vector<clang::TypeLoc>& TypeLocRecursionGuard')
+        RecursionGuardParam = (
+            ""
+            if CreateLocalRecursionGuard
+            else ", std::vector<clang::TypeLoc>& TypeLocRecursionGuard"
+        )
 
         if "templateParms" in ClassData:
             TemplatePreamble = "template <typename "
@@ -92,147 +97,163 @@ def GenerateSrcLocMethod(self,
                 TemplatePreamble += TA
 
             ClassName += ">"
-            TemplatePreamble += ">\n";
+            TemplatePreamble += ">\n"
             self.implementationContent += TemplatePreamble
 
-        self.implementationContent += \
-            """
+        self.implementationContent += """
 static void GetLocations{0}(SharedLocationCall const& Prefix,
     clang::{1} const &Object,
     SourceLocationMap &Locs, SourceRangeMap &Rngs {2})
 {{
-""".format(NormalClassName, ClassName, RecursionGuardParam)
+""".format(
+            NormalClassName, ClassName, RecursionGuardParam
+        )
 
-        if 'sourceLocations' in ClassData:
-            for locName in ClassData['sourceLocations']:
-                self.implementationContent += \
-                    """
+        if "sourceLocations" in ClassData:
+            for locName in ClassData["sourceLocations"]:
+                self.implementationContent += """
   Locs.insert(LocationAndString(Object.{0}(),
     llvm::makeIntrusiveRefCnt<LocationCall>(Prefix, "{0}")));
-""".format(locName)
+""".format(
+                    locName
+                )
 
-            self.implementationContent += '\n'
+            self.implementationContent += "\n"
 
-        if 'sourceRanges' in ClassData:
-            for rngName in ClassData['sourceRanges']:
-                self.implementationContent += \
-                    """
+        if "sourceRanges" in ClassData:
+            for rngName in ClassData["sourceRanges"]:
+                self.implementationContent += """
   Rngs.insert(RangeAndString(Object.{0}(),
     llvm::makeIntrusiveRefCnt<LocationCall>(Prefix, "{0}")));
-""".format(rngName)
-
-            self.implementationContent += '\n'
-
-        if 'typeLocs' in ClassData or 'typeSourceInfos' in ClassData \
-                or 'nestedNameLocs' in ClassData \
-                or 'declNameInfos' in ClassData:
+""".format(
+                    rngName
+                )
+
+            self.implementationContent += "\n"
+
+        if (
+            "typeLocs" in ClassData
+            or "typeSourceInfos" in ClassData
+            or "nestedNameLocs" in ClassData
+            or "declNameInfos" in ClassData
+        ):
             if CreateLocalRecursionGuard:
-                self.implementationContent += \
-                    'std::vector<clang::TypeLoc> TypeLocRecursionGuard;\n'
+                self.implementationContent += (
+                    "std::vector<clang::TypeLoc> TypeLocRecursionGuard;\n"
+                )
 
-            self.implementationContent += '\n'
+            self.implementationContent += "\n"
 
-            if 'typeLocs' in ClassData:
-                for typeLoc in ClassData['typeLocs']:
+            if "typeLocs" in ClassData:
+                for typeLoc in ClassData["typeLocs"]:
 
-                    self.implementationContent += \
-                        """
+                    self.implementationContent += """
               if (Object.{0}()) {{
                 GetLocationsImpl(
                     llvm::makeIntrusiveRefCnt<LocationCall>(Prefix, "{0}"),
                     Object.{0}(), Locs, Rngs, TypeLocRecursionGuard);
                 }}
-              """.format(typeLoc)
-
-            self.implementationContent += '\n'
-            if 'typeSourceInfos' in ClassData:
-                for tsi in ClassData['typeSourceInfos']:
-                    self.implementationContent += \
-                        """
+              """.format(
+                        typeLoc
+                    )
+
+            self.implementationContent += "\n"
+            if "typeSourceInfos" in ClassData:
+                for tsi in ClassData["typeSourceInfos"]:
+                    self.implementationContent += """
               if (Object.{0}()) {{
                 GetLocationsImpl(llvm::makeIntrusiveRefCnt<LocationCall>(
                     llvm::makeIntrusiveRefCnt<LocationCall>(Prefix, "{0}",
                         LocationCall::ReturnsPointer), "getTypeLoc"),
                     Object.{0}()->getTypeLoc(), Locs, Rngs, TypeLocRecursionGuard);
                     }}
-              """.format(tsi)
+              """.format(
+                        tsi
+                    )
 
-                self.implementationContent += '\n'
+                self.implementationContent += "\n"
 
-            if 'nestedNameLocs' in ClassData:
-                for NN in ClassData['nestedNameLocs']:
-                    self.implementationContent += \
-                        """
+            if "nestedNameLocs" in ClassData:
+                for NN in ClassData["nestedNameLocs"]:
+                    self.implementationContent += """
               if (Object.{0}())
                 GetLocationsImpl(
                     llvm::makeIntrusiveRefCnt<LocationCall>(Prefix, "{0}"),
                     Object.{0}(), Locs, Rngs, TypeLocRecursionGuard);
-              """.format(NN)
+              """.format(
+                        NN
+                    )
 
-            if 'declNameInfos' in ClassData:
-                for declName in ClassData['declNameInfos']:
+            if "declNameInfos" in ClassData:
+                for declName in ClassData["declNameInfos"]:
 
-                    self.implementationContent += \
-                        """
+                    self.implementationContent += """
                       GetLocationsImpl(
                           llvm::makeIntrusiveRefCnt<LocationCall>(Prefix, "{0}"),
                           Object.{0}(), Locs, Rngs, TypeLocRecursionGuard);
-                      """.format(declName)
+                      """.format(
+                        declName
+                    )
 
-        self.implementationContent += '}\n'
+        self.implementationContent += "}\n"
 
     def GenerateFiles(self, OutputFile):
-        with open(os.path.join(os.getcwd(),
-                  OutputFile), 'w') as f:
+        with open(os.path.join(os.getcwd(), OutputFile), "w") as f:
             f.write(self.implementationContent)
 
-    def GenerateBaseGetLocationsFunction(self, ASTClassNames,
-            ClassEntries, CladeName, InheritanceMap,
-            CreateLocalRecursionGuard):
+    def GenerateBaseGetLocationsFunction(
+        self,
+        ASTClassNames,
+        ClassEntries,
+        CladeName,
+        InheritanceMap,
+        CreateLocalRecursionGuard,
+    ):
 
-        MethodReturnType = 'NodeLocationAccessors'
+        MethodReturnType = "NodeLocationAccessors"
         InstanceDecoration = "*"
         if CladeName in self.RefClades:
             InstanceDecoration = "&"
 
-        Signature = \
-            'GetLocations(clang::{0} const {1}Object)'.format(
-                CladeName, InstanceDecoration)
-        ImplSignature = \
-            """
+        Signature = "GetLocations(clang::{0} const {1}Object)".format(
+            CladeName, InstanceDecoration
+        )
+        ImplSignature = """
     GetLocationsImpl(SharedLocationCall const& Prefix,
         clang::{0} const {1}Object, SourceLocationMap &Locs,
         SourceRangeMap &Rngs,
         std::vector<clang::TypeLoc> &TypeLocRecursionGuard)
-    """.format(CladeName, InstanceDecoration)
+    """.format(
+            CladeName, InstanceDecoration
+        )
 
-        self.implementationContent += 'void {0} {{ '.format(ImplSignature)
+        self.implementationContent += "void {0} {{ ".format(ImplSignature)
 
         if CladeName == "TypeLoc":
-            self.implementationContent += 'if (Object.isNull()) return;'
+            self.implementationContent += "if (Object.isNull()) return;"
 
-            self.implementationContent += \
-                """
+            self.implementationContent += """
             if (llvm::find(TypeLocRecursionGuard, Object) != TypeLocRecursionGuard.end())
               return;
             TypeLocRecursionGuard.push_back(Object);
             RecursionPopper RAII(TypeLocRecursionGuard);
                 """
 
-        RecursionGuardParam = ''
+        RecursionGuardParam = ""
         if not CreateLocalRecursionGuard:
-            RecursionGuardParam = ', TypeLocRecursionGuard'
+            RecursionGuardParam = ", TypeLocRecursionGuard"
 
-        ArgPrefix = '*'
+        ArgPrefix = "*"
         if CladeName in self.RefClades:
-            ArgPrefix = ''
-        self.implementationContent += \
-            'GetLocations{0}(Prefix, {1}Object, Locs, Rngs {2});'.format(
-                CladeName, ArgPrefix, RecursionGuardParam)
+            ArgPrefix = ""
+        self.implementationContent += (
+            "GetLocations{0}(Prefix, {1}Object, Locs, Rngs {2});".format(
+                CladeName, ArgPrefix, RecursionGuardParam
+            )
+        )
 
         if CladeName == "TypeLoc":
-            self.implementationContent += \
-                '''
+            self.implementationContent += """
         if (auto QTL = Object.getAs<clang::QualifiedTypeLoc>()) {
             auto Dequalified = QTL.getNextTypeLoc();
             return GetLocationsImpl(llvm::makeIntrusiveRefCnt<LocationCall>(Prefix, "getNextTypeLoc"),
@@ -240,7 +261,7 @@ def GenerateBaseGetLocationsFunction(self, ASTClassNames,
                                 Locs,
                                 Rngs,
                                 TypeLocRecursionGuard);
-        }'''
+        }"""
 
         for ASTClassName in ASTClassNames:
             if ASTClassName in self.templateClasses:
@@ -248,21 +269,22 @@ def GenerateBaseGetLocationsFunction(self, ASTClassNames,
             if ASTClassName == CladeName:
                 continue
             if CladeName != "TypeLoc":
-                self.implementationContent += \
-                """
+                self.implementationContent += """
 if (auto Derived = llvm::dyn_cast<clang::{0}>(Object)) {{
   GetLocations{0}(Prefix, *Derived, Locs, Rngs {1});
 }}
-""".format(ASTClassName, RecursionGuardParam)
+""".format(
+                    ASTClassName, RecursionGuardParam
+                )
                 continue
 
-            self.GenerateBaseTypeLocVisit(ASTClassName, ClassEntries,
-                RecursionGuardParam, InheritanceMap)
+            self.GenerateBaseTypeLocVisit(
+                ASTClassName, ClassEntries, RecursionGuardParam, InheritanceMap
+            )
 
-        self.implementationContent += '}'
+        self.implementationContent += "}"
 
-        self.implementationContent += \
-            """
+        self.implementationContent += """
 {0} NodeIntrospection::{1} {{
   NodeLocationAccessors Result;
   SharedLocationCall Prefix;
@@ -270,108 +292,125 @@ def GenerateBaseGetLocationsFunction(self, ASTClassNames,
 
   GetLocationsImpl(Prefix, Object, Result.LocationAccessors,
                    Result.RangeAccessors, TypeLocRecursionGuard);
-""".format(MethodReturnType, Signature)
-
-        self.implementationContent += 'return Result; }'
-
-    def GenerateBaseTypeLocVisit(self, ASTClassName, ClassEntries,
-            RecursionGuardParam, InheritanceMap):
-        CallPrefix = 'Prefix'
-        if ASTClassName != 'TypeLoc':
-            CallPrefix = \
-                '''llvm::makeIntrusiveRefCnt<LocationCall>(Prefix,
+""".format(
+            MethodReturnType, Signature
+        )
+
+        self.implementationContent += "return Result; }"
+
+    def GenerateBaseTypeLocVisit(
+        self, ASTClassName, ClassEntries, RecursionGuardParam, InheritanceMap
+    ):
+        CallPrefix = "Prefix"
+        if ASTClassName != "TypeLoc":
+            CallPrefix = """llvm::makeIntrusiveRefCnt<LocationCall>(Prefix,
                     "getAs<clang::{0}>", LocationCall::IsCast)
-                '''.format(ASTClassName)
+                """.format(
+                ASTClassName
+            )
 
         if ASTClassName in ClassEntries:
 
-            self.implementationContent += \
-            """
+            self.implementationContent += """
             if (auto ConcreteTL = Object.getAs<clang::{0}>())
               GetLocations{1}({2}, ConcreteTL, Locs, Rngs {3});
-            """.format(ASTClassName, ASTClassName,
-                       CallPrefix, RecursionGuardParam)
+            """.format(
+                ASTClassName, ASTClassName, CallPrefix, RecursionGuardParam
+            )
 
         if ASTClassName in InheritanceMap:
             for baseTemplate in self.templateClasses:
                 if baseTemplate in InheritanceMap[ASTClassName]:
-                    self.implementationContent += \
-                    """
+                    self.implementationContent += """
     if (auto ConcreteTL = Object.getAs<clang::{0}>())
       GetLocations{1}({2}, ConcreteTL, Locs, Rngs {3});
-    """.format(InheritanceMap[ASTClassName], baseTemplate,
-            CallPrefix, RecursionGuardParam)
-
+    """.format(
+                        InheritanceMap[ASTClassName],
+                        baseTemplate,
+                        CallPrefix,
+                        RecursionGuardParam,
+                    )
 
     def GenerateDynNodeVisitor(self, CladeNames):
-        MethodReturnType = 'NodeLocationAccessors'
+        MethodReturnType = "NodeLocationAccessors"
 
-        Signature = \
-            'GetLocations(clang::DynTypedNode const &Node)'
+        Signature = "GetLocations(clang::DynTypedNode const &Node)"
 
-        self.implementationContent += MethodReturnType \
-            + ' NodeIntrospection::' + Signature + '{'
+        self.implementationContent += (
+            MethodReturnType + " NodeIntrospection::" + Signature + "{"
+        )
 
         for CladeName in CladeNames:
             if CladeName == "DeclarationNameInfo":
                 continue
-            self.implementationContent += \
-                """
+            self.implementationContent += """
     if (const auto *N = Node.get<{0}>())
-    """.format(CladeName)
+    """.format(
+                CladeName
+            )
             ArgPrefix = ""
             if CladeName in self.RefClades:
                 ArgPrefix = "*"
-            self.implementationContent += \
-            """
-      return GetLocations({0}const_cast<{1} *>(N));""".format(ArgPrefix, CladeName)
+            self.implementationContent += """
+      return GetLocations({0}const_cast<{1} *>(N));""".format(
+                ArgPrefix, CladeName
+            )
 
-        self.implementationContent += '\nreturn {}; }'
+        self.implementationContent += "\nreturn {}; }"
 
     def GenerateEpilogue(self):
 
-        self.implementationContent += '''
+        self.implementationContent += """
   }
 }
-'''
+"""
+
 
 def main():
 
     parser = argparse.ArgumentParser()
-    parser.add_argument('--json-input-path',
-                      help='Read API description from FILE', metavar='FILE')
-    parser.add_argument('--output-file', help='Generate output in FILEPATH',
-                      metavar='FILEPATH')
-    parser.add_argument('--use-empty-implementation',
-                      help='Generate empty implementation',
-                      action="store", type=int)
-    parser.add_argument('--empty-implementation',
-                      help='Copy empty implementation from FILEPATH',
-                      action="store", metavar='FILEPATH')
+    parser.add_argument(
+        "--json-input-path", help="Read API description from FILE", metavar="FILE"
+    )
+    parser.add_argument(
+        "--output-file", help="Generate output in FILEPATH", metavar="FILEPATH"
+    )
+    parser.add_argument(
+        "--use-empty-implementation",
+        help="Generate empty implementation",
+        action="store",
+        type=int,
+    )
+    parser.add_argument(
+        "--empty-implementation",
+        help="Copy empty implementation from FILEPATH",
+        action="store",
+        metavar="FILEPATH",
+    )
 
     options = parser.parse_args()
 
     use_empty_implementation = options.use_empty_implementation
 
-    if (not use_empty_implementation
-            and not os.path.exists(options.json_input_path)):
+    if not use_empty_implementation and not os.path.exists(options.json_input_path):
         use_empty_implementation = True
 
     if not use_empty_implementation:
         with open(options.json_input_path) as f:
             jsonData = json.load(f)
 
-        if not 'classesInClade' in jsonData or not jsonData["classesInClade"]:
+        if not "classesInClade" in jsonData or not jsonData["classesInClade"]:
             use_empty_implementation = True
 
     if use_empty_implementation:
-        if not os.path.exists(options.output_file) or \
-                not filecmp.cmp(options.empty_implementation, options.output_file):
+        if not os.path.exists(options.output_file) or not filecmp.cmp(
+            options.empty_implementation, options.output_file
+        ):
             shutil.copyfile(options.empty_implementation, options.output_file)
         sys.exit(0)
 
     templateClasses = []
-    for (ClassName, ClassAccessors) in jsonData['classEntries'].items():
+    for (ClassName, ClassAccessors) in jsonData["classEntries"].items():
         if "templateParms" in ClassAccessors:
             templateClasses.append(ClassName)
 
@@ -379,33 +418,35 @@ def main():
 
     g.GeneratePrologue()
 
-    for (CladeName, ClassNameData) in jsonData['classesInClade'].items():
+    for (CladeName, ClassNameData) in jsonData["classesInClade"].items():
         g.GenerateBaseGetLocationsDeclaration(CladeName)
 
     def getCladeName(ClassName):
-      for (CladeName, ClassNameData) in jsonData['classesInClade'].items():
-        if ClassName in ClassNameData:
-          return CladeName
+        for (CladeName, ClassNameData) in jsonData["classesInClade"].items():
+            if ClassName in ClassNameData:
+                return CladeName
 
-    for (ClassName, ClassAccessors) in jsonData['classEntries'].items():
+    for (ClassName, ClassAccessors) in jsonData["classEntries"].items():
         cladeName = getCladeName(ClassName)
         g.GenerateSrcLocMethod(
-            ClassName, ClassAccessors,
-            cladeName not in Generator.RefClades)
+            ClassName, ClassAccessors, cladeName not in Generator.RefClades
+        )
 
-    for (CladeName, ClassNameData) in jsonData['classesInClade'].items():
+    for (CladeName, ClassNameData) in jsonData["classesInClade"].items():
         g.GenerateBaseGetLocationsFunction(
             ClassNameData,
-            jsonData['classEntries'],
+            jsonData["classEntries"],
             CladeName,
             jsonData["classInheritance"],
-            CladeName not in Generator.RefClades)
+            CladeName not in Generator.RefClades,
+        )
 
-    g.GenerateDynNodeVisitor(jsonData['classesInClade'].keys())
+    g.GenerateDynNodeVisitor(jsonData["classesInClade"].keys())
 
     g.GenerateEpilogue()
 
     g.GenerateFiles(options.output_file)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()
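
The most jarring pattern in this generator is the split `.format(...)`
trailer, where the argument list lands on its own lines below the closing
triple quote. black never re-wraps string literals, and a statement containing
a multi-line triple-quoted string can never fit on one line, so the only place
black can break is the argument list of the trailing call. Roughly (a sketch
reusing names from the hunks above; `decl` is invented):

    CladeName, InstanceDecoration = "TypeLoc", "&"

    # The template string is left untouched; only the .format() arguments
    # are moved onto their own lines.
    decl = """
    void GetLocationsImpl(clang::{0} const {1}Object);
    """.format(
        CladeName, InstanceDecoration
    )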

diff --git a/clang/test/AST/gen_ast_dump_json_test.py b/clang/test/AST/gen_ast_dump_json_test.py
index e6720485b3bf7..301d60e479dbf 100644
--- a/clang/test/AST/gen_ast_dump_json_test.py
+++ b/clang/test/AST/gen_ast_dump_json_test.py
@@ -22,23 +22,22 @@ def normalize(dict_var):
                     normalize(e)
         elif type(v) is str:
             if v != "0x0" and re.match(r"0x[0-9A-Fa-f]+", v):
-                dict_var[k] = '0x{{.*}}'
+                dict_var[k] = "0x{{.*}}"
             elif os.path.isfile(v):
-                dict_var[k] = '{{.*}}'
+                dict_var[k] = "{{.*}}"
             else:
-                splits = (v.split(' '))
+                splits = v.split(" ")
                 out_splits = []
                 for split in splits:
-                    inner_splits = split.rsplit(':',2)
+                    inner_splits = split.rsplit(":", 2)
                     if os.path.isfile(inner_splits[0]):
                         out_splits.append(
-                            '{{.*}}:%s:%s'
-                            %(inner_splits[1],
-                              inner_splits[2]))
+                            "{{.*}}:%s:%s" % (inner_splits[1], inner_splits[2])
+                        )
                         continue
                     out_splits.append(split)
 
-                dict_var[k] = ' '.join(out_splits)
+                dict_var[k] = " ".join(out_splits)
 
 
 def filter_json(dict_var, filters, out):
@@ -64,19 +63,39 @@ def default_clang_path():
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument("--clang", help="The clang binary (could be a relative or absolute path)",
-                        action="store", default=default_clang_path())
-    parser.add_argument("--source", help="the source file(s). Without --update, the command used to generate the JSON "
-                                         "will be of the format <clang> -cc1 -ast-dump=json <opts> <source>",
-                        action="store", nargs=argparse.ONE_OR_MORE, required=True)
-    parser.add_argument("--filters", help="comma separated list of AST filters. Ex: --filters=TypedefDecl,BuiltinType",
-                        action="store", default='')
+    parser.add_argument(
+        "--clang",
+        help="The clang binary (could be a relative or absolute path)",
+        action="store",
+        default=default_clang_path(),
+    )
+    parser.add_argument(
+        "--source",
+        help="the source file(s). Without --update, the command used to generate the JSON "
+        "will be of the format <clang> -cc1 -ast-dump=json <opts> <source>",
+        action="store",
+        nargs=argparse.ONE_OR_MORE,
+        required=True,
+    )
+    parser.add_argument(
+        "--filters",
+        help="comma separated list of AST filters. Ex: --filters=TypedefDecl,BuiltinType",
+        action="store",
+        default="",
+    )
     update_or_generate_group = parser.add_mutually_exclusive_group()
-    update_or_generate_group.add_argument("--update", help="Update the file in-place", action="store_true")
-    update_or_generate_group.add_argument("--opts", help="other options",
-                                          action="store", default='', type=str)
-    parser.add_argument("--update-manual", help="When using --update, also update files that do not have the "
-                                                "autogenerated disclaimer", action="store_true")
+    update_or_generate_group.add_argument(
+        "--update", help="Update the file in-place", action="store_true"
+    )
+    update_or_generate_group.add_argument(
+        "--opts", help="other options", action="store", default="", type=str
+    )
+    parser.add_argument(
+        "--update-manual",
+        help="When using --update, also update files that do not have the "
+        "autogenerated disclaimer",
+        action="store_true",
+    )
     args = parser.parse_args()
 
     if not args.source:
@@ -87,15 +106,22 @@ def main():
         sys.exit("clang binary specified not present.")
 
     for src in args.source:
-        process_file(src, clang_binary, cmdline_filters=args.filters,
-                     cmdline_opts=args.opts, do_update=args.update,
-                     force_update=args.update_manual)
-
-
-def process_file(source_file, clang_binary, cmdline_filters, cmdline_opts,
-                 do_update, force_update):
-    note_firstline = "// NOTE: CHECK lines have been autogenerated by " \
-                     "gen_ast_dump_json_test.py"
+        process_file(
+            src,
+            clang_binary,
+            cmdline_filters=args.filters,
+            cmdline_opts=args.opts,
+            do_update=args.update,
+            force_update=args.update_manual,
+        )
+
+
+def process_file(
+    source_file, clang_binary, cmdline_filters, cmdline_opts, do_update, force_update
+):
+    note_firstline = (
+        "// NOTE: CHECK lines have been autogenerated by " "gen_ast_dump_json_test.py"
+    )
     filters_line_prefix = "// using --filters="
     note = note_firstline
 
@@ -110,21 +136,27 @@ def process_file(source_file, clang_binary, cmdline_filters, cmdline_opts,
                 if found_autogenerated_line:
                     # print("Filters line: '", line.rstrip(), "'", sep="")
                     if line.startswith(filters_line_prefix):
-                        filters_line = line[len(filters_line_prefix):].rstrip()
+                        filters_line = line[len(filters_line_prefix) :].rstrip()
                     break
                 if line.startswith(note_firstline):
                     found_autogenerated_line = True
                     # print("Found autogenerated disclaimer at line", i + 1)
         if not found_autogenerated_line and not force_update:
-            print("Not updating", source_file, "since it is not autogenerated.",
-                  file=sys.stderr)
+            print(
+                "Not updating",
+                source_file,
+                "since it is not autogenerated.",
+                file=sys.stderr,
+            )
             return
         if not cmdline_filters and filters_line:
             cmdline_filters = filters_line
             print("Inferred filters as '" + cmdline_filters + "'")
 
         if "RUN: %clang_cc1 " not in first_line:
-            sys.exit("When using --update the first line of the input file must contain RUN: %clang_cc1")
+            sys.exit(
+                "When using --update the first line of the input file must contain RUN: %clang_cc1"
+            )
         clang_start = first_line.find("%clang_cc1") + len("%clang_cc1")
         file_check_idx = first_line.rfind("| FileCheck")
         if file_check_idx:
@@ -142,13 +174,13 @@ def process_file(source_file, clang_binary, cmdline_filters, cmdline_opts,
         options = cmdline_opts.split()
         options.append("-ast-dump=json")
     cmd.extend(options)
-    using_ast_dump_filter = any('ast-dump-filter' in arg for arg in cmd)
+    using_ast_dump_filter = any("ast-dump-filter" in arg for arg in cmd)
     cmd.append(source_file)
     print("Will run", cmd)
     filters = set()
     if cmdline_filters:
         note += "\n" + filters_line_prefix + cmdline_filters
-        filters = set(cmdline_filters.split(','))
+        filters = set(cmdline_filters.split(","))
     print("Will use the following filters:", filters)
 
     try:
@@ -156,7 +188,7 @@ def process_file(source_file, clang_binary, cmdline_filters, cmdline_opts,
     except Exception as ex:
         print("The clang command failed with %s" % ex)
         return -1
-    
+
     out_asts = []
     if using_ast_dump_filter:
         # If we're using a filter, then we might have multiple JSON objects
@@ -183,7 +215,7 @@ def process_file(source_file, clang_binary, cmdline_filters, cmdline_opts,
             out_asts.append(j)
         else:
             filter_json(j, filters, out_asts)
-        
+
     with tempfile.NamedTemporaryFile("w", delete=False) as f:
         with open(source_file, "r") as srcf:
             for line in srcf.readlines():
@@ -194,16 +226,16 @@ def process_file(source_file, clang_binary, cmdline_filters, cmdline_opts,
         f.write(note + "\n")
         for out_ast in out_asts:
             append_str = json.dumps(out_ast, indent=1, ensure_ascii=False)
-            out_str = '\n\n'
+            out_str = "\n\n"
             out_str += "// CHECK-NOT: {{^}}Dumping\n"
             index = 0
             for append_line in append_str.splitlines()[2:]:
                 if index == 0:
-                    out_str += '// CHECK: %s\n' %(append_line.rstrip())
+                    out_str += "// CHECK: %s\n" % (append_line.rstrip())
                     index += 1
                 else:
-                    out_str += '// CHECK-NEXT: %s\n' %(append_line.rstrip())
-                    
+                    out_str += "// CHECK-NEXT: %s\n" % (append_line.rstrip())
+
             f.write(out_str)
         f.flush()
         f.close()
@@ -211,13 +243,13 @@ def process_file(source_file, clang_binary, cmdline_filters, cmdline_opts,
             print("Updating json appended source file to %s." % source_file)
             copyfile(f.name, source_file)
         else:
-            partition = source_file.rpartition('.')
-            dest_path = '%s-json%s%s' % (partition[0], partition[1], partition[2])
+            partition = source_file.rpartition(".")
+            dest_path = "%s-json%s%s" % (partition[0], partition[1], partition[2])
             print("Writing json appended source file to %s." % dest_path)
             copyfile(f.name, dest_path)
         os.remove(f.name)
     return 0
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
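
Two smaller normalizations are easy to miss in this file. Adjacent string
literals (implicit concatenation) are kept as-is apart from re-quoting, and a
slice bound that is a real expression gains a space on each side of the colon,
following PEP 8's advice to treat the slice colon like a binary operator.
A small sketch with example values:

    prefix = "// using --filters="
    line = "// using --filters=TypedefDecl"

    head = line[3:8]               # simple bounds: no padding around the colon
    filters = line[len(prefix) :]  # complex bound: padded, as in the hunk above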

diff --git a/clang/test/Analysis/analyzer_test.py b/clang/test/Analysis/analyzer_test.py
index 03124333fe7bf..c476ceea9a59e 100644
--- a/clang/test/Analysis/analyzer_test.py
+++ b/clang/test/Analysis/analyzer_test.py
@@ -3,7 +3,6 @@
 
 # Custom format class for static analyzer tests
 class AnalyzerTest(lit.formats.ShTest):
-
     def __init__(self, execute_external, use_z3_solver=False):
         super(AnalyzerTest, self).__init__(execute_external)
         self.use_z3_solver = use_z3_solver
@@ -15,18 +14,24 @@ def execute(self, test, litConfig):
         saved_test = test
         lit.TestRunner.parseIntegratedTestScript(test)
 
-        if 'z3' not in test.requires:
-            results.append(self.executeWithAnalyzeSubstitution(
-                saved_test, litConfig, '-analyzer-constraints=range'))
+        if "z3" not in test.requires:
+            results.append(
+                self.executeWithAnalyzeSubstitution(
+                    saved_test, litConfig, "-analyzer-constraints=range"
+                )
+            )
 
             if results[-1].code == lit.Test.FAIL:
                 return results[-1]
 
         # If z3 backend available, add an additional run line for it
-        if self.use_z3_solver == '1':
-            assert(test.config.clang_staticanalyzer_z3 == '1')
-            results.append(self.executeWithAnalyzeSubstitution(
-                saved_test, litConfig, '-analyzer-constraints=z3 -DANALYZER_CM_Z3'))
+        if self.use_z3_solver == "1":
+            assert test.config.clang_staticanalyzer_z3 == "1"
+            results.append(
+                self.executeWithAnalyzeSubstitution(
+                    saved_test, litConfig, "-analyzer-constraints=z3 -DANALYZER_CM_Z3"
+                )
+            )
 
         # Combine all result outputs into the last element
         for x in results:
@@ -35,14 +40,14 @@ def execute(self, test, litConfig):
 
         if results:
             return results[-1]
-        return lit.Test.Result(lit.Test.UNSUPPORTED,
-            "Test requires the following unavailable features: z3")
+        return lit.Test.Result(
+            lit.Test.UNSUPPORTED, "Test requires the following unavailable features: z3"
+        )
 
     def executeWithAnalyzeSubstitution(self, test, litConfig, substitution):
         saved_substitutions = list(test.config.substitutions)
-        test.config.substitutions.append(('%analyze', substitution))
-        result = lit.TestRunner.executeShTest(test, litConfig,
-            self.execute_external)
+        test.config.substitutions.append(("%analyze", substitution))
+        result = lit.TestRunner.executeShTest(test, litConfig, self.execute_external)
         test.config.substitutions = saved_substitutions
 
         return result
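
One change above is easy to misread as semantic: `assert(...)` became a bare
`assert ...`. Since assert is a statement, not a function, the parentheses
were redundant and black strips them; the meaning is unchanged. (Parentheses
around an assert are also a classic trap: `assert (cond, "msg")` asserts a
two-element tuple, which is always true.) A toy illustration, with `use_z3`
standing in for the config value:

    use_z3 = "1"

    assert (use_z3 == "1")  # redundant parens; black rewrites this ...
    assert use_z3 == "1"    # ... to the bare statement form, same meaning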

diff --git a/clang/test/Analysis/check-analyzer-fixit.py b/clang/test/Analysis/check-analyzer-fixit.py
index 6a8f6859f816b..b616255de89b0 100644
--- a/clang/test/Analysis/check-analyzer-fixit.py
+++ b/clang/test/Analysis/check-analyzer-fixit.py
@@ -1,16 +1,16 @@
 #!/usr/bin/env python
 #
-#===- check-analyzer-fixit.py - Static Analyzer test helper ---*- python -*-===#
+# ===- check-analyzer-fixit.py - Static Analyzer test helper ---*- python -*-===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 #
 # This file copy-pasted mostly from the Clang-Tidy's 'check_clang_tidy.py'.
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 r"""
 Clang Static Analyzer test helper
@@ -33,7 +33,7 @@
 
 
 def write_file(file_name, text):
-    with open(file_name, 'w') as f:
+    with open(file_name, "w") as f:
         f.write(text)
 
 
@@ -44,18 +44,18 @@ def run_test_once(args, extra_args):
 
     file_name_with_extension = input_file_name
     _, extension = os.path.splitext(file_name_with_extension)
-    if extension not in ['.c', '.hpp', '.m', '.mm']:
-        extension = '.cpp'
+    if extension not in [".c", ".hpp", ".m", ".mm"]:
+        extension = ".cpp"
     temp_file_name = temp_file_name + extension
 
-    with open(input_file_name, 'r') as input_file:
+    with open(input_file_name, "r") as input_file:
         input_text = input_file.read()
 
     # Remove the contents of the CHECK lines to avoid CHECKs matching on
     # themselves.  We need to keep the comments to preserve line numbers while
     # avoiding empty lines which could potentially trigger formatting-related
     # checks.
-    cleaned_test = re.sub('// *CHECK-[A-Z0-9\-]*:[^\r\n]*', '//', input_text)
+    cleaned_test = re.sub("// *CHECK-[A-Z0-9\-]*:[^\r\n]*", "//", input_text)
     write_file(temp_file_name, cleaned_test)
 
     original_file_name = temp_file_name + ".orig"
@@ -63,59 +63,82 @@ def run_test_once(args, extra_args):
 
     try:
         builtin_include_dir = subprocess.check_output(
-            ['clang', '-print-file-name=include'], stderr=subprocess.STDOUT).decode()
+            ["clang", "-print-file-name=include"], stderr=subprocess.STDOUT
+        ).decode()
     except subprocess.CalledProcessError as e:
-        print('Cannot print Clang include directory: ' + e.output.decode())
+        print("Cannot print Clang include directory: " + e.output.decode())
 
     builtin_include_dir = os.path.normpath(builtin_include_dir)
 
-    args = (['clang', '-cc1', '-internal-isystem', builtin_include_dir,
-             '-nostdsysteminc', '-analyze', '-analyzer-constraints=range',
-             '-analyzer-config', 'apply-fixits=true']
-            + clang_analyzer_extra_args + ['-verify', temp_file_name])
-
-    print('Running ' + str(args) + '...')
+    args = (
+        [
+            "clang",
+            "-cc1",
+            "-internal-isystem",
+            builtin_include_dir,
+            "-nostdsysteminc",
+            "-analyze",
+            "-analyzer-constraints=range",
+            "-analyzer-config",
+            "apply-fixits=true",
+        ]
+        + clang_analyzer_extra_args
+        + ["-verify", temp_file_name]
+    )
+
+    print("Running " + str(args) + "...")
 
     try:
-        clang_analyzer_output = \
-            subprocess.check_output(args, stderr=subprocess.STDOUT).decode()
+        clang_analyzer_output = subprocess.check_output(
+            args, stderr=subprocess.STDOUT
+        ).decode()
     except subprocess.CalledProcessError as e:
-        print('Clang Static Analyzer test failed:\n' + e.output.decode())
+        print("Clang Static Analyzer test failed:\n" + e.output.decode())
         raise
 
-    print('----------------- Clang Static Analyzer output -----------------\n' +
-          clang_analyzer_output +
-          '\n--------------------------------------------------------------')
+    print(
+        "----------------- Clang Static Analyzer output -----------------\n"
+        + clang_analyzer_output
+        + "\n--------------------------------------------------------------"
+    )
 
     try:
         diff_output = subprocess.check_output(
-            ['diff', '-u', original_file_name, temp_file_name],
-            stderr=subprocess.STDOUT)
+            ["diff", "-u", original_file_name, temp_file_name], stderr=subprocess.STDOUT
+        )
     except subprocess.CalledProcessError as e:
         diff_output = e.output
 
-    print('----------------------------- Fixes ----------------------------\n' +
-          diff_output.decode() +
-          '\n--------------------------------------------------------------')
+    print(
+        "----------------------------- Fixes ----------------------------\n"
+        + diff_output.decode()
+        + "\n--------------------------------------------------------------"
+    )
 
     try:
         subprocess.check_output(
-            ['FileCheck', '-input-file=' + temp_file_name, input_file_name,
-             '-check-prefixes=CHECK-FIXES', '-strict-whitespace'],
-            stderr=subprocess.STDOUT)
+            [
+                "FileCheck",
+                "-input-file=" + temp_file_name,
+                input_file_name,
+                "-check-prefixes=CHECK-FIXES",
+                "-strict-whitespace",
+            ],
+            stderr=subprocess.STDOUT,
+        )
     except subprocess.CalledProcessError as e:
-        print('FileCheck failed:\n' + e.output.decode())
+        print("FileCheck failed:\n" + e.output.decode())
         raise
 
 
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('input_file_name')
-    parser.add_argument('temp_file_name')
+    parser.add_argument("input_file_name")
+    parser.add_argument("temp_file_name")
 
     args, extra_args = parser.parse_known_args()
     run_test_once(args, extra_args)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
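
The long `args = (...)` hunk above shows how black lays out a binary-operator
chain that cannot fit on one line: the outer parentheses are kept, each
operand moves to its own line, and the `+` leads every continuation line.
Condensed, with placeholder values for the variables involved:

    builtin_include_dir = "/usr/lib/clang/include"           # placeholder path
    clang_analyzer_extra_args = ["-analyzer-checker=core"]   # placeholder
    temp_file_name = "input.cpp"                             # placeholder

    args = (
        [
            "clang",
            "-cc1",
            "-internal-isystem",
            builtin_include_dir,
            "-nostdsysteminc",
        ]
        + clang_analyzer_extra_args
        + ["-verify", temp_file_name]
    )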

diff --git a/clang/test/Analysis/exploded-graph-rewriter/lit.local.cfg b/clang/test/Analysis/exploded-graph-rewriter/lit.local.cfg
index 9786932aa5a16..ca334e955bb8a 100644
--- a/clang/test/Analysis/exploded-graph-rewriter/lit.local.cfg
+++ b/clang/test/Analysis/exploded-graph-rewriter/lit.local.cfg
@@ -7,11 +7,17 @@ import os
 use_lit_shell = os.environ.get("LIT_USE_INTERNAL_SHELL")
 config.test_format = lit.formats.ShTest(use_lit_shell == "0")
 
-config.substitutions.append(('%exploded_graph_rewriter',
-                             '\'%s\' %s --dump-dot-only' % (
-                                config.python_executable,
-                                os.path.join(
-                                    config.clang_src_dir,
-                                    'utils', 'analyzer', 'exploded-graph-rewriter.py'))))
+config.substitutions.append(
+    (
+        "%exploded_graph_rewriter",
+        "'%s' %s --dump-dot-only"
+        % (
+            config.python_executable,
+            os.path.join(
+                config.clang_src_dir, "utils", "analyzer", "exploded-graph-rewriter.py"
+            ),
+        ),
+    )
+)
 
-config.suffixes.add('.dot')
+config.suffixes.add(".dot")

diff --git a/clang/test/Analysis/lit.local.cfg b/clang/test/Analysis/lit.local.cfg
index 1e8cf4c3b7c4b..e2946cf345cb9 100644
--- a/clang/test/Analysis/lit.local.cfg
+++ b/clang/test/Analysis/lit.local.cfg
@@ -6,23 +6,37 @@ import site
 # is available.
 site.addsitedir(os.path.dirname(__file__))
 import analyzer_test
+
 config.test_format = analyzer_test.AnalyzerTest(
-        config.test_format.execute_external, config.use_z3_solver)
+    config.test_format.execute_external, config.use_z3_solver
+)
 
 # Filtering command used by Clang Analyzer tests (when comparing .plist files
 # with reference output)
-config.substitutions.append(('%normalize_plist',
-    "grep -Ev '%s|%s|%s'" %
-        ('^[[:space:]]*<string>.* version .*</string>[[:space:]]*$',
-         '^[[:space:]]*<string>/.*</string>[[:space:]]*$',
-         '^[[:space:]]*<string>.:.*</string>[[:space:]]*$')))
+config.substitutions.append(
+    (
+        "%normalize_plist",
+        "grep -Ev '%s|%s|%s'"
+        % (
+            "^[[:space:]]*<string>.* version .*</string>[[:space:]]*$",
+            "^[[:space:]]*<string>/.*</string>[[:space:]]*$",
+            "^[[:space:]]*<string>.:.*</string>[[:space:]]*$",
+        ),
+    )
+)
 
 # Filtering command for testing SARIF output against reference output.
-config.substitutions.append(('%normalize_sarif',
-    "grep -Ev '^[[:space:]]*(%s|%s|%s)[[:space:]]*$'" %
-        ('"uri": "file:.*%basename_t"',
-         '"version": ".* version .*"',
-         '"version": "2.1.0"')))
+config.substitutions.append(
+    (
+        "%normalize_sarif",
+        "grep -Ev '^[[:space:]]*(%s|%s|%s)[[:space:]]*$'"
+        % (
+            '"uri": "file:.*%basename_t"',
+            '"version": ".* version .*"',
+            '"version": "2.1.0"',
+        ),
+    )
+)
 
 if not config.root.clang_staticanalyzer:
     config.unsupported = True

diff --git a/clang/test/Analysis/scan-build/lit.local.cfg b/clang/test/Analysis/scan-build/lit.local.cfg
index 0c28dc63b5e81..fab52b1c7bd67 100644
--- a/clang/test/Analysis/scan-build/lit.local.cfg
+++ b/clang/test/Analysis/scan-build/lit.local.cfg
@@ -9,12 +9,16 @@ config.test_format = lit.formats.ShTest(use_lit_shell == "0")
 
 clang_path = config.clang if config.have_llvm_driver else os.path.realpath(config.clang)
 
-config.substitutions.append(('%scan-build',
-                             '\'%s\' --use-analyzer=%s ' % (
-                                 lit.util.which('scan-build',
-                                                os.path.join(
-                                                    config.clang_src_dir,
-                                                    'tools',
-                                                    'scan-build',
-                                                    'bin')),
-                                 clang_path)))
+config.substitutions.append(
+    (
+        "%scan-build",
+        "'%s' --use-analyzer=%s "
+        % (
+            lit.util.which(
+                "scan-build",
+                os.path.join(config.clang_src_dir, "tools", "scan-build", "bin"),
+            ),
+            clang_path,
+        ),
+    )
+)
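
The mechanical change in all of these hunks is the same: black normalizes
string quotes to double quotes and explodes long call arguments one per line.
A small sketch of reproducing one such rewrite through black's Python API
(black.format_str and black.Mode are the public entry points; check your
installed black version for exact output):

    import black

    src = "config.substitutions.append(('%scan-build', 'scan-build'))\n"
    # format_str returns the reformatted source as a string.
    print(black.format_str(src, mode=black.Mode()), end="")
    # -> config.substitutions.append(("%scan-build", "scan-build"))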

diff --git a/clang/test/CodeGen/builtins-nvptx-mma.py b/clang/test/CodeGen/builtins-nvptx-mma.py
index baadc7e1a7428..72f4f5c9f655e 100644
--- a/clang/test/CodeGen/builtins-nvptx-mma.py
+++ b/clang/test/CodeGen/builtins-nvptx-mma.py
@@ -12,352 +12,395 @@
 from itertools import product
 from string import Template
 
+
 class MMAFrag:
-  def __init__(self, geom, frag, ptx_elt_type):
-    self.geom = geom
-    self.frag = frag
-    self.ptx_type = ptx_elt_type;
+    def __init__(self, geom, frag, ptx_elt_type):
+        self.geom = geom
+        self.frag = frag
+        self.ptx_type = ptx_elt_type
+
+    def __repr__(self):
+        return "%s:%s:%s" % (self.geom, self.frag, self.ptx_type)
 
-  def __repr__(self):
-    return "%s:%s:%s" % (self.geom, self.frag, self.ptx_type)
 
 class MMAOp:
-  def __init__(self, a, b, c, d, b1op=""):
-    self.a = a
-    self.b = b
-    self.c = c
-    self.d = d
-    self.b1op = b1op
+    def __init__(self, a, b, c, d, b1op=""):
+        self.a = a
+        self.b = b
+        self.c = c
+        self.d = d
+        self.b1op = b1op
+
+    def __repr__(self):
+        return "{A:%s, B:%s, C:%s, D:%s}" % (self.a, self.b, self.c, self.d)
 
-  def __repr__(self):
-    return ("{A:%s, B:%s, C:%s, D:%s}" % (self.a, self.b, self.c, self.d ))
 
 def make_mma_ops(geoms, types_a, types_b, types_c, types_d, b1ops=None):
-  ops = []
-  if b1ops is None:
-    b1ops = [""]
-  for geom, type_a, type_c in product( geoms,  types_a, types_c):
-    for type_b, type_d in product(types_b if types_b else [type_a],
-                                  types_d if types_d else [type_c]):
-      ops += [
-          MMAOp(MMAFrag(geom, "a", type_a),
-                MMAFrag(geom, "b", type_b),
-                MMAFrag(geom, "c", type_c),
-                MMAFrag(geom, "d", type_d), b1op)
-          for b1op in b1ops]
-  return ops
+    ops = []
+    if b1ops is None:
+        b1ops = [""]
+    for geom, type_a, type_c in product(geoms, types_a, types_c):
+        for type_b, type_d in product(
+            types_b if types_b else [type_a], types_d if types_d else [type_c]
+        ):
+            ops += [
+                MMAOp(
+                    MMAFrag(geom, "a", type_a),
+                    MMAFrag(geom, "b", type_b),
+                    MMAFrag(geom, "c", type_c),
+                    MMAFrag(geom, "d", type_d),
+                    b1op,
+                )
+                for b1op in b1ops
+            ]
+    return ops
+
 
 def make_ldst_ops(geoms, frags, types):
-  return [MMAFrag(geom, frag, ptx_type) for (geom, frag, ptx_type)
-          in product(geoms, frags, types)]
+    return [
+        MMAFrag(geom, frag, ptx_type)
+        for (geom, frag, ptx_type) in product(geoms, frags, types)
+    ]
+
 
 def get_mma_ops():
-  return (make_mma_ops(["m16n16k8"],
-                       ["tf32"], [], ["f32"], []) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["bf16"], [], ["f32"], []) +
-          make_mma_ops(["m8n8k4"],
-                       ["f64"], [], ["f64"], []) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["f16"], [], ["f16", "f32"], ["f16", "f32"]) +
-          make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                       ["s8", "u8"], [], ["s32"], []) +
-          make_mma_ops(["m8n8k32"],
-                       ["s4", "u4"], [], ["s32"], []) +
-          make_mma_ops(["m8n8k128"],
-                       ["b1"], [], ["s32"], [],
-                       [".xor.popc", ".and.popc"]))
+    return (
+        make_mma_ops(["m16n16k8"], ["tf32"], [], ["f32"], [])
+        + make_mma_ops(["m16n16k16", "m32n8k16", "m8n32k16"], ["bf16"], [], ["f32"], [])
+        + make_mma_ops(["m8n8k4"], ["f64"], [], ["f64"], [])
+        + make_mma_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"],
+            ["f16"],
+            [],
+            ["f16", "f32"],
+            ["f16", "f32"],
+        )
+        + make_mma_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"], ["s8", "u8"], [], ["s32"], []
+        )
+        + make_mma_ops(["m8n8k32"], ["s4", "u4"], [], ["s32"], [])
+        + make_mma_ops(
+            ["m8n8k128"], ["b1"], [], ["s32"], [], [".xor.popc", ".and.popc"]
+        )
+    )
+
 
 def get_ldst_ops():
-  # NOTE: fragemts are from the point of view of PTX.
-  # fragment `d` is only for store ops, others for both loads and stores.
-  return (make_ldst_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                        ["a", "b"], ["f16", "u8", "s8", "bf16"]) +
-          make_ldst_ops(["m16n16k16", "m32n8k16", "m8n32k16"],
-                        ["c", "d"], ["f16", "f32", "s32"]) +
-          make_ldst_ops(["m8n8k32"], ["a", "b"], ["s4","u4"]) +
-          make_ldst_ops(["m8n8k128"], ["a", "b"], ["b1"]) +
-          make_ldst_ops(["m8n8k32", "m8n8k128"],  ["c", "d"], ["s32"]) +
-          make_ldst_ops(["m8n8k4"], ["a", "b", "c", "d"], ["f64"]) +
-          # TF32 m16n16k8 is odd.
-          # For fragment 'C' it uses __mma_*tf32*_m16n16k8_ld_c
-          # but 'D' calls __mma_m16n16k8_st_c_*f32*.
-          make_ldst_ops(["m16n16k8"], ["a", "b", "c"], ["tf32"]) +
-          make_ldst_ops(["m16n16k8"], ["d"], ["f32"]))
+    # NOTE: fragments are from the point of view of PTX.
+    # fragment `d` is only for store ops; the others are for both loads and stores.
+    return (
+        make_ldst_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"],
+            ["a", "b"],
+            ["f16", "u8", "s8", "bf16"],
+        )
+        + make_ldst_ops(
+            ["m16n16k16", "m32n8k16", "m8n32k16"], ["c", "d"], ["f16", "f32", "s32"]
+        )
+        + make_ldst_ops(["m8n8k32"], ["a", "b"], ["s4", "u4"])
+        + make_ldst_ops(["m8n8k128"], ["a", "b"], ["b1"])
+        + make_ldst_ops(["m8n8k32", "m8n8k128"], ["c", "d"], ["s32"])
+        + make_ldst_ops(["m8n8k4"], ["a", "b", "c", "d"], ["f64"])
+        +
+        # TF32 m16n16k8 is odd.
+        # For fragment 'C' it uses __mma_*tf32*_m16n16k8_ld_c
+        # but 'D' calls __mma_m16n16k8_st_c_*f32*.
+        make_ldst_ops(["m16n16k8"], ["a", "b", "c"], ["tf32"])
+        + make_ldst_ops(["m16n16k8"], ["d"], ["f32"])
+    )
+
 
 def is_geom_supported(geom):
-  # geometries for FP and ints.
-  if geom in ["m8n32k16", "m32n8k16"]:
-    return ptx_version >= 61
-  # geometries for sub-ints.
-  if geom in ["m8n8k32", "m8n8k128"]:
-    return ptx_version >= 63 and gpu_arch >= 75
-  if geom == "m16n16k16":
-    return ptx_version >= 60
-  if geom in ["m16n16k8", "m8n8k4"]:
-    return ptx_version >= 70 and gpu_arch >= 80
-  assert(False) # Unexpected geometry.
+    # geometries for FP and ints.
+    if geom in ["m8n32k16", "m32n8k16"]:
+        return ptx_version >= 61
+    # geometries for sub-ints.
+    if geom in ["m8n8k32", "m8n8k128"]:
+        return ptx_version >= 63 and gpu_arch >= 75
+    if geom == "m16n16k16":
+        return ptx_version >= 60
+    if geom in ["m16n16k8", "m8n8k4"]:
+        return ptx_version >= 70 and gpu_arch >= 80
+    assert False  # Unexpected geometry.
+
 
 def is_type_supported(ptx_type):
-  if ptx_type in ["s8", "u8", "s32"]:
-    return ptx_version >= 63 and gpu_arch >= 72
-  if ptx_type in ["s4", "u4", "b1"]:
-    return ptx_version >= 63 and gpu_arch >= 75
-  if ptx_type in ["bf16", "tf32", "f64"]:
-    return ptx_version >= 70 and gpu_arch >= 80
-  return ptx_version >= 60 and gpu_arch >= 70
+    if ptx_type in ["s8", "u8", "s32"]:
+        return ptx_version >= 63 and gpu_arch >= 72
+    if ptx_type in ["s4", "u4", "b1"]:
+        return ptx_version >= 63 and gpu_arch >= 75
+    if ptx_type in ["bf16", "tf32", "f64"]:
+        return ptx_version >= 70 and gpu_arch >= 80
+    return ptx_version >= 60 and gpu_arch >= 70
+
 
 def is_rnd_supported(op):
-  # rnd is only supported for FP64 WMMA
-  return op.a.ptx_type == "f64"
+    # rnd is only supported for FP64 WMMA
+    return op.a.ptx_type == "f64"
+
 
 def is_mma_variant_supported(op, layout_a, layout_b, satf):
-  if not (is_type_supported(op.a.ptx_type)
-          and is_geom_supported(op.a.geom)):
-    return False
+    if not (is_type_supported(op.a.ptx_type) and is_geom_supported(op.a.geom)):
+        return False
+
+    if satf and not op.a.ptx_type in ["f16", "s8", "u8", "s4", "u4"]:
+        return False
 
-  if satf and not op.a.ptx_type in ["f16", "s8", "u8", "s4", "u4"]:
-    return False
+    # sub-integer types require row/col layout.
+    if op.a.ptx_type in ["s4", "u4", "b1"]:
+        return layout_a == "row" and layout_b == "col"
+    return True
 
-  # sub-integer types require row/col layout.
-  if op.a.ptx_type in ["s4", "u4", "b1"]:
-    return layout_a == "row" and layout_b == "col"
-  return True
 
 def is_ldst_variant_supported(frag, layout):
-  if not (is_type_supported(frag.ptx_type)
-          and is_geom_supported(frag.geom)):
-    return False
-  if frag.ptx_type in ["s4", "u4", "b1"]:
-    # sub-integer types require sm_75 and ptx63, row/col layout for a/b.
-    return ((frag.frag == "a" and layout == "row")
+    if not (is_type_supported(frag.ptx_type) and is_geom_supported(frag.geom)):
+        return False
+    if frag.ptx_type in ["s4", "u4", "b1"]:
+        # sub-integer types require sm_75 and ptx63, row/col layout for a/b.
+        return (
+            (frag.frag == "a" and layout == "row")
             or (frag.frag == "b" and layout == "col")
-            or frag.frag in ["c", "d"])
-  return True
+            or frag.frag in ["c", "d"]
+        )
+    return True
+
 
 def get_builtin_prefix(frag):
-  prefix = None
-  if frag.geom in ["m16n16k16", "m32n8k16", "m8n32k16"]:
-    if frag.ptx_type in ["f16", "f32"]:
-      prefix = "__hmma"
-    elif frag.ptx_type == "bf16":
-      prefix = "__mma_bf16"
+    prefix = None
+    if frag.geom in ["m16n16k16", "m32n8k16", "m8n32k16"]:
+        if frag.ptx_type in ["f16", "f32"]:
+            prefix = "__hmma"
+        elif frag.ptx_type == "bf16":
+            prefix = "__mma_bf16"
+        else:
+            prefix = "__imma"
+    elif frag.geom == "m8n8k32":
+        prefix = "__imma"  # sub-integers
+    elif frag.geom == "m8n8k128":
+        prefix = "__bmma"
+    elif frag.geom == "m8n8k4":
+        prefix = "__dmma"
+    elif frag.geom == "m16n16k8":
+        if frag.ptx_type == "f32":
+            prefix = "__mma"
+        else:
+            prefix = "__mma_tf32"
+    assert prefix
+    return prefix
+
+
+def get_ldst_builtin_name(frag):
+    prefix = get_builtin_prefix(frag)
+
+    if prefix == "__hmma":
+        suffix = "" if frag.frag in ["a", "b"] else frag.ptx_type
+    elif prefix in ["__dmma", "__mma_bf16", "__mma_tf32"]:
+        suffix = "" if frag.frag in ["a", "b", "c"] else frag.ptx_type
     else:
-      prefix = "__imma"
-  elif frag.geom == "m8n8k32":
-    prefix = "__imma" # sub-integers
-  elif frag.geom == "m8n8k128":
-    prefix = "__bmma"
-  elif frag.geom == "m8n8k4":
-    prefix = "__dmma"
-  elif frag.geom == "m16n16k8":
-    if frag.ptx_type == "f32":
-      prefix = "__mma"
+        suffix = "" if frag.frag == "c" else frag.ptx_type
+        if suffix == "s32":
+            suffix = "i32"
+
+    if frag.frag == "d":
+        ifrag = "c"
+        op = "st"
     else:
-      prefix = "__mma_tf32"
-  assert prefix
-  return prefix
+        ifrag = frag.frag
+        op = "ld"
+
+    name = "%s_%s_%s_%s%s" % (
+        prefix,
+        frag.geom,
+        op,
+        ifrag,
+        "_" + suffix if suffix else "",
+    )
+    return name
 
-def get_ldst_builtin_name(frag):
-  prefix = get_builtin_prefix(frag)
-
-  if prefix == "__hmma":
-    suffix = "" if frag.frag in ["a","b"] else frag.ptx_type
-  elif prefix in ["__dmma", "__mma_bf16", "__mma_tf32"]:
-    suffix = "" if frag.frag in ["a","b","c"] else frag.ptx_type
-  else:
-    suffix = "" if frag.frag == "c" else frag.ptx_type
-    if suffix == "s32":
-      suffix = "i32"
-
-  if frag.frag == "d":
-    ifrag = "c"
-    op = "st"
-  else:
-    ifrag = frag.frag
-    op = "ld"
-
-  name = "%s_%s_%s_%s%s" % (prefix, frag.geom, op, ifrag,
-                             "_" + suffix if suffix else "")
-  return name
 
 def get_mma_builtin_name(op):
-  prefix = get_builtin_prefix(op.a)
-
-  if prefix == "__hmma":
-    suffix = op.d.ptx_type + op.c.ptx_type
-  elif prefix in ["__mma_bf16", "__mma_tf32"]:
-    suffix = op.d.ptx_type
-  else:
-    suffix = op.a.ptx_type
-
-  name = "{prefix}_{geom}_mma{b1op}_{suffix}".format(
-      prefix = prefix,
-      geom = op.a.geom,
-      b1op = op.b1op.replace(".","_"),
-      suffix = suffix)
-  return name
+    prefix = get_builtin_prefix(op.a)
 
-def get_required_sm(frag, b1op=""):
-  if frag.ptx_type in ["f64", "bf16", "tf32"]:
-    return 80
-  if frag.ptx_type in ["u4", "s4", "b1"]:
-    if b1op == ".and.popc":
-      return 80
-    return 75
-  if frag.ptx_type in ["s8", "u8"]:
-    return 72
-  if frag.ptx_type == "s32":
-    if frag.geom in ["m8n8k32", "m8n8k128"]: # s4/u4/b1
-      return 75
-    else:                       # s8/u8
-      return 72
-  if frag.ptx_type in ["f16", "f32"]:
-    if frag.geom == "m16n16k8":
-      return 80
+    if prefix == "__hmma":
+        suffix = op.d.ptx_type + op.c.ptx_type
+    elif prefix in ["__mma_bf16", "__mma_tf32"]:
+        suffix = op.d.ptx_type
     else:
-      return 70
-  assert(False)
+        suffix = op.a.ptx_type
+
+    name = "{prefix}_{geom}_mma{b1op}_{suffix}".format(
+        prefix=prefix, geom=op.a.geom, b1op=op.b1op.replace(".", "_"), suffix=suffix
+    )
+    return name
+
+
+def get_required_sm(frag, b1op=""):
+    if frag.ptx_type in ["f64", "bf16", "tf32"]:
+        return 80
+    if frag.ptx_type in ["u4", "s4", "b1"]:
+        if b1op == ".and.popc":
+            return 80
+        return 75
+    if frag.ptx_type in ["s8", "u8"]:
+        return 72
+    if frag.ptx_type == "s32":
+        if frag.geom in ["m8n8k32", "m8n8k128"]:  # s4/u4/b1
+            return 75
+        else:  # s8/u8
+            return 72
+    if frag.ptx_type in ["f16", "f32"]:
+        if frag.geom == "m16n16k8":
+            return 80
+        else:
+            return 70
+    assert False
+
 
 def get_required_ptx(frag, b1op=""):
-  if frag.ptx_type == "b1" and b1op == ".and.popc":
-    return 71
-  if frag.ptx_type in ["f64", "bf16", "tf32"]:
-    return 70
-  if frag.ptx_type in ["f16", "f32"]:
-    if frag.geom == "m16n16k16":
-      return 60
-    if frag.geom == "m16n16k8":
-      return 70
-    return 61
-  return 63
+    if frag.ptx_type == "b1" and b1op == ".and.popc":
+        return 71
+    if frag.ptx_type in ["f64", "bf16", "tf32"]:
+        return 70
+    if frag.ptx_type in ["f16", "f32"]:
+        if frag.geom == "m16n16k16":
+            return 60
+        if frag.geom == "m16n16k8":
+            return 70
+        return 61
+    return 63
+
 
 def get_src_dst_prefix(frag):
-  if frag.ptx_type == "f32":
-    return "f"
-  if frag.ptx_type == "f64":
-    return "d"
-  if frag.ptx_type == "tf32" and frag.frag in ["c", "d"]:
-    return "f"
-  return ""
+    if frag.ptx_type == "f32":
+        return "f"
+    if frag.ptx_type == "f64":
+        return "d"
+    if frag.ptx_type == "tf32" and frag.frag in ["c", "d"]:
+        return "f"
+    return ""
+
 
 def gen_wmma_ldst_tests(results):
-  load_template = """
+    load_template = """
   // CHECK${check_suffix}: call {{.*}} @${intrinsic}
   // expected-error-re@+1 {{'${builtin}' needs target feature (sm_${min_sm}{{.*}},(ptx${min_ptx}{{.*}}}}
   ${builtin}(${dst}, ${src}, ldm, ${blayout});
 """.rstrip()
-  intrinsic_template = "llvm.nvvm.wmma.${geom}.${op}.${frag}.${ilayout}.stride.${itype}"
+    intrinsic_template = (
+        "llvm.nvvm.wmma.${geom}.${op}.${frag}.${ilayout}.stride.${itype}"
+    )
+
+    for frag, layout in sorted(product(get_ldst_ops(), ["row", "col"]), key=str):
+
+        if not is_ldst_variant_supported(frag, layout):
+            continue
+
+        src_dst_prefix = get_src_dst_prefix(frag)
+
+        min_sm = get_required_sm(frag)
+        min_ptx = get_required_ptx(frag)
+        # TF32 uses f32 for accumulator loads.
+        if frag.geom == "m16n16k8" and frag.frag == "c":
+            assert frag.ptx_type == "tf32"
+            itype = "f32"
+        else:
+            itype = frag.ptx_type
+
+        params = {
+            "check_suffix": "_PTX%d_SM%d" % (min_ptx, min_sm),
+            "builtin": get_ldst_builtin_name(frag),
+            "min_ptx": min_ptx,
+            "min_sm": min_sm,
+            "dst": src_dst_prefix + "dst",
+            "src": src_dst_prefix + "src",
+            "blayout": 0 if layout == "row" else 1,
+            "intrinsic": Template(intrinsic_template).substitute(
+                {
+                    "frag": frag.frag,
+                    "geom": frag.geom,
+                    "ilayout": layout,
+                    "itype": itype,
+                    "op": "store" if frag.frag == "d" else "load",
+                }
+            ),
+        }
+        results[(min_ptx, min_sm)] += Template(load_template).substitute(params)
+
+    return results
 
-  for frag, layout in sorted(product(get_ldst_ops(), ["row","col"]), key=str):
 
-    if not is_ldst_variant_supported(frag, layout):
-      continue
-
-    src_dst_prefix = get_src_dst_prefix(frag)
-
-    min_sm = get_required_sm(frag)
-    min_ptx = get_required_ptx(frag)
-    # TF32 uses f32 for accumulator loads.
-    if frag.geom == "m16n16k8" and frag.frag =="c":
-      assert frag.ptx_type == "tf32"
-      itype = "f32"
+def mma_signature(op):
+    if op.a.ptx_type == "f16":
+        # FP16 ops identified by accumulator & result type.
+        return "%s.%s" % (op.d.ptx_type, op.c.ptx_type)
     else:
-      itype = frag.ptx_type
-
-    params = {
-        "check_suffix" : "_PTX%d_SM%d" % (min_ptx, min_sm),
-        "builtin" : get_ldst_builtin_name(frag),
-        "min_ptx" : min_ptx,
-        "min_sm" : min_sm,
-        "dst": src_dst_prefix + "dst",
-        "src": src_dst_prefix + "src",
-        "blayout" : 0 if layout == "row" else 1,
-        "intrinsic" : Template(intrinsic_template).substitute({
-            "frag" : frag.frag,
-            "geom"   : frag.geom,
-            "ilayout" : layout,
-            "itype" : itype,
-            "op" : "store" if frag.frag == "d" else "load",
-        })
-    }
-    results[(min_ptx,min_sm)] += Template(load_template).substitute(params)
-
-  return results
+        # other ops are identified by input type.
+        return op.a.ptx_type
 
-def mma_signature(op):
-  if op.a.ptx_type == "f16":
-    # FP16 ops identified by accumulator & result type.
-    return "%s.%s" % (op.d.ptx_type, op.c.ptx_type)
-  else:
-    # other ops are identified by input type.
-    return op.a.ptx_type
 
 # Get numeric value for rowcol parameter of the builtin
 # AFAICT it uses the encoding accepted by NVVM intrinsics:
 # https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#nvvm-intrin-warp-level-matrix-mma
 def get_ilayout(a, b):
-  return {
-      "row.row" : 0,
-      "row.col" : 1,
-      "col.row" : 2,
-      "col.col" : 3
-  }[a + "." + b]
+    return {"row.row": 0, "row.col": 1, "col.row": 2, "col.col": 3}[a + "." + b]
+
 
 def gen_wmma_mma_tests(results):
-  mma_template = """
+    mma_template = """
   // CHECK${check_suffix}: call {{.*}} @${intrinsic}
   // expected-error-re@+1 {{'${builtin}' needs target feature (sm_${min_sm}{{.*}},(ptx${min_ptx}{{.*}}}}
   ${builtin}(${dst}, ${asrc}, ${asrc}, ${csrc}, ${ilayout}${maybe_satf});
 """.rstrip()
-  intrinsic_template = "llvm.nvvm.wmma.${geom}.mma${b1op}.${alayout}.${blayout}.${intrinsic_signature}${satf}"
-
-  for op, alayout, blayout, satf in sorted(product( get_mma_ops(),
-                                                    ["row","col"],
-                                                    ["row","col"],
-                                                    [".satfinite", ""]),
-                                           key=str):
-
-    if not is_mma_variant_supported(op, alayout, blayout, satf):
-      continue
-
-    asrc_prefix = get_src_dst_prefix(op.a)
-    csrc_prefix = get_src_dst_prefix(op.c)
-    ddst_prefix = get_src_dst_prefix(op.d)
-    if op.a.ptx_type == "b1": # .b1 MMA has no satf argument.
-       isatf_arg = ""
-    else:
-       isatf_arg = ", 1" if satf else ", 0"
-    min_sm = get_required_sm(op.a, op.b1op)
-    min_ptx = get_required_ptx(op.a, op.b1op)
-    params = {
-        "check_suffix" : "_PTX%d_SM%d" % (min_ptx, min_sm),
-        "builtin" : get_mma_builtin_name(op),
-        "min_ptx" : min_ptx,
-        "min_sm" : min_sm,
-        "dst": ddst_prefix + "dst",
-        "asrc": asrc_prefix + "src",
-        "csrc": csrc_prefix + "src",
-        "ilayout" : get_ilayout(alayout, blayout),
-        "maybe_satf" : isatf_arg,
-        "intrinsic" : Template(intrinsic_template).substitute({
-            "geom"  : op.a.geom,
-            "alayout" : alayout,
-            "blayout" : blayout,
-            "intrinsic_signature" : mma_signature(op),
-            "satf"  : satf,
-            "b1op"  : op.b1op
-        })
-    }
-    results[(min_ptx, min_sm)] += Template(mma_template).substitute(params)
-
-  return results
+    intrinsic_template = "llvm.nvvm.wmma.${geom}.mma${b1op}.${alayout}.${blayout}.${intrinsic_signature}${satf}"
+
+    for op, alayout, blayout, satf in sorted(
+        product(get_mma_ops(), ["row", "col"], ["row", "col"], [".satfinite", ""]),
+        key=str,
+    ):
+
+        if not is_mma_variant_supported(op, alayout, blayout, satf):
+            continue
+
+        asrc_prefix = get_src_dst_prefix(op.a)
+        csrc_prefix = get_src_dst_prefix(op.c)
+        ddst_prefix = get_src_dst_prefix(op.d)
+        if op.a.ptx_type == "b1":  # .b1 MMA has no satf argument.
+            isatf_arg = ""
+        else:
+            isatf_arg = ", 1" if satf else ", 0"
+        min_sm = get_required_sm(op.a, op.b1op)
+        min_ptx = get_required_ptx(op.a, op.b1op)
+        params = {
+            "check_suffix": "_PTX%d_SM%d" % (min_ptx, min_sm),
+            "builtin": get_mma_builtin_name(op),
+            "min_ptx": min_ptx,
+            "min_sm": min_sm,
+            "dst": ddst_prefix + "dst",
+            "asrc": asrc_prefix + "src",
+            "csrc": csrc_prefix + "src",
+            "ilayout": get_ilayout(alayout, blayout),
+            "maybe_satf": isatf_arg,
+            "intrinsic": Template(intrinsic_template).substitute(
+                {
+                    "geom": op.a.geom,
+                    "alayout": alayout,
+                    "blayout": blayout,
+                    "intrinsic_signature": mma_signature(op),
+                    "satf": satf,
+                    "b1op": op.b1op,
+                }
+            ),
+        }
+        results[(min_ptx, min_sm)] += Template(mma_template).substitute(params)
+
+    return results
+
 
 def gen_tests():
-  results = gen_wmma_ldst_tests(defaultdict(str))
-  results = gen_wmma_mma_tests(results)
+    results = gen_wmma_ldst_tests(defaultdict(str))
+    results = gen_wmma_mma_tests(results)
 
-  run_template = r"""
+    run_template = r"""
 //
 // *** DO NOT EDIT ***
 //
@@ -376,20 +419,30 @@ def gen_tests():
 // ${run}:   -DPTX=${ptx} -DSM=${sm} -fcuda-is-device -S -o /dev/null -x cuda \
 // ${run}:   -verify %s
 """
-  def supported_variants(ptx, sm, results):
-    return [(ptx_, sm_) for ptx_, sm_ in results if ptx_ <= ptx and sm_ <= sm]
-
-  print(Template(run_template).substitute({
-      "run" : "RUN", # To avoid lit misinterpreting the template
-      "ptx" : ptx_version,
-      "sm" : gpu_arch,
-      "check_labels" : ",".join(["CHECK_PTX%d_SM%d" % (ptx_, sm_)
-                                 for ptx_, sm_
-                                 in supported_variants(ptx_version, gpu_arch,
-                                                       results)])
-  }))
-
-  print("""
+
+    def supported_variants(ptx, sm, results):
+        return [(ptx_, sm_) for ptx_, sm_ in results if ptx_ <= ptx and sm_ <= sm]
+
+    print(
+        Template(run_template).substitute(
+            {
+                "run": "RUN",  # To avoid lit misinterpreting the template
+                "ptx": ptx_version,
+                "sm": gpu_arch,
+                "check_labels": ",".join(
+                    [
+                        "CHECK_PTX%d_SM%d" % (ptx_, sm_)
+                        for ptx_, sm_ in supported_variants(
+                            ptx_version, gpu_arch, results
+                        )
+                    ]
+                ),
+            }
+        )
+    )
+
+    print(
+        """
 #if !defined(CUDA_VERSION)
 #define __device__ __attribute__((device))
 #define __global__ __attribute__((global))
@@ -403,15 +456,17 @@ def supported_variants(ptx, sm, results):
 __device__ void test_wmma_buitins(int *src, int *dst,
                                   float *fsrc, float *fdst,
                                   double *dsrc, double *ddst, int ldm) {
-""");
+"""
+    )
+
+    for (ptx, sm), tests in sorted(results.items()):
+        print()
+        print("#if (PTX >= %d) && (SM >= %d)" % (ptx, sm))
+        print(tests)
+        print("#endif // (PTX >= %d) && (SM >= %d)" % (ptx, sm))
 
-  for (ptx, sm), tests in sorted(results.items()):
-    print()
-    print("#if (PTX >= %d) && (SM >= %d)" % (ptx, sm))
-    print(tests)
-    print("#endif // (PTX >= %d) && (SM >= %d)"% (ptx, sm))
+    print("}")
 
-  print("}")
 
 parser = argparse.ArgumentParser()
 parser.add_argument("--ptx", type=int, default=60)

diff --git a/clang/test/CodeGenHIP/lit.local.cfg b/clang/test/CodeGenHIP/lit.local.cfg
index ded4330455aeb..88cd98ad0aa6f 100644
--- a/clang/test/CodeGenHIP/lit.local.cfg
+++ b/clang/test/CodeGenHIP/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.cpp', '.hip']
+config.suffixes = [".cpp", ".hip"]

diff --git a/clang/test/CodeGenHLSL/lit.local.cfg b/clang/test/CodeGenHLSL/lit.local.cfg
index c6ead2b71de6c..0604d1a83dc68 100644
--- a/clang/test/CodeGenHLSL/lit.local.cfg
+++ b/clang/test/CodeGenHLSL/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.c', '.hlsl']
+config.suffixes = [".c", ".hlsl"]

diff --git a/clang/test/CodeGenObjC/lit.local.cfg b/clang/test/CodeGenObjC/lit.local.cfg
index 3957a0545185b..546691463d1ad 100644
--- a/clang/test/CodeGenObjC/lit.local.cfg
+++ b/clang/test/CodeGenObjC/lit.local.cfg
@@ -1,2 +1,2 @@
-if any(target in config.target_triple for target in ('aix', 'zos')):
-  config.unsupported = True
+if any(target in config.target_triple for target in ("aix", "zos")):
+    config.unsupported = True

diff --git a/clang/test/CodeGenObjCXX/lit.local.cfg b/clang/test/CodeGenObjCXX/lit.local.cfg
index 3957a0545185b..546691463d1ad 100644
--- a/clang/test/CodeGenObjCXX/lit.local.cfg
+++ b/clang/test/CodeGenObjCXX/lit.local.cfg
@@ -1,2 +1,2 @@
-if any(target in config.target_triple for target in ('aix', 'zos')):
-  config.unsupported = True
+if any(target in config.target_triple for target in ("aix", "zos")):
+    config.unsupported = True

diff --git a/clang/test/Driver/XRay/lit.local.cfg b/clang/test/Driver/XRay/lit.local.cfg
index 1755d9ab5ce9e..8bc9091bd985a 100644
--- a/clang/test/Driver/XRay/lit.local.cfg
+++ b/clang/test/Driver/XRay/lit.local.cfg
@@ -2,22 +2,29 @@ import platform
 
 # Only run the tests in platforms where XRay instrumentation is supported.
 supported_targets = [
-    'amd64', 'x86_64', 'x86_64h', 'arm', 'aarch64', 'arm64', 'powerpc64le', 'mips',
-    'mipsel', 'mips64', 'mips64el'
+    "amd64",
+    "x86_64",
+    "x86_64h",
+    "arm",
+    "aarch64",
+    "arm64",
+    "powerpc64le",
+    "mips",
+    "mipsel",
+    "mips64",
+    "mips64el",
 ]
 
 # Only on platforms we support.
-supported_oses = [
-    'Linux', 'FreeBSD', 'Darwin'
-]
+supported_oses = ["Linux", "FreeBSD", "Darwin"]
 
-triple_set = set(config.target_triple.split('-'))
+triple_set = set(config.target_triple.split("-"))
 if len(triple_set.intersection(supported_targets)) == 0:
-  config.unsupported = True
+    config.unsupported = True
 
 # Do not run for 'android' despite being linux.
-if platform.system() not in supported_oses or 'android' in triple_set:
-  config.unsupported = True
+if platform.system() not in supported_oses or "android" in triple_set:
+    config.unsupported = True
 
 if config.enable_shared:
-  config.available_features.update(['enable_shared'])
+    config.available_features.update(["enable_shared"])
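
The gating above reduces to a set intersection over the triple's components;
with a hypothetical triple it behaves like this:

    # Hypothetical host triple, for illustration only.
    triple_set = set("x86_64-unknown-linux-gnu".split("-"))
    supported_targets = ["amd64", "x86_64", "aarch64", "arm64"]
    # The test suite is marked unsupported only when no component matches.
    print(len(triple_set.intersection(supported_targets)) == 0)  # False: x86_64 matches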

diff --git a/clang/test/Driver/ftime-trace-sections.py b/clang/test/Driver/ftime-trace-sections.py
index 297ab38e36555..02afa4ac54eb7 100644
--- a/clang/test/Driver/ftime-trace-sections.py
+++ b/clang/test/Driver/ftime-trace-sections.py
@@ -2,15 +2,21 @@
 
 import json, sys, time
 
+
 def is_inside(range1, range2):
-    a = range1["ts"]; b = a + range1["dur"]
-    c = range2["ts"]; d = c + range2["dur"]
+    a = range1["ts"]
+    b = a + range1["dur"]
+    c = range2["ts"]
+    d = c + range2["dur"]
     return (a >= c and a <= d) and (b >= c and b <= d)
 
+
 def is_before(range1, range2):
-    b = range1["ts"] + range1["dur"]; c = range2["ts"]
+    b = range1["ts"] + range1["dur"]
+    c = range2["ts"]
     return b <= c
 
+
 log_contents = json.loads(sys.stdin.read())
 events = log_contents["traceEvents"]
 codegens = [event for event in events if event["name"] == "CodeGen Function"]
@@ -22,13 +28,23 @@ def is_before(range1, range2):
 
 # Make sure that the 'beginningOfTime' is not later than now.
 if beginning_of_time > seconds_since_epoch:
-    sys.exit("'beginningOfTime' should represent the absolute time when the "
-             "process has started")
-
-if not all([any([is_inside(codegen, frontend) for frontend in frontends])
-                        for codegen in codegens]):
+    sys.exit(
+        "'beginningOfTime' should represent the absolute time when the "
+        "process has started"
+    )
+
+if not all(
+    [
+        any([is_inside(codegen, frontend) for frontend in frontends])
+        for codegen in codegens
+    ]
+):
     sys.exit("Not all CodeGen sections are inside any Frontend section!")
 
-if not all([all([is_before(frontend, backend) for frontend in frontends])
-                        for backend in backends]):
+if not all(
+    [
+        all([is_before(frontend, backend) for frontend in frontends])
+        for backend in backends
+    ]
+):
     sys.exit("Not all Frontend section are before all Backend sections!")

diff --git a/clang/test/Driver/lit.local.cfg b/clang/test/Driver/lit.local.cfg
index 21d23ff0a271e..0d124704d6ecd 100644
--- a/clang/test/Driver/lit.local.cfg
+++ b/clang/test/Driver/lit.local.cfg
@@ -1,26 +1,49 @@
 from lit.llvm import llvm_config
 
-config.suffixes = ['.c', '.cpp', '.cppm', '.h', '.m', '.mm', '.S', '.s', '.f90', '.F90', '.f95',
-                   '.cu', '.rs', '.cl', '.clcpp', '.hip', '.hipi', '.hlsl']
+config.suffixes = [
+    ".c",
+    ".cpp",
+    ".cppm",
+    ".h",
+    ".m",
+    ".mm",
+    ".S",
+    ".s",
+    ".f90",
+    ".F90",
+    ".f95",
+    ".cu",
+    ".rs",
+    ".cl",
+    ".clcpp",
+    ".hip",
+    ".hipi",
+    ".hlsl",
+]
 config.substitutions = list(config.substitutions)
-config.substitutions.insert(0,
-    ('%clang_cc1',
-     """*** Do not use 'clang -cc1' in Driver tests. ***""") )
+config.substitutions.insert(
+    0, ("%clang_cc1", """*** Do not use 'clang -cc1' in Driver tests. ***""")
+)
 
 # Remove harmful environmental variables for clang Driver tests.
 # Some might be useful for other tests so they are only removed here.
-driver_overwrite_env_vars = ['MACOSX_DEPLOYMENT_TARGET',
-                             'IPHONEOS_DEPLOYMENT_TARGET',
-                             'SDKROOT', 'CCC_OVERRIDE_OPTIONS',
-                             'CC_PRINT_OPTIONS', 'CC_PRINT_HEADERS',
-                             'CC_LOG_DIAGNOSTICS', 'CC_PRINT_PROC_STAT']
+driver_overwrite_env_vars = [
+    "MACOSX_DEPLOYMENT_TARGET",
+    "IPHONEOS_DEPLOYMENT_TARGET",
+    "SDKROOT",
+    "CCC_OVERRIDE_OPTIONS",
+    "CC_PRINT_OPTIONS",
+    "CC_PRINT_HEADERS",
+    "CC_LOG_DIAGNOSTICS",
+    "CC_PRINT_PROC_STAT",
+]
 
 for name in driver_overwrite_env_vars:
-  if name in config.environment:
-    del config.environment[name]
+    if name in config.environment:
+        del config.environment[name]
 
 if llvm_config.use_lld(required=False):
-    config.available_features.add('lld')
+    config.available_features.add("lld")
 
 if config.ppc_linux_default_ieeelongdouble:
-  config.available_features.add('ppc_linux_default_ieeelongdouble')
+    config.available_features.add("ppc_linux_default_ieeelongdouble")

diff --git a/clang/test/Format/lit.local.cfg b/clang/test/Format/lit.local.cfg
index abf4e8a65cc33..3851397a3f7cc 100644
--- a/clang/test/Format/lit.local.cfg
+++ b/clang/test/Format/lit.local.cfg
@@ -1,4 +1,19 @@
 # Suffixes supported by clang-format.
-config.suffixes = ['.c', '.cc', '.cpp', '.h', '.m', '.mm', '.java', '.js',
-                   '.ts', '.proto', '.protodevel', '.pb.txt', '.textproto',
-                   '.textpb', '.asciipb', '.td']
+config.suffixes = [
+    ".c",
+    ".cc",
+    ".cpp",
+    ".h",
+    ".m",
+    ".mm",
+    ".java",
+    ".js",
+    ".ts",
+    ".proto",
+    ".protodevel",
+    ".pb.txt",
+    ".textproto",
+    ".textpb",
+    ".asciipb",
+    ".td",
+]

diff --git a/clang/test/Frontend/lit.local.cfg b/clang/test/Frontend/lit.local.cfg
index 835360be27551..e625fca44678e 100644
--- a/clang/test/Frontend/lit.local.cfg
+++ b/clang/test/Frontend/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.c', '.cpp', '.m', '.mm', '.ll', '.cl', '.test']
+config.suffixes = [".c", ".cpp", ".m", ".mm", ".ll", ".cl", ".test"]

diff --git a/clang/test/Headers/lit.local.cfg b/clang/test/Headers/lit.local.cfg
index 7774a20d637b1..eee4495e06e67 100644
--- a/clang/test/Headers/lit.local.cfg
+++ b/clang/test/Headers/lit.local.cfg
@@ -1,4 +1,6 @@
 config.substitutions = list(config.substitutions)
 
 # Enable substituting Clang Sema source directory for TableGen input.
-config.substitutions.append(('%clang_src_sema_dir', os.path.join(config.clang_src_dir, 'lib', 'Sema')))
+config.substitutions.append(
+    ("%clang_src_sema_dir", os.path.join(config.clang_src_dir, "lib", "Sema"))
+)

diff --git a/clang/test/Index/skip-parsed-bodies/lit.local.cfg b/clang/test/Index/skip-parsed-bodies/lit.local.cfg
index b38b51a6b5c7b..a4e8af3a410ad 100644
--- a/clang/test/Index/skip-parsed-bodies/lit.local.cfg
+++ b/clang/test/Index/skip-parsed-bodies/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.json']
+config.suffixes = [".json"]

diff --git a/clang/test/Interpreter/lit.local.cfg b/clang/test/Interpreter/lit.local.cfg
index b1ca76669a50d..ac6d2205e9fcd 100644
--- a/clang/test/Interpreter/lit.local.cfg
+++ b/clang/test/Interpreter/lit.local.cfg
@@ -1,2 +1,2 @@
-if 'host-supports-jit' not in config.available_features:
-    config.unsupported = True
\ No newline at end of file
+if "host-supports-jit" not in config.available_features:
+    config.unsupported = True

diff --git a/clang/test/LibClang/lit.local.cfg b/clang/test/LibClang/lit.local.cfg
index 588f0ac749974..c7e54f509e5b5 100644
--- a/clang/test/LibClang/lit.local.cfg
+++ b/clang/test/LibClang/lit.local.cfg
@@ -1,2 +1,4 @@
-config.substitutions.append(('%libclang', os.path.join(config.clang_lib_dir, 'libclang.so')))
+config.substitutions.append(
+    ("%libclang", os.path.join(config.clang_lib_dir, "libclang.so"))
+)
 config.pipefail = False

diff --git a/clang/test/OpenMP/lit.local.cfg b/clang/test/OpenMP/lit.local.cfg
index c114693dd7ed8..58ee923cb7ec5 100644
--- a/clang/test/OpenMP/lit.local.cfg
+++ b/clang/test/OpenMP/lit.local.cfg
@@ -1,5 +1,5 @@
 # -*- Python -*- vim: set ft=python ts=4 sw=4 expandtab tw=79:
 from lit.llvm.subst import ToolSubst
 
-fc = ToolSubst('FileCheck', unresolved='fatal')
-config.substitutions.insert(0, (fc.regex, 'FileCheck --allow-unused-prefixes'))
+fc = ToolSubst("FileCheck", unresolved="fatal")
+config.substitutions.insert(0, (fc.regex, "FileCheck --allow-unused-prefixes"))

diff --git a/clang/test/ParserHLSL/lit.local.cfg b/clang/test/ParserHLSL/lit.local.cfg
index d637d6d68030d..4f76d3519f271 100644
--- a/clang/test/ParserHLSL/lit.local.cfg
+++ b/clang/test/ParserHLSL/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.hlsl']
+config.suffixes = [".hlsl"]

diff --git a/clang/test/Sema/lit.local.cfg b/clang/test/Sema/lit.local.cfg
index f4ef5d2fa02de..baf1b39ef238c 100644
--- a/clang/test/Sema/lit.local.cfg
+++ b/clang/test/Sema/lit.local.cfg
@@ -1,4 +1,4 @@
 config.substitutions = list(config.substitutions)
-config.substitutions.insert(0,
-    (r'%clang\b',
-     """*** Do not use the driver in Sema tests. ***""") )
+config.substitutions.insert(
+    0, (r"%clang\b", """*** Do not use the driver in Sema tests. ***""")
+)

diff --git a/clang/test/SemaCUDA/lit.local.cfg b/clang/test/SemaCUDA/lit.local.cfg
index f4ef5d2fa02de..baf1b39ef238c 100644
--- a/clang/test/SemaCUDA/lit.local.cfg
+++ b/clang/test/SemaCUDA/lit.local.cfg
@@ -1,4 +1,4 @@
 config.substitutions = list(config.substitutions)
-config.substitutions.insert(0,
-    (r'%clang\b',
-     """*** Do not use the driver in Sema tests. ***""") )
+config.substitutions.insert(
+    0, (r"%clang\b", """*** Do not use the driver in Sema tests. ***""")
+)

diff --git a/clang/test/SemaHLSL/lit.local.cfg b/clang/test/SemaHLSL/lit.local.cfg
index d637d6d68030d..4f76d3519f271 100644
--- a/clang/test/SemaHLSL/lit.local.cfg
+++ b/clang/test/SemaHLSL/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.hlsl']
+config.suffixes = [".hlsl"]

diff --git a/clang/test/SemaObjCXX/lit.local.cfg b/clang/test/SemaObjCXX/lit.local.cfg
index f4ef5d2fa02de..baf1b39ef238c 100644
--- a/clang/test/SemaObjCXX/lit.local.cfg
+++ b/clang/test/SemaObjCXX/lit.local.cfg
@@ -1,4 +1,4 @@
 config.substitutions = list(config.substitutions)
-config.substitutions.insert(0,
-    (r'%clang\b',
-     """*** Do not use the driver in Sema tests. ***""") )
+config.substitutions.insert(
+    0, (r"%clang\b", """*** Do not use the driver in Sema tests. ***""")
+)

diff --git a/clang/test/SemaOpenCL/lit.local.cfg b/clang/test/SemaOpenCL/lit.local.cfg
index f4ef5d2fa02de..baf1b39ef238c 100644
--- a/clang/test/SemaOpenCL/lit.local.cfg
+++ b/clang/test/SemaOpenCL/lit.local.cfg
@@ -1,4 +1,4 @@
 config.substitutions = list(config.substitutions)
-config.substitutions.insert(0,
-    (r'%clang\b',
-     """*** Do not use the driver in Sema tests. ***""") )
+config.substitutions.insert(
+    0, (r"%clang\b", """*** Do not use the driver in Sema tests. ***""")
+)

diff --git a/clang/test/TableGen/lit.local.cfg b/clang/test/TableGen/lit.local.cfg
index 9a4a0144f720b..1f86bce9f49c4 100644
--- a/clang/test/TableGen/lit.local.cfg
+++ b/clang/test/TableGen/lit.local.cfg
@@ -1 +1 @@
-config.suffixes = ['.td']
+config.suffixes = [".td"]

diff --git a/clang/test/Unit/lit.cfg.py b/clang/test/Unit/lit.cfg.py
index bd75b3bb39682..475069e630d74 100644
--- a/clang/test/Unit/lit.cfg.py
+++ b/clang/test/Unit/lit.cfg.py
@@ -10,67 +10,73 @@
 import lit.util
 
 # name: The name of this test suite.
-config.name = 'Clang-Unit'
+config.name = "Clang-Unit"
 
 # suffixes: A list of file extensions to treat as test files.
 config.suffixes = []
 
 # test_source_root: The root path where tests are located.
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.clang_obj_root, 'unittests')
+config.test_exec_root = os.path.join(config.clang_obj_root, "unittests")
 config.test_source_root = config.test_exec_root
 
 # testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, 'Tests')
+config.test_format = lit.formats.GoogleTest(config.llvm_build_mode, "Tests")
 
 # Propagate the temp directory. Windows requires this because it uses \Windows\
 # if none of these are present.
-if 'TMP' in os.environ:
-    config.environment['TMP'] = os.environ['TMP']
-if 'TEMP' in os.environ:
-    config.environment['TEMP'] = os.environ['TEMP']
+if "TMP" in os.environ:
+    config.environment["TMP"] = os.environ["TMP"]
+if "TEMP" in os.environ:
+    config.environment["TEMP"] = os.environ["TEMP"]
 
-if 'HOME' in os.environ:
-    config.environment['HOME'] = os.environ['HOME']
+if "HOME" in os.environ:
+    config.environment["HOME"] = os.environ["HOME"]
 
 # Propagate sanitizer options.
 for var in [
-    'ASAN_SYMBOLIZER_PATH',
-    'HWASAN_SYMBOLIZER_PATH',
-    'MSAN_SYMBOLIZER_PATH',
-    'TSAN_SYMBOLIZER_PATH',
-    'UBSAN_SYMBOLIZER_PATH',
-    'ASAN_OPTIONS',
-    'HWASAN_OPTIONS',
-    'MSAN_OPTIONS',
-    'TSAN_OPTIONS',
-    'UBSAN_OPTIONS',
+    "ASAN_SYMBOLIZER_PATH",
+    "HWASAN_SYMBOLIZER_PATH",
+    "MSAN_SYMBOLIZER_PATH",
+    "TSAN_SYMBOLIZER_PATH",
+    "UBSAN_SYMBOLIZER_PATH",
+    "ASAN_OPTIONS",
+    "HWASAN_OPTIONS",
+    "MSAN_OPTIONS",
+    "TSAN_OPTIONS",
+    "UBSAN_OPTIONS",
 ]:
     if var in os.environ:
         config.environment[var] = os.environ[var]
 
+
 def find_shlibpath_var():
-    if platform.system() in ['Linux', 'FreeBSD', 'NetBSD', 'OpenBSD', 'SunOS']:
-        yield 'LD_LIBRARY_PATH'
-    elif platform.system() == 'Darwin':
-        yield 'DYLD_LIBRARY_PATH'
-    elif platform.system() == 'Windows':
-        yield 'PATH'
-    elif platform.system() == 'AIX':
-        yield 'LIBPATH'
+    if platform.system() in ["Linux", "FreeBSD", "NetBSD", "OpenBSD", "SunOS"]:
+        yield "LD_LIBRARY_PATH"
+    elif platform.system() == "Darwin":
+        yield "DYLD_LIBRARY_PATH"
+    elif platform.system() == "Windows":
+        yield "PATH"
+    elif platform.system() == "AIX":
+        yield "LIBPATH"
+
 
 for shlibpath_var in find_shlibpath_var():
     # in stand-alone builds, shlibdir is clang's build tree
     # while llvm_libs_dir is installed LLVM (and possibly older clang)
     shlibpath = os.path.pathsep.join(
-        (config.shlibdir,
-         config.llvm_libs_dir,
-         config.environment.get(shlibpath_var, '')))
+        (
+            config.shlibdir,
+            config.llvm_libs_dir,
+            config.environment.get(shlibpath_var, ""),
+        )
+    )
     config.environment[shlibpath_var] = shlibpath
     break
 else:
-    lit_config.warning("unable to inject shared library path on '{}'"
-                       .format(platform.system()))
+    lit_config.warning(
+        "unable to inject shared library path on '{}'".format(platform.system())
+    )
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable
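
One subtlety preserved by the reformat above: the loop uses Python's
for/else. The else suite runs only when the loop finishes without hitting
break, so the warning fires exactly when find_shlibpath_var() yields nothing
for the host platform. Minimal sketch:

    def find_shlibpath_var():
        return iter(())  # pretend the platform is unrecognized: nothing to yield

    for var in find_shlibpath_var():
        print("injecting library path via", var)
        break
    else:
        print("warning: unable to inject shared library path")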

diff --git a/clang/test/lit.cfg.py b/clang/test/lit.cfg.py
index e9bfaf2e96774..5ff9b90c9e86e 100644
--- a/clang/test/lit.cfg.py
+++ b/clang/test/lit.cfg.py
@@ -16,7 +16,7 @@
 # Configuration file for the 'lit' test runner.
 
 # name: The name of this test suite.
-config.name = 'Clang'
+config.name = "Clang"
 
 # testFormat: The test format to use to interpret tests.
 #
@@ -25,31 +25,54 @@
 config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
 
 # suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.c', '.cpp', '.i', '.cppm', '.m', '.mm', '.cu', '.hip', '.hlsl',
-                   '.ll', '.cl', '.clcpp', '.s', '.S', '.modulemap', '.test', '.rs', '.ifs', '.rc']
+config.suffixes = [
+    ".c",
+    ".cpp",
+    ".i",
+    ".cppm",
+    ".m",
+    ".mm",
+    ".cu",
+    ".hip",
+    ".hlsl",
+    ".ll",
+    ".cl",
+    ".clcpp",
+    ".s",
+    ".S",
+    ".modulemap",
+    ".test",
+    ".rs",
+    ".ifs",
+    ".rc",
+]
 
 # excludes: A list of directories to exclude from the testsuite. The 'Inputs'
 # subdirectories contain auxiliary inputs for various tests in their parent
 # directories.
-config.excludes = ['Inputs', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt', 'debuginfo-tests']
+config.excludes = [
+    "Inputs",
+    "CMakeLists.txt",
+    "README.txt",
+    "LICENSE.txt",
+    "debuginfo-tests",
+]
 
 # test_source_root: The root path where tests are located.
 config.test_source_root = os.path.dirname(__file__)
 
 # test_exec_root: The root path where tests should be run.
-config.test_exec_root = os.path.join(config.clang_obj_root, 'test')
+config.test_exec_root = os.path.join(config.clang_obj_root, "test")
 
 llvm_config.use_default_substitutions()
 
 llvm_config.use_clang()
 
-config.substitutions.append(
-    ('%src_include_dir', config.clang_src_dir + '/include'))
+config.substitutions.append(("%src_include_dir", config.clang_src_dir + "/include"))
 
-config.substitutions.append(
-    ('%target_triple', config.target_triple))
+config.substitutions.append(("%target_triple", config.target_triple))
 
-config.substitutions.append(('%PATH%', config.environment['PATH']))
+config.substitutions.append(("%PATH%", config.environment["PATH"]))
 
 
 # For each occurrence of a clang tool name, replace it with the full path to
@@ -59,207 +82,262 @@
 tool_dirs = [config.clang_tools_dir, config.llvm_tools_dir]
 
 tools = [
-    'apinotes-test', 'c-index-test', 'clang-diff', 'clang-format', 'clang-repl', 'clang-offload-packager',
-    'clang-tblgen', 'clang-scan-deps', 'opt', 'llvm-ifs', 'yaml2obj', 'clang-linker-wrapper',
-    'llvm-lto', 'llvm-lto2', 'llvm-profdata',
-    ToolSubst('%clang_extdef_map', command=FindTool(
-        'clang-extdef-mapping'), unresolved='ignore'),
+    "apinotes-test",
+    "c-index-test",
+    "clang-
diff ",
+    "clang-format",
+    "clang-repl",
+    "clang-offload-packager",
+    "clang-tblgen",
+    "clang-scan-deps",
+    "opt",
+    "llvm-ifs",
+    "yaml2obj",
+    "clang-linker-wrapper",
+    "llvm-lto",
+    "llvm-lto2",
+    "llvm-profdata",
+    ToolSubst(
+        "%clang_extdef_map",
+        command=FindTool("clang-extdef-mapping"),
+        unresolved="ignore",
+    ),
 ]
 
 if config.clang_examples:
-    config.available_features.add('examples')
+    config.available_features.add("examples")
+
 
 def have_host_jit_feature_support(feature_name):
-    clang_repl_exe = lit.util.which('clang-repl', config.clang_tools_dir)
+    clang_repl_exe = lit.util.which("clang-repl", config.clang_tools_dir)
 
     if not clang_repl_exe:
         return False
 
     try:
         clang_repl_cmd = subprocess.Popen(
-            [clang_repl_exe, '--host-supports-' + feature_name], stdout=subprocess.PIPE)
+            [clang_repl_exe, "--host-supports-" + feature_name], stdout=subprocess.PIPE
+        )
     except OSError:
-        print('could not exec clang-repl')
+        print("could not exec clang-repl")
         return False
 
-    clang_repl_out = clang_repl_cmd.stdout.read().decode('ascii')
+    clang_repl_out = clang_repl_cmd.stdout.read().decode("ascii")
     clang_repl_cmd.wait()
 
-    return 'true' in clang_repl_out
+    return "true" in clang_repl_out
 
-if have_host_jit_feature_support('jit'):
-    config.available_features.add('host-supports-jit')
+
+if have_host_jit_feature_support("jit"):
+    config.available_features.add("host-supports-jit")
 
 if config.clang_staticanalyzer:
-    config.available_features.add('staticanalyzer')
-    tools.append('clang-check')
+    config.available_features.add("staticanalyzer")
+    tools.append("clang-check")
 
     if config.clang_staticanalyzer_z3:
-        config.available_features.add('z3')
+        config.available_features.add("z3")
     else:
-        config.available_features.add('no-z3')
+        config.available_features.add("no-z3")
 
     check_analyzer_fixit_path = os.path.join(
-        config.test_source_root, "Analysis", "check-analyzer-fixit.py")
+        config.test_source_root, "Analysis", "check-analyzer-fixit.py"
+    )
     config.substitutions.append(
-        ('%check_analyzer_fixit',
-         '"%s" %s' % (config.python_executable, check_analyzer_fixit_path)))
+        (
+            "%check_analyzer_fixit",
+            '"%s" %s' % (config.python_executable, check_analyzer_fixit_path),
+        )
+    )
 
 llvm_config.add_tool_substitutions(tools, tool_dirs)
 
 config.substitutions.append(
-    ('%hmaptool', "'%s' %s" % (config.python_executable,
-                             os.path.join(config.clang_src_dir, 'utils', 'hmaptool', 'hmaptool'))))
+    (
+        "%hmaptool",
+        "'%s' %s"
+        % (
+            config.python_executable,
+            os.path.join(config.clang_src_dir, "utils", "hmaptool", "hmaptool"),
+        ),
+    )
+)
 
 config.substitutions.append(
-    ('%deps-to-rsp',
-     '"%s" %s' % (config.python_executable, os.path.join(config.clang_src_dir, 'utils',
-                                                         'module-deps-to-rsp.py'))))
+    (
+        "%deps-to-rsp",
+        '"%s" %s'
+        % (
+            config.python_executable,
+            os.path.join(config.clang_src_dir, "utils", "module-deps-to-rsp.py"),
+        ),
+    )
+)
 
-config.substitutions.append(('%host_cc', config.host_cc))
-config.substitutions.append(('%host_cxx', config.host_cxx))
+config.substitutions.append(("%host_cc", config.host_cc))
+config.substitutions.append(("%host_cxx", config.host_cxx))
 
 
 # Plugins (loadable modules)
 if config.has_plugins and config.llvm_plugin_ext:
-    config.available_features.add('plugins')
+    config.available_features.add("plugins")
 
 if config.clang_default_pie_on_linux:
-    config.available_features.add('default-pie-on-linux')
+    config.available_features.add("default-pie-on-linux")
 
 # Set available features we allow tests to conditionalize on.
 #
-if config.clang_default_cxx_stdlib != '':
-    config.available_features.add('default-cxx-stdlib={}'.format(config.clang_default_cxx_stdlib))
+if config.clang_default_cxx_stdlib != "":
+    config.available_features.add(
+        "default-cxx-stdlib={}".format(config.clang_default_cxx_stdlib)
+    )
 
 # As of 2011.08, crash-recovery tests still do not pass on FreeBSD.
-if platform.system() not in ['FreeBSD']:
-    config.available_features.add('crash-recovery')
+if platform.system() not in ["FreeBSD"]:
+    config.available_features.add("crash-recovery")
 
 # ANSI escape sequences in non-dumb terminal
-if platform.system() not in ['Windows']:
-    config.available_features.add('ansi-escape-sequences')
+if platform.system() not in ["Windows"]:
+    config.available_features.add("ansi-escape-sequences")
 
 # Capability to print utf8 to the terminal.
 # Windows expects codepage, unless Wide API.
-if platform.system() not in ['Windows']:
-    config.available_features.add('utf8-capable-terminal')
+if platform.system() not in ["Windows"]:
+    config.available_features.add("utf8-capable-terminal")
 
 # Support for libgcc runtime. Used to rule out tests that require
 # clang to run with -rtlib=libgcc.
-if platform.system() not in ['Darwin', 'Fuchsia']:
-    config.available_features.add('libgcc')
+if platform.system() not in ["Darwin", "Fuchsia"]:
+    config.available_features.add("libgcc")
 
 # Case-insensitive file system
 
 
 def is_filesystem_case_insensitive():
-    handle, path = tempfile.mkstemp(
-        prefix='case-test', dir=config.test_exec_root)
+    handle, path = tempfile.mkstemp(prefix="case-test", dir=config.test_exec_root)
     isInsensitive = os.path.exists(
-        os.path.join(
-            os.path.dirname(path),
-            os.path.basename(path).upper()
-        ))
+        os.path.join(os.path.dirname(path), os.path.basename(path).upper())
+    )
     os.close(handle)
     os.remove(path)
     return isInsensitive
 
 
 if is_filesystem_case_insensitive():
-    config.available_features.add('case-insensitive-filesystem')
+    config.available_features.add("case-insensitive-filesystem")
 
 # Tests that require the /dev/fd filesystem.
-if os.path.exists('/dev/fd/0') and sys.platform not in ['cygwin']:
-    config.available_features.add('dev-fd-fs')
+if os.path.exists("/dev/fd/0") and sys.platform not in ["cygwin"]:
+    config.available_features.add("dev-fd-fs")
 
 # Set on native MS environment.
-if re.match(r'.*-(windows-msvc)$', config.target_triple):
-    config.available_features.add('ms-sdk')
+if re.match(r".*-(windows-msvc)$", config.target_triple):
+    config.available_features.add("ms-sdk")
 
 # [PR8833] LLP64-incompatible tests
-if not re.match(r'^(aarch64|x86_64).*-(windows-msvc|windows-gnu)$', config.target_triple):
-    config.available_features.add('LP64')
+if not re.match(
+    r"^(aarch64|x86_64).*-(windows-msvc|windows-gnu)$", config.target_triple
+):
+    config.available_features.add("LP64")
 
 # Tests that are specific to the Apple Silicon macOS.
-if re.match(r'^arm64(e)?-apple-(macos|darwin)', config.target_triple):
-    config.available_features.add('apple-silicon-mac')
+if re.match(r"^arm64(e)?-apple-(macos|darwin)", config.target_triple):
+    config.available_features.add("apple-silicon-mac")
 
 # [PR18856] Depends to remove opened file. On win32, a file could be removed
 # only if all handles were closed.
-if platform.system() not in ['Windows']:
-    config.available_features.add('can-remove-opened-file')
+if platform.system() not in ["Windows"]:
+    config.available_features.add("can-remove-opened-file")
 
 # Features
 known_arches = ["x86_64", "mips64", "ppc64", "aarch64"]
-if (any(config.target_triple.startswith(x) for x in known_arches)):
-  config.available_features.add("clang-target-64-bits")
-
+if any(config.target_triple.startswith(x) for x in known_arches):
+    config.available_features.add("clang-target-64-bits")
 
 
 def calculate_arch_features(arch_string):
     features = []
     for arch in arch_string.split():
-        features.append(arch.lower() + '-registered-target')
+        features.append(arch.lower() + "-registered-target")
     return features
 
 
 llvm_config.feature_config(
-    [('--assertion-mode', {'ON': 'asserts'}),
-     ('--cxxflags', {r'-D_GLIBCXX_DEBUG\b': 'libstdcxx-safe-mode'}),
-     ('--targets-built', calculate_arch_features),
-     ])
+    [
+        ("--assertion-mode", {"ON": "asserts"}),
+        ("--cxxflags", {r"-D_GLIBCXX_DEBUG\b": "libstdcxx-safe-mode"}),
+        ("--targets-built", calculate_arch_features),
+    ]
+)
 
-if lit.util.which('xmllint'):
-    config.available_features.add('xmllint')
+if lit.util.which("xmllint"):
+    config.available_features.add("xmllint")
 
 if config.enable_backtrace:
-    config.available_features.add('backtrace')
+    config.available_features.add("backtrace")
 
 if config.enable_threads:
-    config.available_features.add('thread_support')
+    config.available_features.add("thread_support")
 
 # Check if we should allow outputs to console.
-run_console_tests = int(lit_config.params.get('enable_console', '0'))
+run_console_tests = int(lit_config.params.get("enable_console", "0"))
 if run_console_tests != 0:
-    config.available_features.add('console')
+    config.available_features.add("console")
 
 lit.util.usePlatformSdkOnDarwin(config, lit_config)
 macOSSDKVersion = lit.util.findPlatformSdkVersionOnMacOS(config, lit_config)
 if macOSSDKVersion is not None:
-    config.available_features.add('macos-sdk-' + str(macOSSDKVersion))
+    config.available_features.add("macos-sdk-" + str(macOSSDKVersion))
 
-if os.path.exists('/etc/gentoo-release'):
-    config.available_features.add('gentoo')
+if os.path.exists("/etc/gentoo-release"):
+    config.available_features.add("gentoo")
 
 if config.enable_shared:
     config.available_features.add("enable_shared")
 
 # Add a vendor-specific feature.
 if config.clang_vendor_uti:
-    config.available_features.add('clang-vendor=' + config.clang_vendor_uti)
+    config.available_features.add("clang-vendor=" + config.clang_vendor_uti)
 
 if config.have_llvm_driver:
-  config.available_features.add('llvm-driver')
+    config.available_features.add("llvm-driver")
+
 
 def exclude_unsupported_files_for_aix(dirname):
     for filename in os.listdir(dirname):
-        source_path = os.path.join( dirname, filename)
+        source_path = os.path.join(dirname, filename)
         if os.path.isdir(source_path):
             continue
-        f = open(source_path, 'r', encoding='ISO-8859-1')
+        f = open(source_path, "r", encoding="ISO-8859-1")
         try:
-           data = f.read()
-           # 64-bit object files are not supported on AIX, so exclude the tests.
-           if (any(option in data for option in ('-emit-obj', '-fmodule-format=obj', '-fintegrated-as')) and
-              '64' in config.target_triple):
-               config.excludes += [ filename ]
+            data = f.read()
+            # 64-bit object files are not supported on AIX, so exclude the tests.
+            if (
+                any(
+                    option in data
+                    for option in (
+                        "-emit-obj",
+                        "-fmodule-format=obj",
+                        "-fintegrated-as",
+                    )
+                )
+                and "64" in config.target_triple
+            ):
+                config.excludes += [filename]
         finally:
-           f.close()
-
-if 'aix' in config.target_triple:
-    for directory in ('/CodeGenCXX', '/Misc', '/Modules', '/PCH', '/Driver',
-                      '/ASTMerge/anonymous-fields', '/ASTMerge/injected-class-name-decl'):
+            f.close()
+
+
+if "aix" in config.target_triple:
+    for directory in (
+        "/CodeGenCXX",
+        "/Misc",
+        "/Modules",
+        "/PCH",
+        "/Driver",
+        "/ASTMerge/anonymous-fields",
+        "/ASTMerge/injected-class-name-decl",
+    ):
         exclude_unsupported_files_for_aix(config.test_source_root + directory)
 
 # Some tests perform deep recursion, which requires a larger pthread stack size
@@ -268,10 +346,10 @@ def exclude_unsupported_files_for_aix(dirname):
 # a larger pthread stack size for the tests. Various applications and runtime
 # libraries on AIX use a default pthread stack size of 4 MiB, so we will use
 # that as a default value here.
-if 'AIXTHREAD_STK' in os.environ:
-    config.environment['AIXTHREAD_STK'] = os.environ['AIXTHREAD_STK']
-elif platform.system() == 'AIX':
-    config.environment['AIXTHREAD_STK'] = '4194304'
+if "AIXTHREAD_STK" in os.environ:
+    config.environment["AIXTHREAD_STK"] = os.environ["AIXTHREAD_STK"]
+elif platform.system() == "AIX":
+    config.environment["AIXTHREAD_STK"] = "4194304"
 
 # The llvm-nm tool supports an environment variable "OBJECT_MODE" on AIX OS, which
 # controls the kind of objects they will support. If there is no "OBJECT_MODE"
@@ -280,9 +358,9 @@ def exclude_unsupported_files_for_aix(dirname):
 # 32-bit and 64-bit objects by default, set the environment variable
 # "OBJECT_MODE" to 'any' for llvm-nm on AIX OS.
 
-if 'system-aix' in config.available_features:
-        config.substitutions.append(('llvm-nm', 'env OBJECT_MODE=any llvm-nm'))
-        config.substitutions.append(('llvm-ar', 'env OBJECT_MODE=any llvm-ar'))
+if "system-aix" in config.available_features:
+    config.substitutions.append(("llvm-nm", "env OBJECT_MODE=any llvm-nm"))
+    config.substitutions.append(("llvm-ar", "env OBJECT_MODE=any llvm-ar"))
 
 # It is not realistically possible to account for all options that could
 # possibly be present in system and user configuration files, so disable

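A note for readers skimming the lit configuration above: the (tool, replacement) pairs appended to config.substitutions are what rewrite RUN lines before execution, and config.available_features gates REQUIRES/UNSUPPORTED clauses. Below is a minimal standalone sketch of the substitution idea, simplified to plain string replacement (lit itself applies substitutions as ordered regular-expression rewrites); the RUN line is made up.

# Simplified sketch of lit's substitution pass, not lit's actual code;
# lit applies these pairs as ordered regular-expression replacements.
substitutions = [
    ("llvm-nm", "env OBJECT_MODE=any llvm-nm"),
    ("llvm-ar", "env OBJECT_MODE=any llvm-ar"),
]

def apply_substitutions(run_line, subs):
    for pattern, replacement in subs:
        run_line = run_line.replace(pattern, replacement)
    return run_line

print(apply_substitutions("llvm-nm %t.o | count 3", substitutions))
# env OBJECT_MODE=any llvm-nm %t.o | count 3
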
diff --git a/clang/test/utils/update_cc_test_checks/lit.local.cfg b/clang/test/utils/update_cc_test_checks/lit.local.cfg
index b461878880e47..f2810fa18c8fc 100644
--- a/clang/test/utils/update_cc_test_checks/lit.local.cfg
+++ b/clang/test/utils/update_cc_test_checks/lit.local.cfg
@@ -20,30 +20,41 @@ if config.standalone_build:
 else:
 
     config.test_format = lit.formats.ShTest(execute_external=False)
-    config.suffixes = ['.test']
+    config.suffixes = [".test"]
 
-    clang_path = os.path.join(config.clang_tools_dir, 'clang')
-    extra_args = '--clang ' + shell_quote(clang_path)
-    opt_path = os.path.join(config.llvm_tools_dir, 'opt')
-    extra_args += ' --opt ' + shell_quote(opt_path)
+    clang_path = os.path.join(config.clang_tools_dir, "clang")
+    extra_args = "--clang " + shell_quote(clang_path)
+    opt_path = os.path.join(config.llvm_tools_dir, "opt")
+    extra_args += " --opt " + shell_quote(opt_path)
     # Specify an explicit default version in UTC tests, so that the --version
     # embedded in UTC_ARGS does not change in all test expectations every time
     # the default is bumped.
-    extra_args += ' --version=1'
-    script_path = os.path.join(config.llvm_src_root, 'utils',
-                               'update_cc_test_checks.py')
+    extra_args += " --version=1"
+    script_path = os.path.join(
+        config.llvm_src_root, "utils", "update_cc_test_checks.py"
+    )
     assert os.path.isfile(script_path)
     # Windows: llvm-lit.py, Linux: llvm-lit
     if config.llvm_external_lit:
         lit = config.llvm_external_lit
     else:
-        lit = shell_quote(glob.glob(os.path.join(config.llvm_tools_dir, 'llvm-lit*'))[0])
+        lit = shell_quote(
+            glob.glob(os.path.join(config.llvm_tools_dir, "llvm-lit*"))[0]
+        )
     python = shell_quote(config.python_executable)
     config.substitutions.append(
-        ('%update_cc_test_checks', "%s %s %s" % (
-            python, shell_quote(script_path), extra_args)))
+        (
+            "%update_cc_test_checks",
+            "%s %s %s" % (python, shell_quote(script_path), extra_args),
+        )
+    )
     config.substitutions.append(
-        ('%clang_tools_dir', shell_quote(config.clang_tools_dir)))
+        ("%clang_tools_dir", shell_quote(config.clang_tools_dir))
+    )
     config.substitutions.append(
-        ('%lit', "%s %s -Dclang_lit_site_cfg=%s -j1 -vv" % (
-            python, lit, shell_quote(config.clang_lit_site_cfg))))
+        (
+            "%lit",
+            "%s %s -Dclang_lit_site_cfg=%s -j1 -vv"
+            % (python, lit, shell_quote(config.clang_lit_site_cfg)),
+        )
+    )

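On the %update_cc_test_checks substitution assembled above: the shell_quote calls are what keep tool paths intact once they are spliced into a command line. A minimal sketch of the same string-building, using shlex.quote in place of the config's shell_quote helper and hypothetical install paths:

import os
import shlex

# Hypothetical stand-ins for config.clang_tools_dir / config.llvm_tools_dir.
clang_path = os.path.join("/opt/llvm tools/bin", "clang")
opt_path = os.path.join("/opt/llvm tools/bin", "opt")

extra_args = "--clang " + shlex.quote(clang_path)
extra_args += " --opt " + shlex.quote(opt_path)
# Pin the UTC version so --version in UTC_ARGS stays stable, as noted above.
extra_args += " --version=1"

print(extra_args)
# --clang '/opt/llvm tools/bin/clang' --opt '/opt/llvm tools/bin/opt' --version=1
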
diff --git a/clang/tools/clang-format/clang-format-diff.py b/clang/tools/clang-format/clang-format-diff.py
index 1dcc8689d5fef..6e707fc0fb647 100755
--- a/clang/tools/clang-format/clang-format-diff.py
+++ b/clang/tools/clang-format/clang-format-diff.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 #
-#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
+# ===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 """
 This script reads input from a unified diff and reformats all the changed
@@ -36,116 +36,156 @@
 
 
 def main():
-  parser = argparse.ArgumentParser(description=__doc__,
-                                   formatter_class=
-                                           argparse.RawDescriptionHelpFormatter)
-  parser.add_argument('-i', action='store_true', default=False,
-                      help='apply edits to files instead of displaying a diff')
-  parser.add_argument('-p', metavar='NUM', default=0,
-                      help='strip the smallest prefix containing P slashes')
-  parser.add_argument('-regex', metavar='PATTERN', default=None,
-                      help='custom pattern selecting file paths to reformat '
-                      '(case sensitive, overrides -iregex)')
-  parser.add_argument('-iregex', metavar='PATTERN', default=
-                      r'.*\.(cpp|cc|c\+\+|cxx|cppm|ccm|cxxm|c\+\+m|c|cl|h|hh|hpp|hxx'
-                      r'|m|mm|inc|js|ts|proto|protodevel|java|cs|json)',
-                      help='custom pattern selecting file paths to reformat '
-                      '(case insensitive, overridden by -regex)')
-  parser.add_argument('-sort-includes', action='store_true', default=False,
-                      help='let clang-format sort include blocks')
-  parser.add_argument('-v', '--verbose', action='store_true',
-                      help='be more verbose, ineffective without -i')
-  parser.add_argument('-style',
-                      help='formatting style to apply (LLVM, GNU, Google, Chromium, '
-                      'Microsoft, Mozilla, WebKit)')
-  parser.add_argument('-fallback-style',
-                      help='The name of the predefined style used as a'
-                      'fallback in case clang-format is invoked with'
-                      '-style=file, but can not find the .clang-format'
-                      'file to use.')
-  parser.add_argument('-binary', default='clang-format',
-                      help='location of binary to use for clang-format')
-  args = parser.parse_args()
-
-  # Extract changed lines for each file.
-  filename = None
-  lines_by_file = {}
-  for line in sys.stdin:
-    match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
-    if match:
-      filename = match.group(2)
-    if filename is None:
-      continue
-
-    if args.regex is not None:
-      if not re.match('^%s$' % args.regex, filename):
-        continue
-    else:
-      if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
-        continue
-
-    match = re.search(r'^@@.*\+(\d+)(?:,(\d+))?', line)
-    if match:
-      start_line = int(match.group(1))
-      line_count = 1
-      if match.group(2):
-        line_count = int(match.group(2))
-        # The input is something like
-        #
-        # @@ -1, +0,0 @@
-        #
-        # which means no lines were added.
-        if line_count == 0:
-          continue
-      # Also format lines range if line_count is 0 in case of deleting
-      # surrounding statements.
-      end_line = start_line
-      if line_count != 0:
-        end_line += line_count - 1
-      lines_by_file.setdefault(filename, []).extend(
-          ['-lines', str(start_line) + ':' + str(end_line)])
-
-  # Reformat files containing changes in place.
-  for filename, lines in lines_by_file.items():
-    if args.i and args.verbose:
-      print('Formatting {}'.format(filename))
-    command = [args.binary, filename]
-    if args.i:
-      command.append('-i')
-    if args.sort_includes:
-      command.append('-sort-includes')
-    command.extend(lines)
-    if args.style:
-      command.extend(['-style', args.style])
-    if args.fallback_style:
-      command.extend(['-fallback-style', args.fallback_style])
-
-    try:
-      p = subprocess.Popen(command,
-                           stdout=subprocess.PIPE,
-                           stderr=None,
-                           stdin=subprocess.PIPE,
-                           universal_newlines=True)
-    except OSError as e:
-      # Give the user more context when clang-format isn't
-      # found/isn't executable, etc.
-      raise RuntimeError(
-        'Failed to run "%s" - %s"' % (" ".join(command), e.strerror))
-
-    stdout, stderr = p.communicate()
-    if p.returncode != 0:
-      sys.exit(p.returncode)
-
-    if not args.i:
-      with open(filename) as f:
-        code = f.readlines()
-      formatted_code = StringIO(stdout).readlines()
-      diff = difflib.unified_diff(code, formatted_code,
-                                  filename, filename,
-                                  '(before formatting)', '(after formatting)')
-      diff_string = ''.join(diff)
-      if len(diff_string) > 0:
-        sys.stdout.write(diff_string)
-
-if __name__ == '__main__':
-  main()
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument(
+        "-i",
+        action="store_true",
+        default=False,
+        help="apply edits to files instead of displaying a diff",
+    )
+    parser.add_argument(
+        "-p",
+        metavar="NUM",
+        default=0,
+        help="strip the smallest prefix containing P slashes",
+    )
+    parser.add_argument(
+        "-regex",
+        metavar="PATTERN",
+        default=None,
+        help="custom pattern selecting file paths to reformat "
+        "(case sensitive, overrides -iregex)",
+    )
+    parser.add_argument(
+        "-iregex",
+        metavar="PATTERN",
+        default=r".*\.(cpp|cc|c\+\+|cxx|cppm|ccm|cxxm|c\+\+m|c|cl|h|hh|hpp|hxx"
+        r"|m|mm|inc|js|ts|proto|protodevel|java|cs|json)",
+        help="custom pattern selecting file paths to reformat "
+        "(case insensitive, overridden by -regex)",
+    )
+    parser.add_argument(
+        "-sort-includes",
+        action="store_true",
+        default=False,
+        help="let clang-format sort include blocks",
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="be more verbose, ineffective without -i",
+    )
+    parser.add_argument(
+        "-style",
+        help="formatting style to apply (LLVM, GNU, Google, Chromium, "
+        "Microsoft, Mozilla, WebKit)",
+    )
+    parser.add_argument(
+        "-fallback-style",
+        help="The name of the predefined style used as a"
+        "fallback in case clang-format is invoked with"
+        "-style=file, but can not find the .clang-format"
+        "file to use.",
+    )
+    parser.add_argument(
+        "-binary",
+        default="clang-format",
+        help="location of binary to use for clang-format",
+    )
+    args = parser.parse_args()
+
+    # Extract changed lines for each file.
+    filename = None
+    lines_by_file = {}
+    for line in sys.stdin:
+        match = re.search(r"^\+\+\+\ (.*?/){%s}(\S*)" % args.p, line)
+        if match:
+            filename = match.group(2)
+        if filename is None:
+            continue
+
+        if args.regex is not None:
+            if not re.match("^%s$" % args.regex, filename):
+                continue
+        else:
+            if not re.match("^%s$" % args.iregex, filename, re.IGNORECASE):
+                continue
+
+        match = re.search(r"^@@.*\+(\d+)(?:,(\d+))?", line)
+        if match:
+            start_line = int(match.group(1))
+            line_count = 1
+            if match.group(2):
+                line_count = int(match.group(2))
+                # The input is something like
+                #
+                # @@ -1, +0,0 @@
+                #
+                # which means no lines were added.
+                if line_count == 0:
+                    continue
+            # Also format lines range if line_count is 0 in case of deleting
+            # surrounding statements.
+            end_line = start_line
+            if line_count != 0:
+                end_line += line_count - 1
+            lines_by_file.setdefault(filename, []).extend(
+                ["-lines", str(start_line) + ":" + str(end_line)]
+            )
+
+    # Reformat files containing changes in place.
+    for filename, lines in lines_by_file.items():
+        if args.i and args.verbose:
+            print("Formatting {}".format(filename))
+        command = [args.binary, filename]
+        if args.i:
+            command.append("-i")
+        if args.sort_includes:
+            command.append("-sort-includes")
+        command.extend(lines)
+        if args.style:
+            command.extend(["-style", args.style])
+        if args.fallback_style:
+            command.extend(["-fallback-style", args.fallback_style])
+
+        try:
+            p = subprocess.Popen(
+                command,
+                stdout=subprocess.PIPE,
+                stderr=None,
+                stdin=subprocess.PIPE,
+                universal_newlines=True,
+            )
+        except OSError as e:
+            # Give the user more context when clang-format isn't
+            # found/isn't executable, etc.
+            raise RuntimeError(
+                'Failed to run "%s" - %s"' % (" ".join(command), e.strerror)
+            )
+
+        stdout, stderr = p.communicate()
+        if p.returncode != 0:
+            sys.exit(p.returncode)
+
+        if not args.i:
+            with open(filename) as f:
+                code = f.readlines()
+            formatted_code = StringIO(stdout).readlines()
+            diff = difflib.unified_diff(
+                code,
+                formatted_code,
+                filename,
+                filename,
+                "(before formatting)",
+                "(after formatting)",
+            )
+            diff_string = "".join(diff)
+            if len(diff_string) > 0:
+                sys.stdout.write(diff_string)
+
+
+if __name__ == "__main__":
+    main()

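The core of clang-format-diff.py, reformatted above, is the hunk-header parse: a header of the form "@@ -a,b +c,d @@" yields the start and length of the post-image range, which becomes a -lines argument for clang-format. A self-contained illustration of that same regex on a made-up hunk header:

import re

# Same hunk-header regex as in clang-format-diff.py above. "+15,6"
# means the new file's range starts at line 15 and spans 6 lines;
# a bare "+15" would mean a single line.
hunk = "@@ -12,4 +15,6 @@ int main() {"
match = re.search(r"^@@.*\+(\d+)(?:,(\d+))?", hunk)
assert match
start_line = int(match.group(1))
line_count = int(match.group(2) or "1")
end_line = start_line + max(line_count - 1, 0)
print("-lines", "%d:%d" % (start_line, end_line))
# -lines 15:20
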
diff --git a/clang/tools/clang-format/clang-format-sublime.py b/clang/tools/clang-format/clang-format-sublime.py
index 20c867092ef55..dcd72e68e94fa 100644
--- a/clang/tools/clang-format/clang-format-sublime.py
+++ b/clang/tools/clang-format/clang-format-sublime.py
@@ -18,7 +18,7 @@
 import subprocess
 
 # Change this to the full path if clang-format is not on the path.
-binary = 'clang-format'
+binary = "clang-format"
 
 # Change this to format according to other formatting styles. See the output of
 # 'clang-format --help' for a list of supported styles. The default looks for
@@ -26,35 +26,48 @@
 # used.
 style = None
 
+
 class ClangFormatCommand(sublime_plugin.TextCommand):
-  def run(self, edit):
-    encoding = self.view.encoding()
-    if encoding == 'Undefined':
-      encoding = 'utf-8'
-    regions = []
-    command = [binary]
-    if style:
-      command.extend(['-style', style])
-    for region in self.view.sel():
-      regions.append(region)
-      region_offset = min(region.a, region.b)
-      region_length = abs(region.b - region.a)
-      command.extend(['-offset', str(region_offset),
-                      '-length', str(region_length),
-                      '-assume-filename', str(self.view.file_name())])
-    old_viewport_position = self.view.viewport_position()
-    buf = self.view.substr(sublime.Region(0, self.view.size()))
-    p = subprocess.Popen(command, stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE, stdin=subprocess.PIPE)
-    output, error = p.communicate(buf.encode(encoding))
-    if error:
-      print(error)
-    self.view.replace(
-        edit, sublime.Region(0, self.view.size()),
-        output.decode(encoding))
-    self.view.sel().clear()
-    for region in regions:
-      self.view.sel().add(region)
-    # FIXME: Without the 10ms delay, the viewport sometimes jumps.
-    sublime.set_timeout(lambda: self.view.set_viewport_position(
-      old_viewport_position, False), 10)
+    def run(self, edit):
+        encoding = self.view.encoding()
+        if encoding == "Undefined":
+            encoding = "utf-8"
+        regions = []
+        command = [binary]
+        if style:
+            command.extend(["-style", style])
+        for region in self.view.sel():
+            regions.append(region)
+            region_offset = min(region.a, region.b)
+            region_length = abs(region.b - region.a)
+            command.extend(
+                [
+                    "-offset",
+                    str(region_offset),
+                    "-length",
+                    str(region_length),
+                    "-assume-filename",
+                    str(self.view.file_name()),
+                ]
+            )
+        old_viewport_position = self.view.viewport_position()
+        buf = self.view.substr(sublime.Region(0, self.view.size()))
+        p = subprocess.Popen(
+            command,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            stdin=subprocess.PIPE,
+        )
+        output, error = p.communicate(buf.encode(encoding))
+        if error:
+            print(error)
+        self.view.replace(
+            edit, sublime.Region(0, self.view.size()), output.decode(encoding)
+        )
+        self.view.sel().clear()
+        for region in regions:
+            self.view.sel().add(region)
+        # FIXME: Without the 10ms delay, the viewport sometimes jumps.
+        sublime.set_timeout(
+            lambda: self.view.set_viewport_position(old_viewport_position, False), 10
+        )

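One detail worth noting in the Sublime plugin above: a selection can be reversed (region.a greater than region.b when selecting backwards), which is why min() and abs() are applied before building the -offset/-length arguments. The same arithmetic on plain integers, with made-up values:

# Normalizing a possibly reversed selection (a, b) into the
# -offset/-length pair passed to clang-format.
def region_to_offset_length(a, b):
    return min(a, b), abs(b - a)

print(region_to_offset_length(80, 120))  # (80, 40), forward selection
print(region_to_offset_length(120, 80))  # (80, 40), backward selection
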
diff --git a/clang/tools/clang-format/clang-format.py b/clang/tools/clang-format/clang-format.py
index 7933dbcfadd88..28e0d14a552fd 100644
--- a/clang/tools/clang-format/clang-format.py
+++ b/clang/tools/clang-format/clang-format.py
@@ -49,9 +49,9 @@
 
 # set g:clang_format_path to the path to clang-format if it is not on the path
 # Change this to the full path if clang-format is not on the path.
-binary = 'clang-format'
+binary = "clang-format"
 if vim.eval('exists("g:clang_format_path")') == "1":
-  binary = vim.eval('g:clang_format_path')
+    binary = vim.eval("g:clang_format_path")
 
 # Change this to format according to other formatting styles. See the output of
 # 'clang-format --help' for a list of supported styles. The default looks for
@@ -60,99 +60,109 @@
 style = None
 fallback_style = None
 if vim.eval('exists("g:clang_format_fallback_style")') == "1":
-  fallback_style = vim.eval('g:clang_format_fallback_style')
+    fallback_style = vim.eval("g:clang_format_fallback_style")
+
 
 def get_buffer(encoding):
-  if platform.python_version_tuple()[0] == '3':
-    return vim.current.buffer
-  return [ line.decode(encoding) for line in vim.current.buffer ]
+    if platform.python_version_tuple()[0] == "3":
+        return vim.current.buffer
+    return [line.decode(encoding) for line in vim.current.buffer]
+
 
 def main():
-  # Get the current text.
-  encoding = vim.eval("&encoding")
-  buf = get_buffer(encoding)
-  # Join the buffer into a single string with a terminating newline
-  text = ('\n'.join(buf) + '\n').encode(encoding)
+    # Get the current text.
+    encoding = vim.eval("&encoding")
+    buf = get_buffer(encoding)
+    # Join the buffer into a single string with a terminating newline
+    text = ("\n".join(buf) + "\n").encode(encoding)
 
-  # Determine range to format.
-  if vim.eval('exists("l:lines")') == '1':
-    lines = ['-lines', vim.eval('l:lines')]
-  elif vim.eval('exists("l:format
diff ")') == '1' and \
-       os.path.exists(vim.current.buffer.name):
-    with open(vim.current.buffer.name, 'r') as f:
-      ondisk = f.read().splitlines();
-    sequence = 
diff lib.SequenceMatcher(None, ondisk, vim.current.buffer)
-    lines = []
-    for op in reversed(sequence.get_opcodes()):
-      if op[0] not in ['equal', 'delete']:
-        lines += ['-lines', '%s:%s' % (op[3] + 1, op[4])]
-    if lines == []:
-      return
-  else:
-    lines = ['-lines', '%s:%s' % (vim.current.range.start + 1,
-                                  vim.current.range.end + 1)]
+    # Determine range to format.
+    if vim.eval('exists("l:lines")') == "1":
+        lines = ["-lines", vim.eval("l:lines")]
+    elif vim.eval('exists("l:format
diff ")') == "1" and os.path.exists(
+        vim.current.buffer.name
+    ):
+        with open(vim.current.buffer.name, "r") as f:
+            ondisk = f.read().splitlines()
+        sequence = 
diff lib.SequenceMatcher(None, ondisk, vim.current.buffer)
+        lines = []
+        for op in reversed(sequence.get_opcodes()):
+            if op[0] not in ["equal", "delete"]:
+                lines += ["-lines", "%s:%s" % (op[3] + 1, op[4])]
+        if lines == []:
+            return
+    else:
+        lines = [
+            "-lines",
+            "%s:%s" % (vim.current.range.start + 1, vim.current.range.end + 1),
+        ]
 
-  # Convert cursor (line, col) to bytes.
-  # Don't use line2byte: https://github.com/vim/vim/issues/5930
-  _, cursor_line, cursor_col, _ = vim.eval('getpos(".")') # 1-based
-  cursor_byte = 0
-  for line in text.split(b'\n')[:int(cursor_line) - 1]:
-    cursor_byte += len(line) + 1
-  cursor_byte += int(cursor_col) - 1
-  if cursor_byte < 0:
-    print('Couldn\'t determine cursor position. Is your file empty?')
-    return
+    # Convert cursor (line, col) to bytes.
+    # Don't use line2byte: https://github.com/vim/vim/issues/5930
+    _, cursor_line, cursor_col, _ = vim.eval('getpos(".")')  # 1-based
+    cursor_byte = 0
+    for line in text.split(b"\n")[: int(cursor_line) - 1]:
+        cursor_byte += len(line) + 1
+    cursor_byte += int(cursor_col) - 1
+    if cursor_byte < 0:
+        print("Couldn't determine cursor position. Is your file empty?")
+        return
 
-  # Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
-  startupinfo = None
-  if sys.platform.startswith('win32'):
-    startupinfo = subprocess.STARTUPINFO()
-    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
-    startupinfo.wShowWindow = subprocess.SW_HIDE
+    # Avoid flashing an ugly, ugly cmd prompt on Windows when invoking clang-format.
+    startupinfo = None
+    if sys.platform.startswith("win32"):
+        startupinfo = subprocess.STARTUPINFO()
+        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+        startupinfo.wShowWindow = subprocess.SW_HIDE
 
-  # Call formatter.
-  command = [binary, '-cursor', str(cursor_byte)]
-  if lines != ['-lines', 'all']:
-    command += lines
-  if style:
-    command.extend(['-style', style])
-  if fallback_style:
-    command.extend(['-fallback-style', fallback_style])
-  if vim.current.buffer.name:
-    command.extend(['-assume-filename', vim.current.buffer.name])
-  p = subprocess.Popen(command,
-                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                       stdin=subprocess.PIPE, startupinfo=startupinfo)
-  stdout, stderr = p.communicate(input=text)
+    # Call formatter.
+    command = [binary, "-cursor", str(cursor_byte)]
+    if lines != ["-lines", "all"]:
+        command += lines
+    if style:
+        command.extend(["-style", style])
+    if fallback_style:
+        command.extend(["-fallback-style", fallback_style])
+    if vim.current.buffer.name:
+        command.extend(["-assume-filename", vim.current.buffer.name])
+    p = subprocess.Popen(
+        command,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        stdin=subprocess.PIPE,
+        startupinfo=startupinfo,
+    )
+    stdout, stderr = p.communicate(input=text)
 
-  # If successful, replace buffer contents.
-  if stderr:
-    print(stderr)
+    # If successful, replace buffer contents.
+    if stderr:
+        print(stderr)
+
+    if not stdout:
+        print(
+            "No output from clang-format (crashed?).\n"
+            "Please report to bugs.llvm.org."
+        )
+    else:
+        header, content = stdout.split(b"\n", 1)
+        header = json.loads(header.decode("utf-8"))
+        # Strip off the trailing newline (added above).
+        # This maintains trailing empty lines present in the buffer if
+        # the -lines specification requests them to remain unchanged.
+        lines = content.decode(encoding).split("\n")[:-1]
+        sequence = difflib.SequenceMatcher(None, buf, lines)
+        for op in reversed(sequence.get_opcodes()):
+            if op[0] != "equal":
+                vim.current.buffer[op[1] : op[2]] = lines[op[3] : op[4]]
+        if header.get("IncompleteFormat"):
+            print("clang-format: incomplete (syntax errors)")
+        # Convert cursor bytes to (line, col)
+        # Don't use goto: https://github.com/vim/vim/issues/5930
+        cursor_byte = int(header["Cursor"])
+        prefix = content[0:cursor_byte]
+        cursor_line = 1 + prefix.count(b"\n")
+        cursor_column = 1 + len(prefix.rsplit(b"\n", 1)[-1])
+        vim.command("call cursor(%d, %d)" % (cursor_line, cursor_column))
 
-  if not stdout:
-    print(
-        'No output from clang-format (crashed?).\n'
-        'Please report to bugs.llvm.org.'
-    )
-  else:
-    header, content = stdout.split(b'\n', 1)
-    header = json.loads(header.decode('utf-8'))
-    # Strip off the trailing newline (added above).
-    # This maintains trailing empty lines present in the buffer if
-    # the -lines specification requests them to remain unchanged.
-    lines = content.decode(encoding).split('\n')[:-1]
-    sequence = difflib.SequenceMatcher(None, buf, lines)
-    for op in reversed(sequence.get_opcodes()):
-      if op[0] != 'equal':
-        vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
-    if header.get('IncompleteFormat'):
-      print('clang-format: incomplete (syntax errors)')
-    # Convert cursor bytes to (line, col)
-    # Don't use goto: https://github.com/vim/vim/issues/5930
-    cursor_byte = int(header['Cursor'])
-    prefix = content[0:cursor_byte]
-    cursor_line = 1 + prefix.count(b'\n')
-    cursor_column = 1 + len(prefix.rsplit(b'\n', 1)[-1])
-    vim.command('call cursor(%d, %d)' % (cursor_line, cursor_column))
 
 main()

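The most instructive piece of the Vim integration above is the buffer patching: rather than replacing the whole buffer, it diffs the old and new text with difflib and rewrites only the changed slices, walking the opcodes in reverse so earlier indices stay valid. A runnable reduction of that loop on plain lists (the buffer contents are made up):

import difflib

# Reduced form of the patch loop in clang-format.py above, applied to
# plain lists instead of vim.current.buffer. Opcodes are visited in
# reverse so earlier slice indices remain valid during replacement.
buf = ["int main(){", "return 0 ;", "}"]
formatted = ["int main() {", "  return 0;", "}"]

sequence = difflib.SequenceMatcher(None, buf, formatted)
for op in reversed(sequence.get_opcodes()):
    if op[0] != "equal":
        buf[op[1]:op[2]] = formatted[op[3]:op[4]]

print(buf)
# ['int main() {', '  return 0;', '}']
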
diff --git a/clang/tools/clang-rename/clang-rename.py b/clang/tools/clang-rename/clang-rename.py
index 3381c5267f1c0..1cbabaf859a5e 100644
--- a/clang/tools/clang-rename/clang-rename.py
+++ b/clang/tools/clang-rename/clang-rename.py
@@ -1,4 +1,4 @@
-'''
+"""
 Minimal clang-rename integration with Vim.
 
 Before installing make sure one of the following is satisfied:
@@ -20,39 +20,43 @@
 All you have to do now is to place a cursor on a variable/function/class which
 you would like to rename and press '<leader>cr'. You will be prompted for a new
 name if the cursor points to a valid symbol.
-'''
+"""
 
 from __future__ import absolute_import, division, print_function
 import vim
 import subprocess
 import sys
 
+
 def main():
-    binary = 'clang-rename'
+    binary = "clang-rename"
     if vim.eval('exists("g:clang_rename_path")') == "1":
-        binary = vim.eval('g:clang_rename_path')
+        binary = vim.eval("g:clang_rename_path")
 
     # Get arguments for clang-rename binary.
     offset = int(vim.eval('line2byte(line("."))+col(".")')) - 2
     if offset < 0:
-        print('Couldn\'t determine cursor position. Is your file empty?',
-              file=sys.stderr)
+        print(
+            "Couldn't determine cursor position. Is your file empty?", file=sys.stderr
+        )
         return
     filename = vim.current.buffer.name
 
-    new_name_request_message = 'type new name:'
+    new_name_request_message = "type new name:"
     new_name = vim.eval("input('{}\n')".format(new_name_request_message))
 
     # Call clang-rename.
-    command = [binary,
-               filename,
-               '-i',
-               '-offset', str(offset),
-               '-new-name', str(new_name)]
+    command = [
+        binary,
+        filename,
+        "-i",
+        "-offset",
+        str(offset),
+        "-new-name",
+        str(new_name),
+    ]
     # FIXME: make it possible to run the tool on unsaved file.
-    p = subprocess.Popen(command,
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE)
+    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stdout, stderr = p.communicate()
 
     if stderr:
@@ -62,5 +66,5 @@ def main():
     vim.command("checktime")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

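On the offset computation in clang-rename.py above: line2byte(line(".")) is the 1-based byte index of the first character of the cursor's line, so adding the 1-based column and subtracting 2 gives the 0-based byte offset clang-rename expects. The same arithmetic re-derived in plain Python, with a made-up buffer and cursor:

# Plain-Python rendering of the vim expression
#     line2byte(line(".")) + col(".") - 2
# assuming "\n" line endings.
text = "int x;\nint y;\n"
line, col = 2, 5  # 1-based cursor sitting on the 'y'
offset = sum(len(l) + 1 for l in text.split("\n")[: line - 1]) + (col - 1)
print(offset, repr(text[offset]))
# 11 'y'
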
diff --git a/clang/tools/include-mapping/cppreference_parser.py b/clang/tools/include-mapping/cppreference_parser.py
index 19bdde7eb94d9..cefdbeaf334e1 100644
--- a/clang/tools/include-mapping/cppreference_parser.py
+++ b/clang/tools/include-mapping/cppreference_parser.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python
-#===- cppreference_parser.py -  ------------------------------*- python -*--===#
+# ===- cppreference_parser.py -  ------------------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 from bs4 import BeautifulSoup, NavigableString
 
@@ -18,176 +18,189 @@
 
 
 class Symbol:
+    def __init__(self, name, namespace, headers):
+        # unqualifed symbol name, e.g. "move"
+        self.name = name
+        # namespace of the symbol (with trailing "::"), e.g. "std::", "" (global scope)
+        # None for C symbols.
+        self.namespace = namespace
+        # a list of corresponding headers
+        self.headers = headers
 
-  def __init__(self, name, namespace, headers):
-    # unqualifed symbol name, e.g. "move"
-    self.name = name
-    # namespace of the symbol (with trailing "::"), e.g. "std::", "" (global scope)
-    # None for C symbols.
-    self.namespace = namespace
-    # a list of corresponding headers
-    self.headers = headers
-
-  def __lt__(self, other):
-    if self.namespace != other.namespace:
-      return str(self.namespace) < str(other.namespace)
-    return self.name < other.name
+    def __lt__(self, other):
+        if self.namespace != other.namespace:
+            return str(self.namespace) < str(other.namespace)
+        return self.name < other.name
 
 
 def _HasClass(tag, *classes):
-  for c in tag.get('class', []):
-    if c in classes:
-      return True
-  return False
+    for c in tag.get("class", []):
+        if c in classes:
+            return True
+    return False
 
 
 def _ParseSymbolPage(symbol_page_html, symbol_name):
-  """Parse symbol page and retrieve the include header defined in this page.
-  The symbol page provides header for the symbol, specifically in
-  "Defined in header <header>" section. An example:
-
-  <tr class="t-dsc-header">
-    <td colspan="2"> <div>Defined in header <code><ratio></code> </div>
-  </td></tr>
-
-  Returns a list of headers.
-  """
-  headers = set()
-  all_headers = set()
-
-  soup = BeautifulSoup(symbol_page_html, "html.parser")
-  # Rows in table are like:
-  #   Defined in header <foo>      .t-dsc-header
-  #   Defined in header <bar>      .t-dsc-header
-  #   decl1                        .t-dcl
-  #   Defined in header <baz>      .t-dsc-header
-  #   decl2                        .t-dcl
-  for table in soup.select('table.t-dcl-begin, table.t-dsc-begin'):
-    current_headers = []
-    was_decl = False
-    for row in table.select('tr'):
-      if _HasClass(row, 't-dcl', 't-dsc'):
-        was_decl = True
-        # Symbols are in the first cell.
-        found_symbols = row.find('td').stripped_strings
-        if not symbol_name in found_symbols:
-          continue
-        headers.update(current_headers)
-      elif _HasClass(row, 't-dsc-header'):
-        # If we saw a decl since the last header, this is a new block of headers
-        # for a new block of decls.
-        if was_decl:
-          current_headers = []
+    """Parse symbol page and retrieve the include header defined in this page.
+    The symbol page provides header for the symbol, specifically in
+    "Defined in header <header>" section. An example:
+
+    <tr class="t-dsc-header">
+      <td colspan="2"> <div>Defined in header <code><ratio></code> </div>
+    </td></tr>
+
+    Returns a list of headers.
+    """
+    headers = set()
+    all_headers = set()
+
+    soup = BeautifulSoup(symbol_page_html, "html.parser")
+    # Rows in table are like:
+    #   Defined in header <foo>      .t-dsc-header
+    #   Defined in header <bar>      .t-dsc-header
+    #   decl1                        .t-dcl
+    #   Defined in header <baz>      .t-dsc-header
+    #   decl2                        .t-dcl
+    for table in soup.select("table.t-dcl-begin, table.t-dsc-begin"):
+        current_headers = []
         was_decl = False
-        # There are also .t-dsc-header for "defined in namespace".
-        if not "Defined in header " in row.text:
-          continue
-        # The interesting header content (e.g. <cstdlib>) is wrapped in <code>.
-        for header_code in row.find_all("code"):
-          current_headers.append(header_code.text)
-          all_headers.add(header_code.text)
-  # If the symbol was never named, consider all named headers.
-  return headers or all_headers
+        for row in table.select("tr"):
+            if _HasClass(row, "t-dcl", "t-dsc"):
+                was_decl = True
+                # Symbols are in the first cell.
+                found_symbols = row.find("td").stripped_strings
+                if not symbol_name in found_symbols:
+                    continue
+                headers.update(current_headers)
+            elif _HasClass(row, "t-dsc-header"):
+                # If we saw a decl since the last header, this is a new block of headers
+                # for a new block of decls.
+                if was_decl:
+                    current_headers = []
+                was_decl = False
+                # There are also .t-dsc-header for "defined in namespace".
+                if not "Defined in header " in row.text:
+                    continue
+                # The interesting header content (e.g. <cstdlib>) is wrapped in <code>.
+                for header_code in row.find_all("code"):
+                    current_headers.append(header_code.text)
+                    all_headers.add(header_code.text)
+    # If the symbol was never named, consider all named headers.
+    return headers or all_headers
 
 
 def _ParseIndexPage(index_page_html):
-  """Parse index page.
-  The index page lists all std symbols and hrefs to their detailed pages
-  (which contain the defined header). An example:
-
-  <a href="abs.html" title="abs"><tt>abs()</tt></a> (int) <br>
-  <a href="acos.html" title="acos"><tt>acos()</tt></a> <br>
-
-  Returns a list of tuple (symbol_name, relative_path_to_symbol_page, variant).
-  """
-  symbols = []
-  soup = BeautifulSoup(index_page_html, "html.parser")
-  for symbol_href in soup.select("a[title]"):
-    # Ignore annotated symbols like "acos<>() (std::complex)".
-    # These tend to be overloads, and we the primary is more useful.
-    # This accidentally accepts begin/end despite the (iterator) caption: the
-    # (since C++11) note is first. They are good symbols, so the bug is unfixed.
-    caption = symbol_href.next_sibling
-    variant = None
-    if isinstance(caption, NavigableString) and "(" in caption:
-      variant = caption.text.strip(" ()")
-    symbol_tt = symbol_href.find("tt")
-    if symbol_tt:
-      symbols.append((symbol_tt.text.rstrip("<>()"), # strip any trailing <>()
-                      symbol_href["href"], variant))
-  return symbols
+    """Parse index page.
+    The index page lists all std symbols and hrefs to their detailed pages
+    (which contain the defined header). An example:
+
+    <a href="abs.html" title="abs"><tt>abs()</tt></a> (int) <br>
+    <a href="acos.html" title="acos"><tt>acos()</tt></a> <br>
+
+    Returns a list of tuple (symbol_name, relative_path_to_symbol_page, variant).
+    """
+    symbols = []
+    soup = BeautifulSoup(index_page_html, "html.parser")
+    for symbol_href in soup.select("a[title]"):
+        # Ignore annotated symbols like "acos<>() (std::complex)".
+        # These tend to be overloads, and we the primary is more useful.
+        # This accidentally accepts begin/end despite the (iterator) caption: the
+        # (since C++11) note is first. They are good symbols, so the bug is unfixed.
+        caption = symbol_href.next_sibling
+        variant = None
+        if isinstance(caption, NavigableString) and "(" in caption:
+            variant = caption.text.strip(" ()")
+        symbol_tt = symbol_href.find("tt")
+        if symbol_tt:
+            symbols.append(
+                (
+                    symbol_tt.text.rstrip("<>()"),  # strip any trailing <>()
+                    symbol_href["href"],
+                    variant,
+                )
+            )
+    return symbols
 
 
 def _ReadSymbolPage(path, name):
-  with open(path) as f:
-    return _ParseSymbolPage(f.read(), name)
+    with open(path) as f:
+        return _ParseSymbolPage(f.read(), name)
 
 
 def _GetSymbols(pool, root_dir, index_page_name, namespace, variants_to_accept):
-  """Get all symbols listed in the index page. All symbols should be in the
-  given namespace.
-
-  Returns a list of Symbols.
-  """
-
-  # Workflow steps:
-  #   1. Parse index page which lists all symbols to get symbol
-  #      name (unqualified name) and its href link to the symbol page which
-  #      contains the defined header.
-  #   2. Parse the symbol page to get the defined header.
-  index_page_path = os.path.join(root_dir, index_page_name)
-  with open(index_page_path, "r") as f:
-    # Read each symbol page in parallel.
-    results = [] # (symbol_name, promise of [header...])
-    for symbol_name, symbol_page_path, variant in _ParseIndexPage(f.read()):
-      # Variant symbols (e.g. the std::locale version of isalpha) add ambiguity.
-      # FIXME: use these as a fallback rather than ignoring entirely.
-      variants_for_symbol = variants_to_accept.get(
-          (namespace or "") + symbol_name, ())
-      if variant and variant not in variants_for_symbol:
-        continue
-      path = os.path.join(root_dir, symbol_page_path)
-      if os.path.isfile(path):
-        results.append((symbol_name,
-                      pool.apply_async(_ReadSymbolPage, (path, symbol_name))))
-      else:
-        sys.stderr.write("Discarding information for symbol: %s. Page %s does not exist.\n" 
-          % (symbol_name, path))
-
-    # Build map from symbol name to a set of headers.
-    symbol_headers = collections.defaultdict(set)
-    for symbol_name, lazy_headers in results:
-      symbol_headers[symbol_name].update(lazy_headers.get())
-
-  symbols = []
-  for name, headers in sorted(symbol_headers.items(), key=lambda t : t[0]):
-    symbols.append(Symbol(name, namespace, list(headers)))
-  return symbols
+    """Get all symbols listed in the index page. All symbols should be in the
+    given namespace.
+
+    Returns a list of Symbols.
+    """
+
+    # Workflow steps:
+    #   1. Parse index page which lists all symbols to get symbol
+    #      name (unqualified name) and its href link to the symbol page which
+    #      contains the defined header.
+    #   2. Parse the symbol page to get the defined header.
+    index_page_path = os.path.join(root_dir, index_page_name)
+    with open(index_page_path, "r") as f:
+        # Read each symbol page in parallel.
+        results = []  # (symbol_name, promise of [header...])
+        for symbol_name, symbol_page_path, variant in _ParseIndexPage(f.read()):
+            # Variant symbols (e.g. the std::locale version of isalpha) add ambiguity.
+            # FIXME: use these as a fallback rather than ignoring entirely.
+            variants_for_symbol = variants_to_accept.get(
+                (namespace or "") + symbol_name, ()
+            )
+            if variant and variant not in variants_for_symbol:
+                continue
+            path = os.path.join(root_dir, symbol_page_path)
+            if os.path.isfile(path):
+                results.append(
+                    (
+                        symbol_name,
+                        pool.apply_async(_ReadSymbolPage, (path, symbol_name)),
+                    )
+                )
+            else:
+                sys.stderr.write(
+                    "Discarding information for symbol: %s. Page %s does not exist.\n"
+                    % (symbol_name, path)
+                )
+
+        # Build map from symbol name to a set of headers.
+        symbol_headers = collections.defaultdict(set)
+        for symbol_name, lazy_headers in results:
+            symbol_headers[symbol_name].update(lazy_headers.get())
+
+    symbols = []
+    for name, headers in sorted(symbol_headers.items(), key=lambda t: t[0]):
+        symbols.append(Symbol(name, namespace, list(headers)))
+    return symbols
 
 
 def GetSymbols(parse_pages):
-  """Get all symbols by parsing the given pages.
-
-  Args:
-    parse_pages: a list of tuples (page_root_dir, index_page_name, namespace)
-  """
-  # By default we prefer the non-variant versions, as they're more common. But
-  # there are some symbols, whose variant is more common. This list describes
-  # those symbols.
-  variants_to_accept = {
-      # std::remove<> has variant algorithm.
-      "std::remove": ("algorithm"),
-  }
-  symbols = []
-  # Run many workers to process individual symbol pages under the symbol index.
-  # Don't allow workers to capture Ctrl-C.
-  pool = multiprocessing.Pool(
-      initializer=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
-  try:
-    for root_dir, page_name, namespace in parse_pages:
-      symbols.extend(_GetSymbols(pool, root_dir, page_name, namespace,
-                                 variants_to_accept))
-  finally:
-    pool.terminate()
-    pool.join()
-  return sorted(symbols)
+    """Get all symbols by parsing the given pages.
+
+    Args:
+      parse_pages: a list of tuples (page_root_dir, index_page_name, namespace)
+    """
+    # By default we prefer the non-variant versions, as they're more common. But
+    # there are some symbols, whose variant is more common. This list describes
+    # those symbols.
+    variants_to_accept = {
+        # std::remove<> has variant algorithm.
+        "std::remove": ("algorithm"),
+    }
+    symbols = []
+    # Run many workers to process individual symbol pages under the symbol index.
+    # Don't allow workers to capture Ctrl-C.
+    pool = multiprocessing.Pool(
+        initializer=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN)
+    )
+    try:
+        for root_dir, page_name, namespace in parse_pages:
+            symbols.extend(
+                _GetSymbols(pool, root_dir, page_name, namespace, variants_to_accept)
+            )
+    finally:
+        pool.terminate()
+        pool.join()
+    return sorted(symbols)

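Because the _ParseSymbolPage() docstring above carries example markup, a reduced, runnable version of its row scan can be shown against that same example. This is a sketch, not the full parser, and it needs the third-party beautifulsoup4 package just as the script itself does:

from bs4 import BeautifulSoup  # third-party: beautifulsoup4

# The "Defined in header <...>" markup from the _ParseSymbolPage()
# docstring, reduced to a single table row.
html = """
<table class="t-dsc-begin">
  <tr class="t-dsc-header">
    <td colspan="2"><div>Defined in header <code>&lt;ratio&gt;</code></div></td>
  </tr>
</table>
"""
soup = BeautifulSoup(html, "html.parser")
for row in soup.select("tr.t-dsc-header"):
    if "Defined in header " in row.text:
        for header_code in row.find_all("code"):
            print(header_code.text)
# <ratio>
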
diff --git a/clang/tools/include-mapping/gen_std.py b/clang/tools/include-mapping/gen_std.py
index 944fd17263700..2390ff1f2cced 100755
--- a/clang/tools/include-mapping/gen_std.py
+++ b/clang/tools/include-mapping/gen_std.py
@@ -1,11 +1,11 @@
 #!/usr/bin/env python
-#===- gen_std.py -  ------------------------------------------*- python -*--===#
+# ===- gen_std.py -  ------------------------------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 """gen_std.py is a tool to generate a lookup table (from qualified names to
 include headers) for C/C++ Standard Library symbols by parsing archived HTML
@@ -55,244 +55,235 @@
 //===----------------------------------------------------------------------===//
 """
 
-def ParseArg():
-  parser = argparse.ArgumentParser(description='Generate StdGen file')
-  parser.add_argument('-cppreference', metavar='PATH',
-                      default='',
-                      help='path to the cppreference offline HTML directory',
-                      required=True
-                     )
-  parser.add_argument('-symbols',
-                      default='cpp',
-                      help='Generate c or cpp (removed) symbols. One of {cpp, c, cpp_removed}.',
-                      required=True) 
-  return parser.parse_args()
-
-def AdditionalHeadersForIOSymbols(symbol):
-  # IO-related symbols declared in the <iosfwd> header, per C++
-  # [iosfwd.syn 31.3.1]:
-  iosfwd_symbols = [
-      'basic_ios',
-      'basic_streambuf',
-      'basic_istream',
-      'basic_ostream',
-      'basic_iostream',
-
-      'basic_stringbuf',
-      'basic_istringstream',
-      'basic_ostringstream',
-      'basic_stringstream',
-
-      'basic_spanbuf',
-      'basic_ispanstream',
-      'basic_ospanstream',
-      'basic_spanstream',
-
-      'basic_filebuf',
-      'basic_ifstream',
-      'basic_ofstream',
-      'basic_fstream',
-
-      'basic_syncbuf',
-      'basic_osyncstream',
-
-      'istreambuf_iterator',
-      'ostreambuf_iterator',
-
-      'ios',
-      'wios',
-
-      'streambuf',
-      'istream',
-      'ostream',
-      'iostream',
-
-      'stringbuf',
-      'istringstream',
-      'ostringstream',
-      'stringstream',
-
-      'spanbuf',
-      'ispanstream',
-      'ospanstream',
-      'spanstream',
-
-      'filebuf',
-      'ifstream',
-      'ofstream',
-      'fstream',
 
-      'syncbuf',
-      'osyncstream',
-
-      'wstreambuf',
-      'wistream',
-      'wostream',
-      'wiostream',
-
-      'wstringbuf',
-      'wistringstream',
-      'wostringstream',
-      'wstringstream',
-
-      'wspanbuf',
-      'wispanstream',
-      'wospanstream',
-      'wspanstream',
-
-      'wfilebuf',
-      'wifstream',
-      'wofstream',
-      'wfstream',
+def ParseArg():
+    parser = argparse.ArgumentParser(description="Generate StdGen file")
+    parser.add_argument(
+        "-cppreference",
+        metavar="PATH",
+        default="",
+        help="path to the cppreference offline HTML directory",
+        required=True,
+    )
+    parser.add_argument(
+        "-symbols",
+        default="cpp",
+        help="Generate c or cpp (removed) symbols. One of {cpp, c, cpp_removed}.",
+        required=True,
+    )
+    return parser.parse_args()
 
-      'wsyncbuf',
-      'wosyncstream',
 
-      'fpos',
-      'streampos',
-      'wstreampos',
-      'u8streampos',
-      'u16streampos',
-      'u32streampos',
-  ]
-  assert(len(symbol.headers) == 1)
-  sym_header = symbol.headers[0]
-  headers = []
-  # <iostream> is preferred than <iosfwd>
+def AdditionalHeadersForIOSymbols(symbol):
+    # IO-related symbols declared in the <iosfwd> header, per C++
+    # [iosfwd.syn 31.3.1]:
+    iosfwd_symbols = [
+        "basic_ios",
+        "basic_streambuf",
+        "basic_istream",
+        "basic_ostream",
+        "basic_iostream",
+        "basic_stringbuf",
+        "basic_istringstream",
+        "basic_ostringstream",
+        "basic_stringstream",
+        "basic_spanbuf",
+        "basic_ispanstream",
+        "basic_ospanstream",
+        "basic_spanstream",
+        "basic_filebuf",
+        "basic_ifstream",
+        "basic_ofstream",
+        "basic_fstream",
+        "basic_syncbuf",
+        "basic_osyncstream",
+        "istreambuf_iterator",
+        "ostreambuf_iterator",
+        "ios",
+        "wios",
+        "streambuf",
+        "istream",
+        "ostream",
+        "iostream",
+        "stringbuf",
+        "istringstream",
+        "ostringstream",
+        "stringstream",
+        "spanbuf",
+        "ispanstream",
+        "ospanstream",
+        "spanstream",
+        "filebuf",
+        "ifstream",
+        "ofstream",
+        "fstream",
+        "syncbuf",
+        "osyncstream",
+        "wstreambuf",
+        "wistream",
+        "wostream",
+        "wiostream",
+        "wstringbuf",
+        "wistringstream",
+        "wostringstream",
+        "wstringstream",
+        "wspanbuf",
+        "wispanstream",
+        "wospanstream",
+        "wspanstream",
+        "wfilebuf",
+        "wifstream",
+        "wofstream",
+        "wfstream",
+        "wsyncbuf",
+        "wosyncstream",
+        "fpos",
+        "streampos",
+        "wstreampos",
+        "u8streampos",
+        "u16streampos",
+        "u32streampos",
+    ]
+    assert len(symbol.headers) == 1
+    sym_header = symbol.headers[0]
+    headers = []
+    # <iostream> is preferred than <iosfwd>
 
-  # <iostream> is an alternative of <streambuf>, <istream>, <ostream>, <ios>.
-  # per C++ [iostream.syn 31.4.1]
-  if sym_header in ["<ios>", "<istream>", "<ostream>", "<streambuf>"]:
-    headers.append("<iostream>")
+    # <iostream> is an alternative of <streambuf>, <istream>, <ostream>, <ios>.
+    # per C++ [iostream.syn 31.4.1]
+    if sym_header in ["<ios>", "<istream>", "<ostream>", "<streambuf>"]:
+        headers.append("<iostream>")
 
-  if symbol.name in iosfwd_symbols:
-    headers.append("<iosfwd>")
+    if symbol.name in iosfwd_symbols:
+        headers.append("<iosfwd>")
 
-  return headers
+    return headers
 
 
 def GetCCompatibilitySymbols(symbol):
-   # C++ form of the C standard headers.
-  c_compat_headers = {
-    "<cassert>",
-    "<cctype>",
-    "<cerrno>",
-    "<cfenv>",
-    "<cfloat>",
-    "<cinttypes>",
-    "<climits>",
-    "<clocale>",
-    "<cmath>",
-    "<csetjmp>",
-    "<csignal>",
-    "<cstdarg>",
-    "<cstddef>",
-    "<cstdint>",
-    "<cstdio>",
-    "<cstdlib>",
-    "<cstring>",
-    "<ctime>",
-    "<cuchar>",
-    "<cwchar>",
-    "<cwctype>",
-  }
-  # C++ [support.c.headers.other] 17.14.7
-  #    ..., behaves as if each name placed in the standard library namespace by
-  #    the corresponding <cname> header is placed within the global namespace
-  #    scope, except for the functions described in [sf.cmath], the
-  #    std​::​lerp function overloads ([c.math.lerp]), the declaration of
-  #    std​::​byte ([cstddef.syn]), and the functions and function templates
-  #    described in [support.types.byteops].
-  exception_symbols = {
-    "(assoc_)?laguerre[f|l]?",
-    "(assoc_|sph_)?legendre[f|l]?",
-    "beta[f|l]?",
-    "(comp_)?ellint_[1-3][f|l]?",
-    "(cyl_|sph_)?bessel_[i-k][f|l]?",
-    "(cyl_|sph_)?neumann[f|l]?",
-    "expint[f|l]?",
-    "hermite[f|l]?",
-    "riemann_zeta[f|l]?",
-    "lerp",
-    "byte",
-  }
-  assert(len(symbol.headers) == 1)
-  header = symbol.headers[0]
-  if header not in c_compat_headers:
-    return []
-  if any(re.fullmatch(x, symbol.name) for x in exception_symbols):
-    return []
-
-  # Introduce two more entries, both in the global namespace, one using the
-  # C++-compat header and another using the C header.
-  results = []
-  if symbol.namespace != None:
-    # avoid printing duplicated entries, for C macros!
-    results.append(cppreference_parser.Symbol(symbol.name, None, [header]))
-  c_header = "<" + header[2:-1] +  ".h>" # <cstdio> => <stdio.h>
-  results.append(cppreference_parser.Symbol(symbol.name, None, [c_header]))
-  return results
+    # C++ form of the C standard headers.
+    c_compat_headers = {
+        "<cassert>",
+        "<cctype>",
+        "<cerrno>",
+        "<cfenv>",
+        "<cfloat>",
+        "<cinttypes>",
+        "<climits>",
+        "<clocale>",
+        "<cmath>",
+        "<csetjmp>",
+        "<csignal>",
+        "<cstdarg>",
+        "<cstddef>",
+        "<cstdint>",
+        "<cstdio>",
+        "<cstdlib>",
+        "<cstring>",
+        "<ctime>",
+        "<cuchar>",
+        "<cwchar>",
+        "<cwctype>",
+    }
+    # C++ [support.c.headers.other] 17.14.7
+    #    ..., behaves as if each name placed in the standard library namespace by
+    #    the corresponding <cname> header is placed within the global namespace
+    #    scope, except for the functions described in [sf.cmath], the
+    #    std​::​lerp function overloads ([c.math.lerp]), the declaration of
+    #    std​::​byte ([cstddef.syn]), and the functions and function templates
+    #    described in [support.types.byteops].
+    exception_symbols = {
+        "(assoc_)?laguerre[f|l]?",
+        "(assoc_|sph_)?legendre[f|l]?",
+        "beta[f|l]?",
+        "(comp_)?ellint_[1-3][f|l]?",
+        "(cyl_|sph_)?bessel_[i-k][f|l]?",
+        "(cyl_|sph_)?neumann[f|l]?",
+        "expint[f|l]?",
+        "hermite[f|l]?",
+        "riemann_zeta[f|l]?",
+        "lerp",
+        "byte",
+    }
+    assert len(symbol.headers) == 1
+    header = symbol.headers[0]
+    if header not in c_compat_headers:
+        return []
+    if any(re.fullmatch(x, symbol.name) for x in exception_symbols):
+        return []
+
+    # Introduce two more entries, both in the global namespace, one using the
+    # C++-compat header and another using the C header.
+    results = []
+    if symbol.namespace != None:
+        # avoid printing duplicated entries, for C macros!
+        results.append(cppreference_parser.Symbol(symbol.name, None, [header]))
+    c_header = "<" + header[2:-1] + ".h>"  # <cstdio> => <stdio.h>
+    results.append(cppreference_parser.Symbol(symbol.name, None, [c_header]))
+    return results
 
 
 def main():
-  args = ParseArg()
-  if args.symbols == 'cpp':
-    page_root = os.path.join(args.cppreference, "en", "cpp")
-    symbol_index_root = os.path.join(page_root, "symbol_index")
-    parse_pages =  [
-      (page_root, "symbol_index.html", "std::"),
-      # std sub-namespace symbols have separate pages.
-      # We don't index std literal operators (e.g.
-      # std::literals::chrono_literals::operator""d); these symbols can't be
-      # accessed by std::<symbol_name>.
-      #
-      # std::placeholders symbols are handled manually in StdSpecialSymbolMap.inc
-      (symbol_index_root, "chrono.html", "std::chrono::"),
-      (symbol_index_root, "execution.html", "std::execution::"),
-      (symbol_index_root, "numbers.html", "std::numbers::"),
-      (symbol_index_root, "filesystem.html", "std::filesystem::"),
-      (symbol_index_root, "pmr.html", "std::pmr::"),
-      (symbol_index_root, "ranges.html", "std::ranges::"),
-      (symbol_index_root, "regex_constants.html", "std::regex_constants::"),
-      (symbol_index_root, "this_thread.html", "std::this_thread::"),
-      # Zombie symbols that were available from the Standard Library, but
-      # were removed in later standards.
-      (symbol_index_root, "zombie_names.html", "std::"),
-      (symbol_index_root, "macro.html", None),
-    ]
-  elif args.symbols == 'c':
-    page_root = os.path.join(args.cppreference, "en", "c")
-    symbol_index_root = page_root
-    parse_pages = [(page_root, "index.html", None)]  
-    
-  if not os.path.exists(symbol_index_root):
-    exit("Path %s doesn't exist!" % symbol_index_root)
-
-  symbols = cppreference_parser.GetSymbols(parse_pages)
-  
-  # We don't have version information from the unzipped offline HTML files,
-  # so we use the modified time of the symbol_index.html as the version.
-  index_page_path = os.path.join(page_root, "index.html")
-  cppreference_modified_date = datetime.datetime.fromtimestamp(
-    os.stat(index_page_path).st_mtime).strftime('%Y-%m-%d')
-  print(CODE_PREFIX % (args.symbols.upper(), cppreference_modified_date))
-  for symbol in symbols:
-    if len(symbol.headers) == 1:
-      augmented_symbols = [symbol]
-      augmented_symbols.extend(GetCCompatibilitySymbols(symbol))
-      for s in augmented_symbols:
-        s.headers.extend(AdditionalHeadersForIOSymbols(s))
-        for header in s.headers:
-          # SYMBOL(unqualified_name, namespace, header)
-          print("SYMBOL(%s, %s, %s)" % (s.name, s.namespace,
-                                        header))
-    elif len(symbol.headers) == 0:
-      sys.stderr.write("No header found for symbol %s\n" % symbol.name)
-    else:
-      # FIXME: support symbols with multiple headers (e.g. std::move).
-      sys.stderr.write("Ambiguous header for symbol %s: %s\n" % (
-          symbol.name, ', '.join(symbol.headers)))
-
-
-if __name__ == '__main__':
-  main()
+    args = ParseArg()
+    if args.symbols == "cpp":
+        page_root = os.path.join(args.cppreference, "en", "cpp")
+        symbol_index_root = os.path.join(page_root, "symbol_index")
+        parse_pages = [
+            (page_root, "symbol_index.html", "std::"),
+            # std sub-namespace symbols have separate pages.
+            # We don't index std literal operators (e.g.
+            # std::literals::chrono_literals::operator""d); these symbols can't be
+            # accessed by std::<symbol_name>.
+            #
+            # std::placeholders symbols are handled manually in StdSpecialSymbolMap.inc
+            (symbol_index_root, "chrono.html", "std::chrono::"),
+            (symbol_index_root, "execution.html", "std::execution::"),
+            (symbol_index_root, "numbers.html", "std::numbers::"),
+            (symbol_index_root, "filesystem.html", "std::filesystem::"),
+            (symbol_index_root, "pmr.html", "std::pmr::"),
+            (symbol_index_root, "ranges.html", "std::ranges::"),
+            (symbol_index_root, "regex_constants.html", "std::regex_constants::"),
+            (symbol_index_root, "this_thread.html", "std::this_thread::"),
+            # Zombie symbols that were available from the Standard Library, but
+            # were removed in later standards.
+            (symbol_index_root, "zombie_names.html", "std::"),
+            (symbol_index_root, "macro.html", None),
+        ]
+    elif args.symbols == "c":
+        page_root = os.path.join(args.cppreference, "en", "c")
+        symbol_index_root = page_root
+        parse_pages = [(page_root, "index.html", None)]
+
+    if not os.path.exists(symbol_index_root):
+        exit("Path %s doesn't exist!" % symbol_index_root)
+
+    symbols = cppreference_parser.GetSymbols(parse_pages)
+
+    # We don't have version information from the unzipped offline HTML files,
+    # so we use the modified time of the symbol_index.html as the version.
+    index_page_path = os.path.join(page_root, "index.html")
+    cppreference_modified_date = datetime.datetime.fromtimestamp(
+        os.stat(index_page_path).st_mtime
+    ).strftime("%Y-%m-%d")
+    print(CODE_PREFIX % (args.symbols.upper(), cppreference_modified_date))
+    for symbol in symbols:
+        if len(symbol.headers) == 1:
+            augmented_symbols = [symbol]
+            augmented_symbols.extend(GetCCompatibilitySymbols(symbol))
+            for s in augmented_symbols:
+                s.headers.extend(AdditionalHeadersForIOSymbols(s))
+                for header in s.headers:
+                    # SYMBOL(unqualified_name, namespace, header)
+                    print("SYMBOL(%s, %s, %s)" % (s.name, s.namespace, header))
+        elif len(symbol.headers) == 0:
+            sys.stderr.write("No header found for symbol %s\n" % symbol.name)
+        else:
+            # FIXME: support symbols with multiple headers (e.g. std::move).
+            sys.stderr.write(
+                "Ambiguous header for symbol %s: %s\n"
+                % (symbol.name, ", ".join(symbol.headers))
+            )
+
+
+if __name__ == "__main__":
+    main()

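The GetCCompatibilitySymbols logic above is easy to exercise on its own. Below is a minimal sketch -- not part of the commit, using a trimmed header set and plain tuples as stand-ins for cppreference_parser.Symbol -- of the <cname> to <name.h> mapping and the [support.c.headers.other] exception list:

import re

# Trimmed stand-ins for the full tables defined in the script above.
C_COMPAT_HEADERS = {"<cstdio>", "<cstdlib>", "<cmath>", "<cstddef>"}
EXCEPTION_SYMBOLS = {"lerp", "byte", "riemann_zeta[f|l]?"}

def c_compat_entries(name, namespace, header):
    """Extra (name, namespace, header) tuples for the global-namespace aliases."""
    if header not in C_COMPAT_HEADERS:
        return []
    if any(re.fullmatch(x, name) for x in EXCEPTION_SYMBOLS):
        return []  # stays std-only per [support.c.headers.other]
    results = []
    if namespace is not None:  # C macros are already global; avoid duplicates
        results.append((name, None, header))
    c_header = "<" + header[2:-1] + ".h>"  # <cstdio> => <stdio.h>
    results.append((name, None, c_header))
    return results

print(c_compat_entries("printf", "std", "<cstdio>"))
# [('printf', None, '<cstdio>'), ('printf', None, '<stdio.h>')]
print(c_compat_entries("lerp", "std", "<cmath>"))  # [] -- excepted symbol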
diff --git a/clang/tools/include-mapping/test.py b/clang/tools/include-mapping/test.py
index 507e462e75273..eef328381f2bb 100755
--- a/clang/tools/include-mapping/test.py
+++ b/clang/tools/include-mapping/test.py
@@ -1,20 +1,20 @@
 #!/usr/bin/env python
-#===- test.py -  ---------------------------------------------*- python -*--===#
+# ===- test.py -  ---------------------------------------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 from cppreference_parser import _ParseSymbolPage, _ParseIndexPage
 
 import unittest
 
-class TestStdGen(unittest.TestCase):
 
-  def testParseIndexPage(self):
-    html = """
+class TestStdGen(unittest.TestCase):
+    def testParseIndexPage(self):
+        html = """
  <a href="abs.html" title="abs"><tt>abs()</tt></a> (int) <br>
  <a href="complex/abs.html" title="abs"><tt>abs<>()</tt></a> (std::complex) <br>
  <a href="acos.html" title="acos"><tt>acos()</tt></a> <br>
@@ -22,24 +22,23 @@ def testParseIndexPage(self):
  <a href="as_bytes.html" title="as bytes"><tt>as_bytes<>()</tt></a> <span class="t-mark-rev t-since-cxx20">(since C++20)</span> <br>
  """
 
-    actual = _ParseIndexPage(html)
-    expected = [
-      ("abs", "abs.html", 'int'),
-      ("abs", "complex/abs.html", 'std::complex'),
-      ("acos", "acos.html", None),
-      ("acosh", "acosh.html", None),
-      ("as_bytes", "as_bytes.html", None),
-    ]
-    self.assertEqual(len(actual), len(expected))
-    for i in range(0, len(actual)):
-      self.assertEqual(expected[i][0], actual[i][0])
-      self.assertTrue(actual[i][1].endswith(expected[i][1]))
-      self.assertEqual(expected[i][2], actual[i][2])
-
+        actual = _ParseIndexPage(html)
+        expected = [
+            ("abs", "abs.html", "int"),
+            ("abs", "complex/abs.html", "std::complex"),
+            ("acos", "acos.html", None),
+            ("acosh", "acosh.html", None),
+            ("as_bytes", "as_bytes.html", None),
+        ]
+        self.assertEqual(len(actual), len(expected))
+        for i in range(0, len(actual)):
+            self.assertEqual(expected[i][0], actual[i][0])
+            self.assertTrue(actual[i][1].endswith(expected[i][1]))
+            self.assertEqual(expected[i][2], actual[i][2])
 
-  def testParseSymbolPage_SingleHeader(self):
-    # Defined in header <cmath>
-    html = """
+    def testParseSymbolPage_SingleHeader(self):
+        # Defined in header <cmath>
+        html = """
  <table class="t-dcl-begin"><tbody>
   <tr class="t-dsc-header">
   <td> <div>Defined in header <code><a href="cmath.html" title="cmath"><cmath></a></code>
@@ -53,14 +52,13 @@ def testParseSymbolPage_SingleHeader(self):
   </tr>
 </tbody></table>
 """
-    self.assertEqual(_ParseSymbolPage(html, 'foo'), set(['<cmath>']))
+        self.assertEqual(_ParseSymbolPage(html, "foo"), set(["<cmath>"]))
 
-
-  def testParseSymbolPage_MulHeaders(self):
-    #  Defined in header <cstddef>
-    #  Defined in header <cstdio>
-    #  Defined in header <cstdlib>
-    html = """
+    def testParseSymbolPage_MulHeaders(self):
+        #  Defined in header <cstddef>
+        #  Defined in header <cstdio>
+        #  Defined in header <cstdlib>
+        html = """
 <table class="t-dcl-begin"><tbody>
   <tr class="t-dsc-header">
     <td> <div>Defined in header <code><a href="cstddef.html" title="cstddef"><cstddef></a></code>
@@ -94,15 +92,13 @@ def testParseSymbolPage_MulHeaders(self):
   </tr>
 </tbody></table>
 """
-    self.assertEqual(_ParseSymbolPage(html, "foo"),
-                     set(['<cstdio>', '<cstdlib>']))
-
+        self.assertEqual(_ParseSymbolPage(html, "foo"), set(["<cstdio>", "<cstdlib>"]))
 
-  def testParseSymbolPage_MulHeadersInSameDiv(self):
-    # Multiple <code> blocks in a Div.
-    # Defined in header <algorithm>
-    # Defined in header <utility>
-    html = """
+    def testParseSymbolPage_MulHeadersInSameDiv(self):
+        # Multiple <code> blocks in a Div.
+        # Defined in header <algorithm>
+        # Defined in header <utility>
+        html = """
 <table class="t-dcl-begin"><tbody>
 <tr class="t-dsc-header">
 <td><div>
@@ -121,14 +117,15 @@ def testParseSymbolPage_MulHeadersInSameDiv(self):
 </tr>
 </tbody></table>
 """
-    self.assertEqual(_ParseSymbolPage(html, "foo"),
-                     set(['<algorithm>', '<utility>']))
+        self.assertEqual(
+            _ParseSymbolPage(html, "foo"), set(["<algorithm>", "<utility>"])
+        )
 
-  def testParseSymbolPage_MulSymbolsInSameTd(self):
-    # defined in header <cstdint>
-    #   int8_t
-    #   int16_t
-    html = """
+    def testParseSymbolPage_MulSymbolsInSameTd(self):
+        # defined in header <cstdint>
+        #   int8_t
+        #   int16_t
+        html = """
 <table class="t-dcl-begin"><tbody>
 <tr class="t-dsc-header">
 <td><div>
@@ -145,11 +142,9 @@ def testParseSymbolPage_MulSymbolsInSameTd(self):
 </tr>
 </tbody></table>
 """
-    self.assertEqual(_ParseSymbolPage(html, "int8_t"),
-                     set(['<cstdint>']))
-    self.assertEqual(_ParseSymbolPage(html, "int16_t"),
-                     set(['<cstdint>']))
+        self.assertEqual(_ParseSymbolPage(html, "int8_t"), set(["<cstdint>"]))
+        self.assertEqual(_ParseSymbolPage(html, "int16_t"), set(["<cstdint>"]))
 
 
-if __name__ == '__main__':
-  unittest.main()
+if __name__ == "__main__":
+    unittest.main()

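Since every hunk in this patch is mechanical, the before/after pairs above can be reproduced with black's Python API rather than the CLI. A small sketch, assuming the `black` package is installed (format_str and Mode are its documented programmatic entry points):

import black

src = (
    "self.assertEqual(_ParseSymbolPage(html, 'foo'),\n"
    "                 set(['<cstdio>', '<cstdlib>']))\n"
)
print(black.format_str(src, mode=black.Mode(line_length=88)))
# Expected: the call collapses onto a single line with double-quoted strings,
# matching the formatted hunks above.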
diff --git a/clang/tools/libclang/linker-script-to-export-list.py b/clang/tools/libclang/linker-script-to-export-list.py
index 37fb172b9a0a2..745996028d835 100644
--- a/clang/tools/libclang/linker-script-to-export-list.py
+++ b/clang/tools/libclang/linker-script-to-export-list.py
@@ -3,9 +3,9 @@
 import sys
 
 input_file = open(sys.argv[1])
-output_file = open(sys.argv[2], 'w')
+output_file = open(sys.argv[2], "w")
 
 for line in input_file:
-    m = re.search('^\s+(clang_[^;]+)', line)
+    m = re.search("^\s+(clang_[^;]+)", line)
     if m:
         output_file.write(m.group(1) + "\n")

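One pre-existing caveat in this script, untouched by the reformat: the pattern spells \s inside a plain (non-raw) string literal, which newer Python releases flag as an invalid escape sequence. A raw string expresses the same regex warning-free; a sketch of the equivalent loop:

import re
import sys

# Raw string avoids the invalid-escape warning for \s.
pattern = re.compile(r"^\s+(clang_[^;]+)")

with open(sys.argv[1]) as input_file:
    for line in input_file:
        m = pattern.search(line)
        if m:
            print(m.group(1))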
diff --git a/clang/tools/scan-build-py/lib/libear/__init__.py b/clang/tools/scan-build-py/lib/libear/__init__.py
index 0dfe8c11eba02..b3a8e6cf12518 100644
--- a/clang/tools/scan-build-py/lib/libear/__init__.py
+++ b/clang/tools/scan-build-py/lib/libear/__init__.py
@@ -13,42 +13,43 @@
 import contextlib
 import logging
 
-__all__ = ['build_libear']
+__all__ = ["build_libear"]
 
 
 def build_libear(compiler, dst_dir):
-    """ Returns the full path to the 'libear' library. """
+    """Returns the full path to the 'libear' library."""
 
     try:
         src_dir = os.path.dirname(os.path.realpath(__file__))
         toolset = make_toolset(src_dir)
         toolset.set_compiler(compiler)
-        toolset.set_language_standard('c99')
-        toolset.add_definitions(['-D_GNU_SOURCE'])
+        toolset.set_language_standard("c99")
+        toolset.add_definitions(["-D_GNU_SOURCE"])
 
         configure = do_configure(toolset)
-        configure.check_function_exists('execve', 'HAVE_EXECVE')
-        configure.check_function_exists('execv', 'HAVE_EXECV')
-        configure.check_function_exists('execvpe', 'HAVE_EXECVPE')
-        configure.check_function_exists('execvp', 'HAVE_EXECVP')
-        configure.check_function_exists('execvP', 'HAVE_EXECVP2')
-        configure.check_function_exists('exect', 'HAVE_EXECT')
-        configure.check_function_exists('execl', 'HAVE_EXECL')
-        configure.check_function_exists('execlp', 'HAVE_EXECLP')
-        configure.check_function_exists('execle', 'HAVE_EXECLE')
-        configure.check_function_exists('posix_spawn', 'HAVE_POSIX_SPAWN')
-        configure.check_function_exists('posix_spawnp', 'HAVE_POSIX_SPAWNP')
-        configure.check_symbol_exists('_NSGetEnviron', 'crt_externs.h',
-                                      'HAVE_NSGETENVIRON')
+        configure.check_function_exists("execve", "HAVE_EXECVE")
+        configure.check_function_exists("execv", "HAVE_EXECV")
+        configure.check_function_exists("execvpe", "HAVE_EXECVPE")
+        configure.check_function_exists("execvp", "HAVE_EXECVP")
+        configure.check_function_exists("execvP", "HAVE_EXECVP2")
+        configure.check_function_exists("exect", "HAVE_EXECT")
+        configure.check_function_exists("execl", "HAVE_EXECL")
+        configure.check_function_exists("execlp", "HAVE_EXECLP")
+        configure.check_function_exists("execle", "HAVE_EXECLE")
+        configure.check_function_exists("posix_spawn", "HAVE_POSIX_SPAWN")
+        configure.check_function_exists("posix_spawnp", "HAVE_POSIX_SPAWNP")
+        configure.check_symbol_exists(
+            "_NSGetEnviron", "crt_externs.h", "HAVE_NSGETENVIRON"
+        )
         configure.write_by_template(
-            os.path.join(src_dir, 'config.h.in'),
-            os.path.join(dst_dir, 'config.h'))
+            os.path.join(src_dir, "config.h.in"), os.path.join(dst_dir, "config.h")
+        )
 
-        target = create_shared_library('ear', toolset)
+        target = create_shared_library("ear", toolset)
         target.add_include(dst_dir)
-        target.add_sources('ear.c')
+        target.add_sources("ear.c")
         target.link_against(toolset.dl_libraries())
-        target.link_against(['pthread'])
+        target.link_against(["pthread"])
         target.build_release(dst_dir)
 
         return os.path.join(dst_dir, target.name)
@@ -59,10 +60,11 @@ def build_libear(compiler, dst_dir):
 
 
 def execute(cmd, *args, **kwargs):
-    """ Make subprocess execution silent. """
+    """Make subprocess execution silent."""
 
     import subprocess
-    kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT})
+
+    kwargs.update({"stdout": subprocess.PIPE, "stderr": subprocess.STDOUT})
     return subprocess.check_call(cmd, *args, **kwargs)
 
 
@@ -76,7 +78,7 @@ def TemporaryDirectory(**kwargs):
 
 
 class Toolset(object):
-    """ Abstract class to represent 
diff erent toolset. """
+    """Abstract class to represent 
diff erent toolset."""
 
     def __init__(self, src_dir):
         self.src_dir = src_dir
@@ -84,15 +86,15 @@ def __init__(self, src_dir):
         self.c_flags = []
 
     def set_compiler(self, compiler):
-        """ part of public interface """
+        """part of public interface"""
         self.compiler = compiler
 
     def set_language_standard(self, standard):
-        """ part of public interface """
-        self.c_flags.append('-std=' + standard)
+        """part of public interface"""
+        self.c_flags.append("-std=" + standard)
 
     def add_definitions(self, defines):
-        """ part of public interface """
+        """part of public interface"""
         self.c_flags.extend(defines)
 
     def dl_libraries(self):
@@ -102,8 +104,8 @@ def shared_library_name(self, name):
         raise NotImplementedError()
 
     def shared_library_c_flags(self, release):
-        extra = ['-DNDEBUG', '-O3'] if release else []
-        return extra + ['-fPIC'] + self.c_flags
+        extra = ["-DNDEBUG", "-O3"] if release else []
+        return extra + ["-fPIC"] + self.c_flags
 
     def shared_library_ld_flags(self, release, name):
         raise NotImplementedError()
@@ -117,11 +119,11 @@ def dl_libraries(self):
         return []
 
     def shared_library_name(self, name):
-        return 'lib' + name + '.dylib'
+        return "lib" + name + ".dylib"
 
     def shared_library_ld_flags(self, release, name):
-        extra = ['-dead_strip'] if release else []
-        return extra + ['-dynamiclib', '-install_name', '@rpath/' + name]
+        extra = ["-dead_strip"] if release else []
+        return extra + ["-dynamiclib", "-install_name", "@rpath/" + name]
 
 
 class UnixToolset(Toolset):
@@ -132,11 +134,11 @@ def dl_libraries(self):
         return []
 
     def shared_library_name(self, name):
-        return 'lib' + name + '.so'
+        return "lib" + name + ".so"
 
     def shared_library_ld_flags(self, release, name):
         extra = [] if release else []
-        return extra + ['-shared', '-Wl,-soname,' + name]
+        return extra + ["-shared", "-Wl,-soname," + name]
 
 
 class LinuxToolset(UnixToolset):
@@ -144,16 +146,16 @@ def __init__(self, src_dir):
         UnixToolset.__init__(self, src_dir)
 
     def dl_libraries(self):
-        return ['dl']
+        return ["dl"]
 
 
 def make_toolset(src_dir):
     platform = sys.platform
-    if platform in {'win32', 'cygwin'}:
-        raise RuntimeError('not implemented on this platform')
-    elif platform == 'darwin':
+    if platform in {"win32", "cygwin"}:
+        raise RuntimeError("not implemented on this platform")
+    elif platform == "darwin":
         return DarwinToolset(src_dir)
-    elif platform in {'linux', 'linux2'}:
+    elif platform in {"linux", "linux2"}:
         return LinuxToolset(src_dir)
     else:
         return UnixToolset(src_dir)
@@ -162,17 +164,16 @@ def make_toolset(src_dir):
 class Configure(object):
     def __init__(self, toolset):
         self.ctx = toolset
-        self.results = {'APPLE': sys.platform == 'darwin'}
+        self.results = {"APPLE": sys.platform == "darwin"}
 
     def _try_to_compile_and_link(self, source):
         try:
             with TemporaryDirectory() as work_dir:
-                src_file = 'check.c'
-                with open(os.path.join(work_dir, src_file), 'w') as handle:
+                src_file = "check.c"
+                with open(os.path.join(work_dir, src_file), "w") as handle:
                     handle.write(source)
 
-                execute([self.ctx.compiler, src_file] + self.ctx.c_flags,
-                        cwd=work_dir)
+                execute([self.ctx.compiler, src_file] + self.ctx.c_flags, cwd=work_dir)
                 return True
         except Exception:
             return False
@@ -181,39 +182,41 @@ def check_function_exists(self, function, name):
         template = "int FUNCTION(); int main() { return FUNCTION(); }"
         source = template.replace("FUNCTION", function)
 
-        logging.debug('Checking function %s', function)
+        logging.debug("Checking function %s", function)
         found = self._try_to_compile_and_link(source)
-        logging.debug('Checking function %s -- %s', function,
-                      'found' if found else 'not found')
+        logging.debug(
+            "Checking function %s -- %s", function, "found" if found else "not found"
+        )
         self.results.update({name: found})
 
     def check_symbol_exists(self, symbol, include, name):
         template = """#include <INCLUDE>
                       int main() { return ((int*)(&SYMBOL))[0]; }"""
-        source = template.replace('INCLUDE', include).replace("SYMBOL", symbol)
+        source = template.replace("INCLUDE", include).replace("SYMBOL", symbol)
 
-        logging.debug('Checking symbol %s', symbol)
+        logging.debug("Checking symbol %s", symbol)
         found = self._try_to_compile_and_link(source)
-        logging.debug('Checking symbol %s -- %s', symbol,
-                      'found' if found else 'not found')
+        logging.debug(
+            "Checking symbol %s -- %s", symbol, "found" if found else "not found"
+        )
         self.results.update({name: found})
 
     def write_by_template(self, template, output):
         def transform(line, definitions):
 
-            pattern = re.compile(r'^#cmakedefine\s+(\S+)')
+            pattern = re.compile(r"^#cmakedefine\s+(\S+)")
             m = pattern.match(line)
             if m:
                 key = m.group(1)
                 if key not in definitions or not definitions[key]:
-                    return '/* #undef {0} */{1}'.format(key, os.linesep)
+                    return "/* #undef {0} */{1}".format(key, os.linesep)
                 else:
-                    return '#define {0}{1}'.format(key, os.linesep)
+                    return "#define {0}{1}".format(key, os.linesep)
             return line
 
-        with open(template, 'r') as src_handle:
-            logging.debug('Writing config to %s', output)
-            with open(output, 'w') as dst_handle:
+        with open(template, "r") as src_handle:
+            logging.debug("Writing config to %s", output)
+            with open(output, "w") as dst_handle:
                 for line in src_handle:
                     dst_handle.write(transform(line, self.results))
 
@@ -231,28 +234,38 @@ def __init__(self, name, toolset):
         self.lib = []
 
     def add_include(self, directory):
-        self.inc.extend(['-I', directory])
+        self.inc.extend(["-I", directory])
 
     def add_sources(self, source):
         self.src.append(source)
 
     def link_against(self, libraries):
-        self.lib.extend(['-l' + lib for lib in libraries])
+        self.lib.extend(["-l" + lib for lib in libraries])
 
     def build_release(self, directory):
         for src in self.src:
-            logging.debug('Compiling %s', src)
+            logging.debug("Compiling %s", src)
             execute(
-                [self.ctx.compiler, '-c', os.path.join(self.ctx.src_dir, src),
-                 '-o', src + '.o'] + self.inc +
-                self.ctx.shared_library_c_flags(True),
-                cwd=directory)
-        logging.debug('Linking %s', self.name)
+                [
+                    self.ctx.compiler,
+                    "-c",
+                    os.path.join(self.ctx.src_dir, src),
+                    "-o",
+                    src + ".o",
+                ]
+                + self.inc
+                + self.ctx.shared_library_c_flags(True),
+                cwd=directory,
+            )
+        logging.debug("Linking %s", self.name)
         execute(
-            [self.ctx.compiler] + [src + '.o' for src in self.src] +
-            ['-o', self.name] + self.lib +
-            self.ctx.shared_library_ld_flags(True, self.name),
-            cwd=directory)
+            [self.ctx.compiler]
+            + [src + ".o" for src in self.src]
+            + ["-o", self.name]
+            + self.lib
+            + self.ctx.shared_library_ld_flags(True, self.name),
+            cwd=directory,
+        )
 
 
 def create_shared_library(name, toolset):

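The Configure class above is a miniature autoconf: check_function_exists compiles and links a one-line probe program and records whether that step succeeded. A self-contained sketch of the same idea, assuming a C compiler named "cc" on PATH (the real code takes the compiler from the toolset):

import os
import subprocess
import tempfile

def function_exists(function, compiler="cc"):
    """Probe whether `function` can be declared, compiled, and linked."""
    source = "int FUNCTION(); int main() { return FUNCTION(); }"
    source = source.replace("FUNCTION", function)
    with tempfile.TemporaryDirectory() as work_dir:
        with open(os.path.join(work_dir, "check.c"), "w") as handle:
            handle.write(source)
        try:
            # Silence compiler chatter, as execute() does above.
            subprocess.check_call(
                [compiler, "check.c"],
                cwd=work_dir,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )
            return True
        except (subprocess.CalledProcessError, OSError):
            return False

print(function_exists("posix_spawn"))  # likely True on POSIX systems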
diff --git a/clang/tools/scan-build-py/lib/libscanbuild/__init__.py b/clang/tools/scan-build-py/lib/libscanbuild/__init__.py
index 2e43281650988..ba1600866c4d7 100644
--- a/clang/tools/scan-build-py/lib/libscanbuild/__init__.py
+++ b/clang/tools/scan-build-py/lib/libscanbuild/__init__.py
@@ -14,22 +14,23 @@
 import subprocess
 import sys
 
-ENVIRONMENT_KEY = 'INTERCEPT_BUILD'
+ENVIRONMENT_KEY = "INTERCEPT_BUILD"
 
-Execution = collections.namedtuple('Execution', ['pid', 'cwd', 'cmd'])
+Execution = collections.namedtuple("Execution", ["pid", "cwd", "cmd"])
 
-CtuConfig = collections.namedtuple('CtuConfig', ['collect', 'analyze', 'dir',
-                                                 'extdef_map_cmd'])
+CtuConfig = collections.namedtuple(
+    "CtuConfig", ["collect", "analyze", "dir", "extdef_map_cmd"]
+)
 
 
 def duplicate_check(method):
-    """ Predicate to detect duplicated entries.
+    """Predicate to detect duplicated entries.
 
     Unique hash method can be used to detect duplicates. Entries are
     represented as dictionaries, which have no default hash method.
     This implementation uses a set datatype to store the unique hash values.
 
-    This method returns a method which can detect the duplicate values. """
+    This method returns a method which can detect the duplicate values."""
 
     def predicate(entry):
         entry_hash = predicate.unique(entry)
@@ -44,35 +45,36 @@ def predicate(entry):
 
 
 def run_build(command, *args, **kwargs):
-    """ Run and report build command execution
+    """Run and report build command execution
 
     :param command: array of tokens
     :return: exit code of the process
     """
-    environment = kwargs.get('env', os.environ)
-    logging.debug('run build %s, in environment: %s', command, environment)
+    environment = kwargs.get("env", os.environ)
+    logging.debug("run build %s, in environment: %s", command, environment)
     exit_code = subprocess.call(command, *args, **kwargs)
-    logging.debug('build finished with exit code: %d', exit_code)
+    logging.debug("build finished with exit code: %d", exit_code)
     return exit_code
 
 
 def run_command(command, cwd=None):
-    """ Run a given command and report the execution.
+    """Run a given command and report the execution.
 
     :param command: array of tokens
     :param cwd: the working directory where the command will be executed
     :return: output of the command
     """
+
     def decode_when_needed(result):
-        """ check_output returns bytes or string depend on python version """
-        return result.decode('utf-8') if isinstance(result, bytes) else result
+        """check_output returns bytes or string depend on python version"""
+        return result.decode("utf-8") if isinstance(result, bytes) else result
 
     try:
         directory = os.path.abspath(cwd) if cwd else os.getcwd()
-        logging.debug('exec command %s in %s', command, directory)
-        output = subprocess.check_output(command,
-                                         cwd=directory,
-                                         stderr=subprocess.STDOUT)
+        logging.debug("exec command %s in %s", command, directory)
+        output = subprocess.check_output(
+            command, cwd=directory, stderr=subprocess.STDOUT
+        )
         return decode_when_needed(output).splitlines()
     except subprocess.CalledProcessError as ex:
         ex.output = decode_when_needed(ex.output).splitlines()
@@ -80,7 +82,7 @@ def decode_when_needed(result):
 
 
 def reconfigure_logging(verbose_level):
-    """ Reconfigure logging level and format based on the verbose flag.
+    """Reconfigure logging level and format based on the verbose flag.
 
     :param verbose_level: number of `-v` flags received by the command
     :return: no return value
@@ -95,45 +97,48 @@ def reconfigure_logging(verbose_level):
     root.setLevel(level)
     # Be verbose with messages.
     if verbose_level <= 3:
-        fmt_string = '%(name)s: %(levelname)s: %(message)s'
+        fmt_string = "%(name)s: %(levelname)s: %(message)s"
     else:
-        fmt_string = '%(name)s: %(levelname)s: %(funcName)s: %(message)s'
+        fmt_string = "%(name)s: %(levelname)s: %(funcName)s: %(message)s"
     handler = logging.StreamHandler(sys.stdout)
     handler.setFormatter(logging.Formatter(fmt=fmt_string))
     root.handlers = [handler]
 
 
 def command_entry_point(function):
-    """ Decorator for command entry methods.
+    """Decorator for command entry methods.
 
     The decorator initializes/shuts down logging and guards against programming
     errors (catches exceptions).
 
     The decorated method can have arbitrary parameters, the return value will
-    be the exit code of the process. """
+    be the exit code of the process."""
 
     @functools.wraps(function)
     def wrapper(*args, **kwargs):
-        """ Do housekeeping tasks and execute the wrapped method. """
+        """Do housekeeping tasks and execute the wrapped method."""
 
         try:
-            logging.basicConfig(format='%(name)s: %(message)s',
-                                level=logging.WARNING,
-                                stream=sys.stdout)
+            logging.basicConfig(
+                format="%(name)s: %(message)s", level=logging.WARNING, stream=sys.stdout
+            )
             # This hack gets the executable name as %(name).
             logging.getLogger().name = os.path.basename(sys.argv[0])
             return function(*args, **kwargs)
         except KeyboardInterrupt:
-            logging.warning('Keyboard interrupt')
+            logging.warning("Keyboard interrupt")
             return 130  # Signal received exit code for bash.
         except Exception:
-            logging.exception('Internal error.')
+            logging.exception("Internal error.")
             if logging.getLogger().isEnabledFor(logging.DEBUG):
-                logging.error("Please report this bug and attach the output "
-                              "to the bug report")
+                logging.error(
+                    "Please report this bug and attach the output " "to the bug report"
+                )
             else:
-                logging.error("Please run this command again and turn on "
-                              "verbose mode (add '-vvvv' as argument).")
+                logging.error(
+                    "Please run this command again and turn on "
+                    "verbose mode (add '-vvvv' as argument)."
+                )
             return 64  # Some unused exit code for internal errors.
         finally:
             logging.shutdown()
@@ -142,7 +147,7 @@ def wrapper(*args, **kwargs):
 
 
 def compiler_wrapper(function):
-    """ Implements compiler wrapper base functionality.
+    """Implements compiler wrapper base functionality.
 
     A compiler wrapper executes the real compiler, then implements some
     functionality, then returns with the real compiler exit code.
@@ -155,53 +160,56 @@ def compiler_wrapper(function):
     The :param function: will receive the following arguments:
 
     :param result:       the exit code of the compilation.
-    :param execution:    the command executed by the wrapper. """
+    :param execution:    the command executed by the wrapper."""
 
     def is_cxx_compiler():
-        """ Find out was it a C++ compiler call. Compiler wrapper names
+        """Find out was it a C++ compiler call. Compiler wrapper names
         contain the compiler type. C++ compiler wrappers ends with `c++`,
-        but might have `.exe` extension on windows. """
+        but might have `.exe` extension on windows."""
 
         wrapper_command = os.path.basename(sys.argv[0])
-        return re.match(r'(.+)c\+\+(.*)', wrapper_command)
+        return re.match(r"(.+)c\+\+(.*)", wrapper_command)
 
     def run_compiler(executable):
-        """ Execute compilation with the real compiler. """
+        """Execute compilation with the real compiler."""
 
         command = executable + sys.argv[1:]
-        logging.debug('compilation: %s', command)
+        logging.debug("compilation: %s", command)
         result = subprocess.call(command)
-        logging.debug('compilation exit code: %d', result)
+        logging.debug("compilation exit code: %d", result)
         return result
 
     # Get relevant parameters from environment.
     parameters = json.loads(os.environ[ENVIRONMENT_KEY])
-    reconfigure_logging(parameters['verbose'])
+    reconfigure_logging(parameters["verbose"])
     # Execute the requested compilation. Do crash if anything goes wrong.
     cxx = is_cxx_compiler()
-    compiler = parameters['cxx'] if cxx else parameters['cc']
+    compiler = parameters["cxx"] if cxx else parameters["cc"]
     result = run_compiler(compiler)
     # Call the wrapped method and ignore its return value.
     try:
         call = Execution(
             pid=os.getpid(),
             cwd=os.getcwd(),
-            cmd=['c++' if cxx else 'cc'] + sys.argv[1:])
+            cmd=["c++" if cxx else "cc"] + sys.argv[1:],
+        )
         function(result, call)
     except:
-        logging.exception('Compiler wrapper failed complete.')
+        logging.exception("Compiler wrapper failed complete.")
     finally:
         # Always return the real compiler exit code.
         return result
 
 
 def wrapper_environment(args):
-    """ Set up environment for interpose compiler wrapper."""
+    """Set up environment for interpose compiler wrapper."""
 
     return {
-        ENVIRONMENT_KEY: json.dumps({
-            'verbose': args.verbose,
-            'cc': shlex.split(args.cc),
-            'cxx': shlex.split(args.cxx)
-        })
+        ENVIRONMENT_KEY: json.dumps(
+            {
+                "verbose": args.verbose,
+                "cc": shlex.split(args.cc),
+                "cxx": shlex.split(args.cxx),
+            }
+        )
     }

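A usage note for duplicate_check above: it is a predicate factory whose returned closure remembers every hash it has produced, so the first occurrence of an entry answers False and any repeat answers True. The sketch below reconstructs the body from the docstring (the `state` attribute name is an assumption of this sketch) with an illustrative JSON-based hash:

import json

def duplicate_check(method):
    """Sketch of the factory: remember hashes already seen."""
    def predicate(entry):
        entry_hash = predicate.unique(entry)
        if entry_hash not in predicate.state:
            predicate.state.add(entry_hash)
            return False
        return True

    predicate.unique = method
    predicate.state = set()
    return predicate

is_duplicate = duplicate_check(lambda entry: hash(json.dumps(entry, sort_keys=True)))
print(is_duplicate({"file": "a.c"}))  # False -- first sighting
print(is_duplicate({"file": "a.c"}))  # True  -- duplicate entry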
diff --git a/clang/tools/scan-build-py/lib/libscanbuild/analyze.py b/clang/tools/scan-build-py/lib/libscanbuild/analyze.py
index ebd6df1dc7579..72aac8f545240 100644
--- a/clang/tools/scan-build-py/lib/libscanbuild/analyze.py
+++ b/clang/tools/scan-build-py/lib/libscanbuild/analyze.py
@@ -25,37 +25,49 @@
 import glob
 from collections import defaultdict
 
-from libscanbuild import command_entry_point, compiler_wrapper, \
-    wrapper_environment, run_build, run_command, CtuConfig
-from libscanbuild.arguments import parse_args_for_scan_build, \
-    parse_args_for_analyze_build
+from libscanbuild import (
+    command_entry_point,
+    compiler_wrapper,
+    wrapper_environment,
+    run_build,
+    run_command,
+    CtuConfig,
+)
+from libscanbuild.arguments import (
+    parse_args_for_scan_build,
+    parse_args_for_analyze_build,
+)
 from libscanbuild.intercept import capture
 from libscanbuild.report import document
-from libscanbuild.compilation import split_command, classify_source, \
-    compiler_language
-from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \
-    ClangErrorException
+from libscanbuild.compilation import split_command, classify_source, compiler_language
+from libscanbuild.clang import (
+    get_version,
+    get_arguments,
+    get_triple_arch,
+    ClangErrorException,
+)
 from libscanbuild.shell import decode
 
-__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
+__all__ = ["scan_build", "analyze_build", "analyze_compiler_wrapper"]
 
-scanbuild_dir = os.path.dirname(os.path.realpath(__import__('sys').argv[0]))
+scanbuild_dir = os.path.dirname(os.path.realpath(__import__("sys").argv[0]))
 
-COMPILER_WRAPPER_CC = os.path.join(scanbuild_dir, '..', 'libexec', 'analyze-cc')
-COMPILER_WRAPPER_CXX = os.path.join(scanbuild_dir, '..', 'libexec', 'analyze-c++')
+COMPILER_WRAPPER_CC = os.path.join(scanbuild_dir, "..", "libexec", "analyze-cc")
+COMPILER_WRAPPER_CXX = os.path.join(scanbuild_dir, "..", "libexec", "analyze-c++")
 
-CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
-CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'
+CTU_EXTDEF_MAP_FILENAME = "externalDefMap.txt"
+CTU_TEMP_DEFMAP_FOLDER = "tmpExternalDefMaps"
 
 
 @command_entry_point
 def scan_build():
-    """ Entry point for scan-build command. """
+    """Entry point for scan-build command."""
 
     args = parse_args_for_scan_build()
     # will re-assign the report directory as new output
     with report_directory(
-            args.output, args.keep_empty, args.output_format) as args.output:
+        args.output, args.keep_empty, args.output_format
+    ) as args.output:
         # Run against a build command. There are cases when an analyzer run
         # is not required. But we need to set up everything for the
         # wrappers, because 'configure' needs to capture the CC/CXX values
@@ -78,11 +90,13 @@ def scan_build():
 
 @command_entry_point
 def analyze_build():
-    """ Entry point for analyze-build command. """
+    """Entry point for analyze-build command."""
 
     args = parse_args_for_analyze_build()
     # will re-assign the report directory as new output
-    with report_directory(args.output, args.keep_empty, args.output_format) as args.output:
+    with report_directory(
+        args.output, args.keep_empty, args.output_format
+    ) as args.output:
         # Run the analyzer against a compilation db.
         govern_analyzer_runs(args)
         # Cover report generation and bug counting.
@@ -92,52 +106,57 @@ def analyze_build():
 
 
 def need_analyzer(args):
-    """ Check the intent of the build command.
+    """Check the intent of the build command.
 
     When the static analyzer runs against the project configure step, it should
     be silent; there is no need to run the analyzer or generate a report.
 
     Running `scan-build` against the configure step might be necessary
     when compiler wrappers are used. That's the moment when the build setup
-    checks the compiler and captures the location for the build process. """
+    checks the compiler and captures the location for the build process."""
 
-    return len(args) and not re.search(r'configure|autogen', args[0])
+    return len(args) and not re.search(r"configure|autogen", args[0])
 
 
 def prefix_with(constant, pieces):
-    """ From a sequence create another sequence where every second element
+    """From a sequence create another sequence where every second element
     is from the original sequence and the odd elements are the prefix.
 
-    eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
+    eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3]"""
 
     return [elem for piece in pieces for elem in [constant, piece]]
 
 
 def get_ctu_config_from_args(args):
-    """ CTU configuration is created from the chosen phases and dir. """
+    """CTU configuration is created from the chosen phases and dir."""
 
     return (
-        CtuConfig(collect=args.ctu_phases.collect,
-                  analyze=args.ctu_phases.analyze,
-                  dir=args.ctu_dir,
-                  extdef_map_cmd=args.extdef_map_cmd)
-        if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
-        else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))
+        CtuConfig(
+            collect=args.ctu_phases.collect,
+            analyze=args.ctu_phases.analyze,
+            dir=args.ctu_dir,
+            extdef_map_cmd=args.extdef_map_cmd,
+        )
+        if hasattr(args, "ctu_phases") and hasattr(args.ctu_phases, "dir")
+        else CtuConfig(collect=False, analyze=False, dir="", extdef_map_cmd="")
+    )
 
 
 def get_ctu_config_from_json(ctu_conf_json):
-    """ CTU configuration is created from the chosen phases and dir. """
+    """CTU configuration is created from the chosen phases and dir."""
 
     ctu_config = json.loads(ctu_conf_json)
     # Recover namedtuple from json when coming from analyze-cc or analyze-c++
-    return CtuConfig(collect=ctu_config[0],
-                     analyze=ctu_config[1],
-                     dir=ctu_config[2],
-                     extdef_map_cmd=ctu_config[3])
+    return CtuConfig(
+        collect=ctu_config[0],
+        analyze=ctu_config[1],
+        dir=ctu_config[2],
+        extdef_map_cmd=ctu_config[3],
+    )
 
 
 def create_global_ctu_extdef_map(extdef_map_lines):
-    """ Takes iterator of individual external definition maps and creates a
+    """Takes iterator of individual external definition maps and creates a
     global map keeping only unique names. We leave conflicting names out of
     CTU.
 
@@ -151,7 +170,7 @@ def create_global_ctu_extdef_map(extdef_map_lines):
     mangled_to_asts = defaultdict(set)
 
     for line in extdef_map_lines:
-        mangled_name, ast_file = line.strip().split(' ', 1)
+        mangled_name, ast_file = line.strip().split(" ", 1)
         mangled_to_asts[mangled_name].add(ast_file)
 
     mangled_ast_pairs = []
@@ -164,7 +183,7 @@ def create_global_ctu_extdef_map(extdef_map_lines):
 
 
 def merge_ctu_extdef_maps(ctudir):
-    """ Merge individual external definition maps into a global one.
+    """Merge individual external definition maps into a global one.
 
     As the collect phase runs in parallel on multiple threads, all compilation
     units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER.
@@ -174,30 +193,28 @@ def merge_ctu_extdef_maps(ctudir):
     CTU_EXTDEF_MAP_FILENAME."""
 
     def generate_extdef_map_lines(extdefmap_dir):
-        """ Iterate over all lines of input files in a determined order. """
+        """Iterate over all lines of input files in a determined order."""
 
-        files = glob.glob(os.path.join(extdefmap_dir, '*'))
+        files = glob.glob(os.path.join(extdefmap_dir, "*"))
         files.sort()
         for filename in files:
-            with open(filename, 'r') as in_file:
+            with open(filename, "r") as in_file:
                 for line in in_file:
                     yield line
 
     def write_global_map(arch, mangled_ast_pairs):
-        """ Write (mangled name, ast file) pairs into final file. """
+        """Write (mangled name, ast file) pairs into final file."""
 
-        extern_defs_map_file = os.path.join(ctudir, arch,
-                                           CTU_EXTDEF_MAP_FILENAME)
-        with open(extern_defs_map_file, 'w') as out_file:
+        extern_defs_map_file = os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME)
+        with open(extern_defs_map_file, "w") as out_file:
             for mangled_name, ast_file in mangled_ast_pairs:
-                out_file.write('%s %s\n' % (mangled_name, ast_file))
+                out_file.write("%s %s\n" % (mangled_name, ast_file))
 
-    triple_arches = glob.glob(os.path.join(ctudir, '*'))
+    triple_arches = glob.glob(os.path.join(ctudir, "*"))
     for triple_path in triple_arches:
         if os.path.isdir(triple_path):
             triple_arch = os.path.basename(triple_path)
-            extdefmap_dir = os.path.join(ctudir, triple_arch,
-                                     CTU_TEMP_DEFMAP_FOLDER)
+            extdefmap_dir = os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER)
 
             extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
             mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
@@ -208,45 +225,49 @@ def write_global_map(arch, mangled_ast_pairs):
 
 
 def run_analyzer_parallel(args):
-    """ Runs the analyzer against the given compilation database. """
+    """Runs the analyzer against the given compilation database."""
 
     def exclude(filename, directory):
-        """ Return true when any excluded directory prefix the filename. """
+        """Return true when any excluded directory prefix the filename."""
         if not os.path.isabs(filename):
             # filename is either absolute or relative to directory. Need to turn
             # it to absolute since 'args.excludes' are absolute paths.
             filename = os.path.normpath(os.path.join(directory, filename))
-        return any(re.match(r'^' + exclude_directory, filename)
-                   for exclude_directory in args.excludes)
+        return any(
+            re.match(r"^" + exclude_directory, filename)
+            for exclude_directory in args.excludes
+        )
 
     consts = {
-        'clang': args.clang,
-        'output_dir': args.output,
-        'output_format': args.output_format,
-        'output_failures': args.output_failures,
-        'direct_args': analyzer_params(args),
-        'force_debug': args.force_debug,
-        'ctu': get_ctu_config_from_args(args)
+        "clang": args.clang,
+        "output_dir": args.output,
+        "output_format": args.output_format,
+        "output_failures": args.output_failures,
+        "direct_args": analyzer_params(args),
+        "force_debug": args.force_debug,
+        "ctu": get_ctu_config_from_args(args),
     }
 
-    logging.debug('run analyzer against compilation database')
-    with open(args.cdb, 'r') as handle:
-        generator = (dict(cmd, **consts)
-                     for cmd in json.load(handle) if not exclude(
-                            cmd['file'], cmd['directory']))
+    logging.debug("run analyzer against compilation database")
+    with open(args.cdb, "r") as handle:
+        generator = (
+            dict(cmd, **consts)
+            for cmd in json.load(handle)
+            if not exclude(cmd["file"], cmd["directory"])
+        )
         # when verbose output requested execute sequentially
         pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
         for current in pool.imap_unordered(run, generator):
             if current is not None:
                 # display error message from the static analyzer
-                for line in current['error_output']:
+                for line in current["error_output"]:
                     logging.info(line.rstrip())
         pool.close()
         pool.join()
 
 
 def govern_analyzer_runs(args):
-    """ Governs multiple runs in CTU mode or runs once in normal mode. """
+    """Governs multiple runs in CTU mode or runs once in normal mode."""
 
     ctu_config = get_ctu_config_from_args(args)
     # If we do a CTU collect (1st phase) we remove all previous collection
@@ -262,12 +283,14 @@ def govern_analyzer_runs(args):
     if ctu_config.collect and ctu_config.analyze:
         # CTU strings are coming from args.ctu_dir and extdef_map_cmd,
         # so we can leave it empty
-        args.ctu_phases = CtuConfig(collect=True, analyze=False,
-                                    dir='', extdef_map_cmd='')
+        args.ctu_phases = CtuConfig(
+            collect=True, analyze=False, dir="", extdef_map_cmd=""
+        )
         run_analyzer_parallel(args)
         merge_ctu_extdef_maps(ctu_config.dir)
-        args.ctu_phases = CtuConfig(collect=False, analyze=True,
-                                    dir='', extdef_map_cmd='')
+        args.ctu_phases = CtuConfig(
+            collect=False, analyze=True, dir="", extdef_map_cmd=""
+        )
         run_analyzer_parallel(args)
         shutil.rmtree(ctu_config.dir, ignore_errors=True)
     else:
@@ -278,36 +301,38 @@ def govern_analyzer_runs(args):
 
 
 def setup_environment(args):
-    """ Set up environment for build command to interpose compiler wrapper. """
+    """Set up environment for build command to interpose compiler wrapper."""
 
     environment = dict(os.environ)
     environment.update(wrapper_environment(args))
-    environment.update({
-        'CC': COMPILER_WRAPPER_CC,
-        'CXX': COMPILER_WRAPPER_CXX,
-        'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
-        'ANALYZE_BUILD_REPORT_DIR': args.output,
-        'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
-        'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
-        'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
-        'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
-        'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
-    })
+    environment.update(
+        {
+            "CC": COMPILER_WRAPPER_CC,
+            "CXX": COMPILER_WRAPPER_CXX,
+            "ANALYZE_BUILD_CLANG": args.clang if need_analyzer(args.build) else "",
+            "ANALYZE_BUILD_REPORT_DIR": args.output,
+            "ANALYZE_BUILD_REPORT_FORMAT": args.output_format,
+            "ANALYZE_BUILD_REPORT_FAILURES": "yes" if args.output_failures else "",
+            "ANALYZE_BUILD_PARAMETERS": " ".join(analyzer_params(args)),
+            "ANALYZE_BUILD_FORCE_DEBUG": "yes" if args.force_debug else "",
+            "ANALYZE_BUILD_CTU": json.dumps(get_ctu_config_from_args(args)),
+        }
+    )
     return environment
 
 
 @command_entry_point
 def analyze_compiler_wrapper():
-    """ Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
+    """Entry point for `analyze-cc` and `analyze-c++` compiler wrappers."""
 
     return compiler_wrapper(analyze_compiler_wrapper_impl)
 
 
 def analyze_compiler_wrapper_impl(result, execution):
-    """ Implements analyzer compiler wrapper functionality. """
+    """Implements analyzer compiler wrapper functionality."""
 
     # don't run analyzer when compilation fails or when it's not requested.
-    if result or not os.getenv('ANALYZE_BUILD_CLANG'):
+    if result or not os.getenv("ANALYZE_BUILD_CLANG"):
         return
 
     # check: is it a compilation?
@@ -316,55 +341,56 @@ def analyze_compiler_wrapper_impl(result, execution):
         return
     # collect the needed parameters from environment, crash when missing
     parameters = {
-        'clang': os.getenv('ANALYZE_BUILD_CLANG'),
-        'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
-        'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
-        'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
-        'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
-                                 '').split(' '),
-        'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
-        'directory': execution.cwd,
-        'command': [execution.cmd[0], '-c'] + compilation.flags,
-        'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
+        "clang": os.getenv("ANALYZE_BUILD_CLANG"),
+        "output_dir": os.getenv("ANALYZE_BUILD_REPORT_DIR"),
+        "output_format": os.getenv("ANALYZE_BUILD_REPORT_FORMAT"),
+        "output_failures": os.getenv("ANALYZE_BUILD_REPORT_FAILURES"),
+        "direct_args": os.getenv("ANALYZE_BUILD_PARAMETERS", "").split(" "),
+        "force_debug": os.getenv("ANALYZE_BUILD_FORCE_DEBUG"),
+        "directory": execution.cwd,
+        "command": [execution.cmd[0], "-c"] + compilation.flags,
+        "ctu": get_ctu_config_from_json(os.getenv("ANALYZE_BUILD_CTU")),
     }
     # call static analyzer against the compilation
     for source in compilation.files:
-        parameters.update({'file': source})
-        logging.debug('analyzer parameters %s', parameters)
+        parameters.update({"file": source})
+        logging.debug("analyzer parameters %s", parameters)
         current = run(parameters)
         # display error message from the static analyzer
         if current is not None:
-            for line in current['error_output']:
+            for line in current["error_output"]:
                 logging.info(line.rstrip())
 
 
 @contextlib.contextmanager
 def report_directory(hint, keep, output_format):
-    """ Responsible for the report directory.
+    """Responsible for the report directory.
 
     hint -- could specify the parent directory of the output directory.
-    keep -- a boolean value to keep or delete the empty report directory. """
+    keep -- a boolean value to keep or delete the empty report directory."""
 
-    stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
+    stamp_format = "scan-build-%Y-%m-%d-%H-%M-%S-%f-"
     stamp = datetime.datetime.now().strftime(stamp_format)
     parent_dir = os.path.abspath(hint)
     if not os.path.exists(parent_dir):
         os.makedirs(parent_dir)
     name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)
 
-    logging.info('Report directory created: %s', name)
+    logging.info("Report directory created: %s", name)
 
     try:
         yield name
     finally:
         args = (name,)
         if os.listdir(name):
-            if output_format not in ['sarif', 'sarif-html']: # FIXME:
+            if output_format not in ["sarif", "sarif-html"]:  # FIXME:
                 # 'scan-view' currently does not support sarif format.
                 msg = "Run 'scan-view %s' to examine bug reports."
-            elif output_format == 'sarif-html':
-                msg = "Run 'scan-view %s' to examine bug reports or see " \
+            elif output_format == "sarif-html":
+                msg = (
+                    "Run 'scan-view %s' to examine bug reports or see "
                     "merged sarif results at %s/results-merged.sarif."
+                )
                 args = (name, name)
             else:
                 msg = "View merged sarif results at %s/results-merged.sarif."
@@ -381,53 +407,53 @@ def report_directory(hint, keep, output_format):
 
 
 def analyzer_params(args):
-    """ A group of command line arguments can mapped to command
-    line arguments of the analyzer. This method generates those. """
+    """A group of command line arguments can mapped to command
+    line arguments of the analyzer. This method generates those."""
 
     result = []
 
     if args.constraints_model:
-        result.append('-analyzer-constraints={0}'.format(
-            args.constraints_model))
+        result.append("-analyzer-constraints={0}".format(args.constraints_model))
     if args.internal_stats:
-        result.append('-analyzer-stats')
+        result.append("-analyzer-stats")
     if args.analyze_headers:
-        result.append('-analyzer-opt-analyze-headers')
+        result.append("-analyzer-opt-analyze-headers")
     if args.stats:
-        result.append('-analyzer-checker=debug.Stats')
+        result.append("-analyzer-checker=debug.Stats")
     if args.maxloop:
-        result.extend(['-analyzer-max-loop', str(args.maxloop)])
+        result.extend(["-analyzer-max-loop", str(args.maxloop)])
     if args.output_format:
-        result.append('-analyzer-output={0}'.format(args.output_format))
+        result.append("-analyzer-output={0}".format(args.output_format))
     if args.analyzer_config:
-        result.extend(['-analyzer-config', args.analyzer_config])
+        result.extend(["-analyzer-config", args.analyzer_config])
     if args.verbose >= 4:
-        result.append('-analyzer-display-progress')
+        result.append("-analyzer-display-progress")
     if args.plugins:
-        result.extend(prefix_with('-load', args.plugins))
+        result.extend(prefix_with("-load", args.plugins))
     if args.enable_checker:
-        checkers = ','.join(args.enable_checker)
-        result.extend(['-analyzer-checker', checkers])
+        checkers = ",".join(args.enable_checker)
+        result.extend(["-analyzer-checker", checkers])
     if args.disable_checker:
-        checkers = ','.join(args.disable_checker)
-        result.extend(['-analyzer-disable-checker', checkers])
+        checkers = ",".join(args.disable_checker)
+        result.extend(["-analyzer-disable-checker", checkers])
 
-    return prefix_with('-Xclang', result)
+    return prefix_with("-Xclang", result)
 
 
 def require(required):
-    """ Decorator for checking the required values in state.
+    """Decorator for checking the required values in state.
 
     It checks the required attributes in the passed state and stops when
-    any of those is missing. """
+    any of those is missing."""
 
     def decorator(function):
         @functools.wraps(function)
         def wrapper(*args, **kwargs):
             for key in required:
                 if key not in args[0]:
-                    raise KeyError('{0} not passed to {1}'.format(
-                        key, function.__name__))
+                    raise KeyError(
+                        "{0} not passed to {1}".format(key, function.__name__)
+                    )
 
             return function(*args, **kwargs)
 
@@ -436,18 +462,22 @@ def wrapper(*args, **kwargs):
     return decorator
 
 
-@require(['command',  # entry from compilation database
-          'directory',  # entry from compilation database
-          'file',  # entry from compilation database
-          'clang',  # clang executable name (and path)
-          'direct_args',  # arguments from command line
-          'force_debug',  # kill non debug macros
-          'output_dir',  # where generated report files shall go
-          'output_format',  # it's 'plist', 'html', 'plist-html', 'plist-multi-file', 'sarif', or 'sarif-html'
-          'output_failures',  # generate crash reports or not
-          'ctu'])  # ctu control options
+@require(
+    [
+        "command",  # entry from compilation database
+        "directory",  # entry from compilation database
+        "file",  # entry from compilation database
+        "clang",  # clang executable name (and path)
+        "direct_args",  # arguments from command line
+        "force_debug",  # kill non debug macros
+        "output_dir",  # where generated report files shall go
+        "output_format",  # it's 'plist', 'html', 'plist-html', 'plist-multi-file', 'sarif', or 'sarif-html'
+        "output_failures",  # generate crash reports or not
+        "ctu",
+    ]
+)  # ctu control options
 def run(opts):
-    """ Entry point to run (or not) static analyzer against a single entry
+    """Entry point to run (or not) static analyzer against a single entry
     of the compilation database.
 
     This complex task is decomposed into smaller methods which are calling
@@ -457,10 +487,10 @@ def run(opts):
     The passed parameter is a Python dictionary. Each method first checks
     that the needed parameters were received. (This is done by the 'require'
     decorator. It's like an 'assert' to check the contract between the
-    caller and the called method.) """
+    caller and the called method.)"""
 
     try:
-        command = opts.pop('command')
+        command = opts.pop("command")
         command = command if isinstance(command, list) else decode(command)
         logging.debug("Run analyzer against '%s'", command)
         opts.update(classify_parameters(command))
@@ -471,25 +501,35 @@ def run(opts):
         return None
 
 
-@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
-          'error_output', 'exit_code'])
+@require(
+    [
+        "clang",
+        "directory",
+        "flags",
+        "file",
+        "output_dir",
+        "language",
+        "error_output",
+        "exit_code",
+    ]
+)
 def report_failure(opts):
-    """ Create report when analyzer failed.
+    """Create report when analyzer failed.
 
     The major report is the preprocessor output. The output filename is
     generated randomly. The compiler output is also captured into a '.stderr.txt' file.
-    And some more execution context also saved into '.info.txt' file. """
+    Some more execution context is also saved into a '.info.txt' file."""
 
     def extension():
-        """ Generate preprocessor file extension. """
+        """Generate preprocessor file extension."""
 
-        mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
-        return mapping.get(opts['language'], '.i')
+        mapping = {"objective-c++": ".mii", "objective-c": ".mi", "c++": ".ii"}
+        return mapping.get(opts["language"], ".i")
 
     def destination():
-        """ Creates failures directory if not exits yet. """
+        """Creates failures directory if not exits yet."""
 
-        failures_dir = os.path.join(opts['output_dir'], 'failures')
+        failures_dir = os.path.join(opts["output_dir"], "failures")
         if not os.path.isdir(failures_dir):
             os.makedirs(failures_dir)
         return failures_dir
@@ -497,17 +537,20 @@ def destination():
     # Classify error type: when Clang is terminated by a signal it's a 'Crash'.
     # (python subprocess Popen.returncode is negative when the child is
     # terminated by a signal.) Everything else is 'Other Error'.
-    error = 'crash' if opts['exit_code'] < 0 else 'other_error'
+    error = "crash" if opts["exit_code"] < 0 else "other_error"
     # Create preprocessor output file name. (This is blindly following the
     # Perl implementation.)
-    (handle, name) = tempfile.mkstemp(suffix=extension(),
-                                      prefix='clang_' + error + '_',
-                                      dir=destination())
+    (handle, name) = tempfile.mkstemp(
+        suffix=extension(), prefix="clang_" + error + "_", dir=destination()
+    )
     os.close(handle)
     # Execute Clang again, but run the syntax check only.
-    cwd = opts['directory']
-    cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \
-        [opts['file'], '-o', name]
+    cwd = opts["directory"]
+    cmd = (
+        [opts["clang"], "-fsyntax-only", "-E"]
+        + opts["flags"]
+        + [opts["file"], "-o", name]
+    )
     try:
         cmd = get_arguments(cmd, cwd)
         run_command(cmd, cwd=cwd)
@@ -516,72 +559,79 @@ def destination():
     except ClangErrorException:
         pass
     # write general information about the crash
-    with open(name + '.info.txt', 'w') as handle:
-        handle.write(opts['file'] + os.linesep)
-        handle.write(error.title().replace('_', ' ') + os.linesep)
-        handle.write(' '.join(cmd) + os.linesep)
-        handle.write(' '.join(os.uname()) + os.linesep)
-        handle.write(get_version(opts['clang']))
+    with open(name + ".info.txt", "w") as handle:
+        handle.write(opts["file"] + os.linesep)
+        handle.write(error.title().replace("_", " ") + os.linesep)
+        handle.write(" ".join(cmd) + os.linesep)
+        handle.write(" ".join(os.uname()) + os.linesep)
+        handle.write(get_version(opts["clang"]))
         handle.close()
     # write the captured output too
-    with open(name + '.stderr.txt', 'w') as handle:
-        handle.writelines(opts['error_output'])
+    with open(name + ".stderr.txt", "w") as handle:
+        handle.writelines(opts["error_output"])
         handle.close()
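
Illustration (not part of the diff): the 'crash' classification relies on
subprocess semantics: on POSIX, Popen.returncode is the negated signal
number when the child is killed by a signal. A small POSIX-only demo:

    import signal
    import subprocess

    proc = subprocess.Popen(["sleep", "10"])
    proc.send_signal(signal.SIGSEGV)  # simulate a compiler crash
    proc.wait()
    print(proc.returncode)  # -11 on Linux => classified as 'crash'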
 
 
-@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
-          'output_format'])
+@require(
+    [
+        "clang",
+        "directory",
+        "flags",
+        "direct_args",
+        "file",
+        "output_dir",
+        "output_format",
+    ]
+)
 def run_analyzer(opts, continuation=report_failure):
-    """ It assembles the analysis command line and executes it. Capture the
+    """It assembles the analysis command line and executes it. Capture the
     output of the analysis and returns with it. If failure reports are
-    requested, it calls the continuation to generate it. """
+    requested, it calls the continuation to generate it."""
 
     def target():
-        """ Creates output file name for reports. """
-        if opts['output_format'] in {
-                'plist',
-                'plist-html',
-                'plist-multi-file'}:
-            (handle, name) = tempfile.mkstemp(prefix='report-',
-                                              suffix='.plist',
-                                              dir=opts['output_dir'])
+        """Creates output file name for reports."""
+        if opts["output_format"] in {"plist", "plist-html", "plist-multi-file"}:
+            (handle, name) = tempfile.mkstemp(
+                prefix="report-", suffix=".plist", dir=opts["output_dir"]
+            )
             os.close(handle)
             return name
-        elif opts['output_format'] in {
-                'sarif',
-                'sarif-html'}:
-            (handle, name) = tempfile.mkstemp(prefix='result-',
-                                              suffix='.sarif',
-                                              dir=opts['output_dir'])
+        elif opts["output_format"] in {"sarif", "sarif-html"}:
+            (handle, name) = tempfile.mkstemp(
+                prefix="result-", suffix=".sarif", dir=opts["output_dir"]
+            )
             os.close(handle)
             return name
-        return opts['output_dir']
+        return opts["output_dir"]
 
     try:
-        cwd = opts['directory']
-        cmd = get_arguments([opts['clang'], '--analyze'] +
-                            opts['direct_args'] + opts['flags'] +
-                            [opts['file'], '-o', target()],
-                            cwd)
+        cwd = opts["directory"]
+        cmd = get_arguments(
+            [opts["clang"], "--analyze"]
+            + opts["direct_args"]
+            + opts["flags"]
+            + [opts["file"], "-o", target()],
+            cwd,
+        )
         output = run_command(cmd, cwd=cwd)
-        return {'error_output': output, 'exit_code': 0}
+        return {"error_output": output, "exit_code": 0}
     except subprocess.CalledProcessError as ex:
-        result = {'error_output': ex.output, 'exit_code': ex.returncode}
-        if opts.get('output_failures', False):
+        result = {"error_output": ex.output, "exit_code": ex.returncode}
+        if opts.get("output_failures", False):
             opts.update(result)
             continuation(opts)
         return result
     except ClangErrorException as ex:
-        result = {'error_output': ex.error, 'exit_code': 0}
-        if opts.get('output_failures', False):
+        result = {"error_output": ex.error, "exit_code": 0}
+        if opts.get("output_failures", False):
             opts.update(result)
             continuation(opts)
         return result
 
 
 def extdef_map_list_src_to_ast(extdef_src_list):
-    """ Turns textual external definition map list with source files into an
-    external definition map list with ast files. """
+    """Turns textual external definition map list with source files into an
+    external definition map list with ast files."""
 
     extdef_ast_list = []
     for extdef_src_txt in extdef_src_list:
@@ -595,17 +645,20 @@ def extdef_map_list_src_to_ast(extdef_src_list):
     return extdef_ast_list
 
 
-@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
+@require(["clang", "directory", "flags", "direct_args", "file", "ctu"])
 def ctu_collect_phase(opts):
-    """ Preprocess source by generating all data needed by CTU analysis. """
+    """Preprocess source by generating all data needed by CTU analysis."""
 
     def generate_ast(triple_arch):
-        """ Generates ASTs for the current compilation command. """
-
-        args = opts['direct_args'] + opts['flags']
-        ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
-                                       os.path.realpath(opts['file'])[1:] +
-                                       '.ast')
+        """Generates ASTs for the current compilation command."""
+
+        args = opts["direct_args"] + opts["flags"]
+        ast_joined_path = os.path.join(
+            opts["ctu"].dir,
+            triple_arch,
+            "ast",
+            os.path.realpath(opts["file"])[1:] + ".ast",
+        )
         ast_path = os.path.abspath(ast_joined_path)
         ast_dir = os.path.dirname(ast_path)
         if not os.path.isdir(ast_dir):
@@ -614,29 +667,31 @@ def generate_ast(triple_arch):
             except OSError:
                 # In case another process already created it.
                 pass
-        ast_command = [opts['clang'], '-emit-ast']
+        ast_command = [opts["clang"], "-emit-ast"]
         ast_command.extend(args)
-        ast_command.append('-w')
-        ast_command.append(opts['file'])
-        ast_command.append('-o')
+        ast_command.append("-w")
+        ast_command.append(opts["file"])
+        ast_command.append("-o")
         ast_command.append(ast_path)
         logging.debug("Generating AST using '%s'", ast_command)
-        run_command(ast_command, cwd=opts['directory'])
+        run_command(ast_command, cwd=opts["directory"])
 
     def map_extdefs(triple_arch):
-        """ Generate external definition map file for the current source. """
+        """Generate external definition map file for the current source."""
 
-        args = opts['direct_args'] + opts['flags']
-        extdefmap_command = [opts['ctu'].extdef_map_cmd]
-        extdefmap_command.append(opts['file'])
-        extdefmap_command.append('--')
+        args = opts["direct_args"] + opts["flags"]
+        extdefmap_command = [opts["ctu"].extdef_map_cmd]
+        extdefmap_command.append(opts["file"])
+        extdefmap_command.append("--")
         extdefmap_command.extend(args)
-        logging.debug("Generating external definition map using '%s'",
-                      extdefmap_command)
-        extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
+        logging.debug(
+            "Generating external definition map using '%s'", extdefmap_command
+        )
+        extdef_src_list = run_command(extdefmap_command, cwd=opts["directory"])
         extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
-        extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
-                                             CTU_TEMP_DEFMAP_FOLDER)
+        extern_defs_map_folder = os.path.join(
+            opts["ctu"].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER
+        )
         if not os.path.isdir(extern_defs_map_folder):
             try:
                 os.makedirs(extern_defs_map_folder)
@@ -644,91 +699,107 @@ def map_extdefs(triple_arch):
                 # In case another process already created it.
                 pass
         if extdef_ast_list:
-            with tempfile.NamedTemporaryFile(mode='w',
-                                             dir=extern_defs_map_folder,
-                                             delete=False) as out_file:
+            with tempfile.NamedTemporaryFile(
+                mode="w", dir=extern_defs_map_folder, delete=False
+            ) as out_file:
                 out_file.write("\n".join(extdef_ast_list) + "\n")
 
-    cwd = opts['directory']
-    cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
-        + [opts['file']]
+    cwd = opts["directory"]
+    cmd = (
+        [opts["clang"], "--analyze"]
+        + opts["direct_args"]
+        + opts["flags"]
+        + [opts["file"]]
+    )
     triple_arch = get_triple_arch(cmd, cwd)
     generate_ast(triple_arch)
     map_extdefs(triple_arch)
 
 
-@require(['ctu'])
+@require(["ctu"])
 def dispatch_ctu(opts, continuation=run_analyzer):
-    """ Execute only one phase of 2 phases of CTU if needed. """
+    """Execute only one phase of 2 phases of CTU if needed."""
 
-    ctu_config = opts['ctu']
+    ctu_config = opts["ctu"]
 
     if ctu_config.collect or ctu_config.analyze:
         assert ctu_config.collect != ctu_config.analyze
         if ctu_config.collect:
             return ctu_collect_phase(opts)
         if ctu_config.analyze:
-            cwd = opts['directory']
-            cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
-                + opts['flags'] + [opts['file']]
+            cwd = opts["directory"]
+            cmd = (
+                [opts["clang"], "--analyze"]
+                + opts["direct_args"]
+                + opts["flags"]
+                + [opts["file"]]
+            )
             triarch = get_triple_arch(cmd, cwd)
-            ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
-                           'experimental-enable-naive-ctu-analysis=true']
-            analyzer_options = prefix_with('-analyzer-config', ctu_options)
-            direct_options = prefix_with('-Xanalyzer', analyzer_options)
-            opts['direct_args'].extend(direct_options)
+            ctu_options = [
+                "ctu-dir=" + os.path.join(ctu_config.dir, triarch),
+                "experimental-enable-naive-ctu-analysis=true",
+            ]
+            analyzer_options = prefix_with("-analyzer-config", ctu_options)
+            direct_options = prefix_with("-Xanalyzer", analyzer_options)
+            opts["direct_args"].extend(direct_options)
 
     return continuation(opts)
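
Illustration (not part of the diff): what the analyze-phase branch appends,
for a hypothetical CTU directory '/tmp/ctu-dir' and triple arch 'x86_64';
the inline 'prefix_with' mirrors the assumed helper from libscanbuild:

    def prefix_with(prefix, items):
        return [elem for item in items for elem in (prefix, item)]

    ctu_options = [
        "ctu-dir=" + "/tmp/ctu-dir/x86_64",
        "experimental-enable-naive-ctu-analysis=true",
    ]
    direct_options = prefix_with(
        "-Xanalyzer", prefix_with("-analyzer-config", ctu_options)
    )
    print(direct_options)
    # ['-Xanalyzer', '-analyzer-config', '-Xanalyzer',
    #  'ctu-dir=/tmp/ctu-dir/x86_64', '-Xanalyzer', '-analyzer-config',
    #  '-Xanalyzer', 'experimental-enable-naive-ctu-analysis=true']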
 
 
-@require(['flags', 'force_debug'])
+@require(["flags", "force_debug"])
 def filter_debug_flags(opts, continuation=dispatch_ctu):
-    """ Filter out nondebug macros when requested. """
+    """Filter out nondebug macros when requested."""
 
-    if opts.pop('force_debug'):
+    if opts.pop("force_debug"):
         # lazy implementation: just append an undefine macro at the end
-        opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
+        opts.update({"flags": opts["flags"] + ["-UNDEBUG"]})
 
     return continuation(opts)
 
 
-@require(['language', 'compiler', 'file', 'flags'])
+@require(["language", "compiler", "file", "flags"])
 def language_check(opts, continuation=filter_debug_flags):
-    """ Find out the language from command line parameters or file name
-    extension. The decision also influenced by the compiler invocation. """
-
-    accepted = frozenset({
-        'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
-        'c++-cpp-output', 'objective-c-cpp-output'
-    })
+    """Find out the language from command line parameters or file name
+    extension. The decision is also influenced by the compiler invocation."""
+
+    accepted = frozenset(
+        {
+            "c",
+            "c++",
+            "objective-c",
+            "objective-c++",
+            "c-cpp-output",
+            "c++-cpp-output",
+            "objective-c-cpp-output",
+        }
+    )
 
     # language can be given as a parameter...
-    language = opts.pop('language')
-    compiler = opts.pop('compiler')
+    language = opts.pop("language")
+    compiler = opts.pop("compiler")
     # ... or find out from source file extension
     if language is None and compiler is not None:
-        language = classify_source(opts['file'], compiler == 'c')
+        language = classify_source(opts["file"], compiler == "c")
 
     if language is None:
-        logging.debug('skip analysis, language not known')
+        logging.debug("skip analysis, language not known")
         return None
     elif language not in accepted:
-        logging.debug('skip analysis, language not supported')
+        logging.debug("skip analysis, language not supported")
         return None
     else:
-        logging.debug('analysis, language: %s', language)
-        opts.update({'language': language,
-                     'flags': ['-x', language] + opts['flags']})
+        logging.debug("analysis, language: %s", language)
+        opts.update({"language": language, "flags": ["-x", language] + opts["flags"]})
         return continuation(opts)
 
 
-@require(['arch_list', 'flags'])
+@require(["arch_list", "flags"])
 def arch_check(opts, continuation=language_check):
-    """ Do run analyzer through one of the given architectures. """
+    """Do run analyzer through one of the given architectures."""
 
-    disabled = frozenset({'ppc', 'ppc64'})
+    disabled = frozenset({"ppc", "ppc64"})
 
-    received_list = opts.pop('arch_list')
+    received_list = opts.pop("arch_list")
     if received_list:
         # filter out disabled architectures and -arch switches
         filtered_list = [a for a in received_list if a not in disabled]
@@ -738,15 +809,15 @@ def arch_check(opts, continuation=language_check):
             # the same, those should not change the pre-processing step.
             # But that's the only pass we have before running the analyzer.
             current = filtered_list.pop()
-            logging.debug('analysis, on arch: %s', current)
+            logging.debug("analysis, on arch: %s", current)
 
-            opts.update({'flags': ['-arch', current] + opts['flags']})
+            opts.update({"flags": ["-arch", current] + opts["flags"]})
             return continuation(opts)
         else:
-            logging.debug('skip analysis, found not supported arch')
+            logging.debug("skip analysis, found not supported arch")
             return None
     else:
-        logging.debug('analysis, on default arch')
+        logging.debug("analysis, on default arch")
         return continuation(opts)
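
Illustration (not part of the diff): arch_check, language_check,
filter_debug_flags and dispatch_ctu form a continuation-passing chain; each
step validates or rewrites 'opts' and then delegates. A stripped-down sketch
of the pattern with hypothetical step names:

    def analyze(opts):
        return "analyzed " + opts["file"]

    def language_step(opts, continuation=analyze):
        if opts.get("language") is None:
            return None  # skip: language not known
        opts["flags"] = ["-x", opts["language"]] + opts.get("flags", [])
        return continuation(opts)

    def arch_step(opts, continuation=language_step):
        if opts.get("arch") in {"ppc", "ppc64"}:
            return None  # skip: unsupported architecture
        return continuation(opts)

    print(arch_step({"file": "main.c", "language": "c", "arch": "x86_64"}))
    # analyzed main.c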
 
 
@@ -755,49 +826,49 @@ def arch_check(opts, continuation=language_check):
 #
 # Keys are the option name, value number of options to skip
 IGNORED_FLAGS = {
-    '-c': 0,  # compile option will be overwritten
-    '-fsyntax-only': 0,  # static analyzer option will be overwritten
-    '-o': 1,  # will set up own output file
+    "-c": 0,  # compile option will be overwritten
+    "-fsyntax-only": 0,  # static analyzer option will be overwritten
+    "-o": 1,  # will set up own output file
     # flags below are inherited from the Perl implementation.
-    '-g': 0,
-    '-save-temps': 0,
-    '-install_name': 1,
-    '-exported_symbols_list': 1,
-    '-current_version': 1,
-    '-compatibility_version': 1,
-    '-init': 1,
-    '-e': 1,
-    '-seg1addr': 1,
-    '-bundle_loader': 1,
-    '-multiply_defined': 1,
-    '-sectorder': 3,
-    '--param': 1,
-    '--serialize-diagnostics': 1
+    "-g": 0,
+    "-save-temps": 0,
+    "-install_name": 1,
+    "-exported_symbols_list": 1,
+    "-current_version": 1,
+    "-compatibility_version": 1,
+    "-init": 1,
+    "-e": 1,
+    "-seg1addr": 1,
+    "-bundle_loader": 1,
+    "-multiply_defined": 1,
+    "-sectorder": 3,
+    "--param": 1,
+    "--serialize-diagnostics": 1,
 }
 
 
 def classify_parameters(command):
-    """ Prepare compiler flags (filters some and add others) and take out
-    language (-x) and architecture (-arch) flags for future processing. """
+    """Prepare compiler flags (filters some and add others) and take out
+    language (-x) and architecture (-arch) flags for future processing."""
 
     result = {
-        'flags': [],  # the filtered compiler flags
-        'arch_list': [],  # list of architecture flags
-        'language': None,  # compilation language, None, if not specified
-        'compiler': compiler_language(command)  # 'c' or 'c++'
+        "flags": [],  # the filtered compiler flags
+        "arch_list": [],  # list of architecture flags
+        "language": None,  # compilation language, None, if not specified
+        "compiler": compiler_language(command),  # 'c' or 'c++'
     }
 
     # iterate on the compile options
     args = iter(command[1:])
     for arg in args:
         # take arch flags into a separate basket
-        if arg == '-arch':
-            result['arch_list'].append(next(args))
+        if arg == "-arch":
+            result["arch_list"].append(next(args))
         # take language
-        elif arg == '-x':
-            result['language'] = next(args)
+        elif arg == "-x":
+            result["language"] = next(args)
         # parameters which look like source files are not flags
-        elif re.match(r'^[^-].+', arg) and classify_source(arg):
+        elif re.match(r"^[^-].+", arg) and classify_source(arg):
             pass
         # ignore some flags
         elif arg in IGNORED_FLAGS:
@@ -806,10 +877,10 @@ def classify_parameters(command):
                 next(args)
         # we don't care about extra warnings, but we should suppress ones
         # that we don't want to see.
-        elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
+        elif re.match(r"^-W.+", arg) and not re.match(r"^-Wno-.+", arg):
             pass
         # and consider everything else as compilation flag.
         else:
-            result['flags'].append(arg)
+            result["flags"].append(arg)
 
     return result
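
Illustration (not part of the diff): the iterator-based loop is what lets a
flag consume its own argument(s): calling next(args) inside the loop skips
the following token(s). A reduced sketch with a hypothetical skip table:

    IGNORED = {"-o": 1, "-c": 0}  # flag -> number of arguments to skip
    command = ["cc", "-c", "main.c", "-o", "main.o", "-Iinclude"]

    flags = []
    args = iter(command[1:])
    for arg in args:
        if arg in IGNORED:
            for _ in range(IGNORED[arg]):
                next(args)  # swallow the flag's argument(s)
        elif arg.endswith(".c"):
            pass  # source files are not flags
        else:
            flags.append(arg)
    print(flags)  # ['-Iinclude']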

diff --git a/clang/tools/scan-build-py/lib/libscanbuild/arguments.py b/clang/tools/scan-build-py/lib/libscanbuild/arguments.py
index 7af25ecdf3024..e9794e7959778 100644
--- a/clang/tools/scan-build-py/lib/libscanbuild/arguments.py
+++ b/clang/tools/scan-build-py/lib/libscanbuild/arguments.py
@@ -21,65 +21,68 @@
 from libscanbuild import reconfigure_logging, CtuConfig
 from libscanbuild.clang import get_checkers, is_ctu_capable
 
-__all__ = ['parse_args_for_intercept_build', 'parse_args_for_analyze_build',
-           'parse_args_for_scan_build']
+__all__ = [
+    "parse_args_for_intercept_build",
+    "parse_args_for_analyze_build",
+    "parse_args_for_scan_build",
+]
 
 
 def parse_args_for_intercept_build():
-    """ Parse and validate command-line arguments for intercept-build. """
+    """Parse and validate command-line arguments for intercept-build."""
 
     parser = create_intercept_parser()
     args = parser.parse_args()
 
     reconfigure_logging(args.verbose)
-    logging.debug('Raw arguments %s', sys.argv)
+    logging.debug("Raw arguments %s", sys.argv)
 
     # short validation logic
     if not args.build:
-        parser.error(message='missing build command')
+        parser.error(message="missing build command")
 
-    logging.debug('Parsed arguments: %s', args)
+    logging.debug("Parsed arguments: %s", args)
     return args
 
 
 def parse_args_for_analyze_build():
-    """ Parse and validate command-line arguments for analyze-build. """
+    """Parse and validate command-line arguments for analyze-build."""
 
     from_build_command = False
     parser = create_analyze_parser(from_build_command)
     args = parser.parse_args()
 
     reconfigure_logging(args.verbose)
-    logging.debug('Raw arguments %s', sys.argv)
+    logging.debug("Raw arguments %s", sys.argv)
 
     normalize_args_for_analyze(args, from_build_command)
     validate_args_for_analyze(parser, args, from_build_command)
-    logging.debug('Parsed arguments: %s', args)
+    logging.debug("Parsed arguments: %s", args)
     return args
 
 
 def parse_args_for_scan_build():
-    """ Parse and validate command-line arguments for scan-build. """
+    """Parse and validate command-line arguments for scan-build."""
 
     from_build_command = True
     parser = create_analyze_parser(from_build_command)
     args = parser.parse_args()
 
     reconfigure_logging(args.verbose)
-    logging.debug('Raw arguments %s', sys.argv)
+    logging.debug("Raw arguments %s", sys.argv)
 
     normalize_args_for_analyze(args, from_build_command)
     validate_args_for_analyze(parser, args, from_build_command)
-    logging.debug('Parsed arguments: %s', args)
+    logging.debug("Parsed arguments: %s", args)
     return args
 
 
 def normalize_args_for_analyze(args, from_build_command):
-    """ Normalize parsed arguments for analyze-build and scan-build.
+    """Normalize parsed arguments for analyze-build and scan-build.
 
     :param args: Parsed argument object. (Will be mutated.)
     :param from_build_command: Boolean value telling whether the command is supposed
-    to run the analyzer against a build command or a compilation db. """
+    to run the analyzer against a build command or a compilation db."""
 
     # make plugins always a list. (it might be None when not specified.)
     if args.plugins is None:
@@ -96,16 +99,19 @@ def normalize_args_for_analyze(args, from_build_command):
     # which have good default value.)
     if from_build_command:
         # add cdb parameter invisibly to make report module working.
-        args.cdb = 'compile_commands.json'
+        args.cdb = "compile_commands.json"
 
     # Make ctu_dir an abspath as it is needed inside clang
-    if not from_build_command and hasattr(args, 'ctu_phases') \
-            and hasattr(args.ctu_phases, 'dir'):
+    if (
+        not from_build_command
+        and hasattr(args, "ctu_phases")
+        and hasattr(args.ctu_phases, "dir")
+    ):
         args.ctu_dir = os.path.abspath(args.ctu_dir)
 
 
 def validate_args_for_analyze(parser, args, from_build_command):
-    """ Command line parsing is done by the argparse module, but semantic
+    """Command line parsing is done by the argparse module, but semantic
     validation still needs to be done. This method does it for
     analyze-build and scan-build commands.
 
@@ -114,7 +120,7 @@ def validate_args_for_analyze(parser, args, from_build_command):
     :param from_build_command: Boolean value telling whether the command is supposed
     to run the analyzer against a build command or a compilation db.
     :return: No return value, but this call might throw when validation
-    fails. """
+    fails."""
 
     if args.help_checkers_verbose:
         print_checkers(get_checkers(args.clang, args.plugins))
@@ -123,25 +129,33 @@ def validate_args_for_analyze(parser, args, from_build_command):
         print_active_checkers(get_checkers(args.clang, args.plugins))
         parser.exit(status=0)
     elif from_build_command and not args.build:
-        parser.error(message='missing build command')
+        parser.error(message="missing build command")
     elif not from_build_command and not os.path.exists(args.cdb):
-        parser.error(message='compilation database is missing')
+        parser.error(message="compilation database is missing")
 
     # If the user wants CTU mode
-    if not from_build_command and hasattr(args, 'ctu_phases') \
-            and hasattr(args.ctu_phases, 'dir'):
+    if (
+        not from_build_command
+        and hasattr(args, "ctu_phases")
+        and hasattr(args.ctu_phases, "dir")
+    ):
         # If CTU analyze_only, the input directory should exist
-        if args.ctu_phases.analyze and not args.ctu_phases.collect \
-                and not os.path.exists(args.ctu_dir):
-            parser.error(message='missing CTU directory')
+        if (
+            args.ctu_phases.analyze
+            and not args.ctu_phases.collect
+            and not os.path.exists(args.ctu_dir)
+        ):
+            parser.error(message="missing CTU directory")
         # Check CTU capability via checking clang-extdef-mapping
         if not is_ctu_capable(args.extdef_map_cmd):
-            parser.error(message="""This version of clang does not support CTU
-            functionality or clang-extdef-mapping command not found.""")
+            parser.error(
+                message="""This version of clang does not support CTU
+            functionality or clang-extdef-mapping command not found."""
+            )
 
 
 def create_intercept_parser():
-    """ Creates a parser for command-line arguments to 'intercept'. """
+    """Creates a parser for command-line arguments to 'intercept'."""
 
     parser = create_default_parser()
     parser_add_cdb(parser)
@@ -149,23 +163,25 @@ def create_intercept_parser():
     parser_add_prefer_wrapper(parser)
     parser_add_compilers(parser)
 
-    advanced = parser.add_argument_group('advanced options')
+    advanced = parser.add_argument_group("advanced options")
     group = advanced.add_mutually_exclusive_group()
     group.add_argument(
-        '--append',
-        action='store_true',
+        "--append",
+        action="store_true",
         help="""Extend existing compilation database with new entries.
         Duplicate entries are detected and not present in the final output.
         The output is not continuously updated, it's done when the build
-        command finished. """)
+        command has finished. """,
+    )
 
     parser.add_argument(
-        dest='build', nargs=argparse.REMAINDER, help="""Command to run.""")
+        dest="build", nargs=argparse.REMAINDER, help="""Command to run."""
+    )
     return parser
 
 
 def create_analyze_parser(from_build_command):
-    """ Creates a parser for command-line arguments to 'analyze'. """
+    """Creates a parser for command-line arguments to 'analyze'."""
 
     parser = create_default_parser()
 
@@ -174,155 +190,174 @@ def create_analyze_parser(from_build_command):
         parser_add_compilers(parser)
 
         parser.add_argument(
-            '--intercept-first',
-            action='store_true',
+            "--intercept-first",
+            action="store_true",
             help="""Run the build commands first, intercept compiler
             calls and then run the static analyzer afterwards.
             Generally speaking it has better coverage on build commands.
             With '--override-compiler' it uses the compiler wrapper, but does
-            not run the analyzer till the build is finished.""")
+            not run the analyzer till the build is finished.""",
+        )
     else:
         parser_add_cdb(parser)
 
     parser.add_argument(
-        '--status-bugs',
-        action='store_true',
+        "--status-bugs",
+        action="store_true",
         help="""The exit status of '%(prog)s' is the same as the executed
        build command. This option ignores the build exit status and sets it to
-        be non zero if it found potential bugs or zero otherwise.""")
+        be non-zero if it found potential bugs, or zero otherwise.""",
+    )
     parser.add_argument(
-        '--exclude',
-        metavar='<directory>',
-        dest='excludes',
-        action='append',
+        "--exclude",
+        metavar="<directory>",
+        dest="excludes",
+        action="append",
         default=[],
         help="""Do not run static analyzer against files found in this
         directory. (You can specify this option multiple times.)
-        Could be useful when project contains 3rd party libraries.""")
+        Could be useful when project contains 3rd party libraries.""",
+    )
 
-    output = parser.add_argument_group('output control options')
+    output = parser.add_argument_group("output control options")
     output.add_argument(
-        '--output',
-        '-o',
-        metavar='<path>',
+        "--output",
+        "-o",
+        metavar="<path>",
         default=tempfile.gettempdir(),
         help="""Specifies the output directory for analyzer reports.
-        Subdirectory will be created if default directory is targeted.""")
+        A subdirectory will be created if the default directory is targeted.""",
+    )
     output.add_argument(
-        '--keep-empty',
-        action='store_true',
+        "--keep-empty",
+        action="store_true",
         help="""Don't remove the build results directory even if no issues
-        were reported.""")
+        were reported.""",
+    )
     output.add_argument(
-        '--html-title',
-        metavar='<title>',
+        "--html-title",
+        metavar="<title>",
         help="""Specify the title used on generated HTML pages.
-        If not specified, a default title will be used.""")
+        If not specified, a default title will be used.""",
+    )
     format_group = output.add_mutually_exclusive_group()
     format_group.add_argument(
-        '--plist',
-        '-plist',
-        dest='output_format',
-        const='plist',
-        default='html',
-        action='store_const',
-        help="""Cause the results as a set of .plist files.""")
+        "--plist",
+        "-plist",
+        dest="output_format",
+        const="plist",
+        default="html",
+        action="store_const",
+        help="""Cause the results as a set of .plist files.""",
+    )
     format_group.add_argument(
-        '--plist-html',
-        '-plist-html',
-        dest='output_format',
-        const='plist-html',
-        default='html',
-        action='store_const',
-        help="""Cause the results as a set of .html and .plist files.""")
+        "--plist-html",
+        "-plist-html",
+        dest="output_format",
+        const="plist-html",
+        default="html",
+        action="store_const",
+        help="""Cause the results as a set of .html and .plist files.""",
+    )
     format_group.add_argument(
-        '--plist-multi-file',
-        '-plist-multi-file',
-        dest='output_format',
-        const='plist-multi-file',
-        default='html',
-        action='store_const',
+        "--plist-multi-file",
+        "-plist-multi-file",
+        dest="output_format",
+        const="plist-multi-file",
+        default="html",
+        action="store_const",
         help="""Cause the results as a set of .plist files with extra
-        information on related files.""")
+        information on related files.""",
+    )
     format_group.add_argument(
-        '--sarif',
-        '-sarif',
-        dest='output_format',
-        const='sarif',
-        default='html',
-        action='store_const',
-        help="""Cause the results as a result.sarif file.""")
+        "--sarif",
+        "-sarif",
+        dest="output_format",
+        const="sarif",
+        default="html",
+        action="store_const",
+        help="""Cause the results as a result.sarif file.""",
+    )
     format_group.add_argument(
-        '--sarif-html',
-        '-sarif-html',
-        dest='output_format',
-        const='sarif-html',
-        default='html',
-        action='store_const',
-        help="""Cause the results as a result.sarif file and .html files.""")
-
-    advanced = parser.add_argument_group('advanced options')
+        "--sarif-html",
+        "-sarif-html",
+        dest="output_format",
+        const="sarif-html",
+        default="html",
+        action="store_const",
+        help="""Cause the results as a result.sarif file and .html files.""",
+    )
+
+    advanced = parser.add_argument_group("advanced options")
     advanced.add_argument(
-        '--use-analyzer',
-        metavar='<path>',
-        dest='clang',
-        default='clang',
+        "--use-analyzer",
+        metavar="<path>",
+        dest="clang",
+        default="clang",
         help="""'%(prog)s' uses the 'clang' executable relative to itself for
         static analysis. One can override this behavior with this option by
-        using the 'clang' packaged with Xcode (on OS X) or from the PATH.""")
+        using the 'clang' packaged with Xcode (on OS X) or from the PATH.""",
+    )
     advanced.add_argument(
-        '--no-failure-reports',
-        '-no-failure-reports',
-        dest='output_failures',
-        action='store_false',
+        "--no-failure-reports",
+        "-no-failure-reports",
+        dest="output_failures",
+        action="store_false",
         help="""Do not create a 'failures' subdirectory that includes analyzer
-        crash reports and preprocessed source files.""")
+        crash reports and preprocessed source files.""",
+    )
     parser.add_argument(
-        '--analyze-headers',
-        action='store_true',
+        "--analyze-headers",
+        action="store_true",
         help="""Also analyze functions in #included files. By default, such
         functions are skipped unless they are called by functions within the
-        main source file.""")
+        main source file.""",
+    )
     advanced.add_argument(
-        '--stats',
-        '-stats',
-        action='store_true',
-        help="""Generates visitation statistics for the project.""")
+        "--stats",
+        "-stats",
+        action="store_true",
+        help="""Generates visitation statistics for the project.""",
+    )
     advanced.add_argument(
-        '--internal-stats',
-        action='store_true',
-        help="""Generate internal analyzer statistics.""")
+        "--internal-stats",
+        action="store_true",
+        help="""Generate internal analyzer statistics.""",
+    )
     advanced.add_argument(
-        '--maxloop',
-        '-maxloop',
-        metavar='<loop count>',
+        "--maxloop",
+        "-maxloop",
+        metavar="<loop count>",
         type=int,
         help="""Specify the number of times a block can be visited before
         giving up. Increase for more comprehensive coverage at a cost of
-        speed.""")
+        speed.""",
+    )
     advanced.add_argument(
-        '--store',
-        '-store',
-        metavar='<model>',
-        dest='store_model',
-        choices=['region', 'basic'],
+        "--store",
+        "-store",
+        metavar="<model>",
+        dest="store_model",
+        choices=["region", "basic"],
         help="""Specify the store model used by the analyzer. 'region'
        specifies a field-sensitive store model. 'basic' is far less
        precise but can analyze code more quickly. 'basic' was the default
-        store model for checker-0.221 and earlier.""")
+        store model for checker-0.221 and earlier.""",
+    )
     advanced.add_argument(
-        '--constraints',
-        '-constraints',
-        metavar='<model>',
-        dest='constraints_model',
-        choices=['range', 'basic'],
+        "--constraints",
+        "-constraints",
+        metavar="<model>",
+        dest="constraints_model",
+        choices=["range", "basic"],
         help="""Specify the constraint engine used by the analyzer. Specifying
         'basic' uses a simpler, less powerful constraint model used by
-        checker-0.160 and earlier.""")
+        checker-0.160 and earlier.""",
+    )
     advanced.add_argument(
-        '--analyzer-config',
-        '-analyzer-config',
-        metavar='<options>',
+        "--analyzer-config",
+        "-analyzer-config",
+        metavar="<options>",
         help="""Provide options to pass through to the analyzer's
         -analyzer-config flag. Several options are separated with comma:
         'key1=val1,key2=val2'
@@ -332,134 +367,148 @@ def create_analyze_parser(from_build_command):
 
         Switch the page naming to:
         report-<filename>-<function/method name>-<id>.html
-        instead of report-XXXXXX.html""")
+        instead of report-XXXXXX.html""",
+    )
     advanced.add_argument(
-        '--force-analyze-debug-code',
-        dest='force_debug',
-        action='store_true',
+        "--force-analyze-debug-code",
+        dest="force_debug",
+        action="store_true",
         help="""Tells analyzer to enable assertions in code even if they were
-        disabled during compilation, enabling more precise results.""")
+        disabled during compilation, enabling more precise results.""",
+    )
 
-    plugins = parser.add_argument_group('checker options')
+    plugins = parser.add_argument_group("checker options")
     plugins.add_argument(
-        '--load-plugin',
-        '-load-plugin',
-        metavar='<plugin library>',
-        dest='plugins',
-        action='append',
-        help="""Loading external checkers using the clang plugin interface.""")
+        "--load-plugin",
+        "-load-plugin",
+        metavar="<plugin library>",
+        dest="plugins",
+        action="append",
+        help="""Loading external checkers using the clang plugin interface.""",
+    )
     plugins.add_argument(
-        '--enable-checker',
-        '-enable-checker',
-        metavar='<checker name>',
+        "--enable-checker",
+        "-enable-checker",
+        metavar="<checker name>",
         action=AppendCommaSeparated,
-        help="""Enable specific checker.""")
+        help="""Enable specific checker.""",
+    )
     plugins.add_argument(
-        '--disable-checker',
-        '-disable-checker',
-        metavar='<checker name>',
+        "--disable-checker",
+        "-disable-checker",
+        metavar="<checker name>",
         action=AppendCommaSeparated,
-        help="""Disable specific checker.""")
+        help="""Disable specific checker.""",
+    )
     plugins.add_argument(
-        '--help-checkers',
-        action='store_true',
+        "--help-checkers",
+        action="store_true",
         help="""A default group of checkers is run unless explicitly disabled.
         Exactly which checkers constitute the default group is a function of
-        the operating system in use. These can be printed with this flag.""")
+        the operating system in use. These can be printed with this flag.""",
+    )
     plugins.add_argument(
-        '--help-checkers-verbose',
-        action='store_true',
-        help="""Print all available checkers and mark the enabled ones.""")
+        "--help-checkers-verbose",
+        action="store_true",
+        help="""Print all available checkers and mark the enabled ones.""",
+    )
 
     if from_build_command:
         parser.add_argument(
-            dest='build', nargs=argparse.REMAINDER, help="""Command to run.""")
+            dest="build", nargs=argparse.REMAINDER, help="""Command to run."""
+        )
     else:
-        ctu = parser.add_argument_group('cross translation unit analysis')
+        ctu = parser.add_argument_group("cross translation unit analysis")
         ctu_mutex_group = ctu.add_mutually_exclusive_group()
         ctu_mutex_group.add_argument(
-            '--ctu',
-            action='store_const',
-            const=CtuConfig(collect=True, analyze=True,
-                            dir='', extdef_map_cmd=''),
-            dest='ctu_phases',
+            "--ctu",
+            action="store_const",
+            const=CtuConfig(collect=True, analyze=True, dir="", extdef_map_cmd=""),
+            dest="ctu_phases",
             help="""Perform cross translation unit (ctu) analysis (both collect
             and analyze phases) using default <ctu-dir> for temporary output.
-            At the end of the analysis, the temporary directory is removed.""")
+            At the end of the analysis, the temporary directory is removed.""",
+        )
         ctu.add_argument(
-            '--ctu-dir',
-            metavar='<ctu-dir>',
-            dest='ctu_dir',
-            default='ctu-dir',
+            "--ctu-dir",
+            metavar="<ctu-dir>",
+            dest="ctu_dir",
+            default="ctu-dir",
             help="""Defines the temporary directory used between ctu
-            phases.""")
+            phases.""",
+        )
         ctu_mutex_group.add_argument(
-            '--ctu-collect-only',
-            action='store_const',
-            const=CtuConfig(collect=True, analyze=False,
-                            dir='', extdef_map_cmd=''),
-            dest='ctu_phases',
+            "--ctu-collect-only",
+            action="store_const",
+            const=CtuConfig(collect=True, analyze=False, dir="", extdef_map_cmd=""),
+            dest="ctu_phases",
             help="""Perform only the collect phase of ctu.
-            Keep <ctu-dir> for further use.""")
+            Keep <ctu-dir> for further use.""",
+        )
         ctu_mutex_group.add_argument(
-            '--ctu-analyze-only',
-            action='store_const',
-            const=CtuConfig(collect=False, analyze=True,
-                            dir='', extdef_map_cmd=''),
-            dest='ctu_phases',
+            "--ctu-analyze-only",
+            action="store_const",
+            const=CtuConfig(collect=False, analyze=True, dir="", extdef_map_cmd=""),
+            dest="ctu_phases",
             help="""Perform only the analyze phase of ctu. <ctu-dir> should be
-            present and will not be removed after analysis.""")
+            present and will not be removed after analysis.""",
+        )
         ctu.add_argument(
-            '--use-extdef-map-cmd',
-            metavar='<path>',
-            dest='extdef_map_cmd',
-            default='clang-extdef-mapping',
+            "--use-extdef-map-cmd",
+            metavar="<path>",
+            dest="extdef_map_cmd",
+            default="clang-extdef-mapping",
             help="""'%(prog)s' uses the 'clang-extdef-mapping' executable
             relative to itself for generating external definition maps for
             static analysis. One can override this behavior with this option
             by using the 'clang-extdef-mapping' packaged with Xcode (on OS X)
-            or from the PATH.""")
+            or from the PATH.""",
+        )
     return parser
 
 
 def create_default_parser():
-    """ Creates command line parser for all build wrapper commands. """
+    """Creates command line parser for all build wrapper commands."""
 
     parser = argparse.ArgumentParser(
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
 
     parser.add_argument(
-        '--verbose',
-        '-v',
-        action='count',
+        "--verbose",
+        "-v",
+        action="count",
         default=0,
         help="""Enable verbose output from '%(prog)s'. A second, third and
-        fourth flags increases verbosity.""")
+        fourth flags increases verbosity.""",
+    )
     return parser
 
 
 def parser_add_cdb(parser):
     parser.add_argument(
-        '--cdb',
-        metavar='<file>',
+        "--cdb",
+        metavar="<file>",
         default="compile_commands.json",
-        help="""The JSON compilation database.""")
+        help="""The JSON compilation database.""",
+    )
 
 
 def parser_add_prefer_wrapper(parser):
     parser.add_argument(
-        '--override-compiler',
-        action='store_true',
+        "--override-compiler",
+        action="store_true",
         help="""Always resort to the compiler wrapper even when better
-        intercept methods are available.""")
+        intercept methods are available.""",
+    )
 
 
 def parser_add_compilers(parser):
     parser.add_argument(
-        '--use-cc',
-        metavar='<path>',
-        dest='cc',
-        default=os.getenv('CC', 'cc'),
+        "--use-cc",
+        metavar="<path>",
+        dest="cc",
+        default=os.getenv("CC", "cc"),
         help="""When '%(prog)s' analyzes a project by interposing a compiler
        wrapper, which executes a real compiler for compilation and does other
         tasks (record the compiler invocation). Because of this interposing,
@@ -468,17 +517,19 @@ def parser_add_compilers(parser):
         your default compiler.
 
         If you need '%(prog)s' to use a specific compiler for *compilation*
-        then you can use this option to specify a path to that compiler.""")
+        then you can use this option to specify a path to that compiler.""",
+    )
     parser.add_argument(
-        '--use-c++',
-        metavar='<path>',
-        dest='cxx',
-        default=os.getenv('CXX', 'c++'),
-        help="""This is the same as "--use-cc" but for C++ code.""")
+        "--use-c++",
+        metavar="<path>",
+        dest="cxx",
+        default=os.getenv("CXX", "c++"),
+        help="""This is the same as "--use-cc" but for C++ code.""",
+    )
 
 
 class AppendCommaSeparated(argparse.Action):
-    """ argparse Action class to support multiple comma separated lists. """
+    """argparse Action class to support multiple comma separated lists."""
 
     def __call__(self, __parser, namespace, values, __option_string):
         # getattr(obj, attr, default) does not really return default but None
@@ -486,32 +537,31 @@ def __call__(self, __parser, namespace, values, __option_string):
             setattr(namespace, self.dest, [])
         # once it's fixed we can use as expected
         actual = getattr(namespace, self.dest)
-        actual.extend(values.split(','))
+        actual.extend(values.split(","))
         setattr(namespace, self.dest, actual)
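
Illustration (not part of the diff): the action accumulates values across
repeated and comma-separated uses of a flag. A quick demo with a synthetic
argv, assuming the AppendCommaSeparated class above is in scope:

    parser = argparse.ArgumentParser()
    parser.add_argument("--enable-checker", action=AppendCommaSeparated)
    args = parser.parse_args(
        ["--enable-checker", "core,unix", "--enable-checker", "deadcode"]
    )
    print(args.enable_checker)  # ['core', 'unix', 'deadcode']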
 
 
 def print_active_checkers(checkers):
-    """ Print active checkers to stdout. """
+    """Print active checkers to stdout."""
 
-    for name in sorted(name for name, (_, active) in checkers.items()
-                       if active):
+    for name in sorted(name for name, (_, active) in checkers.items() if active):
         print(name)
 
 
 def print_checkers(checkers):
-    """ Print verbose checker help to stdout. """
+    """Print verbose checker help to stdout."""
 
-    print('')
-    print('available checkers:')
-    print('')
+    print("")
+    print("available checkers:")
+    print("")
     for name in sorted(checkers.keys()):
         description, active = checkers[name]
-        prefix = '+' if active else ' '
+        prefix = "+" if active else " "
         if len(name) > 30:
-            print(' {0} {1}'.format(prefix, name))
-            print(' ' * 35 + description)
+            print(" {0} {1}".format(prefix, name))
+            print(" " * 35 + description)
         else:
-            print(' {0} {1: <30}  {2}'.format(prefix, name, description))
-    print('')
+            print(" {0} {1: <30}  {2}".format(prefix, name, description))
+    print("")
     print('NOTE: "+" indicates that an analysis is enabled by default.')
-    print('')
+    print("")

diff --git a/clang/tools/scan-build-py/lib/libscanbuild/clang.py b/clang/tools/scan-build-py/lib/libscanbuild/clang.py
index 4f02cb20d3fe0..25a50b6a216dc 100644
--- a/clang/tools/scan-build-py/lib/libscanbuild/clang.py
+++ b/clang/tools/scan-build-py/lib/libscanbuild/clang.py
@@ -12,11 +12,16 @@
 from libscanbuild import run_command
 from libscanbuild.shell import decode
 
-__all__ = ['get_version', 'get_arguments', 'get_checkers', 'is_ctu_capable',
-           'get_triple_arch']
+__all__ = [
+    "get_version",
+    "get_arguments",
+    "get_checkers",
+    "is_ctu_capable",
+    "get_triple_arch",
+]
 
 # regex for activated checker
-ACTIVE_CHECKER_PATTERN = re.compile(r'^-analyzer-checker=(.*)$')
+ACTIVE_CHECKER_PATTERN = re.compile(r"^-analyzer-checker=(.*)$")
 
 
 class ClangErrorException(Exception):
@@ -25,38 +30,38 @@ def __init__(self, error):
 
 
 def get_version(clang):
-    """ Returns the compiler version as string.
+    """Returns the compiler version as string.
 
     :param clang:   the compiler we are using
-    :return:        the version string printed to stderr """
+    :return:        the version string printed to stderr"""
 
-    output = run_command([clang, '-v'])
+    output = run_command([clang, "-v"])
     # the relevant version info is in the first line
     return output[0]
 
 
 def get_arguments(command, cwd):
-    """ Capture Clang invocation.
+    """Capture Clang invocation.
 
     :param command: the compilation command
     :param cwd:     the current working directory
-    :return:        the detailed front-end invocation command """
+    :return:        the detailed front-end invocation command"""
 
     cmd = command[:]
-    cmd.insert(1, '-###')
-    cmd.append('-fno-color-diagnostics')
+    cmd.insert(1, "-###")
+    cmd.append("-fno-color-diagnostics")
 
     output = run_command(cmd, cwd=cwd)
     # The relevant information is in the last line of the output.
     # Don't check whether finding the last line fails; it would throw anyway.
     last_line = output[-1]
-    if re.search(r'clang(.*): error:', last_line):
+    if re.search(r"clang(.*): error:", last_line):
         raise ClangErrorException(last_line)
     return decode(last_line)
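
Illustration (not part of the diff): '-###' makes the clang driver print the
jobs it would run instead of executing them; get_arguments re-parses the last
printed job into an argument list. Requires a working 'clang' on PATH and an
existing main.c:

    cmd = get_arguments(["clang", "-c", "main.c"], cwd=".")
    print(cmd[:2])  # e.g. ['/usr/bin/clang', '-cc1'] (exact paths vary)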
 
 
 def get_active_checkers(clang, plugins):
-    """ Get the active checker list.
+    """Get the active checker list.
 
     :param clang:   the compiler we are using
     :param plugins: list of plugins which were requested by the user
@@ -65,40 +70,42 @@ def get_active_checkers(clang, plugins):
     To get the default checkers we execute Clang to print how this
    compilation would be called, and take out the enabled checkers from the
    arguments. For the input file we specify stdin and pass only language
-    information. """
+    information."""
 
     def get_active_checkers_for(language):
-        """ Returns a list of active checkers for the given language. """
-
-        load_args = [arg
-                     for plugin in plugins
-                     for arg in ['-Xclang', '-load', '-Xclang', plugin]]
-        cmd = [clang, '--analyze'] + load_args + ['-x', language, '-']
-        return [ACTIVE_CHECKER_PATTERN.match(arg).group(1)
-                for arg in get_arguments(cmd, '.')
-                if ACTIVE_CHECKER_PATTERN.match(arg)]
+        """Returns a list of active checkers for the given language."""
+
+        load_args = [
+            arg for plugin in plugins for arg in ["-Xclang", "-load", "-Xclang", plugin]
+        ]
+        cmd = [clang, "--analyze"] + load_args + ["-x", language, "-"]
+        return [
+            ACTIVE_CHECKER_PATTERN.match(arg).group(1)
+            for arg in get_arguments(cmd, ".")
+            if ACTIVE_CHECKER_PATTERN.match(arg)
+        ]
 
     result = set()
-    for language in ['c', 'c++', 'objective-c', 'objective-c++']:
+    for language in ["c", "c++", "objective-c", "objective-c++"]:
         result.update(get_active_checkers_for(language))
     return frozenset(result)
 
 
 def is_active(checkers):
-    """ Returns a method, which classifies the checker active or not,
-    based on the received checker name list. """
+    """Returns a method, which classifies the checker active or not,
+    based on the received checker name list."""
 
     def predicate(checker):
-        """ Returns True if the given checker is active. """
+        """Returns True if the given checker is active."""
 
         return any(pattern.match(checker) for pattern in predicate.patterns)
 
-    predicate.patterns = [re.compile(r'^' + a + r'(\.|$)') for a in checkers]
+    predicate.patterns = [re.compile(r"^" + a + r"(\.|$)") for a in checkers]
     return predicate
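
Illustration (not part of the diff): the returned predicate matches a checker
name itself or any of its sub-checkers, assuming is_active above is in scope:

    active = is_active(["core", "unix.Malloc"])
    print(active("core.DivideZero"))  # True ('core' prefix match)
    print(active("unix.Malloc"))      # True (exact match)
    print(active("unix.API"))         # False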
 
 
 def parse_checkers(stream):
-    """ Parse clang -analyzer-checker-help output.
+    """Parse clang -analyzer-checker-help output.
 
    Below the 'CHECKERS:' line are the name-description pairs.
    Many of them are on one line, but some long-named checkers have the
@@ -112,40 +119,40 @@ def parse_checkers(stream):
     :param stream:  list of lines to parse
     :return:        generator of tuples
 
-    (<checker name>, <checker description>) """
+    (<checker name>, <checker description>)"""
 
     lines = iter(stream)
     # find checkers header
     for line in lines:
-        if re.match(r'^CHECKERS:', line):
+        if re.match(r"^CHECKERS:", line):
             break
     # find entries
     state = None
     for line in lines:
-        if state and not re.match(r'^\s\s\S', line):
+        if state and not re.match(r"^\s\s\S", line):
             yield (state, line.strip())
             state = None
-        elif re.match(r'^\s\s\S+$', line.rstrip()):
+        elif re.match(r"^\s\s\S+$", line.rstrip()):
             state = line.strip()
         else:
-            pattern = re.compile(r'^\s\s(?P<key>\S*)\s*(?P<value>.*)')
+            pattern = re.compile(r"^\s\s(?P<key>\S*)\s*(?P<value>.*)")
             match = pattern.match(line.rstrip())
             if match:
                 current = match.groupdict()
-                yield (current['key'], current['value'])
+                yield (current["key"], current["value"])
 
 
 def get_checkers(clang, plugins):
-    """ Get all the available checkers from default and from the plugins.
+    """Get all the available checkers from default and from the plugins.
 
     :param clang:   the compiler we are using
     :param plugins: list of plugins which was requested by the user
     :return:        a dictionary of all available checkers and its status
 
-    {<checker name>: (<checker description>, <is active by default>)} """
+    {<checker name>: (<checker description>, <is active by default>)}"""
 
-    load = [elem for plugin in plugins for elem in ['-load', plugin]]
-    cmd = [clang, '-cc1'] + load + ['-analyzer-checker-help']
+    load = [elem for plugin in plugins for elem in ["-load", plugin]]
+    cmd = [clang, "-cc1"] + load + ["-analyzer-checker-help"]
 
     lines = run_command(cmd)
 
@@ -156,17 +163,17 @@ def get_checkers(clang, plugins):
         for name, description in parse_checkers(lines)
     }
     if not checkers:
-        raise Exception('Could not query Clang for available checkers.')
+        raise Exception("Could not query Clang for available checkers.")
 
     return checkers
 
 
 def is_ctu_capable(extdef_map_cmd):
-    """ Detects if the current (or given) clang and external definition mapping
-    executables are CTU compatible. """
+    """Detects if the current (or given) clang and external definition mapping
+    executables are CTU compatible."""
 
     try:
-        run_command([extdef_map_cmd, '-version'])
+        run_command([extdef_map_cmd, "-version"])
     except (OSError, subprocess.CalledProcessError):
         return False
     return True
@@ -174,7 +181,7 @@ def is_ctu_capable(extdef_map_cmd):
 
 def get_triple_arch(command, cwd):
     """Returns the architecture part of the target triple for the given
-    compilation command. """
+    compilation command."""
 
     cmd = get_arguments(command, cwd)
     try:

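For reference, a self-contained sketch of how the `is_active` predicate above classifies checker names; the checker names are made up for illustration and this snippet is not part of the commit:

    import re

    def is_active(checkers):
        # Same shape as the predicate in libscanbuild/clang.py above:
        # a checker is active if any requested name is a prefix of it.
        def predicate(checker):
            return any(pattern.match(checker) for pattern in predicate.patterns)

        predicate.patterns = [re.compile(r"^" + a + r"(\.|$)") for a in checkers]
        return predicate

    active = is_active(["core", "security.insecureAPI"])  # assumed input
    assert active("core.DivideZero")            # 'core.' prefix matches
    assert active("security.insecureAPI.rand")  # nested checker matches
    assert not active("coreFoo.Bar")            # 'core' must end or continue with '.'
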
diff --git a/clang/tools/scan-build-py/lib/libscanbuild/compilation.py b/clang/tools/scan-build-py/lib/libscanbuild/compilation.py
index 38ce634fbeb4f..6b9e3cabae309 100644
--- a/clang/tools/scan-build-py/lib/libscanbuild/compilation.py
+++ b/clang/tools/scan-build-py/lib/libscanbuild/compilation.py
@@ -8,7 +8,7 @@
 import os
 import collections
 
-__all__ = ['split_command', 'classify_source', 'compiler_language']
+__all__ = ["split_command", "classify_source", "compiler_language"]
 
 # Ignored compiler options map for compilation database creation.
 # The map is used in `split_command` method. (Which does ignore and classify
@@ -19,55 +19,56 @@
 IGNORED_FLAGS = {
     # compiling only flag, ignored because the creator of compilation
     # database will explicitly set it.
-    '-c': 0,
+    "-c": 0,
     # preprocessor macros, ignored because would cause duplicate entries in
     # the output (the only difference would be these flags). this is actual
     # finding from users, who suffered longer execution time caused by the
     # duplicates.
-    '-MD': 0,
-    '-MMD': 0,
-    '-MG': 0,
-    '-MP': 0,
-    '-MF': 1,
-    '-MT': 1,
-    '-MQ': 1,
+    "-MD": 0,
+    "-MMD": 0,
+    "-MG": 0,
+    "-MP": 0,
+    "-MF": 1,
+    "-MT": 1,
+    "-MQ": 1,
     # linker options, ignored because for compilation database will contain
     # compilation commands only. so, the compiler would ignore these flags
     # anyway. the benefit to get rid of them is to make the output more
     # readable.
-    '-static': 0,
-    '-shared': 0,
-    '-s': 0,
-    '-rdynamic': 0,
-    '-l': 1,
-    '-L': 1,
-    '-u': 1,
-    '-z': 1,
-    '-T': 1,
-    '-Xlinker': 1
+    "-static": 0,
+    "-shared": 0,
+    "-s": 0,
+    "-rdynamic": 0,
+    "-l": 1,
+    "-L": 1,
+    "-u": 1,
+    "-z": 1,
+    "-T": 1,
+    "-Xlinker": 1,
 }
 
 # Known C/C++ compiler executable name patterns
-COMPILER_PATTERNS = frozenset([
-    re.compile(r'^(intercept-|analyze-|)c(c|\+\+)$'),
-    re.compile(r'^([^-]*-)*[mg](cc|\+\+)(-\d+(\.\d+){0,2})?$'),
-    re.compile(r'^([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$'),
-    re.compile(r'^llvm-g(cc|\+\+)$'),
-])
+COMPILER_PATTERNS = frozenset(
+    [
+        re.compile(r"^(intercept-|analyze-|)c(c|\+\+)$"),
+        re.compile(r"^([^-]*-)*[mg](cc|\+\+)(-\d+(\.\d+){0,2})?$"),
+        re.compile(r"^([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$"),
+        re.compile(r"^llvm-g(cc|\+\+)$"),
+    ]
+)
 
 
 def split_command(command):
-    """ Returns a value when the command is a compilation, None otherwise.
+    """Returns a value when the command is a compilation, None otherwise.
 
     The value on success is a named tuple with the following attributes:
 
         files:    list of source files
         flags:    list of compile options
-        compiler: string value of 'c' or 'c++' """
+        compiler: string value of 'c' or 'c++'"""
 
     # the result of this method
-    result = collections.namedtuple('Compilation',
-                                    ['compiler', 'flags', 'files'])
+    result = collections.namedtuple("Compilation", ["compiler", "flags", "files"])
     result.compiler = compiler_language(command)
     result.flags = []
     result.files = []
@@ -78,20 +79,20 @@ def split_command(command):
     args = iter(command[1:])
     for arg in args:
         # quit when compilation pass is not involved
-        if arg in {'-E', '-S', '-cc1', '-M', '-MM', '-###'}:
+        if arg in {"-E", "-S", "-cc1", "-M", "-MM", "-###"}:
             return None
         # ignore some flags
         elif arg in IGNORED_FLAGS:
             count = IGNORED_FLAGS[arg]
             for _ in range(count):
                 next(args)
-        elif re.match(r'^-(l|L|Wl,).+', arg):
+        elif re.match(r"^-(l|L|Wl,).+", arg):
             pass
         # some parameters could look like filename, take as compile option
-        elif arg in {'-D', '-I'}:
+        elif arg in {"-D", "-I"}:
             result.flags.extend([arg, next(args)])
         # parameter which looks source file is taken...
-        elif re.match(r'^[^-].+', arg) and classify_source(arg):
+        elif re.match(r"^[^-].+", arg) and classify_source(arg):
             result.files.append(arg)
         # and consider everything else as compile option.
         else:
@@ -101,25 +102,25 @@ def split_command(command):
 
 
 def classify_source(filename, c_compiler=True):
-    """ Return the language from file name extension. """
+    """Return the language from file name extension."""
 
     mapping = {
-        '.c': 'c' if c_compiler else 'c++',
-        '.i': 'c-cpp-output' if c_compiler else 'c++-cpp-output',
-        '.ii': 'c++-cpp-output',
-        '.m': 'objective-c',
-        '.mi': 'objective-c-cpp-output',
-        '.mm': 'objective-c++',
-        '.mii': 'objective-c++-cpp-output',
-        '.C': 'c++',
-        '.cc': 'c++',
-        '.CC': 'c++',
-        '.cp': 'c++',
-        '.cpp': 'c++',
-        '.cxx': 'c++',
-        '.c++': 'c++',
-        '.C++': 'c++',
-        '.txx': 'c++'
+        ".c": "c" if c_compiler else "c++",
+        ".i": "c-cpp-output" if c_compiler else "c++-cpp-output",
+        ".ii": "c++-cpp-output",
+        ".m": "objective-c",
+        ".mi": "objective-c-cpp-output",
+        ".mm": "objective-c++",
+        ".mii": "objective-c++-cpp-output",
+        ".C": "c++",
+        ".cc": "c++",
+        ".CC": "c++",
+        ".cp": "c++",
+        ".cpp": "c++",
+        ".cxx": "c++",
+        ".c++": "c++",
+        ".C++": "c++",
+        ".txx": "c++",
     }
 
     __, extension = os.path.splitext(os.path.basename(filename))
@@ -127,14 +128,14 @@ def classify_source(filename, c_compiler=True):
 
 
 def compiler_language(command):
-    """ A predicate to decide the command is a compiler call or not.
+    """A predicate to decide the command is a compiler call or not.
 
-    Returns 'c' or 'c++' when it match. None otherwise. """
+    Returns 'c' or 'c++' when it match. None otherwise."""
 
-    cplusplus = re.compile(r'^(.+)(\+\+)(-.+|)$')
+    cplusplus = re.compile(r"^(.+)(\+\+)(-.+|)$")
 
     if command:
         executable = os.path.basename(command[0])
         if any(pattern.match(executable) for pattern in COMPILER_PATTERNS):
-            return 'c++' if cplusplus.match(executable) else 'c'
+            return "c++" if cplusplus.match(executable) else "c"
     return None

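A rough usage sketch of the compilation-splitting helpers changed above; the compile command is illustrative and not taken from the commit:

    from libscanbuild.compilation import split_command, classify_source

    # Dependency-generation flags are ignored, source files are split from
    # the remaining compile options, and the language is derived from the
    # executable name and the file extension.
    result = split_command(["cc", "-c", "-MD", "-Ifoo", "-o", "hello.o", "hello.c"])
    print(result.compiler)  # 'c'
    print(result.files)     # ['hello.c']
    print(result.flags)     # ['-Ifoo', '-o', 'hello.o']

    print(classify_source("hello.cpp"))  # 'c++'
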
diff --git a/clang/tools/scan-build-py/lib/libscanbuild/intercept.py b/clang/tools/scan-build-py/lib/libscanbuild/intercept.py
index 70f3233f5e8cb..59789f6001f4f 100644
--- a/clang/tools/scan-build-py/lib/libscanbuild/intercept.py
+++ b/clang/tools/scan-build-py/lib/libscanbuild/intercept.py
@@ -28,216 +28,225 @@
 import glob
 import logging
 from libear import build_libear, TemporaryDirectory
-from libscanbuild import command_entry_point, compiler_wrapper, \
-    wrapper_environment, run_command, run_build
+from libscanbuild import (
+    command_entry_point,
+    compiler_wrapper,
+    wrapper_environment,
+    run_command,
+    run_build,
+)
 from libscanbuild import duplicate_check
 from libscanbuild.compilation import split_command
 from libscanbuild.arguments import parse_args_for_intercept_build
 from libscanbuild.shell import encode, decode
 
-__all__ = ['capture', 'intercept_build', 'intercept_compiler_wrapper']
+__all__ = ["capture", "intercept_build", "intercept_compiler_wrapper"]
 
-GS = chr(0x1d)
-RS = chr(0x1e)
-US = chr(0x1f)
+GS = chr(0x1D)
+RS = chr(0x1E)
+US = chr(0x1F)
 
-COMPILER_WRAPPER_CC = 'intercept-cc'
-COMPILER_WRAPPER_CXX = 'intercept-c++'
-TRACE_FILE_EXTENSION = '.cmd'  # same as in ear.c
-WRAPPER_ONLY_PLATFORMS = frozenset({'win32', 'cygwin'})
+COMPILER_WRAPPER_CC = "intercept-cc"
+COMPILER_WRAPPER_CXX = "intercept-c++"
+TRACE_FILE_EXTENSION = ".cmd"  # same as in ear.c
+WRAPPER_ONLY_PLATFORMS = frozenset({"win32", "cygwin"})
 
 
 @command_entry_point
 def intercept_build():
-    """ Entry point for 'intercept-build' command. """
+    """Entry point for 'intercept-build' command."""
 
     args = parse_args_for_intercept_build()
     return capture(args)
 
 
 def capture(args):
-    """ The entry point of build command interception. """
+    """The entry point of build command interception."""
 
     def post_processing(commands):
-        """ To make a compilation database, it needs to filter out commands
+        """To make a compilation database, it needs to filter out commands
         which are not compiler calls. Needs to find the source file name
         from the arguments. And do shell escaping on the command.
 
         To support incremental builds, it is desired to read elements from
         an existing compilation database from a previous run. These elements
-        shall be merged with the new elements. """
+        shall be merged with the new elements."""
 
         # create entries from the current run
         current = itertools.chain.from_iterable(
             # creates a sequence of entry generators from an exec,
-            format_entry(command) for command in commands)
+            format_entry(command)
+            for command in commands
+        )
         # read entries from previous run
-        if 'append' in args and args.append and os.path.isfile(args.cdb):
+        if "append" in args and args.append and os.path.isfile(args.cdb):
             with open(args.cdb) as handle:
                 previous = iter(json.load(handle))
         else:
             previous = iter([])
         # filter out duplicate entries from both
         duplicate = duplicate_check(entry_hash)
-        return (entry
-                for entry in itertools.chain(previous, current)
-                if os.path.exists(entry['file']) and not duplicate(entry))
+        return (
+            entry
+            for entry in itertools.chain(previous, current)
+            if os.path.exists(entry["file"]) and not duplicate(entry)
+        )
 
-    with TemporaryDirectory(prefix='intercept-') as tmp_dir:
+    with TemporaryDirectory(prefix="intercept-") as tmp_dir:
         # run the build command
         environment = setup_environment(args, tmp_dir)
         exit_code = run_build(args.build, env=environment)
         # read the intercepted exec calls
         exec_traces = itertools.chain.from_iterable(
             parse_exec_trace(os.path.join(tmp_dir, filename))
-            for filename in sorted(glob.iglob(os.path.join(tmp_dir, '*.cmd'))))
+            for filename in sorted(glob.iglob(os.path.join(tmp_dir, "*.cmd")))
+        )
         # do post processing
         entries = post_processing(exec_traces)
         # dump the compilation database
-        with open(args.cdb, 'w+') as handle:
+        with open(args.cdb, "w+") as handle:
             json.dump(list(entries), handle, sort_keys=True, indent=4)
         return exit_code
 
 
 def setup_environment(args, destination):
-    """ Sets up the environment for the build command.
+    """Sets up the environment for the build command.
 
     It sets the required environment variables and execute the given command.
     The exec calls will be logged by the 'libear' preloaded library or by the
-    'wrapper' programs. """
+    'wrapper' programs."""
 
-    c_compiler = args.cc if 'cc' in args else 'cc'
-    cxx_compiler = args.cxx if 'cxx' in args else 'c++'
+    c_compiler = args.cc if "cc" in args else "cc"
+    cxx_compiler = args.cxx if "cxx" in args else "c++"
 
-    libear_path = None if args.override_compiler or is_preload_disabled(
-        sys.platform) else build_libear(c_compiler, destination)
+    libear_path = (
+        None
+        if args.override_compiler or is_preload_disabled(sys.platform)
+        else build_libear(c_compiler, destination)
+    )
 
     environment = dict(os.environ)
-    environment.update({'INTERCEPT_BUILD_TARGET_DIR': destination})
+    environment.update({"INTERCEPT_BUILD_TARGET_DIR": destination})
 
     if not libear_path:
-        logging.debug('intercept gonna use compiler wrappers')
+        logging.debug("intercept gonna use compiler wrappers")
         environment.update(wrapper_environment(args))
-        environment.update({
-            'CC': COMPILER_WRAPPER_CC,
-            'CXX': COMPILER_WRAPPER_CXX
-        })
-    elif sys.platform == 'darwin':
-        logging.debug('intercept gonna preload libear on OSX')
-        environment.update({
-            'DYLD_INSERT_LIBRARIES': libear_path,
-            'DYLD_FORCE_FLAT_NAMESPACE': '1'
-        })
+        environment.update({"CC": COMPILER_WRAPPER_CC, "CXX": COMPILER_WRAPPER_CXX})
+    elif sys.platform == "darwin":
+        logging.debug("intercept gonna preload libear on OSX")
+        environment.update(
+            {"DYLD_INSERT_LIBRARIES": libear_path, "DYLD_FORCE_FLAT_NAMESPACE": "1"}
+        )
     else:
-        logging.debug('intercept gonna preload libear on UNIX')
-        environment.update({'LD_PRELOAD': libear_path})
+        logging.debug("intercept gonna preload libear on UNIX")
+        environment.update({"LD_PRELOAD": libear_path})
 
     return environment
 
 
 @command_entry_point
 def intercept_compiler_wrapper():
-    """ Entry point for `intercept-cc` and `intercept-c++`. """
+    """Entry point for `intercept-cc` and `intercept-c++`."""
 
     return compiler_wrapper(intercept_compiler_wrapper_impl)
 
 
 def intercept_compiler_wrapper_impl(_, execution):
-    """ Implement intercept compiler wrapper functionality.
+    """Implement intercept compiler wrapper functionality.
 
     It does generate execution report into target directory.
-    The target directory name is from environment variables. """
+    The target directory name is from environment variables."""
 
-    message_prefix = 'execution report might be incomplete: %s'
+    message_prefix = "execution report might be incomplete: %s"
 
-    target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR')
+    target_dir = os.getenv("INTERCEPT_BUILD_TARGET_DIR")
     if not target_dir:
-        logging.warning(message_prefix, 'missing target directory')
+        logging.warning(message_prefix, "missing target directory")
         return
     # write current execution info to the pid file
     try:
         target_file_name = str(os.getpid()) + TRACE_FILE_EXTENSION
         target_file = os.path.join(target_dir, target_file_name)
-        logging.debug('writing execution report to: %s', target_file)
+        logging.debug("writing execution report to: %s", target_file)
         write_exec_trace(target_file, execution)
     except IOError:
-        logging.warning(message_prefix, 'io problem')
+        logging.warning(message_prefix, "io problem")
 
 
 def write_exec_trace(filename, entry):
-    """ Write execution report file.
+    """Write execution report file.
 
     This method shall be sync with the execution report writer in interception
     library. The entry in the file is a JSON objects.
 
     :param filename:    path to the output execution trace file,
-    :param entry:       the Execution object to append to that file. """
+    :param entry:       the Execution object to append to that file."""
 
-    with open(filename, 'ab') as handler:
+    with open(filename, "ab") as handler:
         pid = str(entry.pid)
         command = US.join(entry.cmd) + US
-        content = RS.join([pid, pid, 'wrapper', entry.cwd, command]) + GS
-        handler.write(content.encode('utf-8'))
+        content = RS.join([pid, pid, "wrapper", entry.cwd, command]) + GS
+        handler.write(content.encode("utf-8"))
 
 
 def parse_exec_trace(filename):
-    """ Parse the file generated by the 'libear' preloaded library.
+    """Parse the file generated by the 'libear' preloaded library.
 
     Given filename points to a file which contains the basic report
     generated by the interception library or wrapper command. A single
-    report file _might_ contain multiple process creation info. """
+    report file _might_ contain multiple process creation info."""
 
-    logging.debug('parse exec trace file: %s', filename)
-    with open(filename, 'r') as handler:
+    logging.debug("parse exec trace file: %s", filename)
+    with open(filename, "r") as handler:
         content = handler.read()
         for group in filter(bool, content.split(GS)):
             records = group.split(RS)
             yield {
-                'pid': records[0],
-                'ppid': records[1],
-                'function': records[2],
-                'directory': records[3],
-                'command': records[4].split(US)[:-1]
+                "pid": records[0],
+                "ppid": records[1],
+                "function": records[2],
+                "directory": records[3],
+                "command": records[4].split(US)[:-1],
             }
 
 
 def format_entry(exec_trace):
-    """ Generate the desired fields for compilation database entries. """
+    """Generate the desired fields for compilation database entries."""
 
     def abspath(cwd, name):
-        """ Create normalized absolute path from input filename. """
+        """Create normalized absolute path from input filename."""
         fullname = name if os.path.isabs(name) else os.path.join(cwd, name)
         return os.path.normpath(fullname)
 
-    logging.debug('format this command: %s', exec_trace['command'])
-    compilation = split_command(exec_trace['command'])
+    logging.debug("format this command: %s", exec_trace["command"])
+    compilation = split_command(exec_trace["command"])
     if compilation:
         for source in compilation.files:
-            compiler = 'c++' if compilation.compiler == 'c++' else 'cc'
-            command = [compiler, '-c'] + compilation.flags + [source]
-            logging.debug('formated as: %s', command)
+            compiler = "c++" if compilation.compiler == "c++" else "cc"
+            command = [compiler, "-c"] + compilation.flags + [source]
+            logging.debug("formated as: %s", command)
             yield {
-                'directory': exec_trace['directory'],
-                'command': encode(command),
-                'file': abspath(exec_trace['directory'], source)
+                "directory": exec_trace["directory"],
+                "command": encode(command),
+                "file": abspath(exec_trace["directory"], source),
             }
 
 
 def is_preload_disabled(platform):
-    """ Library-based interposition will fail silently if SIP is enabled,
+    """Library-based interposition will fail silently if SIP is enabled,
     so this should be detected. You can detect whether SIP is enabled on
     Darwin by checking whether (1) there is a binary called 'csrutil' in
     the path and, if so, (2) whether the output of executing 'csrutil status'
     contains 'System Integrity Protection status: enabled'.
 
     :param platform: name of the platform (returned by sys.platform),
-    :return: True if library preload will fail by the dynamic linker. """
+    :return: True if library preload will fail by the dynamic linker."""
 
     if platform in WRAPPER_ONLY_PLATFORMS:
         return True
-    elif platform == 'darwin':
-        command = ['csrutil', 'status']
-        pattern = re.compile(r'System Integrity Protection status:\s+enabled')
+    elif platform == "darwin":
+        command = ["csrutil", "status"]
+        pattern = re.compile(r"System Integrity Protection status:\s+enabled")
         try:
             return any(pattern.match(line) for line in run_command(command))
         except:
@@ -247,16 +256,16 @@ def is_preload_disabled(platform):
 
 
 def entry_hash(entry):
-    """ Implement unique hash method for compilation database entries. """
+    """Implement unique hash method for compilation database entries."""
 
     # For faster lookup in set filename is reverted
-    filename = entry['file'][::-1]
+    filename = entry["file"][::-1]
     # For faster lookup in set directory is reverted
-    directory = entry['directory'][::-1]
+    directory = entry["directory"][::-1]
     # On OS X the 'cc' and 'c++' compilers are wrappers for
     # 'clang' therefore both call would be logged. To avoid
     # this the hash does not contain the first word of the
     # command.
-    command = ' '.join(decode(entry['command'])[1:])
+    command = " ".join(decode(entry["command"])[1:])
 
-    return '<>'.join([filename, directory, command])
+    return "<>".join([filename, directory, command])

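The trace files consumed above use a flat record layout built from the ASCII group/record/unit separators; a minimal round-trip sketch (the pid, directory, and command values are hypothetical):

    GS, RS, US = chr(0x1D), chr(0x1E), chr(0x1F)

    # write_exec_trace() lays one record out as:
    #   pid RS ppid RS function RS directory RS command-joined-by-US GS
    cmd = ["cc", "-c", "hello.c"]
    record = RS.join(["1234", "1234", "wrapper", "/tmp/build", US.join(cmd) + US]) + GS

    # parse_exec_trace() recovers the same fields:
    fields = record.split(GS)[0].split(RS)
    assert fields[2] == "wrapper"
    assert fields[4].split(US)[:-1] == cmd
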
diff --git a/clang/tools/scan-build-py/lib/libscanbuild/report.py b/clang/tools/scan-build-py/lib/libscanbuild/report.py
index 0962b636a9219..c745d765aa538 100644
--- a/clang/tools/scan-build-py/lib/libscanbuild/report.py
+++ b/clang/tools/scan-build-py/lib/libscanbuild/report.py
@@ -20,16 +20,16 @@
 from libscanbuild import duplicate_check
 from libscanbuild.clang import get_version
 
-__all__ = ['document']
+__all__ = ["document"]
 
 
 def document(args):
-    """ Generates cover report and returns the number of bugs/crashes. """
+    """Generates cover report and returns the number of bugs/crashes."""
 
-    html_reports_available = args.output_format in {'html', 'plist-html', 'sarif-html'}
-    sarif_reports_available = args.output_format in {'sarif', 'sarif-html'}
+    html_reports_available = args.output_format in {"html", "plist-html", "sarif-html"}
+    sarif_reports_available = args.output_format in {"sarif", "sarif-html"}
 
-    logging.debug('count crashes and bugs')
+    logging.debug("count crashes and bugs")
     crash_count = sum(1 for _ in read_crashes(args.output))
     bug_counter = create_counters()
     for bug in read_bugs(args.output, html_reports_available):
@@ -39,7 +39,7 @@ def document(args):
     if html_reports_available and result:
         use_cdb = os.path.exists(args.cdb)
 
-        logging.debug('generate index.html file')
+        logging.debug("generate index.html file")
         # common prefix for source files to have sorter path
         prefix = commonprefix_from(args.cdb) if use_cdb else os.getcwd()
         # assemble the cover from multiple fragments
@@ -60,24 +60,26 @@ def document(args):
                 os.remove(fragment)
 
     if sarif_reports_available:
-        logging.debug('merging sarif files')
+        logging.debug("merging sarif files")
         merge_sarif_files(args.output)
 
     return result
 
 
 def assemble_cover(args, prefix, fragments):
-    """ Put together the fragments into a final report. """
+    """Put together the fragments into a final report."""
 
     import getpass
     import socket
 
     if args.html_title is None:
-        args.html_title = os.path.basename(prefix) + ' - analyzer results'
+        args.html_title = os.path.basename(prefix) + " - analyzer results"
 
-    with open(os.path.join(args.output, 'index.html'), 'w') as handle:
+    with open(os.path.join(args.output, "index.html"), "w") as handle:
         indent = 0
-        handle.write(reindent("""
+        handle.write(
+            reindent(
+                """
         |<!DOCTYPE html>
         |<html>
         |  <head>
@@ -85,9 +87,14 @@ def assemble_cover(args, prefix, fragments):
         |    <link type="text/css" rel="stylesheet" href="scanview.css"/>
         |    <script type='text/javascript' src="sorttable.js"></script>
         |    <script type='text/javascript' src='selectable.js'></script>
-        |  </head>""", indent).format(html_title=args.html_title))
-        handle.write(comment('SUMMARYENDHEAD'))
-        handle.write(reindent("""
+        |  </head>""",
+                indent,
+            ).format(html_title=args.html_title)
+        )
+        handle.write(comment("SUMMARYENDHEAD"))
+        handle.write(
+            reindent(
+                """
         |  <body>
         |    <h1>{html_title}</h1>
         |    <table>
@@ -96,30 +103,41 @@ def assemble_cover(args, prefix, fragments):
         |      <tr><th>Command Line:</th><td>{cmd_args}</td></tr>
         |      <tr><th>Clang Version:</th><td>{clang_version}</td></tr>
         |      <tr><th>Date:</th><td>{date}</td></tr>
-        |    </table>""", indent).format(html_title=args.html_title,
-                                         user_name=getpass.getuser(),
-                                         host_name=socket.gethostname(),
-                                         current_dir=prefix,
-                                         cmd_args=' '.join(sys.argv),
-                                         clang_version=get_version(args.clang),
-                                         date=datetime.datetime.today(
-                                         ).strftime('%c')))
+        |    </table>""",
+                indent,
+            ).format(
+                html_title=args.html_title,
+                user_name=getpass.getuser(),
+                host_name=socket.gethostname(),
+                current_dir=prefix,
+                cmd_args=" ".join(sys.argv),
+                clang_version=get_version(args.clang),
+                date=datetime.datetime.today().strftime("%c"),
+            )
+        )
         for fragment in fragments:
             # copy the content of fragments
-            with open(fragment, 'r') as input_handle:
+            with open(fragment, "r") as input_handle:
                 shutil.copyfileobj(input_handle, handle)
-        handle.write(reindent("""
+        handle.write(
+            reindent(
+                """
         |  </body>
-        |</html>""", indent))
+        |</html>""",
+                indent,
+            )
+        )
 
 
 def bug_summary(output_dir, bug_counter):
-    """ Bug summary is a HTML table to give a better overview of the bugs. """
+    """Bug summary is a HTML table to give a better overview of the bugs."""
 
-    name = os.path.join(output_dir, 'summary.html.fragment')
-    with open(name, 'w') as handle:
+    name = os.path.join(output_dir, "summary.html.fragment")
+    with open(name, "w") as handle:
         indent = 4
-        handle.write(reindent("""
+        handle.write(
+            reindent(
+                """
         |<h2>Bug Summary</h2>
         |<table>
         |  <thead>
@@ -129,8 +147,13 @@ def bug_summary(output_dir, bug_counter):
         |      <td class="sorttable_nosort">Display?</td>
         |    </tr>
         |  </thead>
-        |  <tbody>""", indent))
-        handle.write(reindent("""
+        |  <tbody>""",
+                indent,
+            )
+        )
+        handle.write(
+            reindent(
+                """
         |    <tr style="font-weight:bold">
         |      <td class="SUMM_DESC">All Bugs</td>
         |      <td class="Q">{0}</td>
@@ -140,14 +163,24 @@ def bug_summary(output_dir, bug_counter):
         |                 onClick="CopyCheckedStateToCheckButtons(this);"/>
         |        </center>
         |      </td>
-        |    </tr>""", indent).format(bug_counter.total))
+        |    </tr>""",
+                indent,
+            ).format(bug_counter.total)
+        )
         for category, types in bug_counter.categories.items():
-            handle.write(reindent("""
+            handle.write(
+                reindent(
+                    """
         |    <tr>
         |      <th>{0}</th><th colspan=2></th>
-        |    </tr>""", indent).format(category))
+        |    </tr>""",
+                    indent,
+                ).format(category)
+            )
             for bug_type in types.values():
-                handle.write(reindent("""
+                handle.write(
+                    reindent(
+                        """
         |    <tr>
         |      <td class="SUMM_DESC">{bug_type}</td>
         |      <td class="Q">{bug_count}</td>
@@ -157,24 +190,34 @@ def bug_summary(output_dir, bug_counter):
         |                 onClick="ToggleDisplay(this,'{bug_type_class}');"/>
         |        </center>
         |      </td>
-        |    </tr>""", indent).format(**bug_type))
-        handle.write(reindent("""
+        |    </tr>""",
+                        indent,
+                    ).format(**bug_type)
+                )
+        handle.write(
+            reindent(
+                """
         |  </tbody>
-        |</table>""", indent))
-        handle.write(comment('SUMMARYBUGEND'))
+        |</table>""",
+                indent,
+            )
+        )
+        handle.write(comment("SUMMARYBUGEND"))
     return name
 
 
 def bug_report(output_dir, prefix):
-    """ Creates a fragment from the analyzer reports. """
+    """Creates a fragment from the analyzer reports."""
 
     pretty = prettify_bug(prefix, output_dir)
     bugs = (pretty(bug) for bug in read_bugs(output_dir, True))
 
-    name = os.path.join(output_dir, 'bugs.html.fragment')
-    with open(name, 'w') as handle:
+    name = os.path.join(output_dir, "bugs.html.fragment")
+    with open(name, "w") as handle:
         indent = 4
-        handle.write(reindent("""
+        handle.write(
+            reindent(
+                """
         |<h2>Reports</h2>
         |<table class="sortable" style="table-layout:automatic">
         |  <thead>
@@ -191,10 +234,15 @@ def bug_report(output_dir, prefix):
         |      <td class="sorttable_nosort"></td>
         |    </tr>
         |  </thead>
-        |  <tbody>""", indent))
-        handle.write(comment('REPORTBUGCOL'))
+        |  <tbody>""",
+                indent,
+            )
+        )
+        handle.write(comment("REPORTBUGCOL"))
         for current in bugs:
-            handle.write(reindent("""
+            handle.write(
+                reindent(
+                    """
         |    <tr class="{bug_type_class}">
         |      <td class="DESC">{bug_category}</td>
         |      <td class="DESC">{bug_type}</td>
@@ -203,25 +251,35 @@ def bug_report(output_dir, prefix):
         |      <td class="Q">{bug_line}</td>
         |      <td class="Q">{bug_path_length}</td>
         |      <td><a href="{report_file}#EndPath">View Report</a></td>
-        |    </tr>""", indent).format(**current))
-            handle.write(comment('REPORTBUG', {'id': current['report_file']}))
-        handle.write(reindent("""
+        |    </tr>""",
+                    indent,
+                ).format(**current)
+            )
+            handle.write(comment("REPORTBUG", {"id": current["report_file"]}))
+        handle.write(
+            reindent(
+                """
         |  </tbody>
-        |</table>""", indent))
-        handle.write(comment('REPORTBUGEND'))
+        |</table>""",
+                indent,
+            )
+        )
+        handle.write(comment("REPORTBUGEND"))
     return name
 
 
 def crash_report(output_dir, prefix):
-    """ Creates a fragment from the compiler crashes. """
+    """Creates a fragment from the compiler crashes."""
 
     pretty = prettify_crash(prefix, output_dir)
     crashes = (pretty(crash) for crash in read_crashes(output_dir))
 
-    name = os.path.join(output_dir, 'crashes.html.fragment')
-    with open(name, 'w') as handle:
+    name = os.path.join(output_dir, "crashes.html.fragment")
+    with open(name, "w") as handle:
         indent = 4
-        handle.write(reindent("""
+        handle.write(
+            reindent(
+                """
         |<h2>Analyzer Failures</h2>
         |<p>The analyzer had problems processing the following files:</p>
         |<table>
@@ -233,49 +291,64 @@ def crash_report(output_dir, prefix):
         |      <td>STDERR Output</td>
         |    </tr>
         |  </thead>
-        |  <tbody>""", indent))
+        |  <tbody>""",
+                indent,
+            )
+        )
         for current in crashes:
-            handle.write(reindent("""
+            handle.write(
+                reindent(
+                    """
         |    <tr>
         |      <td>{problem}</td>
         |      <td>{source}</td>
         |      <td><a href="{file}">preprocessor output</a></td>
         |      <td><a href="{stderr}">analyzer std err</a></td>
-        |    </tr>""", indent).format(**current))
-            handle.write(comment('REPORTPROBLEM', current))
-        handle.write(reindent("""
+        |    </tr>""",
+                    indent,
+                ).format(**current)
+            )
+            handle.write(comment("REPORTPROBLEM", current))
+        handle.write(
+            reindent(
+                """
         |  </tbody>
-        |</table>""", indent))
-        handle.write(comment('REPORTCRASHES'))
+        |</table>""",
+                indent,
+            )
+        )
+        handle.write(comment("REPORTCRASHES"))
     return name
 
 
 def read_crashes(output_dir):
-    """ Generate a unique sequence of crashes from given output directory. """
+    """Generate a unique sequence of crashes from given output directory."""
 
-    return (parse_crash(filename)
-            for filename in glob.iglob(os.path.join(output_dir, 'failures',
-                                                    '*.info.txt')))
+    return (
+        parse_crash(filename)
+        for filename in glob.iglob(os.path.join(output_dir, "failures", "*.info.txt"))
+    )
 
 
 def read_bugs(output_dir, html):
     # type: (str, bool) -> Generator[Dict[str, Any], None, None]
-    """ Generate a unique sequence of bugs from given output directory.
+    """Generate a unique sequence of bugs from given output directory.
 
     Duplicates can be in a project if the same module was compiled multiple
     times with different compiler options. These would be better to show in
-    the final report (cover) only once. """
+    the final report (cover) only once."""
 
     def empty(file_name):
         return os.stat(file_name).st_size == 0
 
     duplicate = duplicate_check(
-        lambda bug: '{bug_line}.{bug_path_length}:{bug_file}'.format(**bug))
+        lambda bug: "{bug_line}.{bug_path_length}:{bug_file}".format(**bug)
+    )
 
     # get the right parser for the job.
     parser = parse_bug_html if html else parse_bug_plist
     # get the input files, which are not empty.
-    pattern = os.path.join(output_dir, '*.html' if html else '*.plist')
+    pattern = os.path.join(output_dir, "*.html" if html else "*.plist")
     bug_files = (file for file in glob.iglob(pattern) if not empty(file))
 
     for bug_file in bug_files:
@@ -283,8 +356,9 @@ def empty(file_name):
             if not duplicate(bug):
                 yield bug
 
+
 def merge_sarif_files(output_dir, sort_files=False):
-    """ Reads and merges all .sarif files in the given output directory.
+    """Reads and merges all .sarif files in the given output directory.
 
     Each sarif file in the output directory is understood as a single run
     and thus appear separate in the top level runs array. This requires
@@ -296,43 +370,49 @@ def empty(file_name):
 
     def update_sarif_object(sarif_object, runs_count_offset):
         """
-            Given a SARIF object, checks its dictionary entries for a 'message' property.
-            If it exists, updates the message index of embedded links in the run index.
+        Given a SARIF object, checks its dictionary entries for a 'message' property.
+        If it exists, updates the message index of embedded links in the run index.
 
-            Recursively looks through entries in the dictionary.
+        Recursively looks through entries in the dictionary.
         """
         if not isinstance(sarif_object, dict):
             return sarif_object
 
-        if 'message' in sarif_object:
-            sarif_object['message'] = match_and_update_run(sarif_object['message'], runs_count_offset)
+        if "message" in sarif_object:
+            sarif_object["message"] = match_and_update_run(
+                sarif_object["message"], runs_count_offset
+            )
 
         for key in sarif_object:
             if isinstance(sarif_object[key], list):
                 # iterate through subobjects and update it.
-                arr = [update_sarif_object(entry, runs_count_offset) for entry in sarif_object[key]]
+                arr = [
+                    update_sarif_object(entry, runs_count_offset)
+                    for entry in sarif_object[key]
+                ]
                 sarif_object[key] = arr
             elif isinstance(sarif_object[key], dict):
-                sarif_object[key] = update_sarif_object(sarif_object[key], runs_count_offset)
+                sarif_object[key] = update_sarif_object(
+                    sarif_object[key], runs_count_offset
+                )
             else:
                 # do nothing
                 pass
 
         return sarif_object
 
-
     def match_and_update_run(message, runs_count_offset):
         """
-            Given a SARIF message object, checks if the text property contains an embedded link and
-            updates the run index if necessary.
+        Given a SARIF message object, checks if the text property contains an embedded link and
+        updates the run index if necessary.
         """
-        if 'text' not in message:
+        if "text" not in message:
             return message
 
         # we only merge runs, so we only need to update the run index
-        pattern = re.compile(r'sarif:/runs/(\d+)')
+        pattern = re.compile(r"sarif:/runs/(\d+)")
 
-        text = message['text']
+        text = message["text"]
         matches = re.finditer(pattern, text)
         matches_list = list(matches)
 
@@ -340,14 +420,16 @@ def match_and_update_run(message, runs_count_offset):
         for idx in range(len(matches_list) - 1, -1, -1):
             match = matches_list[idx]
             new_run_count = str(runs_count_offset + int(match.group(1)))
-            text = text[0:match.start(1)] + new_run_count + text[match.end(1):]
+            text = text[0 : match.start(1)] + new_run_count + text[match.end(1) :]
 
-        message['text'] = text
+        message["text"] = text
         return message
 
-
-
-    sarif_files = (file for file in glob.iglob(os.path.join(output_dir, '*.sarif')) if not empty(file))
+    sarif_files = (
+        file
+        for file in glob.iglob(os.path.join(output_dir, "*.sarif"))
+        if not empty(file)
+    )
     # exposed for testing since the order of files returned by glob is not guaranteed to be sorted
     if sort_files:
         sarif_files = list(sarif_files)
@@ -358,7 +440,7 @@ def match_and_update_run(message, runs_count_offset):
     for sarif_file in sarif_files:
         with open(sarif_file) as fp:
             sarif = json.load(fp)
-            if 'runs' not in sarif:
+            if "runs" not in sarif:
                 continue
 
             # start with the first file
@@ -366,58 +448,60 @@ def match_and_update_run(message, runs_count_offset):
                 merged = sarif
             else:
                 # extract the run and append it to the merged output
-                for run in sarif['runs']:
+                for run in sarif["runs"]:
                     new_run = update_sarif_object(run, runs_count)
-                    merged['runs'].append(new_run)
+                    merged["runs"].append(new_run)
 
-            runs_count += len(sarif['runs'])
+            runs_count += len(sarif["runs"])
 
-    with open(os.path.join(output_dir, 'results-merged.sarif'), 'w') as out:
+    with open(os.path.join(output_dir, "results-merged.sarif"), "w") as out:
         json.dump(merged, out, indent=4, sort_keys=True)
 
 
 def parse_bug_plist(filename):
-    """ Returns the generator of bugs from a single .plist file. """
-
-    with open(filename, 'rb') as fp:
-      content = plistlib.load(fp)
-      files = content.get('files')
-      for bug in content.get('diagnostics', []):
-          if len(files) <= int(bug['location']['file']):
-              logging.warning('Parsing bug from "%s" failed', filename)
-              continue
-
-          yield {
-              'result': filename,
-              'bug_type': bug['type'],
-              'bug_category': bug['category'],
-              'bug_line': int(bug['location']['line']),
-              'bug_path_length': int(bug['location']['col']),
-              'bug_file': files[int(bug['location']['file'])]
-          }
+    """Returns the generator of bugs from a single .plist file."""
+
+    with open(filename, "rb") as fp:
+        content = plistlib.load(fp)
+        files = content.get("files")
+        for bug in content.get("diagnostics", []):
+            if len(files) <= int(bug["location"]["file"]):
+                logging.warning('Parsing bug from "%s" failed', filename)
+                continue
 
+            yield {
+                "result": filename,
+                "bug_type": bug["type"],
+                "bug_category": bug["category"],
+                "bug_line": int(bug["location"]["line"]),
+                "bug_path_length": int(bug["location"]["col"]),
+                "bug_file": files[int(bug["location"]["file"])],
+            }
 
-def parse_bug_html(filename):
-    """ Parse out the bug information from HTML output. """
 
-    patterns = [re.compile(r'<!-- BUGTYPE (?P<bug_type>.*) -->$'),
-                re.compile(r'<!-- BUGFILE (?P<bug_file>.*) -->$'),
-                re.compile(r'<!-- BUGPATHLENGTH (?P<bug_path_length>.*) -->$'),
-                re.compile(r'<!-- BUGLINE (?P<bug_line>.*) -->$'),
-                re.compile(r'<!-- BUGCATEGORY (?P<bug_category>.*) -->$'),
-                re.compile(r'<!-- BUGDESC (?P<bug_description>.*) -->$'),
-                re.compile(r'<!-- FUNCTIONNAME (?P<bug_function>.*) -->$')]
-    endsign = re.compile(r'<!-- BUGMETAEND -->')
+def parse_bug_html(filename):
+    """Parse out the bug information from HTML output."""
+
+    patterns = [
+        re.compile(r"<!-- BUGTYPE (?P<bug_type>.*) -->$"),
+        re.compile(r"<!-- BUGFILE (?P<bug_file>.*) -->$"),
+        re.compile(r"<!-- BUGPATHLENGTH (?P<bug_path_length>.*) -->$"),
+        re.compile(r"<!-- BUGLINE (?P<bug_line>.*) -->$"),
+        re.compile(r"<!-- BUGCATEGORY (?P<bug_category>.*) -->$"),
+        re.compile(r"<!-- BUGDESC (?P<bug_description>.*) -->$"),
+        re.compile(r"<!-- FUNCTIONNAME (?P<bug_function>.*) -->$"),
+    ]
+    endsign = re.compile(r"<!-- BUGMETAEND -->")
 
     bug = {
-        'report_file': filename,
-        'bug_function': 'n/a',  # compatibility with < clang-3.5
-        'bug_category': 'Other',
-        'bug_line': 0,
-        'bug_path_length': 1
+        "report_file": filename,
+        "bug_function": "n/a",  # compatibility with < clang-3.5
+        "bug_category": "Other",
+        "bug_line": 0,
+        "bug_path_length": 1,
     }
 
-    with open(filename, encoding='utf-8') as handler:
+    with open(filename, encoding="utf-8") as handler:
         for line in handler.readlines():
             # do not read the file further
             if endsign.match(line):
@@ -429,61 +513,64 @@ def parse_bug_html(filename):
                     bug.update(match.groupdict())
                     break
 
-    encode_value(bug, 'bug_line', int)
-    encode_value(bug, 'bug_path_length', int)
+    encode_value(bug, "bug_line", int)
+    encode_value(bug, "bug_path_length", int)
 
     yield bug
 
 
 def parse_crash(filename):
-    """ Parse out the crash information from the report file. """
+    """Parse out the crash information from the report file."""
 
-    match = re.match(r'(.*)\.info\.txt', filename)
+    match = re.match(r"(.*)\.info\.txt", filename)
     name = match.group(1) if match else None
-    with open(filename, mode='rb') as handler:
+    with open(filename, mode="rb") as handler:
         # this is a workaround to fix windows read '\r\n' as new lines.
         lines = [line.decode().rstrip() for line in handler.readlines()]
         return {
-            'source': lines[0],
-            'problem': lines[1],
-            'file': name,
-            'info': name + '.info.txt',
-            'stderr': name + '.stderr.txt'
+            "source": lines[0],
+            "problem": lines[1],
+            "file": name,
+            "info": name + ".info.txt",
+            "stderr": name + ".stderr.txt",
         }
 
 
 def category_type_name(bug):
-    """ Create a new bug attribute from bug by category and type.
+    """Create a new bug attribute from bug by category and type.
 
-    The result will be used as CSS class selector in the final report. """
+    The result will be used as CSS class selector in the final report."""
 
     def smash(key):
-        """ Make value ready to be HTML attribute value. """
+        """Make value ready to be HTML attribute value."""
 
-        return bug.get(key, '').lower().replace(' ', '_').replace("'", '')
+        return bug.get(key, "").lower().replace(" ", "_").replace("'", "")
 
-    return escape('bt_' + smash('bug_category') + '_' + smash('bug_type'))
+    return escape("bt_" + smash("bug_category") + "_" + smash("bug_type"))
 
 
 def create_counters():
-    """ Create counters for bug statistics.
+    """Create counters for bug statistics.
 
     Two entries are maintained: 'total' is an integer, represents the
     number of bugs. The 'categories' is a two level categorisation of bug
     counters. The first level is 'bug category' the second is 'bug type'.
     Each entry in this classification is a dictionary of 'count', 'type'
-    and 'label'. """
+    and 'label'."""
 
     def predicate(bug):
-        bug_category = bug['bug_category']
-        bug_type = bug['bug_type']
+        bug_category = bug["bug_category"]
+        bug_type = bug["bug_type"]
         current_category = predicate.categories.get(bug_category, dict())
-        current_type = current_category.get(bug_type, {
-            'bug_type': bug_type,
-            'bug_type_class': category_type_name(bug),
-            'bug_count': 0
-        })
-        current_type.update({'bug_count': current_type['bug_count'] + 1})
+        current_type = current_category.get(
+            bug_type,
+            {
+                "bug_type": bug_type,
+                "bug_type_class": category_type_name(bug),
+                "bug_count": 0,
+            },
+        )
+        current_type.update({"bug_count": current_type["bug_count"] + 1})
         current_category.update({bug_type: current_type})
         predicate.categories.update({bug_category: current_category})
         predicate.total += 1
@@ -495,14 +582,14 @@ def predicate(bug):
 
 def prettify_bug(prefix, output_dir):
     def predicate(bug):
-        """ Make safe this values to embed into HTML. """
+        """Make safe this values to embed into HTML."""
 
-        bug['bug_type_class'] = category_type_name(bug)
+        bug["bug_type_class"] = category_type_name(bug)
 
-        encode_value(bug, 'bug_file', lambda x: escape(chop(prefix, x)))
-        encode_value(bug, 'bug_category', escape)
-        encode_value(bug, 'bug_type', escape)
-        encode_value(bug, 'report_file', lambda x: escape(chop(output_dir, x)))
+        encode_value(bug, "bug_file", lambda x: escape(chop(prefix, x)))
+        encode_value(bug, "bug_category", escape)
+        encode_value(bug, "bug_type", escape)
+        encode_value(bug, "report_file", lambda x: escape(chop(output_dir, x)))
         return bug
 
     return predicate
@@ -510,28 +597,28 @@ def predicate(bug):
 
 def prettify_crash(prefix, output_dir):
     def predicate(crash):
-        """ Make safe this values to embed into HTML. """
+        """Make safe this values to embed into HTML."""
 
-        encode_value(crash, 'source', lambda x: escape(chop(prefix, x)))
-        encode_value(crash, 'problem', escape)
-        encode_value(crash, 'file', lambda x: escape(chop(output_dir, x)))
-        encode_value(crash, 'info', lambda x: escape(chop(output_dir, x)))
-        encode_value(crash, 'stderr', lambda x: escape(chop(output_dir, x)))
+        encode_value(crash, "source", lambda x: escape(chop(prefix, x)))
+        encode_value(crash, "problem", escape)
+        encode_value(crash, "file", lambda x: escape(chop(output_dir, x)))
+        encode_value(crash, "info", lambda x: escape(chop(output_dir, x)))
+        encode_value(crash, "stderr", lambda x: escape(chop(output_dir, x)))
         return crash
 
     return predicate
 
 
 def copy_resource_files(output_dir):
-    """ Copy the javascript and css files to the report directory. """
+    """Copy the javascript and css files to the report directory."""
 
     this_dir = os.path.dirname(os.path.realpath(__file__))
-    for resource in os.listdir(os.path.join(this_dir, 'resources')):
-        shutil.copy(os.path.join(this_dir, 'resources', resource), output_dir)
+    for resource in os.listdir(os.path.join(this_dir, "resources")):
+        shutil.copy(os.path.join(this_dir, "resources", resource), output_dir)
 
 
 def encode_value(container, key, encode):
-    """ Run 'encode' on 'container[key]' value and update it. """
+    """Run 'encode' on 'container[key]' value and update it."""
 
     if key in container:
         value = encode(container[key])
@@ -539,56 +626,56 @@ def encode_value(container, key, encode):
 
 
 def chop(prefix, filename):
-    """ Create 'filename' from '/prefix/filename' """
+    """Create 'filename' from '/prefix/filename'"""
 
     return filename if not len(prefix) else os.path.relpath(filename, prefix)
 
 
 def escape(text):
-    """ Paranoid HTML escape method. (Python version independent) """
+    """Paranoid HTML escape method. (Python version independent)"""
 
     escape_table = {
-        '&': '&amp;',
-        '"': '&quot;',
-        "'": '&apos;',
-        '>': '&gt;',
-        '<': '&lt;'
+        "&": "&amp;",
+        '"': "&quot;",
+        "'": "&apos;",
+        ">": "&gt;",
+        "<": "&lt;",
     }
-    return ''.join(escape_table.get(c, c) for c in text)
+    return "".join(escape_table.get(c, c) for c in text)
 
 
 def reindent(text, indent):
-    """ Utility function to format html output and keep indentation. """
+    """Utility function to format html output and keep indentation."""
 
-    result = ''
+    result = ""
     for line in text.splitlines():
         if len(line.strip()):
-            result += ' ' * indent + line.split('|')[1] + os.linesep
+            result += " " * indent + line.split("|")[1] + os.linesep
     return result
 
 
 def comment(name, opts=dict()):
-    """ Utility function to format meta information as comment. """
+    """Utility function to format meta information as comment."""
 
-    attributes = ''
+    attributes = ""
     for key, value in opts.items():
         attributes += ' {0}="{1}"'.format(key, value)
 
-    return '<!-- {0}{1} -->{2}'.format(name, attributes, os.linesep)
+    return "<!-- {0}{1} -->{2}".format(name, attributes, os.linesep)
 
 
 def commonprefix_from(filename):
-    """ Create file prefix from a compilation database entries. """
+    """Create file prefix from a compilation database entries."""
 
-    with open(filename, 'r') as handle:
-        return commonprefix(item['file'] for item in json.load(handle))
+    with open(filename, "r") as handle:
+        return commonprefix(item["file"] for item in json.load(handle))
 
 
 def commonprefix(files):
-    """ Fixed version of os.path.commonprefix.
+    """Fixed version of os.path.commonprefix.
 
     :param files: list of file names.
-    :return: the longest path prefix that is a prefix of all files. """
+    :return: the longest path prefix that is a prefix of all files."""
     result = None
     for current in files:
         if result is not None:
@@ -597,7 +684,7 @@ def commonprefix(files):
             result = current
 
     if result is None:
-        return ''
+        return ""
     elif not os.path.isdir(result):
         return os.path.dirname(result)
     else:

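For context on the reindent() calls threaded through report.py above, a self-contained sketch of what the '|' templates expand to:

    import os

    def reindent(text, indent):
        # Keep only the part after the first '|' on each non-blank line,
        # prefixed with the requested indentation (as in report.py above).
        result = ""
        for line in text.splitlines():
            if len(line.strip()):
                result += " " * indent + line.split("|")[1] + os.linesep
        return result

    html = reindent("""
        |<h2>Bug Summary</h2>
        |<table>""", 4)
    # html == '    <h2>Bug Summary</h2>' + os.linesep + '    <table>' + os.linesep
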
diff --git a/clang/tools/scan-build-py/lib/libscanbuild/shell.py b/clang/tools/scan-build-py/lib/libscanbuild/shell.py
index f9c08dfef2bdc..6d5b862e111a7 100644
--- a/clang/tools/scan-build-py/lib/libscanbuild/shell.py
+++ b/clang/tools/scan-build-py/lib/libscanbuild/shell.py
@@ -7,28 +7,45 @@
 import re
 import shlex
 
-__all__ = ['encode', 'decode']
+__all__ = ["encode", "decode"]
 
 
 def encode(command):
-    """ Takes a command as list and returns a string. """
+    """Takes a command as list and returns a string."""
 
     def needs_quote(word):
-        """ Returns true if arguments needs to be protected by quotes.
+        """Returns true if arguments needs to be protected by quotes.
 
         Previous implementation was shlex.split method, but that's not good
         for this job. Currently is running through the string with a basic
-        state checking. """
-
-        reserved = {' ', '$', '%', '&', '(', ')', '[', ']', '{', '}', '*', '|',
-                    '<', '>', '@', '?', '!'}
+        state checking."""
+
+        reserved = {
+            " ",
+            "$",
+            "%",
+            "&",
+            "(",
+            ")",
+            "[",
+            "]",
+            "{",
+            "}",
+            "*",
+            "|",
+            "<",
+            ">",
+            "@",
+            "?",
+            "!",
+        }
         state = 0
         for current in word:
             if state == 0 and current in reserved:
                 return True
-            elif state == 0 and current == '\\':
+            elif state == 0 and current == "\\":
                 state = 1
-            elif state == 1 and current in reserved | {'\\'}:
+            elif state == 1 and current in reserved | {"\\"}:
                 state = 0
             elif state == 0 and current == '"':
                 state = 2
@@ -41,10 +58,10 @@ def needs_quote(word):
         return state != 0
 
     def escape(word):
-        """ Do protect argument if that's needed. """
+        """Do protect argument if that's needed."""
 
-        table = {'\\': '\\\\', '"': '\\"'}
-        escaped = ''.join([table.get(c, c) for c in word])
+        table = {"\\": "\\\\", '"': '\\"'}
+        escaped = "".join([table.get(c, c) for c in word])
 
         return '"' + escaped + '"' if needs_quote(word) else escaped
 
@@ -52,14 +69,14 @@ def escape(word):
 
 
 def decode(string):
-    """ Takes a command string and returns as a list. """
+    """Takes a command string and returns as a list."""
 
     def unescape(arg):
-        """ Gets rid of the escaping characters. """
+        """Gets rid of the escaping characters."""
 
         if len(arg) >= 2 and arg[0] == arg[-1] and arg[0] == '"':
             arg = arg[1:-1]
-            return re.sub(r'\\(["\\])', r'\1', arg)
-        return re.sub(r'\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])', r'\1', arg)
+            return re.sub(r'\\(["\\])', r"\1", arg)
+        return re.sub(r"\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])", r"\1", arg)
 
     return [unescape(arg) for arg in shlex.split(string)]

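A quick round-trip through the two public entry points. This assumes encode() joins the escaped words with single spaces (its return statement falls between the hunks) and that the scan-build-py lib/ directory is on sys.path, as the tests/__init__.py hunk just below arranges; the command list is fabricated:

    from libscanbuild.shell import decode, encode

    cmd = ["cc", "-DMSG=hello world", "test.c"]
    line = encode(cmd)
    # The argument containing a space comes back double-quoted, and
    # decode() undoes the quoting via shlex.split() plus unescape().
    assert decode(line) == cmd
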
diff --git a/clang/tools/scan-build-py/tests/__init__.py b/clang/tools/scan-build-py/tests/__init__.py
index 45d5b8d7cc025..58a1282c6bcff 100644
--- a/clang/tools/scan-build-py/tests/__init__.py
+++ b/clang/tools/scan-build-py/tests/__init__.py
@@ -7,7 +7,7 @@
 import sys
 
 this_dir = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(os.path.dirname(this_dir), 'lib'))
+sys.path.append(os.path.join(os.path.dirname(this_dir), "lib"))
 
 import unittest
 

diff --git a/clang/tools/scan-build-py/tests/functional/cases/__init__.py b/clang/tools/scan-build-py/tests/functional/cases/__init__.py
index e8c490add6c61..a6a293f73d156 100644
--- a/clang/tools/scan-build-py/tests/functional/cases/__init__.py
+++ b/clang/tools/scan-build-py/tests/functional/cases/__init__.py
@@ -10,40 +10,51 @@
 
 def load_tests(loader, suite, pattern):
     from . import test_from_cdb
+
     suite.addTests(loader.loadTestsFromModule(test_from_cdb))
     from . import test_from_cmd
+
     suite.addTests(loader.loadTestsFromModule(test_from_cmd))
     from . import test_create_cdb
+
     suite.addTests(loader.loadTestsFromModule(test_create_cdb))
     from . import test_exec_anatomy
+
     suite.addTests(loader.loadTestsFromModule(test_exec_anatomy))
     return suite
 
 
 def make_args(target):
     this_dir, _ = os.path.split(__file__)
-    path = os.path.abspath(os.path.join(this_dir, '..', 'src'))
-    return ['make', 'SRCDIR={}'.format(path), 'OBJDIR={}'.format(target), '-f',
-            os.path.join(path, 'build', 'Makefile')]
+    path = os.path.abspath(os.path.join(this_dir, "..", "src"))
+    return [
+        "make",
+        "SRCDIR={}".format(path),
+        "OBJDIR={}".format(target),
+        "-f",
+        os.path.join(path, "build", "Makefile"),
+    ]
 
 
 def silent_call(cmd, *args, **kwargs):
-    kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT})
+    kwargs.update({"stdout": subprocess.PIPE, "stderr": subprocess.STDOUT})
     return subprocess.call(cmd, *args, **kwargs)
 
 
 def silent_check_call(cmd, *args, **kwargs):
-    kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT})
+    kwargs.update({"stdout": subprocess.PIPE, "stderr": subprocess.STDOUT})
     return subprocess.check_call(cmd, *args, **kwargs)
 
 
 def call_and_report(analyzer_cmd, build_cmd):
-    child = subprocess.Popen(analyzer_cmd + ['-v'] + build_cmd,
-                             universal_newlines=True,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.STDOUT)
-
-    pattern = re.compile('Report directory created: (.+)')
+    child = subprocess.Popen(
+        analyzer_cmd + ["-v"] + build_cmd,
+        universal_newlines=True,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+    )
+
+    pattern = re.compile("Report directory created: (.+)")
     directory = None
     for line in child.stdout.readlines():
         match = pattern.search(line)
@@ -59,12 +70,11 @@ def call_and_report(analyzer_cmd, build_cmd):
 def check_call_and_report(analyzer_cmd, build_cmd):
     exit_code, result = call_and_report(analyzer_cmd, build_cmd)
     if exit_code != 0:
-        raise subprocess.CalledProcessError(
-            exit_code, analyzer_cmd + build_cmd, None)
+        raise subprocess.CalledProcessError(exit_code, analyzer_cmd + build_cmd, None)
     else:
         return result
 
 
 def create_empty_file(filename):
-    with open(filename, 'a') as handle:
+    with open(filename, "a") as handle:
         pass

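call_and_report() above finds the report directory by scanning the analyzer's verbose output; the regex is easy to check on its own (the sample output line is fabricated):

    import re

    pattern = re.compile("Report directory created: (.+)")
    line = "Report directory created: /tmp/scan-build-2023-05-23-1"
    match = pattern.search(line)
    if match:
        print(match.group(1))  # /tmp/scan-build-2023-05-23-1
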
diff --git a/clang/tools/scan-build-py/tests/functional/cases/test_create_cdb.py b/clang/tools/scan-build-py/tests/functional/cases/test_create_cdb.py
index 692a489b61178..1675be3dc963d 100644
--- a/clang/tools/scan-build-py/tests/functional/cases/test_create_cdb.py
+++ b/clang/tools/scan-build-py/tests/functional/cases/test_create_cdb.py
@@ -14,62 +14,60 @@
 class CompilationDatabaseTest(unittest.TestCase):
     @staticmethod
     def run_intercept(tmpdir, args):
-        result = os.path.join(tmpdir, 'cdb.json')
+        result = os.path.join(tmpdir, "cdb.json")
         make = make_args(tmpdir) + args
-        silent_check_call(
-            ['intercept-build', '--cdb', result] + make)
+        silent_check_call(["intercept-build", "--cdb", result] + make)
         return result
 
     @staticmethod
     def count_entries(filename):
-        with open(filename, 'r') as handler:
+        with open(filename, "r") as handler:
             content = json.load(handler)
             return len(content)
 
     def test_successful_build(self):
         with libear.TemporaryDirectory() as tmpdir:
-            result = self.run_intercept(tmpdir, ['build_regular'])
+            result = self.run_intercept(tmpdir, ["build_regular"])
             self.assertTrue(os.path.isfile(result))
             self.assertEqual(5, self.count_entries(result))
 
     def test_successful_build_with_wrapper(self):
         with libear.TemporaryDirectory() as tmpdir:
-            result = os.path.join(tmpdir, 'cdb.json')
-            make = make_args(tmpdir) + ['build_regular']
-            silent_check_call(['intercept-build', '--cdb', result,
-                               '--override-compiler'] + make)
+            result = os.path.join(tmpdir, "cdb.json")
+            make = make_args(tmpdir) + ["build_regular"]
+            silent_check_call(
+                ["intercept-build", "--cdb", result, "--override-compiler"] + make
+            )
             self.assertTrue(os.path.isfile(result))
             self.assertEqual(5, self.count_entries(result))
 
-    @unittest.skipIf(os.getenv('TRAVIS'), 'ubuntu make return -11')
+    @unittest.skipIf(os.getenv("TRAVIS"), "ubuntu make return -11")
     def test_successful_build_parallel(self):
         with libear.TemporaryDirectory() as tmpdir:
-            result = self.run_intercept(tmpdir, ['-j', '4', 'build_regular'])
+            result = self.run_intercept(tmpdir, ["-j", "4", "build_regular"])
             self.assertTrue(os.path.isfile(result))
             self.assertEqual(5, self.count_entries(result))
 
-    @unittest.skipIf(os.getenv('TRAVIS'), 'ubuntu env remove clang from path')
+    @unittest.skipIf(os.getenv("TRAVIS"), "ubuntu env remove clang from path")
     def test_successful_build_on_empty_env(self):
         with libear.TemporaryDirectory() as tmpdir:
-            result = os.path.join(tmpdir, 'cdb.json')
-            make = make_args(tmpdir) + ['CC=clang', 'build_regular']
-            silent_check_call(['intercept-build', '--cdb', result,
-                               'env', '-'] + make)
+            result = os.path.join(tmpdir, "cdb.json")
+            make = make_args(tmpdir) + ["CC=clang", "build_regular"]
+            silent_check_call(["intercept-build", "--cdb", result, "env", "-"] + make)
             self.assertTrue(os.path.isfile(result))
             self.assertEqual(5, self.count_entries(result))
 
     def test_successful_build_all_in_one(self):
         with libear.TemporaryDirectory() as tmpdir:
-            result = self.run_intercept(tmpdir, ['build_all_in_one'])
+            result = self.run_intercept(tmpdir, ["build_all_in_one"])
             self.assertTrue(os.path.isfile(result))
             self.assertEqual(5, self.count_entries(result))
 
     def test_not_successful_build(self):
         with libear.TemporaryDirectory() as tmpdir:
-            result = os.path.join(tmpdir, 'cdb.json')
-            make = make_args(tmpdir) + ['build_broken']
-            silent_call(
-                ['intercept-build', '--cdb', result] + make)
+            result = os.path.join(tmpdir, "cdb.json")
+            make = make_args(tmpdir) + ["build_broken"]
+            silent_call(["intercept-build", "--cdb", result] + make)
             self.assertTrue(os.path.isfile(result))
             self.assertEqual(2, self.count_entries(result))
 
@@ -77,50 +75,48 @@ def test_not_successful_build(self):
 class ExitCodeTest(unittest.TestCase):
     @staticmethod
     def run_intercept(tmpdir, target):
-        result = os.path.join(tmpdir, 'cdb.json')
+        result = os.path.join(tmpdir, "cdb.json")
         make = make_args(tmpdir) + [target]
-        return silent_call(
-            ['intercept-build', '--cdb', result] + make)
+        return silent_call(["intercept-build", "--cdb", result] + make)
 
     def test_successful_build(self):
         with libear.TemporaryDirectory() as tmpdir:
-            exitcode = self.run_intercept(tmpdir, 'build_clean')
+            exitcode = self.run_intercept(tmpdir, "build_clean")
             self.assertFalse(exitcode)
 
     def test_not_successful_build(self):
         with libear.TemporaryDirectory() as tmpdir:
-            exitcode = self.run_intercept(tmpdir, 'build_broken')
+            exitcode = self.run_intercept(tmpdir, "build_broken")
             self.assertTrue(exitcode)
 
 
 class ResumeFeatureTest(unittest.TestCase):
     @staticmethod
     def run_intercept(tmpdir, target, args):
-        result = os.path.join(tmpdir, 'cdb.json')
+        result = os.path.join(tmpdir, "cdb.json")
         make = make_args(tmpdir) + [target]
-        silent_check_call(
-            ['intercept-build', '--cdb', result] + args + make)
+        silent_check_call(["intercept-build", "--cdb", result] + args + make)
         return result
 
     @staticmethod
     def count_entries(filename):
-        with open(filename, 'r') as handler:
+        with open(filename, "r") as handler:
             content = json.load(handler)
             return len(content)
 
     def test_overwrite_existing_cdb(self):
         with libear.TemporaryDirectory() as tmpdir:
-            result = self.run_intercept(tmpdir, 'build_clean', [])
+            result = self.run_intercept(tmpdir, "build_clean", [])
             self.assertTrue(os.path.isfile(result))
-            result = self.run_intercept(tmpdir, 'build_regular', [])
+            result = self.run_intercept(tmpdir, "build_regular", [])
             self.assertTrue(os.path.isfile(result))
             self.assertEqual(2, self.count_entries(result))
 
     def test_append_to_existing_cdb(self):
         with libear.TemporaryDirectory() as tmpdir:
-            result = self.run_intercept(tmpdir, 'build_clean', [])
+            result = self.run_intercept(tmpdir, "build_clean", [])
             self.assertTrue(os.path.isfile(result))
-            result = self.run_intercept(tmpdir, 'build_regular', ['--append'])
+            result = self.run_intercept(tmpdir, "build_regular", ["--append"])
             self.assertTrue(os.path.isfile(result))
             self.assertEqual(5, self.count_entries(result))
 
@@ -128,63 +124,63 @@ def test_append_to_existing_cdb(self):
 class ResultFormatingTest(unittest.TestCase):
     @staticmethod
     def run_intercept(tmpdir, command):
-        result = os.path.join(tmpdir, 'cdb.json')
-        silent_check_call(
-            ['intercept-build', '--cdb', result] + command,
-            cwd=tmpdir)
-        with open(result, 'r') as handler:
+        result = os.path.join(tmpdir, "cdb.json")
+        silent_check_call(["intercept-build", "--cdb", result] + command, cwd=tmpdir)
+        with open(result, "r") as handler:
             content = json.load(handler)
             return content
 
     def assert_creates_number_of_entries(self, command, count):
         with libear.TemporaryDirectory() as tmpdir:
-            filename = os.path.join(tmpdir, 'test.c')
+            filename = os.path.join(tmpdir, "test.c")
             create_empty_file(filename)
             command.append(filename)
-            cmd = ['sh', '-c', ' '.join(command)]
+            cmd = ["sh", "-c", " ".join(command)]
             cdb = self.run_intercept(tmpdir, cmd)
             self.assertEqual(count, len(cdb))
 
     def test_filter_preprocessor_only_calls(self):
-        self.assert_creates_number_of_entries(['cc', '-c'], 1)
-        self.assert_creates_number_of_entries(['cc', '-c', '-E'], 0)
-        self.assert_creates_number_of_entries(['cc', '-c', '-M'], 0)
-        self.assert_creates_number_of_entries(['cc', '-c', '-MM'], 0)
+        self.assert_creates_number_of_entries(["cc", "-c"], 1)
+        self.assert_creates_number_of_entries(["cc", "-c", "-E"], 0)
+        self.assert_creates_number_of_entries(["cc", "-c", "-M"], 0)
+        self.assert_creates_number_of_entries(["cc", "-c", "-MM"], 0)
 
     def assert_command_creates_entry(self, command, expected):
         with libear.TemporaryDirectory() as tmpdir:
             filename = os.path.join(tmpdir, command[-1])
             create_empty_file(filename)
-            cmd = ['sh', '-c', ' '.join(command)]
+            cmd = ["sh", "-c", " ".join(command)]
             cdb = self.run_intercept(tmpdir, cmd)
-            self.assertEqual(' '.join(expected), cdb[0]['command'])
+            self.assertEqual(" ".join(expected), cdb[0]["command"])
 
     def test_filter_preprocessor_flags(self):
         self.assert_command_creates_entry(
-            ['cc', '-c', '-MD', 'test.c'],
-            ['cc', '-c', 'test.c'])
+            ["cc", "-c", "-MD", "test.c"], ["cc", "-c", "test.c"]
+        )
         self.assert_command_creates_entry(
-            ['cc', '-c', '-MMD', 'test.c'],
-            ['cc', '-c', 'test.c'])
+            ["cc", "-c", "-MMD", "test.c"], ["cc", "-c", "test.c"]
+        )
         self.assert_command_creates_entry(
-            ['cc', '-c', '-MD', '-MF', 'test.d', 'test.c'],
-            ['cc', '-c', 'test.c'])
+            ["cc", "-c", "-MD", "-MF", "test.d", "test.c"], ["cc", "-c", "test.c"]
+        )
 
     def test_pass_language_flag(self):
         self.assert_command_creates_entry(
-            ['cc', '-c', '-x', 'c', 'test.c'],
-            ['cc', '-c', '-x', 'c', 'test.c'])
+            ["cc", "-c", "-x", "c", "test.c"], ["cc", "-c", "-x", "c", "test.c"]
+        )
         self.assert_command_creates_entry(
-            ['cc', '-c', 'test.c'],
-            ['cc', '-c', 'test.c'])
+            ["cc", "-c", "test.c"], ["cc", "-c", "test.c"]
+        )
 
     def test_pass_arch_flags(self):
         self.assert_command_creates_entry(
-            ['clang', '-c', 'test.c'],
-            ['cc', '-c', 'test.c'])
+            ["clang", "-c", "test.c"], ["cc", "-c", "test.c"]
+        )
         self.assert_command_creates_entry(
-            ['clang', '-c', '-arch', 'i386', 'test.c'],
-            ['cc', '-c', '-arch', 'i386', 'test.c'])
+            ["clang", "-c", "-arch", "i386", "test.c"],
+            ["cc", "-c", "-arch", "i386", "test.c"],
+        )
         self.assert_command_creates_entry(
-            ['clang', '-c', '-arch', 'i386', '-arch', 'armv7l', 'test.c'],
-            ['cc', '-c', '-arch', 'i386', '-arch', 'armv7l', 'test.c'])
+            ["clang", "-c", "-arch", "i386", "-arch", "armv7l", "test.c"],
+            ["cc", "-c", "-arch", "i386", "-arch", "armv7l", "test.c"],
+        )

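The count assertions above all reduce to the length of the JSON array that intercept-build writes; a fabricated entry shows the shape being counted:

    import json

    # One compilation-database entry per compiler invocation; the tests
    # read cdb[0]["command"] and count_entries() is len(json.load(handle)).
    entry = {"directory": "/tmp/build", "command": "cc -c test.c", "file": "test.c"}
    print(json.dumps([entry], indent=2))
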
diff --git a/clang/tools/scan-build-py/tests/functional/cases/test_exec_anatomy.py b/clang/tools/scan-build-py/tests/functional/cases/test_exec_anatomy.py
index d6bf572481a34..cdcbc1f511c18 100644
--- a/clang/tools/scan-build-py/tests/functional/cases/test_exec_anatomy.py
+++ b/clang/tools/scan-build-py/tests/functional/cases/test_exec_anatomy.py
@@ -13,18 +13,16 @@
 
 def run(source_dir, target_dir):
     def execute(cmd):
-        return subprocess.check_call(cmd,
-                                     cwd=target_dir,
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.STDOUT)
-
-    execute(['cmake', source_dir])
-    execute(['make'])
-
-    result_file = os.path.join(target_dir, 'result.json')
-    expected_file = os.path.join(target_dir, 'expected.json')
-    execute(['intercept-build', '--cdb', result_file, './exec',
-             expected_file])
+        return subprocess.check_call(
+            cmd, cwd=target_dir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+        )
+
+    execute(["cmake", source_dir])
+    execute(["make"])
+
+    result_file = os.path.join(target_dir, "result.json")
+    expected_file = os.path.join(target_dir, "expected.json")
+    execute(["intercept-build", "--cdb", result_file, "./exec", expected_file])
     return (expected_file, result_file)
 
 
@@ -43,7 +41,7 @@ def read_json(filename):
 
     def test_all_exec_calls(self):
         this_dir, _ = os.path.split(__file__)
-        source_dir = os.path.abspath(os.path.join(this_dir, '..', 'exec'))
+        source_dir = os.path.abspath(os.path.join(this_dir, "..", "exec"))
         with libear.TemporaryDirectory() as tmp_dir:
             expected, result = run(source_dir, tmp_dir)
             self.assertEqualJson(expected, result)

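read_json() and assertEqualJson() fall outside the hunks above; a plausible reconstruction of the comparison, labeled as such, would be:

    import json

    def read_json(filename):
        with open(filename, "r") as handle:
            return json.load(handle)

    def assert_equal_json(expected_file, result_file):
        # Hypothetical reconstruction: comparing parsed JSON ignores key
        # order and whitespace, which is what this test needs.
        assert read_json(expected_file) == read_json(result_file)
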
diff --git a/clang/tools/scan-build-py/tests/functional/cases/test_from_cdb.py b/clang/tools/scan-build-py/tests/functional/cases/test_from_cdb.py
index 3b52d852fbf23..7384fe95ffdbf 100644
--- a/clang/tools/scan-build-py/tests/functional/cases/test_from_cdb.py
+++ b/clang/tools/scan-build-py/tests/functional/cases/test_from_cdb.py
@@ -13,14 +13,14 @@
 
 
 def prepare_cdb(name, target_dir):
-    target_file = 'build_{0}.json'.format(name)
+    target_file = "build_{0}.json".format(name)
     this_dir, _ = os.path.split(__file__)
-    path = os.path.abspath(os.path.join(this_dir, '..', 'src'))
-    source_dir = os.path.join(path, 'compilation_database')
-    source_file = os.path.join(source_dir, target_file + '.in')
-    target_file = os.path.join(target_dir, 'compile_commands.json')
-    with open(source_file, 'r') as in_handle:
-        with open(target_file, 'w') as out_handle:
+    path = os.path.abspath(os.path.join(this_dir, "..", "src"))
+    source_dir = os.path.join(path, "compilation_database")
+    source_file = os.path.join(source_dir, target_file + ".in")
+    target_file = os.path.join(target_dir, "compile_commands.json")
+    with open(source_file, "r") as in_handle:
+        with open(target_file, "w") as out_handle:
             for line in in_handle:
                 temp = string.Template(line)
                 out_handle.write(temp.substitute(path=path))
@@ -28,134 +28,128 @@ def prepare_cdb(name, target_dir):
 
 
 def run_analyzer(directory, cdb, args):
-    cmd = ['analyze-build', '--cdb', cdb, '--output', directory] \
-        + args
+    cmd = ["analyze-build", "--cdb", cdb, "--output", directory] + args
     return call_and_report(cmd, [])
 
 
 class OutputDirectoryTest(unittest.TestCase):
     def test_regular_keeps_report_dir(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
+            cdb = prepare_cdb("regular", tmpdir)
             exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
             self.assertTrue(os.path.isdir(reportdir))
 
     def test_clear_deletes_report_dir(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('clean', tmpdir)
+            cdb = prepare_cdb("clean", tmpdir)
             exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
             self.assertFalse(os.path.isdir(reportdir))
 
     def test_clear_keeps_report_dir_when_asked(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('clean', tmpdir)
-            exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--keep-empty'])
+            cdb = prepare_cdb("clean", tmpdir)
+            exit_code, reportdir = run_analyzer(tmpdir, cdb, ["--keep-empty"])
             self.assertTrue(os.path.isdir(reportdir))
 
 
 class ExitCodeTest(unittest.TestCase):
     def test_regular_does_not_set_exit_code(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
+            cdb = prepare_cdb("regular", tmpdir)
             exit_code, __ = run_analyzer(tmpdir, cdb, [])
             self.assertFalse(exit_code)
 
     def test_clear_does_not_set_exit_code(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('clean', tmpdir)
+            cdb = prepare_cdb("clean", tmpdir)
             exit_code, __ = run_analyzer(tmpdir, cdb, [])
             self.assertFalse(exit_code)
 
     def test_regular_sets_exit_code_if_asked(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
-            exit_code, __ = run_analyzer(tmpdir, cdb, ['--status-bugs'])
+            cdb = prepare_cdb("regular", tmpdir)
+            exit_code, __ = run_analyzer(tmpdir, cdb, ["--status-bugs"])
             self.assertTrue(exit_code)
 
     def test_clear_does_not_set_exit_code_if_asked(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('clean', tmpdir)
-            exit_code, __ = run_analyzer(tmpdir, cdb, ['--status-bugs'])
+            cdb = prepare_cdb("clean", tmpdir)
+            exit_code, __ = run_analyzer(tmpdir, cdb, ["--status-bugs"])
             self.assertFalse(exit_code)
 
     def test_regular_sets_exit_code_if_asked_from_plist(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
-            exit_code, __ = run_analyzer(
-                tmpdir, cdb, ['--status-bugs', '--plist'])
+            cdb = prepare_cdb("regular", tmpdir)
+            exit_code, __ = run_analyzer(tmpdir, cdb, ["--status-bugs", "--plist"])
             self.assertTrue(exit_code)
 
     def test_clear_does_not_set_exit_code_if_asked_from_plist(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('clean', tmpdir)
-            exit_code, __ = run_analyzer(
-                tmpdir, cdb, ['--status-bugs', '--plist'])
+            cdb = prepare_cdb("clean", tmpdir)
+            exit_code, __ = run_analyzer(tmpdir, cdb, ["--status-bugs", "--plist"])
             self.assertFalse(exit_code)
 
 
 class OutputFormatTest(unittest.TestCase):
     @staticmethod
     def get_html_count(directory):
-        return len(glob.glob(os.path.join(directory, 'report-*.html')))
+        return len(glob.glob(os.path.join(directory, "report-*.html")))
 
     @staticmethod
     def get_plist_count(directory):
-        return len(glob.glob(os.path.join(directory, 'report-*.plist')))
+        return len(glob.glob(os.path.join(directory, "report-*.plist")))
 
     @staticmethod
     def get_sarif_count(directory):
-        return len(glob.glob(os.path.join(directory, 'result-*.sarif')))
+        return len(glob.glob(os.path.join(directory, "result-*.sarif")))
 
     def test_default_only_creates_html_report(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
+            cdb = prepare_cdb("regular", tmpdir)
             exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
-            self.assertTrue(
-                os.path.exists(os.path.join(reportdir, 'index.html')))
+            self.assertTrue(os.path.exists(os.path.join(reportdir, "index.html")))
             self.assertEqual(self.get_html_count(reportdir), 2)
             self.assertEqual(self.get_plist_count(reportdir), 0)
             self.assertEqual(self.get_sarif_count(reportdir), 0)
 
     def test_plist_and_html_creates_html_and_plist_reports(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
-            exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--plist-html'])
-            self.assertTrue(
-                os.path.exists(os.path.join(reportdir, 'index.html')))
+            cdb = prepare_cdb("regular", tmpdir)
+            exit_code, reportdir = run_analyzer(tmpdir, cdb, ["--plist-html"])
+            self.assertTrue(os.path.exists(os.path.join(reportdir, "index.html")))
             self.assertEqual(self.get_html_count(reportdir), 2)
             self.assertEqual(self.get_plist_count(reportdir), 5)
             self.assertEqual(self.get_sarif_count(reportdir), 0)
 
     def test_plist_only_creates_plist_report(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
-            exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--plist'])
-            self.assertFalse(
-                os.path.exists(os.path.join(reportdir, 'index.html')))
+            cdb = prepare_cdb("regular", tmpdir)
+            exit_code, reportdir = run_analyzer(tmpdir, cdb, ["--plist"])
+            self.assertFalse(os.path.exists(os.path.join(reportdir, "index.html")))
             self.assertEqual(self.get_html_count(reportdir), 0)
             self.assertEqual(self.get_plist_count(reportdir), 5)
             self.assertEqual(self.get_sarif_count(reportdir), 0)
 
     def test_sarif_only_creates_sarif_result(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
-            exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--sarif'])
-            self.assertFalse(
-                os.path.exists(os.path.join(reportdir, 'index.html')))
+            cdb = prepare_cdb("regular", tmpdir)
+            exit_code, reportdir = run_analyzer(tmpdir, cdb, ["--sarif"])
+            self.assertFalse(os.path.exists(os.path.join(reportdir, "index.html")))
             self.assertTrue(
-                os.path.exists(os.path.join(reportdir, 'results-merged.sarif')))
+                os.path.exists(os.path.join(reportdir, "results-merged.sarif"))
+            )
             self.assertEqual(self.get_html_count(reportdir), 0)
             self.assertEqual(self.get_plist_count(reportdir), 0)
             self.assertEqual(self.get_sarif_count(reportdir), 5)
 
     def test_sarif_and_html_creates_sarif_and_html_reports(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('regular', tmpdir)
-            exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--sarif-html'])
-            self.assertTrue(
-                os.path.exists(os.path.join(reportdir, 'index.html')))
+            cdb = prepare_cdb("regular", tmpdir)
+            exit_code, reportdir = run_analyzer(tmpdir, cdb, ["--sarif-html"])
+            self.assertTrue(os.path.exists(os.path.join(reportdir, "index.html")))
             self.assertTrue(
-                os.path.exists(os.path.join(reportdir, 'results-merged.sarif')))
+                os.path.exists(os.path.join(reportdir, "results-merged.sarif"))
+            )
             self.assertEqual(self.get_html_count(reportdir), 2)
             self.assertEqual(self.get_plist_count(reportdir), 0)
             self.assertEqual(self.get_sarif_count(reportdir), 5)
@@ -164,49 +158,48 @@ def test_sarif_and_html_creates_sarif_and_html_reports(self):
 class FailureReportTest(unittest.TestCase):
     def test_broken_creates_failure_reports(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('broken', tmpdir)
+            cdb = prepare_cdb("broken", tmpdir)
             exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
-            self.assertTrue(
-                os.path.isdir(os.path.join(reportdir, 'failures')))
+            self.assertTrue(os.path.isdir(os.path.join(reportdir, "failures")))
 
     def test_broken_does_not_creates_failure_reports(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('broken', tmpdir)
-            exit_code, reportdir = run_analyzer(
-                tmpdir, cdb, ['--no-failure-reports'])
-            self.assertFalse(
-                os.path.isdir(os.path.join(reportdir, 'failures')))
+            cdb = prepare_cdb("broken", tmpdir)
+            exit_code, reportdir = run_analyzer(tmpdir, cdb, ["--no-failure-reports"])
+            self.assertFalse(os.path.isdir(os.path.join(reportdir, "failures")))
 
 
 class TitleTest(unittest.TestCase):
     def assertTitleEqual(self, directory, expected):
         import re
+
         patterns = [
-            re.compile(r'<title>(?P<page>.*)</title>'),
-            re.compile(r'<h1>(?P<head>.*)</h1>')
+            re.compile(r"<title>(?P<page>.*)</title>"),
+            re.compile(r"<h1>(?P<head>.*)</h1>"),
         ]
         result = dict()
 
-        index = os.path.join(directory, 'index.html')
-        with open(index, 'r') as handler:
+        index = os.path.join(directory, "index.html")
+        with open(index, "r") as handler:
             for line in handler.readlines():
                 for regex in patterns:
                     match = regex.match(line.strip())
                     if match:
                         result.update(match.groupdict())
                         break
-        self.assertEqual(result['page'], result['head'])
-        self.assertEqual(result['page'], expected)
+        self.assertEqual(result["page"], result["head"])
+        self.assertEqual(result["page"], expected)
 
     def test_default_title_in_report(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('broken', tmpdir)
+            cdb = prepare_cdb("broken", tmpdir)
             exit_code, reportdir = run_analyzer(tmpdir, cdb, [])
-            self.assertTitleEqual(reportdir, 'src - analyzer results')
+            self.assertTitleEqual(reportdir, "src - analyzer results")
 
     def test_given_title_in_report(self):
         with libear.TemporaryDirectory() as tmpdir:
-            cdb = prepare_cdb('broken', tmpdir)
+            cdb = prepare_cdb("broken", tmpdir)
             exit_code, reportdir = run_analyzer(
-                tmpdir, cdb, ['--html-title', 'this is the title'])
-            self.assertTitleEqual(reportdir, 'this is the title')
+                tmpdir, cdb, ["--html-title", "this is the title"]
+            )
+            self.assertTitleEqual(reportdir, "this is the title")

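assertTitleEqual() extracts the page title with two anchored regexes; fed a fabricated pair of lines, they behave like this:

    import re

    patterns = [
        re.compile(r"<title>(?P<page>.*)</title>"),
        re.compile(r"<h1>(?P<head>.*)</h1>"),
    ]
    result = dict()
    for line in [
        "<title>src - analyzer results</title>",
        "<h1>src - analyzer results</h1>",
    ]:
        for regex in patterns:
            match = regex.match(line.strip())
            if match:
                result.update(match.groupdict())
                break
    print(result["page"] == result["head"])  # True
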
diff --git a/clang/tools/scan-build-py/tests/functional/cases/test_from_cmd.py b/clang/tools/scan-build-py/tests/functional/cases/test_from_cmd.py
index aa244631bce9f..7549a3fbc2616 100644
--- a/clang/tools/scan-build-py/tests/functional/cases/test_from_cmd.py
+++ b/clang/tools/scan-build-py/tests/functional/cases/test_from_cmd.py
@@ -13,105 +13,126 @@
 
 
 class OutputDirectoryTest(unittest.TestCase):
-
     @staticmethod
     def run_analyzer(outdir, args, cmd):
         return check_call_and_report(
-            ['scan-build-py', '--intercept-first', '-o', outdir] + args,
-            cmd)
+            ["scan-build-py", "--intercept-first", "-o", outdir] + args, cmd
+        )
 
     def test_regular_keeps_report_dir(self):
         with libear.TemporaryDirectory() as tmpdir:
-            make = make_args(tmpdir) + ['build_regular']
+            make = make_args(tmpdir) + ["build_regular"]
             outdir = self.run_analyzer(tmpdir, [], make)
             self.assertTrue(os.path.isdir(outdir))
 
     def test_clear_deletes_report_dir(self):
         with libear.TemporaryDirectory() as tmpdir:
-            make = make_args(tmpdir) + ['build_clean']
+            make = make_args(tmpdir) + ["build_clean"]
             outdir = self.run_analyzer(tmpdir, [], make)
             self.assertFalse(os.path.isdir(outdir))
 
     def test_clear_keeps_report_dir_when_asked(self):
         with libear.TemporaryDirectory() as tmpdir:
-            make = make_args(tmpdir) + ['build_clean']
-            outdir = self.run_analyzer(tmpdir, ['--keep-empty'], make)
+            make = make_args(tmpdir) + ["build_clean"]
+            outdir = self.run_analyzer(tmpdir, ["--keep-empty"], make)
             self.assertTrue(os.path.isdir(outdir))
 
 
 class RunAnalyzerTest(unittest.TestCase):
-
     @staticmethod
     def get_plist_count(directory):
-        return len(glob.glob(os.path.join(directory, 'report-*.plist')))
+        return len(glob.glob(os.path.join(directory, "report-*.plist")))
 
     def test_interposition_works(self):
         with libear.TemporaryDirectory() as tmpdir:
-            make = make_args(tmpdir) + ['build_regular']
+            make = make_args(tmpdir) + ["build_regular"]
             outdir = check_call_and_report(
-                ['scan-build-py', '--plist', '-o', tmpdir, '--override-compiler'],
-                make)
+                ["scan-build-py", "--plist", "-o", tmpdir, "--override-compiler"], make
+            )
 
             self.assertTrue(os.path.isdir(outdir))
             self.assertEqual(self.get_plist_count(outdir), 5)
 
     def test_intercept_wrapper_works(self):
         with libear.TemporaryDirectory() as tmpdir:
-            make = make_args(tmpdir) + ['build_regular']
+            make = make_args(tmpdir) + ["build_regular"]
             outdir = check_call_and_report(
-                ['scan-build-py', '--plist', '-o', tmpdir, '--intercept-first',
-                 '--override-compiler'],
-                make)
+                [
+                    "scan-build-py",
+                    "--plist",
+                    "-o",
+                    tmpdir,
+                    "--intercept-first",
+                    "--override-compiler",
+                ],
+                make,
+            )
 
             self.assertTrue(os.path.isdir(outdir))
             self.assertEqual(self.get_plist_count(outdir), 5)
 
     def test_intercept_library_works(self):
         with libear.TemporaryDirectory() as tmpdir:
-            make = make_args(tmpdir) + ['build_regular']
+            make = make_args(tmpdir) + ["build_regular"]
             outdir = check_call_and_report(
-                ['scan-build-py', '--plist', '-o', tmpdir, '--intercept-first'],
-                make)
+                ["scan-build-py", "--plist", "-o", tmpdir, "--intercept-first"], make
+            )
 
             self.assertTrue(os.path.isdir(outdir))
             self.assertEqual(self.get_plist_count(outdir), 5)
 
     @staticmethod
     def compile_empty_source_file(target_dir, is_cxx):
-        compiler = '$CXX' if is_cxx else '$CC'
-        src_file_name = 'test.cxx' if is_cxx else 'test.c'
+        compiler = "$CXX" if is_cxx else "$CC"
+        src_file_name = "test.cxx" if is_cxx else "test.c"
         src_file = os.path.join(target_dir, src_file_name)
-        obj_file = os.path.join(target_dir, 'test.o')
+        obj_file = os.path.join(target_dir, "test.o")
         create_empty_file(src_file)
-        command = ' '.join([compiler, '-c', src_file, '-o', obj_file])
-        return ['sh', '-c', command]
+        command = " ".join([compiler, "-c", src_file, "-o", obj_file])
+        return ["sh", "-c", command]
 
     def test_interposition_cc_works(self):
         with libear.TemporaryDirectory() as tmpdir:
             outdir = check_call_and_report(
-                ['scan-build-py', '--plist', '-o', tmpdir, '--override-compiler'],
-                self.compile_empty_source_file(tmpdir, False))
+                ["scan-build-py", "--plist", "-o", tmpdir, "--override-compiler"],
+                self.compile_empty_source_file(tmpdir, False),
+            )
             self.assertEqual(self.get_plist_count(outdir), 1)
 
     def test_interposition_cxx_works(self):
         with libear.TemporaryDirectory() as tmpdir:
             outdir = check_call_and_report(
-                ['scan-build-py', '--plist', '-o', tmpdir, '--override-compiler'],
-                self.compile_empty_source_file(tmpdir, True))
+                ["scan-build-py", "--plist", "-o", tmpdir, "--override-compiler"],
+                self.compile_empty_source_file(tmpdir, True),
+            )
             self.assertEqual(self.get_plist_count(outdir), 1)
 
     def test_intercept_cc_works(self):
         with libear.TemporaryDirectory() as tmpdir:
             outdir = check_call_and_report(
-                ['scan-build-py', '--plist', '-o', tmpdir, '--override-compiler',
-                 '--intercept-first'],
-                self.compile_empty_source_file(tmpdir, False))
+                [
+                    "scan-build-py",
+                    "--plist",
+                    "-o",
+                    tmpdir,
+                    "--override-compiler",
+                    "--intercept-first",
+                ],
+                self.compile_empty_source_file(tmpdir, False),
+            )
             self.assertEqual(self.get_plist_count(outdir), 1)
 
     def test_intercept_cxx_works(self):
         with libear.TemporaryDirectory() as tmpdir:
             outdir = check_call_and_report(
-                ['scan-build-py', '--plist', '-o', tmpdir, '--override-compiler',
-                 '--intercept-first'],
-                self.compile_empty_source_file(tmpdir, True))
+                [
+                    "scan-build-py",
+                    "--plist",
+                    "-o",
+                    tmpdir,
+                    "--override-compiler",
+                    "--intercept-first",
+                ],
+                self.compile_empty_source_file(tmpdir, True),
+            )
             self.assertEqual(self.get_plist_count(outdir), 1)

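The RequireDecoratorTest in the unit tests just below pins down the contract of sut.require(); its implementation is not part of this patch, so the following is a hedged reconstruction that merely satisfies the same assertions:

    import functools

    def require(required):
        # Hypothetical stand-in for libscanbuild's require() decorator.
        def decorator(function):
            @functools.wraps(function)
            def wrapper(opts):
                for key in required:
                    if key not in opts:
                        raise KeyError(key)  # the tests assert KeyError
                return function(opts)
            return wrapper
        return decorator

    @require(["this", "that"])
    def method(opts):
        return 0

    print(method({"this": 0, "that": 3}))  # 0; a missing key raises KeyError
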
diff --git a/clang/tools/scan-build-py/tests/unit/test_analyze.py b/clang/tools/scan-build-py/tests/unit/test_analyze.py
index 823fcab9f47e5..b9b0524fb288d 100644
--- a/clang/tools/scan-build-py/tests/unit/test_analyze.py
+++ b/clang/tools/scan-build-py/tests/unit/test_analyze.py
@@ -17,41 +17,42 @@ class ReportDirectoryTest(unittest.TestCase):
     # order. This is required so that report directories from two runs of
     # scan-build can be easily matched up to compare results.
     def test_directory_name_comparison(self):
-        with libear.TemporaryDirectory() as tmpdir, \
-             sut.report_directory(tmpdir, False, 'html') as report_dir1, \
-             sut.report_directory(tmpdir, False, 'html') as report_dir2, \
-             sut.report_directory(tmpdir, False, 'html') as report_dir3:
+        with libear.TemporaryDirectory() as tmpdir, sut.report_directory(
+            tmpdir, False, "html"
+        ) as report_dir1, sut.report_directory(
+            tmpdir, False, "html"
+        ) as report_dir2, sut.report_directory(
+            tmpdir, False, "html"
+        ) as report_dir3:
             self.assertLess(report_dir1, report_dir2)
             self.assertLess(report_dir2, report_dir3)
 
 
 class FilteringFlagsTest(unittest.TestCase):
-
     def test_language_captured(self):
         def test(flags):
-            cmd = ['clang', '-c', 'source.c'] + flags
+            cmd = ["clang", "-c", "source.c"] + flags
             opts = sut.classify_parameters(cmd)
-            return opts['language']
+            return opts["language"]
 
         self.assertEqual(None, test([]))
-        self.assertEqual('c', test(['-x', 'c']))
-        self.assertEqual('cpp', test(['-x', 'cpp']))
+        self.assertEqual("c", test(["-x", "c"]))
+        self.assertEqual("cpp", test(["-x", "cpp"]))
 
     def test_arch(self):
         def test(flags):
-            cmd = ['clang', '-c', 'source.c'] + flags
+            cmd = ["clang", "-c", "source.c"] + flags
             opts = sut.classify_parameters(cmd)
-            return opts['arch_list']
+            return opts["arch_list"]
 
         self.assertEqual([], test([]))
-        self.assertEqual(['mips'], test(['-arch', 'mips']))
-        self.assertEqual(['mips', 'i386'],
-                         test(['-arch', 'mips', '-arch', 'i386']))
+        self.assertEqual(["mips"], test(["-arch", "mips"]))
+        self.assertEqual(["mips", "i386"], test(["-arch", "mips", "-arch", "i386"]))
 
     def assertFlagsChanged(self, expected, flags):
-        cmd = ['clang', '-c', 'source.c'] + flags
+        cmd = ["clang", "-c", "source.c"] + flags
         opts = sut.classify_parameters(cmd)
-        self.assertEqual(expected, opts['flags'])
+        self.assertEqual(expected, opts["flags"])
 
     def assertFlagsUnchanged(self, flags):
         self.assertFlagsChanged(flags, flags)
@@ -60,59 +61,59 @@ def assertFlagsFiltered(self, flags):
         self.assertFlagsChanged([], flags)
 
     def test_optimalizations_pass(self):
-        self.assertFlagsUnchanged(['-O'])
-        self.assertFlagsUnchanged(['-O1'])
-        self.assertFlagsUnchanged(['-Os'])
-        self.assertFlagsUnchanged(['-O2'])
-        self.assertFlagsUnchanged(['-O3'])
+        self.assertFlagsUnchanged(["-O"])
+        self.assertFlagsUnchanged(["-O1"])
+        self.assertFlagsUnchanged(["-Os"])
+        self.assertFlagsUnchanged(["-O2"])
+        self.assertFlagsUnchanged(["-O3"])
 
     def test_include_pass(self):
         self.assertFlagsUnchanged([])
-        self.assertFlagsUnchanged(['-include', '/usr/local/include'])
-        self.assertFlagsUnchanged(['-I.'])
-        self.assertFlagsUnchanged(['-I', '.'])
-        self.assertFlagsUnchanged(['-I/usr/local/include'])
-        self.assertFlagsUnchanged(['-I', '/usr/local/include'])
-        self.assertFlagsUnchanged(['-I/opt', '-I', '/opt/otp/include'])
-        self.assertFlagsUnchanged(['-isystem', '/path'])
-        self.assertFlagsUnchanged(['-isystem=/path'])
+        self.assertFlagsUnchanged(["-include", "/usr/local/include"])
+        self.assertFlagsUnchanged(["-I."])
+        self.assertFlagsUnchanged(["-I", "."])
+        self.assertFlagsUnchanged(["-I/usr/local/include"])
+        self.assertFlagsUnchanged(["-I", "/usr/local/include"])
+        self.assertFlagsUnchanged(["-I/opt", "-I", "/opt/otp/include"])
+        self.assertFlagsUnchanged(["-isystem", "/path"])
+        self.assertFlagsUnchanged(["-isystem=/path"])
 
     def test_define_pass(self):
-        self.assertFlagsUnchanged(['-DNDEBUG'])
-        self.assertFlagsUnchanged(['-UNDEBUG'])
-        self.assertFlagsUnchanged(['-Dvar1=val1', '-Dvar2=val2'])
+        self.assertFlagsUnchanged(["-DNDEBUG"])
+        self.assertFlagsUnchanged(["-UNDEBUG"])
+        self.assertFlagsUnchanged(["-Dvar1=val1", "-Dvar2=val2"])
         self.assertFlagsUnchanged(['-Dvar="val ues"'])
 
     def test_output_filtered(self):
-        self.assertFlagsFiltered(['-o', 'source.o'])
+        self.assertFlagsFiltered(["-o", "source.o"])
 
     def test_some_warning_filtered(self):
-        self.assertFlagsFiltered(['-Wall'])
-        self.assertFlagsFiltered(['-Wnoexcept'])
-        self.assertFlagsFiltered(['-Wreorder', '-Wunused', '-Wundef'])
-        self.assertFlagsUnchanged(['-Wno-reorder', '-Wno-unused'])
+        self.assertFlagsFiltered(["-Wall"])
+        self.assertFlagsFiltered(["-Wnoexcept"])
+        self.assertFlagsFiltered(["-Wreorder", "-Wunused", "-Wundef"])
+        self.assertFlagsUnchanged(["-Wno-reorder", "-Wno-unused"])
 
     def test_compile_only_flags_pass(self):
-        self.assertFlagsUnchanged(['-std=C99'])
-        self.assertFlagsUnchanged(['-nostdinc'])
-        self.assertFlagsUnchanged(['-isystem', '/image/debian'])
-        self.assertFlagsUnchanged(['-iprefix', '/usr/local'])
-        self.assertFlagsUnchanged(['-iquote=me'])
-        self.assertFlagsUnchanged(['-iquote', 'me'])
+        self.assertFlagsUnchanged(["-std=C99"])
+        self.assertFlagsUnchanged(["-nostdinc"])
+        self.assertFlagsUnchanged(["-isystem", "/image/debian"])
+        self.assertFlagsUnchanged(["-iprefix", "/usr/local"])
+        self.assertFlagsUnchanged(["-iquote=me"])
+        self.assertFlagsUnchanged(["-iquote", "me"])
 
     def test_compile_and_link_flags_pass(self):
-        self.assertFlagsUnchanged(['-fsinged-char'])
-        self.assertFlagsUnchanged(['-fPIC'])
-        self.assertFlagsUnchanged(['-stdlib=libc++'])
-        self.assertFlagsUnchanged(['--sysroot', '/'])
-        self.assertFlagsUnchanged(['-isysroot', '/'])
+        self.assertFlagsUnchanged(["-fsinged-char"])
+        self.assertFlagsUnchanged(["-fPIC"])
+        self.assertFlagsUnchanged(["-stdlib=libc++"])
+        self.assertFlagsUnchanged(["--sysroot", "/"])
+        self.assertFlagsUnchanged(["-isysroot", "/"])
 
     def test_some_flags_filtered(self):
-        self.assertFlagsFiltered(['-g'])
-        self.assertFlagsFiltered(['-fsyntax-only'])
-        self.assertFlagsFiltered(['-save-temps'])
-        self.assertFlagsFiltered(['-init', 'my_init'])
-        self.assertFlagsFiltered(['-sectorder', 'a', 'b', 'c'])
+        self.assertFlagsFiltered(["-g"])
+        self.assertFlagsFiltered(["-fsyntax-only"])
+        self.assertFlagsFiltered(["-save-temps"])
+        self.assertFlagsFiltered(["-init", "my_init"])
+        self.assertFlagsFiltered(["-sectorder", "a", "b", "c"])
 
 
 class Spy(object):
@@ -126,23 +127,22 @@ def call(self, params):
 
 
 class RunAnalyzerTest(unittest.TestCase):
-
     @staticmethod
-    def run_analyzer(content, failures_report, output_format='plist'):
+    def run_analyzer(content, failures_report, output_format="plist"):
         with libear.TemporaryDirectory() as tmpdir:
-            filename = os.path.join(tmpdir, 'test.cpp')
-            with open(filename, 'w') as handle:
+            filename = os.path.join(tmpdir, "test.cpp")
+            with open(filename, "w") as handle:
                 handle.write(content)
 
             opts = {
-                'clang': 'clang',
-                'directory': os.getcwd(),
-                'flags': [],
-                'direct_args': [],
-                'file': filename,
-                'output_dir': tmpdir,
-                'output_format': output_format,
-                'output_failures': failures_report
+                "clang": "clang",
+                "directory": os.getcwd(),
+                "flags": [],
+                "direct_args": [],
+                "file": filename,
+                "output_dir": tmpdir,
+                "output_format": output_format,
+                "output_failures": failures_report,
             }
             spy = Spy()
             result = sut.run_analyzer(opts, spy.call)
@@ -155,57 +155,58 @@ def test_run_analyzer(self):
         content = "int div(int n, int d) { return n / d; }"
         (result, fwds, _) = RunAnalyzerTest.run_analyzer(content, False)
         self.assertEqual(None, fwds)
-        self.assertEqual(0, result['exit_code'])
+        self.assertEqual(0, result["exit_code"])
 
     def test_run_analyzer_crash(self):
         content = "int div(int n, int d) { return n / d }"
         (result, fwds, _) = RunAnalyzerTest.run_analyzer(content, False)
         self.assertEqual(None, fwds)
-        self.assertEqual(1, result['exit_code'])
+        self.assertEqual(1, result["exit_code"])
 
     def test_run_analyzer_crash_and_forwarded(self):
         content = "int div(int n, int d) { return n / d }"
         (_, fwds, _) = RunAnalyzerTest.run_analyzer(content, True)
-        self.assertEqual(1, fwds['exit_code'])
-        self.assertTrue(len(fwds['error_output']) > 0)
+        self.assertEqual(1, fwds["exit_code"])
+        self.assertTrue(len(fwds["error_output"]) > 0)
 
     def test_run_analyzer_with_sarif(self):
         content = "int div(int n, int d) { return n / d; }"
-        (result, fwds, output_files) = RunAnalyzerTest.run_analyzer(content, False, output_format='sarif')
+        (result, fwds, output_files) = RunAnalyzerTest.run_analyzer(
+            content, False, output_format="sarif"
+        )
         self.assertEqual(None, fwds)
-        self.assertEqual(0, result['exit_code'])
+        self.assertEqual(0, result["exit_code"])
 
-        pattern = re.compile(r'^result-.+\.sarif$')
+        pattern = re.compile(r"^result-.+\.sarif$")
         for f in output_files:
             if re.match(pattern, f):
                 return
-        self.fail('no result sarif files found in output')
+        self.fail("no result sarif files found in output")
 
 
 class ReportFailureTest(unittest.TestCase):
-
     def assertUnderFailures(self, path):
-        self.assertEqual('failures', os.path.basename(os.path.dirname(path)))
+        self.assertEqual("failures", os.path.basename(os.path.dirname(path)))
 
     def test_report_failure_create_files(self):
         with libear.TemporaryDirectory() as tmpdir:
             # create input file
-            filename = os.path.join(tmpdir, 'test.c')
-            with open(filename, 'w') as handle:
-                handle.write('int main() { return 0')
-            uname_msg = ' '.join(os.uname()) + os.linesep
-            error_msg = 'this is my error output'
+            filename = os.path.join(tmpdir, "test.c")
+            with open(filename, "w") as handle:
+                handle.write("int main() { return 0")
+            uname_msg = " ".join(os.uname()) + os.linesep
+            error_msg = "this is my error output"
             # execute test
             opts = {
-                'clang': 'clang',
-                'directory': os.getcwd(),
-                'flags': [],
-                'file': filename,
-                'output_dir': tmpdir,
-                'language': 'c',
-                'error_type': 'other_error',
-                'error_output': error_msg,
-                'exit_code': 13
+                "clang": "clang",
+                "directory": os.getcwd(),
+                "flags": [],
+                "file": filename,
+                "output_dir": tmpdir,
+                "language": "c",
+                "error_type": "other_error",
+                "error_output": error_msg,
+                "exit_code": 13,
             }
             sut.report_failure(opts)
             # verify the result
@@ -214,105 +215,98 @@ def test_report_failure_create_files(self):
             for root, _, files in os.walk(tmpdir):
                 keys = [os.path.join(root, name) for name in files]
                 for key in keys:
-                    with open(key, 'r') as handle:
+                    with open(key, "r") as handle:
                         result[key] = handle.readlines()
-                    if re.match(r'^(.*/)+clang(.*)\.i$', key):
+                    if re.match(r"^(.*/)+clang(.*)\.i$", key):
                         pp_file = key
 
             # prepocessor file generated
             self.assertUnderFailures(pp_file)
             # info file generated and content dumped
-            info_file = pp_file + '.info.txt'
+            info_file = pp_file + ".info.txt"
             self.assertTrue(info_file in result)
-            self.assertEqual('Other Error\n', result[info_file][1])
+            self.assertEqual("Other Error\n", result[info_file][1])
             self.assertEqual(uname_msg, result[info_file][3])
             # error file generated and content dumped
-            error_file = pp_file + '.stderr.txt'
+            error_file = pp_file + ".stderr.txt"
             self.assertTrue(error_file in result)
             self.assertEqual([error_msg], result[error_file])
 
 
 class AnalyzerTest(unittest.TestCase):
-
     def test_nodebug_macros_appended(self):
         def test(flags):
             spy = Spy()
-            opts = {'flags': flags, 'force_debug': True}
-            self.assertEqual(spy.success,
-                             sut.filter_debug_flags(opts, spy.call))
-            return spy.arg['flags']
+            opts = {"flags": flags, "force_debug": True}
+            self.assertEqual(spy.success, sut.filter_debug_flags(opts, spy.call))
+            return spy.arg["flags"]
 
-        self.assertEqual(['-UNDEBUG'], test([]))
-        self.assertEqual(['-DNDEBUG', '-UNDEBUG'], test(['-DNDEBUG']))
-        self.assertEqual(['-DSomething', '-UNDEBUG'], test(['-DSomething']))
+        self.assertEqual(["-UNDEBUG"], test([]))
+        self.assertEqual(["-DNDEBUG", "-UNDEBUG"], test(["-DNDEBUG"]))
+        self.assertEqual(["-DSomething", "-UNDEBUG"], test(["-DSomething"]))
 
     def test_set_language_fall_through(self):
         def language(expected, input):
             spy = Spy()
-            input.update({'compiler': 'c', 'file': 'test.c'})
+            input.update({"compiler": "c", "file": "test.c"})
             self.assertEqual(spy.success, sut.language_check(input, spy.call))
-            self.assertEqual(expected, spy.arg['language'])
+            self.assertEqual(expected, spy.arg["language"])
 
-        language('c',   {'language': 'c', 'flags': []})
-        language('c++', {'language': 'c++', 'flags': []})
+        language("c", {"language": "c", "flags": []})
+        language("c++", {"language": "c++", "flags": []})
 
     def test_set_language_stops_on_not_supported(self):
         spy = Spy()
-        input = {
-            'compiler': 'c',
-            'flags': [],
-            'file': 'test.java',
-            'language': 'java'
-        }
+        input = {"compiler": "c", "flags": [], "file": "test.java", "language": "java"}
         self.assertIsNone(sut.language_check(input, spy.call))
         self.assertIsNone(spy.arg)
 
     def test_set_language_sets_flags(self):
         def flags(expected, input):
             spy = Spy()
-            input.update({'compiler': 'c', 'file': 'test.c'})
+            input.update({"compiler": "c", "file": "test.c"})
             self.assertEqual(spy.success, sut.language_check(input, spy.call))
-            self.assertEqual(expected, spy.arg['flags'])
+            self.assertEqual(expected, spy.arg["flags"])
 
-        flags(['-x', 'c'],   {'language': 'c', 'flags': []})
-        flags(['-x', 'c++'], {'language': 'c++', 'flags': []})
+        flags(["-x", "c"], {"language": "c", "flags": []})
+        flags(["-x", "c++"], {"language": "c++", "flags": []})
 
     def test_set_language_from_filename(self):
         def language(expected, input):
             spy = Spy()
-            input.update({'language': None, 'flags': []})
+            input.update({"language": None, "flags": []})
             self.assertEqual(spy.success, sut.language_check(input, spy.call))
-            self.assertEqual(expected, spy.arg['language'])
+            self.assertEqual(expected, spy.arg["language"])
 
-        language('c',   {'file': 'file.c',   'compiler': 'c'})
-        language('c++', {'file': 'file.c',   'compiler': 'c++'})
-        language('c++', {'file': 'file.cxx', 'compiler': 'c'})
-        language('c++', {'file': 'file.cxx', 'compiler': 'c++'})
-        language('c++', {'file': 'file.cpp', 'compiler': 'c++'})
-        language('c-cpp-output',   {'file': 'file.i', 'compiler': 'c'})
-        language('c++-cpp-output', {'file': 'file.i', 'compiler': 'c++'})
+        language("c", {"file": "file.c", "compiler": "c"})
+        language("c++", {"file": "file.c", "compiler": "c++"})
+        language("c++", {"file": "file.cxx", "compiler": "c"})
+        language("c++", {"file": "file.cxx", "compiler": "c++"})
+        language("c++", {"file": "file.cpp", "compiler": "c++"})
+        language("c-cpp-output", {"file": "file.i", "compiler": "c"})
+        language("c++-cpp-output", {"file": "file.i", "compiler": "c++"})
 
     def test_arch_loop_sets_flags(self):
         def flags(archs):
             spy = Spy()
-            input = {'flags': [], 'arch_list': archs}
+            input = {"flags": [], "arch_list": archs}
             sut.arch_check(input, spy.call)
-            return spy.arg['flags']
+            return spy.arg["flags"]
 
         self.assertEqual([], flags([]))
-        self.assertEqual(['-arch', 'i386'], flags(['i386']))
-        self.assertEqual(['-arch', 'i386'], flags(['i386', 'ppc']))
-        self.assertEqual(['-arch', 'sparc'], flags(['i386', 'sparc']))
+        self.assertEqual(["-arch", "i386"], flags(["i386"]))
+        self.assertEqual(["-arch", "i386"], flags(["i386", "ppc"]))
+        self.assertEqual(["-arch", "sparc"], flags(["i386", "sparc"]))
 
     def test_arch_loop_stops_on_not_supported(self):
         def stop(archs):
             spy = Spy()
-            input = {'flags': [], 'arch_list': archs}
+            input = {"flags": [], "arch_list": archs}
             self.assertIsNone(sut.arch_check(input, spy.call))
             self.assertIsNone(spy.arg)
 
-        stop(['ppc'])
-        stop(['ppc64'])
+        stop(["ppc"])
+        stop(["ppc64"])
 
 
 @sut.require([])
@@ -320,37 +314,35 @@ def method_without_expecteds(opts):
     return 0
 
 
-@sut.require(['this', 'that'])
+@sut.require(["this", "that"])
 def method_with_expecteds(opts):
     return 0
 
 
 @sut.require([])
 def method_exception_from_inside(opts):
-    raise Exception('here is one')
+    raise Exception("here is one")
 
 
 class RequireDecoratorTest(unittest.TestCase):
-
     def test_method_without_expecteds(self):
         self.assertEqual(method_without_expecteds(dict()), 0)
         self.assertEqual(method_without_expecteds({}), 0)
-        self.assertEqual(method_without_expecteds({'this': 2}), 0)
-        self.assertEqual(method_without_expecteds({'that': 3}), 0)
+        self.assertEqual(method_without_expecteds({"this": 2}), 0)
+        self.assertEqual(method_without_expecteds({"that": 3}), 0)
 
     def test_method_with_expecteds(self):
         self.assertRaises(KeyError, method_with_expecteds, dict())
         self.assertRaises(KeyError, method_with_expecteds, {})
-        self.assertRaises(KeyError, method_with_expecteds, {'this': 2})
-        self.assertRaises(KeyError, method_with_expecteds, {'that': 3})
-        self.assertEqual(method_with_expecteds({'this': 0, 'that': 3}), 0)
+        self.assertRaises(KeyError, method_with_expecteds, {"this": 2})
+        self.assertRaises(KeyError, method_with_expecteds, {"that": 3})
+        self.assertEqual(method_with_expecteds({"this": 0, "that": 3}), 0)
 
     def test_method_exception_not_caught(self):
         self.assertRaises(Exception, method_exception_from_inside, dict())
 
 
 class PrefixWithTest(unittest.TestCase):
-
     def test_gives_empty_on_empty(self):
         res = sut.prefix_with(0, [])
         self.assertFalse(res)
@@ -361,69 +353,75 @@ def test_interleaves_prefix(self):
 
 
 class MergeCtuMapTest(unittest.TestCase):
-
     def test_no_map_gives_empty(self):
         pairs = sut.create_global_ctu_extdef_map([])
         self.assertFalse(pairs)
 
     def test_multiple_maps_merged(self):
-        concat_map = ['c:@F@fun1#I# ast/fun1.c.ast',
-                      'c:@F@fun2#I# ast/fun2.c.ast',
-                      'c:@F@fun3#I# ast/fun3.c.ast']
+        concat_map = [
+            "c:@F at fun1#I# ast/fun1.c.ast",
+            "c:@F at fun2#I# ast/fun2.c.ast",
+            "c:@F at fun3#I# ast/fun3.c.ast",
+        ]
         pairs = sut.create_global_ctu_extdef_map(concat_map)
-        self.assertTrue(('c:@F@fun1#I#', 'ast/fun1.c.ast') in pairs)
-        self.assertTrue(('c:@F@fun2#I#', 'ast/fun2.c.ast') in pairs)
-        self.assertTrue(('c:@F@fun3#I#', 'ast/fun3.c.ast') in pairs)
+        self.assertTrue(("c:@F@fun1#I#", "ast/fun1.c.ast") in pairs)
+        self.assertTrue(("c:@F@fun2#I#", "ast/fun2.c.ast") in pairs)
+        self.assertTrue(("c:@F@fun3#I#", "ast/fun3.c.ast") in pairs)
         self.assertEqual(3, len(pairs))
 
     def test_not_unique_func_left_out(self):
-        concat_map = ['c:@F@fun1#I# ast/fun1.c.ast',
-                      'c:@F@fun2#I# ast/fun2.c.ast',
-                      'c:@F@fun1#I# ast/fun7.c.ast']
+        concat_map = [
+            "c:@F at fun1#I# ast/fun1.c.ast",
+            "c:@F at fun2#I# ast/fun2.c.ast",
+            "c:@F at fun1#I# ast/fun7.c.ast",
+        ]
         pairs = sut.create_global_ctu_extdef_map(concat_map)
-        self.assertFalse(('c:@F@fun1#I#', 'ast/fun1.c.ast') in pairs)
-        self.assertFalse(('c:@F@fun1#I#', 'ast/fun7.c.ast') in pairs)
-        self.assertTrue(('c:@F@fun2#I#', 'ast/fun2.c.ast') in pairs)
+        self.assertFalse(("c:@F@fun1#I#", "ast/fun1.c.ast") in pairs)
+        self.assertFalse(("c:@F@fun1#I#", "ast/fun7.c.ast") in pairs)
+        self.assertTrue(("c:@F@fun2#I#", "ast/fun2.c.ast") in pairs)
         self.assertEqual(1, len(pairs))
 
     def test_duplicates_are_kept(self):
-        concat_map = ['c:@F@fun1#I# ast/fun1.c.ast',
-                      'c:@F@fun2#I# ast/fun2.c.ast',
-                      'c:@F@fun1#I# ast/fun1.c.ast']
+        concat_map = [
+            "c:@F at fun1#I# ast/fun1.c.ast",
+            "c:@F at fun2#I# ast/fun2.c.ast",
+            "c:@F at fun1#I# ast/fun1.c.ast",
+        ]
         pairs = sut.create_global_ctu_extdef_map(concat_map)
-        self.assertTrue(('c:@F@fun1#I#', 'ast/fun1.c.ast') in pairs)
-        self.assertTrue(('c:@F@fun2#I#', 'ast/fun2.c.ast') in pairs)
+        self.assertTrue(("c:@F@fun1#I#", "ast/fun1.c.ast") in pairs)
+        self.assertTrue(("c:@F@fun2#I#", "ast/fun2.c.ast") in pairs)
         self.assertEqual(2, len(pairs))
 
     def test_space_handled_in_source(self):
-        concat_map = ['c:@F@fun1#I# ast/f un.c.ast']
+        concat_map = ["c:@F@fun1#I# ast/f un.c.ast"]
         pairs = sut.create_global_ctu_extdef_map(concat_map)
-        self.assertTrue(('c:@F@fun1#I#', 'ast/f un.c.ast') in pairs)
+        self.assertTrue(("c:@F@fun1#I#", "ast/f un.c.ast") in pairs)
         self.assertEqual(1, len(pairs))
 
 
 class ExtdefMapSrcToAstTest(unittest.TestCase):
-
     def test_empty_gives_empty(self):
         fun_ast_lst = sut.extdef_map_list_src_to_ast([])
         self.assertFalse(fun_ast_lst)
 
     def test_sources_to_asts(self):
-        fun_src_lst = ['c:@F@f1#I# ' + os.path.join(os.sep + 'path', 'f1.c'),
-                       'c:@F@f2#I# ' + os.path.join(os.sep + 'path', 'f2.c')]
+        fun_src_lst = [
+            "c:@F at f1#I# " + os.path.join(os.sep + "path", "f1.c"),
+            "c:@F at f2#I# " + os.path.join(os.sep + "path", "f2.c"),
+        ]
         fun_ast_lst = sut.extdef_map_list_src_to_ast(fun_src_lst)
-        self.assertTrue('c:@F@f1#I# ' +
-                        os.path.join('ast', 'path', 'f1.c.ast')
-                        in fun_ast_lst)
-        self.assertTrue('c:@F@f2#I# ' +
-                        os.path.join('ast', 'path', 'f2.c.ast')
-                        in fun_ast_lst)
+        self.assertTrue(
+            "c:@F at f1#I# " + os.path.join("ast", "path", "f1.c.ast") in fun_ast_lst
+        )
+        self.assertTrue(
+            "c:@F at f2#I# " + os.path.join("ast", "path", "f2.c.ast") in fun_ast_lst
+        )
         self.assertEqual(2, len(fun_ast_lst))
 
     def test_spaces_handled(self):
-        fun_src_lst = ['c:@F@f1#I# ' + os.path.join(os.sep + 'path', 'f 1.c')]
+        fun_src_lst = ["c:@F@f1#I# " + os.path.join(os.sep + "path", "f 1.c")]
         fun_ast_lst = sut.extdef_map_list_src_to_ast(fun_src_lst)
-        self.assertTrue('c:@F@f1#I# ' +
-                        os.path.join('ast', 'path', 'f 1.c.ast')
-                        in fun_ast_lst)
+        self.assertTrue(
+            "c:@F at f1#I# " + os.path.join("ast", "path", "f 1.c.ast") in fun_ast_lst
+        )
         self.assertEqual(1, len(fun_ast_lst))

diff --git a/clang/tools/scan-build-py/tests/unit/test_clang.py b/clang/tools/scan-build-py/tests/unit/test_clang.py
index 80ce61a1fab17..79ff968650abe 100644
--- a/clang/tools/scan-build-py/tests/unit/test_clang.py
+++ b/clang/tools/scan-build-py/tests/unit/test_clang.py
@@ -12,40 +12,40 @@
 
 class ClangGetVersion(unittest.TestCase):
     def test_get_version_is_not_empty(self):
-        self.assertTrue(sut.get_version('clang'))
+        self.assertTrue(sut.get_version("clang"))
 
     def test_get_version_throws(self):
         with self.assertRaises(OSError):
-            sut.get_version('notexists')
+            sut.get_version("notexists")
 
 
 class ClangGetArgumentsTest(unittest.TestCase):
     def test_get_clang_arguments(self):
         with libear.TemporaryDirectory() as tmpdir:
-            filename = os.path.join(tmpdir, 'test.c')
-            with open(filename, 'w') as handle:
-                handle.write('')
+            filename = os.path.join(tmpdir, "test.c")
+            with open(filename, "w") as handle:
+                handle.write("")
 
             result = sut.get_arguments(
-                ['clang', '-c', filename, '-DNDEBUG', '-Dvar="this is it"'],
-                tmpdir)
+                ["clang", "-c", filename, "-DNDEBUG", '-Dvar="this is it"'], tmpdir
+            )
 
-            self.assertTrue('NDEBUG' in result)
+            self.assertTrue("NDEBUG" in result)
             self.assertTrue('var="this is it"' in result)
 
     def test_get_clang_arguments_fails(self):
         with self.assertRaises(Exception):
-            sut.get_arguments(['clang', '-x', 'c', 'notexist.c'], '.')
+            sut.get_arguments(["clang", "-x", "c", "notexist.c"], ".")
 
     def test_get_clang_arguments_fails_badly(self):
         with self.assertRaises(OSError):
-            sut.get_arguments(['notexist'], '.')
+            sut.get_arguments(["notexist"], ".")
 
 
 class ClangGetCheckersTest(unittest.TestCase):
     def test_get_checkers(self):
         # this test is only to see it is not crashing
-        result = sut.get_checkers('clang', [])
+        result = sut.get_checkers("clang", [])
         self.assertTrue(len(result))
         # do check result types
         string_type = unicode if sys.version_info < (3,) else str
@@ -56,50 +56,51 @@ def test_get_checkers(self):
 
     def test_get_active_checkers(self):
         # this test is only to see it is not crashing
-        result = sut.get_active_checkers('clang', [])
+        result = sut.get_active_checkers("clang", [])
         self.assertTrue(len(result))
         # do check result types
         for value in result:
             self.assertEqual(str, type(value))
 
     def test_is_active(self):
-        test = sut.is_active(['a', 'b.b', 'c.c.c'])
+        test = sut.is_active(["a", "b.b", "c.c.c"])
 
-        self.assertTrue(test('a'))
-        self.assertTrue(test('a.b'))
-        self.assertTrue(test('b.b'))
-        self.assertTrue(test('b.b.c'))
-        self.assertTrue(test('c.c.c.p'))
+        self.assertTrue(test("a"))
+        self.assertTrue(test("a.b"))
+        self.assertTrue(test("b.b"))
+        self.assertTrue(test("b.b.c"))
+        self.assertTrue(test("c.c.c.p"))
 
-        self.assertFalse(test('ab'))
-        self.assertFalse(test('ba'))
-        self.assertFalse(test('bb'))
-        self.assertFalse(test('c.c'))
-        self.assertFalse(test('b'))
-        self.assertFalse(test('d'))
+        self.assertFalse(test("ab"))
+        self.assertFalse(test("ba"))
+        self.assertFalse(test("bb"))
+        self.assertFalse(test("c.c"))
+        self.assertFalse(test("b"))
+        self.assertFalse(test("d"))
 
     def test_parse_checkers(self):
         lines = [
-            'OVERVIEW: Clang Static Analyzer Checkers List',
-            '',
-            'CHECKERS:',
-            '  checker.one       Checker One description',
-            '  checker.two',
-            '                    Checker Two description']
+            "OVERVIEW: Clang Static Analyzer Checkers List",
+            "",
+            "CHECKERS:",
+            "  checker.one       Checker One description",
+            "  checker.two",
+            "                    Checker Two description",
+        ]
         result = dict(sut.parse_checkers(lines))
-        self.assertTrue('checker.one' in result)
-        self.assertEqual('Checker One description', result.get('checker.one'))
-        self.assertTrue('checker.two' in result)
-        self.assertEqual('Checker Two description', result.get('checker.two'))
+        self.assertTrue("checker.one" in result)
+        self.assertEqual("Checker One description", result.get("checker.one"))
+        self.assertTrue("checker.two" in result)
+        self.assertEqual("Checker Two description", result.get("checker.two"))
 
 
 class ClangIsCtuCapableTest(unittest.TestCase):
     def test_ctu_not_found(self):
-        is_ctu = sut.is_ctu_capable('not-found-clang-extdef-mapping')
+        is_ctu = sut.is_ctu_capable("not-found-clang-extdef-mapping")
         self.assertFalse(is_ctu)
 
 
 class ClangGetTripleArchTest(unittest.TestCase):
     def test_arch_is_not_empty(self):
-        arch = sut.get_triple_arch(['clang', '-E', '-'], '.')
+        arch = sut.get_triple_arch(["clang", "-E", "-"], ".")
         self.assertTrue(len(arch) > 0)

diff --git a/clang/tools/scan-build-py/tests/unit/test_compilation.py b/clang/tools/scan-build-py/tests/unit/test_compilation.py
index e8ad3d8c99ffb..eb29900d82b1f 100644
--- a/clang/tools/scan-build-py/tests/unit/test_compilation.py
+++ b/clang/tools/scan-build-py/tests/unit/test_compilation.py
@@ -8,82 +8,77 @@
 
 
 class CompilerTest(unittest.TestCase):
-
     def test_is_compiler_call(self):
-        self.assertIsNotNone(sut.compiler_language(['clang']))
-        self.assertIsNotNone(sut.compiler_language(['clang-3.6']))
-        self.assertIsNotNone(sut.compiler_language(['clang++']))
-        self.assertIsNotNone(sut.compiler_language(['clang++-3.5.1']))
-        self.assertIsNotNone(sut.compiler_language(['cc']))
-        self.assertIsNotNone(sut.compiler_language(['c++']))
-        self.assertIsNotNone(sut.compiler_language(['gcc']))
-        self.assertIsNotNone(sut.compiler_language(['g++']))
-        self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/gcc']))
-        self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/g++']))
-        self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/clang']))
-        self.assertIsNotNone(
-            sut.compiler_language(['armv7_neno-linux-gnueabi-g++']))
+        self.assertIsNotNone(sut.compiler_language(["clang"]))
+        self.assertIsNotNone(sut.compiler_language(["clang-3.6"]))
+        self.assertIsNotNone(sut.compiler_language(["clang++"]))
+        self.assertIsNotNone(sut.compiler_language(["clang++-3.5.1"]))
+        self.assertIsNotNone(sut.compiler_language(["cc"]))
+        self.assertIsNotNone(sut.compiler_language(["c++"]))
+        self.assertIsNotNone(sut.compiler_language(["gcc"]))
+        self.assertIsNotNone(sut.compiler_language(["g++"]))
+        self.assertIsNotNone(sut.compiler_language(["/usr/local/bin/gcc"]))
+        self.assertIsNotNone(sut.compiler_language(["/usr/local/bin/g++"]))
+        self.assertIsNotNone(sut.compiler_language(["/usr/local/bin/clang"]))
+        self.assertIsNotNone(sut.compiler_language(["armv7_neno-linux-gnueabi-g++"]))
 
         self.assertIsNone(sut.compiler_language([]))
-        self.assertIsNone(sut.compiler_language(['']))
-        self.assertIsNone(sut.compiler_language(['ld']))
-        self.assertIsNone(sut.compiler_language(['as']))
-        self.assertIsNone(sut.compiler_language(['/usr/local/bin/compiler']))
+        self.assertIsNone(sut.compiler_language([""]))
+        self.assertIsNone(sut.compiler_language(["ld"]))
+        self.assertIsNone(sut.compiler_language(["as"]))
+        self.assertIsNone(sut.compiler_language(["/usr/local/bin/compiler"]))
 
 
 class SplitTest(unittest.TestCase):
-
     def test_detect_cxx_from_compiler_name(self):
         def test(cmd):
-            result = sut.split_command([cmd, '-c', 'src.c'])
+            result = sut.split_command([cmd, "-c", "src.c"])
             self.assertIsNotNone(result, "wrong input for test")
-            return result.compiler == 'c++'
+            return result.compiler == "c++"
 
-        self.assertFalse(test('cc'))
-        self.assertFalse(test('gcc'))
-        self.assertFalse(test('clang'))
+        self.assertFalse(test("cc"))
+        self.assertFalse(test("gcc"))
+        self.assertFalse(test("clang"))
 
-        self.assertTrue(test('c++'))
-        self.assertTrue(test('g++'))
-        self.assertTrue(test('g++-5.3.1'))
-        self.assertTrue(test('clang++'))
-        self.assertTrue(test('clang++-3.7.1'))
-        self.assertTrue(test('armv7_neno-linux-gnueabi-g++'))
+        self.assertTrue(test("c++"))
+        self.assertTrue(test("g++"))
+        self.assertTrue(test("g++-5.3.1"))
+        self.assertTrue(test("clang++"))
+        self.assertTrue(test("clang++-3.7.1"))
+        self.assertTrue(test("armv7_neno-linux-gnueabi-g++"))
 
     def test_action(self):
-        self.assertIsNotNone(sut.split_command(['clang', 'source.c']))
-        self.assertIsNotNone(sut.split_command(['clang', '-c', 'source.c']))
-        self.assertIsNotNone(sut.split_command(['clang', '-c', 'source.c',
-                                                '-MF', 'a.d']))
+        self.assertIsNotNone(sut.split_command(["clang", "source.c"]))
+        self.assertIsNotNone(sut.split_command(["clang", "-c", "source.c"]))
+        self.assertIsNotNone(
+            sut.split_command(["clang", "-c", "source.c", "-MF", "a.d"])
+        )
 
-        self.assertIsNone(sut.split_command(['clang', '-E', 'source.c']))
-        self.assertIsNone(sut.split_command(['clang', '-c', '-E', 'source.c']))
-        self.assertIsNone(sut.split_command(['clang', '-c', '-M', 'source.c']))
-        self.assertIsNone(
-            sut.split_command(['clang', '-c', '-MM', 'source.c']))
+        self.assertIsNone(sut.split_command(["clang", "-E", "source.c"]))
+        self.assertIsNone(sut.split_command(["clang", "-c", "-E", "source.c"]))
+        self.assertIsNone(sut.split_command(["clang", "-c", "-M", "source.c"]))
+        self.assertIsNone(sut.split_command(["clang", "-c", "-MM", "source.c"]))
 
     def test_source_file(self):
         def test(expected, cmd):
             self.assertEqual(expected, sut.split_command(cmd).files)
 
-        test(['src.c'], ['clang', 'src.c'])
-        test(['src.c'], ['clang', '-c', 'src.c'])
-        test(['src.C'], ['clang', '-x', 'c', 'src.C'])
-        test(['src.cpp'], ['clang++', '-c', 'src.cpp'])
-        test(['s1.c', 's2.c'], ['clang', '-c', 's1.c', 's2.c'])
-        test(['s1.c', 's2.c'], ['cc', 's1.c', 's2.c', '-ldep', '-o', 'a.out'])
-        test(['src.c'], ['clang', '-c', '-I', './include', 'src.c'])
-        test(['src.c'], ['clang', '-c', '-I', '/opt/me/include', 'src.c'])
-        test(['src.c'], ['clang', '-c', '-D', 'config=file.c', 'src.c'])
-
-        self.assertIsNone(
-            sut.split_command(['cc', 'this.o', 'that.o', '-o', 'a.out']))
-        self.assertIsNone(
-            sut.split_command(['cc', 'this.o', '-lthat', '-o', 'a.out']))
+        test(["src.c"], ["clang", "src.c"])
+        test(["src.c"], ["clang", "-c", "src.c"])
+        test(["src.C"], ["clang", "-x", "c", "src.C"])
+        test(["src.cpp"], ["clang++", "-c", "src.cpp"])
+        test(["s1.c", "s2.c"], ["clang", "-c", "s1.c", "s2.c"])
+        test(["s1.c", "s2.c"], ["cc", "s1.c", "s2.c", "-ldep", "-o", "a.out"])
+        test(["src.c"], ["clang", "-c", "-I", "./include", "src.c"])
+        test(["src.c"], ["clang", "-c", "-I", "/opt/me/include", "src.c"])
+        test(["src.c"], ["clang", "-c", "-D", "config=file.c", "src.c"])
+
+        self.assertIsNone(sut.split_command(["cc", "this.o", "that.o", "-o", "a.out"]))
+        self.assertIsNone(sut.split_command(["cc", "this.o", "-lthat", "-o", "a.out"]))
 
     def test_filter_flags(self):
         def test(expected, flags):
-            command = ['clang', '-c', 'src.c'] + flags
+            command = ["clang", "-c", "src.c"] + flags
             self.assertEqual(expected, sut.split_command(command).flags)
 
         def same(expected):
@@ -93,29 +88,28 @@ def filtered(flags):
             test([], flags)
 
         same([])
-        same(['-I', '/opt/me/include', '-DNDEBUG', '-ULIMITS'])
-        same(['-O', '-O2'])
-        same(['-m32', '-mmms'])
-        same(['-Wall', '-Wno-unused', '-g', '-funroll-loops'])
+        same(["-I", "/opt/me/include", "-DNDEBUG", "-ULIMITS"])
+        same(["-O", "-O2"])
+        same(["-m32", "-mmms"])
+        same(["-Wall", "-Wno-unused", "-g", "-funroll-loops"])
 
         filtered([])
-        filtered(['-lclien', '-L/opt/me/lib', '-L', '/opt/you/lib'])
-        filtered(['-static'])
-        filtered(['-MD', '-MT', 'something'])
-        filtered(['-MMD', '-MF', 'something'])
+        filtered(["-lclien", "-L/opt/me/lib", "-L", "/opt/you/lib"])
+        filtered(["-static"])
+        filtered(["-MD", "-MT", "something"])
+        filtered(["-MMD", "-MF", "something"])
 
 
 class SourceClassifierTest(unittest.TestCase):
-
     def test_sources(self):
-        self.assertIsNone(sut.classify_source('file.o'))
-        self.assertIsNone(sut.classify_source('file.exe'))
-        self.assertIsNone(sut.classify_source('/path/file.o'))
-        self.assertIsNone(sut.classify_source('clang'))
-
-        self.assertEqual('c', sut.classify_source('file.c'))
-        self.assertEqual('c', sut.classify_source('./file.c'))
-        self.assertEqual('c', sut.classify_source('/path/file.c'))
-        self.assertEqual('c++', sut.classify_source('file.c', False))
-        self.assertEqual('c++', sut.classify_source('./file.c', False))
-        self.assertEqual('c++', sut.classify_source('/path/file.c', False))
+        self.assertIsNone(sut.classify_source("file.o"))
+        self.assertIsNone(sut.classify_source("file.exe"))
+        self.assertIsNone(sut.classify_source("/path/file.o"))
+        self.assertIsNone(sut.classify_source("clang"))
+
+        self.assertEqual("c", sut.classify_source("file.c"))
+        self.assertEqual("c", sut.classify_source("./file.c"))
+        self.assertEqual("c", sut.classify_source("/path/file.c"))
+        self.assertEqual("c++", sut.classify_source("file.c", False))
+        self.assertEqual("c++", sut.classify_source("./file.c", False))
+        self.assertEqual("c++", sut.classify_source("/path/file.c", False))

diff --git a/clang/tools/scan-build-py/tests/unit/test_intercept.py b/clang/tools/scan-build-py/tests/unit/test_intercept.py
index 5473b88d833cc..5f930814b74f4 100644
--- a/clang/tools/scan-build-py/tests/unit/test_intercept.py
+++ b/clang/tools/scan-build-py/tests/unit/test_intercept.py
@@ -10,30 +10,30 @@
 
 
 class InterceptUtilTest(unittest.TestCase):
-
     def test_format_entry_filters_action(self):
         def test(command):
-            trace = {'command': command, 'directory': '/opt/src/project'}
+            trace = {"command": command, "directory": "/opt/src/project"}
             return list(sut.format_entry(trace))
 
-        self.assertTrue(test(['cc', '-c', 'file.c', '-o', 'file.o']))
-        self.assertFalse(test(['cc', '-E', 'file.c']))
-        self.assertFalse(test(['cc', '-MM', 'file.c']))
-        self.assertFalse(test(['cc', 'this.o', 'that.o', '-o', 'a.out']))
+        self.assertTrue(test(["cc", "-c", "file.c", "-o", "file.o"]))
+        self.assertFalse(test(["cc", "-E", "file.c"]))
+        self.assertFalse(test(["cc", "-MM", "file.c"]))
+        self.assertFalse(test(["cc", "this.o", "that.o", "-o", "a.out"]))
 
     def test_format_entry_normalize_filename(self):
-        parent = os.path.join(os.sep, 'home', 'me')
-        current = os.path.join(parent, 'project')
+        parent = os.path.join(os.sep, "home", "me")
+        current = os.path.join(parent, "project")
 
         def test(filename):
-            trace = {'directory': current, 'command': ['cc', '-c', filename]}
-            return list(sut.format_entry(trace))[0]['file']
+            trace = {"directory": current, "command": ["cc", "-c", filename]}
+            return list(sut.format_entry(trace))[0]["file"]
 
-        self.assertEqual(os.path.join(current, 'file.c'), test('file.c'))
-        self.assertEqual(os.path.join(current, 'file.c'), test('./file.c'))
-        self.assertEqual(os.path.join(parent, 'file.c'), test('../file.c'))
-        self.assertEqual(os.path.join(current, 'file.c'),
-                         test(os.path.join(current, 'file.c')))
+        self.assertEqual(os.path.join(current, "file.c"), test("file.c"))
+        self.assertEqual(os.path.join(current, "file.c"), test("./file.c"))
+        self.assertEqual(os.path.join(parent, "file.c"), test("../file.c"))
+        self.assertEqual(
+            os.path.join(current, "file.c"), test(os.path.join(current, "file.c"))
+        )
 
     def test_sip(self):
         def create_status_report(filename, message):
@@ -43,32 +43,34 @@ def create_status_report(filename, message):
                          echo '{0}'
                          echo 'sa-la-la-la'
                          echo 'la-la-la'
-                      """.format(message)
-            lines = [line.strip() for line in content.split('\n')]
-            with open(filename, 'w') as handle:
-                handle.write('\n'.join(lines))
+                      """.format(
+                message
+            )
+            lines = [line.strip() for line in content.split("\n")]
+            with open(filename, "w") as handle:
+                handle.write("\n".join(lines))
                 handle.close()
-            os.chmod(filename, 0x1ff)
+            os.chmod(filename, 0x1FF)
 
         def create_csrutil(dest_dir, status):
-            filename = os.path.join(dest_dir, 'csrutil')
-            message = 'System Integrity Protection status: {0}'.format(status)
+            filename = os.path.join(dest_dir, "csrutil")
+            message = "System Integrity Protection status: {0}".format(status)
             return create_status_report(filename, message)
 
         def create_sestatus(dest_dir, status):
-            filename = os.path.join(dest_dir, 'sestatus')
-            message = 'SELinux status:\t{0}'.format(status)
+            filename = os.path.join(dest_dir, "sestatus")
+            message = "SELinux status:\t{0}".format(status)
             return create_status_report(filename, message)
 
-        ENABLED = 'enabled'
-        DISABLED = 'disabled'
+        ENABLED = "enabled"
+        DISABLED = "disabled"
 
-        OSX = 'darwin'
+        OSX = "darwin"
 
         with libear.TemporaryDirectory() as tmpdir:
-            saved = os.environ['PATH']
+            saved = os.environ["PATH"]
             try:
-                os.environ['PATH'] = tmpdir + ':' + saved
+                os.environ["PATH"] = tmpdir + ":" + saved
 
                 create_csrutil(tmpdir, ENABLED)
                 self.assertTrue(sut.is_preload_disabled(OSX))
@@ -76,14 +78,14 @@ def create_sestatus(dest_dir, status):
                 create_csrutil(tmpdir, DISABLED)
                 self.assertFalse(sut.is_preload_disabled(OSX))
             finally:
-                os.environ['PATH'] = saved
+                os.environ["PATH"] = saved
 
-        saved = os.environ['PATH']
+        saved = os.environ["PATH"]
         try:
-            os.environ['PATH'] = ''
+            os.environ["PATH"] = ""
             # shall be false when it's not in the path
             self.assertFalse(sut.is_preload_disabled(OSX))
 
-            self.assertFalse(sut.is_preload_disabled('unix'))
+            self.assertFalse(sut.is_preload_disabled("unix"))
         finally:
-            os.environ['PATH'] = saved
+            os.environ["PATH"] = saved

diff --git a/clang/tools/scan-build-py/tests/unit/test_libear.py b/clang/tools/scan-build-py/tests/unit/test_libear.py
index 933da50242ff4..22e8c2afb9d9a 100644
--- a/clang/tools/scan-build-py/tests/unit/test_libear.py
+++ b/clang/tools/scan-build-py/tests/unit/test_libear.py
@@ -23,7 +23,7 @@ def test_removes_directory_when_exception(self):
             with sut.TemporaryDirectory() as tmpdir:
                 self.assertTrue(os.path.isdir(tmpdir))
                 dirname = tmpdir
-                raise RuntimeError('message')
+                raise RuntimeError("message")
         except:
             self.assertIsNotNone(dirname)
             self.assertFalse(os.path.exists(dirname))

diff --git a/clang/tools/scan-build-py/tests/unit/test_report.py b/clang/tools/scan-build-py/tests/unit/test_report.py
index 57f0331c46219..4d85590a21122 100644
--- a/clang/tools/scan-build-py/tests/unit/test_report.py
+++ b/clang/tools/scan-build-py/tests/unit/test_report.py
@@ -13,8 +13,8 @@
 
 def run_bug_parse(content):
     with libear.TemporaryDirectory() as tmpdir:
-        file_name = os.path.join(tmpdir, 'test.html')
-        with open(file_name, 'w') as handle:
+        file_name = os.path.join(tmpdir, "test.html")
+        with open(file_name, "w") as handle:
             handle.writelines(content)
         for bug in sut.parse_bug_html(file_name):
             return bug
@@ -22,14 +22,13 @@ def run_bug_parse(content):
 
 def run_crash_parse(content, preproc):
     with libear.TemporaryDirectory() as tmpdir:
-        file_name = os.path.join(tmpdir, preproc + '.info.txt')
-        with open(file_name, 'w') as handle:
+        file_name = os.path.join(tmpdir, preproc + ".info.txt")
+        with open(file_name, "w") as handle:
             handle.writelines(content)
         return sut.parse_crash(file_name)
 
 
 class ParseFileTest(unittest.TestCase):
-
     def test_parse_bug(self):
         content = [
             "some header\n",
@@ -42,56 +41,56 @@ def test_parse_bug(self):
             "<!-- BUGPATHLENGTH 4 -->\n",
             "<!-- BUGMETAEND -->\n",
             "<!-- REPORTHEADER -->\n",
-            "some tails\n"]
+            "some tails\n",
+        ]
         result = run_bug_parse(content)
-        self.assertEqual(result['bug_category'], 'Logic error')
-        self.assertEqual(result['bug_path_length'], 4)
-        self.assertEqual(result['bug_line'], 5)
-        self.assertEqual(result['bug_description'], 'Division by zero')
-        self.assertEqual(result['bug_type'], 'Division by zero')
-        self.assertEqual(result['bug_file'], 'xx')
+        self.assertEqual(result["bug_category"], "Logic error")
+        self.assertEqual(result["bug_path_length"], 4)
+        self.assertEqual(result["bug_line"], 5)
+        self.assertEqual(result["bug_description"], "Division by zero")
+        self.assertEqual(result["bug_type"], "Division by zero")
+        self.assertEqual(result["bug_file"], "xx")
 
     def test_parse_bug_empty(self):
         content = []
         result = run_bug_parse(content)
-        self.assertEqual(result['bug_category'], 'Other')
-        self.assertEqual(result['bug_path_length'], 1)
-        self.assertEqual(result['bug_line'], 0)
+        self.assertEqual(result["bug_category"], "Other")
+        self.assertEqual(result["bug_path_length"], 1)
+        self.assertEqual(result["bug_line"], 0)
 
     def test_parse_crash(self):
         content = [
             "/some/path/file.c\n",
             "Some very serious Error\n",
             "bla\n",
-            "bla-bla\n"]
-        result = run_crash_parse(content, 'file.i')
-        self.assertEqual(result['source'], content[0].rstrip())
-        self.assertEqual(result['problem'], content[1].rstrip())
-        self.assertEqual(os.path.basename(result['file']),
-                         'file.i')
-        self.assertEqual(os.path.basename(result['info']),
-                         'file.i.info.txt')
-        self.assertEqual(os.path.basename(result['stderr']),
-                         'file.i.stderr.txt')
+            "bla-bla\n",
+        ]
+        result = run_crash_parse(content, "file.i")
+        self.assertEqual(result["source"], content[0].rstrip())
+        self.assertEqual(result["problem"], content[1].rstrip())
+        self.assertEqual(os.path.basename(result["file"]), "file.i")
+        self.assertEqual(os.path.basename(result["info"]), "file.i.info.txt")
+        self.assertEqual(os.path.basename(result["stderr"]), "file.i.stderr.txt")
 
     def test_parse_real_crash(self):
         import libscanbuild.analyze as sut2
         import re
+
         with libear.TemporaryDirectory() as tmpdir:
-            filename = os.path.join(tmpdir, 'test.c')
-            with open(filename, 'w') as handle:
-                handle.write('int main() { return 0')
+            filename = os.path.join(tmpdir, "test.c")
+            with open(filename, "w") as handle:
+                handle.write("int main() { return 0")
             # produce failure report
             opts = {
-                'clang': 'clang',
-                'directory': os.getcwd(),
-                'flags': [],
-                'file': filename,
-                'output_dir': tmpdir,
-                'language': 'c',
-                'error_type': 'other_error',
-                'error_output': 'some output',
-                'exit_code': 13
+                "clang": "clang",
+                "directory": os.getcwd(),
+                "flags": [],
+                "file": filename,
+                "output_dir": tmpdir,
+                "language": "c",
+                "error_type": "other_error",
+                "error_output": "some output",
+                "exit_code": 13,
             }
             sut2.report_failure(opts)
             # find the info file
@@ -99,99 +98,89 @@ def test_parse_real_crash(self):
             for root, _, files in os.walk(tmpdir):
                 keys = [os.path.join(root, name) for name in files]
                 for key in keys:
-                    if re.match(r'^(.*/)+clang(.*)\.i$', key):
+                    if re.match(r"^(.*/)+clang(.*)\.i$", key):
                         pp_file = key
             self.assertIsNot(pp_file, None)
             # read the failure report back
-            result = sut.parse_crash(pp_file + '.info.txt')
-            self.assertEqual(result['source'], filename)
-            self.assertEqual(result['problem'], 'Other Error')
-            self.assertEqual(result['file'], pp_file)
-            self.assertEqual(result['info'], pp_file + '.info.txt')
-            self.assertEqual(result['stderr'], pp_file + '.stderr.txt')
+            result = sut.parse_crash(pp_file + ".info.txt")
+            self.assertEqual(result["source"], filename)
+            self.assertEqual(result["problem"], "Other Error")
+            self.assertEqual(result["file"], pp_file)
+            self.assertEqual(result["info"], pp_file + ".info.txt")
+            self.assertEqual(result["stderr"], pp_file + ".stderr.txt")
 
 
 class ReportMethodTest(unittest.TestCase):
-
     def test_chop(self):
-        self.assertEqual('file', sut.chop('/prefix', '/prefix/file'))
-        self.assertEqual('file', sut.chop('/prefix/', '/prefix/file'))
-        self.assertEqual('lib/file', sut.chop('/prefix/', '/prefix/lib/file'))
-        self.assertEqual('/prefix/file', sut.chop('', '/prefix/file'))
+        self.assertEqual("file", sut.chop("/prefix", "/prefix/file"))
+        self.assertEqual("file", sut.chop("/prefix/", "/prefix/file"))
+        self.assertEqual("lib/file", sut.chop("/prefix/", "/prefix/lib/file"))
+        self.assertEqual("/prefix/file", sut.chop("", "/prefix/file"))
 
     def test_chop_when_cwd(self):
-        self.assertEqual('../src/file', sut.chop('/cwd', '/src/file'))
-        self.assertEqual('../src/file', sut.chop('/prefix/cwd',
-                                                 '/prefix/src/file'))
+        self.assertEqual("../src/file", sut.chop("/cwd", "/src/file"))
+        self.assertEqual("../src/file", sut.chop("/prefix/cwd", "/prefix/src/file"))
 
 
 class GetPrefixFromCompilationDatabaseTest(unittest.TestCase):
-
     def test_with_different_filenames(self):
-        self.assertEqual(
-            sut.commonprefix(['/tmp/a.c', '/tmp/b.c']), '/tmp')
+        self.assertEqual(sut.commonprefix(["/tmp/a.c", "/tmp/b.c"]), "/tmp")
 
     def test_with_different_dirnames(self):
-        self.assertEqual(
-            sut.commonprefix(['/tmp/abs/a.c', '/tmp/ack/b.c']), '/tmp')
+        self.assertEqual(sut.commonprefix(["/tmp/abs/a.c", "/tmp/ack/b.c"]), "/tmp")
 
     def test_no_common_prefix(self):
-        self.assertEqual(
-            sut.commonprefix(['/tmp/abs/a.c', '/usr/ack/b.c']), '/')
+        self.assertEqual(sut.commonprefix(["/tmp/abs/a.c", "/usr/ack/b.c"]), "/")
 
     def test_with_single_file(self):
-        self.assertEqual(
-            sut.commonprefix(['/tmp/a.c']), '/tmp')
+        self.assertEqual(sut.commonprefix(["/tmp/a.c"]), "/tmp")
 
     def test_empty(self):
-        self.assertEqual(
-            sut.commonprefix([]), '')
+        self.assertEqual(sut.commonprefix([]), "")
 
-class MergeSarifTest(unittest.TestCase):
 
+class MergeSarifTest(unittest.TestCase):
     def test_merging_sarif(self):
         sarif1 = {
-            '$schema': 'https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json',
-            'runs': [
+            "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
+            "runs": [
                 {
-                    'artifacts': [
+                    "artifacts": [
                         {
-                            'length': 100,
-                            'location': {
-                                'uri': '//clang/tools/scan-build-py/tests/unit/test_report.py'
+                            "length": 100,
+                            "location": {
+                                "uri": "//clang/tools/scan-build-py/tests/unit/test_report.py"
                             },
-                            'mimeType': 'text/plain',
-                            'roles': [
-                                'resultFile'
-                            ]
+                            "mimeType": "text/plain",
+                            "roles": ["resultFile"],
                         }
                     ],
-                    'columnKind': 'unicodeCodePoints',
-                    'results': [
+                    "columnKind": "unicodeCodePoints",
+                    "results": [
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'locations': [
+                                            "locations": [
                                                 {
-                                                    'importance': 'important',
-                                                    'location': {
-                                                        'message': {
-                                                            'text': 'test message 1'
+                                                    "importance": "important",
+                                                    "location": {
+                                                        "message": {
+                                                            "text": "test message 1"
                                                         },
-                                                        'physicalLocation': {
-                                                            'artifactLocation': {
-                                                                'index': 0,
-                                                                'uri': '//clang/tools/scan-build-py/tests/unit/test_report.py'
+                                                        "physicalLocation": {
+                                                            "artifactLocation": {
+                                                                "index": 0,
+                                                                "uri": "//clang/tools/scan-build-py/tests/unit/test_report.py",
                                                             },
-                                                            'region': {
-                                                                'endColumn': 5,
-                                                                'startColumn': 1,
-                                                                'startLine': 2
-                                                            }
-                                                        }
-                                                    }
+                                                            "region": {
+                                                                "endColumn": 5,
+                                                                "startColumn": 1,
+                                                                "startLine": 2,
+                                                            },
+                                                        },
+                                                    },
                                                 }
                                             ]
                                         }
@@ -200,101 +189,99 @@ def test_merging_sarif(self):
                             ]
                         },
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'locations': [
+                                            "locations": [
                                                 {
-                                                    'importance': 'important',
-                                                    'location': {
-                                                        'message': {
-                                                            'text': 'test message 2'
+                                                    "importance": "important",
+                                                    "location": {
+                                                        "message": {
+                                                            "text": "test message 2"
                                                         },
-                                                        'physicalLocation': {
-                                                            'artifactLocation': {
-                                                                'index': 0,
-                                                                'uri': '//clang/tools/scan-build-py/tests/unit/test_report.py'
+                                                        "physicalLocation": {
+                                                            "artifactLocation": {
+                                                                "index": 0,
+                                                                "uri": "//clang/tools/scan-build-py/tests/unit/test_report.py",
+                                                            },
+                                                            "region": {
+                                                                "endColumn": 23,
+                                                                "startColumn": 9,
+                                                                "startLine": 10,
                                                             },
-                                                            'region': {
-                                                                'endColumn': 23,
-                                                                'startColumn': 9,
-                                                                'startLine': 10
-                                                            }
-                                                        }
-                                                    }
+                                                        },
+                                                    },
                                                 }
                                             ]
                                         }
                                     ]
                                 }
                             ]
-                        }
+                        },
                     ],
-                    'tool': {
-                        'driver': {
-                            'fullName': 'clang static analyzer',
-                            'language': 'en-US',
-                            'name': 'clang',
-                            'rules': [
+                    "tool": {
+                        "driver": {
+                            "fullName": "clang static analyzer",
+                            "language": "en-US",
+                            "name": "clang",
+                            "rules": [
                                 {
-                                    'fullDescription': {
-                                        'text': 'test rule for merge sarif test'
+                                    "fullDescription": {
+                                        "text": "test rule for merge sarif test"
                                     },
-                                    'helpUrl': '//clang/tools/scan-build-py/tests/unit/test_report.py',
-                                    'id': 'testId',
-                                    'name': 'testName'
+                                    "helpUrl": "//clang/tools/scan-build-py/tests/unit/test_report.py",
+                                    "id": "testId",
+                                    "name": "testName",
                                 }
                             ],
-                            'version': 'test clang'
+                            "version": "test clang",
                         }
-                    }
+                    },
                 }
             ],
-            'version': '2.1.0'
+            "version": "2.1.0",
         }
         sarif2 = {
-            '$schema': 'https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json',
-            'runs': [
+            "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
+            "runs": [
                 {
-                    'artifacts': [
+                    "artifacts": [
                         {
-                            'length': 1523,
-                            'location': {
-                                'uri': '//clang/tools/scan-build-py/tests/unit/test_report.py'
+                            "length": 1523,
+                            "location": {
+                                "uri": "//clang/tools/scan-build-py/tests/unit/test_report.py"
                             },
-                            'mimeType': 'text/plain',
-                            'roles': [
-                                'resultFile'
-                            ]
+                            "mimeType": "text/plain",
+                            "roles": ["resultFile"],
                         }
                     ],
-                    'columnKind': 'unicodeCodePoints',
-                    'results': [
+                    "columnKind": "unicodeCodePoints",
+                    "results": [
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'locations': [
+                                            "locations": [
                                                 {
-                                                    'importance': 'important',
-                                                    'location': {
-                                                        'message': {
-                                                            'text': 'test message 3'
+                                                    "importance": "important",
+                                                    "location": {
+                                                        "message": {
+                                                            "text": "test message 3"
                                                         },
-                                                        'physicalLocation': {
-                                                            'artifactLocation': {
-                                                                'index': 0,
-                                                                'uri': '//clang/tools/scan-build-py/tests/unit/test_report.py'
+                                                        "physicalLocation": {
+                                                            "artifactLocation": {
+                                                                "index": 0,
+                                                                "uri": "//clang/tools/scan-build-py/tests/unit/test_report.py",
+                                                            },
+                                                            "region": {
+                                                                "endColumn": 99,
+                                                                "startColumn": 99,
+                                                                "startLine": 17,
                                                             },
-                                                            'region': {
-                                                                'endColumn': 99,
-                                                                'startColumn': 99,
-                                                                'startLine': 17
-                                                            }
-                                                        }
-                                                    }
+                                                        },
+                                                    },
                                                 }
                                             ]
                                         }
@@ -303,213 +290,213 @@ def test_merging_sarif(self):
                             ]
                         },
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'locations': [
+                                            "locations": [
                                                 {
-                                                    'importance': 'important',
-                                                    'location': {
-                                                        'message': {
-                                                            'text': 'test message 4'
+                                                    "importance": "important",
+                                                    "location": {
+                                                        "message": {
+                                                            "text": "test message 4"
                                                         },
-                                                        'physicalLocation': {
-                                                            'artifactLocation': {
-                                                                'index': 0,
-                                                                'uri': '//clang/tools/scan-build-py/tests/unit/test_report.py'
+                                                        "physicalLocation": {
+                                                            "artifactLocation": {
+                                                                "index": 0,
+                                                                "uri": "//clang/tools/scan-build-py/tests/unit/test_report.py",
                                                             },
-                                                            'region': {
-                                                                'endColumn': 305,
-                                                                'startColumn': 304,
-                                                                'startLine': 1
-                                                            }
-                                                        }
-                                                    }
+                                                            "region": {
+                                                                "endColumn": 305,
+                                                                "startColumn": 304,
+                                                                "startLine": 1,
+                                                            },
+                                                        },
+                                                    },
                                                 }
                                             ]
                                         }
                                     ]
                                 }
                             ]
-                        }
+                        },
                     ],
-                    'tool': {
-                        'driver': {
-                            'fullName': 'clang static analyzer',
-                            'language': 'en-US',
-                            'name': 'clang',
-                            'rules': [
+                    "tool": {
+                        "driver": {
+                            "fullName": "clang static analyzer",
+                            "language": "en-US",
+                            "name": "clang",
+                            "rules": [
                                 {
-                                    'fullDescription': {
-                                        'text': 'test rule for merge sarif test'
+                                    "fullDescription": {
+                                        "text": "test rule for merge sarif test"
                                     },
-                                    'helpUrl': '//clang/tools/scan-build-py/tests/unit/test_report.py',
-                                    'id': 'testId',
-                                    'name': 'testName'
+                                    "helpUrl": "//clang/tools/scan-build-py/tests/unit/test_report.py",
+                                    "id": "testId",
+                                    "name": "testName",
                                 }
                             ],
-                            'version': 'test clang'
+                            "version": "test clang",
                         }
-                    }
+                    },
                 }
             ],
-            'version': '2.1.0'
+            "version": "2.1.0",
         }
 
         contents = [sarif1, sarif2]
         with libear.TemporaryDirectory() as tmpdir:
             for idx, content in enumerate(contents):
-                file_name = os.path.join(tmpdir, 'results-{}.sarif'.format(idx))
-                with open(file_name, 'w') as handle:
+                file_name = os.path.join(tmpdir, "results-{}.sarif".format(idx))
+                with open(file_name, "w") as handle:
                     json.dump(content, handle)
 
             sut.merge_sarif_files(tmpdir, sort_files=True)
 
-            self.assertIn('results-merged.sarif', os.listdir(tmpdir))
-            with open(os.path.join(tmpdir, 'results-merged.sarif')) as f:
+            self.assertIn("results-merged.sarif", os.listdir(tmpdir))
+            with open(os.path.join(tmpdir, "results-merged.sarif")) as f:
                 merged = json.load(f)
-                self.assertEqual(len(merged['runs']), 2)
-                self.assertEqual(len(merged['runs'][0]['results']), 2)
-                self.assertEqual(len(merged['runs'][1]['results']), 2)
+                self.assertEqual(len(merged["runs"]), 2)
+                self.assertEqual(len(merged["runs"][0]["results"]), 2)
+                self.assertEqual(len(merged["runs"][1]["results"]), 2)
 
                 expected = sarif1
-                for run in sarif2['runs']:
-                    expected['runs'].append(run)
+                for run in sarif2["runs"]:
+                    expected["runs"].append(run)
 
                 self.assertEqual(merged, expected)
 
     def test_merge_updates_embedded_link(self):
         sarif1 = {
-            'runs': [
+            "runs": [
                 {
-                    'results': [
+                    "results": [
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'message': {
-                                        'text': 'test message 1-1 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/1/results/0)'
+                                    "message": {
+                                        "text": "test message 1-1 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/1/results/0)"
                                     },
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'message': {
-                                                'text': 'test message 1-2 [link](sarif:/runs/1/results/0)'
+                                            "message": {
+                                                "text": "test message 1-2 [link](sarif:/runs/1/results/0)"
                                             }
                                         }
-                                    ]
+                                    ],
                                 }
                             ]
                         }
                     ]
                 },
                 {
-                    'results': [
+                    "results": [
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'message': {
-                                        'text': 'test message 2-1 [link](sarif:/runs/0/results/0)'
+                                    "message": {
+                                        "text": "test message 2-1 [link](sarif:/runs/0/results/0)"
                                     },
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'message': {
-                                                'text': 'test message 2-2 [link](sarif:/runs/0/results/0)'
+                                            "message": {
+                                                "text": "test message 2-2 [link](sarif:/runs/0/results/0)"
                                             }
                                         }
-                                    ]
+                                    ],
                                 }
                             ]
                         }
                     ]
-                }
+                },
             ]
         }
         sarif2 = {
-            'runs': [
+            "runs": [
                 {
-                    'results': [
+                    "results": [
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'message': {
-                                        'text': 'test message 3-1 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/1/results/0)'
+                                    "message": {
+                                        "text": "test message 3-1 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/1/results/0)"
                                     },
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'message': {
-                                                'text': 'test message 3-2 [link](sarif:/runs/1/results/0)'
+                                            "message": {
+                                                "text": "test message 3-2 [link](sarif:/runs/1/results/0)"
                                             }
                                         }
-                                    ]
+                                    ],
                                 }
                             ]
                         }
                     ],
                 },
                 {
-                    'results': [
+                    "results": [
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'message': {
-                                        'text': 'test message 4-1 [link](sarif:/runs/0/results/0)'
+                                    "message": {
+                                        "text": "test message 4-1 [link](sarif:/runs/0/results/0)"
                                     },
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'message': {
-                                                'text': 'test message 4-2 [link](sarif:/runs/0/results/0)'
+                                            "message": {
+                                                "text": "test message 4-2 [link](sarif:/runs/0/results/0)"
                                             }
                                         }
-                                    ]
+                                    ],
                                 }
                             ]
                         }
                     ]
-                }
+                },
             ]
         }
         sarif3 = {
-            'runs': [
+            "runs": [
                 {
-                    'results': [
+                    "results": [
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'message': {
-                                        'text': 'test message 5-1 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/1/results/0)'
+                                    "message": {
+                                        "text": "test message 5-1 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/1/results/0)"
                                     },
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'message': {
-                                                'text': 'test message 5-2 [link](sarif:/runs/1/results/0)'
+                                            "message": {
+                                                "text": "test message 5-2 [link](sarif:/runs/1/results/0)"
                                             }
                                         }
-                                    ]
+                                    ],
                                 }
                             ]
                         }
                     ],
                 },
                 {
-                    'results': [
+                    "results": [
                         {
-                            'codeFlows': [
+                            "codeFlows": [
                                 {
-                                    'message': {
-                                        'text': 'test message 6-1 [link](sarif:/runs/0/results/0)'
+                                    "message": {
+                                        "text": "test message 6-1 [link](sarif:/runs/0/results/0)"
                                     },
-                                    'threadFlows': [
+                                    "threadFlows": [
                                         {
-                                            'message': {
-                                                'text': 'test message 6-2 [link](sarif:/runs/0/results/0)'
+                                            "message": {
+                                                "text": "test message 6-2 [link](sarif:/runs/0/results/0)"
                                             }
                                         }
-                                    ]
+                                    ],
                                 }
                             ]
                         }
                     ]
-                }
+                },
             ]
         }
 
@@ -517,145 +504,206 @@ def test_merge_updates_embedded_link(self):
 
         with libear.TemporaryDirectory() as tmpdir:
             for idx, content in enumerate(contents):
-                file_name = os.path.join(tmpdir, 'results-{}.sarif'.format(idx))
-                with open(file_name, 'w') as handle:
+                file_name = os.path.join(tmpdir, "results-{}.sarif".format(idx))
+                with open(file_name, "w") as handle:
                     json.dump(content, handle)
 
             sut.merge_sarif_files(tmpdir, sort_files=True)
 
-            self.assertIn('results-merged.sarif', os.listdir(tmpdir))
-            with open(os.path.join(tmpdir, 'results-merged.sarif')) as f:
+            self.assertIn("results-merged.sarif", os.listdir(tmpdir))
+            with open(os.path.join(tmpdir, "results-merged.sarif")) as f:
                 merged = json.load(f)
-                self.assertEqual(len(merged['runs']), 6)
-
-                code_flows = [merged['runs'][x]['results'][0]['codeFlows'][0]['message']['text'] for x in range(6)]
-                thread_flows = [merged['runs'][x]['results'][0]['codeFlows'][0]['threadFlows'][0]['message']['text'] for x in range(6)]
+                self.assertEqual(len(merged["runs"]), 6)
+
+                code_flows = [
+                    merged["runs"][x]["results"][0]["codeFlows"][0]["message"]["text"]
+                    for x in range(6)
+                ]
+                thread_flows = [
+                    merged["runs"][x]["results"][0]["codeFlows"][0]["threadFlows"][0][
+                        "message"
+                    ]["text"]
+                    for x in range(6)
+                ]
 
                 # The run index should be updated for the second and third sets of runs
-                self.assertEqual(code_flows,
+                self.assertEqual(
+                    code_flows,
                     [
-                        'test message 1-1 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/1/results/0)',
-                        'test message 2-1 [link](sarif:/runs/0/results/0)',
-                        'test message 3-1 [link](sarif:/runs/3/results/0) [link2](sarif:/runs/3/results/0)',
-                        'test message 4-1 [link](sarif:/runs/2/results/0)',
-                        'test message 5-1 [link](sarif:/runs/5/results/0) [link2](sarif:/runs/5/results/0)',
-                        'test message 6-1 [link](sarif:/runs/4/results/0)'
-                    ])
-                self.assertEquals(thread_flows,
+                        "test message 1-1 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/1/results/0)",
+                        "test message 2-1 [link](sarif:/runs/0/results/0)",
+                        "test message 3-1 [link](sarif:/runs/3/results/0) [link2](sarif:/runs/3/results/0)",
+                        "test message 4-1 [link](sarif:/runs/2/results/0)",
+                        "test message 5-1 [link](sarif:/runs/5/results/0) [link2](sarif:/runs/5/results/0)",
+                        "test message 6-1 [link](sarif:/runs/4/results/0)",
+                    ],
+                )
+                self.assertEquals(
+                    thread_flows,
                     [
-                        'test message 1-2 [link](sarif:/runs/1/results/0)',
-                        'test message 2-2 [link](sarif:/runs/0/results/0)',
-                        'test message 3-2 [link](sarif:/runs/3/results/0)',
-                        'test message 4-2 [link](sarif:/runs/2/results/0)',
-                        'test message 5-2 [link](sarif:/runs/5/results/0)',
-                        'test message 6-2 [link](sarif:/runs/4/results/0)'
-                    ])
+                        "test message 1-2 [link](sarif:/runs/1/results/0)",
+                        "test message 2-2 [link](sarif:/runs/0/results/0)",
+                        "test message 3-2 [link](sarif:/runs/3/results/0)",
+                        "test message 4-2 [link](sarif:/runs/2/results/0)",
+                        "test message 5-2 [link](sarif:/runs/5/results/0)",
+                        "test message 6-2 [link](sarif:/runs/4/results/0)",
+                    ],
+                )
 
     def test_overflow_run_count(self):
         sarif1 = {
-            'runs': [
-                {'results': [{
-                    'message': {'text': 'run 1-0 [link](sarif:/runs/1/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-1 [link](sarif:/runs/2/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-2 [link](sarif:/runs/3/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-3 [link](sarif:/runs/4/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-4 [link](sarif:/runs/5/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-5 [link](sarif:/runs/6/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-6 [link](sarif:/runs/7/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-7 [link](sarif:/runs/8/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-8 [link](sarif:/runs/9/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 1-9 [link](sarif:/runs/0/results/0)'}
-                }]}
+            "runs": [
+                {
+                    "results": [
+                        {"message": {"text": "run 1-0 [link](sarif:/runs/1/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-1 [link](sarif:/runs/2/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-2 [link](sarif:/runs/3/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-3 [link](sarif:/runs/4/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-4 [link](sarif:/runs/5/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-5 [link](sarif:/runs/6/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-6 [link](sarif:/runs/7/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-7 [link](sarif:/runs/8/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-8 [link](sarif:/runs/9/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 1-9 [link](sarif:/runs/0/results/0)"}}
+                    ]
+                },
             ]
         }
         sarif2 = {
-            'runs': [
-                {'results': [{
-                    'message': {'text': 'run 2-0 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/2/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-1 [link](sarif:/runs/2/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-2 [link](sarif:/runs/3/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-3 [link](sarif:/runs/4/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-4 [link](sarif:/runs/5/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-5 [link](sarif:/runs/6/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-6 [link](sarif:/runs/7/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-7 [link](sarif:/runs/8/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-8 [link](sarif:/runs/9/results/0)'}
-                }]},
-                {'results': [{
-                    'message': {'text': 'run 2-9 [link](sarif:/runs/0/results/0)'}
-                }]}
+            "runs": [
+                {
+                    "results": [
+                        {
+                            "message": {
+                                "text": "run 2-0 [link](sarif:/runs/1/results/0) [link2](sarif:/runs/2/results/0)"
+                            }
+                        }
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-1 [link](sarif:/runs/2/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-2 [link](sarif:/runs/3/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-3 [link](sarif:/runs/4/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-4 [link](sarif:/runs/5/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-5 [link](sarif:/runs/6/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-6 [link](sarif:/runs/7/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-7 [link](sarif:/runs/8/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-8 [link](sarif:/runs/9/results/0)"}}
+                    ]
+                },
+                {
+                    "results": [
+                        {"message": {"text": "run 2-9 [link](sarif:/runs/0/results/0)"}}
+                    ]
+                },
             ]
         }
 
         contents = [sarif1, sarif2]
         with libear.TemporaryDirectory() as tmpdir:
             for idx, content in enumerate(contents):
-                file_name = os.path.join(tmpdir, 'results-{}.sarif'.format(idx))
-                with open(file_name, 'w') as handle:
+                file_name = os.path.join(tmpdir, "results-{}.sarif".format(idx))
+                with open(file_name, "w") as handle:
                     json.dump(content, handle)
 
             sut.merge_sarif_files(tmpdir, sort_files=True)
 
-            self.assertIn('results-merged.sarif', os.listdir(tmpdir))
-            with open(os.path.join(tmpdir, 'results-merged.sarif')) as f:
+            self.assertIn("results-merged.sarif", os.listdir(tmpdir))
+            with open(os.path.join(tmpdir, "results-merged.sarif")) as f:
                 merged = json.load(f)
-                self.assertEqual(len(merged['runs']), 20)
-
-                messages = [merged['runs'][x]['results'][0]['message']['text'] for x in range(20)]
-                self.assertEqual(messages,
+                self.assertEqual(len(merged["runs"]), 20)
+
+                messages = [
+                    merged["runs"][x]["results"][0]["message"]["text"]
+                    for x in range(20)
+                ]
+                self.assertEqual(
+                    messages,
                     [
-                        'run 1-0 [link](sarif:/runs/1/results/0)',
-                        'run 1-1 [link](sarif:/runs/2/results/0)',
-                        'run 1-2 [link](sarif:/runs/3/results/0)',
-                        'run 1-3 [link](sarif:/runs/4/results/0)',
-                        'run 1-4 [link](sarif:/runs/5/results/0)',
-                        'run 1-5 [link](sarif:/runs/6/results/0)',
-                        'run 1-6 [link](sarif:/runs/7/results/0)',
-                        'run 1-7 [link](sarif:/runs/8/results/0)',
-                        'run 1-8 [link](sarif:/runs/9/results/0)',
-                        'run 1-9 [link](sarif:/runs/0/results/0)',
-                        'run 2-0 [link](sarif:/runs/11/results/0) [link2](sarif:/runs/12/results/0)',
-                        'run 2-1 [link](sarif:/runs/12/results/0)',
-                        'run 2-2 [link](sarif:/runs/13/results/0)',
-                        'run 2-3 [link](sarif:/runs/14/results/0)',
-                        'run 2-4 [link](sarif:/runs/15/results/0)',
-                        'run 2-5 [link](sarif:/runs/16/results/0)',
-                        'run 2-6 [link](sarif:/runs/17/results/0)',
-                        'run 2-7 [link](sarif:/runs/18/results/0)',
-                        'run 2-8 [link](sarif:/runs/19/results/0)',
-                        'run 2-9 [link](sarif:/runs/10/results/0)'
-                    ])
+                        "run 1-0 [link](sarif:/runs/1/results/0)",
+                        "run 1-1 [link](sarif:/runs/2/results/0)",
+                        "run 1-2 [link](sarif:/runs/3/results/0)",
+                        "run 1-3 [link](sarif:/runs/4/results/0)",
+                        "run 1-4 [link](sarif:/runs/5/results/0)",
+                        "run 1-5 [link](sarif:/runs/6/results/0)",
+                        "run 1-6 [link](sarif:/runs/7/results/0)",
+                        "run 1-7 [link](sarif:/runs/8/results/0)",
+                        "run 1-8 [link](sarif:/runs/9/results/0)",
+                        "run 1-9 [link](sarif:/runs/0/results/0)",
+                        "run 2-0 [link](sarif:/runs/11/results/0) [link2](sarif:/runs/12/results/0)",
+                        "run 2-1 [link](sarif:/runs/12/results/0)",
+                        "run 2-2 [link](sarif:/runs/13/results/0)",
+                        "run 2-3 [link](sarif:/runs/14/results/0)",
+                        "run 2-4 [link](sarif:/runs/15/results/0)",
+                        "run 2-5 [link](sarif:/runs/16/results/0)",
+                        "run 2-6 [link](sarif:/runs/17/results/0)",
+                        "run 2-7 [link](sarif:/runs/18/results/0)",
+                        "run 2-8 [link](sarif:/runs/19/results/0)",
+                        "run 2-9 [link](sarif:/runs/10/results/0)",
+                    ],
+                )

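The fixtures above pin down how merge_sarif_files renumbers embedded sarif:/runs/N/results/M links: when one file's runs are appended to the merged output, every referenced run index is shifted by the number of runs already merged (10 in the overflow test, hence runs/1 becoming runs/11). A minimal sketch of that renumbering idea — the helper name and regex are illustrative, not necessarily the actual implementation in report.py:

    import re

    _EMBEDDED_LINK_RE = re.compile(r"sarif:/runs/(\d+)")


    def shift_run_index(text, offset):
        # e.g. "sarif:/runs/1/results/0" -> "sarif:/runs/11/results/0" for offset 10.
        return _EMBEDDED_LINK_RE.sub(
            lambda match: "sarif:/runs/%d" % (int(match.group(1)) + offset), text
        )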
diff --git a/clang/tools/scan-build-py/tests/unit/test_shell.py b/clang/tools/scan-build-py/tests/unit/test_shell.py
index 6ffbb8782a953..dd27d2caa714b 100644
--- a/clang/tools/scan-build-py/tests/unit/test_shell.py
+++ b/clang/tools/scan-build-py/tests/unit/test_shell.py
@@ -8,7 +8,6 @@
 
 
 class ShellTest(unittest.TestCase):
-
     def test_encode_decode_are_same(self):
         def test(value):
             self.assertEqual(sut.encode(sut.decode(value)), value)
@@ -22,20 +21,26 @@ def test(value):
             self.assertEqual(sut.decode(sut.encode(value)), value)
 
         test([])
-        test(['clang'])
-        test(['clang', 'this', 'and', 'that'])
-        test(['clang', 'this and', 'that'])
-        test(['clang', "it's me", 'again'])
-        test(['clang', 'some "words" are', 'quoted'])
+        test(["clang"])
+        test(["clang", "this", "and", "that"])
+        test(["clang", "this and", "that"])
+        test(["clang", "it's me", "again"])
+        test(["clang", 'some "words" are', "quoted"])
 
     def test_encode(self):
-        self.assertEqual(sut.encode(['clang', "it's me", 'again']),
-                         'clang "it\'s me" again')
-        self.assertEqual(sut.encode(['clang', "it(s me", 'again)']),
-                         'clang "it(s me" "again)"')
-        self.assertEqual(sut.encode(['clang', 'redirect > it']),
-                         'clang "redirect > it"')
-        self.assertEqual(sut.encode(['clang', '-DKEY="VALUE"']),
-                         'clang -DKEY=\\"VALUE\\"')
-        self.assertEqual(sut.encode(['clang', '-DKEY="value with spaces"']),
-                         'clang -DKEY=\\"value with spaces\\"')
+        self.assertEqual(
+            sut.encode(["clang", "it's me", "again"]), 'clang "it\'s me" again'
+        )
+        self.assertEqual(
+            sut.encode(["clang", "it(s me", "again)"]), 'clang "it(s me" "again)"'
+        )
+        self.assertEqual(
+            sut.encode(["clang", "redirect > it"]), 'clang "redirect > it"'
+        )
+        self.assertEqual(
+            sut.encode(["clang", '-DKEY="VALUE"']), 'clang -DKEY=\\"VALUE\\"'
+        )
+        self.assertEqual(
+            sut.encode(["clang", '-DKEY="value with spaces"']),
+            'clang -DKEY=\\"value with spaces\\"',
+        )

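test_shell.py asserts that encode and decode are mutual inverses on well-formed command lines. A round-trip usage sketch of the property under test — the import path is an assumption, since the test module only shows it aliased as sut:

    from libscanbuild import shell as sut  # assumed import path

    cmd = ["clang", '-DKEY="value with spaces"']
    line = sut.encode(cmd)  # clang -DKEY=\"value with spaces\"
    assert sut.decode(line) == cmd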
diff --git a/clang/tools/scan-view/share/Reporter.py b/clang/tools/scan-view/share/Reporter.py
index 31a14fb0cf74e..21874b378687e 100644
--- a/clang/tools/scan-view/share/Reporter.py
+++ b/clang/tools/scan-view/share/Reporter.py
@@ -5,23 +5,28 @@
 
 import subprocess, sys, os
 
-__all__ = ['ReportFailure', 'BugReport', 'getReporters']
+__all__ = ["ReportFailure", "BugReport", "getReporters"]
 
 #
 
+
 class ReportFailure(Exception):
     """Generic exception for failures in bug reporting."""
-    def __init__(self, value):        
+
+    def __init__(self, value):
         self.value = value
 
+
 # Collect information about a bug.
 
+
 class BugReport(object):
     def __init__(self, title, description, files):
         self.title = title
         self.description = description
         self.files = files
 
+
 # Reporter interfaces.
 
 import os
@@ -33,54 +38,76 @@ def __init__(self, title, description, files):
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
 
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 # ReporterParameter
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
+
 
 class ReporterParameter(object):
-  def __init__(self, n):
-    self.name = n
-  def getName(self):
-    return self.name
-  def getValue(self,r,bugtype,getConfigOption):
-     return getConfigOption(r.getName(),self.getName())
-  def saveConfigValue(self):
-    return True
-
-class TextParameter (ReporterParameter):
-  def getHTML(self,r,bugtype,getConfigOption):
-    return """\
+    def __init__(self, n):
+        self.name = n
+
+    def getName(self):
+        return self.name
+
+    def getValue(self, r, bugtype, getConfigOption):
+        return getConfigOption(r.getName(), self.getName())
+
+    def saveConfigValue(self):
+        return True
+
+
+class TextParameter(ReporterParameter):
+    def getHTML(self, r, bugtype, getConfigOption):
+        return """\
 <tr>
 <td class="form_clabel">%s:</td>
 <td class="form_value"><input type="text" name="%s_%s" value="%s"></td>
-</tr>"""%(self.getName(),r.getName(),self.getName(),self.getValue(r,bugtype,getConfigOption))
-
-class SelectionParameter (ReporterParameter):
-  def __init__(self, n, values):
-    ReporterParameter.__init__(self,n)
-    self.values = values
-    
-  def getHTML(self,r,bugtype,getConfigOption):
-    default = self.getValue(r,bugtype,getConfigOption)
-    return """\
+</tr>""" % (
+            self.getName(),
+            r.getName(),
+            self.getName(),
+            self.getValue(r, bugtype, getConfigOption),
+        )
+
+
+class SelectionParameter(ReporterParameter):
+    def __init__(self, n, values):
+        ReporterParameter.__init__(self, n)
+        self.values = values
+
+    def getHTML(self, r, bugtype, getConfigOption):
+        default = self.getValue(r, bugtype, getConfigOption)
+        return """\
 <tr>
 <td class="form_clabel">%s:</td><td class="form_value"><select name="%s_%s">
 %s
-</select></td>"""%(self.getName(),r.getName(),self.getName(),'\n'.join(["""\
-<option value="%s"%s>%s</option>"""%(o[0],
-                                     o[0] == default and ' selected="selected"' or '',
-                                     o[1]) for o in self.values]))
-
-#===------------------------------------------------------------------------===#
+</select></td>""" % (
+            self.getName(),
+            r.getName(),
+            self.getName(),
+            "\n".join(
+                [
+                    """\
+<option value="%s"%s>%s</option>"""
+                    % (o[0], o[0] == default and ' selected="selected"' or "", o[1])
+                    for o in self.values
+                ]
+            ),
+        )
+
+
+# ===------------------------------------------------------------------------===#
 # Reporters
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
+
 
 class EmailReporter(object):
     def getName(self):
-        return 'Email'
+        return "Email"
 
     def getParameters(self):
-        return [TextParameter(x) for x in ['To', 'From', 'SMTP Server', 'SMTP Port']]
+        return [TextParameter(x) for x in ["To", "From", "SMTP Server", "SMTP Port"]]
 
     # Lifted from python email module examples.
     def attachFile(self, outer, path):
@@ -91,22 +118,24 @@ def attachFile(self, outer, path):
         if ctype is None or encoding is not None:
             # No guess could be made, or the file is encoded (compressed), so
             # use a generic bag-of-bits type.
-            ctype = 'application/octet-stream'
-        maintype, subtype = ctype.split('/', 1)
-        if maintype == 'text':
+            ctype = "application/octet-stream"
+        maintype, subtype = ctype.split("/", 1)
+        if maintype == "text":
             fp = open(path)
             # Note: we should handle calculating the charset
             msg = MIMEText(fp.read(), _subtype=subtype)
             fp.close()
         else:
-            fp = open(path, 'rb')
+            fp = open(path, "rb")
             msg = MIMEBase(maintype, subtype)
             msg.set_payload(fp.read())
             fp.close()
             # Encode the payload using Base64
             encoders.encode_base64(msg)
         # Set the filename parameter
-        msg.add_header('Content-Disposition', 'attachment', filename=os.path.basename(path))
+        msg.add_header(
+            "Content-Disposition", "attachment", filename=os.path.basename(path)
+        )
         outer.attach(msg)
 
     def fileReport(self, report, parameters):
@@ -115,69 +144,83 @@ def fileReport(self, report, parameters):
 ---
 Title: %s
 Description: %s
-"""%(report.title, report.description)
+""" % (
+            report.title,
+            report.description,
+        )
 
-        if not parameters.get('To'):
+        if not parameters.get("To"):
             raise ReportFailure('No "To" address specified.')
-        if not parameters.get('From'):
+        if not parameters.get("From"):
             raise ReportFailure('No "From" address specified.')
 
         msg = MIMEMultipart()
-        msg['Subject'] = 'BUG REPORT: %s'%(report.title)
+        msg["Subject"] = "BUG REPORT: %s" % (report.title)
         # FIXME: Get config parameters
-        msg['To'] = parameters.get('To')
-        msg['From'] = parameters.get('From')
+        msg["To"] = parameters.get("To")
+        msg["From"] = parameters.get("From")
         msg.preamble = mainMsg
 
-        msg.attach(MIMEText(mainMsg, _subtype='text/plain'))
+        msg.attach(MIMEText(mainMsg, _subtype="text/plain"))
         for file in report.files:
             self.attachFile(msg, file)
 
         try:
-            s = smtplib.SMTP(host=parameters.get('SMTP Server'),
-                             port=parameters.get('SMTP Port'))
-            s.sendmail(msg['From'], msg['To'], msg.as_string())
+            s = smtplib.SMTP(
+                host=parameters.get("SMTP Server"), port=parameters.get("SMTP Port")
+            )
+            s.sendmail(msg["From"], msg["To"], msg.as_string())
             s.close()
         except:
-            raise ReportFailure('Unable to send message via SMTP.')
+            raise ReportFailure("Unable to send message via SMTP.")
 
         return "Message sent!"
 
+
 class BugzillaReporter(object):
     def getName(self):
-        return 'Bugzilla'
-    
+        return "Bugzilla"
+
     def getParameters(self):
-        return [TextParameter(x) for x in ['URL','Product']]
+        return [TextParameter(x) for x in ["URL", "Product"]]
 
     def fileReport(self, report, parameters):
         raise NotImplementedError
- 
+
 
 class RadarClassificationParameter(SelectionParameter):
-  def __init__(self):
-    SelectionParameter.__init__(self,"Classification",
-            [['1', 'Security'], ['2', 'Crash/Hang/Data Loss'],
-             ['3', 'Performance'], ['4', 'UI/Usability'], 
-             ['6', 'Serious Bug'], ['7', 'Other']])
-
-  def saveConfigValue(self):
-    return False
-    
-  def getValue(self,r,bugtype,getConfigOption):
-    if bugtype.find("leak") != -1:
-      return '3'
-    elif bugtype.find("dereference") != -1:
-      return '2'
-    elif bugtype.find("missing ivar release") != -1:
-      return '3'
-    else:
-      return '7'
+    def __init__(self):
+        SelectionParameter.__init__(
+            self,
+            "Classification",
+            [
+                ["1", "Security"],
+                ["2", "Crash/Hang/Data Loss"],
+                ["3", "Performance"],
+                ["4", "UI/Usability"],
+                ["6", "Serious Bug"],
+                ["7", "Other"],
+            ],
+        )
+
+    def saveConfigValue(self):
+        return False
+
+    def getValue(self, r, bugtype, getConfigOption):
+        if bugtype.find("leak") != -1:
+            return "3"
+        elif bugtype.find("dereference") != -1:
+            return "2"
+        elif bugtype.find("missing ivar release") != -1:
+            return "3"
+        else:
+            return "7"
+
 
 ###
 
+
 def getReporters():
     reporters = []
     reporters.append(EmailReporter())
     return reporters
-

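Most of the churn in Reporter.py (and in ScanView.py below) comes from one black rule: when a %-formatted expression exceeds the line limit, the format string stays in place and the argument tuple is exploded, one element per line, with a trailing comma. An illustrative before/after, not taken verbatim from the file:

    # Before:
    #     return '<tr><td class="form_clabel">%s:</td><td>%s</td></tr>'%(label,value)
    # After black:
    def row_html(label, value):
        return '<tr><td class="form_clabel">%s:</td><td>%s</td></tr>' % (
            label,
            value,
        )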
diff --git a/clang/tools/scan-view/share/ScanView.py b/clang/tools/scan-view/share/ScanView.py
index 5a5d15e85b30c..a89bf3f24fc5a 100644
--- a/clang/tools/scan-view/share/ScanView.py
+++ b/clang/tools/scan-view/share/ScanView.py
@@ -1,4 +1,5 @@
 from __future__ import print_function
+
 try:
     from http.server import HTTPServer, SimpleHTTPRequestHandler
 except ImportError:
@@ -6,6 +7,7 @@
     from SimpleHTTPServer import SimpleHTTPRequestHandler
 import os
 import sys
+
 try:
     from urlparse import urlparse
     from urllib import unquote
@@ -27,6 +29,7 @@
 import itertools
 
 import Reporter
+
 try:
     import configparser
 except ImportError:
@@ -35,19 +38,22 @@
 ###
 # Various patterns matched or replaced by server.
 
-kReportFileRE = re.compile('(.*/)?report-(.*)\\.html')
+kReportFileRE = re.compile("(.*/)?report-(.*)\\.html")
 
-kBugKeyValueRE = re.compile('<!-- BUG([^ ]*) (.*) -->')
+kBugKeyValueRE = re.compile("<!-- BUG([^ ]*) (.*) -->")
 
 #  <!-- REPORTPROBLEM file="crashes/clang_crash_ndSGF9.mi" stderr="crashes/clang_crash_ndSGF9.mi.stderr.txt" info="crashes/clang_crash_ndSGF9.mi.info" -->
 
-kReportCrashEntryRE = re.compile('<!-- REPORTPROBLEM (.*?)-->')
+kReportCrashEntryRE = re.compile("<!-- REPORTPROBLEM (.*?)-->")
 kReportCrashEntryKeyValueRE = re.compile(' ?([^=]+)="(.*?)"')
 
 kReportReplacements = []
 
 # Add custom javascript.
-kReportReplacements.append((re.compile('<!-- SUMMARYENDHEAD -->'), """\
+kReportReplacements.append(
+    (
+        re.compile("<!-- SUMMARYENDHEAD -->"),
+        """\
 <script language="javascript" type="text/javascript">
 function load(url) {
   if (window.XMLHttpRequest) {
@@ -60,22 +66,37 @@
     req.send("");
   }
 }
-</script>"""))
+</script>""",
+    )
+)
 
 # Insert additional columns.
-kReportReplacements.append((re.compile('<!-- REPORTBUGCOL -->'), 
-                            '<td></td><td></td>'))
+kReportReplacements.append((re.compile("<!-- REPORTBUGCOL -->"), "<td></td><td></td>"))
 
 # Insert report bug and open file links.
-kReportReplacements.append((re.compile('<!-- REPORTBUG id="report-(.*)\\.html" -->'),
-                            ('<td class="Button"><a href="report/\\1">Report Bug</a></td>' + 
-                             '<td class="Button"><a href="javascript:load(\'open/\\1\')">Open File</a></td>')))
-
-kReportReplacements.append((re.compile('<!-- REPORTHEADER -->'),
-                                       '<h3><a href="/">Summary</a> > Report %(report)s</h3>'))
-
-kReportReplacements.append((re.compile('<!-- REPORTSUMMARYEXTRA -->'),
-                            '<td class="Button"><a href="report/%(report)s">Report Bug</a></td>'))
+kReportReplacements.append(
+    (
+        re.compile('<!-- REPORTBUG id="report-(.*)\\.html" -->'),
+        (
+            '<td class="Button"><a href="report/\\1">Report Bug</a></td>'
+            + '<td class="Button"><a href="javascript:load(\'open/\\1\')">Open File</a></td>'
+        ),
+    )
+)
+
+kReportReplacements.append(
+    (
+        re.compile("<!-- REPORTHEADER -->"),
+        '<h3><a href="/">Summary</a> > Report %(report)s</h3>',
+    )
+)
+
+kReportReplacements.append(
+    (
+        re.compile("<!-- REPORTSUMMARYEXTRA -->"),
+        '<td class="Button"><a href="report/%(report)s">Report Bug</a></td>',
+    )
+)
 
 # Insert report crashes link.
 
@@ -83,15 +104,15 @@
 # be enabled. Also the radar reporter needs to be fixed to report
 # multiple files.
 
-#kReportReplacements.append((re.compile('<!-- REPORTCRASHES -->'),
+# kReportReplacements.append((re.compile('<!-- REPORTCRASHES -->'),
 #                            '<br>These files will automatically be attached to ' +
 #                            'reports filed here: <a href="report_crashes">Report Crashes</a>.'))
 
 ###
 # Other simple parameters
 
-kShare = posixpath.join(posixpath.dirname(__file__), '../share/scan-view')
-kConfigPath = os.path.expanduser('~/.scanview.cfg')
+kShare = posixpath.join(posixpath.dirname(__file__), "../share/scan-view")
+kConfigPath = os.path.expanduser("~/.scanview.cfg")
 
 ###
 
@@ -99,6 +120,7 @@
 
 __all__ = ["create_server"]
 
+
 class ReporterThread(threading.Thread):
     def __init__(self, report, reporter, parameters, server):
         threading.Thread.__init__(self)
@@ -113,28 +135,32 @@ def run(self):
         result = None
         try:
             if self.server.options.debug:
-                print("%s: SERVER: submitting bug."%(sys.argv[0],), file=sys.stderr)
+                print("%s: SERVER: submitting bug." % (sys.argv[0],), file=sys.stderr)
             self.status = self.reporter.fileReport(self.report, self.parameters)
             self.success = True
             time.sleep(3)
             if self.server.options.debug:
-                print("%s: SERVER: submission complete."%(sys.argv[0],), file=sys.stderr)
+                print(
+                    "%s: SERVER: submission complete." % (sys.argv[0],), file=sys.stderr
+                )
         except Reporter.ReportFailure as e:
             self.status = e.value
         except Exception as e:
             s = StringIO()
             import traceback
-            print('<b>Unhandled Exception</b><br><pre>', file=s)
+
+            print("<b>Unhandled Exception</b><br><pre>", file=s)
             traceback.print_exc(file=s)
-            print('</pre>', file=s)
+            print("</pre>", file=s)
             self.status = s.getvalue()
 
+
 class ScanViewServer(HTTPServer):
     def __init__(self, address, handler, root, reporters, options):
         HTTPServer.__init__(self, address, handler)
         self.root = root
         self.reporters = reporters
-        self.options = options        
+        self.options = options
         self.halted = False
         self.config = None
         self.load_config()
@@ -143,12 +169,12 @@ def load_config(self):
         self.config = configparser.RawConfigParser()
 
         # Add defaults
-        self.config.add_section('ScanView')
+        self.config.add_section("ScanView")
         for r in self.reporters:
             self.config.add_section(r.getName())
             for p in r.getParameters():
-              if p.saveConfigValue():
-                self.config.set(r.getName(), p.getName(), '')
+                if p.saveConfigValue():
+                    self.config.set(r.getName(), p.getName(), "")
 
         # Ignore parse errors
         try:
@@ -158,17 +184,18 @@ def load_config(self):
 
         # Save on exit
         import atexit
+
         atexit.register(lambda: self.save_config())
-        
+
     def save_config(self):
         # Ignore errors (only called on exit).
         try:
-            f = open(kConfigPath,'w')
+            f = open(kConfigPath, "w")
             self.config.write(f)
             f.close()
         except:
             pass
-        
+
     def halt(self):
         self.halted = True
         if self.options.debug:
@@ -181,11 +208,12 @@ def serve_forever(self):
             try:
                 self.handle_request()
             except OSError as e:
-                print('OSError',e.errno)
+                print("OSError", e.errno)
 
     def finish_request(self, request, client_address):
         if self.options.autoReload:
             import ScanView
+
             self.RequestHandlerClass = reload(ScanView).ScanViewRequestHandler
         HTTPServer.finish_request(self, request, client_address)
 
@@ -194,22 +222,26 @@ def handle_error(self, request, client_address):
         info = sys.exc_info()
         if info and isinstance(info[1], socket.error):
             if self.options.debug > 1:
-                print("%s: SERVER: ignored socket error." % (sys.argv[0],), file=sys.stderr)
+                print(
+                    "%s: SERVER: ignored socket error." % (sys.argv[0],),
+                    file=sys.stderr,
+                )
             return
         HTTPServer.handle_error(self, request, client_address)
 
+
 # Borrowed from Quixote, with simplifications.
 def parse_query(qs, fields=None):
     if fields is None:
         fields = {}
-    for chunk in (_f for _f in qs.split('&') if _f):
-        if '=' not in chunk:
+    for chunk in (_f for _f in qs.split("&") if _f):
+        if "=" not in chunk:
             name = chunk
-            value = ''
+            value = ""
         else:
-            name, value = chunk.split('=', 1)
-        name = unquote(name.replace('+', ' '))
-        value = unquote(value.replace('+', ' '))
+            name, value = chunk.split("=", 1)
+        name = unquote(name.replace("+", " "))
+        value = unquote(value.replace("+", " "))
         item = fields.get(name)
         if item is None:
             fields[name] = [value]
@@ -217,6 +249,7 @@ def parse_query(qs, fields=None):
             item.append(value)
     return fields
 
+
 class ScanViewRequestHandler(SimpleHTTPRequestHandler):
     server_version = "ScanViewServer/" + __version__
     dynamic_mtime = time.time()
@@ -226,17 +259,17 @@ def do_HEAD(self):
             SimpleHTTPRequestHandler.do_HEAD(self)
         except Exception as e:
             self.handle_exception(e)
-            
+
     def do_GET(self):
         try:
             SimpleHTTPRequestHandler.do_GET(self)
         except Exception as e:
             self.handle_exception(e)
-            
+
     def do_POST(self):
         """Serve a POST request."""
         try:
-            length = self.headers.getheader('content-length') or "0"
+            length = self.headers.getheader("content-length") or "0"
             try:
                 length = int(length)
             except:
@@ -248,46 +281,52 @@ def do_POST(self):
                 self.copyfile(f, self.wfile)
                 f.close()
         except Exception as e:
-            self.handle_exception(e)            
+            self.handle_exception(e)
 
     def log_message(self, format, *args):
         if self.server.options.debug:
-            sys.stderr.write("%s: SERVER: %s - - [%s] %s\n" %
-                             (sys.argv[0],
-                              self.address_string(),
-                              self.log_date_time_string(),
-                              format%args))
+            sys.stderr.write(
+                "%s: SERVER: %s - - [%s] %s\n"
+                % (
+                    sys.argv[0],
+                    self.address_string(),
+                    self.log_date_time_string(),
+                    format % args,
+                )
+            )
 
     def load_report(self, report):
-        path = os.path.join(self.server.root, 'report-%s.html'%report)
+        path = os.path.join(self.server.root, "report-%s.html" % report)
         data = open(path).read()
         keys = {}
         for item in kBugKeyValueRE.finditer(data):
-            k,v = item.groups()
+            k, v = item.groups()
             keys[k] = v
         return keys
 
     def load_crashes(self):
-        path = posixpath.join(self.server.root, 'index.html')
+        path = posixpath.join(self.server.root, "index.html")
         data = open(path).read()
         problems = []
         for item in kReportCrashEntryRE.finditer(data):
             fieldData = item.group(1)
-            fields = dict([i.groups() for i in 
-                           kReportCrashEntryKeyValueRE.finditer(fieldData)])
+            fields = dict(
+                [i.groups() for i in kReportCrashEntryKeyValueRE.finditer(fieldData)]
+            )
             problems.append(fields)
         return problems
 
     def handle_exception(self, exc):
         import traceback
+
         s = StringIO()
         print("INTERNAL ERROR\n", file=s)
         traceback.print_exc(file=s)
-        f = self.send_string(s.getvalue(), 'text/plain')
+        f = self.send_string(s.getvalue(), "text/plain")
         if f:
             self.copyfile(f, self.wfile)
-            f.close()        
-            
+            f.close()
+
     def get_scalar_field(self, name):
         if name in self.fields:
             return self.fields[name][0]
@@ -295,20 +334,20 @@ def get_scalar_field(self, name):
             return None
 
     def submit_bug(self, c):
-        title = self.get_scalar_field('title')
-        description = self.get_scalar_field('description')
-        report = self.get_scalar_field('report')
-        reporterIndex = self.get_scalar_field('reporter')
+        title = self.get_scalar_field("title")
+        description = self.get_scalar_field("description")
+        report = self.get_scalar_field("report")
+        reporterIndex = self.get_scalar_field("reporter")
         files = []
-        for fileID in self.fields.get('files',[]):
+        for fileID in self.fields.get("files", []):
             try:
                 i = int(fileID)
             except:
                 i = None
-            if i is None or i<0 or i>=len(c.files):
-                return (False, 'Invalid file ID')
+            if i is None or i < 0 or i >= len(c.files):
+                return (False, "Invalid file ID")
             files.append(c.files[i])
-        
+
         if not title:
             return (False, "Missing title.")
         if not description:
@@ -317,25 +356,27 @@ def submit_bug(self, c):
             reporterIndex = int(reporterIndex)
         except:
             return (False, "Invalid report method.")
-        
+
         # Get the reporter and parameters.
         reporter = self.server.reporters[reporterIndex]
         parameters = {}
         for o in reporter.getParameters():
-            name = '%s_%s'%(reporter.getName(),o.getName())
+            name = "%s_%s" % (reporter.getName(), o.getName())
             if name not in self.fields:
-                return (False, 
-                        'Missing field "%s" for %s report method.'%(name,
-                                                                    reporter.getName()))
+                return (
+                    False,
+                    'Missing field "%s" for %s report method.'
+                    % (name, reporter.getName()),
+                )
             parameters[o.getName()] = self.get_scalar_field(name)
 
         # Update config defaults.
-        if report != 'None':
-            self.server.config.set('ScanView', 'reporter', reporterIndex)
+        if report != "None":
+            self.server.config.set("ScanView", "reporter", reporterIndex)
             for o in reporter.getParameters():
-              if o.saveConfigValue():
-                name = o.getName()
-                self.server.config.set(reporter.getName(), name, parameters[name])
+                if o.saveConfigValue():
+                    name = o.getName()
+                    self.server.config.set(reporter.getName(), name, parameters[name])
 
         # Create the report.
         bug = Reporter.BugReport(title, description, files)
@@ -346,35 +387,38 @@ def submit_bug(self, c):
 
         # Wait for thread to die...
         while t.isAlive():
-            time.sleep(.25)
+            time.sleep(0.25)
         submitStatus = t.status
 
         return (t.success, t.status)
 
     def send_report_submit(self):
-        report = self.get_scalar_field('report')
+        report = self.get_scalar_field("report")
         c = self.get_report_context(report)
         if c.reportSource is None:
             reportingFor = "Report Crashes > "
-            fileBug = """\
-<a href="/report_crashes">File Bug</a> > """%locals()
+            fileBug = (
+                """\
+<a href="/report_crashes">File Bug</a> > """
+                % locals()
+            )
         else:
-            reportingFor = '<a href="/%s">Report %s</a> > ' % (c.reportSource, 
-                                                                   report)
+            reportingFor = '<a href="/%s">Report %s</a> > ' % (c.reportSource, report)
             fileBug = '<a href="/report/%s">File Bug</a> > ' % report
-        title = self.get_scalar_field('title')
-        description = self.get_scalar_field('description')
+        title = self.get_scalar_field("title")
+        description = self.get_scalar_field("description")
 
-        res,message = self.submit_bug(c)
+        res, message = self.submit_bug(c)
 
         if res:
-            statusClass = 'SubmitOk'
-            statusName = 'Succeeded'
+            statusClass = "SubmitOk"
+            statusName = "Succeeded"
         else:
-            statusClass = 'SubmitFail'
-            statusName = 'Failed'
+            statusClass = "SubmitFail"
+            statusName = "Failed"
 
-        result = """
+        result = (
+            """
 <head>
   <title>Bug Submission</title>
   <link rel="stylesheet" type="text/css" href="/scanview.css" />
@@ -412,23 +456,25 @@ def send_report_submit(self):
 <hr>
 <a href="/">Return to Summary</a>
 </body>
-</html>"""%locals()
+</html>"""
+            % locals()
+        )
         return self.send_string(result)
 
     def send_open_report(self, report):
         try:
             keys = self.load_report(report)
         except IOError:
-            return self.send_error(400, 'Invalid report.')
+            return self.send_error(400, "Invalid report.")
 
-        file = keys.get('FILE')
+        file = keys.get("FILE")
         if not file or not posixpath.exists(file):
             return self.send_error(400, 'File does not exist: "%s"' % file)
 
         import startfile
+
         if self.server.options.debug:
-            print('%s: SERVER: opening "%s"'%(sys.argv[0],
-                                                            file), file=sys.stderr)
+            print('%s: SERVER: opening "%s"' % (sys.argv[0], file), file=sys.stderr)
 
         status = startfile.open(file)
         if status:
@@ -436,28 +482,30 @@ def send_open_report(self, report):
         else:
             res = 'Open failed: "%s"' % file
 
-        return self.send_string(res, 'text/plain')
+        return self.send_string(res, "text/plain")
 
     def get_report_context(self, report):
         class Context(object):
             pass
-        if report is None or report == 'None':
+
+        if report is None or report == "None":
             data = self.load_crashes()
             # Don't allow empty reports.
             if not data:
-                raise ValueError('No crashes detected!')
+                raise ValueError("No crashes detected!")
             c = Context()
-            c.title = 'clang static analyzer failures'
+            c.title = "clang static analyzer failures"
 
             stderrSummary = ""
             for item in data:
-                if 'stderr' in item:
-                    path = posixpath.join(self.server.root, item['stderr'])
+                if "stderr" in item:
+                    path = posixpath.join(self.server.root, item["stderr"])
                     if os.path.exists(path):
                         lns = itertools.islice(open(path), 0, 10)
-                        stderrSummary += '%s\n--\n%s' % (item.get('src', 
-                                                                  '<unknown>'),
-                                                         ''.join(lns))
+                        stderrSummary += "%s\n--\n%s" % (
+                            item.get("src", "<unknown>"),
+                            "".join(lns),
+                        )
 
             c.description = """\
 The clang static analyzer failed on these inputs:
@@ -466,52 +514,59 @@ class Context(object):
 STDERR Summary
 --------------
 %s
-""" % ('\n'.join([item.get('src','<unknown>') for item in data]),
-       stderrSummary)
+""" % (
+                "\n".join([item.get("src", "<unknown>") for item in data]),
+                stderrSummary,
+            )
             c.reportSource = None
             c.navMarkup = "Report Crashes > "
             c.files = []
-            for item in data:                
-                c.files.append(item.get('src',''))
-                c.files.append(posixpath.join(self.server.root,
-                                              item.get('file','')))
-                c.files.append(posixpath.join(self.server.root,
-                                              item.get('clangfile','')))
-                c.files.append(posixpath.join(self.server.root,
-                                              item.get('stderr','')))
-                c.files.append(posixpath.join(self.server.root,
-                                              item.get('info','')))
+            for item in data:
+                c.files.append(item.get("src", ""))
+                c.files.append(posixpath.join(self.server.root, item.get("file", "")))
+                c.files.append(
+                    posixpath.join(self.server.root, item.get("clangfile", ""))
+                )
+                c.files.append(posixpath.join(self.server.root, item.get("stderr", "")))
+                c.files.append(posixpath.join(self.server.root, item.get("info", "")))
             # Just in case something failed, ignore files which don't
             # exist.
-            c.files = [f for f in c.files
-                       if os.path.exists(f) and os.path.isfile(f)]
+            c.files = [f for f in c.files if os.path.exists(f) and os.path.isfile(f)]
         else:
-            # Check that this is a valid report.            
-            path = posixpath.join(self.server.root, 'report-%s.html' % report)
+            # Check that this is a valid report.
+            path = posixpath.join(self.server.root, "report-%s.html" % report)
             if not posixpath.exists(path):
-                raise ValueError('Invalid report ID')
+                raise ValueError("Invalid report ID")
             keys = self.load_report(report)
             c = Context()
-            c.title = keys.get('DESC','clang error (unrecognized')
+            c.title = keys.get("DESC", "clang error (unrecognized")
             c.description = """\
 Bug reported by the clang static analyzer.
 
 Description: %s
 File: %s
 Line: %s
-"""%(c.title, keys.get('FILE','<unknown>'), keys.get('LINE', '<unknown>'))
-            c.reportSource = 'report-%s.html' % report
-            c.navMarkup = """<a href="/%s">Report %s</a> > """ % (c.reportSource,
-                                                                  report)
+""" % (
+                c.title,
+                keys.get("FILE", "<unknown>"),
+                keys.get("LINE", "<unknown>"),
+            )
+            c.reportSource = "report-%s.html" % report
+            c.navMarkup = """<a href="/%s">Report %s</a> > """ % (
+                c.reportSource,
+                report,
+            )
 
             c.files = [path]
         return c
 
     def send_report(self, report, configOverrides=None):
-        def getConfigOption(section, field):            
-            if (configOverrides is not None and
-                section in configOverrides and
-                field in configOverrides[section]):
+        def getConfigOption(section, field):
+            if (
+                configOverrides is not None
+                and section in configOverrides
+                and field in configOverrides[section]
+            ):
                 return configOverrides[section][field]
             return self.server.config.get(section, field)
 
@@ -522,7 +577,7 @@ def getConfigOption(section, field):
             return self.send_error(400, e.message)
 
         title = c.title
-        description= c.description
+        description = c.description
         reportingFor = c.navMarkup
         if c.reportSource is None:
             extraIFrame = ""
@@ -531,25 +586,33 @@ def getConfigOption(section, field):
 <iframe src="/%s" width="100%%" height="40%%"
         scrolling="auto" frameborder="1">
   <a href="/%s">View Bug Report</a>
-</iframe>""" % (c.reportSource, c.reportSource)
+</iframe>""" % (
+                c.reportSource,
+                c.reportSource,
+            )
 
         reporterSelections = []
         reporterOptions = []
 
         try:
-            active = int(getConfigOption('ScanView','reporter'))
+            active = int(getConfigOption("ScanView", "reporter"))
         except:
             active = 0
-        for i,r in enumerate(self.server.reporters):
-            selected = (i == active)
+        for i, r in enumerate(self.server.reporters):
+            selected = i == active
             if selected:
-                selectedStr = ' selected'
+                selectedStr = " selected"
             else:
-                selectedStr = ''
-            reporterSelections.append('<option value="%d"%s>%s</option>'%(i,selectedStr,r.getName()))
-            options = '\n'.join([ o.getHTML(r,title,getConfigOption) for o in r.getParameters()])
-            display = ('none','')[selected]
-            reporterOptions.append("""\
+                selectedStr = ""
+            reporterSelections.append(
+                '<option value="%d"%s>%s</option>' % (i, selectedStr, r.getName())
+            )
+            options = "\n".join(
+                [o.getHTML(r, title, getConfigOption) for o in r.getParameters()]
+            )
+            display = ("none", "")[selected]
+            reporterOptions.append(
+                """\
 <tr id="%sReporterOptions" style="display:%s">
   <td class="form_label">%s Options</td>
   <td class="form_value">
@@ -558,15 +621,25 @@ def getConfigOption(section, field):
     </table>
   </td>
 </tr>
-"""%(r.getName(),display,r.getName(),options))
-        reporterSelections = '\n'.join(reporterSelections)
-        reporterOptionsDivs = '\n'.join(reporterOptions)
-        reportersArray = '[%s]'%(','.join([repr(r.getName()) for r in self.server.reporters]))
+"""
+                % (r.getName(), display, r.getName(), options)
+            )
+        reporterSelections = "\n".join(reporterSelections)
+        reporterOptionsDivs = "\n".join(reporterOptions)
+        reportersArray = "[%s]" % (
+            ",".join([repr(r.getName()) for r in self.server.reporters])
+        )
 
         if c.files:
             fieldSize = min(5, len(c.files))
-            attachFileOptions = '\n'.join(["""\
-<option value="%d" selected>%s</option>""" % (i,v) for i,v in enumerate(c.files)])
+            attachFileOptions = "\n".join(
+                [
+                    """\
+<option value="%d" selected>%s</option>"""
+                    % (i, v)
+                    for i, v in enumerate(c.files)
+                ]
+            )
             attachFileRow = """\
 <tr>
   <td class="form_label">Attach:</td>
@@ -576,11 +649,15 @@ def getConfigOption(section, field):
 </select>
   </td>
 </tr>
-""" % (min(5, len(c.files)), attachFileOptions)
+""" % (
+                min(5, len(c.files)),
+                attachFileOptions,
+            )
         else:
             attachFileRow = ""
 
-        result = """<html>
+        result = (
+            """<html>
 <head>
   <title>File Bug</title>
   <link rel="stylesheet" type="text/css" href="/scanview.css" />
@@ -651,14 +728,15 @@ def getConfigOption(section, field):
 %(extraIFrame)s
 
 </body>
-</html>"""%locals()
+</html>"""
+            % locals()
+        )
 
         return self.send_string(result)
 
     def send_head(self, fields=None):
-        if (self.server.options.onlyServeLocal and
-            self.client_address[0] != '127.0.0.1'):
-            return self.send_error(401, 'Unauthorized host.')
+        if self.server.options.onlyServeLocal and self.client_address[0] != "127.0.0.1":
+            return self.send_error(401, "Unauthorized host.")
 
         if fields is None:
             fields = {}
@@ -669,46 +747,45 @@ def send_head(self, fields=None):
         path = posixpath.normpath(unquote(o.path))
 
         # Split the components and strip the root prefix.
-        components = path.split('/')[1:]
-        
+        components = path.split("/")[1:]
+
         # Special case some top-level entries.
         if components:
             name = components[0]
-            if len(components)==2:
-                if name=='report':
+            if len(components) == 2:
+                if name == "report":
                     return self.send_report(components[1])
-                elif name=='open':
+                elif name == "open":
                     return self.send_open_report(components[1])
-            elif len(components)==1:
-                if name=='quit':
+            elif len(components) == 1:
+                if name == "quit":
                     self.server.halt()
-                    return self.send_string('Goodbye.', 'text/plain')
-                elif name=='report_submit':
+                    return self.send_string("Goodbye.", "text/plain")
+                elif name == "report_submit":
                     return self.send_report_submit()
-                elif name=='report_crashes':
-                    overrides = { 'ScanView' : {},
-                                  'Radar' : {},
-                                  'Email' : {} }
-                    for i,r in enumerate(self.server.reporters):
-                        if r.getName() == 'Radar':
-                            overrides['ScanView']['reporter'] = i
+                elif name == "report_crashes":
+                    overrides = {"ScanView": {}, "Radar": {}, "Email": {}}
+                    for i, r in enumerate(self.server.reporters):
+                        if r.getName() == "Radar":
+                            overrides["ScanView"]["reporter"] = i
                             break
-                    overrides['Radar']['Component'] = 'llvm - checker'
-                    overrides['Radar']['Component Version'] = 'X'
+                    overrides["Radar"]["Component"] = "llvm - checker"
+                    overrides["Radar"]["Component Version"] = "X"
                     return self.send_report(None, overrides)
-                elif name=='favicon.ico':
-                    return self.send_path(posixpath.join(kShare,'bugcatcher.ico'))
-        
+                elif name == "favicon.ico":
+                    return self.send_path(posixpath.join(kShare, "bugcatcher.ico"))
+
         # Match directory entries.
-        if components[-1] == '':
-            components[-1] = 'index.html'
+        if components[-1] == "":
+            components[-1] = "index.html"
 
-        relpath = '/'.join(components)
+        relpath = "/".join(components)
         path = posixpath.join(self.server.root, relpath)
 
         if self.server.options.debug > 1:
-            print('%s: SERVER: sending path "%s"'%(sys.argv[0],
-                                                                 path), file=sys.stderr)
+            print(
+                '%s: SERVER: sending path "%s"' % (sys.argv[0], path), file=sys.stderr
+            )
         return self.send_path(path)
 
     def send_404(self):
@@ -719,14 +796,14 @@ def send_path(self, path):
         # If the requested path is outside the root directory, do not open it
         rel = os.path.abspath(path)
         if not rel.startswith(os.path.abspath(self.server.root)):
-          return self.send_404()
-        
+            return self.send_404()
+
         ctype = self.guess_type(path)
-        if ctype.startswith('text/'):
+        if ctype.startswith("text/"):
             # Patch file instead
             return self.send_patched_file(path, ctype)
         else:
-            mode = 'rb'
+            mode = "rb"
         try:
             f = open(path, mode)
         except IOError:
@@ -743,8 +820,8 @@ def send_file(self, f, ctype):
         self.end_headers()
         return f
 
-    def send_string(self, s, ctype='text/html', headers=True, mtime=None):
-        encoded_s = s.encode('utf-8')
+    def send_string(self, s, ctype="text/html", headers=True, mtime=None):
+        encoded_s = s.encode("utf-8")
         if headers:
             self.send_response(200)
             self.send_header("Content-type", ctype)
@@ -758,18 +835,18 @@ def send_string(self, s, ctype='text/html', headers=True, mtime=None):
     def send_patched_file(self, path, ctype):
         # Allow a very limited set of variables. This is pretty gross.
         variables = {}
-        variables['report'] = ''
+        variables["report"] = ""
         m = kReportFileRE.match(path)
         if m:
-            variables['report'] = m.group(2)
+            variables["report"] = m.group(2)
 
         try:
-            f = open(path,'rb')
+            f = open(path, "rb")
         except IOError:
             return self.send_404()
         fs = os.fstat(f.fileno())
-        data = f.read().decode('utf-8')
-        for a,b in kReportReplacements:
+        data = f.read().decode("utf-8")
+        for a, b in kReportReplacements:
             data = a.sub(b % variables, data)
         return self.send_string(data, ctype, mtime=fs.st_mtime)
 
@@ -779,7 +856,4 @@ def create_server(address, options, root):
 
     reporters = Reporter.getReporters()
 
-    return ScanViewServer(address, ScanViewRequestHandler,
-                          root,
-                          reporters,
-                          options)
+    return ScanViewServer(address, ScanViewRequestHandler, root, reporters, options)

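The hunks above are representative of the whole reformat: black
normalizes string quotes to double quotes (unless the string itself
contains one), explodes over-long calls to one argument per line with a
trailing comma, and wraps %-format tuples in parentheses instead of
hand-aligned continuations. A small standalone sketch of those
conventions, with hypothetical names, not code from this commit:

import posixpath
import sys

# Hypothetical inputs, for illustration only.
root, report_id = "/tmp/scans", "7"

# Quote normalization: single quotes become double quotes, except when
# the string itself contains a double quote (as in the print below).
path = posixpath.join(root, "report-%s.html" % report_id)
print('%s: loading "%s"' % (sys.argv[0], path), file=sys.stderr)

# Calls and literals past the 88-column limit are exploded one element
# per line; the trailing ("magic") comma keeps them exploded on later runs.
summary = "%s\n--\n%s" % (
    path,
    "first ten lines of stderr would go here",
)
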
diff --git a/clang/tools/scan-view/share/startfile.py b/clang/tools/scan-view/share/startfile.py
index 9eb548bc4329b..d63e69280e90d 100644
--- a/clang/tools/scan-view/share/startfile.py
+++ b/clang/tools/scan-view/share/startfile.py
@@ -5,8 +5,8 @@
 manner. Modified from http://code.activestate.com/recipes/511443/.
 """
 
-__version__ = '1.1x'
-__all__ = ['open']
+__version__ = "1.1x"
+__all__ = ["open"]
 
 import os
 import sys
@@ -18,7 +18,7 @@
 
 
 class BaseController(object):
-    '''Base class for open program controllers.'''
+    """Base class for open program controllers."""
 
     def __init__(self, name):
         self.name = name
@@ -28,14 +28,14 @@ def open(self, filename):
 
 
 class Controller(BaseController):
-    '''Controller for a generic open program.'''
+    """Controller for a generic open program."""
 
     def __init__(self, *args):
         super(Controller, self).__init__(os.path.basename(args[0]))
         self.args = list(args)
 
     def _invoke(self, cmdline):
-        if sys.platform[:3] == 'win':
+        if sys.platform[:3] == "win":
             closefds = False
             startupinfo = subprocess.STARTUPINFO()
             startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
@@ -43,9 +43,12 @@ def _invoke(self, cmdline):
             closefds = True
             startupinfo = None
 
-        if (os.environ.get('DISPLAY') or sys.platform[:3] == 'win' or
-                                                    sys.platform == 'darwin'):
-            inout = file(os.devnull, 'r+')
+        if (
+            os.environ.get("DISPLAY")
+            or sys.platform[:3] == "win"
+            or sys.platform == "darwin"
+        ):
+            inout = file(os.devnull, "r+")
         else:
             # for TTY programs, we need stdin/out
             inout = None
@@ -53,19 +56,25 @@ def _invoke(self, cmdline):
         # if possible, put the child process in a separate process group,
         # so keyboard interrupts don't affect the child process as well as
         # Python
-        setsid = getattr(os, 'setsid', None)
+        setsid = getattr(os, "setsid", None)
         if not setsid:
-            setsid = getattr(os, 'setpgrp', None)
-
-        pipe = subprocess.Popen(cmdline, stdin=inout, stdout=inout,
-                                stderr=inout, close_fds=closefds,
-                                preexec_fn=setsid, startupinfo=startupinfo)
+            setsid = getattr(os, "setpgrp", None)
+
+        pipe = subprocess.Popen(
+            cmdline,
+            stdin=inout,
+            stdout=inout,
+            stderr=inout,
+            close_fds=closefds,
+            preexec_fn=setsid,
+            startupinfo=startupinfo,
+        )
 
         # It is assumed that tools of this kind (gnome-open, kfmclient,
         # exo-open, xdg-open and open for OSX) exit immediately after
         # launching the specific application.
         returncode = pipe.wait()
-        if hasattr(self, 'fixreturncode'):
+        if hasattr(self, "fixreturncode"):
             returncode = self.fixreturncode(returncode)
         return not returncode
 
@@ -82,10 +91,10 @@ def open(self, filename):
 
 
 # Platform support for Windows
-if sys.platform[:3] == 'win':
+if sys.platform[:3] == "win":
 
     class Start(BaseController):
-        '''Controller for the win32 start program through os.startfile.'''
+        """Controller for the win32 start program through os.startfile."""
 
         def open(self, filename):
             try:
@@ -97,14 +106,14 @@ def open(self, filename):
             else:
                 return True
 
-    _controllers['windows-default'] = Start('start')
-    _open = _controllers['windows-default'].open
+    _controllers["windows-default"] = Start("start")
+    _open = _controllers["windows-default"].open
 
 
 # Platform support for MacOS
-elif sys.platform == 'darwin':
-    _controllers['open']= Controller('open')
-    _open = _controllers['open'].open
+elif sys.platform == "darwin":
+    _controllers["open"] = Controller("open")
+    _open = _controllers["open"].open
 
 
 # Platform support for Unix
@@ -119,20 +128,20 @@ def open(self, filename):
     from webbrowser import _iscommand
 
     class KfmClient(Controller):
-        '''Controller for the KDE kfmclient program.'''
+        """Controller for the KDE kfmclient program."""
 
-        def __init__(self, kfmclient='kfmclient'):
-            super(KfmClient, self).__init__(kfmclient, 'exec')
+        def __init__(self, kfmclient="kfmclient"):
+            super(KfmClient, self).__init__(kfmclient, "exec")
             self.kde_version = self.detect_kde_version()
 
         def detect_kde_version(self):
             kde_version = None
             try:
-                info = getoutput('kde-config --version')
+                info = getoutput("kde-config --version")
 
                 for line in info.splitlines():
-                    if line.startswith('KDE'):
-                        kde_version = line.split(':')[-1].strip()
+                    if line.startswith("KDE"):
+                        kde_version = line.split(":")[-1].strip()
                         break
             except (OSError, RuntimeError):
                 pass
@@ -140,49 +149,48 @@ def detect_kde_version(self):
             return kde_version
 
         def fixreturncode(self, returncode):
-            if returncode is not None and self.kde_version > '3.5.4':
+            if returncode is not None and self.kde_version > "3.5.4":
                 return returncode
             else:
                 return os.EX_OK
 
     def detect_desktop_environment():
-        '''Checks for known desktop environments
+        """Checks for known desktop environments
 
         Return the desktop environment's name, lowercase (kde, gnome, xfce)
         or "generic"
 
-        '''
+        """
 
-        desktop_environment = 'generic'
+        desktop_environment = "generic"
 
-        if os.environ.get('KDE_FULL_SESSION') == 'true':
-            desktop_environment = 'kde'
-        elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
-            desktop_environment = 'gnome'
+        if os.environ.get("KDE_FULL_SESSION") == "true":
+            desktop_environment = "kde"
+        elif os.environ.get("GNOME_DESKTOP_SESSION_ID"):
+            desktop_environment = "gnome"
         else:
             try:
-                info = getoutput('xprop -root _DT_SAVE_MODE')
+                info = getoutput("xprop -root _DT_SAVE_MODE")
                 if ' = "xfce4"' in info:
-                    desktop_environment = 'xfce'
+                    desktop_environment = "xfce"
             except (OSError, RuntimeError):
                 pass
 
         return desktop_environment
 
-
     def register_X_controllers():
-        if _iscommand('kfmclient'):
-            _controllers['kde-open'] = KfmClient()
+        if _iscommand("kfmclient"):
+            _controllers["kde-open"] = KfmClient()
 
-        for command in ('gnome-open', 'exo-open', 'xdg-open'):
+        for command in ("gnome-open", "exo-open", "xdg-open"):
             if _iscommand(command):
                 _controllers[command] = Controller(command)
 
     def get():
         controllers_map = {
-            'gnome': 'gnome-open',
-            'kde': 'kde-open',
-            'xfce': 'exo-open',
+            "gnome": "gnome-open",
+            "kde": "kde-open",
+            "xfce": "exo-open",
         }
 
         desktop_environment = detect_desktop_environment()
@@ -192,18 +200,17 @@ def get():
             return _controllers[controller_name].open
 
         except KeyError:
-            if 'xdg-open' in _controllers:
-                return _controllers['xdg-open'].open
+            if "xdg-open" in _controllers:
+                return _controllers["xdg-open"].open
             else:
                 return webbrowser.open
 
-
     if os.environ.get("DISPLAY"):
         register_X_controllers()
     _open = get()
 
 
 def open(filename):
-    '''Open a file or a URL in the registered default application.'''
+    """Open a file or a URL in the registered default application."""
 
     return _open(filename)

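startfile.py above is built around a small dispatch table: each
platform branch registers launcher controllers keyed by name, the
module-level _open gets bound to the best available one, and generic
Unix desktops fall back to webbrowser.open. A stripped-down sketch of
that pattern, using hypothetical names (launchers, open_path) and only
the standard library:

import subprocess
import sys
import webbrowser

launchers = {}

if sys.platform == "darwin":
    # Stand-in for Controller("open"): a zero exit status counts as success.
    launchers["default"] = lambda filename: subprocess.call(["open", filename]) == 0
elif sys.platform.startswith("linux"):
    # xdg-open stands in for the gnome-open/exo-open/kfmclient controllers.
    launchers["default"] = lambda filename: subprocess.call(["xdg-open", filename]) == 0

def open_path(filename):
    """Open a file or URL with the registered launcher, else the browser."""
    launcher = launchers.get("default", webbrowser.open)
    return launcher(filename)
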
diff --git a/clang/utils/ABITest/ABITestGen.py b/clang/utils/ABITest/ABITestGen.py
index 93a6de93068d4..119b3299ffa60 100755
--- a/clang/utils/ABITest/ABITestGen.py
+++ b/clang/utils/ABITest/ABITestGen.py
@@ -11,10 +11,17 @@
 
 ####
 
+
 class TypePrinter(object):
-    def __init__(self, output, outputHeader=None, 
-                 outputTests=None, outputDriver=None,
-                 headerName=None, info=None):
+    def __init__(
+        self,
+        output,
+        outputHeader=None,
+        outputTests=None,
+        outputDriver=None,
+        headerName=None,
+        info=None,
+    ):
         self.output = output
         self.outputHeader = outputHeader
         self.outputTests = outputTests
@@ -27,44 +34,49 @@ def __init__(self, output, outputHeader=None,
         self.declarations = set()
 
         if info:
-            for f in (self.output,self.outputHeader,self.outputTests,self.outputDriver):
+            for f in (
+                self.output,
+                self.outputHeader,
+                self.outputTests,
+                self.outputDriver,
+            ):
                 if f:
                     print(info, file=f)
 
         if self.writeBody:
-            print('#include <stdio.h>\n', file=self.output)
+            print("#include <stdio.h>\n", file=self.output)
             if self.outputTests:
-                print('#include <stdio.h>', file=self.outputTests)
-                print('#include <string.h>', file=self.outputTests)
-                print('#include <assert.h>\n', file=self.outputTests)
+                print("#include <stdio.h>", file=self.outputTests)
+                print("#include <string.h>", file=self.outputTests)
+                print("#include <assert.h>\n", file=self.outputTests)
 
         if headerName:
-            for f in (self.output,self.outputTests,self.outputDriver):
+            for f in (self.output, self.outputTests, self.outputDriver):
                 if f is not None:
-                    print('#include "%s"\n'%(headerName,), file=f)
-        
+                    print('#include "%s"\n' % (headerName,), file=f)
+
         if self.outputDriver:
-            print('#include <stdio.h>', file=self.outputDriver)
-            print('#include <stdlib.h>\n', file=self.outputDriver)
-            print('int main(int argc, char **argv) {', file=self.outputDriver)
-            print('  int index = -1;', file=self.outputDriver)
-            print('  if (argc > 1) index = atoi(argv[1]);', file=self.outputDriver)
-            
+            print("#include <stdio.h>", file=self.outputDriver)
+            print("#include <stdlib.h>\n", file=self.outputDriver)
+            print("int main(int argc, char **argv) {", file=self.outputDriver)
+            print("  int index = -1;", file=self.outputDriver)
+            print("  if (argc > 1) index = atoi(argv[1]);", file=self.outputDriver)
+
     def finish(self):
         if self.layoutTests:
-            print('int main(int argc, char **argv) {', file=self.output)
-            print('  int index = -1;', file=self.output)
-            print('  if (argc > 1) index = atoi(argv[1]);', file=self.output)
-            for i,f in self.layoutTests:
-                print('  if (index == -1 || index == %d)' % i, file=self.output)
-                print('    %s();' % f, file=self.output)
-            print('  return 0;', file=self.output)
-            print('}', file=self.output) 
+            print("int main(int argc, char **argv) {", file=self.output)
+            print("  int index = -1;", file=self.output)
+            print("  if (argc > 1) index = atoi(argv[1]);", file=self.output)
+            for i, f in self.layoutTests:
+                print("  if (index == -1 || index == %d)" % i, file=self.output)
+                print("    %s();" % f, file=self.output)
+            print("  return 0;", file=self.output)
+            print("}", file=self.output)
 
         if self.outputDriver:
             print('  printf("DONE\\n");', file=self.outputDriver)
-            print('  return 0;', file=self.outputDriver)
-            print('}', file=self.outputDriver)        
+            print("  return 0;", file=self.outputDriver)
+            print("}", file=self.outputDriver)
 
     def addDeclaration(self, decl):
         if decl in self.declarations:
@@ -81,402 +93,718 @@ def addDeclaration(self, decl):
 
     def getTypeName(self, T):
         name = self.types.get(T)
-        if name is None:            
+        if name is None:
             # Reserve slot
             self.types[T] = None
             self.types[T] = name = T.getTypeName(self)
         return name
-    
+
     def writeLayoutTest(self, i, ty):
         tyName = self.getTypeName(ty)
-        tyNameClean = tyName.replace(' ','_').replace('*','star')
-        fnName = 'test_%s' % tyNameClean
-            
-        print('void %s(void) {' % fnName, file=self.output)
-        self.printSizeOfType('    %s'%fnName, tyName, ty, self.output)
-        self.printAlignOfType('    %s'%fnName, tyName, ty, self.output)
-        self.printOffsetsOfType('    %s'%fnName, tyName, ty, self.output)
-        print('}', file=self.output)
+        tyNameClean = tyName.replace(" ", "_").replace("*", "star")
+        fnName = "test_%s" % tyNameClean
+
+        print("void %s(void) {" % fnName, file=self.output)
+        self.printSizeOfType("    %s" % fnName, tyName, ty, self.output)
+        self.printAlignOfType("    %s" % fnName, tyName, ty, self.output)
+        self.printOffsetsOfType("    %s" % fnName, tyName, ty, self.output)
+        print("}", file=self.output)
         print(file=self.output)
-        
-        self.layoutTests.append((i,fnName))
-        
+
+        self.layoutTests.append((i, fnName))
+
     def writeFunction(self, i, FT):
-        args = ', '.join(['%s arg%d'%(self.getTypeName(t),i) for i,t in enumerate(FT.argTypes)])
+        args = ", ".join(
+            ["%s arg%d" % (self.getTypeName(t), i) for i, t in enumerate(FT.argTypes)]
+        )
         if not args:
-            args = 'void'
+            args = "void"
 
         if FT.returnType is None:
             retvalName = None
-            retvalTypeName = 'void'
+            retvalTypeName = "void"
         else:
             retvalTypeName = self.getTypeName(FT.returnType)
             if self.writeBody or self.outputTests:
                 retvalName = self.getTestReturnValue(FT.returnType)
 
-        fnName = 'fn%d'%(FT.index,)
+        fnName = "fn%d" % (FT.index,)
         if self.outputHeader:
-            print('%s %s(%s);'%(retvalTypeName, fnName, args), file=self.outputHeader)
+            print("%s %s(%s);" % (retvalTypeName, fnName, args), file=self.outputHeader)
         elif self.outputTests:
-            print('%s %s(%s);'%(retvalTypeName, fnName, args), file=self.outputTests)
-            
-        print('%s %s(%s)'%(retvalTypeName, fnName, args), end=' ', file=self.output)
+            print("%s %s(%s);" % (retvalTypeName, fnName, args), file=self.outputTests)
+
+        print("%s %s(%s)" % (retvalTypeName, fnName, args), end=" ", file=self.output)
         if self.writeBody:
-            print('{', file=self.output)
-            
-            for i,t in enumerate(FT.argTypes):
-                self.printValueOfType('    %s'%fnName, 'arg%d'%i, t)
+            print("{", file=self.output)
+
+            for i, t in enumerate(FT.argTypes):
+                self.printValueOfType("    %s" % fnName, "arg%d" % i, t)
 
             if retvalName is not None:
-                print('  return %s;'%(retvalName,), file=self.output)
-            print('}', file=self.output)
+                print("  return %s;" % (retvalName,), file=self.output)
+            print("}", file=self.output)
         else:
-            print('{}', file=self.output)
+            print("{}", file=self.output)
         print(file=self.output)
 
         if self.outputDriver:
-            print('  if (index == -1 || index == %d) {' % i, file=self.outputDriver)
-            print('    extern void test_%s(void);' % fnName, file=self.outputDriver)
-            print('    test_%s();' % fnName, file=self.outputDriver)
-            print('   }', file=self.outputDriver)
-            
+            print("  if (index == -1 || index == %d) {" % i, file=self.outputDriver)
+            print("    extern void test_%s(void);" % fnName, file=self.outputDriver)
+            print("    test_%s();" % fnName, file=self.outputDriver)
+            print("   }", file=self.outputDriver)
+
         if self.outputTests:
             if self.outputHeader:
-                print('void test_%s(void);'%(fnName,), file=self.outputHeader)
+                print("void test_%s(void);" % (fnName,), file=self.outputHeader)
 
             if retvalName is None:
                 retvalTests = None
             else:
                 retvalTests = self.getTestValuesArray(FT.returnType)
             tests = [self.getTestValuesArray(ty) for ty in FT.argTypes]
-            print('void test_%s(void) {'%(fnName,), file=self.outputTests)
+            print("void test_%s(void) {" % (fnName,), file=self.outputTests)
 
             if retvalTests is not None:
-                print('  printf("%s: testing return.\\n");'%(fnName,), file=self.outputTests)
-                print('  for (int i=0; i<%d; ++i) {'%(retvalTests[1],), file=self.outputTests)
-                args = ', '.join(['%s[%d]'%(t,randrange(l)) for t,l in tests])
-                print('    %s RV;'%(retvalTypeName,), file=self.outputTests)
-                print('    %s = %s[i];'%(retvalName, retvalTests[0]), file=self.outputTests)
-                print('    RV = %s(%s);'%(fnName, args), file=self.outputTests)
-                self.printValueOfType('  %s_RV'%fnName, 'RV', FT.returnType, output=self.outputTests, indent=4)
-                self.checkTypeValues('RV', '%s[i]' % retvalTests[0], FT.returnType, output=self.outputTests, indent=4)
-                print('  }', file=self.outputTests)
-            
+                print(
+                    '  printf("%s: testing return.\\n");' % (fnName,),
+                    file=self.outputTests,
+                )
+                print(
+                    "  for (int i=0; i<%d; ++i) {" % (retvalTests[1],),
+                    file=self.outputTests,
+                )
+                args = ", ".join(["%s[%d]" % (t, randrange(l)) for t, l in tests])
+                print("    %s RV;" % (retvalTypeName,), file=self.outputTests)
+                print(
+                    "    %s = %s[i];" % (retvalName, retvalTests[0]),
+                    file=self.outputTests,
+                )
+                print("    RV = %s(%s);" % (fnName, args), file=self.outputTests)
+                self.printValueOfType(
+                    "  %s_RV" % fnName,
+                    "RV",
+                    FT.returnType,
+                    output=self.outputTests,
+                    indent=4,
+                )
+                self.checkTypeValues(
+                    "RV",
+                    "%s[i]" % retvalTests[0],
+                    FT.returnType,
+                    output=self.outputTests,
+                    indent=4,
+                )
+                print("  }", file=self.outputTests)
+
             if tests:
-                print('  printf("%s: testing arguments.\\n");'%(fnName,), file=self.outputTests)
-            for i,(array,length) in enumerate(tests):
+                print(
+                    '  printf("%s: testing arguments.\\n");' % (fnName,),
+                    file=self.outputTests,
+                )
+            for i, (array, length) in enumerate(tests):
                 for j in range(length):
-                    args = ['%s[%d]'%(t,randrange(l)) for t,l in tests]
-                    args[i] = '%s[%d]'%(array,j)
-                    print('  %s(%s);'%(fnName, ', '.join(args),), file=self.outputTests)
-            print('}', file=self.outputTests)
+                    args = ["%s[%d]" % (t, randrange(l)) for t, l in tests]
+                    args[i] = "%s[%d]" % (array, j)
+                    print(
+                        "  %s(%s);"
+                        % (
+                            fnName,
+                            ", ".join(args),
+                        ),
+                        file=self.outputTests,
+                    )
+            print("}", file=self.outputTests)
 
     def getTestReturnValue(self, type):
-        typeName = self.getTypeName(type)        
+        typeName = self.getTypeName(type)
         info = self.testReturnValues.get(typeName)
         if info is None:
-            name = '%s_retval'%(typeName.replace(' ','_').replace('*','star'),)
-            print('%s %s;'%(typeName,name), file=self.output)
+            name = "%s_retval" % (typeName.replace(" ", "_").replace("*", "star"),)
+            print("%s %s;" % (typeName, name), file=self.output)
             if self.outputHeader:
-                print('extern %s %s;'%(typeName,name), file=self.outputHeader)
-            elif self.outputTests:                
-                print('extern %s %s;'%(typeName,name), file=self.outputTests)
+                print("extern %s %s;" % (typeName, name), file=self.outputHeader)
+            elif self.outputTests:
+                print("extern %s %s;" % (typeName, name), file=self.outputTests)
             info = self.testReturnValues[typeName] = name
         return info
 
     def getTestValuesArray(self, type):
-        typeName = self.getTypeName(type)        
+        typeName = self.getTypeName(type)
         info = self.testValues.get(typeName)
         if info is None:
-            name = '%s_values'%(typeName.replace(' ','_').replace('*','star'),)
-            print('static %s %s[] = {'%(typeName,name), file=self.outputTests)
+            name = "%s_values" % (typeName.replace(" ", "_").replace("*", "star"),)
+            print("static %s %s[] = {" % (typeName, name), file=self.outputTests)
             length = 0
             for item in self.getTestValues(type):
-                print('\t%s,'%(item,), file=self.outputTests)
+                print("\t%s," % (item,), file=self.outputTests)
                 length += 1
-            print('};', file=self.outputTests)
-            info = self.testValues[typeName] = (name,length)
+            print("};", file=self.outputTests)
+            info = self.testValues[typeName] = (name, length)
         return info
 
     def getTestValues(self, t):
         if isinstance(t, BuiltinType):
-            if t.name=='float':
-                for i in ['0.0','-1.0','1.0']:
-                    yield i+'f'
-            elif t.name=='double':
-                for i in ['0.0','-1.0','1.0']:
+            if t.name == "float":
+                for i in ["0.0", "-1.0", "1.0"]:
+                    yield i + "f"
+            elif t.name == "double":
+                for i in ["0.0", "-1.0", "1.0"]:
                     yield i
-            elif t.name in ('void *'):
-                yield '(void*) 0'
-                yield '(void*) -1'
+            elif t.name in ("void *"):
+                yield "(void*) 0"
+                yield "(void*) -1"
             else:
-                yield '(%s) 0'%(t.name,)
-                yield '(%s) -1'%(t.name,)
-                yield '(%s) 1'%(t.name,)
+                yield "(%s) 0" % (t.name,)
+                yield "(%s) -1" % (t.name,)
+                yield "(%s) 1" % (t.name,)
         elif isinstance(t, EnumType):
             for i in range(0, len(t.enumerators)):
-                yield 'enum%dval%d_%d' % (t.index, i, t.unique_id)
+                yield "enum%dval%d_%d" % (t.index, i, t.unique_id)
         elif isinstance(t, RecordType):
-            nonPadding = [f for f in t.fields 
-                          if not f.isPaddingBitField()]
+            nonPadding = [f for f in t.fields if not f.isPaddingBitField()]
 
             if not nonPadding:
-                yield '{ }'
+                yield "{ }"
                 return
 
             # FIXME: Use designated initializers to access non-first
             # fields of unions.
             if t.isUnion:
                 for v in self.getTestValues(nonPadding[0]):
-                    yield '{ %s }' % v
+                    yield "{ %s }" % v
                 return
 
             fieldValues = [list(v) for v in map(self.getTestValues, nonPadding)]
-            for i,values in enumerate(fieldValues):
+            for i, values in enumerate(fieldValues):
                 for v in values:
                     elements = [random.choice(fv) for fv in fieldValues]
                     elements[i] = v
-                    yield '{ %s }'%(', '.join(elements))
+                    yield "{ %s }" % (", ".join(elements))
 
         elif isinstance(t, ComplexType):
             for t in self.getTestValues(t.elementType):
-                yield '%s + %s * 1i'%(t,t)
+                yield "%s + %s * 1i" % (t, t)
         elif isinstance(t, ArrayType):
             values = list(self.getTestValues(t.elementType))
             if not values:
-                yield '{ }'
+                yield "{ }"
             for i in range(t.numElements):
                 for v in values:
                     elements = [random.choice(values) for i in range(t.numElements)]
                     elements[i] = v
-                    yield '{ %s }'%(', '.join(elements))
+                    yield "{ %s }" % (", ".join(elements))
         else:
-            raise NotImplementedError('Cannot make tests values of type: "%s"'%(t,))
+            raise NotImplementedError('Cannot make test values of type: "%s"' % (t,))
 
     def printSizeOfType(self, prefix, name, t, output=None, indent=2):
-        print('%*sprintf("%s: sizeof(%s) = %%ld\\n", (long)sizeof(%s));'%(indent, '', prefix, name, name), file=output) 
+        print(
+            '%*sprintf("%s: sizeof(%s) = %%ld\\n", (long)sizeof(%s));'
+            % (indent, "", prefix, name, name),
+            file=output,
+        )
+
     def printAlignOfType(self, prefix, name, t, output=None, indent=2):
-        print('%*sprintf("%s: __alignof__(%s) = %%ld\\n", (long)__alignof__(%s));'%(indent, '', prefix, name, name), file=output) 
+        print(
+            '%*sprintf("%s: __alignof__(%s) = %%ld\\n", (long)__alignof__(%s));'
+            % (indent, "", prefix, name, name),
+            file=output,
+        )
+
     def printOffsetsOfType(self, prefix, name, t, output=None, indent=2):
         if isinstance(t, RecordType):
-            for i,f in enumerate(t.fields):
+            for i, f in enumerate(t.fields):
                 if f.isBitField():
                     continue
-                fname = 'field%d' % i
-                print('%*sprintf("%s: __builtin_offsetof(%s, %s) = %%ld\\n", (long)__builtin_offsetof(%s, %s));'%(indent, '', prefix, name, fname, name, fname), file=output) 
-                
+                fname = "field%d" % i
+                print(
+                    '%*sprintf("%s: __builtin_offsetof(%s, %s) = %%ld\\n", (long)__builtin_offsetof(%s, %s));'
+                    % (indent, "", prefix, name, fname, name, fname),
+                    file=output,
+                )
+
     def printValueOfType(self, prefix, name, t, output=None, indent=2):
         if output is None:
             output = self.output
         if isinstance(t, BuiltinType):
             value_expr = name
-            if t.name.split(' ')[-1] == '_Bool':
+            if t.name.split(" ")[-1] == "_Bool":
                 # Hack to work around PR5579.
                 value_expr = "%s ? 2 : 0" % name
 
-            if t.name.endswith('long long'):
-                code = 'lld'
-            elif t.name.endswith('long'):
-                code = 'ld'
-            elif t.name.split(' ')[-1] in ('_Bool','char','short',
-                                           'int','unsigned'):
-                code = 'd'
-            elif t.name in ('float','double'):
-                code = 'f'
-            elif t.name == 'long double':
-                code = 'Lf'
+            if t.name.endswith("long long"):
+                code = "lld"
+            elif t.name.endswith("long"):
+                code = "ld"
+            elif t.name.split(" ")[-1] in ("_Bool", "char", "short", "int", "unsigned"):
+                code = "d"
+            elif t.name in ("float", "double"):
+                code = "f"
+            elif t.name == "long double":
+                code = "Lf"
             else:
-                code = 'p'
-            print('%*sprintf("%s: %s = %%%s\\n", %s);'%(
-                indent, '', prefix, name, code, value_expr), file=output)
+                code = "p"
+            print(
+                '%*sprintf("%s: %s = %%%s\\n", %s);'
+                % (indent, "", prefix, name, code, value_expr),
+                file=output,
+            )
         elif isinstance(t, EnumType):
-            print('%*sprintf("%s: %s = %%d\\n", %s);'%(indent, '', prefix, name, name), file=output)
+            print(
+                '%*sprintf("%s: %s = %%d\\n", %s);' % (indent, "", prefix, name, name),
+                file=output,
+            )
         elif isinstance(t, RecordType):
             if not t.fields:
-                print('%*sprintf("%s: %s (empty)\\n");'%(indent, '', prefix, name), file=output) 
-            for i,f in enumerate(t.fields):
+                print(
+                    '%*sprintf("%s: %s (empty)\\n");' % (indent, "", prefix, name),
+                    file=output,
+                )
+            for i, f in enumerate(t.fields):
                 if f.isPaddingBitField():
                     continue
-                fname = '%s.field%d'%(name,i)
+                fname = "%s.field%d" % (name, i)
                 self.printValueOfType(prefix, fname, f, output=output, indent=indent)
         elif isinstance(t, ComplexType):
-            self.printValueOfType(prefix, '(__real %s)'%name, t.elementType, output=output,indent=indent)
-            self.printValueOfType(prefix, '(__imag %s)'%name, t.elementType, output=output,indent=indent)
+            self.printValueOfType(
+                prefix,
+                "(__real %s)" % name,
+                t.elementType,
+                output=output,
+                indent=indent,
+            )
+            self.printValueOfType(
+                prefix,
+                "(__imag %s)" % name,
+                t.elementType,
+                output=output,
+                indent=indent,
+            )
         elif isinstance(t, ArrayType):
             for i in range(t.numElements):
                 # Access in this fashion as a hackish way to portably
                 # access vectors.
                 if t.isVector:
-                    self.printValueOfType(prefix, '((%s*) &%s)[%d]'%(t.elementType,name,i), t.elementType, output=output,indent=indent)
+                    self.printValueOfType(
+                        prefix,
+                        "((%s*) &%s)[%d]" % (t.elementType, name, i),
+                        t.elementType,
+                        output=output,
+                        indent=indent,
+                    )
                 else:
-                    self.printValueOfType(prefix, '%s[%d]'%(name,i), t.elementType, output=output,indent=indent)                    
+                    self.printValueOfType(
+                        prefix,
+                        "%s[%d]" % (name, i),
+                        t.elementType,
+                        output=output,
+                        indent=indent,
+                    )
         else:
-            raise NotImplementedError('Cannot print value of type: "%s"'%(t,))
+            raise NotImplementedError('Cannot print value of type: "%s"' % (t,))
 
     def checkTypeValues(self, nameLHS, nameRHS, t, output=None, indent=2):
-        prefix = 'foo'
+        prefix = "foo"
         if output is None:
             output = self.output
         if isinstance(t, BuiltinType):
-            print('%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS), file=output)
+            print("%*sassert(%s == %s);" % (indent, "", nameLHS, nameRHS), file=output)
         elif isinstance(t, EnumType):
-            print('%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS), file=output)
+            print("%*sassert(%s == %s);" % (indent, "", nameLHS, nameRHS), file=output)
         elif isinstance(t, RecordType):
-            for i,f in enumerate(t.fields):
+            for i, f in enumerate(t.fields):
                 if f.isPaddingBitField():
                     continue
-                self.checkTypeValues('%s.field%d'%(nameLHS,i), '%s.field%d'%(nameRHS,i), 
-                                     f, output=output, indent=indent)
+                self.checkTypeValues(
+                    "%s.field%d" % (nameLHS, i),
+                    "%s.field%d" % (nameRHS, i),
+                    f,
+                    output=output,
+                    indent=indent,
+                )
                 if t.isUnion:
                     break
         elif isinstance(t, ComplexType):
-            self.checkTypeValues('(__real %s)'%nameLHS, '(__real %s)'%nameRHS, t.elementType, output=output,indent=indent)
-            self.checkTypeValues('(__imag %s)'%nameLHS, '(__imag %s)'%nameRHS, t.elementType, output=output,indent=indent)
+            self.checkTypeValues(
+                "(__real %s)" % nameLHS,
+                "(__real %s)" % nameRHS,
+                t.elementType,
+                output=output,
+                indent=indent,
+            )
+            self.checkTypeValues(
+                "(__imag %s)" % nameLHS,
+                "(__imag %s)" % nameRHS,
+                t.elementType,
+                output=output,
+                indent=indent,
+            )
         elif isinstance(t, ArrayType):
             for i in range(t.numElements):
                 # Access in this fashion as a hackish way to portably
                 # access vectors.
                 if t.isVector:
-                    self.checkTypeValues('((%s*) &%s)[%d]'%(t.elementType,nameLHS,i), 
-                                         '((%s*) &%s)[%d]'%(t.elementType,nameRHS,i), 
-                                         t.elementType, output=output,indent=indent)
+                    self.checkTypeValues(
+                        "((%s*) &%s)[%d]" % (t.elementType, nameLHS, i),
+                        "((%s*) &%s)[%d]" % (t.elementType, nameRHS, i),
+                        t.elementType,
+                        output=output,
+                        indent=indent,
+                    )
                 else:
-                    self.checkTypeValues('%s[%d]'%(nameLHS,i), '%s[%d]'%(nameRHS,i), 
-                                         t.elementType, output=output,indent=indent)                    
+                    self.checkTypeValues(
+                        "%s[%d]" % (nameLHS, i),
+                        "%s[%d]" % (nameRHS, i),
+                        t.elementType,
+                        output=output,
+                        indent=indent,
+                    )
         else:
-            raise NotImplementedError('Cannot print value of type: "%s"'%(t,))
+            raise NotImplementedError('Cannot print value of type: "%s"' % (t,))
+
 
 import sys
 
+
 def main():
     from optparse import OptionParser, OptionGroup
+
     parser = OptionParser("%prog [options] {indices}")
-    parser.add_option("", "--mode", dest="mode",
-                      help="autogeneration mode (random or linear) [default %default]",
-                      type='choice', choices=('random','linear'), default='linear')
-    parser.add_option("", "--count", dest="count",
-                      help="autogenerate COUNT functions according to MODE",
-                      type=int, default=0)
-    parser.add_option("", "--min", dest="minIndex", metavar="N",
-                      help="start autogeneration with the Nth function type  [default %default]",
-                      type=int, default=0)
-    parser.add_option("", "--max", dest="maxIndex", metavar="N",
-                      help="maximum index for random autogeneration  [default %default]",
-                      type=int, default=10000000)
-    parser.add_option("", "--seed", dest="seed",
-                      help="random number generator seed [default %default]",
-                      type=int, default=1)
-    parser.add_option("", "--use-random-seed", dest="useRandomSeed",
-                      help="use random value for initial random number generator seed",
-                      action='store_true', default=False)
-    parser.add_option("", "--skip", dest="skipTests",
-                      help="add a test index to skip",
-                      type=int, action='append', default=[])
-    parser.add_option("-o", "--output", dest="output", metavar="FILE",
-                      help="write output to FILE  [default %default]",
-                      type=str, default='-')
-    parser.add_option("-O", "--output-header", dest="outputHeader", metavar="FILE",
-                      help="write header file for output to FILE  [default %default]",
-                      type=str, default=None)
-    parser.add_option("-T", "--output-tests", dest="outputTests", metavar="FILE",
-                      help="write function tests to FILE  [default %default]",
-                      type=str, default=None)
-    parser.add_option("-D", "--output-driver", dest="outputDriver", metavar="FILE",
-                      help="write test driver to FILE  [default %default]",
-                      type=str, default=None)
-    parser.add_option("", "--test-layout", dest="testLayout", metavar="FILE",
-                      help="test structure layout",
-                      action='store_true', default=False)
+    parser.add_option(
+        "",
+        "--mode",
+        dest="mode",
+        help="autogeneration mode (random or linear) [default %default]",
+        type="choice",
+        choices=("random", "linear"),
+        default="linear",
+    )
+    parser.add_option(
+        "",
+        "--count",
+        dest="count",
+        help="autogenerate COUNT functions according to MODE",
+        type=int,
+        default=0,
+    )
+    parser.add_option(
+        "",
+        "--min",
+        dest="minIndex",
+        metavar="N",
+        help="start autogeneration with the Nth function type  [default %default]",
+        type=int,
+        default=0,
+    )
+    parser.add_option(
+        "",
+        "--max",
+        dest="maxIndex",
+        metavar="N",
+        help="maximum index for random autogeneration  [default %default]",
+        type=int,
+        default=10000000,
+    )
+    parser.add_option(
+        "",
+        "--seed",
+        dest="seed",
+        help="random number generator seed [default %default]",
+        type=int,
+        default=1,
+    )
+    parser.add_option(
+        "",
+        "--use-random-seed",
+        dest="useRandomSeed",
+        help="use random value for initial random number generator seed",
+        action="store_true",
+        default=False,
+    )
+    parser.add_option(
+        "",
+        "--skip",
+        dest="skipTests",
+        help="add a test index to skip",
+        type=int,
+        action="append",
+        default=[],
+    )
+    parser.add_option(
+        "-o",
+        "--output",
+        dest="output",
+        metavar="FILE",
+        help="write output to FILE  [default %default]",
+        type=str,
+        default="-",
+    )
+    parser.add_option(
+        "-O",
+        "--output-header",
+        dest="outputHeader",
+        metavar="FILE",
+        help="write header file for output to FILE  [default %default]",
+        type=str,
+        default=None,
+    )
+    parser.add_option(
+        "-T",
+        "--output-tests",
+        dest="outputTests",
+        metavar="FILE",
+        help="write function tests to FILE  [default %default]",
+        type=str,
+        default=None,
+    )
+    parser.add_option(
+        "-D",
+        "--output-driver",
+        dest="outputDriver",
+        metavar="FILE",
+        help="write test driver to FILE  [default %default]",
+        type=str,
+        default=None,
+    )
+    parser.add_option(
+        "",
+        "--test-layout",
+        dest="testLayout",
+        metavar="FILE",
+        help="test structure layout",
+        action="store_true",
+        default=False,
+    )
 
     group = OptionGroup(parser, "Type Enumeration Options")
     # Builtins - Ints
-    group.add_option("", "--no-char", dest="useChar",
-                     help="do not generate char types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-short", dest="useShort",
-                     help="do not generate short types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-int", dest="useInt",
-                     help="do not generate int types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-long", dest="useLong",
-                     help="do not generate long types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-long-long", dest="useLongLong",
-                     help="do not generate long long types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-unsigned", dest="useUnsigned",
-                     help="do not generate unsigned integer types",
-                     action="store_false", default=True)
+    group.add_option(
+        "",
+        "--no-char",
+        dest="useChar",
+        help="do not generate char types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-short",
+        dest="useShort",
+        help="do not generate short types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-int",
+        dest="useInt",
+        help="do not generate int types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-long",
+        dest="useLong",
+        help="do not generate long types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-long-long",
+        dest="useLongLong",
+        help="do not generate long long types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-unsigned",
+        dest="useUnsigned",
+        help="do not generate unsigned integer types",
+        action="store_false",
+        default=True,
+    )
 
     # Other builtins
-    group.add_option("", "--no-bool", dest="useBool",
-                     help="do not generate bool types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-float", dest="useFloat",
-                     help="do not generate float types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-double", dest="useDouble",
-                     help="do not generate double types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-long-double", dest="useLongDouble",
-                     help="do not generate long double types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-void-pointer", dest="useVoidPointer",
-                     help="do not generate void* types",
-                     action="store_false", default=True)
+    group.add_option(
+        "",
+        "--no-bool",
+        dest="useBool",
+        help="do not generate bool types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-float",
+        dest="useFloat",
+        help="do not generate float types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-double",
+        dest="useDouble",
+        help="do not generate double types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-long-double",
+        dest="useLongDouble",
+        help="do not generate long double types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-void-pointer",
+        dest="useVoidPointer",
+        help="do not generate void* types",
+        action="store_false",
+        default=True,
+    )
 
     # Enumerations
-    group.add_option("", "--no-enums", dest="useEnum",
-                     help="do not generate enum types",
-                     action="store_false", default=True)
+    group.add_option(
+        "",
+        "--no-enums",
+        dest="useEnum",
+        help="do not generate enum types",
+        action="store_false",
+        default=True,
+    )
 
     # Derived types
-    group.add_option("", "--no-array", dest="useArray",
-                     help="do not generate record types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-complex", dest="useComplex",
-                     help="do not generate complex types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-record", dest="useRecord",
-                     help="do not generate record types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-union", dest="recordUseUnion",
-                     help="do not generate union types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-vector", dest="useVector",
-                     help="do not generate vector types",
-                     action="store_false", default=True)
-    group.add_option("", "--no-bit-field", dest="useBitField",
-                     help="do not generate bit-field record members",
-                     action="store_false", default=True)
-    group.add_option("", "--no-builtins", dest="useBuiltins",
-                     help="do not use any types",
-                     action="store_false", default=True)
-
-    # Tuning 
-    group.add_option("", "--no-function-return", dest="functionUseReturn",
-                     help="do not generate return types for functions",
-                     action="store_false", default=True)
-    group.add_option("", "--vector-types", dest="vectorTypes",
-                     help="comma separated list of vector types (e.g., v2i32) [default %default]",
-                     action="store", type=str, default='v2i16, v1i64, v2i32, v4i16, v8i8, v2f32, v2i64, v4i32, v8i16, v16i8, v2f64, v4f32, v16f32', metavar="N")
-    group.add_option("", "--bit-fields", dest="bitFields",
-                     help="comma separated list 'type:width' bit-field specifiers [default %default]",
-                     action="store", type=str, default=(
-            "char:0,char:4,int:0,unsigned:1,int:1,int:4,int:13,int:24"))
-    group.add_option("", "--max-args", dest="functionMaxArgs",
-                     help="maximum number of arguments per function [default %default]",
-                     action="store", type=int, default=4, metavar="N")
-    group.add_option("", "--max-array", dest="arrayMaxSize",
-                     help="maximum array size [default %default]",
-                     action="store", type=int, default=4, metavar="N")
-    group.add_option("", "--max-record", dest="recordMaxSize",
-                     help="maximum number of fields per record [default %default]",
-                     action="store", type=int, default=4, metavar="N")
-    group.add_option("", "--max-record-depth", dest="recordMaxDepth",
-                     help="maximum nested structure depth [default %default]",
-                     action="store", type=int, default=None, metavar="N")
+    group.add_option(
+        "",
+        "--no-array",
+        dest="useArray",
+        help="do not generate record types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-complex",
+        dest="useComplex",
+        help="do not generate complex types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-record",
+        dest="useRecord",
+        help="do not generate record types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-union",
+        dest="recordUseUnion",
+        help="do not generate union types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-vector",
+        dest="useVector",
+        help="do not generate vector types",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-bit-field",
+        dest="useBitField",
+        help="do not generate bit-field record members",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--no-builtins",
+        dest="useBuiltins",
+        help="do not use any types",
+        action="store_false",
+        default=True,
+    )
+
+    # Tuning
+    group.add_option(
+        "",
+        "--no-function-return",
+        dest="functionUseReturn",
+        help="do not generate return types for functions",
+        action="store_false",
+        default=True,
+    )
+    group.add_option(
+        "",
+        "--vector-types",
+        dest="vectorTypes",
+        help="comma separated list of vector types (e.g., v2i32) [default %default]",
+        action="store",
+        type=str,
+        default="v2i16, v1i64, v2i32, v4i16, v8i8, v2f32, v2i64, v4i32, v8i16, v16i8, v2f64, v4f32, v16f32",
+        metavar="N",
+    )
+    group.add_option(
+        "",
+        "--bit-fields",
+        dest="bitFields",
+        help="comma separated list 'type:width' bit-field specifiers [default %default]",
+        action="store",
+        type=str,
+        default=("char:0,char:4,int:0,unsigned:1,int:1,int:4,int:13,int:24"),
+    )
+    group.add_option(
+        "",
+        "--max-args",
+        dest="functionMaxArgs",
+        help="maximum number of arguments per function [default %default]",
+        action="store",
+        type=int,
+        default=4,
+        metavar="N",
+    )
+    group.add_option(
+        "",
+        "--max-array",
+        dest="arrayMaxSize",
+        help="maximum array size [default %default]",
+        action="store",
+        type=int,
+        default=4,
+        metavar="N",
+    )
+    group.add_option(
+        "",
+        "--max-record",
+        dest="recordMaxSize",
+        help="maximum number of fields per record [default %default]",
+        action="store",
+        type=int,
+        default=4,
+        metavar="N",
+    )
+    group.add_option(
+        "",
+        "--max-record-depth",
+        dest="recordMaxDepth",
+        help="maximum nested structure depth [default %default]",
+        action="store",
+        type=int,
+        default=None,
+        metavar="N",
+    )
     parser.add_option_group(group)
     (opts, args) = parser.parse_args()
 
@@ -487,82 +815,98 @@ def main():
     builtins = []
     if opts.useBuiltins:
         ints = []
-        if opts.useChar: ints.append(('char',1))
-        if opts.useShort: ints.append(('short',2))
-        if opts.useInt: ints.append(('int',4))
+        if opts.useChar:
+            ints.append(("char", 1))
+        if opts.useShort:
+            ints.append(("short", 2))
+        if opts.useInt:
+            ints.append(("int", 4))
         # FIXME: Wrong size.
-        if opts.useLong: ints.append(('long',4))
-        if opts.useLongLong: ints.append(('long long',8))
-        if opts.useUnsigned: 
-            ints = ([('unsigned %s'%i,s) for i,s in ints] + 
-                    [('signed %s'%i,s) for i,s in ints])
+        if opts.useLong:
+            ints.append(("long", 4))
+        if opts.useLongLong:
+            ints.append(("long long", 8))
+        if opts.useUnsigned:
+            ints = [("unsigned %s" % i, s) for i, s in ints] + [
+                ("signed %s" % i, s) for i, s in ints
+            ]
         builtins.extend(ints)
 
-        if opts.useBool: builtins.append(('_Bool',1))
-        if opts.useFloat: builtins.append(('float',4))
-        if opts.useDouble: builtins.append(('double',8))
-        if opts.useLongDouble: builtins.append(('long double',16))
+        if opts.useBool:
+            builtins.append(("_Bool", 1))
+        if opts.useFloat:
+            builtins.append(("float", 4))
+        if opts.useDouble:
+            builtins.append(("double", 8))
+        if opts.useLongDouble:
+            builtins.append(("long double", 16))
         # FIXME: Wrong size.
-        if opts.useVoidPointer:  builtins.append(('void*',4))
+        if opts.useVoidPointer:
+            builtins.append(("void*", 4))
 
-    btg = FixedTypeGenerator([BuiltinType(n,s) for n,s in builtins])
+    btg = FixedTypeGenerator([BuiltinType(n, s) for n, s in builtins])
 
     bitfields = []
-    for specifier in opts.bitFields.split(','):
+    for specifier in opts.bitFields.split(","):
         if not specifier.strip():
             continue
-        name,width = specifier.strip().split(':', 1)
-        bitfields.append(BuiltinType(name,None,int(width)))
+        name, width = specifier.strip().split(":", 1)
+        bitfields.append(BuiltinType(name, None, int(width)))
     bftg = FixedTypeGenerator(bitfields)
 
-    charType = BuiltinType('char',1)
-    shortType = BuiltinType('short',2)
-    intType = BuiltinType('int',4)
-    longlongType = BuiltinType('long long',8)
-    floatType = BuiltinType('float',4)
-    doubleType = BuiltinType('double',8)
+    charType = BuiltinType("char", 1)
+    shortType = BuiltinType("short", 2)
+    intType = BuiltinType("int", 4)
+    longlongType = BuiltinType("long long", 8)
+    floatType = BuiltinType("float", 4)
+    doubleType = BuiltinType("double", 8)
     sbtg = FixedTypeGenerator([charType, intType, floatType, doubleType])
 
     atg = AnyTypeGenerator()
     artg = AnyTypeGenerator()
+
     def makeGenerator(atg, subgen, subfieldgen, useRecord, useArray, useBitField):
         atg.addGenerator(btg)
         if useBitField and opts.useBitField:
             atg.addGenerator(bftg)
         if useRecord and opts.useRecord:
-            assert subgen 
-            atg.addGenerator(RecordTypeGenerator(subfieldgen, opts.recordUseUnion, 
-                                                 opts.recordMaxSize))
+            assert subgen
+            atg.addGenerator(
+                RecordTypeGenerator(
+                    subfieldgen, opts.recordUseUnion, opts.recordMaxSize
+                )
+            )
         if opts.useComplex:
             # FIXME: Allow overriding builtins here
             atg.addGenerator(ComplexTypeGenerator(sbtg))
         if useArray and opts.useArray:
-            assert subgen 
+            assert subgen
             atg.addGenerator(ArrayTypeGenerator(subgen, opts.arrayMaxSize))
         if opts.useVector:
             vTypes = []
-            for i,t in enumerate(opts.vectorTypes.split(',')):
-                m = re.match('v([1-9][0-9]*)([if][1-9][0-9]*)', t.strip())
+            for i, t in enumerate(opts.vectorTypes.split(",")):
+                m = re.match("v([1-9][0-9]*)([if][1-9][0-9]*)", t.strip())
                 if not m:
-                    parser.error('Invalid vector type: %r' % t)
-                count,kind = m.groups()
+                    parser.error("Invalid vector type: %r" % t)
+                count, kind = m.groups()
                 count = int(count)
-                type = { 'i8'  : charType, 
-                         'i16' : shortType, 
-                         'i32' : intType, 
-                         'i64' : longlongType,
-                         'f32' : floatType, 
-                         'f64' : doubleType,
-                         }.get(kind)
+                type = {
+                    "i8": charType,
+                    "i16": shortType,
+                    "i32": intType,
+                    "i64": longlongType,
+                    "f32": floatType,
+                    "f64": doubleType,
+                }.get(kind)
                 if not type:
-                    parser.error('Invalid vector type: %r' % t)
+                    parser.error("Invalid vector type: %r" % t)
                 vTypes.append(ArrayType(i, True, type, count * type.size))
-                
+
             atg.addGenerator(FixedTypeGenerator(vTypes))
         if opts.useEnum:
-            atg.addGenerator(EnumTypeGenerator([None, '-1', '1', '1u'], 1, 4))
+            atg.addGenerator(EnumTypeGenerator([None, "-1", "1", "1u"], 1, 4))
 
-    if opts.recordMaxDepth is None: 
+    if opts.recordMaxDepth is None:
         # Fully recursive, just avoid top-level arrays.
         subFTG = AnyTypeGenerator()
         subTG = AnyTypeGenerator()
@@ -599,53 +943,59 @@ def makeGenerator(atg, subgen, subfieldgen, useRecord, useArray, useBitField):
         else:
             opts.maxIndex = ftg.cardinality
     opts.maxIndex = min(opts.maxIndex, ftg.cardinality)
-    opts.minIndex = max(0,min(opts.maxIndex-1, opts.minIndex))
-    if not opts.mode=='random':
-        opts.count = min(opts.count, opts.maxIndex-opts.minIndex)
+    opts.minIndex = max(0, min(opts.maxIndex - 1, opts.minIndex))
+    if not opts.mode == "random":
+        opts.count = min(opts.count, opts.maxIndex - opts.minIndex)
 
-    if opts.output=='-':
+    if opts.output == "-":
         output = sys.stdout
     else:
-        output = open(opts.output,'w')
+        output = open(opts.output, "w")
         atexit.register(lambda: output.close())
-        
+
     outputHeader = None
     if opts.outputHeader:
-        outputHeader = open(opts.outputHeader,'w')
+        outputHeader = open(opts.outputHeader, "w")
         atexit.register(lambda: outputHeader.close())
-        
+
     outputTests = None
     if opts.outputTests:
-        outputTests = open(opts.outputTests,'w')
+        outputTests = open(opts.outputTests, "w")
         atexit.register(lambda: outputTests.close())
 
     outputDriver = None
     if opts.outputDriver:
-        outputDriver = open(opts.outputDriver,'w')
+        outputDriver = open(opts.outputDriver, "w")
         atexit.register(lambda: outputDriver.close())
 
-    info = ''
-    info += '// %s\n'%(' '.join(sys.argv),)
-    info += '// Generated: %s\n'%(time.strftime('%Y-%m-%d %H:%M'),)
-    info += '// Cardinality of function generator: %s\n'%(ftg.cardinality,)
-    info += '// Cardinality of type generator: %s\n'%(atg.cardinality,)
+    info = ""
+    info += "// %s\n" % (" ".join(sys.argv),)
+    info += "// Generated: %s\n" % (time.strftime("%Y-%m-%d %H:%M"),)
+    info += "// Cardinality of function generator: %s\n" % (ftg.cardinality,)
+    info += "// Cardinality of type generator: %s\n" % (atg.cardinality,)
 
     if opts.testLayout:
-        info += '\n#include <stdio.h>'
-    
-    P = TypePrinter(output, 
-                    outputHeader=outputHeader,
-                    outputTests=outputTests,
-                    outputDriver=outputDriver,
-                    headerName=opts.outputHeader,                    
-                    info=info)
+        info += "\n#include <stdio.h>"
+
+    P = TypePrinter(
+        output,
+        outputHeader=outputHeader,
+        outputTests=outputTests,
+        outputDriver=outputDriver,
+        headerName=opts.outputHeader,
+        info=info,
+    )
 
     def write(N):
         try:
             FT = ftg.get(N)
         except RuntimeError as e:
-            if e.args[0]=='maximum recursion depth exceeded':
-                print('WARNING: Skipped %d, recursion limit exceeded (bad arguments?)'%(N,), file=sys.stderr)
+            if e.args[0] == "maximum recursion depth exceeded":
+                print(
+                    "WARNING: Skipped %d, recursion limit exceeded (bad arguments?)"
+                    % (N,),
+                    file=sys.stderr,
+                )
                 return
             raise
         if opts.testLayout:
@@ -658,16 +1008,18 @@ def write(N):
 
     skipTests = set(opts.skipTests)
     for i in range(opts.count):
-        if opts.mode=='linear':
+        if opts.mode == "linear":
             index = opts.minIndex + i
         else:
-            index = opts.minIndex + int((opts.maxIndex-opts.minIndex) * random.random())
+            index = opts.minIndex + int(
+                (opts.maxIndex - opts.minIndex) * random.random()
+            )
         if index in skipTests:
             continue
         write(index)
 
     P.finish()
 
-if __name__=='__main__':
-    main()
 
+if __name__ == "__main__":
+    main()

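For reference, the signed/unsigned expansion in the hunk above is a pure
list transform; a minimal self-contained sketch (the input values are
assumed):

    ints = [("char", 1), ("short", 2)]
    ints = [("unsigned %s" % i, s) for i, s in ints] + [
        ("signed %s" % i, s) for i, s in ints
    ]
    # ints == [('unsigned char', 1), ('unsigned short', 2),
    #          ('signed char', 1), ('signed short', 2)]
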
diff  --git a/clang/utils/ABITest/Enumeration.py b/clang/utils/ABITest/Enumeration.py
index 24f5b5fba24cd..005b104a337de 100644
--- a/clang/utils/ABITest/Enumeration.py
+++ b/clang/utils/ABITest/Enumeration.py
@@ -1,38 +1,50 @@
 """Utilities for enumeration of finite and countably infinite sets.
 """
 from __future__ import absolute_import, division, print_function
+
 ###
 # Countable iteration
 
 # Simplifies some calculations
 class Aleph0(int):
     _singleton = None
+
     def __new__(type):
         if type._singleton is None:
             type._singleton = int.__new__(type)
         return type._singleton
-    def __repr__(self): return '<aleph0>'
-    def __str__(self): return 'inf'
-    
+
+    def __repr__(self):
+        return "<aleph0>"
+
+    def __str__(self):
+        return "inf"
+
     def __cmp__(self, b):
         return 1
 
     def __sub__(self, b):
         raise ValueError("Cannot subtract aleph0")
+
     __rsub__ = __sub__
 
-    def __add__(self, b): 
+    def __add__(self, b):
         return self
+
     __radd__ = __add__
 
-    def __mul__(self, b): 
-        if b == 0: return b            
+    def __mul__(self, b):
+        if b == 0:
+            return b
         return self
+
     __rmul__ = __mul__
 
     def __floordiv__(self, b):
-        if b == 0: raise ZeroDivisionError
+        if b == 0:
+            raise ZeroDivisionError
         return self
+
     __rfloordiv__ = __floordiv__
     __truediv__ = __floordiv__
     __rtruediv__ = __floordiv__
@@ -40,37 +52,43 @@ def __floordiv__(self, b):
     __rdiv__ = __floordiv__
 
     def __pow__(self, b):
-        if b == 0: return 1
+        if b == 0:
+            return 1
         return self
+
+
 aleph0 = Aleph0()
 
+
 def base(line):
-    return line*(line+1)//2
+    return line * (line + 1) // 2
+
 
 def pairToN(pair):
-    x,y = pair
-    line,index = x+y,y
-    return base(line)+index
+    x, y = pair
+    line, index = x + y, y
+    return base(line) + index
+
 
 def getNthPairInfo(N):
     # Avoid various singularities
-    if N==0:
-        return (0,0)
+    if N == 0:
+        return (0, 0)
 
     # Gallop to find bounds for line
     line = 1
     next = 2
-    while base(next)<=N:
+    while base(next) <= N:
         line = next
         next = line << 1
-    
+
     # Binary search for starting line
     lo = line
-    hi = line<<1
+    hi = line << 1
     while lo + 1 != hi:
-        #assert base(lo) <= N < base(hi)
-        mid = (lo + hi)>>1
-        if base(mid)<=N:
+        # assert base(lo) <= N < base(hi)
+        mid = (lo + hi) >> 1
+        if base(mid) <= N:
             lo = mid
         else:
             hi = mid
@@ -78,18 +96,20 @@ def getNthPairInfo(N):
     line = lo
     return line, N - base(line)
 
+
 def getNthPair(N):
-    line,index = getNthPairInfo(N)
+    line, index = getNthPairInfo(N)
     return (line - index, index)
 
-def getNthPairBounded(N,W=aleph0,H=aleph0,useDivmod=False):
+
+def getNthPairBounded(N, W=aleph0, H=aleph0, useDivmod=False):
     """getNthPairBounded(N, W, H) -> (x, y)
-    
+
     Return the N-th pair such that 0 <= x < W and 0 <= y < H."""
 
     if W <= 0 or H <= 0:
         raise ValueError("Invalid bounds")
-    elif N >= W*H:
+    elif N >= W * H:
         raise ValueError("Invalid input (out of bounds)")
 
     # Simple case...
@@ -98,16 +118,16 @@ def getNthPairBounded(N,W=aleph0,H=aleph0,useDivmod=False):
 
     # Otherwise simplify by assuming W < H
     if H < W:
-        x,y = getNthPairBounded(N,H,W,useDivmod=useDivmod)
-        return y,x
+        x, y = getNthPairBounded(N, H, W, useDivmod=useDivmod)
+        return y, x
 
     if useDivmod:
-        return N%W,N//W
+        return N % W, N // W
     else:
         # Conceptually we want to slide a diagonal line across a
         # rectangle. This gives more interesting results for large
         # bounds than using divmod.
-        
+
         # If in lower left, just return as usual
         cornerSize = base(W)
         if N < cornerSize:
@@ -115,21 +135,26 @@ def getNthPairBounded(N,W=aleph0,H=aleph0,useDivmod=False):
 
         # Otherwise if in upper right, subtract from corner
         if H is not aleph0:
-            M = W*H - N - 1
+            M = W * H - N - 1
             if M < cornerSize:
-                x,y = getNthPair(M)
-                return (W-1-x,H-1-y)
+                x, y = getNthPair(M)
+                return (W - 1 - x, H - 1 - y)
 
         # Otherwise, compile line and index from number of times we
         # wrap.
         N = N - cornerSize
-        index,offset = N%W,N//W
+        index, offset = N % W, N // W
         # p = (W-1, 1+offset) + (-1,1)*index
-        return (W-1-index, 1+offset+index)
-def getNthPairBoundedChecked(N,W=aleph0,H=aleph0,useDivmod=False,GNP=getNthPairBounded):
-    x,y = GNP(N,W,H,useDivmod)
+        return (W - 1 - index, 1 + offset + index)
+
+
+def getNthPairBoundedChecked(
+    N, W=aleph0, H=aleph0, useDivmod=False, GNP=getNthPairBounded
+):
+    x, y = GNP(N, W, H, useDivmod)
     assert 0 <= x < W and 0 <= y < H
-    return x,y
+    return x, y
+
 
 def getNthNTuple(N, W, H=aleph0, useLeftToRight=False):
     """getNthNTuple(N, W, H) -> (x_0, x_1, ..., x_W)
@@ -137,30 +162,36 @@ def getNthNTuple(N, W, H=aleph0, useLeftToRight=False):
     Return the N-th W-tuple, where 0 <= x_i < H."""
 
     if useLeftToRight:
-        elts = [None]*W
+        elts = [None] * W
         for i in range(W):
-            elts[i],N = getNthPairBounded(N, H)
+            elts[i], N = getNthPairBounded(N, H)
         return tuple(elts)
     else:
-        if W==0:
+        if W == 0:
             return ()
-        elif W==1:
+        elif W == 1:
             return (N,)
-        elif W==2:
+        elif W == 2:
             return getNthPairBounded(N, H, H)
         else:
-            LW,RW = W//2, W - (W//2)
-            L,R = getNthPairBounded(N, H**LW, H**RW)
-            return (getNthNTuple(L,LW,H=H,useLeftToRight=useLeftToRight) + 
-                    getNthNTuple(R,RW,H=H,useLeftToRight=useLeftToRight))
+            LW, RW = W // 2, W - (W // 2)
+            L, R = getNthPairBounded(N, H**LW, H**RW)
+            return getNthNTuple(
+                L, LW, H=H, useLeftToRight=useLeftToRight
+            ) + getNthNTuple(R, RW, H=H, useLeftToRight=useLeftToRight)
+
+
 def getNthNTupleChecked(N, W, H=aleph0, useLeftToRight=False, GNT=getNthNTuple):
-    t = GNT(N,W,H,useLeftToRight)
+    t = GNT(N, W, H, useLeftToRight)
     assert len(t) == W
     for i in t:
         assert i < H
     return t
 
-def getNthTuple(N, maxSize=aleph0, maxElement=aleph0, useDivmod=False, useLeftToRight=False):
+
+def getNthTuple(
+    N, maxSize=aleph0, maxElement=aleph0, useDivmod=False, useLeftToRight=False
+):
     """getNthTuple(N, maxSize, maxElement) -> x
 
     Return the N-th tuple where len(x) < maxSize and for y in x, 0 <=
@@ -172,21 +203,30 @@ def getNthTuple(N, maxSize=aleph0, maxElement=aleph0, useDivmod=False, useLeftTo
     N -= 1
     if maxElement is not aleph0:
         if maxSize is aleph0:
-            raise NotImplementedError('Max element size without max size unhandled')
-        bounds = [maxElement**i for i in range(1, maxSize+1)]
-        S,M = getNthPairVariableBounds(N, bounds)
+            raise NotImplementedError("Max element size without max size unhandled")
+        bounds = [maxElement**i for i in range(1, maxSize + 1)]
+        S, M = getNthPairVariableBounds(N, bounds)
     else:
-        S,M = getNthPairBounded(N, maxSize, useDivmod=useDivmod)
-    return getNthNTuple(M, S+1, maxElement, useLeftToRight=useLeftToRight)
-def getNthTupleChecked(N, maxSize=aleph0, maxElement=aleph0, 
-                       useDivmod=False, useLeftToRight=False, GNT=getNthTuple):
+        S, M = getNthPairBounded(N, maxSize, useDivmod=useDivmod)
+    return getNthNTuple(M, S + 1, maxElement, useLeftToRight=useLeftToRight)
+
+
+def getNthTupleChecked(
+    N,
+    maxSize=aleph0,
+    maxElement=aleph0,
+    useDivmod=False,
+    useLeftToRight=False,
+    GNT=getNthTuple,
+):
     # FIXME: maxsize is inclusive
-    t = GNT(N,maxSize,maxElement,useDivmod,useLeftToRight)
+    t = GNT(N, maxSize, maxElement, useDivmod, useLeftToRight)
     assert len(t) <= maxSize
     for i in t:
         assert i < maxElement
     return t
 
+
 def getNthPairVariableBounds(N, bounds):
     """getNthPairVariableBounds(N, bounds) -> (x, y)
 
@@ -203,64 +243,69 @@ def getNthPairVariableBounds(N, bounds):
     active = list(range(len(bounds)))
     active.sort(key=lambda i: bounds[i])
     prevLevel = 0
-    for i,index in enumerate(active):
+    for i, index in enumerate(active):
         level = bounds[index]
         W = len(active) - i
         if level is aleph0:
             H = aleph0
         else:
             H = level - prevLevel
-        levelSize = W*H
-        if N<levelSize: # Found the level
-            idelta,delta = getNthPairBounded(N, W, H)
-            return active[i+idelta],prevLevel+delta
+        levelSize = W * H
+        if N < levelSize:  # Found the level
+            idelta, delta = getNthPairBounded(N, W, H)
+            return active[i + idelta], prevLevel + delta
         else:
             N -= levelSize
             prevLevel = level
     else:
         raise RuntimeError("Unexpected loop completion")
 
+
 def getNthPairVariableBoundsChecked(N, bounds, GNVP=getNthPairVariableBounds):
-    x,y = GNVP(N,bounds)
+    x, y = GNVP(N, bounds)
     assert 0 <= x < len(bounds) and 0 <= y < bounds[x]
-    return (x,y)
+    return (x, y)
+
 
 ###
 
+
 def testPairs():
     W = 3
     H = 6
-    a = [['  ' for x in range(10)] for y in range(10)]
-    b = [['  ' for x in range(10)] for y in range(10)]
-    for i in range(min(W*H,40)):
-        x,y = getNthPairBounded(i,W,H)
-        x2,y2 = getNthPairBounded(i,W,H,useDivmod=True)
-        print(i,(x,y),(x2,y2))
-        a[y][x] = '%2d'%i
-        b[y2][x2] = '%2d'%i
-
-    print('-- a --')
+    a = [["  " for x in range(10)] for y in range(10)]
+    b = [["  " for x in range(10)] for y in range(10)]
+    for i in range(min(W * H, 40)):
+        x, y = getNthPairBounded(i, W, H)
+        x2, y2 = getNthPairBounded(i, W, H, useDivmod=True)
+        print(i, (x, y), (x2, y2))
+        a[y][x] = "%2d" % i
+        b[y2][x2] = "%2d" % i
+
+    print("-- a --")
     for ln in a[::-1]:
-        if ''.join(ln).strip():
-            print('  '.join(ln))
-    print('-- b --')
+        if "".join(ln).strip():
+            print("  ".join(ln))
+    print("-- b --")
     for ln in b[::-1]:
-        if ''.join(ln).strip():
-            print('  '.join(ln))
+        if "".join(ln).strip():
+            print("  ".join(ln))
+
 
 def testPairsVB():
-    bounds = [2,2,4,aleph0,5,aleph0]
-    a = [['  ' for x in range(15)] for y in range(15)]
-    b = [['  ' for x in range(15)] for y in range(15)]
-    for i in range(min(sum(bounds),40)):
-        x,y = getNthPairVariableBounds(i, bounds)
-        print(i,(x,y))
-        a[y][x] = '%2d'%i
-
-    print('-- a --')
+    bounds = [2, 2, 4, aleph0, 5, aleph0]
+    a = [["  " for x in range(15)] for y in range(15)]
+    b = [["  " for x in range(15)] for y in range(15)]
+    for i in range(min(sum(bounds), 40)):
+        x, y = getNthPairVariableBounds(i, bounds)
+        print(i, (x, y))
+        a[y][x] = "%2d" % i
+
+    print("-- a --")
     for ln in a[::-1]:
-        if ''.join(ln).strip():
-            print('  '.join(ln))
+        if "".join(ln).strip():
+            print("  ".join(ln))
+
 
 ###
 
@@ -271,8 +316,7 @@ def testPairsVB():
     getNthNTuple = getNthNTupleChecked
     getNthTuple = getNthTupleChecked
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     testPairs()
 
     testPairsVB()
-

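The pairing helpers reformatted above implement a Cantor-style diagonal
walk; a self-contained round-trip sketch using the same definitions as
the file:

    def base(line):
        return line * (line + 1) // 2

    def pairToN(pair):
        x, y = pair
        line, index = x + y, y
        return base(line) + index

    # (3, 2) sits on diagonal line 5 at index 2, so its number is
    # base(5) + 2 = 15 + 2 = 17.
    assert pairToN((3, 2)) == 17
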
diff  --git a/clang/utils/ABITest/TypeGen.py b/clang/utils/ABITest/TypeGen.py
index 8561baea617b4..6ebec7bdf1d1c 100644
--- a/clang/utils/ABITest/TypeGen.py
+++ b/clang/utils/ABITest/TypeGen.py
@@ -18,6 +18,7 @@
 ###
 # Actual type types
 
+
 class Type(object):
     def isBitField(self):
         return False
@@ -26,11 +27,12 @@ def isPaddingBitField(self):
         return False
 
     def getTypeName(self, printer):
-        name = 'T%d' % len(printer.types)
+        name = "T%d" % len(printer.types)
         typedef = self.getTypedefDef(name, printer)
         printer.addDeclaration(typedef)
         return name
 
+
 class BuiltinType(Type):
     def __init__(self, name, size, bitFieldSize=None):
         self.name = name
@@ -56,6 +58,7 @@ def sizeof(self):
     def __str__(self):
         return self.name
 
+
 class EnumType(Type):
     unique_id = 0
 
@@ -66,21 +69,22 @@ def __init__(self, index, enumerators):
         self.__class__.unique_id += 1
 
     def getEnumerators(self):
-        result = ''
+        result = ""
         for i, init in enumerate(self.enumerators):
             if i > 0:
-                result = result + ', '
-            result = result + 'enum%dval%d_%d' % (self.index, i, self.unique_id)
+                result = result + ", "
+            result = result + "enum%dval%d_%d" % (self.index, i, self.unique_id)
             if init:
-                result = result + ' = %s' % (init)
+                result = result + " = %s" % (init)
 
         return result
 
     def __str__(self):
-        return 'enum { %s }' % (self.getEnumerators())
+        return "enum { %s }" % (self.getEnumerators())
 
     def getTypedefDef(self, name, printer):
-        return 'typedef enum %s { %s } %s;'%(name, self.getEnumerators(), name)
+        return "typedef enum %s { %s } %s;" % (name, self.getEnumerators(), name)
+
 
 class RecordType(Type):
     def __init__(self, index, isUnion, fields):
@@ -96,25 +100,36 @@ def getField(t):
             else:
                 return "%s;" % t
 
-        return '%s { %s }'%(('struct','union')[self.isUnion],
-                            ' '.join(map(getField, self.fields)))
+        return "%s { %s }" % (
+            ("struct", "union")[self.isUnion],
+            " ".join(map(getField, self.fields)),
+        )
 
     def getTypedefDef(self, name, printer):
         def getField(it):
             i, t = it
             if t.isBitField():
                 if t.isPaddingBitField():
-                    return '%s : 0;'%(printer.getTypeName(t),)
+                    return "%s : 0;" % (printer.getTypeName(t),)
                 else:
-                    return '%s field%d : %d;'%(printer.getTypeName(t),i,
-                                               t.getBitFieldSize())
+                    return "%s field%d : %d;" % (
+                        printer.getTypeName(t),
+                        i,
+                        t.getBitFieldSize(),
+                    )
             else:
-                return '%s field%d;'%(printer.getTypeName(t),i)
+                return "%s field%d;" % (printer.getTypeName(t), i)
+
         fields = [getField(f) for f in enumerate(self.fields)]
         # Name the struct for more readable LLVM IR.
-        return 'typedef %s %s { %s } %s;'%(('struct','union')[self.isUnion],
-                                           name, ' '.join(fields), name)
-                                           
+        return "typedef %s %s { %s } %s;" % (
+            ("struct", "union")[self.isUnion],
+            name,
+            " ".join(fields),
+            name,
+        )
+
+
 class ArrayType(Type):
     def __init__(self, index, isVector, elementType, size):
         if isVector:
@@ -135,24 +150,27 @@ def __init__(self, index, isVector, elementType, size):
 
     def __str__(self):
         if self.isVector:
-            return 'vector (%s)[%d]'%(self.elementType,self.size)
+            return "vector (%s)[%d]" % (self.elementType, self.size)
         elif self.size is not None:
-            return '(%s)[%d]'%(self.elementType,self.size)
+            return "(%s)[%d]" % (self.elementType, self.size)
         else:
-            return '(%s)[]'%(self.elementType,)
+            return "(%s)[]" % (self.elementType,)
 
     def getTypedefDef(self, name, printer):
         elementName = printer.getTypeName(self.elementType)
         if self.isVector:
-            return 'typedef %s %s __attribute__ ((vector_size (%d)));'%(elementName,
-                                                                        name,
-                                                                        self.size)
+            return "typedef %s %s __attribute__ ((vector_size (%d)));" % (
+                elementName,
+                name,
+                self.size,
+            )
         else:
             if self.size is None:
-                sizeStr = ''
+                sizeStr = ""
             else:
                 sizeStr = str(self.size)
-            return 'typedef %s %s[%s];'%(elementName, name, sizeStr)
+            return "typedef %s %s[%s];" % (elementName, name, sizeStr)
+
 
 class ComplexType(Type):
     def __init__(self, index, elementType):
@@ -160,10 +178,11 @@ def __init__(self, index, elementType):
         self.elementType = elementType
 
     def __str__(self):
-        return '_Complex (%s)'%(self.elementType)
+        return "_Complex (%s)" % (self.elementType)
 
     def getTypedefDef(self, name, printer):
-        return 'typedef _Complex %s %s;'%(printer.getTypeName(self.elementType), name)
+        return "typedef _Complex %s %s;" % (printer.getTypeName(self.elementType), name)
+
 
 class FunctionType(Type):
     def __init__(self, index, returnType, argTypes):
@@ -173,29 +192,31 @@ def __init__(self, index, returnType, argTypes):
 
     def __str__(self):
         if self.returnType is None:
-            rt = 'void'
+            rt = "void"
         else:
             rt = str(self.returnType)
         if not self.argTypes:
-            at = 'void'
+            at = "void"
         else:
-            at = ', '.join(map(str, self.argTypes))
-        return '%s (*)(%s)'%(rt, at)
+            at = ", ".join(map(str, self.argTypes))
+        return "%s (*)(%s)" % (rt, at)
 
     def getTypedefDef(self, name, printer):
         if self.returnType is None:
-            rt = 'void'
+            rt = "void"
         else:
             rt = str(self.returnType)
         if not self.argTypes:
-            at = 'void'
+            at = "void"
         else:
-            at = ', '.join(map(str, self.argTypes))
-        return 'typedef %s (*%s)(%s);'%(rt, name, at)
+            at = ", ".join(map(str, self.argTypes))
+        return "typedef %s (*%s)(%s);" % (rt, name, at)
+
 
 ###
 # Type enumerators
 
+
 class TypeGenerator(object):
     def __init__(self):
         self.cache = {}
@@ -213,6 +234,7 @@ def get(self, N):
     def generateType(self, N):
         abstract
 
+
 class FixedTypeGenerator(TypeGenerator):
     def __init__(self, types):
         TypeGenerator.__init__(self)
@@ -225,6 +247,7 @@ def setCardinality(self):
     def generateType(self, N):
         return self.types[N]
 
+
 # Factorial
 def fact(n):
     result = 1
@@ -233,19 +256,23 @@ def fact(n):
         n = n - 1
     return result
 
+
 # Compute the number of combinations (n choose k)
-def num_combinations(n, k): 
+def num_combinations(n, k):
     return fact(n) // (fact(k) * fact(n - k))
 
+
 # Enumerate the combinations choosing k elements from the list of values
 def combinations(values, k):
     # From ActiveState Recipe 190465: Generator for permutations,
     # combinations, selections of a sequence
-    if k==0: yield []
+    if k == 0:
+        yield []
     else:
-        for i in range(len(values)-k+1):
-            for cc in combinations(values[i+1:],k-1):
-                yield [values[i]]+cc
+        for i in range(len(values) - k + 1):
+            for cc in combinations(values[i + 1 :], k - 1):
+                yield [values[i]] + cc
+
 
 class EnumTypeGenerator(TypeGenerator):
     def __init__(self, values, minEnumerators, maxEnumerators):
@@ -277,37 +304,40 @@ def generateType(self, n):
         for enumerators in combinations(self.values, numEnumerators):
             if i == n - valuesCovered:
                 return EnumType(n, enumerators)
-                
+
             i = i + 1
 
         assert False
 
+
 class ComplexTypeGenerator(TypeGenerator):
     def __init__(self, typeGen):
         TypeGenerator.__init__(self)
         self.typeGen = typeGen
         self.setCardinality()
-    
+
     def setCardinality(self):
         self.cardinality = self.typeGen.cardinality
 
     def generateType(self, N):
         return ComplexType(N, self.typeGen.get(N))
 
+
 class VectorTypeGenerator(TypeGenerator):
     def __init__(self, typeGen, sizes):
         TypeGenerator.__init__(self)
         self.typeGen = typeGen
-        self.sizes = tuple(map(int,sizes))
+        self.sizes = tuple(map(int, sizes))
         self.setCardinality()
 
     def setCardinality(self):
-        self.cardinality = len(self.sizes)*self.typeGen.cardinality
+        self.cardinality = len(self.sizes) * self.typeGen.cardinality
 
     def generateType(self, N):
-        S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
+        S, T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
         return ArrayType(N, True, self.typeGen.get(T), self.sizes[S])
 
+
 class FixedArrayTypeGenerator(TypeGenerator):
     def __init__(self, typeGen, sizes):
         TypeGenerator.__init__(self)
@@ -316,12 +346,13 @@ def __init__(self, typeGen, sizes):
         self.setCardinality()
 
     def setCardinality(self):
-        self.cardinality = len(self.sizes)*self.typeGen.cardinality
+        self.cardinality = len(self.sizes) * self.typeGen.cardinality
 
     def generateType(self, N):
-        S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
+        S, T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
+        return ArrayType(N, False, self.typeGen.get(T), self.sizes[S])
 
+
 class ArrayTypeGenerator(TypeGenerator):
     def __init__(self, typeGen, maxSize, useIncomplete=False, useZero=False):
         TypeGenerator.__init__(self)
@@ -336,9 +367,9 @@ def setCardinality(self):
         self.cardinality = self.W * self.typeGen.cardinality
 
     def generateType(self, N):
-        S,T = getNthPairBounded(N, self.W, self.typeGen.cardinality)
+        S, T = getNthPairBounded(N, self.W, self.typeGen.cardinality)
         if self.useIncomplete:
-            if S==0:
+            if S == 0:
                 size = None
                 S = None
             else:
@@ -347,9 +378,10 @@ def generateType(self, N):
             if self.useZero:
                 size = S
             else:
-                size = S + 1        
+                size = S + 1
         return ArrayType(N, False, self.typeGen.get(T), size)
 
+
 class RecordTypeGenerator(TypeGenerator):
     def __init__(self, typeGen, useUnion, maxSize):
         TypeGenerator.__init__(self)
@@ -361,20 +393,24 @@ def __init__(self, typeGen, useUnion, maxSize):
     def setCardinality(self):
         M = 1 + self.useUnion
         if self.maxSize is aleph0:
-            S =  aleph0 * self.typeGen.cardinality
+            S = aleph0 * self.typeGen.cardinality
         else:
             S = 0
-            for i in range(self.maxSize+1):
-                S += M * (self.typeGen.cardinality ** i)
+            for i in range(self.maxSize + 1):
+                S += M * (self.typeGen.cardinality**i)
         self.cardinality = S
 
     def generateType(self, N):
-        isUnion,I = False,N
+        isUnion, I = False, N
         if self.useUnion:
-            isUnion,I = (I&1),I>>1
-        fields = [self.typeGen.get(f) for f in getNthTuple(I,self.maxSize,self.typeGen.cardinality)]
+            isUnion, I = (I & 1), I >> 1
+        fields = [
+            self.typeGen.get(f)
+            for f in getNthTuple(I, self.maxSize, self.typeGen.cardinality)
+        ]
         return RecordType(N, isUnion, fields)
 
+
 class FunctionTypeGenerator(TypeGenerator):
     def __init__(self, typeGen, useReturn, maxSize):
         TypeGenerator.__init__(self)
@@ -382,25 +418,25 @@ def __init__(self, typeGen, useReturn, maxSize):
         self.useReturn = useReturn
         self.maxSize = maxSize
         self.setCardinality()
-    
+
     def setCardinality(self):
         if self.maxSize is aleph0:
             S = aleph0 * self.typeGen.cardinality
         elif self.useReturn:
             S = 0
-            for i in range(1,self.maxSize+1+1):
-                S += self.typeGen.cardinality ** i
+            for i in range(1, self.maxSize + 1 + 1):
+                S += self.typeGen.cardinality**i
         else:
             S = 0
-            for i in range(self.maxSize+1):
-                S += self.typeGen.cardinality ** i
+            for i in range(self.maxSize + 1):
+                S += self.typeGen.cardinality**i
         self.cardinality = S
-    
+
     def generateType(self, N):
         if self.useReturn:
             # Skip the empty tuple
-            argIndices = getNthTuple(N+1, self.maxSize+1, self.typeGen.cardinality)
-            retIndex,argIndices = argIndices[0],argIndices[1:]
+            argIndices = getNthTuple(N + 1, self.maxSize + 1, self.typeGen.cardinality)
+            retIndex, argIndices = argIndices[0], argIndices[1:]
             retTy = self.typeGen.get(retIndex)
         else:
             retTy = None
@@ -408,6 +444,7 @@ def generateType(self, N):
         args = [self.typeGen.get(i) for i in argIndices]
         return FunctionType(N, retTy, args)
 
+
 class AnyTypeGenerator(TypeGenerator):
     def __init__(self):
         TypeGenerator.__init__(self)
@@ -415,15 +452,17 @@ def __init__(self):
         self.bounds = []
         self.setCardinality()
         self._cardinality = None
-        
+
     def getCardinality(self):
         if self._cardinality is None:
             return aleph0
         else:
             return self._cardinality
+
     def setCardinality(self):
         self.bounds = [g.cardinality for g in self.generators]
         self._cardinality = sum(self.bounds)
+
     cardinality = property(getCardinality, None)
 
     def addGenerator(self, g):
@@ -434,36 +473,36 @@ def addGenerator(self, g):
             for g in self.generators:
                 g.setCardinality()
             self.setCardinality()
-            if (self._cardinality is aleph0) or prev==self._cardinality:
+            if (self._cardinality is aleph0) or prev == self._cardinality:
                 break
         else:
             raise RuntimeError("Infinite loop in setting cardinality")
 
     def generateType(self, N):
-        index,M = getNthPairVariableBounds(N, self.bounds)
+        index, M = getNthPairVariableBounds(N, self.bounds)
         return self.generators[index].get(M)
 
+
 def test():
-    fbtg = FixedTypeGenerator([BuiltinType('char', 4),
-                               BuiltinType('char', 4, 0),
-                               BuiltinType('int',  4, 5)])
+    fbtg = FixedTypeGenerator(
+        [BuiltinType("char", 4), BuiltinType("char", 4, 0), BuiltinType("int", 4, 5)]
+    )
 
     fields1 = AnyTypeGenerator()
-    fields1.addGenerator( fbtg )
+    fields1.addGenerator(fbtg)
 
     fields0 = AnyTypeGenerator()
-    fields0.addGenerator( fbtg )
-#    fields0.addGenerator( RecordTypeGenerator(fields1, False, 4) )
+    fields0.addGenerator(fbtg)
+    #    fields0.addGenerator( RecordTypeGenerator(fields1, False, 4) )
 
-    btg = FixedTypeGenerator([BuiltinType('char', 4),
-                              BuiltinType('int',  4)])
-    etg = EnumTypeGenerator([None, '-1', '1', '1u'], 0, 3)
+    btg = FixedTypeGenerator([BuiltinType("char", 4), BuiltinType("int", 4)])
+    etg = EnumTypeGenerator([None, "-1", "1", "1u"], 0, 3)
 
     atg = AnyTypeGenerator()
-    atg.addGenerator( btg )
-    atg.addGenerator( RecordTypeGenerator(fields0, False, 4) )
-    atg.addGenerator( etg )
-    print('Cardinality:',atg.cardinality)
+    atg.addGenerator(btg)
+    atg.addGenerator(RecordTypeGenerator(fields0, False, 4))
+    atg.addGenerator(etg)
+    print("Cardinality:", atg.cardinality)
     for i in range(100):
         if i == atg.cardinality:
             try:
@@ -471,7 +510,8 @@ def test():
                 raise RuntimeError("Cardinality was wrong")
             except AssertionError:
                 break
-        print('%4d: %s'%(i, atg.get(i)))
+        print("%4d: %s" % (i, atg.get(i)))
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     test()

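The combinations() generator reformatted above is small enough to check
by hand; a self-contained sketch with an assumed input:

    def combinations(values, k):
        # Yield all k-element selections of values, preserving order.
        if k == 0:
            yield []
        else:
            for i in range(len(values) - k + 1):
                for cc in combinations(values[i + 1 :], k - 1):
                    yield [values[i]] + cc

    assert list(combinations([1, 2, 3], 2)) == [[1, 2], [1, 3], [2, 3]]
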
diff  --git a/clang/utils/CIndex/completion_logger_server.py b/clang/utils/CIndex/completion_logger_server.py
index 201667117fc2f..ff33331fc53c5 100755
--- a/clang/utils/CIndex/completion_logger_server.py
+++ b/clang/utils/CIndex/completion_logger_server.py
@@ -5,41 +5,47 @@
 from time import strftime
 import datetime
 
+
 def main():
-  if len(sys.argv) < 4:
-    print("completion_logger_server.py <listen address> <listen port> <log file>")
-    exit(1)
-
-  host = sys.argv[1]
-  port = int(sys.argv[2])
-  buf = 1024 * 8
-  addr = (host,port)
-  
-  # Create socket and bind to address
-  UDPSock = socket(AF_INET,SOCK_DGRAM)
-  UDPSock.bind(addr)
-  
-  print("Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3]))
-
-  # Open the logging file.
-  f = open(sys.argv[3], "a")
-
-  # Receive messages
-  while 1:
-    data,addr = UDPSock.recvfrom(buf)
-    if not data:
-      break
-    else:
-      f.write("{ ");
-      f.write("\"time\": \"{0}\"".format(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')))
-      f.write(", \"sender\": \"{0}\" ".format(addr[0]))
-      f.write(", \"data\": ")
-      f.write(data)
-      f.write(" }\n")
-      f.flush()
-
-  # Close socket
-  UDPSock.close()
-
-if __name__ == '__main__':
-  main()
+    if len(sys.argv) < 4:
+        print("completion_logger_server.py <listen address> <listen port> <log file>")
+        exit(1)
+
+    host = sys.argv[1]
+    port = int(sys.argv[2])
+    buf = 1024 * 8
+    addr = (host, port)
+
+    # Create socket and bind to address
+    UDPSock = socket(AF_INET, SOCK_DGRAM)
+    UDPSock.bind(addr)
+
+    print("Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3]))
+
+    # Open the logging file.
+    f = open(sys.argv[3], "a")
+
+    # Receive messages
+    while 1:
+        data, addr = UDPSock.recvfrom(buf)
+        if not data:
+            break
+        else:
+            f.write("{ ")
+            f.write(
+                '"time": "{0}"'.format(
+                    datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
+                )
+            )
+            f.write(', "sender": "{0}" '.format(addr[0]))
+            f.write(', "data": ')
+            f.write(data)
+            f.write(" }\n")
+            f.flush()
+
+    # Close socket
+    UDPSock.close()
+
+
+if __name__ == "__main__":
+    main()

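A hypothetical client for exercising the server above (the address,
port, and payload are assumptions for a local test run; note that
recvfrom() returns bytes on Python 3, which the unchanged f.write(data)
call does not decode):

    from socket import socket, AF_INET, SOCK_DGRAM

    sock = socket(AF_INET, SOCK_DGRAM)
    # Address and payload are placeholders for a local test run.
    sock.sendto(b'{"completions": 1}', ("127.0.0.1", 8000))
    sock.close()
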
diff  --git a/clang/utils/ClangDataFormat.py b/clang/utils/ClangDataFormat.py
index 2a5906db65848..28c32a123bdf9 100644
--- a/clang/utils/ClangDataFormat.py
+++ b/clang/utils/ClangDataFormat.py
@@ -21,120 +21,150 @@
 
 import lldb
 
+
 def __lldb_init_module(debugger, internal_dict):
-	debugger.HandleCommand("type summary add -F ClangDataFormat.SourceLocation_summary clang::SourceLocation")
-	debugger.HandleCommand("type summary add -F ClangDataFormat.QualType_summary clang::QualType")
+    debugger.HandleCommand(
+        "type summary add -F ClangDataFormat.SourceLocation_summary clang::SourceLocation"
+    )
+    debugger.HandleCommand(
+        "type summary add -F ClangDataFormat.QualType_summary clang::QualType"
+    )
+
 
 def SourceLocation_summary(srcloc, internal_dict):
-	return SourceLocation(srcloc).summary()
+    return SourceLocation(srcloc).summary()
+
 
 def QualType_summary(qualty, internal_dict):
-	return QualType(qualty).summary()
+    return QualType(qualty).summary()
+
 
 class SourceLocation(object):
-	def __init__(self, srcloc):
-		self.srcloc = srcloc
-		self.ID = srcloc.GetChildAtIndex(0).GetValueAsUnsigned()
-		self.frame = srcloc.GetFrame()
-	
-	def offset(self):
-		return getValueFromExpression(self.srcloc, ".getOffset()").GetValueAsUnsigned()
-
-	def isInvalid(self):
-		return self.ID == 0
-
-	def isMacro(self):
-		return getValueFromExpression(self.srcloc, ".isMacroID()").GetValueAsUnsigned()
-
-	def isLocal(self, srcmgr_path):
-		return self.frame.EvaluateExpression("(%s).isLocalSourceLocation(%s)" % (srcmgr_path, getExpressionPath(self.srcloc))).GetValueAsUnsigned()
-
-	def getPrint(self, srcmgr_path):
-		print_str = getValueFromExpression(self.srcloc, ".printToString(%s)" % srcmgr_path)
-		return print_str.GetSummary()
-
-	def summary(self):
-		if self.isInvalid():
-			return "<invalid loc>"
-		srcmgr_path = findObjectExpressionPath("clang::SourceManager", self.frame)
-		if srcmgr_path:
-			return "%s (offset: %d, %s, %s)" % (self.getPrint(srcmgr_path), self.offset(), "macro" if self.isMacro() else "file", "local" if self.isLocal(srcmgr_path) else "loaded")
-		return "(offset: %d, %s)" % (self.offset(), "macro" if self.isMacro() else "file")
+    def __init__(self, srcloc):
+        self.srcloc = srcloc
+        self.ID = srcloc.GetChildAtIndex(0).GetValueAsUnsigned()
+        self.frame = srcloc.GetFrame()
+
+    def offset(self):
+        return getValueFromExpression(self.srcloc, ".getOffset()").GetValueAsUnsigned()
+
+    def isInvalid(self):
+        return self.ID == 0
+
+    def isMacro(self):
+        return getValueFromExpression(self.srcloc, ".isMacroID()").GetValueAsUnsigned()
+
+    def isLocal(self, srcmgr_path):
+        return self.frame.EvaluateExpression(
+            "(%s).isLocalSourceLocation(%s)"
+            % (srcmgr_path, getExpressionPath(self.srcloc))
+        ).GetValueAsUnsigned()
+
+    def getPrint(self, srcmgr_path):
+        print_str = getValueFromExpression(
+            self.srcloc, ".printToString(%s)" % srcmgr_path
+        )
+        return print_str.GetSummary()
+
+    def summary(self):
+        if self.isInvalid():
+            return "<invalid loc>"
+        srcmgr_path = findObjectExpressionPath("clang::SourceManager", self.frame)
+        if srcmgr_path:
+            return "%s (offset: %d, %s, %s)" % (
+                self.getPrint(srcmgr_path),
+                self.offset(),
+                "macro" if self.isMacro() else "file",
+                "local" if self.isLocal(srcmgr_path) else "loaded",
+            )
+        return "(offset: %d, %s)" % (
+            self.offset(),
+            "macro" if self.isMacro() else "file",
+        )
+
 
 class QualType(object):
-	def __init__(self, qualty):
-		self.qualty = qualty
+    def __init__(self, qualty):
+        self.qualty = qualty
 
-	def getAsString(self):
-		std_str = getValueFromExpression(self.qualty, ".getAsString()")
-		return std_str.GetSummary()
+    def getAsString(self):
+        std_str = getValueFromExpression(self.qualty, ".getAsString()")
+        return std_str.GetSummary()
+
+    def summary(self):
+        desc = self.getAsString()
+        if desc == '"NULL TYPE"':
+            return "<NULL TYPE>"
+        return desc
 
-	def summary(self):
-		desc = self.getAsString()
-		if desc == '"NULL TYPE"':
-			return "<NULL TYPE>"
-		return desc
 
 # Key is a (function address, type name) tuple, value is the expression path for
 # an object with such a type name from inside that function.
 FramePathMapCache = {}
 
+
 def findObjectExpressionPath(typename, frame):
-	func_addr = frame.GetFunction().GetStartAddress().GetFileAddress()
-	key = (func_addr, typename)
-	try:
-		return FramePathMapCache[key]
-	except KeyError:
-		#print "CACHE MISS"
-		path = None
-		obj = findObject(typename, frame)
-		if obj:
-			path = getExpressionPath(obj)
-		FramePathMapCache[key] = path
-		return path
+    func_addr = frame.GetFunction().GetStartAddress().GetFileAddress()
+    key = (func_addr, typename)
+    try:
+        return FramePathMapCache[key]
+    except KeyError:
+        # print "CACHE MISS"
+        path = None
+        obj = findObject(typename, frame)
+        if obj:
+            path = getExpressionPath(obj)
+        FramePathMapCache[key] = path
+        return path
+
 
 def findObject(typename, frame):
-	def getTypename(value):
-		# FIXME: lldb should provide something like getBaseType
-		ty = value.GetType()
-		if ty.IsPointerType() or ty.IsReferenceType():
-			return ty.GetPointeeType().GetName()
-		return ty.GetName()
-
-	def searchForType(value, searched):
-		tyname = getTypename(value)
-		#print "SEARCH:", getExpressionPath(value), value.GetType().GetName()
-		if tyname == typename:
-			return value
-		ty = value.GetType()
-		if not (ty.IsPointerType() or
-		        ty.IsReferenceType() or
-				# FIXME: lldb should provide something like getCanonicalType
-		        tyname.startswith("llvm::IntrusiveRefCntPtr<") or
-		        tyname.startswith("llvm::OwningPtr<")):
-			return None
-		# FIXME: Hashing for SBTypes does not seem to work correctly, uses the typename instead,
-		# and not the canonical one unfortunately.
-		if tyname in searched:
-			return None
-		searched.add(tyname)
-		for i in range(value.GetNumChildren()):
-			child = value.GetChildAtIndex(i, 0, False)
-			found = searchForType(child, searched)
-			if found:
-				return found
-
-	searched = set()
-	value_list = frame.GetVariables(True, True, True, True)
-	for val in value_list:
-		found = searchForType(val, searched)
-		if found:
-			return found if not found.TypeIsPointerType() else found.Dereference()
+    def getTypename(value):
+        # FIXME: lldb should provide something like getBaseType
+        ty = value.GetType()
+        if ty.IsPointerType() or ty.IsReferenceType():
+            return ty.GetPointeeType().GetName()
+        return ty.GetName()
+
+    def searchForType(value, searched):
+        tyname = getTypename(value)
+        # print "SEARCH:", getExpressionPath(value), value.GetType().GetName()
+        if tyname == typename:
+            return value
+        ty = value.GetType()
+        if not (
+            ty.IsPointerType()
+            or ty.IsReferenceType()
+            or
+            # FIXME: lldb should provide something like getCanonicalType
+            tyname.startswith("llvm::IntrusiveRefCntPtr<")
+            or tyname.startswith("llvm::OwningPtr<")
+        ):
+            return None
+        # FIXME: Hashing for SBTypes does not seem to work correctly, uses the typename instead,
+        # and not the canonical one unfortunately.
+        if tyname in searched:
+            return None
+        searched.add(tyname)
+        for i in range(value.GetNumChildren()):
+            child = value.GetChildAtIndex(i, 0, False)
+            found = searchForType(child, searched)
+            if found:
+                return found
+
+    searched = set()
+    value_list = frame.GetVariables(True, True, True, True)
+    for val in value_list:
+        found = searchForType(val, searched)
+        if found:
+            return found if not found.TypeIsPointerType() else found.Dereference()
+
 
 def getValueFromExpression(val, expr):
-	return val.GetFrame().EvaluateExpression(getExpressionPath(val) + expr)
+    return val.GetFrame().EvaluateExpression(getExpressionPath(val) + expr)
+
 
 def getExpressionPath(val):
-	stream = lldb.SBStream()
-	val.GetExpressionPath(stream)
-	return stream.GetData()
+    stream = lldb.SBStream()
+    val.GetExpressionPath(stream)
+    return stream.GetData()

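A sketch of loading these formatters through lldb's Python API rather
than an interactive session (the script path below is an assumption;
interactively, the equivalent is `command script import`):

    import lldb

    debugger = lldb.SBDebugger.Create()
    # Hypothetical path; point this at your clang checkout.
    debugger.HandleCommand(
        "command script import /path/to/clang/utils/ClangDataFormat.py"
    )
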
diff  --git a/clang/utils/TestUtils/deep-stack.py b/clang/utils/TestUtils/deep-stack.py
index 10bf47acb1f77..4cf5789abdb12 100755
--- a/clang/utils/TestUtils/deep-stack.py
+++ b/clang/utils/TestUtils/deep-stack.py
@@ -1,25 +1,30 @@
 #!/usr/bin/env python
 
 from __future__ import absolute_import, division, print_function
+
+
 def pcall(f, N):
     if N == 0:
-        print('    f(0)', file=f)
+        print("    f(0)", file=f)
         return
 
-    print('    f(', file=f)
+    print("    f(", file=f)
     pcall(f, N - 1)
-    print('     )', file=f)
+    print("     )", file=f)
+
 
 def main():
-    f = open('t.c','w')
-    print('int f(int n) { return n; }', file=f)
-    print('int t() {', file=f)
-    print('  return', file=f)
+    f = open("t.c", "w")
+    print("int f(int n) { return n; }", file=f)
+    print("int t() {", file=f)
+    print("  return", file=f)
     pcall(f, 10000)
-    print('  ;', file=f)
-    print('}', file=f)
+    print("  ;", file=f)
+    print("}", file=f)
+
 
 if __name__ == "__main__":
     import sys
+
     sys.setrecursionlimit(100000)
     main()

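For context on the `sys.setrecursionlimit(100000)` call retained above: pcall() recurses once per nesting level, and the script asks for 10,000 levels, well past CPython's default limit of 1,000. A self-contained sketch of the generator at a tiny depth (hypothetical preview, not part of the commit):

    import io


    def pcall(f, N):  # same shape as the generator above
        if N == 0:
            print("    f(0)", file=f)
            return
        print("    f(", file=f)
        pcall(f, N - 1)
        print("     )", file=f)


    buf = io.StringIO()
    pcall(buf, 2)
    print(buf.getvalue())  # two "f(" lines wrapped around "f(0)", then the ")" lines
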
diff --git a/clang/utils/analyzer/CmpRuns.py b/clang/utils/analyzer/CmpRuns.py
index 61fd044c900fa..94c9907bff315 100644
--- a/clang/utils/analyzer/CmpRuns.py
+++ b/clang/utils/analyzer/CmpRuns.py
@@ -35,8 +35,20 @@
 from collections import defaultdict
 from copy import copy
 from enum import Enum
-from typing import (Any, DefaultDict, Dict, List, NamedTuple, Optional,
-                    Sequence, Set, TextIO, TypeVar, Tuple, Union)
+from typing import (
+    Any,
+    DefaultDict,
+    Dict,
+    List,
+    NamedTuple,
+    Optional,
+    Sequence,
+    Set,
+    TextIO,
+    TypeVar,
+    Tuple,
+    Union,
+)
 
 
 Number = Union[int, float]
@@ -46,7 +58,7 @@
 # Diff in a form: field -> (before, after)
 JSONDiff = Dict[str, Tuple[str, str]]
 # Type for generics
-T = TypeVar('T')
+T = TypeVar("T")
 
 STATS_REGEXP = re.compile(r"Statistics: (\{.+\})", re.MULTILINE | re.DOTALL)
 
@@ -55,9 +67,10 @@ class Colors:
     """
     Color for terminal highlight.
     """
-    RED = '\x1b[2;30;41m'
-    GREEN = '\x1b[6;30;42m'
-    CLEAR = '\x1b[0m'
+
+    RED = "\x1b[2;30;41m"
+    GREEN = "\x1b[6;30;42m"
+    CLEAR = "\x1b[0m"
 
 
 class HistogramType(str, Enum):
@@ -78,65 +91,66 @@ class SingleRunInfo:
     root - the name of the root directory, which will be disregarded when
     determining the source file name
     """
-    def __init__(self, results: ResultsDirectory,
-                 verbose_log: Optional[str] = None):
+
+    def __init__(self, results: ResultsDirectory, verbose_log: Optional[str] = None):
         self.path = results.path
         self.root = results.root.rstrip("/\\")
         self.verbose_log = verbose_log
 
 
 class AnalysisDiagnostic:
-    def __init__(self, data: Plist, report: "AnalysisReport",
-                 html_report: Optional[str]):
+    def __init__(
+        self, data: Plist, report: "AnalysisReport", html_report: Optional[str]
+    ):
         self._data = data
-        self._loc = self._data['location']
+        self._loc = self._data["location"]
         self._report = report
         self._html_report = html_report
-        self._report_size = len(self._data['path'])
+        self._report_size = len(self._data["path"])
 
     def get_file_name(self) -> str:
         root = self._report.run.root
-        file_name = self._report.files[self._loc['file']]
+        file_name = self._report.files[self._loc["file"]]
 
         if file_name.startswith(root) and len(root) > 0:
-            return file_name[len(root) + 1:]
+            return file_name[len(root) + 1 :]
 
         return file_name
 
     def get_root_file_name(self) -> str:
-        path = self._data['path']
+        path = self._data["path"]
 
         if not path:
             return self.get_file_name()
 
         p = path[0]
-        if 'location' in p:
-            file_index = p['location']['file']
+        if "location" in p:
+            file_index = p["location"]["file"]
         else:  # control edge
-            file_index = path[0]['edges'][0]['start'][0]['file']
+            file_index = path[0]["edges"][0]["start"][0]["file"]
 
         out = self._report.files[file_index]
         root = self._report.run.root
 
         if out.startswith(root):
-            return out[len(root):]
+            return out[len(root) :]
 
         return out
 
     def get_line(self) -> int:
-        return self._loc['line']
+        return self._loc["line"]
 
     def get_column(self) -> int:
-        return self._loc['col']
+        return self._loc["col"]
 
     def get_path_length(self) -> int:
         return self._report_size
 
     def get_category(self) -> str:
-        return self._data['category']
+        return self._data["category"]
 
     def get_description(self) -> str:
-        return self._data['description']
+        return self._data["description"]
 
     def get_location(self) -> str:
         return f"{self.get_file_name()}:{self.get_line()}:{self.get_column()}"
@@ -174,8 +188,10 @@ def get_readable_name(self) -> str:
 
         line = self.get_line()
         col = self.get_column()
-        return f"{file_prefix}{funcname_postfix}:{line}:{col}" \
+        return (
+            f"{file_prefix}{funcname_postfix}:{line}:{col}"
             f", {self.get_category()}: {self.get_description()}"
+        )
 
     KEY_FIELDS = ["check_name", "category", "description"]
 
@@ -185,9 +201,11 @@ def is_similar_to(self, other: "AnalysisDiagnostic") -> bool:
         return len(self.get_diffs(other)) != len(self.KEY_FIELDS)
 
     def get_diffs(self, other: "AnalysisDiagnostic") -> JSONDiff:
-        return {field: (self._data[field], other._data[field])
-                for field in self.KEY_FIELDS
-                if self._data[field] != other._data[field]}
+        return {
+            field: (self._data[field], other._data[field])
+            for field in self.KEY_FIELDS
+            if self._data[field] != other._data[field]
+        }
 
     # Note, the data format is not an API and may change from one analyzer
     # version to another.
@@ -222,42 +240,44 @@ def read_single_file(self, path: str, delete_empty: bool):
         with open(path, "rb") as plist_file:
             data = plistlib.load(plist_file)
 
-        if 'statistics' in data:
-            self.raw_stats.append(json.loads(data['statistics']))
-            data.pop('statistics')
+        if "statistics" in data:
+            self.raw_stats.append(json.loads(data["statistics"]))
+            data.pop("statistics")
 
         # We want to retrieve the clang version even if there are no
         # reports. Assume that all reports were created using the same
         # clang version (this is always true and is more efficient).
-        if 'clang_version' in data:
+        if "clang_version" in data:
             if self.clang_version is None:
-                self.clang_version = data.pop('clang_version')
+                self.clang_version = data.pop("clang_version")
             else:
-                data.pop('clang_version')
+                data.pop("clang_version")
 
         # Ignore/delete empty reports.
-        if not data['files']:
+        if not data["files"]:
             if delete_empty:
                 os.remove(path)
             return
 
         # Extract the HTML reports, if they exists.
         htmlFiles = []
-        for d in data['diagnostics']:
-            if 'HTMLDiagnostics_files' in d:
+        for d in data["diagnostics"]:
+            if "HTMLDiagnostics_files" in d:
                 # FIXME: Why is this named files, when does it have multiple
                 # files?
-                assert len(d['HTMLDiagnostics_files']) == 1
-                htmlFiles.append(d.pop('HTMLDiagnostics_files')[0])
+                assert len(d["HTMLDiagnostics_files"]) == 1
+                htmlFiles.append(d.pop("HTMLDiagnostics_files")[0])
             else:
                 htmlFiles.append(None)
 
-        report = AnalysisReport(self, data.pop('files'))
+        report = AnalysisReport(self, data.pop("files"))
         # Python 3.10 offers zip(..., strict=True). The following assertion
         # mimics it.
-        assert len(data['diagnostics']) == len(htmlFiles)
-        diagnostics = [AnalysisDiagnostic(d, report, h)
-                       for d, h in zip(data.pop('diagnostics'), htmlFiles)]
+        assert len(data["diagnostics"]) == len(htmlFiles)
+        diagnostics = [
+            AnalysisDiagnostic(d, report, h)
+            for d, h in zip(data.pop("diagnostics"), htmlFiles)
+        ]
 
         assert not data
 
@@ -273,18 +293,22 @@ def __init__(self, run: AnalysisRun, files: List[str]):
         self.diagnostics: List[AnalysisDiagnostic] = []
 
 
-def load_results(results: ResultsDirectory, delete_empty: bool = True,
-                 verbose_log: Optional[str] = None) -> AnalysisRun:
+def load_results(
+    results: ResultsDirectory,
+    delete_empty: bool = True,
+    verbose_log: Optional[str] = None,
+) -> AnalysisRun:
     """
     Backwards compatibility API.
     """
-    return load_results_from_single_run(SingleRunInfo(results,
-                                                      verbose_log),
-                                        delete_empty)
+    return load_results_from_single_run(
+        SingleRunInfo(results, verbose_log), delete_empty
+    )
 
 
-def load_results_from_single_run(info: SingleRunInfo,
-                                 delete_empty: bool = True) -> AnalysisRun:
+def load_results_from_single_run(
+    info: SingleRunInfo, delete_empty: bool = True
+) -> AnalysisRun:
     """
     # Load results of the analyzes from a given output folder.
     # - info is the SingleRunInfo object
@@ -299,7 +323,7 @@ def load_results_from_single_run(info: SingleRunInfo,
     else:
         for dirpath, dirnames, filenames in os.walk(path):
             for f in filenames:
-                if not f.endswith('plist'):
+                if not f.endswith("plist"):
                     continue
 
                 p = os.path.join(dirpath, f)
@@ -331,25 +355,27 @@ def add_removed(self, issue: AnalysisDiagnostic):
     def add_added(self, issue: AnalysisDiagnostic):
         self.present_only_in_new.append(issue)
 
-    def add_changed(self, old_issue: AnalysisDiagnostic,
-                    new_issue: AnalysisDiagnostic):
+    def add_changed(self, old_issue: AnalysisDiagnostic, new_issue: AnalysisDiagnostic):
         self.changed_between_new_and_old.append((old_issue, new_issue))
 
 
 GroupedDiagnostics = DefaultDict[str, List[AnalysisDiagnostic]]
 
 
-def get_grouped_diagnostics(diagnostics: List[AnalysisDiagnostic]
-                            ) -> GroupedDiagnostics:
+def get_grouped_diagnostics(
+    diagnostics: List[AnalysisDiagnostic],
+) -> GroupedDiagnostics:
     result: GroupedDiagnostics = defaultdict(list)
     for diagnostic in diagnostics:
         result[diagnostic.get_location()].append(diagnostic)
     return result
 
 
-def compare_results(results_old: AnalysisRun, results_new: AnalysisRun,
-                    histogram: Optional[HistogramType] = None
-                    ) -> ComparisonResult:
+def compare_results(
+    results_old: AnalysisRun,
+    results_new: AnalysisRun,
+    histogram: Optional[HistogramType] = None,
+) -> ComparisonResult:
     """
     compare_results - Generate a relation from diagnostics in run A to
     diagnostics in run B.
@@ -387,16 +413,15 @@ def compare_results(results_old: AnalysisRun, results_new: AnalysisRun,
                     if a_path_len != b_path_len:
 
                         if histogram == HistogramType.RELATIVE:
-                            path_difference_data.append(
-                                float(a_path_len) / b_path_len)
+                            path_difference_data.append(float(a_path_len) / b_path_len)
 
                         elif histogram == HistogramType.LOG_RELATIVE:
                             path_difference_data.append(
-                                log(float(a_path_len) / b_path_len))
+                                log(float(a_path_len) / b_path_len)
+                            )
 
                         elif histogram == HistogramType.ABSOLUTE:
-                            path_difference_data.append(
-                                a_path_len - b_path_len)
+                            path_difference_data.append(a_path_len - b_path_len)
 
                     res.add_common(b)
                     common.add(a)
@@ -447,15 +472,16 @@ def compare_results(results_old: AnalysisRun, results_new: AnalysisRun,
 
     if histogram:
         from matplotlib import pyplot
+
+    pyplot.hist(path_difference_data, bins=100)
         pyplot.show()
 
     return res
 
 
-def filter_issues(origin: List[AnalysisDiagnostic],
-                  to_remove: Set[AnalysisDiagnostic]) \
-                  -> List[AnalysisDiagnostic]:
+def filter_issues(
+    origin: List[AnalysisDiagnostic], to_remove: Set[AnalysisDiagnostic]
+) -> List[AnalysisDiagnostic]:
     return [diag for diag in origin if diag not in to_remove]
 
 
@@ -473,7 +499,7 @@ def derive_stats(results: AnalysisRun) -> Stats:
     # Collect data on paths length.
     for report in results.reports:
         for diagnostic in report.diagnostics:
-            combined_data['PathsLength'].append(diagnostic.get_path_length())
+            combined_data["PathsLength"].append(diagnostic.get_path_length())
 
     for stat in results.raw_stats:
         for key, value in stat.items():
@@ -489,7 +515,7 @@ def derive_stats(results: AnalysisRun) -> Stats:
             "90th %tile": compute_percentile(values, 0.9),
             "95th %tile": compute_percentile(values, 0.95),
             "median": sorted(values)[len(values) // 2],
-            "total": sum(values)
+            "total": sum(values),
         }
 
     return combined_stats
@@ -497,8 +523,9 @@ def derive_stats(results: AnalysisRun) -> Stats:
 
 # TODO: compare_results decouples comparison from the output, we should
 #       do it here as well
-def compare_stats(results_old: AnalysisRun, results_new: AnalysisRun,
-                  out: TextIO = sys.stdout):
+def compare_stats(
+    results_old: AnalysisRun, results_new: AnalysisRun, out: TextIO = sys.stdout
+):
     stats_old = derive_stats(results_old)
     stats_new = derive_stats(results_new)
 
@@ -518,7 +545,7 @@ def compare_stats(results_old: AnalysisRun, results_new: AnalysisRun,
             report = f"{val_old:.3f} -> {val_new:.3f}"
 
             # Only apply highlighting when writing to TTY and it's not Windows
-            if out.isatty() and os.name != 'nt':
+            if out.isatty() and os.name != "nt":
                 if val_new != 0:
                     ratio = (val_new - val_old) / val_new
                     if ratio < -0.2:
@@ -539,14 +566,16 @@ def compare_stats(results_old: AnalysisRun, results_new: AnalysisRun,
     out.write("\n")
 
 
-def dump_scan_build_results_diff(dir_old: ResultsDirectory,
-                                 dir_new: ResultsDirectory,
-                                 delete_empty: bool = True,
-                                 out: TextIO = sys.stdout,
-                                 show_stats: bool = False,
-                                 stats_only: bool = False,
-                                 histogram: Optional[HistogramType] = None,
-                                 verbose_log: Optional[str] = None):
+def dump_scan_build_results_diff(
+    dir_old: ResultsDirectory,
+    dir_new: ResultsDirectory,
+    delete_empty: bool = True,
+    out: TextIO = sys.stdout,
+    show_stats: bool = False,
+    stats_only: bool = False,
+    histogram: Optional[HistogramType] = None,
+    verbose_log: Optional[str] = None,
+):
     """
     Compare directories with analysis results and dump results.
 
@@ -582,29 +611,34 @@ def dump_scan_build_results_diff(dir_old: ResultsDirectory,
         found_diffs += 1
         total_added += 1
         if aux_log:
-            aux_log.write(f"('ADDED', {new.get_readable_name()}, "
-                          f"{new.get_html_report()})\n")
+            aux_log.write(
+                f"('ADDED', {new.get_readable_name()}, " f"{new.get_html_report()})\n"
+            )
 
     for old in diff.present_only_in_old:
         out.write(f"REMOVED: {old.get_readable_name()}\n\n")
         found_diffs += 1
         total_removed += 1
         if aux_log:
-            aux_log.write(f"('REMOVED', {old.get_readable_name()}, "
-                          f"{old.get_html_report()})\n")
+            aux_log.write(
+                f"('REMOVED', {old.get_readable_name()}, " f"{old.get_html_report()})\n"
+            )
 
     for old, new in diff.changed_between_new_and_old:
         out.write(f"MODIFIED: {old.get_readable_name()}\n")
         found_diffs += 1
         total_modified += 1
         diffs = old.get_diffs(new)
-        str_diffs = [f"          '{key}' changed: "
-                     f"'{old_value}' -> '{new_value}'"
-                     for key, (old_value, new_value) in diffs.items()]
+        str_diffs = [
+            f"          '{key}' changed: " f"'{old_value}' -> '{new_value}'"
+            for key, (old_value, new_value) in diffs.items()
+        ]
         out.write(",\n".join(str_diffs) + "\n\n")
         if aux_log:
-            aux_log.write(f"('MODIFIED', {old.get_readable_name()}, "
-                          f"{old.get_html_report()})\n")
+            aux_log.write(
+                f"('MODIFIED', {old.get_readable_name()}, "
+                f"{old.get_html_report()})\n"
+            )
 
     total_reports = len(results_new.diagnostics)
     out.write(f"TOTAL REPORTS: {total_reports}\n")
@@ -618,8 +652,7 @@ def dump_scan_build_results_diff(dir_old: ResultsDirectory,
         aux_log.close()
 
     # TODO: change to NamedTuple
-    return found_diffs, len(results_old.diagnostics), \
-        len(results_new.diagnostics)
+    return found_diffs, len(results_old.diagnostics), len(results_new.diagnostics)
 
 
 if __name__ == "__main__":

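One detail worth spelling out from the compare_results() hunk above: the three HistogramType modes record three different measures of how a report's path length changed between runs. A hedged sketch with illustrative values (variable names mirror the code, the numbers are made up):

    from math import log

    a_path_len, b_path_len = 12, 8  # path lengths of a matched report in the two runs
    relative = float(a_path_len) / b_path_len            # HistogramType.RELATIVE
    log_relative = log(float(a_path_len) / b_path_len)   # HistogramType.LOG_RELATIVE
    absolute = a_path_len - b_path_len                   # HistogramType.ABSOLUTE
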
diff --git a/clang/utils/analyzer/ProjectMap.py b/clang/utils/analyzer/ProjectMap.py
index 1e89ce634e573..64f819f69456a 100644
--- a/clang/utils/analyzer/ProjectMap.py
+++ b/clang/utils/analyzer/ProjectMap.py
@@ -40,6 +40,7 @@ class Size(int, Enum):
     possible sizes, we want projects with UNSPECIFIED size to be filtered out
     for any given size.
     """
+
     TINY = auto()
     SMALL = auto()
     BIG = auto()
@@ -68,17 +69,23 @@ def from_str(raw_size: Optional[str]) -> "Size":
             if possible_size.name == raw_size_upper:
                 return possible_size
 
-        possible_sizes = [size.name.lower() for size in Size
-                          # no need in showing our users this size
-                          if size != Size.UNSPECIFIED]
-        raise ValueError(f"Incorrect project size '{raw_size}'. "
-                         f"Available sizes are {possible_sizes}")
+        possible_sizes = [
+            size.name.lower()
+            for size in Size
+            # no need in showing our users this size
+            if size != Size.UNSPECIFIED
+        ]
+        raise ValueError(
+            f"Incorrect project size '{raw_size}'. "
+            f"Available sizes are {possible_sizes}"
+        )
 
 
 class ProjectInfo(NamedTuple):
     """
     Information about a project to analyze.
     """
+
     name: str
     mode: int
     source: DownloadType = DownloadType.SCRIPT
@@ -106,6 +113,7 @@ class ProjectMap:
     """
     Project map stores info about all the "registered" projects.
     """
+
     def __init__(self, path: Optional[str] = None, should_exist: bool = True):
         """
         :param path: optional path to a project JSON file, when None defaults
@@ -122,7 +130,8 @@ def __init__(self, path: Optional[str] = None, should_exist: bool = True):
             if should_exist:
                 raise ValueError(
                     f"Cannot find the project map file {path}"
-                    f"\nRunning script for the wrong directory?\n")
+                    f"\nRunning script for the wrong directory?\n"
+                )
             else:
                 self._create_empty(path)
 
@@ -140,15 +149,13 @@ def _load_projects(self):
             raw_projects = json.load(raw_data)
 
             if not isinstance(raw_projects, list):
-                raise ValueError(
-                    "Project map should be a list of JSON objects")
+                raise ValueError("Project map should be a list of JSON objects")
 
             self.projects = self._parse(raw_projects)
 
     @staticmethod
     def _parse(raw_projects: List[JSON]) -> List[ProjectInfo]:
-        return [ProjectMap._parse_project(raw_project)
-                for raw_project in raw_projects]
+        return [ProjectMap._parse_project(raw_project) for raw_project in raw_projects]
 
     @staticmethod
     def _parse_project(raw_project: JSON) -> ProjectInfo:
@@ -164,12 +171,10 @@ def _parse_project(raw_project: JSON) -> ProjectInfo:
             else:
                 origin, commit = "", ""
 
-            return ProjectInfo(name, build_mode, source, origin, commit,
-                               enabled, size)
+            return ProjectInfo(name, build_mode, source, origin, commit, enabled, size)
 
         except KeyError as e:
-            raise ValueError(
-                f"Project info is required to have a '{e.args[0]}' field")
+            raise ValueError(f"Project info is required to have a '{e.args[0]}' field")
 
     @staticmethod
     def _get_git_params(raw_project: JSON) -> Tuple[str, str]:
@@ -178,7 +183,8 @@ def _get_git_params(raw_project: JSON) -> Tuple[str, str]:
         except KeyError as e:
             raise ValueError(
                 f"Profect info is required to have a '{e.args[0]}' field "
-                f"if it has a 'git' source")
+                f"if it has a 'git' source"
+            )
 
     @staticmethod
     def _create_empty(path: str):
@@ -187,13 +193,11 @@ def _create_empty(path: str):
     @staticmethod
     def _save(projects: List[ProjectInfo], path: str):
         with open(path, "w") as output:
-            json.dump(ProjectMap._convert_infos_to_dicts(projects),
-                      output, indent=2)
+            json.dump(ProjectMap._convert_infos_to_dicts(projects), output, indent=2)
 
     @staticmethod
     def _convert_infos_to_dicts(projects: List[ProjectInfo]) -> List[JSON]:
-        return [ProjectMap._convert_info_to_dict(project)
-                for project in projects]
+        return [ProjectMap._convert_info_to_dict(project) for project in projects]
 
     @staticmethod
     def _convert_info_to_dict(project: ProjectInfo) -> JSON:

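For readers unfamiliar with the project map format: _parse_project() above consumes one JSON object per project and raises a ValueError naming any missing field. A hypothetical entry, with the exact key names treated as an assumption inferred from this hunk:

    # Hypothetical project-map entry; key names are inferred, not authoritative.
    raw_project = {
        "name": "tinyproj",   # short identifier for the project
        "mode": 1,            # build mode (see the --mode help text in SATest.py)
        "source": "git",      # download type; "git" requires the two fields below
        "origin": "https://example.org/tinyproj.git",
        "commit": "0123abc",
    }
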
diff --git a/clang/utils/analyzer/SATest.py b/clang/utils/analyzer/SATest.py
index a34c7f29c3afb..d70e33f24c2df 100755
--- a/clang/utils/analyzer/SATest.py
+++ b/clang/utils/analyzer/SATest.py
@@ -8,10 +8,9 @@
 
 SCRIPTS_DIR = os.path.dirname(os.path.realpath(__file__))
 PROJECTS_DIR = os.path.join(SCRIPTS_DIR, "projects")
-DEFAULT_LLVM_DIR = os.path.realpath(os.path.join(SCRIPTS_DIR,
-                                                 os.path.pardir,
-                                                 os.path.pardir,
-                                                 os.path.pardir))
+DEFAULT_LLVM_DIR = os.path.realpath(
+    os.path.join(SCRIPTS_DIR, os.path.pardir, os.path.pardir, os.path.pardir)
+)
 
 
 def add(parser, args):
@@ -19,15 +18,16 @@ def add(parser, args):
     from ProjectMap import ProjectInfo
 
     if args.source == "git" and (args.origin == "" or args.commit == ""):
-        parser.error(
-            "Please provide both --origin and --commit if source is 'git'")
+        parser.error("Please provide both --origin and --commit if source is 'git'")
 
     if args.source != "git" and (args.origin != "" or args.commit != ""):
-        parser.error("Options --origin and --commit don't make sense when "
-                     "source is not 'git'")
+        parser.error(
+            "Options --origin and --commit don't make sense when " "source is not 'git'"
+        )
 
-    project = ProjectInfo(args.name[0], args.mode, args.source, args.origin,
-                          args.commit)
+    project = ProjectInfo(
+        args.name[0], args.mode, args.source, args.origin, args.commit
+    )
 
     SATestAdd.add_new_project(project)
 
@@ -38,13 +38,15 @@ def build(parser, args):
     SATestBuild.VERBOSE = args.verbose
 
     projects = get_projects(parser, args)
-    tester = SATestBuild.RegressionTester(args.jobs,
-                                          projects,
-                                          args.override_compiler,
-                                          args.extra_analyzer_config,
-                                          args.extra_checkers,
-                                          args.regenerate,
-                                          args.strictness)
+    tester = SATestBuild.RegressionTester(
+        args.jobs,
+        projects,
+        args.override_compiler,
+        args.extra_analyzer_config,
+        args.extra_checkers,
+        args.regenerate,
+        args.strictness,
+    )
     tests_passed = tester.test_all()
 
     if not tests_passed:
@@ -55,22 +57,28 @@ def build(parser, args):
 def compare(parser, args):
     import CmpRuns
 
-    choices = [CmpRuns.HistogramType.RELATIVE.value,
-               CmpRuns.HistogramType.LOG_RELATIVE.value,
-               CmpRuns.HistogramType.ABSOLUTE.value]
+    choices = [
+        CmpRuns.HistogramType.RELATIVE.value,
+        CmpRuns.HistogramType.LOG_RELATIVE.value,
+        CmpRuns.HistogramType.ABSOLUTE.value,
+    ]
 
     if args.histogram is not None and args.histogram not in choices:
-        parser.error("Incorrect histogram type, available choices are {}"
-                     .format(choices))
+        parser.error(
+            "Incorrect histogram type, available choices are {}".format(choices)
+        )
 
     dir_old = CmpRuns.ResultsDirectory(args.old[0], args.root_old)
     dir_new = CmpRuns.ResultsDirectory(args.new[0], args.root_new)
 
-    CmpRuns.dump_scan_build_results_diff(dir_old, dir_new,
-                                         show_stats=args.show_stats,
-                                         stats_only=args.stats_only,
-                                         histogram=args.histogram,
-                                         verbose_log=args.verbose_log)
+    CmpRuns.dump_scan_build_results_diff(
+        dir_old,
+        dir_new,
+        show_stats=args.show_stats,
+        stats_only=args.stats_only,
+        histogram=args.histogram,
+        verbose_log=args.verbose_log,
+    )
 
 
 def update(parser, args):
@@ -92,6 +100,7 @@ def benchmark(parser, args):
 
 def benchmark_compare(parser, args):
     import SATestBenchmark
+
     SATestBenchmark.compare(args.old, args.new, args.output)
 
 
@@ -102,34 +111,36 @@ def get_projects(parser, args):
     projects = project_map.projects
 
     def filter_projects(projects, predicate, force=False):
-        return [project.with_fields(enabled=(force or project.enabled) and
-                                    predicate(project))
-                for project in projects]
+        return [
+            project.with_fields(
+                enabled=(force or project.enabled) and predicate(project)
+            )
+            for project in projects
+        ]
 
     if args.projects:
         projects_arg = args.projects.split(",")
-        available_projects = [project.name
-                              for project in projects]
+        available_projects = [project.name for project in projects]
 
         # validate that given projects are present in the project map file
         for manual_project in projects_arg:
             if manual_project not in available_projects:
-                parser.error("Project '{project}' is not found in "
-                             "the project map file. Available projects are "
-                             "{all}.".format(project=manual_project,
-                                             all=available_projects))
+                parser.error(
+                    "Project '{project}' is not found in "
+                    "the project map file. Available projects are "
+                    "{all}.".format(project=manual_project, all=available_projects)
+                )
 
-        projects = filter_projects(projects, lambda project:
-                                   project.name in projects_arg,
-                                   force=True)
+        projects = filter_projects(
+            projects, lambda project: project.name in projects_arg, force=True
+        )
 
     try:
         max_size = Size.from_str(args.max_size)
     except ValueError as e:
         parser.error("{}".format(e))
 
-    projects = filter_projects(projects, lambda project:
-                               project.size <= max_size)
+    projects = filter_projects(projects, lambda project: project.size <= max_size)
 
     return projects
 
@@ -145,12 +156,11 @@ def docker(parser, args):
     elif args.shell:
         docker_shell(args)
     else:
-        sys.exit(docker_run(args, ' '.join(args.rest)))
+        sys.exit(docker_run(args, " ".join(args.rest)))
 
 
 def docker_build_image():
-    sys.exit(call("docker build --tag satest-image {}".format(SCRIPTS_DIR),
-                  shell=True))
+    sys.exit(call("docker build --tag satest-image {}".format(SCRIPTS_DIR), shell=True))
 
 
 def docker_shell(args):
@@ -171,22 +181,25 @@ def docker_shell(args):
 
 def docker_run(args, command, docker_args=""):
     try:
-        return call("docker run --rm --name satest "
-                    "-v {llvm}:/llvm-project "
-                    "-v {build}:/build "
-                    "-v {clang}:/analyzer "
-                    "-v {scripts}:/scripts "
-                    "-v {projects}:/projects "
-                    "{docker_args} "
-                    "satest-image:latest {command}"
-                    .format(llvm=args.llvm_project_dir,
-                            build=args.build_dir,
-                            clang=args.clang_dir,
-                            scripts=SCRIPTS_DIR,
-                            projects=PROJECTS_DIR,
-                            docker_args=docker_args,
-                            command=command),
-                    shell=True)
+        return call(
+            "docker run --rm --name satest "
+            "-v {llvm}:/llvm-project "
+            "-v {build}:/build "
+            "-v {clang}:/analyzer "
+            "-v {scripts}:/scripts "
+            "-v {projects}:/projects "
+            "{docker_args} "
+            "satest-image:latest {command}".format(
+                llvm=args.llvm_project_dir,
+                build=args.build_dir,
+                clang=args.clang_dir,
+                scripts=SCRIPTS_DIR,
+                projects=PROJECTS_DIR,
+                docker_args=docker_args,
+                command=command,
+            ),
+            shell=True,
+        )
 
     except KeyboardInterrupt:
         docker_cleanup()
@@ -203,62 +216,104 @@ def main():
 
     # add subcommand
     add_parser = subparsers.add_parser(
-        "add",
-        help="Add a new project for the analyzer testing.")
+        "add", help="Add a new project for the analyzer testing."
+    )
     # TODO: Add an option not to build.
     # TODO: Set the path to the Repository directory.
     add_parser.add_argument("name", nargs=1, help="Name of the new project")
-    add_parser.add_argument("--mode", action="store", default=1, type=int,
-                            choices=[0, 1, 2],
-                            help="Build mode: 0 for single file project, "
-                            "1 for scan_build, "
-                            "2 for single file c++11 project")
-    add_parser.add_argument("--source", action="store", default="script",
-                            choices=["script", "git", "zip"],
-                            help="Source type of the new project: "
-                            "'git' for getting from git "
-                            "(please provide --origin and --commit), "
-                            "'zip' for unpacking source from a zip file, "
-                            "'script' for downloading source by running "
-                            "a custom script")
-    add_parser.add_argument("--origin", action="store", default="",
-                            help="Origin link for a git repository")
-    add_parser.add_argument("--commit", action="store", default="",
-                            help="Git hash for a commit to checkout")
+    add_parser.add_argument(
+        "--mode",
+        action="store",
+        default=1,
+        type=int,
+        choices=[0, 1, 2],
+        help="Build mode: 0 for single file project, "
+        "1 for scan_build, "
+        "2 for single file c++11 project",
+    )
+    add_parser.add_argument(
+        "--source",
+        action="store",
+        default="script",
+        choices=["script", "git", "zip"],
+        help="Source type of the new project: "
+        "'git' for getting from git "
+        "(please provide --origin and --commit), "
+        "'zip' for unpacking source from a zip file, "
+        "'script' for downloading source by running "
+        "a custom script",
+    )
+    add_parser.add_argument(
+        "--origin", action="store", default="", help="Origin link for a git repository"
+    )
+    add_parser.add_argument(
+        "--commit", action="store", default="", help="Git hash for a commit to checkout"
+    )
     add_parser.set_defaults(func=add)
 
     # build subcommand
     build_parser = subparsers.add_parser(
         "build",
         help="Build projects from the project map and compare results with "
-        "the reference.")
-    build_parser.add_argument("--strictness", dest="strictness",
-                              type=int, default=0,
-                              help="0 to fail on runtime errors, 1 to fail "
-                              "when the number of found bugs are different "
-                              "from the reference, 2 to fail on any "
-                              "difference from the reference. Default is 0.")
-    build_parser.add_argument("-r", dest="regenerate", action="store_true",
-                              default=False,
-                              help="Regenerate reference output.")
-    build_parser.add_argument("--override-compiler", action="store_true",
-                              default=False, help="Call scan-build with "
-                              "--override-compiler option.")
-    build_parser.add_argument("-j", "--jobs", dest="jobs",
-                              type=int, default=0,
-                              help="Number of projects to test concurrently")
-    build_parser.add_argument("--extra-analyzer-config",
-                              dest="extra_analyzer_config", type=str,
-                              default="",
-                              help="Arguments passed to to -analyzer-config")
-    build_parser.add_argument("--extra-checkers",
-                              dest="extra_checkers", type=str,
-                              default="",
-                              help="Extra checkers to enable")
-    build_parser.add_argument("--projects", action="store", default="",
-                              help="Comma-separated list of projects to test")
-    build_parser.add_argument("--max-size", action="store", default=None,
-                              help="Maximum size for the projects to test")
+        "the reference.",
+    )
+    build_parser.add_argument(
+        "--strictness",
+        dest="strictness",
+        type=int,
+        default=0,
+        help="0 to fail on runtime errors, 1 to fail "
+        "when the number of found bugs are different "
+        "from the reference, 2 to fail on any "
+        "difference from the reference. Default is 0.",
+    )
+    build_parser.add_argument(
+        "-r",
+        dest="regenerate",
+        action="store_true",
+        default=False,
+        help="Regenerate reference output.",
+    )
+    build_parser.add_argument(
+        "--override-compiler",
+        action="store_true",
+        default=False,
+        help="Call scan-build with " "--override-compiler option.",
+    )
+    build_parser.add_argument(
+        "-j",
+        "--jobs",
+        dest="jobs",
+        type=int,
+        default=0,
+        help="Number of projects to test concurrently",
+    )
+    build_parser.add_argument(
+        "--extra-analyzer-config",
+        dest="extra_analyzer_config",
+        type=str,
+        default="",
+        help="Arguments passed to to -analyzer-config",
+    )
+    build_parser.add_argument(
+        "--extra-checkers",
+        dest="extra_checkers",
+        type=str,
+        default="",
+        help="Extra checkers to enable",
+    )
+    build_parser.add_argument(
+        "--projects",
+        action="store",
+        default="",
+        help="Comma-separated list of projects to test",
+    )
+    build_parser.add_argument(
+        "--max-size",
+        action="store",
+        default=None,
+        help="Maximum size for the projects to test",
+    )
     build_parser.add_argument("-v", "--verbose", action="count", default=0)
     build_parser.set_defaults(func=build)
 
@@ -266,29 +321,53 @@ def main():
     cmp_parser = subparsers.add_parser(
         "compare",
         help="Comparing two static analyzer runs in terms of "
-        "reported warnings and execution time statistics.")
-    cmp_parser.add_argument("--root-old", dest="root_old",
-                            help="Prefix to ignore on source files for "
-                            "OLD directory",
-                            action="store", type=str, default="")
-    cmp_parser.add_argument("--root-new", dest="root_new",
-                            help="Prefix to ignore on source files for "
-                            "NEW directory",
-                            action="store", type=str, default="")
-    cmp_parser.add_argument("--verbose-log", dest="verbose_log",
-                            help="Write additional information to LOG "
-                            "[default=None]",
-                            action="store", type=str, default=None,
-                            metavar="LOG")
-    cmp_parser.add_argument("--stats-only", action="store_true",
-                            dest="stats_only", default=False,
-                            help="Only show statistics on reports")
-    cmp_parser.add_argument("--show-stats", action="store_true",
-                            dest="show_stats", default=False,
-                            help="Show change in statistics")
-    cmp_parser.add_argument("--histogram", action="store", default=None,
-                            help="Show histogram of paths differences. "
-                            "Requires matplotlib")
+        "reported warnings and execution time statistics.",
+    )
+    cmp_parser.add_argument(
+        "--root-old",
+        dest="root_old",
+        help="Prefix to ignore on source files for " "OLD directory",
+        action="store",
+        type=str,
+        default="",
+    )
+    cmp_parser.add_argument(
+        "--root-new",
+        dest="root_new",
+        help="Prefix to ignore on source files for " "NEW directory",
+        action="store",
+        type=str,
+        default="",
+    )
+    cmp_parser.add_argument(
+        "--verbose-log",
+        dest="verbose_log",
+        help="Write additional information to LOG " "[default=None]",
+        action="store",
+        type=str,
+        default=None,
+        metavar="LOG",
+    )
+    cmp_parser.add_argument(
+        "--stats-only",
+        action="store_true",
+        dest="stats_only",
+        default=False,
+        help="Only show statistics on reports",
+    )
+    cmp_parser.add_argument(
+        "--show-stats",
+        action="store_true",
+        dest="show_stats",
+        default=False,
+        help="Show change in statistics",
+    )
+    cmp_parser.add_argument(
+        "--histogram",
+        action="store",
+        default=None,
+        help="Show histogram of paths differences. " "Requires matplotlib",
+    )
     cmp_parser.add_argument("old", nargs=1, help="Directory with old results")
     cmp_parser.add_argument("new", nargs=1, help="Directory with new results")
     cmp_parser.set_defaults(func=compare)
@@ -297,64 +376,103 @@ def main():
     upd_parser = subparsers.add_parser(
         "update",
         help="Update static analyzer reference results based on the previous "
-        "run of SATest build. Assumes that SATest build was just run.")
-    upd_parser.add_argument("--git", action="store_true",
-                            help="Stage updated results using git.")
+        "run of SATest build. Assumes that SATest build was just run.",
+    )
+    upd_parser.add_argument(
+        "--git", action="store_true", help="Stage updated results using git."
+    )
     upd_parser.set_defaults(func=update)
 
     # docker subcommand
     dock_parser = subparsers.add_parser(
-        "docker",
-        help="Run regression system in the docker.")
-
-    dock_parser.add_argument("--build-image", action="store_true",
-                             help="Build docker image for running tests.")
-    dock_parser.add_argument("--shell", action="store_true",
-                             help="Start a shell on docker.")
-    dock_parser.add_argument("--llvm-project-dir", action="store",
-                             default=DEFAULT_LLVM_DIR,
-                             help="Path to LLVM source code. Defaults "
-                             "to the repo where this script is located. ")
-    dock_parser.add_argument("--build-dir", action="store", default="",
-                             help="Path to a directory where docker should "
-                             "build LLVM code.")
-    dock_parser.add_argument("--clang-dir", action="store", default="",
-                             help="Path to find/install LLVM installation.")
-    dock_parser.add_argument("rest", nargs=argparse.REMAINDER, default=[],
-                             help="Additional args that will be forwarded "
-                             "to the docker's entrypoint.")
+        "docker", help="Run regression system in the docker."
+    )
+
+    dock_parser.add_argument(
+        "--build-image",
+        action="store_true",
+        help="Build docker image for running tests.",
+    )
+    dock_parser.add_argument(
+        "--shell", action="store_true", help="Start a shell on docker."
+    )
+    dock_parser.add_argument(
+        "--llvm-project-dir",
+        action="store",
+        default=DEFAULT_LLVM_DIR,
+        help="Path to LLVM source code. Defaults "
+        "to the repo where this script is located. ",
+    )
+    dock_parser.add_argument(
+        "--build-dir",
+        action="store",
+        default="",
+        help="Path to a directory where docker should " "build LLVM code.",
+    )
+    dock_parser.add_argument(
+        "--clang-dir",
+        action="store",
+        default="",
+        help="Path to find/install LLVM installation.",
+    )
+    dock_parser.add_argument(
+        "rest",
+        nargs=argparse.REMAINDER,
+        default=[],
+        help="Additional args that will be forwarded " "to the docker's entrypoint.",
+    )
     dock_parser.set_defaults(func=docker)
 
     # benchmark subcommand
     bench_parser = subparsers.add_parser(
-        "benchmark",
-        help="Run benchmarks by building a set of projects multiple times.")
-
-    bench_parser.add_argument("-i", "--iterations", action="store",
-                              type=int, default=20,
-                              help="Number of iterations for building each "
-                              "project.")
-    bench_parser.add_argument("-o", "--output", action="store",
-                              default="benchmark.csv",
-                              help="Output csv file for the benchmark results")
-    bench_parser.add_argument("--projects", action="store", default="",
-                              help="Comma-separated list of projects to test")
-    bench_parser.add_argument("--max-size", action="store", default=None,
-                              help="Maximum size for the projects to test")
+        "benchmark", help="Run benchmarks by building a set of projects multiple times."
+    )
+
+    bench_parser.add_argument(
+        "-i",
+        "--iterations",
+        action="store",
+        type=int,
+        default=20,
+        help="Number of iterations for building each " "project.",
+    )
+    bench_parser.add_argument(
+        "-o",
+        "--output",
+        action="store",
+        default="benchmark.csv",
+        help="Output csv file for the benchmark results",
+    )
+    bench_parser.add_argument(
+        "--projects",
+        action="store",
+        default="",
+        help="Comma-separated list of projects to test",
+    )
+    bench_parser.add_argument(
+        "--max-size",
+        action="store",
+        default=None,
+        help="Maximum size for the projects to test",
+    )
     bench_parser.set_defaults(func=benchmark)
 
     bench_subparsers = bench_parser.add_subparsers()
     bench_compare_parser = bench_subparsers.add_parser(
-        "compare",
-        help="Compare benchmark runs.")
-    bench_compare_parser.add_argument("--old", action="store", required=True,
-                                      help="Benchmark reference results to "
-                                      "compare agains.")
-    bench_compare_parser.add_argument("--new", action="store", required=True,
-                                      help="New benchmark results to check.")
-    bench_compare_parser.add_argument("-o", "--output",
-                                      action="store", required=True,
-                                      help="Output file for plots.")
+        "compare", help="Compare benchmark runs."
+    )
+    bench_compare_parser.add_argument(
+        "--old",
+        action="store",
+        required=True,
+        help="Benchmark reference results to " "compare agains.",
+    )
+    bench_compare_parser.add_argument(
+        "--new", action="store", required=True, help="New benchmark results to check."
+    )
+    bench_compare_parser.add_argument(
+        "-o", "--output", action="store", required=True, help="Output file for plots."
+    )
     bench_compare_parser.set_defaults(func=benchmark_compare)
 
     args = parser.parse_args()

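A pattern worth noting in SATest.py: every subcommand registers its handler with set_defaults(func=...), so after parse_args() the chosen handler can be invoked uniformly (presumably as args.func(parser, args), matching the handlers' (parser, args) signatures). A minimal sketch of that dispatch idiom, using a hypothetical command:

    import argparse


    def greet(parser, args):  # same (parser, args) signature as add(), build(), ...
        print(f"hello, {args.name}")


    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    greet_parser = subparsers.add_parser("greet", help="Say hello.")
    greet_parser.add_argument("name")
    greet_parser.set_defaults(func=greet)

    args = parser.parse_args(["greet", "world"])
    args.func(parser, args)  # dispatches to greet()
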
diff --git a/clang/utils/analyzer/SATestAdd.py b/clang/utils/analyzer/SATestAdd.py
index 9d0919345182f..84e6d44b42822 100644
--- a/clang/utils/analyzer/SATestAdd.py
+++ b/clang/utils/analyzer/SATestAdd.py
@@ -55,8 +55,7 @@ def add_new_project(project: ProjectInfo):
     :param name: is a short string used to identify a project.
     """
 
-    test_info = SATestBuild.TestInfo(project,
-                                     is_reference_build=True)
+    test_info = SATestBuild.TestInfo(project, is_reference_build=True)
     tester = SATestBuild.ProjectTester(test_info)
 
     project_dir = tester.get_project_dir()
@@ -71,8 +70,10 @@ def add_new_project(project: ProjectInfo):
     project_map = ProjectMap(should_exist=False)
 
     if is_existing_project(project_map, project):
-        print(f"Warning: Project with name '{project.name}' already exists.",
-              file=sys.stdout)
+        print(
+            f"Warning: Project with name '{project.name}' already exists.",
+            file=sys.stdout,
+        )
         print("Reference output has been regenerated.", file=sys.stdout)
     else:
         project_map.projects.append(project)
@@ -80,8 +81,10 @@ def add_new_project(project: ProjectInfo):
 
 
 def is_existing_project(project_map: ProjectMap, project: ProjectInfo) -> bool:
-    return any(existing_project.name == project.name
-               for existing_project in project_map.projects)
+    return any(
+        existing_project.name == project.name
+        for existing_project in project_map.projects
+    )
 
 
 if __name__ == "__main__":

diff --git a/clang/utils/analyzer/SATestBenchmark.py b/clang/utils/analyzer/SATestBenchmark.py
index 0fa2204bbbe7e..f4936c0eafb12 100644
--- a/clang/utils/analyzer/SATestBenchmark.py
+++ b/clang/utils/analyzer/SATestBenchmark.py
@@ -36,15 +36,14 @@ class Benchmark:
     multiple times for the given set of projects and stores results in the
     specified file.
     """
-    def __init__(self, projects: List[ProjectInfo], iterations: int,
-                 output_path: str):
+
+    def __init__(self, projects: List[ProjectInfo], iterations: int, output_path: str):
         self.projects = projects
         self.iterations = iterations
         self.out = output_path
 
     def run(self):
-        results = [self._benchmark_project(project)
-                   for project in self.projects]
+        results = [self._benchmark_project(project) for project in self.projects]
 
         data = pd.concat(results, ignore_index=True)
         _save(data, self.out)
@@ -66,10 +65,13 @@ def _benchmark_project(self, project: ProjectInfo) -> pd.DataFrame:
         for i in range(self.iterations):
             stdout(f"Iteration #{i + 1}")
             time, mem = tester.build(project_dir, output_dir)
-            raw_data.append({"time": time, "memory": mem,
-                             "iteration": i, "project": project.name})
-            stdout(f"time: {utils.time_to_str(time)}, "
-                   f"peak memory: {utils.memory_to_str(mem)}")
+            raw_data.append(
+                {"time": time, "memory": mem, "iteration": i, "project": project.name}
+            )
+            stdout(
+                f"time: {utils.time_to_str(time)}, "
+                f"peak memory: {utils.memory_to_str(mem)}"
+            )
 
         return pd.DataFrame(raw_data)
 
@@ -102,8 +104,9 @@ def compare(old_path: str, new_path: str, plot_file: str):
     _plot(data, plot_file)
 
 
-def _normalize(old: pd.DataFrame,
-               new: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
+def _normalize(
+    old: pd.DataFrame, new: pd.DataFrame
+) -> Tuple[pd.DataFrame, pd.DataFrame]:
     # This creates a dataframe with all numerical data averaged.
     means = old.groupby("project").mean()
     return _normalize_impl(old, means), _normalize_impl(new, means)
@@ -144,8 +147,14 @@ def _plot(data: pd.DataFrame, plot_file: str):
     figure, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6))
 
     def _subplot(key: str, ax: matplotlib.axes.Axes):
-        sns.boxplot(x="project", y=_normalized_name(key), hue="kind",
-                    data=data, palette=sns.color_palette("BrBG", 2), ax=ax)
+        sns.boxplot(
+            x="project",
+            y=_normalized_name(key),
+            hue="kind",
+            data=data,
+            palette=sns.color_palette("BrBG", 2),
+            ax=ax,
+        )
 
     _subplot("time", ax1)
     # No need to have xlabels on both top and bottom charts.

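The _normalize()/_normalize_impl() pair above rescales both benchmark runs by the old run's per-project means, so projects with very different absolute build times can share one boxplot. A toy sketch of that idea, assuming only pandas and made-up numbers:

    import pandas as pd

    old = pd.DataFrame({"project": ["p", "p", "q"], "time": [10.0, 12.0, 100.0]})
    means = old.groupby("project").mean()         # per-project baselines from the old run
    baseline = old["project"].map(means["time"])  # baseline for each row's project
    old["time_normalized"] = old["time"] / baseline  # hovers around 1.0 by construction
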
diff --git a/clang/utils/analyzer/SATestBuild.py b/clang/utils/analyzer/SATestBuild.py
index cf02f26ef267b..bc86ed8b64e0e 100644
--- a/clang/utils/analyzer/SATestBuild.py
+++ b/clang/utils/analyzer/SATestBuild.py
@@ -59,6 +59,7 @@
 import zipfile
 
 from queue import Queue
+
 # mypy has problems finding InvalidFileException in the module
 # and this is we can shush that false positive
 from plistlib import InvalidFileException  # type:ignore
@@ -70,9 +71,9 @@
 # Helper functions.
 ###############################################################################
 
+
 class StreamToLogger:
-    def __init__(self, logger: logging.Logger,
-                 log_level: int = logging.INFO):
+    def __init__(self, logger: logging.Logger, log_level: int = logging.INFO):
         self.logger = logger
         self.log_level = log_level
 
@@ -109,8 +110,7 @@ def stdout(message: str):
     LOCAL.stdout.write(message)
 
 
-logging.basicConfig(
-    format='%(asctime)s:%(levelname)s:%(name)s: %(message)s')
+logging.basicConfig(format="%(asctime)s:%(levelname)s:%(name)s: %(message)s")
 
 
 ###############################################################################
@@ -119,10 +119,10 @@ def stdout(message: str):
 
 
 # Find Clang for static analysis.
-if 'CC' in os.environ:
-    cc_candidate: Optional[str] = os.environ['CC']
+if "CC" in os.environ:
+    cc_candidate: Optional[str] = os.environ["CC"]
 else:
-    cc_candidate = utils.which("clang", os.environ['PATH'])
+    cc_candidate = utils.which("clang", os.environ["PATH"])
 if not cc_candidate:
     stderr("Error: cannot find 'clang' in PATH")
     sys.exit(1)
@@ -173,18 +173,20 @@ def stdout(message: str):
 # The list of checkers used during analyzes.
 # Currently, consists of all the non-experimental checkers, plus a few alpha
 # checkers we don't want to regress on.
-CHECKERS = ",".join([
-    "alpha.unix.SimpleStream",
-    "alpha.security.taint",
-    "cplusplus.NewDeleteLeaks",
-    "core",
-    "cplusplus",
-    "deadcode",
-    "security",
-    "unix",
-    "osx",
-    "nullability"
-])
+CHECKERS = ",".join(
+    [
+        "alpha.unix.SimpleStream",
+        "alpha.security.taint",
+        "cplusplus.NewDeleteLeaks",
+        "core",
+        "cplusplus",
+        "deadcode",
+        "security",
+        "unix",
+        "osx",
+        "nullability",
+    ]
+)
 
 VERBOSE = 0
 
@@ -201,15 +203,21 @@ def run_cleanup_script(directory: str, build_log_file: IO):
     cwd = os.path.join(directory, PATCHED_SOURCE_DIR_NAME)
     script_path = os.path.join(directory, CLEANUP_SCRIPT)
 
-    utils.run_script(script_path, build_log_file, cwd,
-                     out=LOCAL.stdout, err=LOCAL.stderr,
-                     verbose=VERBOSE)
+    utils.run_script(
+        script_path,
+        build_log_file,
+        cwd,
+        out=LOCAL.stdout,
+        err=LOCAL.stderr,
+        verbose=VERBOSE,
+    )
 
 
 class TestInfo(NamedTuple):
     """
     Information about a project and settings for its analysis.
     """
+
     project: ProjectInfo
     override_compiler: bool = False
     extra_analyzer_config: str = ""
@@ -235,10 +243,16 @@ class RegressionTester:
     A component aggregating all of the project testing.
     """
 
-    def __init__(self, jobs: int, projects: List[ProjectInfo],
-                 override_compiler: bool, extra_analyzer_config: str,
-                 extra_checkers: str,
-                 regenerate: bool, strictness: bool):
+    def __init__(
+        self,
+        jobs: int,
+        projects: List[ProjectInfo],
+        override_compiler: bool,
+        extra_analyzer_config: str,
+        extra_checkers: str,
+        regenerate: bool,
+        strictness: bool,
+    ):
         self.jobs = jobs
         self.projects = projects
         self.override_compiler = override_compiler
@@ -253,18 +267,21 @@ def test_all(self) -> bool:
         # Test the projects.
         for project in self.projects:
             projects_to_test.append(
-                TestInfo(project,
-                         self.override_compiler,
-                         self.extra_analyzer_config,
-                         self.extra_checkers,
-                         self.regenerate, self.strictness))
+                TestInfo(
+                    project,
+                    self.override_compiler,
+                    self.extra_analyzer_config,
+                    self.extra_checkers,
+                    self.regenerate,
+                    self.strictness,
+                )
+            )
         if self.jobs <= 1:
             return self._single_threaded_test_all(projects_to_test)
         else:
             return self._multi_threaded_test_all(projects_to_test)
 
-    def _single_threaded_test_all(self,
-                                  projects_to_test: List[TestInfo]) -> bool:
+    def _single_threaded_test_all(self, projects_to_test: List[TestInfo]) -> bool:
         """
         Run all projects.
         :return: whether tests have passed.
@@ -275,8 +292,7 @@ def _single_threaded_test_all(self,
             success &= tester.test()
         return success
 
-    def _multi_threaded_test_all(self,
-                                 projects_to_test: List[TestInfo]) -> bool:
+    def _multi_threaded_test_all(self, projects_to_test: List[TestInfo]) -> bool:
         """
         Run each project in a separate thread.
 
@@ -327,8 +343,7 @@ def test(self) -> bool:
         to the :param strictness: criteria.
         """
         if not self.project.enabled:
-            self.out(
-                f" \n\n--- Skipping disabled project {self.project.name}\n")
+            self.out(f" \n\n--- Skipping disabled project {self.project.name}\n")
             return True
 
         self.out(f" \n\n--- Building project {self.project.name}\n")
@@ -350,8 +365,10 @@ def test(self) -> bool:
         else:
             passed = run_cmp_results(project_dir, self.strictness)
 
-        self.out(f"Completed tests for project {self.project.name} "
-                 f"(time: {time.time() - start_time:.2f}).\n")
+        self.out(
+            f"Completed tests for project {self.project.name} "
+            f"(time: {time.time() - start_time:.2f}).\n"
+        )
 
         return passed
 
@@ -380,7 +397,7 @@ def build(self, directory: str, output_dir: str) -> Tuple[float, int]:
 
             shutil.rmtree(output_dir)
 
-        assert(not os.path.exists(output_dir))
+        assert not os.path.exists(output_dir)
         os.makedirs(os.path.join(output_dir, LOG_DIR_NAME))
 
         # Build and analyze the project.
@@ -388,39 +405,39 @@ def build(self, directory: str, output_dir: str) -> Tuple[float, int]:
             if self.project.mode == 1:
                 self._download_and_patch(directory, build_log_file)
                 run_cleanup_script(directory, build_log_file)
-                build_time, memory = self.scan_build(directory, output_dir,
-                                                     build_log_file)
+                build_time, memory = self.scan_build(
+                    directory, output_dir, build_log_file
+                )
             else:
-                build_time, memory = self.analyze_preprocessed(directory,
-                                                               output_dir)
+                build_time, memory = self.analyze_preprocessed(directory, output_dir)
 
             if self.is_reference_build:
                 run_cleanup_script(directory, build_log_file)
-                normalize_reference_results(directory, output_dir,
-                                            self.project.mode)
+                normalize_reference_results(directory, output_dir, self.project.mode)
 
-        self.out(f"Build complete (time: {utils.time_to_str(build_time)}, "
-                 f"peak memory: {utils.memory_to_str(memory)}). "
-                 f"See the log for more details: {build_log_path}\n")
+        self.out(
+            f"Build complete (time: {utils.time_to_str(build_time)}, "
+            f"peak memory: {utils.memory_to_str(memory)}). "
+            f"See the log for more details: {build_log_path}\n"
+        )
 
         return build_time, memory
 
-    def scan_build(self, directory: str, output_dir: str,
-                   build_log_file: IO) -> Tuple[float, int]:
+    def scan_build(
+        self, directory: str, output_dir: str, build_log_file: IO
+    ) -> Tuple[float, int]:
         """
         Build the project with scan-build by reading in the commands and
         prefixing them with the scan-build options.
         """
         build_script_path = os.path.join(directory, BUILD_SCRIPT)
         if not os.path.exists(build_script_path):
-            stderr(f"Error: build script is not defined: "
-                   f"{build_script_path}\n")
+            stderr(f"Error: build script is not defined: " f"{build_script_path}\n")
             sys.exit(1)
 
         all_checkers = CHECKERS
-        if 'SA_ADDITIONAL_CHECKERS' in os.environ:
-            all_checkers = (all_checkers + ',' +
-                            os.environ['SA_ADDITIONAL_CHECKERS'])
+        if "SA_ADDITIONAL_CHECKERS" in os.environ:
+            all_checkers = all_checkers + "," + os.environ["SA_ADDITIONAL_CHECKERS"]
         if self.extra_checkers != "":
             all_checkers += "," + self.extra_checkers
 
@@ -456,9 +473,9 @@ def scan_build(self, directory: str, output_dir: str,
                 # instead.
                 if command == NO_PREFIX_CMD:
                     command_prefix = ""
-                    extra_env['OUTPUT'] = output_dir
-                    extra_env['CC'] = CLANG
-                    extra_env['ANALYZER_CONFIG'] = self.generate_config()
+                    extra_env["OUTPUT"] = output_dir
+                    extra_env["CC"] = CLANG
+                    extra_env["ANALYZER_CONFIG"] = self.generate_config()
                     continue
 
                 if command.startswith("#"):
@@ -467,8 +484,9 @@ def scan_build(self, directory: str, output_dir: str,
                 # If using 'make', auto imply a -jX argument
                 # to speed up analysis.  xcodebuild will
                 # automatically use the maximum number of cores.
-                if (command.startswith("make ") or command == "make") and \
-                        "-j" not in command:
+                if (
+                    command.startswith("make ") or command == "make"
+                ) and "-j" not in command:
                     command += f" -j{MAX_JOBS}"
 
                 command_to_run = command_prefix + command
@@ -476,11 +494,13 @@ def scan_build(self, directory: str, output_dir: str,
                 self.vout(f"  Executing: {command_to_run}\n")
 
                 time, mem = utils.check_and_measure_call(
-                    command_to_run, cwd=cwd,
+                    command_to_run,
+                    cwd=cwd,
                     stderr=build_log_file,
                     stdout=build_log_file,
                     env=dict(os.environ, **extra_env),
-                    shell=True)
+                    shell=True,
+                )
 
                 execution_time += time
                 peak_memory = max(peak_memory, mem)
@@ -493,14 +513,17 @@ def scan_build(self, directory: str, output_dir: str,
 
         return execution_time, peak_memory
 
-    def analyze_preprocessed(self, directory: str,
-                             output_dir: str) -> Tuple[float, int]:
+    def analyze_preprocessed(
+        self, directory: str, output_dir: str
+    ) -> Tuple[float, int]:
         """
         Run analysis on a set of preprocessed files.
         """
         if os.path.exists(os.path.join(directory, BUILD_SCRIPT)):
-            stderr(f"Error: The preprocessed files project "
-                   f"should not contain {BUILD_SCRIPT}\n")
+            stderr(
+                f"Error: The preprocessed files project "
+                f"should not contain {BUILD_SCRIPT}\n"
+            )
             raise Exception()
 
         prefix = CLANG + " --analyze "
@@ -543,16 +566,22 @@ def analyze_preprocessed(self, directory: str,
                     self.vout(f"  Executing: {command}\n")
 
                     time, mem = utils.check_and_measure_call(
-                        command, cwd=directory, stderr=log_file,
-                        stdout=log_file, shell=True)
+                        command,
+                        cwd=directory,
+                        stderr=log_file,
+                        stdout=log_file,
+                        shell=True,
+                    )
 
                     execution_time += time
                     peak_memory = max(peak_memory, mem)
 
                 except CalledProcessError as e:
-                    stderr(f"Error: Analyzes of {full_file_name} failed. "
-                           f"See {log_file.name} for details. "
-                           f"Error code {e.returncode}.\n")
+                    stderr(
+                        f"Error: Analyzes of {full_file_name} failed. "
+                        f"See {log_file.name} for details. "
+                        f"Error code {e.returncode}.\n"
+                    )
                     failed = True
 
                 # If command did not fail, erase the log file.
@@ -606,18 +635,27 @@ def _download(self, directory: str, build_log_file: IO):
         else:
             raise ValueError(
                 f"Unknown source type '{self.project.source}' is found "
-                f"for the '{self.project.name}' project")
+                f"for the '{self.project.name}' project"
+            )
 
     def _download_from_git(self, directory: str, build_log_file: IO):
         repo = self.project.origin
         cached_source = os.path.join(directory, CACHED_SOURCE_DIR_NAME)
 
-        check_call(f"git clone --recursive {repo} {cached_source}",
-                   cwd=directory, stderr=build_log_file,
-                   stdout=build_log_file, shell=True)
-        check_call(f"git checkout --quiet {self.project.commit}",
-                   cwd=cached_source, stderr=build_log_file,
-                   stdout=build_log_file, shell=True)
+        check_call(
+            f"git clone --recursive {repo} {cached_source}",
+            cwd=directory,
+            stderr=build_log_file,
+            stdout=build_log_file,
+            shell=True,
+        )
+        check_call(
+            f"git checkout --quiet {self.project.commit}",
+            cwd=cached_source,
+            stderr=build_log_file,
+            stdout=build_log_file,
+            shell=True,
+        )
 
     def _unpack_zip(self, directory: str, build_log_file: IO):
         zip_files = list(glob.glob(directory + "/*.zip"))
@@ -625,23 +663,29 @@ def _unpack_zip(self, directory: str, build_log_file: IO):
         if len(zip_files) == 0:
             raise ValueError(
                 f"Couldn't find any zip files to unpack for the "
-                f"'{self.project.name}' project")
+                f"'{self.project.name}' project"
+            )
 
         if len(zip_files) > 1:
             raise ValueError(
                 f"Couldn't decide which of the zip files ({zip_files}) "
-                f"for the '{self.project.name}' project to unpack")
+                f"for the '{self.project.name}' project to unpack"
+            )
 
         with zipfile.ZipFile(zip_files[0], "r") as zip_file:
-            zip_file.extractall(os.path.join(directory,
-                                             CACHED_SOURCE_DIR_NAME))
+            zip_file.extractall(os.path.join(directory, CACHED_SOURCE_DIR_NAME))
 
     @staticmethod
     def _run_download_script(directory: str, build_log_file: IO):
         script_path = os.path.join(directory, DOWNLOAD_SCRIPT)
-        utils.run_script(script_path, build_log_file, directory,
-                         out=LOCAL.stdout, err=LOCAL.stderr,
-                         verbose=VERBOSE)
+        utils.run_script(
+            script_path,
+            build_log_file,
+            directory,
+            out=LOCAL.stdout,
+            err=LOCAL.stderr,
+            verbose=VERBOSE,
+        )
 
     def _apply_patch(self, directory: str, build_log_file: IO):
         patchfile_path = os.path.join(directory, PATCHFILE_NAME)
@@ -653,15 +697,16 @@ def _apply_patch(self, directory: str, build_log_file: IO):
 
         self.out("  Applying patch.\n")
         try:
-            check_call(f"patch -p1 < '{patchfile_path}'",
-                       cwd=patched_source,
-                       stderr=build_log_file,
-                       stdout=build_log_file,
-                       shell=True)
+            check_call(
+                f"patch -p1 < '{patchfile_path}'",
+                cwd=patched_source,
+                stderr=build_log_file,
+                stdout=build_log_file,
+                shell=True,
+            )
 
         except CalledProcessError:
-            stderr(f"Error: Patch failed. "
-                   f"See {build_log_file.name} for details.\n")
+            stderr(f"Error: Patch failed. " f"See {build_log_file.name} for details.\n")
             sys.exit(1)
 
     def out(self, what: str):
@@ -674,9 +719,12 @@ def vout(self, what: str):
 
 
 class TestProjectThread(threading.Thread):
-    def __init__(self, tasks_queue: TestQueue,
-                 results_differ: threading.Event,
-                 failure_flag: threading.Event):
+    def __init__(
+        self,
+        tasks_queue: TestQueue,
+        results_differ: threading.Event,
+        failure_flag: threading.Event,
+    ):
         """
         :param results_differ: Used to signify that results differ from
                the canonical ones.
@@ -728,8 +776,10 @@ def check_build(output_dir: str):
         clean_up_empty_folders(output_dir)
 
         plists = glob.glob(output_dir + "/*/*.plist")
-        stdout(f"Number of bug reports "
-               f"(non-empty plist files) produced: {len(plists)}\n")
+        stdout(
+            f"Number of bug reports "
+            f"(non-empty plist files) produced: {len(plists)}\n"
+        )
         return
 
     stderr("Error: analysis failed.\n")
@@ -797,8 +847,7 @@ def run_cmp_results(directory: str, strictness: int = 0) -> bool:
     new_list.remove(os.path.join(new_dir, LOG_DIR_NAME))
 
     if len(ref_list) != len(new_list):
-        stderr(f"Mismatch in number of results folders: "
-               f"{ref_list} vs {new_list}")
+        stderr(f"Mismatch in number of results folders: " f"{ref_list} vs {new_list}")
         sys.exit(1)
 
     # There might be more than one folder underneath - one per each scan-build
@@ -811,7 +860,7 @@ def run_cmp_results(directory: str, strictness: int = 0) -> bool:
     # Iterate and find the differences.
     num_diffs = 0
     for ref_dir, new_dir in zip(ref_list, new_list):
-        assert(ref_dir != new_dir)
+        assert ref_dir != new_dir
 
         if VERBOSE >= 1:
             stdout(f"  Comparing Results: {ref_dir} {new_dir}\n")
@@ -822,10 +871,13 @@ def run_cmp_results(directory: str, strictness: int = 0) -> bool:
         new_results = CmpRuns.ResultsDirectory(new_dir, patched_source)
 
         # Scan the results, delete empty plist files.
-        num_diffs, reports_in_ref, reports_in_new = \
-            CmpRuns.dump_scan_build_results_diff(ref_results, new_results,
-                                                 delete_empty=False,
-                                                 out=LOCAL.stdout)
+        (
+            num_diffs,
+            reports_in_ref,
+            reports_in_new,
+        ) = CmpRuns.dump_scan_build_results_diff(
+            ref_results, new_results, delete_empty=False, out=LOCAL.stdout
+        )
 
         if num_diffs > 0:
             stdout(f"Warning: {num_diffs} differences in diagnostics.\n")
@@ -835,24 +887,23 @@ def run_cmp_results(directory: str, strictness: int = 0) -> bool:
             tests_passed = False
 
         elif strictness >= 1 and reports_in_ref != reports_in_new:
-            stdout("Error: The number of results are 
diff erent "
-                   " strict mode (1).\n")
+            stdout("Error: The number of results are 
diff erent " " strict mode (1).\n")
             tests_passed = False
 
-    stdout(f"Diagnostic comparison complete "
-           f"(time: {time.time() - start_time:.2f}).\n")
+    stdout(
+        f"Diagnostic comparison complete " f"(time: {time.time() - start_time:.2f}).\n"
+    )
 
     return tests_passed
 
 
-def normalize_reference_results(directory: str, output_dir: str,
-                                build_mode: int):
+def normalize_reference_results(directory: str, output_dir: str, build_mode: int):
     """
     Make the absolute paths relative in the reference results.
     """
     for dir_path, _, filenames in os.walk(output_dir):
         for filename in filenames:
-            if not filename.endswith('plist'):
+            if not filename.endswith("plist"):
                 continue
 
             plist = os.path.join(dir_path, filename)
@@ -863,18 +914,21 @@ def normalize_reference_results(directory: str, output_dir: str,
             if build_mode == 1:
                 path_prefix = os.path.join(directory, PATCHED_SOURCE_DIR_NAME)
 
-            paths = [source[len(path_prefix) + 1:]
-                     if source.startswith(path_prefix) else source
-                     for source in data['files']]
-            data['files'] = paths
+            paths = [
+                source[len(path_prefix) + 1 :]
+                if source.startswith(path_prefix)
+                else source
+                for source in data["files"]
+            ]
+            data["files"] = paths
 
             # Remove transient fields which change from run to run.
-            for diagnostic in data['diagnostics']:
-                if 'HTMLDiagnostics_files' in diagnostic:
-                    diagnostic.pop('HTMLDiagnostics_files')
+            for diagnostic in data["diagnostics"]:
+                if "HTMLDiagnostics_files" in diagnostic:
+                    diagnostic.pop("HTMLDiagnostics_files")
 
-            if 'clang_version' in data:
-                data.pop('clang_version')
+            if "clang_version" in data:
+                data.pop("clang_version")
 
             with open(plist, "wb") as plist_file:
                 plistlib.dump(data, plist_file)
@@ -908,7 +962,7 @@ def clean_up_empty_plists(output_dir: str):
             with open(plist, "rb") as plist_file:
                 data = plistlib.load(plist_file)
             # Delete empty reports.
-            if not data['files']:
+            if not data["files"]:
                 os.remove(plist)
                 continue
 

diff --git a/clang/utils/analyzer/SATestUpdateDiffs.py b/clang/utils/analyzer/SATestUpdateDiffs.py
index 69b3383beaf17..1c5ffaaa2e330 100644
--- a/clang/utils/analyzer/SATestUpdateDiffs.py
+++ b/clang/utils/analyzer/SATestUpdateDiffs.py
@@ -27,9 +27,10 @@ def update_reference_results(project: ProjectInfo, git: bool = False):
     created_results_path = tester.get_output_dir()
 
     if not os.path.exists(created_results_path):
-        print(f"Skipping project '{project.name}', "
-              f"it doesn't have newer results.",
-              file=sys.stderr)
+        print(
+            f"Skipping project '{project.name}', " f"it doesn't have newer results.",
+            file=sys.stderr,
+        )
         return
 
     build_log_path = SATestBuild.get_build_log_path(ref_results_path)
@@ -38,6 +39,7 @@ def update_reference_results(project: ProjectInfo, git: bool = False):
     os.makedirs(build_log_dir)
 
     with open(build_log_path, "w+") as build_log_file:
+
         def run_cmd(command: str):
             if Verbose:
                 print(f"Executing {command}")
@@ -57,7 +59,8 @@ def run_cmd(command: str):
         SATestBuild.run_cleanup_script(project_dir, build_log_file)
 
         SATestBuild.normalize_reference_results(
-            project_dir, ref_results_path, project.mode)
+            project_dir, ref_results_path, project.mode
+        )
 
         # Clean up the generated difference results.
         SATestBuild.cleanup_reference_results(ref_results_path)

diff --git a/clang/utils/analyzer/SATestUtils.py b/clang/utils/analyzer/SATestUtils.py
index 3947e183d82f3..2d2eb5a6902d8 100644
--- a/clang/utils/analyzer/SATestUtils.py
+++ b/clang/utils/analyzer/SATestUtils.py
@@ -11,7 +11,7 @@ def which(command: str, paths: Optional[str] = None) -> Optional[str]:
     (or the PATH environment variable, if unspecified)."""
 
     if paths is None:
-        paths = os.environ.get('PATH', '')
+        paths = os.environ.get("PATH", "")
 
     # Check for absolute match first.
     if os.path.exists(command):
@@ -23,10 +23,10 @@ def which(command: str, paths: Optional[str] = None) -> Optional[str]:
 
     # Get suffixes to search.
     # On Cygwin, 'PATHEXT' may exist but it should not be used.
-    if os.pathsep == ';':
-        pathext = os.environ.get('PATHEXT', '').split(';')
+    if os.pathsep == ";":
+        pathext = os.environ.get("PATHEXT", "").split(";")
     else:
-        pathext = ['']
+        pathext = [""]
 
     # Search the paths...
     for path in paths.split(os.pathsep):
@@ -62,6 +62,7 @@ def memory_to_str(memory: int) -> str:
     if memory:
         try:
             import humanize
+
             return humanize.naturalsize(memory, gnu=True)
         except ImportError:
             # no formatter installed, let's keep it in bytes
@@ -106,11 +107,10 @@ def get_memory(process: ps.Process) -> int:
 
         with ps.Popen(*popenargs, **kwargs) as process:
             # while the process is running calculate resource utilization.
-            while (process.is_running() and
-                   process.status() != ps.STATUS_ZOMBIE):
+            while process.is_running() and process.status() != ps.STATUS_ZOMBIE:
                 # track the peak utilization of the process
                 peak_mem = max(peak_mem, get_memory(process))
-                time.sleep(.5)
+                time.sleep(0.5)
 
             if process.is_running():
                 process.kill()
@@ -129,8 +129,14 @@ def get_memory(process: ps.Process) -> int:
     return time.time() - start_time, peak_mem
 
 
-def run_script(script_path: str, build_log_file: IO, cwd: str,
-               out=sys.stdout, err=sys.stderr, verbose: int = 0):
+def run_script(
+    script_path: str,
+    build_log_file: IO,
+    cwd: str,
+    out=sys.stdout,
+    err=sys.stderr,
+    verbose: int = 0,
+):
     """
     Run the provided script if it exists.
     """
@@ -139,19 +145,27 @@ def run_script(script_path: str, build_log_file: IO, cwd: str,
             if verbose == 1:
                 out.write(f"  Executing: {script_path}\n")
 
-            check_call(f"chmod +x '{script_path}'", cwd=cwd,
-                       stderr=build_log_file,
-                       stdout=build_log_file,
-                       shell=True)
-
-            check_call(f"'{script_path}'", cwd=cwd,
-                       stderr=build_log_file,
-                       stdout=build_log_file,
-                       shell=True)
+            check_call(
+                f"chmod +x '{script_path}'",
+                cwd=cwd,
+                stderr=build_log_file,
+                stdout=build_log_file,
+                shell=True,
+            )
+
+            check_call(
+                f"'{script_path}'",
+                cwd=cwd,
+                stderr=build_log_file,
+                stdout=build_log_file,
+                shell=True,
+            )
 
         except CalledProcessError:
-            err.write(f"Error: Running {script_path} failed. "
-                      f"See {build_log_file.name} for details.\n")
+            err.write(
+                f"Error: Running {script_path} failed. "
+                f"See {build_log_file.name} for details.\n"
+            )
             sys.exit(-1)
 
 

diff --git a/clang/utils/analyzer/SumTimerInfo.py b/clang/utils/analyzer/SumTimerInfo.py
index eed17e02e32e9..01614e41a065c 100644
--- a/clang/utils/analyzer/SumTimerInfo.py
+++ b/clang/utils/analyzer/SumTimerInfo.py
@@ -8,13 +8,12 @@
 """
 import sys
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     if len(sys.argv) < 2:
-        print('Usage: ', sys.argv[0],
-              'scan_build_output_file', file=sys.stderr)
+        print("Usage: ", sys.argv[0], "scan_build_output_file", file=sys.stderr)
         sys.exit(-1)
 
-    f = open(sys.argv[1], 'r')
+    f = open(sys.argv[1], "r")
     time = 0.0
     total_time = 0.0
     max_time = 0.0
@@ -57,8 +56,11 @@
         if "The # of times we inlined a call" in line:
             s = line.split()
             num_inlined_call_sites = num_inlined_call_sites + int(s[0])
-        if "The # of times we split the path due \
-                to imprecise dynamic dispatch info" in line:
+        if (
+            "The # of times we split the path due \
+                to imprecise dynamic dispatch info"
+            in line
+        ):
             s = line.split()
             num_bifurcated_call_sites = num_bifurcated_call_sites + int(s[0])
         if ")  Total" in line:
@@ -72,8 +74,10 @@
     print(f"Reachable blocks {reachable_blocks}")
     print(f"Reached max steps {reached_max_steps}")
     print(f"Number of steps {num_steps}")
-    print(f"Number of inlined calls {num_inlined_call_sites} "
-          f"(bifurcated {num_bifurcated_call_sites})")
+    print(
+        f"Number of inlined calls {num_inlined_call_sites} "
+        f"(bifurcated {num_bifurcated_call_sites})"
+    )
     print(f"Max time {max_time}")
     print(f"Total time {total_time}")
     print(f"Max CFG Size {max_cfg_size}")

diff --git a/clang/utils/analyzer/entrypoint.py b/clang/utils/analyzer/entrypoint.py
index b61f0d5192946..ff877060bad69 100644
--- a/clang/utils/analyzer/entrypoint.py
+++ b/clang/utils/analyzer/entrypoint.py
@@ -9,7 +9,7 @@
 
 def main():
     settings, rest = parse_arguments()
-    cmake_opts = ['-D' + cmd for cmd in settings.D]
+    cmake_opts = ["-D" + cmd for cmd in settings.D]
     if settings.wait:
         wait()
     if settings.build_llvm or settings.build_llvm_only:
@@ -28,15 +28,15 @@ def wait():
 
 def parse_arguments() -> Tuple[argparse.Namespace, List[str]]:
     parser = argparse.ArgumentParser()
-    parser.add_argument('--wait', action='store_true')
-    parser.add_argument('--build-llvm', action='store_true')
-    parser.add_argument('--build-llvm-only', action='store_true')
-    parser.add_argument('-D', action='append', default=[])
+    parser.add_argument("--wait", action="store_true")
+    parser.add_argument("--build-llvm", action="store_true")
+    parser.add_argument("--build-llvm-only", action="store_true")
+    parser.add_argument("-D", action="append", default=[])
     return parser.parse_known_args()
 
 
 def build_llvm(cmake_options):
-    os.chdir('/build')
+    os.chdir("/build")
     try:
         if is_cmake_needed():
             cmake(cmake_options)
@@ -50,16 +50,19 @@ def is_cmake_needed():
     return "build.ninja" not in os.listdir()
 
 
-CMAKE_COMMAND = "cmake -G Ninja -DCMAKE_BUILD_TYPE=Release " \
-    "-DCMAKE_INSTALL_PREFIX=/analyzer -DLLVM_TARGETS_TO_BUILD=X86 " \
-    "-DLLVM_ENABLE_PROJECTS=\"clang;openmp\" -DLLVM_BUILD_RUNTIME=OFF " \
-    "-DLLVM_ENABLE_TERMINFO=OFF -DCLANG_ENABLE_ARCMT=OFF " \
+CMAKE_COMMAND = (
+    "cmake -G Ninja -DCMAKE_BUILD_TYPE=Release "
+    "-DCMAKE_INSTALL_PREFIX=/analyzer -DLLVM_TARGETS_TO_BUILD=X86 "
+    '-DLLVM_ENABLE_PROJECTS="clang;openmp" -DLLVM_BUILD_RUNTIME=OFF '
+    "-DLLVM_ENABLE_TERMINFO=OFF -DCLANG_ENABLE_ARCMT=OFF "
     "-DCLANG_ENABLE_STATIC_ANALYZER=ON"
+)
 
 
 def cmake(cmake_options):
-    check_call(CMAKE_COMMAND + ' '.join(cmake_options) + ' /llvm-project/llvm',
-            shell=True)
+    check_call(
+        CMAKE_COMMAND + " ".join(cmake_options) + " /llvm-project/llvm", shell=True
+    )
 
 
 def ninja():
@@ -71,5 +74,5 @@ def test(args: List[str]) -> int:
     return call("/scripts/SATest.py " + " ".join(args), shell=True)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff --git a/clang/utils/analyzer/exploded-graph-rewriter.py b/clang/utils/analyzer/exploded-graph-rewriter.py
index bc447fe6cff35..c7c6315a0a27d 100755
--- a/clang/utils/analyzer/exploded-graph-rewriter.py
+++ b/clang/utils/analyzer/exploded-graph-rewriter.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python
 #
-#===- exploded-graph-rewriter.py - ExplodedGraph dump tool -----*- python -*--#
+# ===- exploded-graph-rewriter.py - ExplodedGraph dump tool -----*- python -*--#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 
 from __future__ import print_function
@@ -20,9 +20,9 @@
 import re
 
 
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 # These data structures represent a deserialized ExplodedGraph.
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 
 # A helper function for finding the difference between two dictionaries.
@@ -48,13 +48,15 @@ def is_different(self, prev):
 # A deserialized source location.
 class SourceLocation:
     def __init__(self, json_loc):
-        logging.debug('json: %s' % json_loc)
-        self.line = json_loc['line']
-        self.col = json_loc['column']
-        self.filename = os.path.basename(json_loc['file']) \
-            if 'file' in json_loc else '(main file)'
-        self.spelling = SourceLocation(json_loc['spelling']) \
-            if 'spelling' in json_loc else None
+        logging.debug("json: %s" % json_loc)
+        self.line = json_loc["line"]
+        self.col = json_loc["column"]
+        self.filename = (
+            os.path.basename(json_loc["file"]) if "file" in json_loc else "(main file)"
+        )
+        self.spelling = (
+            SourceLocation(json_loc["spelling"]) if "spelling" in json_loc else None
+        )
 
     def is_macro(self):
         return self.spelling is not None
@@ -63,37 +65,40 @@ def is_macro(self):
 # A deserialized program point.
 class ProgramPoint:
     def __init__(self, json_pp):
-        self.kind = json_pp['kind']
-        self.tag = json_pp['tag']
-        self.node_id = json_pp['node_id']
-        self.is_sink = bool(json_pp['is_sink'])
-        self.has_report = bool(json_pp['has_report'])
-        if self.kind == 'Edge':
-            self.src_id = json_pp['src_id']
-            self.dst_id = json_pp['dst_id']
-        elif self.kind == 'Statement':
+        self.kind = json_pp["kind"]
+        self.tag = json_pp["tag"]
+        self.node_id = json_pp["node_id"]
+        self.is_sink = bool(json_pp["is_sink"])
+        self.has_report = bool(json_pp["has_report"])
+        if self.kind == "Edge":
+            self.src_id = json_pp["src_id"]
+            self.dst_id = json_pp["dst_id"]
+        elif self.kind == "Statement":
             logging.debug(json_pp)
-            self.stmt_kind = json_pp['stmt_kind']
-            self.cast_kind = json_pp['cast_kind'] \
-                if 'cast_kind' in json_pp else None
-            self.stmt_point_kind = json_pp['stmt_point_kind']
-            self.stmt_id = json_pp['stmt_id']
-            self.pointer = json_pp['pointer']
-            self.pretty = json_pp['pretty']
-            self.loc = SourceLocation(json_pp['location']) \
-                if json_pp['location'] is not None else None
-        elif self.kind == 'BlockEntrance':
-            self.block_id = json_pp['block_id']
+            self.stmt_kind = json_pp["stmt_kind"]
+            self.cast_kind = json_pp["cast_kind"] if "cast_kind" in json_pp else None
+            self.stmt_point_kind = json_pp["stmt_point_kind"]
+            self.stmt_id = json_pp["stmt_id"]
+            self.pointer = json_pp["pointer"]
+            self.pretty = json_pp["pretty"]
+            self.loc = (
+                SourceLocation(json_pp["location"])
+                if json_pp["location"] is not None
+                else None
+            )
+        elif self.kind == "BlockEntrance":
+            self.block_id = json_pp["block_id"]
 
 
 # A single expression acting as a key in a deserialized Environment.
 class EnvironmentBindingKey:
     def __init__(self, json_ek):
         # CXXCtorInitializer is not a Stmt!
-        self.stmt_id = json_ek['stmt_id'] if 'stmt_id' in json_ek \
-            else json_ek['init_id']
-        self.pretty = json_ek['pretty']
-        self.kind = json_ek['kind'] if 'kind' in json_ek else None
+        self.stmt_id = (
+            json_ek["stmt_id"] if "stmt_id" in json_ek else json_ek["init_id"]
+        )
+        self.pretty = json_ek["pretty"]
+        self.kind = json_ek["kind"] if "kind" in json_ek else None
 
     def _key(self):
         return self.stmt_id
@@ -108,11 +113,14 @@ def __hash__(self):
 # Deserialized description of a location context.
 class LocationContext:
     def __init__(self, json_frame):
-        self.lctx_id = json_frame['lctx_id']
-        self.caption = json_frame['location_context']
-        self.decl = json_frame['calling']
-        self.loc = SourceLocation(json_frame['location']) \
-            if json_frame['location'] is not None else None
+        self.lctx_id = json_frame["lctx_id"]
+        self.caption = json_frame["location_context"]
+        self.decl = json_frame["calling"]
+        self.loc = (
+            SourceLocation(json_frame["location"])
+            if json_frame["location"] is not None
+            else None
+        )
 
     def _key(self):
         return self.lctx_id
@@ -130,9 +138,10 @@ class EnvironmentFrame:
     def __init__(self, json_frame):
         self.location_context = LocationContext(json_frame)
         self.bindings = collections.OrderedDict(
-            [(EnvironmentBindingKey(b),
-              b['value']) for b in json_frame['items']]
-            if json_frame['items'] is not None else [])
+            [(EnvironmentBindingKey(b), b["value"]) for b in json_frame["items"]]
+            if json_frame["items"] is not None
+            else []
+        )
 
     def diff_bindings(self, prev):
         return diff_dicts(self.bindings, prev.bindings)
@@ -143,7 +152,7 @@ def is_different(self, prev):
 
 
 # A deserialized Environment. This class can also hold other entities that
-# are similar to Environment, such as Objects Under Construction or 
+# are similar to Environment, such as Objects Under Construction or
 # Indices Of Elements Under Construction.
 class GenericEnvironment:
     def __init__(self, json_e):
@@ -177,8 +186,8 @@ def is_different(self, prev):
 # A single binding key in a deserialized RegionStore cluster.
 class StoreBindingKey:
     def __init__(self, json_sk):
-        self.kind = json_sk['kind']
-        self.offset = json_sk['offset']
+        self.kind = json_sk["kind"]
+        self.offset = json_sk["offset"]
 
     def _key(self):
         return (self.kind, self.offset)
@@ -193,9 +202,10 @@ def __hash__(self):
 # A single cluster of the deserialized RegionStore.
 class StoreCluster:
     def __init__(self, json_sc):
-        self.base_region = json_sc['cluster']
+        self.base_region = json_sc["cluster"]
         self.bindings = collections.OrderedDict(
-            [(StoreBindingKey(b), b['value']) for b in json_sc['items']])
+            [(StoreBindingKey(b), b["value"]) for b in json_sc["items"]]
+        )
 
     def diff_bindings(self, prev):
         return diff_dicts(self.bindings, prev.bindings)
@@ -208,15 +218,19 @@ def is_different(self, prev):
 # A deserialized RegionStore.
 class Store:
     def __init__(self, json_s):
-        self.ptr = json_s['pointer']
+        self.ptr = json_s["pointer"]
         self.clusters = collections.OrderedDict(
-            [(c['pointer'], StoreCluster(c)) for c in json_s['items']])
+            [(c["pointer"], StoreCluster(c)) for c in json_s["items"]]
+        )
 
     def diff_clusters(self, prev):
         removed = [k for k in prev.clusters if k not in self.clusters]
         added = [k for k in self.clusters if k not in prev.clusters]
-        updated = [k for k in prev.clusters if k in self.clusters
-                   and prev.clusters[k].is_different(self.clusters[k])]
+        updated = [
+            k
+            for k in prev.clusters
+            if k in self.clusters and prev.clusters[k].is_different(self.clusters[k])
+        ]
         return (removed, added, updated)
 
     def is_different(self, prev):
@@ -232,8 +246,7 @@ def __init__(self, json_lines):
 
     def diff_lines(self, prev):
         lines = difflib.ndiff(prev.lines, self.lines)
-        return [l.strip() for l in lines
-                if l.startswith('+') or l.startswith('-')]
+        return [l.strip() for l in lines if l.startswith("+") or l.startswith("-")]
 
     def is_different(self, prev):
         return len(self.diff_lines(prev)) > 0
@@ -243,13 +256,17 @@ def is_different(self, prev):
 class CheckerMessages:
     def __init__(self, json_m):
         self.items = collections.OrderedDict(
-            [(m['checker'], CheckerLines(m['messages'])) for m in json_m])
+            [(m["checker"], CheckerLines(m["messages"])) for m in json_m]
+        )
 
     def diff_messages(self, prev):
         removed = [k for k in prev.items if k not in self.items]
         added = [k for k in self.items if k not in prev.items]
-        updated = [k for k in prev.items if k in self.items
-                   and prev.items[k].is_different(self.items[k])]
+        updated = [
+            k
+            for k in prev.items
+            if k in self.items and prev.items[k].is_different(self.items[k])
+        ]
         return (removed, added, updated)
 
     def is_different(self, prev):
@@ -260,17 +277,17 @@ def is_different(self, prev):
 # A deserialized program state.
 class ProgramState:
     def __init__(self, state_id, json_ps):
-        logging.debug('Adding ProgramState ' + str(state_id))
-        
-        store_key = 'store'
-        env_key = 'environment'
-        constraints_key = 'constraints'
-        dyn_ty_key = 'dynamic_types'
-        ctor_key = 'constructing_objects'
-        ind_key = 'index_of_element'
-        init_loop_key = 'pending_init_loops'
-        dtor_key = 'pending_destructors'
-        msg_key = 'checker_messages'
+        logging.debug("Adding ProgramState " + str(state_id))
+
+        store_key = "store"
+        env_key = "environment"
+        constraints_key = "constraints"
+        dyn_ty_key = "dynamic_types"
+        ctor_key = "constructing_objects"
+        ind_key = "index_of_element"
+        init_loop_key = "pending_init_loops"
+        dtor_key = "pending_destructors"
+        msg_key = "checker_messages"
 
         if json_ps is None:
             json_ps = {
@@ -282,53 +299,78 @@ def __init__(self, state_id, json_ps):
                 ind_key: None,
                 init_loop_key: None,
                 dtor_key: None,
-                msg_key: None
+                msg_key: None,
             }
 
         self.state_id = state_id
 
-        self.store = Store(json_ps[store_key]) \
-            if json_ps[store_key] is not None else None
-
-        self.environment = \
-            GenericEnvironment(json_ps[env_key]['items']) \
-            if json_ps[env_key] is not None else None
-
-        self.constraints = GenericMap([
-            (c['symbol'], c['range']) for c in json_ps[constraints_key]
-        ]) if json_ps[constraints_key] is not None else None
-
-        self.dynamic_types = GenericMap([
-                (t['region'], '%s%s' % (t['dyn_type'],
-                                        ' (or a sub-class)'
-                                        if t['sub_classable'] else ''))
-                for t in json_ps[dyn_ty_key]]) \
-            if json_ps[dyn_ty_key] is not None else None
-
-        self.checker_messages = CheckerMessages(json_ps[msg_key]) \
-            if json_ps[msg_key] is not None else None
+        self.store = (
+            Store(json_ps[store_key]) if json_ps[store_key] is not None else None
+        )
+
+        self.environment = (
+            GenericEnvironment(json_ps[env_key]["items"])
+            if json_ps[env_key] is not None
+            else None
+        )
+
+        self.constraints = (
+            GenericMap([(c["symbol"], c["range"]) for c in json_ps[constraints_key]])
+            if json_ps[constraints_key] is not None
+            else None
+        )
+
+        self.dynamic_types = (
+            GenericMap(
+                [
+                    (
+                        t["region"],
+                        "%s%s"
+                        % (
+                            t["dyn_type"],
+                            " (or a sub-class)" if t["sub_classable"] else "",
+                        ),
+                    )
+                    for t in json_ps[dyn_ty_key]
+                ]
+            )
+            if json_ps[dyn_ty_key] is not None
+            else None
+        )
+
+        self.checker_messages = (
+            CheckerMessages(json_ps[msg_key]) if json_ps[msg_key] is not None else None
+        )
 
         # State traits
-        # 
+        #
         # For traits we always check if a key exists because if a trait
-        # has no imformation, nothing will be printed in the .dot file 
-        # we parse. 
-
-        self.constructing_objects = \
-            GenericEnvironment(json_ps[ctor_key]) \
-            if ctor_key in json_ps and json_ps[ctor_key] is not None else None
-
-        self.index_of_element = \
-            GenericEnvironment(json_ps[ind_key]) \
-            if ind_key in json_ps and json_ps[ind_key] is not None else None
-        
-        self.pending_init_loops = \
-            GenericEnvironment(json_ps[init_loop_key]) \
-            if init_loop_key in json_ps and json_ps[init_loop_key] is not None else None
-
-        self.pending_destructors = \
-            GenericEnvironment(json_ps[dtor_key]) \
-            if dtor_key in json_ps and json_ps[dtor_key] is not None else None
+        # has no imformation, nothing will be printed in the .dot file
+        # we parse.
+
+        self.constructing_objects = (
+            GenericEnvironment(json_ps[ctor_key])
+            if ctor_key in json_ps and json_ps[ctor_key] is not None
+            else None
+        )
+
+        self.index_of_element = (
+            GenericEnvironment(json_ps[ind_key])
+            if ind_key in json_ps and json_ps[ind_key] is not None
+            else None
+        )
+
+        self.pending_init_loops = (
+            GenericEnvironment(json_ps[init_loop_key])
+            if init_loop_key in json_ps and json_ps[init_loop_key] is not None
+            else None
+        )
+
+        self.pending_destructors = (
+            GenericEnvironment(json_ps[dtor_key])
+            if dtor_key in json_ps and json_ps[dtor_key] is not None
+            else None
+        )
 
 
 # A deserialized exploded graph node. Has a default constructor because it
@@ -340,18 +382,21 @@ def __init__(self):
         self.successors = []
 
     def construct(self, node_id, json_node):
-        logging.debug('Adding ' + node_id)
+        logging.debug("Adding " + node_id)
         self.ptr = node_id[4:]
-        self.points = [ProgramPoint(p) for p in json_node['program_points']]
+        self.points = [ProgramPoint(p) for p in json_node["program_points"]]
         self.node_id = self.points[-1].node_id
-        self.state = ProgramState(json_node['state_id'],
-                                  json_node['program_state']
-            if json_node['program_state'] is not None else None);
+        self.state = ProgramState(
+            json_node["state_id"],
+            json_node["program_state"]
+            if json_node["program_state"] is not None
+            else None,
+        )
 
         assert self.node_name() == node_id
 
     def node_name(self):
-        return 'Node' + self.ptr
+        return "Node" + self.ptr
 
 
 # A deserialized ExplodedGraph. Constructed by consuming a .dot file
@@ -359,33 +404,33 @@ def node_name(self):
 class ExplodedGraph:
     # Parse .dot files with regular expressions.
     node_re = re.compile(
-        '^(Node0x[0-9a-f]*) \\[shape=record,.*label="{(.*)\\\\l}"\\];$')
-    edge_re = re.compile(
-        '^(Node0x[0-9a-f]*) -> (Node0x[0-9a-f]*);$')
+        '^(Node0x[0-9a-f]*) \\[shape=record,.*label="{(.*)\\\\l}"\\];$'
+    )
+    edge_re = re.compile("^(Node0x[0-9a-f]*) -> (Node0x[0-9a-f]*);$")
 
     def __init__(self):
         self.nodes = collections.defaultdict(ExplodedNode)
         self.root_id = None
-        self.incomplete_line = ''
+        self.incomplete_line = ""
 
     def add_raw_line(self, raw_line):
-        if raw_line.startswith('//'):
+        if raw_line.startswith("//"):
             return
 
         # Allow line breaks by waiting for ';'. This is not valid in
         # a .dot file, but it is useful for writing tests.
-        if len(raw_line) > 0 and raw_line[-1] != ';':
+        if len(raw_line) > 0 and raw_line[-1] != ";":
             self.incomplete_line += raw_line
             return
         raw_line = self.incomplete_line + raw_line
-        self.incomplete_line = ''
+        self.incomplete_line = ""
 
         # Apply regexps one by one to see if it's a node or an edge
         # and extract contents if necessary.
-        logging.debug('Line: ' + raw_line)
+        logging.debug("Line: " + raw_line)
         result = self.edge_re.match(raw_line)
         if result is not None:
-            logging.debug('Classified as edge line.')
+            logging.debug("Classified as edge line.")
             pred = result.group(1)
             succ = result.group(2)
             self.nodes[pred].successors.append(succ)
@@ -393,46 +438,48 @@ def add_raw_line(self, raw_line):
             return
         result = self.node_re.match(raw_line)
         if result is not None:
-            logging.debug('Classified as node line.')
+            logging.debug("Classified as node line.")
             node_id = result.group(1)
             if len(self.nodes) == 0:
                 self.root_id = node_id
             # Note: when writing tests you don't need to escape everything,
             # even though in a valid dot file everything is escaped.
-            node_label = result.group(2).replace(' ', '') \
-                                        .replace('\\"', '"') \
-                                        .replace('\\{', '{') \
-                                        .replace('\\}', '}') \
-                                        .replace('\\\\', '\\') \
-                                        .replace('\\|', '|') \
-                                        .replace('\\<', '\\\\<') \
-                                        .replace('\\>', '\\\\>') \
-                                        .rstrip(',')
+            node_label = (
+                result.group(2)
+                .replace(" ", "")
+                .replace('\\"', '"')
+                .replace("\\{", "{")
+                .replace("\\}", "}")
+                .replace("\\\\", "\\")
+                .replace("\\|", "|")
+                .replace("\\<", "\\\\<")
+                .replace("\\>", "\\\\>")
+                .rstrip(",")
+            )
             # Handle `\l` separately because a string literal can be in code
             # like "string\\literal" with the `\l` inside.
             # Also on Windows macros __FILE__ produces specific delimiters `\`
             # and a directory or file may starts with the letter `l`.
             # Find all `\l` (like `,\l`, `}\l`, `[\l`) except `\\l`,
             # because the literal as a rule contains multiple `\` before `\l`.
-            node_label = re.sub(r'(?<!\\)\\l', '', node_label)
+            node_label = re.sub(r"(?<!\\)\\l", "", node_label)
             logging.debug(node_label)
             json_node = json.loads(node_label)
             self.nodes[node_id].construct(node_id, json_node)
             return
-        logging.debug('Skipping.')
+        logging.debug("Skipping.")
 
 
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 # Visitors traverse a deserialized ExplodedGraph and do different things
 # with every node and edge.
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 
 # A visitor that dumps the ExplodedGraph into a DOT file with fancy HTML-based
 # syntax highlighing.
 class DotDumpVisitor:
-    def __init__(self, do_diffs, dark_mode, gray_mode,
-                 topo_mode, dump_dot_only):
+    def __init__(self, do_diffs, dark_mode, gray_mode, topo_mode, dump_dot_only):
         self._do_diffs = do_diffs
         self._dark_mode = dark_mode
         self._gray_mode = gray_mode
@@ -442,31 +489,33 @@ def __init__(self, do_diffs, dark_mode, gray_mode,
 
     def _dump_raw(self, s):
         if self._dump_dot_only:
-            print(s, end='')
+            print(s, end="")
         else:
             self._output.append(s)
 
     def output(self):
         assert not self._dump_dot_only
-        return ''.join(self._output)
+        return "".join(self._output)
 
     def _dump(self, s):
-        s = s.replace('&', '&') \
-             .replace('{', '\\{') \
-             .replace('}', '\\}') \
-             .replace('\\<', '<') \
-             .replace('\\>', '>') \
-             .replace('|', '\\|')
-        s = re.sub(r'(?<!\\)\\l', '<br />', s)
+        s = (
+            s.replace("&", "&")
+            .replace("{", "\\{")
+            .replace("}", "\\}")
+            .replace("\\<", "<")
+            .replace("\\>", ">")
+            .replace("|", "\\|")
+        )
+        s = re.sub(r"(?<!\\)\\l", "<br />", s)
         if self._gray_mode:
-            s = re.sub(r'<font color="[a-z0-9]*">', '', s)
-            s = re.sub(r'</font>', '', s)
+            s = re.sub(r'<font color="[a-z0-9]*">', "", s)
+            s = re.sub(r"</font>", "", s)
         self._dump_raw(s)
 
     @staticmethod
     def _diff_plus_minus(is_added):
         if is_added is None:
-            return ''
+            return ""
         if is_added:
             return '<font color="forestgreen">+</font>'
         return '<font color="red">-</font>'
@@ -477,11 +526,11 @@ def _short_pretty(s):
             return None
         if len(s) < 20:
             return s
-        left = s.find('{')
-        right = s.rfind('}')
+        left = s.find("{")
+        right = s.rfind("}")
         if left == -1 or right == -1 or left >= right:
             return s
-        candidate = s[0:left + 1] + ' ... ' + s[right:]
+        candidate = s[0 : left + 1] + " ... " + s[right:]
         if len(candidate) >= len(s):
             return s
         return candidate
@@ -489,16 +538,16 @@ def _short_pretty(s):
     @staticmethod
     def _make_sloc(loc):
         if loc is None:
-            return '<i>Invalid Source Location</i>'
+            return "<i>Invalid Source Location</i>"
 
         def make_plain_loc(loc):
-            return '%s:<b>%s</b>:<b>%s</b>' \
-                % (loc.filename, loc.line, loc.col)
+            return "%s:<b>%s</b>:<b>%s</b>" % (loc.filename, loc.line, loc.col)
 
         if loc.is_macro():
-            return '%s <font color="royalblue1">' \
-                   '(<i>spelling at </i> %s)</font>' \
-                % (make_plain_loc(loc), make_plain_loc(loc.spelling))
+            return '%s <font color="royalblue1">' "(<i>spelling at </i> %s)</font>" % (
+                make_plain_loc(loc),
+                make_plain_loc(loc.spelling),
+            )
 
         return make_plain_loc(loc)
 
@@ -510,101 +559,126 @@ def visit_begin_graph(self, graph):
         self._dump_raw('label="";\n')
 
     def visit_program_point(self, p):
-        if p.kind in ['Edge', 'BlockEntrance', 'BlockExit']:
-            color = 'gold3'
-        elif p.kind in ['PreStmtPurgeDeadSymbols',
-                        'PostStmtPurgeDeadSymbols']:
-            color = 'red'
-        elif p.kind in ['CallEnter', 'CallExitBegin', 'CallExitEnd']:
-            color = 'dodgerblue' if self._dark_mode else 'blue'
-        elif p.kind in ['Statement']:
-            color = 'cyan4'
+        if p.kind in ["Edge", "BlockEntrance", "BlockExit"]:
+            color = "gold3"
+        elif p.kind in ["PreStmtPurgeDeadSymbols", "PostStmtPurgeDeadSymbols"]:
+            color = "red"
+        elif p.kind in ["CallEnter", "CallExitBegin", "CallExitEnd"]:
+            color = "dodgerblue" if self._dark_mode else "blue"
+        elif p.kind in ["Statement"]:
+            color = "cyan4"
         else:
-            color = 'forestgreen'
+            color = "forestgreen"
 
         self._dump('<tr><td align="left">%s.</td>' % p.node_id)
 
-        if p.kind == 'Statement':
+        if p.kind == "Statement":
             # This avoids pretty-printing huge statements such as CompoundStmt.
             # Such statements show up only at [Pre|Post]StmtPurgeDeadSymbols
-            skip_pretty = 'PurgeDeadSymbols' in p.stmt_point_kind
-            stmt_color = 'cyan3'
-            self._dump('<td align="left" width="0">%s:</td>'
-                       '<td align="left" width="0"><font color="%s">'
-                       '%s</font> </td>'
-                       '<td align="left"><i>S%s</i></td>'
-                       '<td align="left"><font color="%s">%s</font></td>'
-                       '<td align="left">%s</td></tr>'
-                       % (self._make_sloc(p.loc), color,
-                          '%s (%s)' % (p.stmt_kind, p.cast_kind)
-                          if p.cast_kind is not None else p.stmt_kind,
-                          p.stmt_id, stmt_color, p.stmt_point_kind,
-                          self._short_pretty(p.pretty)
-                          if not skip_pretty else ''))
-        elif p.kind == 'Edge':
-            self._dump('<td width="0"></td>'
-                       '<td align="left" width="0">'
-                       '<font color="%s">%s</font></td><td align="left">'
-                       '[B%d] -\\> [B%d]</td></tr>'
-                       % (color, 'BlockEdge', p.src_id, p.dst_id))
-        elif p.kind == 'BlockEntrance':
-            self._dump('<td width="0"></td>'
-                       '<td align="left" width="0">'
-                       '<font color="%s">%s</font></td>'
-                       '<td align="left">[B%d]</td></tr>'
-                       % (color, p.kind, p.block_id))
+            skip_pretty = "PurgeDeadSymbols" in p.stmt_point_kind
+            stmt_color = "cyan3"
+            self._dump(
+                '<td align="left" width="0">%s:</td>'
+                '<td align="left" width="0"><font color="%s">'
+                "%s</font> </td>"
+                '<td align="left"><i>S%s</i></td>'
+                '<td align="left"><font color="%s">%s</font></td>'
+                '<td align="left">%s</td></tr>'
+                % (
+                    self._make_sloc(p.loc),
+                    color,
+                    "%s (%s)" % (p.stmt_kind, p.cast_kind)
+                    if p.cast_kind is not None
+                    else p.stmt_kind,
+                    p.stmt_id,
+                    stmt_color,
+                    p.stmt_point_kind,
+                    self._short_pretty(p.pretty) if not skip_pretty else "",
+                )
+            )
+        elif p.kind == "Edge":
+            self._dump(
+                '<td width="0"></td>'
+                '<td align="left" width="0">'
+                '<font color="%s">%s</font></td><td align="left">'
+                "[B%d] -\\> [B%d]</td></tr>" % (color, "BlockEdge", p.src_id, p.dst_id)
+            )
+        elif p.kind == "BlockEntrance":
+            self._dump(
+                '<td width="0"></td>'
+                '<td align="left" width="0">'
+                '<font color="%s">%s</font></td>'
+                '<td align="left">[B%d]</td></tr>' % (color, p.kind, p.block_id)
+            )
         else:
             # TODO: Print more stuff for other kinds of points.
-            self._dump('<td width="0"></td>'
-                       '<td align="left" width="0" colspan="2">'
-                       '<font color="%s">%s</font></td></tr>'
-                       % (color, p.kind))
+            self._dump(
+                '<td width="0"></td>'
+                '<td align="left" width="0" colspan="2">'
+                '<font color="%s">%s</font></td></tr>' % (color, p.kind)
+            )
 
         if p.tag is not None:
-            self._dump('<tr><td width="0"></td><td width="0"></td>'
-                       '<td colspan="3" align="left">'
-                       '<b>Tag: </b> <font color="crimson">'
-                       '%s</font></td></tr>' % p.tag)
+            self._dump(
+                '<tr><td width="0"></td><td width="0"></td>'
+                '<td colspan="3" align="left">'
+                '<b>Tag: </b> <font color="crimson">'
+                "%s</font></td></tr>" % p.tag
+            )
 
         if p.has_report:
-            self._dump('<tr><td width="0"></td><td width="0"></td>'
-                       '<td colspan="3" align="left">'
-                       '<font color="red"><b>Bug Report Attached'
-                       '</b></font></td></tr>')
+            self._dump(
+                '<tr><td width="0"></td><td width="0"></td>'
+                '<td colspan="3" align="left">'
+                '<font color="red"><b>Bug Report Attached'
+                "</b></font></td></tr>"
+            )
         if p.is_sink:
-            self._dump('<tr><td width="0"></td><td width="0"></td>'
-                       '<td colspan="3" align="left">'
-                       '<font color="cornflowerblue"><b>Sink Node'
-                       '</b></font></td></tr>')
+            self._dump(
+                '<tr><td width="0"></td><td width="0"></td>'
+                '<td colspan="3" align="left">'
+                '<font color="cornflowerblue"><b>Sink Node'
+                "</b></font></td></tr>"
+            )
 
     def visit_environment(self, e, prev_e=None):
         self._dump('<table border="0">')
 
         def dump_location_context(lc, is_added=None):
-            self._dump('<tr><td>%s</td>'
-                       '<td align="left"><b>%s</b></td>'
-                       '<td align="left" colspan="2">'
-                       '<font color="gray60">%s </font>'
-                       '%s</td></tr>'
-                       % (self._diff_plus_minus(is_added),
-                          lc.caption, lc.decl,
-                          ('(%s)' % self._make_sloc(lc.loc))
-                          if lc.loc is not None else ''))
+            self._dump(
+                "<tr><td>%s</td>"
+                '<td align="left"><b>%s</b></td>'
+                '<td align="left" colspan="2">'
+                '<font color="gray60">%s </font>'
+                "%s</td></tr>"
+                % (
+                    self._diff_plus_minus(is_added),
+                    lc.caption,
+                    lc.decl,
+                    ("(%s)" % self._make_sloc(lc.loc)) if lc.loc is not None else "",
+                )
+            )
 
         def dump_binding(f, b, is_added=None):
-            self._dump('<tr><td>%s</td>'
-                       '<td align="left"><i>S%s</i></td>'
-                       '%s'
-                       '<td align="left">%s</td>'
-                       '<td align="left">%s</td></tr>'
-                       % (self._diff_plus_minus(is_added),
-                          b.stmt_id,
-                          '<td align="left"><font color="%s"><i>'
-                          '%s</i></font></td>' % (
-                              'lavender' if self._dark_mode else 'darkgreen',
-                              ('(%s)' % b.kind) if b.kind is not None else ' '
-                          ),
-                          self._short_pretty(b.pretty), f.bindings[b]))
+            self._dump(
+                "<tr><td>%s</td>"
+                '<td align="left"><i>S%s</i></td>'
+                "%s"
+                '<td align="left">%s</td>'
+                '<td align="left">%s</td></tr>'
+                % (
+                    self._diff_plus_minus(is_added),
+                    b.stmt_id,
+                    '<td align="left"><font color="%s"><i>'
+                    "%s</i></font></td>"
+                    % (
+                        "lavender" if self._dark_mode else "darkgreen",
+                        ("(%s)" % b.kind) if b.kind is not None else " ",
+                    ),
+                    self._short_pretty(b.pretty),
+                    f.bindings[b],
+                )
+            )
 
         frames_updated = e.diff_frames(prev_e) if prev_e is not None else None
         if frames_updated:
@@ -623,7 +697,7 @@ def dump_binding(f, b, is_added=None):
                 for b in f.bindings:
                     dump_binding(f, b)
 
-        self._dump('</table>')
+        self._dump("</table>")
 
     def visit_environment_in_state(self, selector, title, s, prev_s=None):
         e = getattr(s, selector)
@@ -633,44 +707,48 @@ def visit_environment_in_state(self, selector, title, s, prev_s=None):
 
         self._dump('<hr /><tr><td align="left"><b>%s: </b>' % title)
         if e is None:
-            self._dump('<i> Nothing!</i>')
+            self._dump("<i> Nothing!</i>")
         else:
             if prev_e is not None:
                 if e.is_different(prev_e):
                     self._dump('</td></tr><tr><td align="left">')
                     self.visit_environment(e, prev_e)
                 else:
-                    self._dump('<i> No changes!</i>')
+                    self._dump("<i> No changes!</i>")
             else:
                 self._dump('</td></tr><tr><td align="left">')
                 self.visit_environment(e)
 
-        self._dump('</td></tr>')
+        self._dump("</td></tr>")
 
     def visit_store(self, s, prev_s=None):
         self._dump('<table border="0">')
 
         def dump_binding(s, c, b, is_added=None):
-            self._dump('<tr><td>%s</td>'
-                       '<td align="left">%s</td>'
-                       '<td align="left">%s</td>'
-                       '<td align="left">%s</td>'
-                       '<td align="left">%s</td></tr>'
-                       % (self._
diff _plus_minus(is_added),
-                          s.clusters[c].base_region, b.offset,
-                          '(<i>Default</i>)' if b.kind == 'Default'
-                          else '',
-                          s.clusters[c].bindings[b]))
+            self._dump(
+                "<tr><td>%s</td>"
+                '<td align="left">%s</td>'
+                '<td align="left">%s</td>'
+                '<td align="left">%s</td>'
+                '<td align="left">%s</td></tr>'
+                % (
+                    self._diff_plus_minus(is_added),
+                    s.clusters[c].base_region,
+                    b.offset,
+                    "(<i>Default</i>)" if b.kind == "Default" else "",
+                    s.clusters[c].bindings[b],
+                )
+            )
 
         if prev_s is not None:
-            clusters_removed, clusters_added, clusters_updated = \
-                s.diff_clusters(prev_s)
+            clusters_removed, clusters_added, clusters_updated = s.diff_clusters(prev_s)
             for c in clusters_removed:
                 for b in prev_s.clusters[c].bindings:
                     dump_binding(prev_s, c, b, False)
             for c in clusters_updated:
-                bindings_removed, bindings_added = \
-                    s.clusters[c].diff_bindings(prev_s.clusters[c])
+                bindings_removed, bindings_added = s.clusters[c].diff_bindings(
+                    prev_s.clusters[c]
+                )
                 for b in bindings_removed:
                     dump_binding(prev_s, c, b, False)
                 for b in bindings_added:
@@ -683,7 +761,7 @@ def dump_binding(s, c, b, is_added=None):
                 for b in s.clusters[c].bindings:
                     dump_binding(s, c, b)
 
-        self._dump('</table>')
+        self._dump("</table>")
 
     def visit_store_in_state(self, s, prev_s=None):
         st = s.store
@@ -693,7 +771,7 @@ def visit_store_in_state(self, s, prev_s=None):
 
         self._dump('<hr /><tr><td align="left"><b>Store: </b>')
         if st is None:
-            self._dump('<i> Nothing!</i>')
+            self._dump("<i> Nothing!</i>")
         else:
             if self._dark_mode:
                 self._dump(' <font color="gray30">(%s)</font>' % st.ptr)
@@ -704,21 +782,22 @@ def visit_store_in_state(self, s, prev_s=None):
                     self._dump('</td></tr><tr><td align="left">')
                     self.visit_store(st, prev_st)
                 else:
-                    self._dump('<i> No changes!</i>')
+                    self._dump("<i> No changes!</i>")
             else:
                 self._dump('</td></tr><tr><td align="left">')
                 self.visit_store(st)
-        self._dump('</td></tr>')
+        self._dump("</td></tr>")
 
     def visit_generic_map(self, m, prev_m=None):
         self._dump('<table border="0">')
 
         def dump_pair(m, k, is_added=None):
-            self._dump('<tr><td>%s</td>'
-                       '<td align="left">%s</td>'
-                       '<td align="left">%s</td></tr>'
-                       % (self._diff_plus_minus(is_added),
-                          k, m.generic_map[k]))
+            self._dump(
+                "<tr><td>%s</td>"
+                '<td align="left">%s</td>'
+                '<td align="left">%s</td></tr>'
+                % (self._diff_plus_minus(is_added), k, m.generic_map[k])
+            )
 
         if prev_m is not None:
             removed, added = m.diff(prev_m)
@@ -730,7 +809,7 @@ def dump_pair(m, k, is_added=None):
             for k in m.generic_map:
                 dump_pair(m, k, None)
 
-        self._dump('</table>')
+        self._dump("</table>")
 
     def visit_generic_map_in_state(self, selector, title, s, prev_s=None):
         m = getattr(s, selector)
@@ -738,34 +817,34 @@ def visit_generic_map_in_state(self, selector, title, s, prev_s=None):
         if m is None and prev_m is None:
             return
 
-        self._dump('<hr />')
-        self._dump('<tr><td align="left">'
-                   '<b>%s: </b>' % title)
+        self._dump("<hr />")
+        self._dump('<tr><td align="left">' "<b>%s: </b>" % title)
         if m is None:
-            self._dump('<i> Nothing!</i>')
+            self._dump("<i> Nothing!</i>")
         else:
             if prev_m is not None:
                 if m.is_different(prev_m):
                     self._dump('</td></tr><tr><td align="left">')
                     self.visit_generic_map(m, prev_m)
                 else:
-                    self._dump('<i> No changes!</i>')
+                    self._dump("<i> No changes!</i>")
             else:
                 self._dump('</td></tr><tr><td align="left">')
                 self.visit_generic_map(m)
 
-        self._dump('</td></tr>')
+        self._dump("</td></tr>")
 
     def visit_checker_messages(self, m, prev_m=None):
         self._dump('<table border="0">')
 
         def dump_line(l, is_added=None):
-            self._dump('<tr><td>%s</td>'
-                       '<td align="left">%s</td></tr>'
-                       % (self._diff_plus_minus(is_added), l))
+            self._dump(
+                "<tr><td>%s</td>"
+                '<td align="left">%s</td></tr>' % (self._diff_plus_minus(is_added), l)
+            )
 
         def dump_chk(chk, is_added=None):
-            dump_line('<i>%s</i>:' % chk, is_added)
+            dump_line("<i>%s</i>:" % chk, is_added)
 
         if prev_m is not None:
             removed, added, updated = m.diff_messages(prev_m)
@@ -776,7 +855,7 @@ def dump_chk(chk, is_added=None):
             for chk in updated:
                 dump_chk(chk)
                 for l in m.items[chk].diff_lines(prev_m.items[chk]):
-                    dump_line(l[1:], l.startswith('+'))
+                    dump_line(l[1:], l.startswith("+"))
             for chk in added:
                 dump_chk(chk, True)
                 for l in m.items[chk].lines:
@@ -787,7 +866,7 @@ def dump_chk(chk, is_added=None):
                 for l in m.items[chk].lines:
                     dump_line(l)
 
-        self._dump('</table>')
+        self._dump("</table>")
 
     def visit_checker_messages_in_state(self, s, prev_s=None):
         m = s.checker_messages
@@ -795,132 +874,142 @@ def visit_checker_messages_in_state(self, s, prev_s=None):
         if m is None and prev_m is None:
             return
 
-        self._dump('<hr />')
-        self._dump('<tr><td align="left">'
-                   '<b>Checker State: </b>')
+        self._dump("<hr />")
+        self._dump('<tr><td align="left">' "<b>Checker State: </b>")
         if m is None:
-            self._dump('<i> Nothing!</i>')
+            self._dump("<i> Nothing!</i>")
         else:
             if prev_m is not None:
                 if m.is_different(prev_m):
                     self._dump('</td></tr><tr><td align="left">')
                     self.visit_checker_messages(m, prev_m)
                 else:
-                    self._dump('<i> No changes!</i>')
+                    self._dump("<i> No changes!</i>")
             else:
                 self._dump('</td></tr><tr><td align="left">')
                 self.visit_checker_messages(m)
 
-        self._dump('</td></tr>')
+        self._dump("</td></tr>")
 
     def visit_state(self, s, prev_s):
         self.visit_store_in_state(s, prev_s)
-        self.visit_environment_in_state('environment', 'Expressions',
-                                        s, prev_s)
-        self.visit_generic_map_in_state('constraints', 'Ranges',
-                                        s, prev_s)
-        self.visit_generic_map_in_state('dynamic_types', 'Dynamic Types',
-                                        s, prev_s)
-        self.visit_environment_in_state('constructing_objects',
-                                        'Objects Under Construction',
-                                        s, prev_s)
-        self.visit_environment_in_state('index_of_element',
-                                        'Indices Of Elements Under Construction',
-                                        s, prev_s)
-        self.visit_environment_in_state('pending_init_loops',
-                                        'Pending Array Init Loop Expressions',
-                                        s, prev_s)
-        self.visit_environment_in_state('pending_destructors',
-                                        'Indices of Elements Under Destruction',
-                                        s, prev_s)
+        self.visit_environment_in_state("environment", "Expressions", s, prev_s)
+        self.visit_generic_map_in_state("constraints", "Ranges", s, prev_s)
+        self.visit_generic_map_in_state("dynamic_types", "Dynamic Types", s, prev_s)
+        self.visit_environment_in_state(
+            "constructing_objects", "Objects Under Construction", s, prev_s
+        )
+        self.visit_environment_in_state(
+            "index_of_element", "Indices Of Elements Under Construction", s, prev_s
+        )
+        self.visit_environment_in_state(
+            "pending_init_loops", "Pending Array Init Loop Expressions", s, prev_s
+        )
+        self.visit_environment_in_state(
+            "pending_destructors", "Indices of Elements Under Destruction", s, prev_s
+        )
         self.visit_checker_messages_in_state(s, prev_s)
 
     def visit_node(self, node):
-        self._dump('%s [shape=record,'
-                   % (node.node_name()))
+        self._dump("%s [shape=record," % (node.node_name()))
         if self._dark_mode:
             self._dump('color="white",fontcolor="gray80",')
         self._dump('label=<<table border="0">')
 
-        self._dump('<tr><td bgcolor="%s"><b>State %s</b></td></tr>'
-                   % ("gray20" if self._dark_mode else "gray70",
-                      node.state.state_id
-                      if node.state is not None else 'Unspecified'))
+        self._dump(
+            '<tr><td bgcolor="%s"><b>State %s</b></td></tr>'
+            % (
+                "gray20" if self._dark_mode else "gray70",
+                node.state.state_id if node.state is not None else "Unspecified",
+            )
+        )
         if not self._topo_mode:
             self._dump('<tr><td align="left" width="0">')
             if len(node.points) > 1:
-                self._dump('<b>Program points:</b></td></tr>')
+                self._dump("<b>Program points:</b></td></tr>")
             else:
-                self._dump('<b>Program point:</b></td></tr>')
-        self._dump('<tr><td align="left" width="0">'
-                   '<table border="0" align="left" width="0">')
+                self._dump("<b>Program point:</b></td></tr>")
+        self._dump(
+            '<tr><td align="left" width="0">'
+            '<table border="0" align="left" width="0">'
+        )
         for p in node.points:
             self.visit_program_point(p)
-        self._dump('</table></td></tr>')
+        self._dump("</table></td></tr>")
 
         if node.state is not None and not self._topo_mode:
             prev_s = None
             # Do diffs only when we have a unique predecessor.
             # Don't do diffs on the leaf nodes because they're
             # the important ones.
-            if self._do_diffs and len(node.predecessors) == 1 \
-               and len(node.successors) > 0:
+            if (
+                self._do_diffs
+                and len(node.predecessors) == 1
+                and len(node.successors) > 0
+            ):
                 prev_s = self._graph.nodes[node.predecessors[0]].state
             self.visit_state(node.state, prev_s)
-        self._dump_raw('</table>>];\n')
+        self._dump_raw("</table>>];\n")
 
     def visit_edge(self, pred, succ):
-        self._dump_raw('%s -> %s%s;\n' % (
-            pred.node_name(), succ.node_name(),
-            ' [color="white"]' if self._dark_mode else ''
-        ))
+        self._dump_raw(
+            "%s -> %s%s;\n"
+            % (
+                pred.node_name(),
+                succ.node_name(),
+                ' [color="white"]' if self._dark_mode else "",
+            )
+        )
 
     def visit_end_of_graph(self):
-        self._dump_raw('}\n')
+        self._dump_raw("}\n")
 
         if not self._dump_dot_only:
             import sys
             import tempfile
 
             def write_temp_file(suffix, prefix, data):
-                fd, filename = tempfile.mkstemp(suffix, prefix, '.', True)
+                fd, filename = tempfile.mkstemp(suffix, prefix, ".", True)
                 print('Writing "%s"...' % filename)
-                with os.fdopen(fd, 'w') as fp:
+                with os.fdopen(fd, "w") as fp:
                     fp.write(data)
-                print('Done! Please remember to remove the file.')
+                print("Done! Please remember to remove the file.")
                 return filename
 
             try:
                 import graphviz
             except ImportError:
                 # The fallback behavior if graphviz is not installed!
-                print('Python graphviz not found. Please invoke')
-                print('  $ pip install graphviz')
-                print('in order to enable automatic conversion to HTML.')
+                print("Python graphviz not found. Please invoke")
+                print("  $ pip install graphviz")
+                print("in order to enable automatic conversion to HTML.")
                 print()
-                print('You may also convert DOT to SVG manually via')
-                print('  $ dot -Tsvg input.dot -o output.svg')
+                print("You may also convert DOT to SVG manually via")
+                print("  $ dot -Tsvg input.dot -o output.svg")
                 print()
-                write_temp_file('.dot', 'egraph-', self.output())
+                write_temp_file(".dot", "egraph-", self.output())
                 return
 
-            svg = graphviz.pipe('dot', 'svg', self.output().encode()).decode()
+            svg = graphviz.pipe("dot", "svg", self.output().encode()).decode()
 
             filename = write_temp_file(
-                '.html', 'egraph-', '<html><body bgcolor="%s">%s</body></html>' % (
-                             '#1a1a1a' if self._dark_mode else 'white', svg))
-            if sys.platform == 'win32':
+                ".html",
+                "egraph-",
+                '<html><body bgcolor="%s">%s</body></html>'
+                % ("#1a1a1a" if self._dark_mode else "white", svg),
+            )
+            if sys.platform == "win32":
                 os.startfile(filename)
-            elif sys.platform == 'darwin':
+            elif sys.platform == "darwin":
                 os.system('open "%s"' % filename)
             else:
                 os.system('xdg-open "%s"' % filename)
 
 
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 # Explorers know how to traverse the ExplodedGraph in a certain order.
 # They would invoke a Visitor on every node or edge they encounter.
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 
 # BasicExplorer explores the whole graph in no particular order.
@@ -928,18 +1017,18 @@ class BasicExplorer:
     def explore(self, graph, visitor):
         visitor.visit_begin_graph(graph)
         for node in sorted(graph.nodes):
-            logging.debug('Visiting ' + node)
+            logging.debug("Visiting " + node)
             visitor.visit_node(graph.nodes[node])
             for succ in sorted(graph.nodes[node].successors):
-                logging.debug('Visiting edge: %s -> %s ' % (node, succ))
+                logging.debug("Visiting edge: %s -> %s " % (node, succ))
                 visitor.visit_edge(graph.nodes[node], graph.nodes[succ])
         visitor.visit_end_of_graph()
 
 
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 # Trimmers cut out parts of the ExplodedGraph so that to focus on other parts.
 # Trimmers can be combined together by applying them sequentially.
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 
 # SinglePathTrimmer keeps only a single path - the leftmost path from the root.
@@ -961,8 +1050,7 @@ def trim(self, graph):
                 node_id = succ_id
             else:
                 break
-        graph.nodes = {node_id: graph.nodes[node_id]
-                       for node_id in visited_nodes}
+        graph.nodes = {node_id: graph.nodes[node_id] for node_id in visited_nodes}
 
 
 # TargetedTrimmer keeps paths that lead to specific nodes and discards all
@@ -974,8 +1062,8 @@ def __init__(self, target_nodes):
 
     @staticmethod
     def parse_target_node(node, graph):
-        if node.startswith('0x'):
-            ret = 'Node' + node
+        if node.startswith("0x"):
+            ret = "Node" + node
             assert ret in graph.nodes
             return ret
         else:
@@ -986,8 +1074,10 @@ def parse_target_node(node, graph):
 
     @staticmethod
     def parse_target_nodes(target_nodes, graph):
-        return [TargetedTrimmer.parse_target_node(node, graph)
-                for node in target_nodes.split(',')]
+        return [
+            TargetedTrimmer.parse_target_node(node, graph)
+            for node in target_nodes.split(",")
+        ]
 
     def trim(self, graph):
         queue = self._target_nodes
@@ -1000,57 +1090,102 @@ def trim(self, graph):
             for pred_id in node.predecessors:
                 if pred_id not in visited_nodes:
                     queue.append(pred_id)
-        graph.nodes = {node_id: graph.nodes[node_id]
-                       for node_id in visited_nodes}
+        graph.nodes = {node_id: graph.nodes[node_id] for node_id in visited_nodes}
         for node_id in graph.nodes:
             node = graph.nodes[node_id]
-            node.successors = [succ_id for succ_id in node.successors
-                               if succ_id in visited_nodes]
-            node.predecessors = [succ_id for succ_id in node.predecessors
-                                 if succ_id in visited_nodes]
+            node.successors = [
+                succ_id for succ_id in node.successors if succ_id in visited_nodes
+            ]
+            node.predecessors = [
+                succ_id for succ_id in node.predecessors if succ_id in visited_nodes
+            ]
 
 
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 # The entry point to the script.
-#===-----------------------------------------------------------------------===#
+# ===-----------------------------------------------------------------------===#
 
 
 def main():
     parser = argparse.ArgumentParser(
-        description='Display and manipulate Exploded Graph dumps.')
-    parser.add_argument('filename', type=str,
-                        help='the .dot file produced by the Static Analyzer')
-    parser.add_argument('-v', '--verbose', action='store_const',
-                        dest='loglevel', const=logging.DEBUG,
-                        default=logging.WARNING,
-                        help='enable info prints')
-    parser.add_argument('-d', '--diff', action='store_const', dest='diff',
-                        const=True, default=False,
-                        help='display differences between states')
-    parser.add_argument('-t', '--topology', action='store_const',
-                        dest='topology', const=True, default=False,
-                        help='only display program points, omit states')
-    parser.add_argument('-s', '--single-path', action='store_const',
-                        dest='single_path', const=True, default=False,
-                        help='only display the leftmost path in the graph '
-                             '(useful for trimmed graphs that still '
-                             'branch too much)')
-    parser.add_argument('--to', type=str, default=None,
-                        help='only display execution paths from the root '
-                             'to the given comma-separated list of nodes '
-                             'identified by a pointer or a stable ID; '
-                             'compatible with --single-path')
-    parser.add_argument('--dark', action='store_const', dest='dark',
-                        const=True, default=False,
-                        help='dark mode')
-    parser.add_argument('--gray', action='store_const', dest='gray',
-                        const=True, default=False,
-                        help='black-and-white mode')
-    parser.add_argument('--dump-dot-only', action='store_const',
-                        dest='dump_dot_only', const=True, default=False,
-                        help='instead of writing an HTML file and immediately '
-                             'displaying it, dump the rewritten dot file '
-                             'to stdout')
+        description="Display and manipulate Exploded Graph dumps."
+    )
+    parser.add_argument(
+        "filename", type=str, help="the .dot file produced by the Static Analyzer"
+    )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="store_const",
+        dest="loglevel",
+        const=logging.DEBUG,
+        default=logging.WARNING,
+        help="enable info prints",
+    )
+    parser.add_argument(
+        "-d",
+        "--diff",
+        action="store_const",
+        dest="diff",
+        const=True,
+        default=False,
+        help="display differences between states",
+    )
+    parser.add_argument(
+        "-t",
+        "--topology",
+        action="store_const",
+        dest="topology",
+        const=True,
+        default=False,
+        help="only display program points, omit states",
+    )
+    parser.add_argument(
+        "-s",
+        "--single-path",
+        action="store_const",
+        dest="single_path",
+        const=True,
+        default=False,
+        help="only display the leftmost path in the graph "
+        "(useful for trimmed graphs that still "
+        "branch too much)",
+    )
+    parser.add_argument(
+        "--to",
+        type=str,
+        default=None,
+        help="only display execution paths from the root "
+        "to the given comma-separated list of nodes "
+        "identified by a pointer or a stable ID; "
+        "compatible with --single-path",
+    )
+    parser.add_argument(
+        "--dark",
+        action="store_const",
+        dest="dark",
+        const=True,
+        default=False,
+        help="dark mode",
+    )
+    parser.add_argument(
+        "--gray",
+        action="store_const",
+        dest="gray",
+        const=True,
+        default=False,
+        help="black-and-white mode",
+    )
+    parser.add_argument(
+        "--dump-dot-only",
+        action="store_const",
+        dest="dump_dot_only",
+        const=True,
+        default=False,
+        help="instead of writing an HTML file and immediately "
+        "displaying it, dump the rewritten dot file "
+        "to stdout",
+    )
     args = parser.parse_args()
     logging.basicConfig(level=args.loglevel)
 
@@ -1062,15 +1197,17 @@ def main():
 
     trimmers = []
     if args.to is not None:
-        trimmers.append(TargetedTrimmer(
-            TargetedTrimmer.parse_target_nodes(args.to, graph)))
+        trimmers.append(
+            TargetedTrimmer(TargetedTrimmer.parse_target_nodes(args.to, graph))
+        )
     if args.single_path:
         trimmers.append(SinglePathTrimmer())
 
     explorer = BasicExplorer()
 
-    visitor = DotDumpVisitor(args.diff, args.dark, args.gray, args.topology,
-                             args.dump_dot_only)
+    visitor = DotDumpVisitor(
+        args.diff, args.dark, args.gray, args.topology, args.dump_dot_only
+    )
 
     for trimmer in trimmers:
         trimmer.trim(graph)
@@ -1078,5 +1215,5 @@ def main():
     explorer.explore(graph, visitor)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

diff --git a/clang/utils/bundle_resources.py b/clang/utils/bundle_resources.py
index 692c0ba02b09b..66871fbe99c11 100644
--- a/clang/utils/bundle_resources.py
+++ b/clang/utils/bundle_resources.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python3
 
-#===- bundle_resources.py - Generate string constants with file contents. ===
+# ===- bundle_resources.py - Generate string constants with file contents. ===
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===----------------------------------------------------------------------===
+# ===----------------------------------------------------------------------===
 
 # Usage: bundle-resources.py foo.inc a.js path/b.css ...
 # Produces foo.inc containing:
@@ -18,12 +18,12 @@
 outfile = sys.argv[1]
 infiles = sys.argv[2:]
 
-with open(outfile, 'w') as out:
-  for filename in infiles:
-    varname = os.path.basename(filename).replace('.', '_')
-    out.write("const char " + varname + "[] = \n");
-    # MSVC limits each chunk of string to 2k, so split by lines.
-    # The overall limit is 64k, which ought to be enough for anyone.
-    for line in open(filename).read().split('\n'):
-      out.write('  R"x(' + line + ')x" "\\n"\n' )
-    out.write('  ;\n');
+with open(outfile, "w") as out:
+    for filename in infiles:
+        varname = os.path.basename(filename).replace(".", "_")
+        out.write("const char " + varname + "[] = \n")
+        # MSVC limits each chunk of string to 2k, so split by lines.
+        # The overall limit is 64k, which ought to be enough for anyone.
+        for line in open(filename).read().split("\n"):
+            out.write('  R"x(' + line + ')x" "\\n"\n')
+        out.write("  ;\n")

diff --git a/clang/utils/check_cfc/check_cfc.py b/clang/utils/check_cfc/check_cfc.py
index cab33b41c1ca0..27d732d91030c 100755
--- a/clang/utils/check_cfc/check_cfc.py
+++ b/clang/utils/check_cfc/check_cfc.py
@@ -56,6 +56,7 @@
 import subprocess
 import sys
 import tempfile
+
 try:
     import configparser
 except ImportError:
@@ -64,28 +65,37 @@
 
 import obj_diff
 
+
 def is_windows():
     """Returns True if running on Windows."""
-    return platform.system() == 'Windows'
+    return platform.system() == "Windows"
+
 
 class WrapperStepException(Exception):
     """Exception type to be used when a step other than the original compile
     fails."""
+
     def __init__(self, msg, stdout, stderr):
         self.msg = msg
         self.stdout = stdout
         self.stderr = stderr
 
+
 class WrapperCheckException(Exception):
     """Exception type to be used when a comparison check fails."""
+
     def __init__(self, msg):
         self.msg = msg
 
+
 def main_is_frozen():
     """Returns True when running as a py2exe executable."""
-    return (hasattr(sys, "frozen") or # new py2exe
-            hasattr(sys, "importers") or # old py2exe
-            imp.is_frozen("__main__")) # tools/freeze
+    return (
+        hasattr(sys, "frozen")
+        or hasattr(sys, "importers")  # new py2exe
+        or imp.is_frozen("__main__")  # old py2exe
+    )  # tools/freeze
+
 
 def get_main_dir():
     """Get the directory that the script or executable is located in."""
@@ -93,30 +103,35 @@ def get_main_dir():
         return os.path.dirname(sys.executable)
     return os.path.dirname(sys.argv[0])
 
+
 def remove_dir_from_path(path_var, directory):
     """Remove the specified directory from path_var, a string representing
     PATH"""
     pathlist = path_var.split(os.pathsep)
     norm_directory = os.path.normpath(os.path.normcase(directory))
-    pathlist = [x for x in pathlist if os.path.normpath(
-        os.path.normcase(x)) != norm_directory]
+    pathlist = [
+        x for x in pathlist if os.path.normpath(os.path.normcase(x)) != norm_directory
+    ]
     return os.pathsep.join(pathlist)
 
+
 def path_without_wrapper():
     """Returns the PATH variable modified to remove the path to this program."""
     scriptdir = get_main_dir()
-    path = os.environ['PATH']
+    path = os.environ["PATH"]
     return remove_dir_from_path(path, scriptdir)
 
+
 def flip_dash_g(args):
     """Search for -g in args. If it exists then return args without. If not then
     add it."""
-    if '-g' in args:
+    if "-g" in args:
         # Return args without any -g
-        return [x for x in args if x != '-g']
+        return [x for x in args if x != "-g"]
     else:
         # No -g, add one
-        return args + ['-g']
+        return args + ["-g"]
+
 
 def derive_output_file(args):
     """Derive output file from the input file (if just one) or None
@@ -125,7 +140,8 @@ def derive_output_file(args):
     if infile is None:
         return None
     else:
-        return '{}.o'.format(os.path.splitext(infile)[0])
+        return "{}.o".format(os.path.splitext(infile)[0])
+
 
 def get_output_file(args):
     """Return the output file specified by this command or None if not
@@ -134,30 +150,32 @@ def get_output_file(args):
     for arg in args:
         if grabnext:
             return arg
-        if arg == '-o':
+        if arg == "-o":
             # Specified as a separate arg
             grabnext = True
-        elif arg.startswith('-o'):
+        elif arg.startswith("-o"):
             # Specified conjoined with -o
             return arg[2:]
     assert grabnext == False
 
     return None
 
+
 def is_output_specified(args):
     """Return true is output file is specified in args."""
     return get_output_file(args) is not None
 
+
 def replace_output_file(args, new_name):
     """Replaces the specified name of an output file with the specified name.
     Assumes that the output file name is specified in the command line args."""
     replaceidx = None
     attached = False
     for idx, val in enumerate(args):
-        if val == '-o':
+        if val == "-o":
             replaceidx = idx + 1
             attached = False
-        elif val.startswith('-o'):
+        elif val.startswith("-o"):
             replaceidx = idx
             attached = True
 
@@ -165,13 +183,15 @@ def replace_output_file(args, new_name):
         raise Exception
     replacement = new_name
     if attached == True:
-        replacement = '-o' + new_name
+        replacement = "-o" + new_name
     args[replaceidx] = replacement
     return args
 
+
 def add_output_file(args, output_file):
     """Append an output file to args, presuming not already specified."""
-    return args + ['-o', output_file]
+    return args + ["-o", output_file]
+
 
 def set_output_file(args, output_file):
     """Set the output file within the arguments. Appends or replaces as
@@ -182,7 +202,9 @@ def set_output_file(args, output_file):
         args = add_output_file(args, output_file)
     return args
 
-gSrcFileSuffixes = ('.c', '.cpp', '.cxx', '.c++', '.cp', '.cc')
+
+gSrcFileSuffixes = (".c", ".cpp", ".cxx", ".c++", ".cp", ".cc")
+
 
 def get_input_file(args):
     """Return the input file string if it can be found (and there is only
@@ -203,6 +225,7 @@ def get_input_file(args):
     else:
         return None
 
+
 def set_input_file(args, input_file):
     """Replaces the input file with that specified."""
     infile = get_input_file(args)
@@ -214,30 +237,40 @@ def set_input_file(args, input_file):
         # Could not find input file
         assert False
 
+
 def is_normal_compile(args):
     """Check if this is a normal compile which will output an object file rather
     than a preprocess or link. args is a list of command line arguments."""
-    compile_step = '-c' in args
+    compile_step = "-c" in args
     # Bitcode cannot be disassembled in the same way
-    bitcode = '-flto' in args or '-emit-llvm' in args
+    bitcode = "-flto" in args or "-emit-llvm" in args
     # Version and help are queries of the compiler and override -c if specified
-    query = '--version' in args or '--help' in args
+    query = "--version" in args or "--help" in args
     # Options to output dependency files for make
-    dependency = '-M' in args or '-MM' in args
+    dependency = "-M" in args or "-MM" in args
     # Check if the input is recognised as a source file (this may be too
     # strong a restriction)
     input_is_valid = bool(get_input_file(args))
-    return compile_step and not bitcode and not query and not dependency and input_is_valid
+    return (
+        compile_step and not bitcode and not query and not dependency and input_is_valid
+    )
+
 
 def run_step(command, my_env, error_on_failure):
     """Runs a step of the compilation. Reports failure as exception."""
     # Need to use shell=True on Windows as Popen won't use PATH otherwise.
-    p = subprocess.Popen(command, stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE, env=my_env, shell=is_windows())
+    p = subprocess.Popen(
+        command,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        env=my_env,
+        shell=is_windows(),
+    )
     (stdout, stderr) = p.communicate()
     if p.returncode != 0:
         raise WrapperStepException(error_on_failure, stdout, stderr)
 
+
 def get_temp_file_name(suffix):
     """Get a temporary file name with a particular suffix. Let the caller be
     responsible for deleting it."""
@@ -245,8 +278,10 @@ def get_temp_file_name(suffix):
     tf.close()
     return tf.name
 
+
 class WrapperCheck(object):
     """Base class for a check. Subclass this to add a check."""
+
     def __init__(self, output_file_a):
         """Record the base output file that will be compared against."""
         self._output_file_a = output_file_a
@@ -256,10 +291,11 @@ def perform_check(self, arguments, my_env):
         checks."""
         raise NotImplementedError("Please Implement this method")
 
+
 class dash_g_no_change(WrapperCheck):
     def perform_check(self, arguments, my_env):
         """Check if 
diff erent code is generated with/without the -g flag."""
-        output_file_b = get_temp_file_name('.o')
+        output_file_b = get_temp_file_name(".o")
 
         alternate_command = list(arguments)
         alternate_command = flip_dash_g(alternate_command)
@@ -267,56 +303,61 @@ def perform_check(self, arguments, my_env):
         run_step(alternate_command, my_env, "Error compiling with -g")
 
         # Compare disassembly (returns first diff if differs)
-        difference = obj_diff.compare_object_files(self._output_file_a,
-                                                   output_file_b)
+        difference = obj_diff.compare_object_files(self._output_file_a, output_file_b)
         if difference:
             raise WrapperCheckException(
-                "Code difference detected with -g\n{}".format(difference))
+                "Code difference detected with -g\n{}".format(difference)
+            )
 
         # Clean up temp file if comparison okay
         os.remove(output_file_b)
 
+
 class dash_s_no_change(WrapperCheck):
     def perform_check(self, arguments, my_env):
         """Check if compiling to asm then assembling in separate steps results
         in different code than compiling to object directly."""
-        output_file_b = get_temp_file_name('.o')
+        output_file_b = get_temp_file_name(".o")
 
-        alternate_command = arguments + ['-via-file-asm']
+        alternate_command = arguments + ["-via-file-asm"]
         alternate_command = set_output_file(alternate_command, output_file_b)
-        run_step(alternate_command, my_env,
-                 "Error compiling with -via-file-asm")
+        run_step(alternate_command, my_env, "Error compiling with -via-file-asm")
 
         # Compare if object files are exactly the same
         exactly_equal = obj_diff.compare_exact(self._output_file_a, output_file_b)
         if not exactly_equal:
             # Compare disassembly (returns first diff if differs)
-            difference = obj_diff.compare_object_files(self._output_file_a,
-                                                       output_file_b)
+            difference = obj_diff.compare_object_files(
+                self._output_file_a, output_file_b
+            )
             if difference:
                 raise WrapperCheckException(
-                    "Code difference detected with -S\n{}".format(difference))
+                    "Code difference detected with -S\n{}".format(difference)
+                )

             # Code is identical, compare debug info
-            dbgdifference = obj_diff.compare_debug_info(self._output_file_a,
-                                                        output_file_b)
+            dbgdifference = obj_diff.compare_debug_info(
+                self._output_file_a, output_file_b
+            )
             if dbgdifference:
                 raise WrapperCheckException(
-                    "Debug info difference detected with -S\n{}".format(dbgdifference))
+                    "Debug info difference detected with -S\n{}".format(dbgdifference)
+                )
 
             raise WrapperCheckException("Object files not identical with -S\n")
 
         # Clean up temp file if comparison okay
         os.remove(output_file_b)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     # Create configuration defaults from list of checks
     default_config = """
 [Checks]
 """
 
     # Find all subclasses of WrapperCheck
-    checks = [cls.__name__ for cls in vars()['WrapperCheck'].__subclasses__()]
+    checks = [cls.__name__ for cls in vars()["WrapperCheck"].__subclasses__()]
 
     for c in checks:
         default_config += "{} = false\n".format(c)
@@ -324,15 +365,14 @@ def perform_check(self, arguments, my_env):
     config = configparser.RawConfigParser()
     config.readfp(io.BytesIO(default_config))
     scriptdir = get_main_dir()
-    config_path = os.path.join(scriptdir, 'check_cfc.cfg')
+    config_path = os.path.join(scriptdir, "check_cfc.cfg")
     try:
         config.read(os.path.join(config_path))
     except:
-        print("Could not read config from {}, "
-              "using defaults.".format(config_path))
+        print("Could not read config from {}, " "using defaults.".format(config_path))
 
     my_env = os.environ.copy()
-    my_env['PATH'] = path_without_wrapper()
+    my_env["PATH"] = path_without_wrapper()
 
     arguments_a = list(sys.argv)
 
@@ -340,10 +380,10 @@ def perform_check(self, arguments, my_env):
     arguments_a[0] = os.path.basename(arguments_a[0])
 
     # Basic correctness check
-    enabled_checks = [check_name
-                      for check_name in checks
-                      if config.getboolean('Checks', check_name)]
-    checks_comma_separated = ', '.join(enabled_checks)
+    enabled_checks = [
+        check_name for check_name in checks if config.getboolean("Checks", check_name)
+    ]
+    checks_comma_separated = ", ".join(enabled_checks)
     print("Check CFC, checking: {}".format(checks_comma_separated))
 
     # A - original compilation
@@ -370,21 +410,23 @@ def perform_check(self, arguments, my_env):
         sys.exit(0)
 
     # Copy output file to a temp file
-    temp_output_file_orig = get_temp_file_name('.o')
+    temp_output_file_orig = get_temp_file_name(".o")
     shutil.copyfile(output_file_orig, temp_output_file_orig)
 
     # Run checks, if they are enabled in config and if they are appropriate for
     # this command line.
     current_module = sys.modules[__name__]
     for check_name in checks:
-        if config.getboolean('Checks', check_name):
+        if config.getboolean("Checks", check_name):
             class_ = getattr(current_module, check_name)
             checker = class_(temp_output_file_orig)
             try:
                 checker.perform_check(arguments_a, my_env)
             except WrapperCheckException as e:
                 # Check failure
-                print("{} {}".format(get_input_file(arguments_a), e.msg), file=sys.stderr)
+                print(
+                    "{} {}".format(get_input_file(arguments_a), e.msg), file=sys.stderr
+                )
 
                 # Remove file to comply with build system expectations (no
                 # output file if failed)

diff --git a/clang/utils/check_cfc/obj_diff.py b/clang/utils/check_cfc/obj_diff.py
index a0951c5bcdee4..99ed19e522be2 100755
--- a/clang/utils/check_cfc/obj_diff.py
+++ b/clang/utils/check_cfc/obj_diff.py
@@ -9,33 +9,41 @@
 import subprocess
 import sys
 
-disassembler = 'objdump'
+disassembler = "objdump"
+
 
 def keep_line(line):
     """Returns true for lines that should be compared in the disassembly
     output."""
     return "file format" not in line
 
+
 def disassemble(objfile):
     """Disassemble object to a file."""
-    p = subprocess.Popen([disassembler, '-d', objfile],
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE)
+    p = subprocess.Popen(
+        [disassembler, "-d", objfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+    )
     (out, err) = p.communicate()
     if p.returncode or err:
         print("Disassemble failed: {}".format(objfile))
         sys.exit(1)
     return [line for line in out.split(os.linesep) if keep_line(line)]
 
+
 def dump_debug(objfile):
     """Dump all of the debug info from a file."""
-    p = subprocess.Popen([disassembler, '-WliaprmfsoRt', objfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    p = subprocess.Popen(
+        [disassembler, "-WliaprmfsoRt", objfile],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
     (out, err) = p.communicate()
     if p.returncode or err:
         print("Dump debug failed: {}".format(objfile))
         sys.exit(1)
     return [line for line in out.split(os.linesep) if keep_line(line)]
 
+
 def first_diff(a, b, fromfile, tofile):
     """Returns the first few lines of a difference, if there is one.  Python
     diff can be very slow with large objects and the most interesting changes
@@ -55,45 +63,48 @@ def first_diff(a, b, fromfile, tofile):
 
     # Diff to first line of diff plus some lines
     context = 3
-    diff = difflib.unified_diff(a[:first_diff_idx+context],
-                                b[:first_diff_idx+context],
-                                fromfile,
-                                tofile)
+    diff = difflib.unified_diff(
+        a[: first_diff_idx + context], b[: first_diff_idx + context], fromfile, tofile
+    )
     difference = "\n".join(diff)
     if first_diff_idx + context < len(a):
         difference += "\n*** Diff truncated ***"
     return difference
 
+
 def compare_object_files(objfilea, objfileb):
     """Compare disassembly of two 
diff erent files.
-       Allowing unavoidable 
diff erences, such as filenames.
-       Return the first 
diff erence if the disassembly 
diff ers, or None.
+    Allowing unavoidable 
diff erences, such as filenames.
+    Return the first 
diff erence if the disassembly 
diff ers, or None.
     """
     disa = disassemble(objfilea)
     disb = disassemble(objfileb)
     return first_
diff (disa, disb, objfilea, objfileb)
 
+
 def compare_debug_info(objfilea, objfileb):
     """Compare debug info of two 
diff erent files.
-       Allowing unavoidable 
diff erences, such as filenames.
-       Return the first 
diff erence if the debug info 
diff ers, or None.
-       If there are 
diff erences in the code, there will almost certainly be 
diff erences in the debug info too.
+    Allowing unavoidable 
diff erences, such as filenames.
+    Return the first 
diff erence if the debug info 
diff ers, or None.
+    If there are 
diff erences in the code, there will almost certainly be 
diff erences in the debug info too.
     """
     dbga = dump_debug(objfilea)
     dbgb = dump_debug(objfileb)
     return first_
diff (dbga, dbgb, objfilea, objfileb)
 
+
 def compare_exact(objfilea, objfileb):
     """Byte for byte comparison between object files.
-       Returns True if equal, False otherwise.
+    Returns True if equal, False otherwise.
     """
     return filecmp.cmp(objfilea, objfileb)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument('objfilea', nargs=1)
-    parser.add_argument('objfileb', nargs=1)
-    parser.add_argument('-v', '--verbose', action='store_true')
+    parser.add_argument("objfilea", nargs=1)
+    parser.add_argument("objfileb", nargs=1)
+    parser.add_argument("-v", "--verbose", action="store_true")
     args = parser.parse_args()
     diff = compare_object_files(args.objfilea[0], args.objfileb[0])
     if diff:

diff --git a/clang/utils/check_cfc/setup.py b/clang/utils/check_cfc/setup.py
index 64f07d5dcc52a..7a4760c72877a 100644
--- a/clang/utils/check_cfc/setup.py
+++ b/clang/utils/check_cfc/setup.py
@@ -3,12 +3,14 @@
     """
 from __future__ import absolute_import, division, print_function
 from distutils.core import setup
+
 try:
     import py2exe
 except ImportError:
     import platform
     import sys
-    if platform.system() == 'Windows':
+
+    if platform.system() == "Windows":
         print("Could not find py2exe. Please install then run setup.py py2exe.")
         raise
     else:
@@ -16,7 +18,7 @@
         sys.exit(1)
 
 setup(
-      console=['check_cfc.py'],
-      name="Check CFC",
-      description='Check Compile Flow Consistency'
-      )
+    console=["check_cfc.py"],
+    name="Check CFC",
+    description="Check Compile Flow Consistency",
+)

diff --git a/clang/utils/check_cfc/test_check_cfc.py b/clang/utils/check_cfc/test_check_cfc.py
index 0808252a2c60c..cd4441b702cde 100755
--- a/clang/utils/check_cfc/test_check_cfc.py
+++ b/clang/utils/check_cfc/test_check_cfc.py
@@ -9,160 +9,170 @@
 
 
 class TestCheckCFC(unittest.TestCase):
-
     def test_flip_dash_g(self):
-        self.assertIn('-g', check_cfc.flip_dash_g(['clang', '-c']))
-        self.assertNotIn('-g', check_cfc.flip_dash_g(['clang', '-c', '-g']))
-        self.assertNotIn(
-            '-g', check_cfc.flip_dash_g(['clang', '-g', '-c', '-g']))
+        self.assertIn("-g", check_cfc.flip_dash_g(["clang", "-c"]))
+        self.assertNotIn("-g", check_cfc.flip_dash_g(["clang", "-c", "-g"]))
+        self.assertNotIn("-g", check_cfc.flip_dash_g(["clang", "-g", "-c", "-g"]))
 
     def test_remove_dir_from_path(self):
-        bin_path = r'/usr/bin'
-        space_path = r'/home/user/space in path'
-        superstring_path = r'/usr/bin/local'
+        bin_path = r"/usr/bin"
+        space_path = r"/home/user/space in path"
+        superstring_path = r"/usr/bin/local"
 
         # Test removing last thing in path
-        self.assertNotIn(
-            bin_path, check_cfc.remove_dir_from_path(bin_path, bin_path))
+        self.assertNotIn(bin_path, check_cfc.remove_dir_from_path(bin_path, bin_path))
 
         # Test removing one entry and leaving others
         # Also tests removing repeated path
-        path_var = os.pathsep.join(
-            [superstring_path, bin_path, space_path, bin_path])
+        path_var = os.pathsep.join([superstring_path, bin_path, space_path, bin_path])
         stripped_path_var = check_cfc.remove_dir_from_path(path_var, bin_path)
         self.assertIn(superstring_path, stripped_path_var)
         self.assertNotIn(bin_path, stripped_path_var.split(os.pathsep))
         self.assertIn(space_path, stripped_path_var)
 
         # Test removing non-canonical path
-        self.assertNotIn(r'/usr//bin',
-                         check_cfc.remove_dir_from_path(r'/usr//bin', bin_path))
+        self.assertNotIn(
+            r"/usr//bin", check_cfc.remove_dir_from_path(r"/usr//bin", bin_path)
+        )
 
-        if platform == 'Windows':
+        if platform == "Windows":
             # Windows is case insensitive so should remove a different case
             # path
             self.assertNotIn(
-                bin_path, check_cfc.remove_dir_from_path(path_var, r'/USR/BIN'))
+                bin_path, check_cfc.remove_dir_from_path(path_var, r"/USR/BIN")
+            )
         else:
             # Case sensitive so will not remove different case path
             self.assertIn(
-                bin_path, check_cfc.remove_dir_from_path(path_var, r'/USR/BIN'))
+                bin_path, check_cfc.remove_dir_from_path(path_var, r"/USR/BIN")
+            )
 
     def test_is_output_specified(self):
-        self.assertTrue(
-            check_cfc.is_output_specified(['clang', '-o', 'test.o']))
-        self.assertTrue(check_cfc.is_output_specified(['clang', '-otest.o']))
-        self.assertFalse(
-            check_cfc.is_output_specified(['clang', '-gline-tables-only']))
+        self.assertTrue(check_cfc.is_output_specified(["clang", "-o", "test.o"]))
+        self.assertTrue(check_cfc.is_output_specified(["clang", "-otest.o"]))
+        self.assertFalse(check_cfc.is_output_specified(["clang", "-gline-tables-only"]))
         # Not specified for implied output file name
-        self.assertFalse(check_cfc.is_output_specified(['clang', 'test.c']))
+        self.assertFalse(check_cfc.is_output_specified(["clang", "test.c"]))
 
     def test_get_output_file(self):
-        self.assertEqual(
-            check_cfc.get_output_file(['clang', '-o', 'test.o']), 'test.o')
-        self.assertEqual(
-            check_cfc.get_output_file(['clang', '-otest.o']), 'test.o')
-        self.assertIsNone(
-            check_cfc.get_output_file(['clang', '-gline-tables-only']))
+        self.assertEqual(check_cfc.get_output_file(["clang", "-o", "test.o"]), "test.o")
+        self.assertEqual(check_cfc.get_output_file(["clang", "-otest.o"]), "test.o")
+        self.assertIsNone(check_cfc.get_output_file(["clang", "-gline-tables-only"]))
         # Can't get output file if more than one input file
         self.assertIsNone(
-            check_cfc.get_output_file(['clang', '-c', 'test.cpp', 'test2.cpp']))
+            check_cfc.get_output_file(["clang", "-c", "test.cpp", "test2.cpp"])
+        )
         # No output file specified
-        self.assertIsNone(check_cfc.get_output_file(['clang', '-c', 'test.c']))
+        self.assertIsNone(check_cfc.get_output_file(["clang", "-c", "test.c"]))
 
     def test_derive_output_file(self):
         # Test getting implicit output file
         self.assertEqual(
-            check_cfc.derive_output_file(['clang', '-c', 'test.c']), 'test.o')
+            check_cfc.derive_output_file(["clang", "-c", "test.c"]), "test.o"
+        )
         self.assertEqual(
-            check_cfc.derive_output_file(['clang', '-c', 'test.cpp']), 'test.o')
-        self.assertIsNone(check_cfc.derive_output_file(['clang', '--version']))
+            check_cfc.derive_output_file(["clang", "-c", "test.cpp"]), "test.o"
+        )
+        self.assertIsNone(check_cfc.derive_output_file(["clang", "--version"]))
 
     def test_is_normal_compile(self):
-        self.assertTrue(check_cfc.is_normal_compile(
-            ['clang', '-c', 'test.cpp', '-o', 'test2.o']))
         self.assertTrue(
-            check_cfc.is_normal_compile(['clang', '-c', 'test.cpp']))
+            check_cfc.is_normal_compile(["clang", "-c", "test.cpp", "-o", "test2.o"])
+        )
+        self.assertTrue(check_cfc.is_normal_compile(["clang", "-c", "test.cpp"]))
         # Outputting bitcode is not a normal compile
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-c', 'test.cpp', '-flto']))
+            check_cfc.is_normal_compile(["clang", "-c", "test.cpp", "-flto"])
+        )
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-c', 'test.cpp', '-emit-llvm']))
+            check_cfc.is_normal_compile(["clang", "-c", "test.cpp", "-emit-llvm"])
+        )
         # Outputting preprocessed output or assembly is not a normal compile
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-E', 'test.cpp', '-o', 'test.ii']))
+            check_cfc.is_normal_compile(["clang", "-E", "test.cpp", "-o", "test.ii"])
+        )
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-S', 'test.cpp', '-o', 'test.s']))
+            check_cfc.is_normal_compile(["clang", "-S", "test.cpp", "-o", "test.s"])
+        )
         # Input of preprocessed or assembly is not a "normal compile"
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-c', 'test.s', '-o', 'test.o']))
+            check_cfc.is_normal_compile(["clang", "-c", "test.s", "-o", "test.o"])
+        )
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-c', 'test.ii', '-o', 'test.o']))
+            check_cfc.is_normal_compile(["clang", "-c", "test.ii", "-o", "test.o"])
+        )
         # Specifying --version and -c is not a normal compile
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-c', 'test.cpp', '--version']))
+            check_cfc.is_normal_compile(["clang", "-c", "test.cpp", "--version"])
+        )
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-c', 'test.cpp', '--help']))
+            check_cfc.is_normal_compile(["clang", "-c", "test.cpp", "--help"])
+        )
         # Outputting dependency files is not a normal compile
+        self.assertFalse(check_cfc.is_normal_compile(["clang", "-c", "-M", "test.cpp"]))
         self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-c', '-M', 'test.cpp']))
-        self.assertFalse(
-            check_cfc.is_normal_compile(['clang', '-c', '-MM', 'test.cpp']))
+            check_cfc.is_normal_compile(["clang", "-c", "-MM", "test.cpp"])
+        )
         # Creating a dependency file as a side effect still outputs an object file
+        self.assertTrue(check_cfc.is_normal_compile(["clang", "-c", "-MD", "test.cpp"]))
         self.assertTrue(
-            check_cfc.is_normal_compile(['clang', '-c', '-MD', 'test.cpp']))
-        self.assertTrue(
-            check_cfc.is_normal_compile(['clang', '-c', '-MMD', 'test.cpp']))
+            check_cfc.is_normal_compile(["clang", "-c", "-MMD", "test.cpp"])
+        )
 
     def test_replace_output_file(self):
-        self.assertEqual(check_cfc.replace_output_file(
-            ['clang', '-o', 'test.o'], 'testg.o'), ['clang', '-o', 'testg.o'])
-        self.assertEqual(check_cfc.replace_output_file(
-            ['clang', '-otest.o'], 'testg.o'), ['clang', '-otestg.o'])
+        self.assertEqual(
+            check_cfc.replace_output_file(["clang", "-o", "test.o"], "testg.o"),
+            ["clang", "-o", "testg.o"],
+        )
+        self.assertEqual(
+            check_cfc.replace_output_file(["clang", "-otest.o"], "testg.o"),
+            ["clang", "-otestg.o"],
+        )
         with self.assertRaises(Exception):
-            check_cfc.replace_output_file(['clang'], 'testg.o')
+            check_cfc.replace_output_file(["clang"], "testg.o")
 
     def test_add_output_file(self):
-        self.assertEqual(check_cfc.add_output_file(
-            ['clang'], 'testg.o'), ['clang', '-o', 'testg.o'])
+        self.assertEqual(
+            check_cfc.add_output_file(["clang"], "testg.o"), ["clang", "-o", "testg.o"]
+        )
 
     def test_set_output_file(self):
         # Test output not specified
         self.assertEqual(
-            check_cfc.set_output_file(['clang'], 'test.o'), ['clang', '-o', 'test.o'])
+            check_cfc.set_output_file(["clang"], "test.o"), ["clang", "-o", "test.o"]
+        )
         # Test output is specified
-        self.assertEqual(check_cfc.set_output_file(
-            ['clang', '-o', 'test.o'], 'testb.o'), ['clang', '-o', 'testb.o'])
+        self.assertEqual(
+            check_cfc.set_output_file(["clang", "-o", "test.o"], "testb.o"),
+            ["clang", "-o", "testb.o"],
+        )
 
     def test_get_input_file(self):
         # No input file
-        self.assertIsNone(check_cfc.get_input_file(['clang']))
+        self.assertIsNone(check_cfc.get_input_file(["clang"]))
         # Input C file
-        self.assertEqual(
-            check_cfc.get_input_file(['clang', 'test.c']), 'test.c')
+        self.assertEqual(check_cfc.get_input_file(["clang", "test.c"]), "test.c")
         # Input C++ file
-        self.assertEqual(
-            check_cfc.get_input_file(['clang', 'test.cpp']), 'test.cpp')
+        self.assertEqual(check_cfc.get_input_file(["clang", "test.cpp"]), "test.cpp")
         # Multiple input files
-        self.assertIsNone(
-            check_cfc.get_input_file(['clang', 'test.c', 'test2.cpp']))
-        self.assertIsNone(
-            check_cfc.get_input_file(['clang', 'test.c', 'test2.c']))
+        self.assertIsNone(check_cfc.get_input_file(["clang", "test.c", "test2.cpp"]))
+        self.assertIsNone(check_cfc.get_input_file(["clang", "test.c", "test2.c"]))
         # Don't handle preprocessed files
-        self.assertIsNone(check_cfc.get_input_file(['clang', 'test.i']))
-        self.assertIsNone(check_cfc.get_input_file(['clang', 'test.ii']))
+        self.assertIsNone(check_cfc.get_input_file(["clang", "test.i"]))
+        self.assertIsNone(check_cfc.get_input_file(["clang", "test.ii"]))
         # Test identifying input file with quotes
-        self.assertEqual(
-            check_cfc.get_input_file(['clang', '"test.c"']), '"test.c"')
-        self.assertEqual(
-            check_cfc.get_input_file(['clang', "'test.c'"]), "'test.c'")
+        self.assertEqual(check_cfc.get_input_file(["clang", '"test.c"']), '"test.c"')
+        self.assertEqual(check_cfc.get_input_file(["clang", "'test.c'"]), "'test.c'")
         # Test multiple quotes
         self.assertEqual(
-            check_cfc.get_input_file(['clang', "\"'test.c'\""]), "\"'test.c'\"")
+            check_cfc.get_input_file(["clang", "\"'test.c'\""]), "\"'test.c'\""
+        )
 
     def test_set_input_file(self):
-        self.assertEqual(check_cfc.set_input_file(
-            ['clang', 'test.c'], 'test.s'), ['clang', 'test.s'])
+        self.assertEqual(
+            check_cfc.set_input_file(["clang", "test.c"], "test.s"), ["clang", "test.s"]
+        )
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()

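The tests above effectively specify how output arguments are parsed. As a rough sketch only (hypothetical; the real check_cfc implementation may differ), a helper that satisfies exactly these expectations could look like:

    def is_output_specified(cmd):
        # An output is specified either as a separate "-o <file>" pair
        # or fused into a single argument, as in "-otest.o".
        return any(arg == "-o" or (arg.startswith("-o") and len(arg) > 2)
                   for arg in cmd[1:])

    assert is_output_specified(["clang", "-o", "test.o"])
    assert is_output_specified(["clang", "-otest.o"])
    assert not is_output_specified(["clang", "-gline-tables-only"])
    assert not is_output_specified(["clang", "test.c"])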
diff --git a/clang/utils/clangdiag.py b/clang/utils/clangdiag.py
index 4de8c570df7f5..8c47b1cbcfff3 100755
--- a/clang/utils/clangdiag.py
+++ b/clang/utils/clangdiag.py
@@ -1,13 +1,13 @@
 #!/usr/bin/env python
 
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
 # Be sure to add the python path that points to the LLDB shared library.
 #
 # # To use this in the embedded python interpreter using "lldb" just
 # import it with the full path using the "command script import"
 # command
#   (lldb) command script import /path/to/clangdiag.py
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
 
 from __future__ import absolute_import, division, print_function
 import lldb
@@ -17,9 +17,10 @@
 import re
 import subprocess
 
+
 class MyParser(argparse.ArgumentParser):
     def format_help(self):
-        return '''     Commands for managing clang diagnostic breakpoints
+        return """     Commands for managing clang diagnostic breakpoints
 
 Syntax: clangdiag enable [<warning>|<diag-name>]
         clangdiag disable
@@ -57,65 +58,68 @@ def format_help(self):
 - Adding this to your ~.lldbinit file makes clangdiag available at startup:
   "command script import /path/to/clangdiag.py"
 
-'''
+"""
+
 
 def create_diag_options():
-    parser = MyParser(prog='clangdiag')
+    parser = MyParser(prog="clangdiag")
     subparsers = parser.add_subparsers(
-        title='subcommands',
-        dest='subcommands',
-        metavar='')
-    disable_parser = subparsers.add_parser('disable')
-    enable_parser = subparsers.add_parser('enable')
-    enable_parser.add_argument('id', nargs='?')
-    diagtool_parser = subparsers.add_parser('diagtool')
-    diagtool_parser.add_argument('path', nargs='?')
+        title="subcommands", dest="subcommands", metavar=""
+    )
+    disable_parser = subparsers.add_parser("disable")
+    enable_parser = subparsers.add_parser("enable")
+    enable_parser.add_argument("id", nargs="?")
+    diagtool_parser = subparsers.add_parser("diagtool")
+    diagtool_parser.add_argument("path", nargs="?")
     return parser
 
-def getDiagtool(target, diagtool = None):
+
+def getDiagtool(target, diagtool=None):
     id = target.GetProcess().GetProcessID()
-    if 'diagtool' not in getDiagtool.__dict__:
+    if "diagtool" not in getDiagtool.__dict__:
         getDiagtool.diagtool = {}
     if diagtool:
-        if diagtool == 'reset':
+        if diagtool == "reset":
             getDiagtool.diagtool[id] = None
         elif os.path.exists(diagtool):
             getDiagtool.diagtool[id] = diagtool
         else:
-            print('clangdiag: %s not found.' % diagtool)
+            print("clangdiag: %s not found." % diagtool)
     if not id in getDiagtool.diagtool or not getDiagtool.diagtool[id]:
         getDiagtool.diagtool[id] = None
         exe = target.GetExecutable()
         if not exe.Exists():
-            print('clangdiag: Target (%s) not set.' % exe.GetFilename())
+            print("clangdiag: Target (%s) not set." % exe.GetFilename())
         else:
-            diagtool = os.path.join(exe.GetDirectory(), 'diagtool')
+            diagtool = os.path.join(exe.GetDirectory(), "diagtool")
             if os.path.exists(diagtool):
                 getDiagtool.diagtool[id] = diagtool
             else:
-                print('clangdiag: diagtool not found alongside %s' % exe)
+                print("clangdiag: diagtool not found alongside %s" % exe)
 
     return getDiagtool.diagtool[id]
 
+
 def setDiagBreakpoint(frame, bp_loc, dict):
     id = frame.FindVariable("DiagID").GetValue()
     if id is None:
-        print('clangdiag: id is None')
+        print("clangdiag: id is None")
         return False
 
     # Don't need to test this time, since we did that in enable.
     target = frame.GetThread().GetProcess().GetTarget()
     diagtool = getDiagtool(target)
-    name = subprocess.check_output([diagtool, "find-diagnostic-id", id]).rstrip();
+    name = subprocess.check_output([diagtool, "find-diagnostic-id", id]).rstrip()
     # Make sure we only consider errors, warnings, and extensions.
     # FIXME: Make this configurable?
-    prefixes = ['err_', 'warn_', 'exp_']
-    if len([prefix for prefix in prefixes+[''] if name.startswith(prefix)][0]):
+    prefixes = ["err_", "warn_", "exp_"]
+    if len([prefix for prefix in prefixes + [""] if name.startswith(prefix)][0]):
         bp = target.BreakpointCreateBySourceRegex(name, lldb.SBFileSpec())
         bp.AddName("clang::Diagnostic")
 
     return False
 
+
 def enable(exe_ctx, args):
     # Always disable existing breakpoints
     disable(exe_ctx)
@@ -126,30 +130,33 @@ def enable(exe_ctx, args):
     if args.id:
         # Make sure we only consider errors, warnings, and extensions.
         # FIXME: Make this configurable?
-        prefixes = ['err_', 'warn_', 'exp_']
-        if len([prefix for prefix in prefixes+[''] if args.id.startswith(prefix)][0]):
+        prefixes = ["err_", "warn_", "exp_"]
+        if len([prefix for prefix in prefixes + [""] if args.id.startswith(prefix)][0]):
             bp = target.BreakpointCreateBySourceRegex(args.id, lldb.SBFileSpec())
             bp.AddName("clang::Diagnostic")
         else:
             diagtool = getDiagtool(target)
-            list = subprocess.check_output([diagtool, "list-warnings"]).rstrip();
+            list = subprocess.check_output([diagtool, "list-warnings"]).rstrip()
             for line in list.splitlines(True):
-                m = re.search(r' *(.*) .*\[\-W' + re.escape(args.id) + r'.*].*', line)
+                m = re.search(r" *(.*) .*\[\-W" + re.escape(args.id) + r".*].*", line)
                 # Make sure we only consider warnings.
-                if m and m.group(1).startswith('warn_'):
-                    bp = target.BreakpointCreateBySourceRegex(m.group(1), lldb.SBFileSpec())
+                if m and m.group(1).startswith("warn_"):
+                    bp = target.BreakpointCreateBySourceRegex(
+                        m.group(1), lldb.SBFileSpec()
+                    )
                     bp.AddName("clang::Diagnostic")
     else:
-        print('Adding callbacks.')
-        bp = target.BreakpointCreateByName('DiagnosticsEngine::Report')
-        bp.SetScriptCallbackFunction('clangdiag.setDiagBreakpoint')
+        print("Adding callbacks.")
+        bp = target.BreakpointCreateByName("DiagnosticsEngine::Report")
+        bp.SetScriptCallbackFunction("clangdiag.setDiagBreakpoint")
         bp.AddName("clang::Diagnostic")
 
     count = target.GetNumBreakpoints() - numOfBreakpoints
-    print('%i breakpoint%s added.' % (count, "s"[count==1:]))
+    print("%i breakpoint%s added." % (count, "s"[count == 1 :]))
 
     return
 
+
 def disable(exe_ctx):
     target = exe_ctx.GetTarget()
     # Remove all diag breakpoints.
@@ -160,6 +167,7 @@ def disable(exe_ctx):
 
     return
 
+
 def the_diag_command(debugger, command, exe_ctx, result, dict):
     # Use the Shell Lexer to properly parse up command options just like a
     # shell would
@@ -170,16 +178,17 @@ def the_diag_command(debugger, command, exe_ctx, result, dict):
     except:
         return
 
-    if args.subcommands == 'enable':
+    if args.subcommands == "enable":
         enable(exe_ctx, args)
-    elif args.subcommands == 'disable':
+    elif args.subcommands == "disable":
         disable(exe_ctx)
     else:
         diagtool = getDiagtool(exe_ctx.GetTarget(), args.path)
-        print('diagtool = %s' % diagtool)
+        print("diagtool = %s" % diagtool)
 
     return
 
+
 def __lldb_init_module(debugger, dict):
     # This initializer is being run from LLDB in the embedded command interpreter
     # Make the options so we can generate the help text for the new LLDB
@@ -187,6 +196,7 @@ def __lldb_init_module(debugger, dict):
     parser = create_diag_options()
     the_diag_command.__doc__ = parser.format_help()
     # Add any commands contained in this module to LLDB
-    debugger.HandleCommand(
-        'command script add -f clangdiag.the_diag_command clangdiag')
-    print('The "clangdiag" command has been installed, type "help clangdiag" or "clangdiag --help" for detailed help.')
+    debugger.HandleCommand("command script add -f clangdiag.the_diag_command clangdiag")
+    print(
+        'The "clangdiag" command has been installed, type "help clangdiag" or "clangdiag --help" for detailed help.'
+    )

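One expression in the hunk above is worth a note: `"s"[count == 1 :]` is a slice-based pluralization idiom. A standalone illustration (counts invented for demonstration):

    # bool subclasses int in Python: True == 1 and False == 0, so the
    # slice "s"[1:] is "" (singular) while "s"[0:] is "s" (plural).
    for count in (0, 1, 2):
        print("%i breakpoint%s added." % (count, "s"[count == 1:]))
    # 0 breakpoints added.
    # 1 breakpoint added.
    # 2 breakpoints added.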
diff --git a/clang/utils/convert_arm_neon.py b/clang/utils/convert_arm_neon.py
index bb3516b07b577..836250c80452f 100644
--- a/clang/utils/convert_arm_neon.py
+++ b/clang/utils/convert_arm_neon.py
@@ -13,61 +13,63 @@
 # conflicts manually.
 
 import re, sys
+
 MOD_MAP = {
-    'v': 'v',
-    'x': 'S',
-    'u': 'U',
-    'd': '.',
-    'g': 'q',
-    'j': 'Q',
-    'w': '>Q',
-    'n': '>',
-    'h': '<',
-    'q': '<Q',
-    'e': '<U',
-    'm': '<q',
-    'i': 'I',
-    'l': 'IU>',
-    's': '1',
-    'z': '1<',
-    'r': '1>',
-    'b': '1U',
-    '$': '1S',
-    'k': 'Q',
-    '2': '2',
-    '3': '3',
-    '4': '4',
-    'B': '2Q',
-    'C': '3Q',
-    'D': '4Q',
-    'p': '*',
-    'c': 'c*',
-    '7': '<<q',
-    '8': '<<',
-    '9': '<<Q',
-    't': 'p'
-    }
+    "v": "v",
+    "x": "S",
+    "u": "U",
+    "d": ".",
+    "g": "q",
+    "j": "Q",
+    "w": ">Q",
+    "n": ">",
+    "h": "<",
+    "q": "<Q",
+    "e": "<U",
+    "m": "<q",
+    "i": "I",
+    "l": "IU>",
+    "s": "1",
+    "z": "1<",
+    "r": "1>",
+    "b": "1U",
+    "$": "1S",
+    "k": "Q",
+    "2": "2",
+    "3": "3",
+    "4": "4",
+    "B": "2Q",
+    "C": "3Q",
+    "D": "4Q",
+    "p": "*",
+    "c": "c*",
+    "7": "<<q",
+    "8": "<<",
+    "9": "<<Q",
+    "t": "p",
+}
 
 
 def typespec_elt_size(typespec):
-    if 'c' in typespec:
+    if "c" in typespec:
         return 8
-    elif 's' in typespec or 'h' in typespec:
+    elif "s" in typespec or "h" in typespec:
         return 16
-    elif 'i' in typespec or 'f' in typespec:
+    elif "i" in typespec or "f" in typespec:
         return 32
-    elif 'l' in typespec or 'd' in typespec:
+    elif "l" in typespec or "d" in typespec:
         return 64
-    elif 'k' in typespec:
+    elif "k" in typespec:
         return 128
 
+
 def get_resize(cur, desired):
-    res = ''
+    res = ""
     while cur < desired:
-        res += '>'
+        res += ">"
         cur *= 2
     while cur > desired:
-        res += '<'
+        res += "<"
         cur /= 2
     return res
 
@@ -76,7 +78,12 @@ def remap_protocol(proto, typespec, name):
     key_type = 0
 
     # Conversions like to see the integer type so they know signedness.
-    if 'vcvt' in name and '_f' in name and name != 'vcvt_f32_f64' and name != 'vcvt_f64_f32':
+    if (
+        "vcvt" in name
+        and "_f" in name
+        and name != "vcvt_f32_f64"
+        and name != "vcvt_f64_f32"
+    ):
         key_type = 1
     default_width = typespec_elt_size(typespec)
     inconsistent_width = False
@@ -85,87 +92,91 @@ def remap_protocol(proto, typespec, name):
         if new_width and new_width != default_width:
             inconsistent_width = True
 
-    res = ''
+    res = ""
     for i, c in enumerate(proto):
         # void and pointers make for bad discriminators in CGBuiltin.cpp.
-        if c in 'vcp':
-                key_type += 1
+        if c in "vcp":
+            key_type += 1
 
         if c in MOD_MAP:
             cur_mod = MOD_MAP[c]
         elif inconsistent_width:
             # Otherwise it's a fixed output width modifier.
-            sys.stderr.write(f'warning: {name} uses fixed output size but has inconsistent input widths: {proto} {typespec}\n')
+            sys.stderr.write(
+                f"warning: {name} uses fixed output size but has inconsistent input widths: {proto} {typespec}\n"
+            )
 
-        if c == 'Y':
+        if c == "Y":
             # y: scalar of half float
             resize = get_resize(default_width, 16)
-            cur_mod = f'1F{resize}'
-        elif c == 'y':
+            cur_mod = f"1F{resize}"
+        elif c == "y":
             # y: scalar of float
             resize = get_resize(default_width, 32)
-            cur_mod = f'1F{resize}'
-        elif c == 'o':
+            cur_mod = f"1F{resize}"
+        elif c == "o":
             # o: scalar of double
             resize = get_resize(default_width, 64)
-            cur_mod = f'1F{resize}'
-        elif c == 'I':
+            cur_mod = f"1F{resize}"
+        elif c == "I":
             # I: scalar of 32-bit signed
             resize = get_resize(default_width, 32)
-            cur_mod = f'1S{resize}'
-        elif c == 'L':
+            cur_mod = f"1S{resize}"
+        elif c == "L":
             # L: scalar of 64-bit signed
             resize = get_resize(default_width, 64)
-            cur_mod = f'1S{resize}'
-        elif c == 'U':
+            cur_mod = f"1S{resize}"
+        elif c == "U":
             # I: scalar of 32-bit unsigned
             resize = get_resize(default_width, 32)
-            cur_mod = f'1U{resize}'
-        elif c == 'O':
+            cur_mod = f"1U{resize}"
+        elif c == "O":
             # O: scalar of 64-bit unsigned
             resize = get_resize(default_width, 64)
-            cur_mod = f'1U{resize}'
-        elif c == 'f':
+            cur_mod = f"1U{resize}"
+        elif c == "f":
             # f: float (int args)
             resize = get_resize(default_width, 32)
-            cur_mod = f'F{resize}'
-        elif c == 'F':
+            cur_mod = f"F{resize}"
+        elif c == "F":
             # F: double (int args)
             resize = get_resize(default_width, 64)
-            cur_mod = f'F{resize}'
-        elif c == 'H':
+            cur_mod = f"F{resize}"
+        elif c == "H":
             # H: half (int args)
             resize = get_resize(default_width, 16)
-            cur_mod = f'F{resize}'
-        elif c == '0':
+            cur_mod = f"F{resize}"
+        elif c == "0":
             # 0: half (int args), ignore 'Q' size modifier.
             resize = get_resize(default_width, 16)
-            cur_mod = f'Fq{resize}'
-        elif c == '1':
+            cur_mod = f"Fq{resize}"
+        elif c == "1":
             # 1: half (int args), force 'Q' size modifier.
             resize = get_resize(default_width, 16)
-            cur_mod = f'FQ{resize}'
+            cur_mod = f"FQ{resize}"
 
         if len(cur_mod) == 0:
-            raise Exception(f'WTF: {c} in {name}')
+            raise Exception(f"WTF: {c} in {name}")
 
         if key_type != 0 and key_type == i:
-            cur_mod += '!'
+            cur_mod += "!"
 
         if len(cur_mod) == 1:
             res += cur_mod
         else:
-            res += '(' + cur_mod + ')'
+            res += "(" + cur_mod + ")"
 
     return res
 
+
 def replace_insts(m):
-    start, end = m.span('proto')
+    start, end = m.span("proto")
     start -= m.start()
     end -= m.start()
-    new_proto = remap_protocol(m['proto'], m['kinds'], m['name'])
+    new_proto = remap_protocol(m["proto"], m["kinds"], m["name"])
     return m.group()[:start] + new_proto + m.group()[end:]
 
+
 INST = re.compile(r'Inst<"(?P<name>.*?)",\s*"(?P<proto>.*?)",\s*"(?P<kinds>.*?)"')
 
 new_td = INST.sub(replace_insts, sys.stdin.read())

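For reference, get_resize in the hunk above encodes a width change as one '>' per doubling and one '<' per halving. A self-contained copy of that definition with a few checked values:

    def get_resize(cur, desired):
        # Same algorithm as above: widen with '>' while below the
        # target width, narrow with '<' while above it.
        res = ""
        while cur < desired:
            res += ">"
            cur *= 2
        while cur > desired:
            res += "<"
            cur /= 2
        return res

    assert get_resize(32, 128) == ">>"  # 32 -> 64 -> 128
    assert get_resize(64, 16) == "<<"   # 64 -> 32 -> 16
    assert get_resize(32, 32) == ""     # widths already match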
diff --git a/clang/utils/creduce-clang-crash.py b/clang/utils/creduce-clang-crash.py
index fa3bd470ef725..27361bb885058 100755
--- a/clang/utils/creduce-clang-crash.py
+++ b/clang/utils/creduce-clang-crash.py
@@ -25,399 +25,459 @@
 creduce_cmd = None
 clang_cmd = None
 
+
 def verbose_print(*args, **kwargs):
-  if verbose:
-    print(*args, **kwargs)
+    if verbose:
+        print(*args, **kwargs)
+
 
 def check_file(fname):
-  fname = os.path.normpath(fname)
-  if not os.path.isfile(fname):
-    sys.exit("ERROR: %s does not exist" % (fname))
-  return fname
+    fname = os.path.normpath(fname)
+    if not os.path.isfile(fname):
+        sys.exit("ERROR: %s does not exist" % (fname))
+    return fname
+
 
 def check_cmd(cmd_name, cmd_dir, cmd_path=None):
-  """
-  Returns absolute path to cmd_path if it is given,
-  or absolute path to cmd_dir/cmd_name.
-  """
-  if cmd_path:
-    # Make the path absolute so the creduce test can be run from any directory.
-    cmd_path = os.path.abspath(cmd_path)
-    cmd = shutil.which(cmd_path)
+    """
+    Returns absolute path to cmd_path if it is given,
+    or absolute path to cmd_dir/cmd_name.
+    """
+    if cmd_path:
+        # Make the path absolute so the creduce test can be run from any directory.
+        cmd_path = os.path.abspath(cmd_path)
+        cmd = shutil.which(cmd_path)
+        if cmd:
+            return cmd
+        sys.exit("ERROR: executable `%s` not found" % (cmd_path))
+
+    cmd = shutil.which(cmd_name, path=cmd_dir)
     if cmd:
-      return cmd
-    sys.exit("ERROR: executable `%s` not found" % (cmd_path))
+        return cmd
 
-  cmd = shutil.which(cmd_name, path=cmd_dir)
-  if cmd:
-    return cmd
+    if not cmd_dir:
+        cmd_dir = "$PATH"
+    sys.exit("ERROR: `%s` not found in %s" % (cmd_name, cmd_dir))
 
-  if not cmd_dir:
-    cmd_dir = "$PATH"
-  sys.exit("ERROR: `%s` not found in %s" % (cmd_name, cmd_dir))
 
 def quote_cmd(cmd):
-  return ' '.join(pipes.quote(arg) for arg in cmd)
+    return " ".join(pipes.quote(arg) for arg in cmd)
+
 
 def write_to_script(text, filename):
-  with open(filename, 'w') as f:
-    f.write(text)
-  os.chmod(filename, os.stat(filename).st_mode | stat.S_IEXEC)
+    with open(filename, "w") as f:
+        f.write(text)
+    os.chmod(filename, os.stat(filename).st_mode | stat.S_IEXEC)
+
 
 class Reduce(object):
-  def __init__(self, crash_script, file_to_reduce, core_number):
-    crash_script_name, crash_script_ext = os.path.splitext(crash_script)
-    file_reduce_name, file_reduce_ext = os.path.splitext(file_to_reduce)
-
-    self.testfile = file_reduce_name + '.test.sh'
-    self.crash_script = crash_script_name + '.reduced' + crash_script_ext
-    self.file_to_reduce = file_reduce_name + '.reduced' + file_reduce_ext
-    shutil.copy(file_to_reduce, self.file_to_reduce)
-
-    self.clang = clang_cmd
-    self.clang_args = []
-    self.expected_output = []
-    self.needs_stack_trace = False
-    self.creduce_flags = ["--tidy"]
-    self.creduce_flags = ["--n", str(core_number)]
-
-    self.read_clang_args(crash_script, file_to_reduce)
-    self.read_expected_output()
-
-  def get_crash_cmd(self, cmd=None, args=None, filename=None):
-    if not cmd:
-      cmd = self.clang
-    if not args:
-      args = self.clang_args
-    if not filename:
-      filename = self.file_to_reduce
-
-    return [cmd] + args + [filename]
-
-  def read_clang_args(self, crash_script, filename):
-    print("\nReading arguments from crash script...")
-    with open(crash_script) as f:
-      # Assume clang call is the first non comment line.
-      cmd = []
-      for line in f:
-        if not line.lstrip().startswith('#'):
-          cmd = shlex.split(line)
-          break
-    if not cmd:
-      sys.exit("Could not find command in the crash script.");
-
-    # Remove clang and filename from the command
-    # Assume the last occurrence of the filename is the clang input file
-    del cmd[0]
-    for i in range(len(cmd)-1, -1, -1):
-      if cmd[i] == filename:
-        del cmd[i]
-        break
-    self.clang_args = cmd
-    verbose_print("Clang arguments:", quote_cmd(self.clang_args))
-
-  def read_expected_output(self):
-    print("\nGetting expected crash output...")
-    p = subprocess.Popen(self.get_crash_cmd(),
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT)
-    crash_output, _ = p.communicate()
-    result = []
-
-    # Remove color codes
-    ansi_escape = r'\x1b\[[0-?]*m'
-    crash_output = re.sub(ansi_escape, '', crash_output.decode('utf-8'))
-
-    # Look for specific error messages
-    regexes = [r"Assertion .+ failed", # Linux assert()
-               r"Assertion failed: .+,", # FreeBSD/Mac assert()
-               r"fatal error: error in backend: .+",
-               r"LLVM ERROR: .+",
-               r"UNREACHABLE executed at .+?!",
-               r"LLVM IR generation of declaration '.+'",
-               r"Generating code for declaration '.+'",
-               r"\*\*\* Bad machine code: .+ \*\*\*",
-               r"ERROR: .*Sanitizer: [^ ]+ "]
-    for msg_re in regexes:
-      match = re.search(msg_re, crash_output)
-      if match:
-        msg = match.group(0)
-        result = [msg]
-        print("Found message:", msg)
-        break
-
-    # If no message was found, use the top five stack trace functions,
-    # ignoring some common functions
-    # Five is a somewhat arbitrary number; the goal is to get a small number
-    # of identifying functions with some leeway for common functions
-    if not result:
-      self.needs_stack_trace = True
-      stacktrace_re = r'[0-9]+\s+0[xX][0-9a-fA-F]+\s*([^(]+)\('
-      filters = ["PrintStackTrace", "RunSignalHandlers", "CleanupOnSignal",
-                 "HandleCrash", "SignalHandler", "__restore_rt", "gsignal", "abort"]
-      def skip_function(func_name):
-        return any(name in func_name for name in filters)
-
-      matches = re.findall(stacktrace_re, crash_output)
-      result = [x for x in matches if x and not skip_function(x)][:5]
-      for msg in result:
-        print("Found stack trace function:", msg)
-
-    if not result:
-      print("ERROR: no crash was found")
-      print("The crash output was:\n========\n%s========" % crash_output)
-      sys.exit(1)
-
-    self.expected_output = result
-
-  def check_expected_output(self, args=None, filename=None):
-    if not args:
-      args = self.clang_args
-    if not filename:
-      filename = self.file_to_reduce
-
-    p = subprocess.Popen(self.get_crash_cmd(args=args, filename=filename),
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT)
-    crash_output, _ = p.communicate()
-    return all(msg in crash_output.decode('utf-8') for msg in
-               self.expected_output)
-
-  def write_interestingness_test(self):
-    print("\nCreating the interestingness test...")
-
-    # Disable symbolization if it's not required to avoid slow symbolization.
-    disable_symbolization = ''
-    if not self.needs_stack_trace:
-      disable_symbolization = 'export LLVM_DISABLE_SYMBOLIZATION=1'
-
-    output = """#!/bin/bash
+    def __init__(self, crash_script, file_to_reduce, core_number):
+        crash_script_name, crash_script_ext = os.path.splitext(crash_script)
+        file_reduce_name, file_reduce_ext = os.path.splitext(file_to_reduce)
+
+        self.testfile = file_reduce_name + ".test.sh"
+        self.crash_script = crash_script_name + ".reduced" + crash_script_ext
+        self.file_to_reduce = file_reduce_name + ".reduced" + file_reduce_ext
+        shutil.copy(file_to_reduce, self.file_to_reduce)
+
+        self.clang = clang_cmd
+        self.clang_args = []
+        self.expected_output = []
+        self.needs_stack_trace = False
+        self.creduce_flags = ["--tidy"]
+        self.creduce_flags = ["--n", str(core_number)]
+
+        self.read_clang_args(crash_script, file_to_reduce)
+        self.read_expected_output()
+
+    def get_crash_cmd(self, cmd=None, args=None, filename=None):
+        if not cmd:
+            cmd = self.clang
+        if not args:
+            args = self.clang_args
+        if not filename:
+            filename = self.file_to_reduce
+
+        return [cmd] + args + [filename]
+
+    def read_clang_args(self, crash_script, filename):
+        print("\nReading arguments from crash script...")
+        with open(crash_script) as f:
+            # Assume clang call is the first non comment line.
+            cmd = []
+            for line in f:
+                if not line.lstrip().startswith("#"):
+                    cmd = shlex.split(line)
+                    break
+        if not cmd:
+            sys.exit("Could not find command in the crash script.")
+
+        # Remove clang and filename from the command
+        # Assume the last occurrence of the filename is the clang input file
+        del cmd[0]
+        for i in range(len(cmd) - 1, -1, -1):
+            if cmd[i] == filename:
+                del cmd[i]
+                break
+        self.clang_args = cmd
+        verbose_print("Clang arguments:", quote_cmd(self.clang_args))
+
+    def read_expected_output(self):
+        print("\nGetting expected crash output...")
+        p = subprocess.Popen(
+            self.get_crash_cmd(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+        )
+        crash_output, _ = p.communicate()
+        result = []
+
+        # Remove color codes
+        ansi_escape = r"\x1b\[[0-?]*m"
+        crash_output = re.sub(ansi_escape, "", crash_output.decode("utf-8"))
+
+        # Look for specific error messages
+        regexes = [
+            r"Assertion .+ failed",  # Linux assert()
+            r"Assertion failed: .+,",  # FreeBSD/Mac assert()
+            r"fatal error: error in backend: .+",
+            r"LLVM ERROR: .+",
+            r"UNREACHABLE executed at .+?!",
+            r"LLVM IR generation of declaration '.+'",
+            r"Generating code for declaration '.+'",
+            r"\*\*\* Bad machine code: .+ \*\*\*",
+            r"ERROR: .*Sanitizer: [^ ]+ ",
+        ]
+        for msg_re in regexes:
+            match = re.search(msg_re, crash_output)
+            if match:
+                msg = match.group(0)
+                result = [msg]
+                print("Found message:", msg)
+                break
+
+        # If no message was found, use the top five stack trace functions,
+        # ignoring some common functions
+        # Five is a somewhat arbitrary number; the goal is to get a small number
+        # of identifying functions with some leeway for common functions
+        if not result:
+            self.needs_stack_trace = True
+            stacktrace_re = r"[0-9]+\s+0[xX][0-9a-fA-F]+\s*([^(]+)\("
+            filters = [
+                "PrintStackTrace",
+                "RunSignalHandlers",
+                "CleanupOnSignal",
+                "HandleCrash",
+                "SignalHandler",
+                "__restore_rt",
+                "gsignal",
+                "abort",
+            ]
+
+            def skip_function(func_name):
+                return any(name in func_name for name in filters)
+
+            matches = re.findall(stacktrace_re, crash_output)
+            result = [x for x in matches if x and not skip_function(x)][:5]
+            for msg in result:
+                print("Found stack trace function:", msg)
+
+        if not result:
+            print("ERROR: no crash was found")
+            print("The crash output was:\n========\n%s========" % crash_output)
+            sys.exit(1)
+
+        self.expected_output = result
+
+    def check_expected_output(self, args=None, filename=None):
+        if not args:
+            args = self.clang_args
+        if not filename:
+            filename = self.file_to_reduce
+
+        p = subprocess.Popen(
+            self.get_crash_cmd(args=args, filename=filename),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+        )
+        crash_output, _ = p.communicate()
+        return all(msg in crash_output.decode("utf-8") for msg in self.expected_output)
+
+    def write_interestingness_test(self):
+        print("\nCreating the interestingness test...")
+
+        # Disable symbolization if it's not required to avoid slow symbolization.
+        disable_symbolization = ""
+        if not self.needs_stack_trace:
+            disable_symbolization = "export LLVM_DISABLE_SYMBOLIZATION=1"
+
+        output = """#!/bin/bash
 %s
 if %s >& t.log ; then
   exit 1
 fi
-""" % (disable_symbolization, quote_cmd(self.get_crash_cmd()))
-
-    for msg in self.expected_output:
-      output += 'grep -F %s t.log || exit 1\n' % pipes.quote(msg)
-
-    write_to_script(output, self.testfile)
-    self.check_interestingness()
-
-  def check_interestingness(self):
-    testfile = os.path.abspath(self.testfile)
-
-    # Check that the test considers the original file interesting
-    with open(os.devnull, 'w') as devnull:
-      returncode = subprocess.call(testfile, stdout=devnull)
-    if returncode:
-      sys.exit("The interestingness test does not pass for the original file.")
-
-    # Check that an empty file is not interesting
-    # Instead of modifying the filename in the test file, just run the command
-    with tempfile.NamedTemporaryFile() as empty_file:
-      is_interesting = self.check_expected_output(filename=empty_file.name)
-    if is_interesting:
-      sys.exit("The interestingness test passes for an empty file.")
-
-  def clang_preprocess(self):
-    print("\nTrying to preprocess the source file...")
-    with tempfile.NamedTemporaryFile() as tmpfile:
-      cmd_preprocess = self.get_crash_cmd() + ['-E', '-o', tmpfile.name]
-      cmd_preprocess_no_lines = cmd_preprocess + ['-P']
-      try:
-        subprocess.check_call(cmd_preprocess_no_lines)
-        if self.check_expected_output(filename=tmpfile.name):
-          print("Successfully preprocessed with line markers removed")
-          shutil.copy(tmpfile.name, self.file_to_reduce)
-        else:
-          subprocess.check_call(cmd_preprocess)
-          if self.check_expected_output(filename=tmpfile.name):
-            print("Successfully preprocessed without removing line markers")
-            shutil.copy(tmpfile.name, self.file_to_reduce)
-          else:
-            print("No longer crashes after preprocessing -- "
-                  "using original source")
-      except subprocess.CalledProcessError:
-        print("Preprocessing failed")
-
-  @staticmethod
-  def filter_args(args, opts_equal=[], opts_startswith=[],
-                  opts_one_arg_startswith=[]):
-    result = []
-    skip_next = False
-    for arg in args:
-      if skip_next:
+""" % (
+            disable_symbolization,
+            quote_cmd(self.get_crash_cmd()),
+        )
+
+        for msg in self.expected_output:
+            output += "grep -F %s t.log || exit 1\n" % pipes.quote(msg)
+
+        write_to_script(output, self.testfile)
+        self.check_interestingness()
+
+    def check_interestingness(self):
+        testfile = os.path.abspath(self.testfile)
+
+        # Check that the test considers the original file interesting
+        with open(os.devnull, "w") as devnull:
+            returncode = subprocess.call(testfile, stdout=devnull)
+        if returncode:
+            sys.exit("The interestingness test does not pass for the original file.")
+
+        # Check that an empty file is not interesting
+        # Instead of modifying the filename in the test file, just run the command
+        with tempfile.NamedTemporaryFile() as empty_file:
+            is_interesting = self.check_expected_output(filename=empty_file.name)
+        if is_interesting:
+            sys.exit("The interestingness test passes for an empty file.")
+
+    def clang_preprocess(self):
+        print("\nTrying to preprocess the source file...")
+        with tempfile.NamedTemporaryFile() as tmpfile:
+            cmd_preprocess = self.get_crash_cmd() + ["-E", "-o", tmpfile.name]
+            cmd_preprocess_no_lines = cmd_preprocess + ["-P"]
+            try:
+                subprocess.check_call(cmd_preprocess_no_lines)
+                if self.check_expected_output(filename=tmpfile.name):
+                    print("Successfully preprocessed with line markers removed")
+                    shutil.copy(tmpfile.name, self.file_to_reduce)
+                else:
+                    subprocess.check_call(cmd_preprocess)
+                    if self.check_expected_output(filename=tmpfile.name):
+                        print("Successfully preprocessed without removing line markers")
+                        shutil.copy(tmpfile.name, self.file_to_reduce)
+                    else:
+                        print(
+                            "No longer crashes after preprocessing -- "
+                            "using original source"
+                        )
+            except subprocess.CalledProcessError:
+                print("Preprocessing failed")
+
+    @staticmethod
+    def filter_args(
+        args, opts_equal=[], opts_startswith=[], opts_one_arg_startswith=[]
+    ):
+        result = []
         skip_next = False
-        continue
-      if any(arg == a for a in opts_equal):
-        continue
-      if any(arg.startswith(a) for a in opts_startswith):
-        continue
-      if any(arg.startswith(a) for a in opts_one_arg_startswith):
-        skip_next = True
-        continue
-      result.append(arg)
-    return result
-
-  def try_remove_args(self, args, msg=None, extra_arg=None, **kwargs):
-    new_args = self.filter_args(args, **kwargs)
-
-    if extra_arg:
-      if extra_arg in new_args:
-        new_args.remove(extra_arg)
-      new_args.append(extra_arg)
-
-    if (new_args != args and
-        self.check_expected_output(args=new_args)):
-      if msg:
-        verbose_print(msg)
-      return new_args
-    return args
-
-  def try_remove_arg_by_index(self, args, index):
-    new_args = args[:index] + args[index+1:]
-    removed_arg = args[index]
-
-    # Heuristic for grouping arguments:
-    # remove next argument if it doesn't start with "-"
-    if index < len(new_args) and not new_args[index].startswith('-'):
-      del new_args[index]
-      removed_arg += ' ' + args[index+1]
-
-    if self.check_expected_output(args=new_args):
-      verbose_print("Removed", removed_arg)
-      return new_args, index
-    return args, index+1
-
-  def simplify_clang_args(self):
-    """Simplify clang arguments before running C-Reduce to reduce the time the
-    interestingness test takes to run.
-    """
-    print("\nSimplifying the clang command...")
-
-    # Remove some clang arguments to speed up the interestingness test
-    new_args = self.clang_args
-    new_args = self.try_remove_args(new_args,
-                                    msg="Removed debug info options",
-                                    opts_startswith=["-gcodeview",
-                                                     "-debug-info-kind=",
-                                                     "-debugger-tuning="])
-
-    new_args = self.try_remove_args(new_args,
-                                    msg="Removed --show-includes",
-                                    opts_startswith=["--show-includes"])
-    # Not suppressing warnings (-w) sometimes prevents the crash from occurring
-    # after preprocessing
-    new_args = self.try_remove_args(new_args,
-                                    msg="Replaced -W options with -w",
-                                    extra_arg='-w',
-                                    opts_startswith=["-W"])
-    new_args = self.try_remove_args(new_args,
-                                    msg="Replaced optimization level with -O0",
-                                    extra_arg="-O0",
-                                    opts_startswith=["-O"])
-
-    # Try to remove compilation steps
-    new_args = self.try_remove_args(new_args, msg="Added -emit-llvm",
-                                    extra_arg="-emit-llvm")
-    new_args = self.try_remove_args(new_args, msg="Added -fsyntax-only",
-                                    extra_arg="-fsyntax-only")
-
-    # Try to make implicit int an error for more sensible test output
-    new_args = self.try_remove_args(new_args, msg="Added -Werror=implicit-int",
-                                    opts_equal=["-w"],
-                                    extra_arg="-Werror=implicit-int")
-
-    self.clang_args = new_args
-    verbose_print("Simplified command:", quote_cmd(self.get_crash_cmd()))
-
-  def reduce_clang_args(self):
-    """Minimize the clang arguments after running C-Reduce, to get the smallest
-    command that reproduces the crash on the reduced file.
-    """
-    print("\nReducing the clang crash command...")
-
-    new_args = self.clang_args
-
-    # Remove some often occurring args
-    new_args = self.try_remove_args(new_args, msg="Removed -D options",
-                                    opts_startswith=["-D"])
-    new_args = self.try_remove_args(new_args, msg="Removed -D options",
-                                    opts_one_arg_startswith=["-D"])
-    new_args = self.try_remove_args(new_args, msg="Removed -I options",
-                                    opts_startswith=["-I"])
-    new_args = self.try_remove_args(new_args, msg="Removed -I options",
-                                    opts_one_arg_startswith=["-I"])
-    new_args = self.try_remove_args(new_args, msg="Removed -W options",
-                                    opts_startswith=["-W"])
-
-    # Remove other cases that aren't covered by the heuristic
-    new_args = self.try_remove_args(new_args, msg="Removed -mllvm",
-                                    opts_one_arg_startswith=["-mllvm"])
-
-    i = 0
-    while i < len(new_args):
-      new_args, i = self.try_remove_arg_by_index(new_args, i)
-
-    self.clang_args = new_args
-
-    reduced_cmd = quote_cmd(self.get_crash_cmd())
-    write_to_script(reduced_cmd, self.crash_script)
-    print("Reduced command:", reduced_cmd)
-
-  def run_creduce(self):
-    print("\nRunning C-Reduce...")
-    try:
-      p = subprocess.Popen([creduce_cmd] + self.creduce_flags +
-                           [self.testfile, self.file_to_reduce])
-      p.communicate()
-    except KeyboardInterrupt:
-      # Hack to kill C-Reduce because it jumps into its own pgid
-      print('\n\nctrl-c detected, killed creduce')
-      p.kill()
+        for arg in args:
+            if skip_next:
+                skip_next = False
+                continue
+            if any(arg == a for a in opts_equal):
+                continue
+            if any(arg.startswith(a) for a in opts_startswith):
+                continue
+            if any(arg.startswith(a) for a in opts_one_arg_startswith):
+                skip_next = True
+                continue
+            result.append(arg)
+        return result
+
+    def try_remove_args(self, args, msg=None, extra_arg=None, **kwargs):
+        new_args = self.filter_args(args, **kwargs)
+
+        if extra_arg:
+            if extra_arg in new_args:
+                new_args.remove(extra_arg)
+            new_args.append(extra_arg)
+
+        if new_args != args and self.check_expected_output(args=new_args):
+            if msg:
+                verbose_print(msg)
+            return new_args
+        return args
+
+    def try_remove_arg_by_index(self, args, index):
+        new_args = args[:index] + args[index + 1 :]
+        removed_arg = args[index]
+
+        # Heuristic for grouping arguments:
+        # remove next argument if it doesn't start with "-"
+        if index < len(new_args) and not new_args[index].startswith("-"):
+            del new_args[index]
+            removed_arg += " " + args[index + 1]
+
+        if self.check_expected_output(args=new_args):
+            verbose_print("Removed", removed_arg)
+            return new_args, index
+        return args, index + 1
+
+    def simplify_clang_args(self):
+        """Simplify clang arguments before running C-Reduce to reduce the time the
+        interestingness test takes to run.
+        """
+        print("\nSimplifying the clang command...")
+
+        # Remove some clang arguments to speed up the interestingness test
+        new_args = self.clang_args
+        new_args = self.try_remove_args(
+            new_args,
+            msg="Removed debug info options",
+            opts_startswith=["-gcodeview", "-debug-info-kind=", "-debugger-tuning="],
+        )
+
+        new_args = self.try_remove_args(
+            new_args, msg="Removed --show-includes", opts_startswith=["--show-includes"]
+        )
+        # Not suppressing warnings (-w) sometimes prevents the crash from occurring
+        # after preprocessing
+        new_args = self.try_remove_args(
+            new_args,
+            msg="Replaced -W options with -w",
+            extra_arg="-w",
+            opts_startswith=["-W"],
+        )
+        new_args = self.try_remove_args(
+            new_args,
+            msg="Replaced optimization level with -O0",
+            extra_arg="-O0",
+            opts_startswith=["-O"],
+        )
+
+        # Try to remove compilation steps
+        new_args = self.try_remove_args(
+            new_args, msg="Added -emit-llvm", extra_arg="-emit-llvm"
+        )
+        new_args = self.try_remove_args(
+            new_args, msg="Added -fsyntax-only", extra_arg="-fsyntax-only"
+        )
+
+        # Try to make implicit int an error for more sensible test output
+        new_args = self.try_remove_args(
+            new_args,
+            msg="Added -Werror=implicit-int",
+            opts_equal=["-w"],
+            extra_arg="-Werror=implicit-int",
+        )
+
+        self.clang_args = new_args
+        verbose_print("Simplified command:", quote_cmd(self.get_crash_cmd()))
+
+    def reduce_clang_args(self):
+        """Minimize the clang arguments after running C-Reduce, to get the smallest
+        command that reproduces the crash on the reduced file.
+        """
+        print("\nReducing the clang crash command...")
+
+        new_args = self.clang_args
+
+        # Remove some often occurring args
+        new_args = self.try_remove_args(
+            new_args, msg="Removed -D options", opts_startswith=["-D"]
+        )
+        new_args = self.try_remove_args(
+            new_args, msg="Removed -D options", opts_one_arg_startswith=["-D"]
+        )
+        new_args = self.try_remove_args(
+            new_args, msg="Removed -I options", opts_startswith=["-I"]
+        )
+        new_args = self.try_remove_args(
+            new_args, msg="Removed -I options", opts_one_arg_startswith=["-I"]
+        )
+        new_args = self.try_remove_args(
+            new_args, msg="Removed -W options", opts_startswith=["-W"]
+        )
+
+        # Remove other cases that aren't covered by the heuristic
+        new_args = self.try_remove_args(
+            new_args, msg="Removed -mllvm", opts_one_arg_startswith=["-mllvm"]
+        )
+
+        i = 0
+        while i < len(new_args):
+            new_args, i = self.try_remove_arg_by_index(new_args, i)
+
+        self.clang_args = new_args
+
+        reduced_cmd = quote_cmd(self.get_crash_cmd())
+        write_to_script(reduced_cmd, self.crash_script)
+        print("Reduced command:", reduced_cmd)
+
+    def run_creduce(self):
+        print("\nRunning C-Reduce...")
+        try:
+            p = subprocess.Popen(
+                [creduce_cmd]
+                + self.creduce_flags
+                + [self.testfile, self.file_to_reduce]
+            )
+            p.communicate()
+        except KeyboardInterrupt:
+            # Hack to kill C-Reduce because it jumps into its own pgid
+            print("\n\nctrl-c detected, killed creduce")
+            p.kill()
+
 
 def main():
-  global verbose
-  global creduce_cmd
-  global clang_cmd
-
-  parser = ArgumentParser(description=__doc__,
-                          formatter_class=RawTextHelpFormatter)
-  parser.add_argument('crash_script', type=str, nargs=1,
-                      help="Name of the script that generates the crash.")
-  parser.add_argument('file_to_reduce', type=str, nargs=1,
-                      help="Name of the file to be reduced.")
-  parser.add_argument('--llvm-bin', dest='llvm_bin', type=str,
-                      help="Path to the LLVM bin directory.")
-  parser.add_argument('--clang', dest='clang', type=str,
-                      help="The path to the `clang` executable. "
-                      "By default uses the llvm-bin directory.")
-  parser.add_argument('--creduce', dest='creduce', type=str,
-                      help="The path to the `creduce` executable. "
-                      "Required if `creduce` is not in PATH environment.")
-  parser.add_argument('--n', dest='core_number', type=int,
-                      default=max(4, multiprocessing.cpu_count() // 2),
-                      help="Number of cores to use.")
-  parser.add_argument('-v', '--verbose', action='store_true')
-  args = parser.parse_args()
-
-  verbose = args.verbose
-  llvm_bin = os.path.abspath(args.llvm_bin) if args.llvm_bin else None
-  creduce_cmd = check_cmd('creduce', None, args.creduce)
-  clang_cmd = check_cmd('clang', llvm_bin, args.clang)
-  core_number = args.core_number
-
-  crash_script = check_file(args.crash_script[0])
-  file_to_reduce = check_file(args.file_to_reduce[0])
-
-  r = Reduce(crash_script, file_to_reduce, core_number)
-
-  r.simplify_clang_args()
-  r.write_interestingness_test()
-  r.clang_preprocess()
-  r.run_creduce()
-  r.reduce_clang_args()
-
-if __name__ == '__main__':
-  main()
+    global verbose
+    global creduce_cmd
+    global clang_cmd
+
+    parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
+    parser.add_argument(
+        "crash_script",
+        type=str,
+        nargs=1,
+        help="Name of the script that generates the crash.",
+    )
+    parser.add_argument(
+        "file_to_reduce", type=str, nargs=1, help="Name of the file to be reduced."
+    )
+    parser.add_argument(
+        "--llvm-bin", dest="llvm_bin", type=str, help="Path to the LLVM bin directory."
+    )
+    parser.add_argument(
+        "--clang",
+        dest="clang",
+        type=str,
+        help="The path to the `clang` executable. "
+        "By default uses the llvm-bin directory.",
+    )
+    parser.add_argument(
+        "--creduce",
+        dest="creduce",
+        type=str,
+        help="The path to the `creduce` executable. "
+        "Required if `creduce` is not in PATH environment.",
+    )
+    parser.add_argument(
+        "--n",
+        dest="core_number",
+        type=int,
+        default=max(4, multiprocessing.cpu_count() // 2),
+        help="Number of cores to use.",
+    )
+    parser.add_argument("-v", "--verbose", action="store_true")
+    args = parser.parse_args()
+
+    verbose = args.verbose
+    llvm_bin = os.path.abspath(args.llvm_bin) if args.llvm_bin else None
+    creduce_cmd = check_cmd("creduce", None, args.creduce)
+    clang_cmd = check_cmd("clang", llvm_bin, args.clang)
+    core_number = args.core_number
+
+    crash_script = check_file(args.crash_script[0])
+    file_to_reduce = check_file(args.file_to_reduce[0])
+
+    r = Reduce(crash_script, file_to_reduce, core_number)
+
+    r.simplify_clang_args()
+    r.write_interestingness_test()
+    r.clang_preprocess()
+    r.run_creduce()
+    r.reduce_clang_args()
+
+
+if __name__ == "__main__":
+    main()

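The filter_args helper reformatted above drives the argument reduction. A trimmed standalone copy of the same logic, applied to an invented command line to show all three filter kinds:

    def filter_args(args, opts_equal=(), opts_startswith=(),
                    opts_one_arg_startswith=()):
        result, skip_next = [], False
        for arg in args:
            if skip_next:          # second half of a two-argument option
                skip_next = False
                continue
            if arg in opts_equal:
                continue
            if any(arg.startswith(a) for a in opts_startswith):
                continue
            if any(arg.startswith(a) for a in opts_one_arg_startswith):
                skip_next = True   # also drop the option's argument
                continue
            result.append(arg)
        return result

    assert filter_args(
        ["clang", "-mllvm", "-inline-threshold=1", "-O2", "-w", "x.c"],
        opts_equal=["-w"],
        opts_startswith=["-O"],
        opts_one_arg_startswith=["-mllvm"],
    ) == ["clang", "x.c"]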
diff --git a/clang/utils/modfuzz.py b/clang/utils/modfuzz.py
index 84707f471ada1..49ac522cd17e1 100644
--- a/clang/utils/modfuzz.py
+++ b/clang/utils/modfuzz.py
@@ -13,155 +13,181 @@
 clang = sys.argv[1]
 none_opts = 0.3
 
+
 class Decl(object):
-  def __init__(self, text, depends=[], provides=[], conflicts=[]):
-    self.text = text
-    self.depends = depends
-    self.provides = provides
-    self.conflicts = conflicts
-
-  def valid(self, model):
-    for i in self.depends:
-      if i not in model.decls:
-        return False
-    for i in self.conflicts:
-      if i in model.decls:
-        return False
-    return True
-
-  def apply(self, model, name):
-    for i in self.provides:
-      model.decls[i] = True
-    model.source += self.text % {'name': name}
+    def __init__(self, text, depends=[], provides=[], conflicts=[]):
+        self.text = text
+        self.depends = depends
+        self.provides = provides
+        self.conflicts = conflicts
+
+    def valid(self, model):
+        for i in self.depends:
+            if i not in model.decls:
+                return False
+        for i in self.conflicts:
+            if i in model.decls:
+                return False
+        return True
+
+    def apply(self, model, name):
+        for i in self.provides:
+            model.decls[i] = True
+        model.source += self.text % {"name": name}
+
 
 decls = [
-  Decl('struct X { int n; };\n', provides=['X'], conflicts=['X']),
-  Decl('static_assert(X{.n=1}.n == 1, "");\n', depends=['X']),
-  Decl('X %(name)s;\n', depends=['X']),
+    Decl("struct X { int n; };\n", provides=["X"], conflicts=["X"]),
+    Decl('static_assert(X{.n=1}.n == 1, "");\n', depends=["X"]),
+    Decl("X %(name)s;\n", depends=["X"]),
 ]
 
+
 class FS(object):
-  def __init__(self):
-    self.fs = {}
-    self.prevfs = {}
+    def __init__(self):
+        self.fs = {}
+        self.prevfs = {}
 
-  def write(self, path, contents):
-    self.fs[path] = contents
+    def write(self, path, contents):
+        self.fs[path] = contents
 
-  def done(self):
-    for f, s in self.fs.items():
-      if self.prevfs.get(f) != s:
-        f = file(f, 'w')
-        f.write(s)
-        f.close()
+    def done(self):
+        for f, s in self.fs.items():
+            if self.prevfs.get(f) != s:
+                f = file(f, "w")
+                f.write(s)
+                f.close()
 
-    for f in self.prevfs:
-      if f not in self.fs:
-        os.remove(f)
+        for f in self.prevfs:
+            if f not in self.fs:
+                os.remove(f)
 
-    self.prevfs, self.fs = self.fs, {}
+        self.prevfs, self.fs = self.fs, {}
 
-fs = FS()
 
-class CodeModel(object):
-  def __init__(self):
-    self.source = ''
-    self.modules = {}
-    self.decls = {}
-    self.i = 0
+fs = FS()
 
-  def make_name(self):
-    self.i += 1
-    return 'n' + str(self.i)
 
-  def fails(self):
-    fs.write('module.modulemap',
-          ''.join('module %s { header "%s.h" export * }\n' % (m, m)
-                  for m in self.modules.keys()))
+class CodeModel(object):
+    def __init__(self):
+        self.source = ""
+        self.modules = {}
+        self.decls = {}
+        self.i = 0
+
+    def make_name(self):
+        self.i += 1
+        return "n" + str(self.i)
+
+    def fails(self):
+        fs.write(
+            "module.modulemap",
+            "".join(
+                'module %s { header "%s.h" export * }\n' % (m, m)
+                for m in self.modules.keys()
+            ),
+        )
+
+        for m, (s, _) in self.modules.items():
+            fs.write("%s.h" % m, s)
+
+        fs.write("main.cc", self.source)
+        fs.done()
+
+        return (
+            subprocess.call(
+                [clang, "-std=c++11", "-c", "-fmodules", "main.cc", "-o", "/dev/null"]
+            )
+            != 0
+        )
 
-    for m, (s, _) in self.modules.items():
-      fs.write('%s.h' % m, s)
 
-    fs.write('main.cc', self.source)
-    fs.done()
+def generate():
+    model = CodeModel()
+    m = []
+
+    try:
+        for d in mutations(model):
+            d(model)
+            m.append(d)
+        if not model.fails():
+            return
+    except KeyboardInterrupt:
+        print()
+        return True
+
+    sys.stdout.write("\nReducing:\n")
+    sys.stdout.flush()
+
+    try:
+        while True:
+            assert m, "got a failure with no steps; broken clang binary?"
+            i = random.choice(list(range(len(m))))
+            x = m[0:i] + m[i + 1 :]
+            m2 = CodeModel()
+            for d in x:
+                d(m2)
+            if m2.fails():
+                m = x
+                model = m2
+            else:
+                sys.stdout.write(".")
+                sys.stdout.flush()
+    except KeyboardInterrupt:
+        # FIXME: Clean out output directory first.
+        model.fails()
+        return model
 
-    return subprocess.call([clang, '-std=c++11', '-c', '-fmodules', 'main.cc', '-o', '/dev/null']) != 0
 
-def generate():
-  model = CodeModel()
-  m = []
-
-  try:
-    for d in mutations(model):
-      d(model)
-      m.append(d)
-    if not model.fails():
-      return
-  except KeyboardInterrupt:
-    print()
-    return True
-
-  sys.stdout.write('\nReducing:\n')
-  sys.stdout.flush()
-
-  try:
+def choose(options):
     while True:
-      assert m, 'got a failure with no steps; broken clang binary?'
-      i = random.choice(list(range(len(m))))
-      x = m[0:i] + m[i+1:]
-      m2 = CodeModel()
-      for d in x:
-        d(m2)
-      if m2.fails():
-        m = x
-        model = m2
-      else:
-        sys.stdout.write('.')
-        sys.stdout.flush()
-  except KeyboardInterrupt:
-    # FIXME: Clean out output directory first.
-    model.fails()
-    return model
+        i = int(random.uniform(0, len(options) + none_opts))
+        if i >= len(options):
+            break
+        yield options[i]
 
-def choose(options):
-  while True:
-    i = int(random.uniform(0, len(options) + none_opts))
-    if i >= len(options):
-      break
-    yield options[i]
 
 def mutations(model):
-  options = [create_module, add_top_level_decl]
-  for opt in choose(options):
-    yield opt(model, options)
+    options = [create_module, add_top_level_decl]
+    for opt in choose(options):
+        yield opt(model, options)
+
 
 def create_module(model, options):
-  n = model.make_name()
-  def go(model):
-    model.modules[n] = (model.source, model.decls)
-    (model.source, model.decls) = ('', {})
-  options += [lambda model, options: add_import(model, options, n)]
-  return go
+    n = model.make_name()
+
+    def go(model):
+        model.modules[n] = (model.source, model.decls)
+        (model.source, model.decls) = ("", {})
+
+    options += [lambda model, options: add_import(model, options, n)]
+    return go
+
 
 def add_top_level_decl(model, options):
-  n = model.make_name()
-  d = random.choice([decl for decl in decls if decl.valid(model)])
-  def go(model):
-    if not d.valid(model):
-      return
-    d.apply(model, n)
-  return go
+    n = model.make_name()
+    d = random.choice([decl for decl in decls if decl.valid(model)])
+
+    def go(model):
+        if not d.valid(model):
+            return
+        d.apply(model, n)
+
+    return go
+
 
 def add_import(model, options, module_name):
-  def go(model):
-    if module_name in model.modules:
-      model.source += '#include "%s.h"\n' % module_name
-      model.decls.update(model.modules[module_name][1])
-  return go
+    def go(model):
+        if module_name in model.modules:
+            model.source += '#include "%s.h"\n' % module_name
+            model.decls.update(model.modules[module_name][1])
+
+    return go
+
 
-sys.stdout.write('Finding bug: ')
+sys.stdout.write("Finding bug: ")
 while True:
-  if generate():
-    break
-  sys.stdout.write('.')
-  sys.stdout.flush()
+    if generate():
+        break
+    sys.stdout.write(".")
+    sys.stdout.flush()
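
The interesting part of modfuzz.py is the loop at the end of generate():
it records the mutation closures that produced a failing program, then
repeatedly drops a random step and replays the remainder into a fresh
CodeModel, keeping the shorter sequence whenever the failure survives.
A deterministic toy version of the same idea is sketched below; the step
list and predicate are invented for illustration, and the real script
instead loops until interrupted.

    # Greedy variant of the reduction loop in generate(): keep removing
    # any single step whose removal preserves the failure.
    def reduce_steps(steps, fails):
        changed = True
        while changed:
            changed = False
            for i in range(len(steps)):
                candidate = steps[:i] + steps[i + 1 :]
                if candidate and fails(candidate):
                    steps = candidate
                    changed = True
                    break
        return steps

    steps = ["make-module", "bad-decl", "add-import", "top-level-decl"]
    print(reduce_steps(steps, lambda s: "bad-decl" in s))  # -> ['bad-decl']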

diff --git a/clang/utils/module-deps-to-rsp.py b/clang/utils/module-deps-to-rsp.py
index e017917af61f3..6c9f263a786ef 100755
--- a/clang/utils/module-deps-to-rsp.py
+++ b/clang/utils/module-deps-to-rsp.py
@@ -14,60 +14,75 @@
 import json
 import sys
 
+
 class ModuleNotFoundError(Exception):
-  def __init__(self, module_name):
-    self.module_name = module_name
+    def __init__(self, module_name):
+        self.module_name = module_name
+
 
 class FullDeps:
-  def __init__(self):
-    self.modules = {}
-    self.translation_units = []
+    def __init__(self):
+        self.modules = {}
+        self.translation_units = []
+
 
 def findModule(module_name, full_deps):
-  for m in full_deps.modules.values():
-    if m['name'] == module_name:
-      return m
-  raise ModuleNotFoundError(module_name)
+    for m in full_deps.modules.values():
+        if m["name"] == module_name:
+            return m
+    raise ModuleNotFoundError(module_name)
+
 
 def parseFullDeps(json):
-  ret = FullDeps()
-  for m in json['modules']:
-    ret.modules[m['name'] + '-' + m['context-hash']] = m
-  ret.translation_units = json['translation-units']
-  return ret
+    ret = FullDeps()
+    for m in json["modules"]:
+        ret.modules[m["name"] + "-" + m["context-hash"]] = m
+    ret.translation_units = json["translation-units"]
+    return ret
+
 
 def quote(str):
-  return '"' + str.replace("\\", "\\\\") + '"'
+    return '"' + str.replace("\\", "\\\\") + '"'
+
 
 def main():
-  parser = argparse.ArgumentParser()
-  parser.add_argument("full_deps_file", help="Path to the full dependencies json file",
-                      type=str)
-  action = parser.add_mutually_exclusive_group(required=True)
-  action.add_argument("--module-name", help="The name of the module to get arguments for",
-                      type=str)
-  action.add_argument("--tu-index", help="The index of the translation unit to get arguments for",
-                      type=int)
-  parser.add_argument("--tu-cmd-index",
-                      help="The index of the command within the translation unit (default=0)",
-                      type=int, default=0)
-  args = parser.parse_args()
-
-  full_deps = parseFullDeps(json.load(open(args.full_deps_file, 'r')))
-
-  try:
-    cmd = []
-
-    if args.module_name:
-      cmd = findModule(args.module_name, full_deps)['command-line']
-    elif args.tu_index != None:
-      tu = full_deps.translation_units[args.tu_index]
-      cmd = tu['commands'][args.tu_cmd_index]['command-line']
-
-    print(" ".join(map(quote, cmd)))
-  except:
-    print("Unexpected error:", sys.exc_info()[0])
-    raise
-
-if __name__ == '__main__':
-  main()
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "full_deps_file", help="Path to the full dependencies json file", type=str
+    )
+    action = parser.add_mutually_exclusive_group(required=True)
+    action.add_argument(
+        "--module-name", help="The name of the module to get arguments for", type=str
+    )
+    action.add_argument(
+        "--tu-index",
+        help="The index of the translation unit to get arguments for",
+        type=int,
+    )
+    parser.add_argument(
+        "--tu-cmd-index",
+        help="The index of the command within the translation unit (default=0)",
+        type=int,
+        default=0,
+    )
+    args = parser.parse_args()
+
+    full_deps = parseFullDeps(json.load(open(args.full_deps_file, "r")))
+
+    try:
+        cmd = []
+
+        if args.module_name:
+            cmd = findModule(args.module_name, full_deps)["command-line"]
+        elif args.tu_index is not None:
+            tu = full_deps.translation_units[args.tu_index]
+            cmd = tu["commands"][args.tu_cmd_index]["command-line"]
+
+        print(" ".join(map(quote, cmd)))
+    except:
+        print("Unexpected error:", sys.exc_info()[0])
+        raise
+
+
+if __name__ == "__main__":
+    main()
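
For reference, module-deps-to-rsp.py turns one entry of a full-deps JSON
document (the format with "modules", "context-hash", "translation-units"
and "command-line" fields read above, typically produced by a dependency
scan) into a quoted response file, e.g. along the lines of
module-deps-to-rsp.py deps.json --module-name Foo > Foo.rsp. The sketch
below exercises the same lookup logic on a hand-written document; the
module name, hash, and command lines are placeholders.

    # Minimal stand-in for parseFullDeps()/findModule() above.
    deps = {
        "modules": [
            {
                "name": "Foo",
                "context-hash": "deadbeef",
                "command-line": ["-cc1", "-emit-module", "foo.modulemap"],
            }
        ],
        "translation-units": [{"commands": [{"command-line": ["-cc1", "main.c"]}]}],
    }

    modules = {m["name"] + "-" + m["context-hash"]: m for m in deps["modules"]}
    foo = next(m for m in modules.values() if m["name"] == "Foo")
    print(" ".join(foo["command-line"]))  # -> -cc1 -emit-module foo.modulemap
    print(" ".join(deps["translation-units"][0]["commands"][0]["command-line"]))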

diff --git a/clang/utils/perf-training/perf-helper.py b/clang/utils/perf-training/perf-helper.py
index f28eb0b731dd3..99d6a3333b6ef 100644
--- a/clang/utils/perf-training/perf-helper.py
+++ b/clang/utils/perf-training/perf-helper.py
@@ -1,10 +1,10 @@
-#===- perf-helper.py - Clang Python Bindings -----------------*- python -*--===#
+# ===- perf-helper.py - Clang Python Bindings -----------------*- python -*--===#
 #
 # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 #
-#===------------------------------------------------------------------------===#
+# ===------------------------------------------------------------------------===#
 
 from __future__ import absolute_import, division, print_function
 
@@ -17,404 +17,503 @@
 import shlex
 import tempfile
 
-test_env = { 'PATH'    : os.environ['PATH'] }
+test_env = {"PATH": os.environ["PATH"]}
+
 
 def findFilesWithExtension(path, extension):
-  filenames = []
-  for root, dirs, files in os.walk(path):
-    for filename in files:
-      if filename.endswith(f".{extension}"):
-        filenames.append(os.path.join(root, filename))
-  return filenames
+    filenames = []
+    for root, dirs, files in os.walk(path):
+        for filename in files:
+            if filename.endswith(f".{extension}"):
+                filenames.append(os.path.join(root, filename))
+    return filenames
+
 
 def clean(args):
-  if len(args) != 2:
-    print('Usage: %s clean <path> <extension>\n' % __file__ +
-      '\tRemoves all files with extension from <path>.')
-    return 1
-  for filename in findFilesWithExtension(args[0], args[1]):
-    os.remove(filename)
-  return 0
+    if len(args) != 2:
+        print(
+            "Usage: %s clean <path> <extension>\n" % __file__
+            + "\tRemoves all files with extension from <path>."
+        )
+        return 1
+    for filename in findFilesWithExtension(args[0], args[1]):
+        os.remove(filename)
+    return 0
+
 
 def merge(args):
-  if len(args) != 3:
-    print('Usage: %s merge <llvm-profdata> <output> <path>\n' % __file__ +
-      '\tMerges all profraw files from path into output.')
-    return 1
-  cmd = [args[0], 'merge', '-o', args[1]]
-  cmd.extend(findFilesWithExtension(args[2], "profraw"))
-  subprocess.check_call(cmd)
-  return 0
+    if len(args) != 3:
+        print(
+            "Usage: %s merge <llvm-profdata> <output> <path>\n" % __file__
+            + "\tMerges all profraw files from path into output."
+        )
+        return 1
+    cmd = [args[0], "merge", "-o", args[1]]
+    cmd.extend(findFilesWithExtension(args[2], "profraw"))
+    subprocess.check_call(cmd)
+    return 0
+
 
 def merge_fdata(args):
-  if len(args) != 3:
-    print('Usage: %s merge-fdata <merge-fdata> <output> <path>\n' % __file__ +
-      '\tMerges all fdata files from path into output.')
-    return 1
-  cmd = [args[0], '-o', args[1]]
-  cmd.extend(findFilesWithExtension(args[2], "fdata"))
-  subprocess.check_call(cmd)
-  return 0
+    if len(args) != 3:
+        print(
+            "Usage: %s merge-fdata <merge-fdata> <output> <path>\n" % __file__
+            + "\tMerges all fdata files from path into output."
+        )
+        return 1
+    cmd = [args[0], "-o", args[1]]
+    cmd.extend(findFilesWithExtension(args[2], "fdata"))
+    subprocess.check_call(cmd)
+    return 0
+
 
 def dtrace(args):
-  parser = argparse.ArgumentParser(prog='perf-helper dtrace',
-    description='dtrace wrapper for order file generation')
-  parser.add_argument('--buffer-size', metavar='size', type=int, required=False,
-    default=1, help='dtrace buffer size in MB (default 1)')
-  parser.add_argument('--use-oneshot', required=False, action='store_true',
-    help='Use dtrace\'s oneshot probes')
-  parser.add_argument('--use-ustack', required=False, action='store_true',
-    help='Use dtrace\'s ustack to print function names')
-  parser.add_argument('--cc1', required=False, action='store_true',
-    help='Execute cc1 directly (don\'t profile the driver)')
-  parser.add_argument('cmd', nargs='*', help='')
-
-  # Use python's arg parser to handle all leading option arguments, but pass
-  # everything else through to dtrace
-  first_cmd = next(arg for arg in args if not arg.startswith("--"))
-  last_arg_idx = args.index(first_cmd)
-
-  opts = parser.parse_args(args[:last_arg_idx])
-  cmd = args[last_arg_idx:]
-
-  if opts.cc1:
-    cmd = get_cc1_command_for_args(cmd, test_env)
-
-  if opts.use_oneshot:
-      target = "oneshot$target:::entry"
-  else:
-      target = "pid$target:::entry"
-  predicate = '%s/probemod=="%s"/' % (target, os.path.basename(cmd[0]))
-  log_timestamp = 'printf("dtrace-TS: %d\\n", timestamp)'
-  if opts.use_ustack:
-      action = 'ustack(1);'
-  else:
-      action = 'printf("dtrace-Symbol: %s\\n", probefunc);'
-  dtrace_script = "%s { %s; %s }" % (predicate, log_timestamp, action)
-
-  dtrace_args = []
-  if not os.geteuid() == 0:
-    print(
-      'Script must be run as root, or you must add the following to your sudoers:'
-      + '%%admin ALL=(ALL) NOPASSWD: /usr/sbin/dtrace')
-    dtrace_args.append("sudo")
-
-  dtrace_args.extend((
-      'dtrace', '-xevaltime=exec',
-      '-xbufsize=%dm' % (opts.buffer_size),
-      '-q', '-n', dtrace_script,
-      '-c', ' '.join(cmd)))
-
-  if sys.platform == "darwin":
-    dtrace_args.append('-xmangled')
-
-  start_time = time.time()
-
-  with open("%d.dtrace" % os.getpid(), "w") as f:
-    f.write("### Command: %s" % dtrace_args)
-    subprocess.check_call(dtrace_args, stdout=f, stderr=subprocess.PIPE)
-
-  elapsed = time.time() - start_time
-  print("... data collection took %.4fs" % elapsed)
-
-  return 0
+    parser = argparse.ArgumentParser(
+        prog="perf-helper dtrace",
+        description="dtrace wrapper for order file generation",
+    )
+    parser.add_argument(
+        "--buffer-size",
+        metavar="size",
+        type=int,
+        required=False,
+        default=1,
+        help="dtrace buffer size in MB (default 1)",
+    )
+    parser.add_argument(
+        "--use-oneshot",
+        required=False,
+        action="store_true",
+        help="Use dtrace's oneshot probes",
+    )
+    parser.add_argument(
+        "--use-ustack",
+        required=False,
+        action="store_true",
+        help="Use dtrace's ustack to print function names",
+    )
+    parser.add_argument(
+        "--cc1",
+        required=False,
+        action="store_true",
+        help="Execute cc1 directly (don't profile the driver)",
+    )
+    parser.add_argument("cmd", nargs="*", help="")
+
+    # Use python's arg parser to handle all leading option arguments, but pass
+    # everything else through to dtrace
+    first_cmd = next(arg for arg in args if not arg.startswith("--"))
+    last_arg_idx = args.index(first_cmd)
+
+    opts = parser.parse_args(args[:last_arg_idx])
+    cmd = args[last_arg_idx:]
+
+    if opts.cc1:
+        cmd = get_cc1_command_for_args(cmd, test_env)
+
+    if opts.use_oneshot:
+        target = "oneshot$target:::entry"
+    else:
+        target = "pid$target:::entry"
+    predicate = '%s/probemod=="%s"/' % (target, os.path.basename(cmd[0]))
+    log_timestamp = 'printf("dtrace-TS: %d\\n", timestamp)'
+    if opts.use_ustack:
+        action = "ustack(1);"
+    else:
+        action = 'printf("dtrace-Symbol: %s\\n", probefunc);'
+    dtrace_script = "%s { %s; %s }" % (predicate, log_timestamp, action)
+
+    dtrace_args = []
+    if not os.geteuid() == 0:
+        print(
+            "Script must be run as root, or you must add the following to your sudoers:"
+            + "\n%admin ALL=(ALL) NOPASSWD: /usr/sbin/dtrace"
+        )
+        dtrace_args.append("sudo")
+
+    dtrace_args.extend(
+        (
+            "dtrace",
+            "-xevaltime=exec",
+            "-xbufsize=%dm" % (opts.buffer_size),
+            "-q",
+            "-n",
+            dtrace_script,
+            "-c",
+            " ".join(cmd),
+        )
+    )
+
+    if sys.platform == "darwin":
+        dtrace_args.append("-xmangled")
+
+    start_time = time.time()
+
+    with open("%d.dtrace" % os.getpid(), "w") as f:
+        f.write("### Command: %s" % dtrace_args)
+        subprocess.check_call(dtrace_args, stdout=f, stderr=subprocess.PIPE)
+
+    elapsed = time.time() - start_time
+    print("... data collection took %.4fs" % elapsed)
+
+    return 0
+
 
 def get_cc1_command_for_args(cmd, env):
-  # Find the cc1 command used by the compiler. To do this we execute the
-  # compiler with '-###' to figure out what it wants to do.
-  cmd = cmd + ['-###']
-  cc_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env, universal_newlines=True).strip()
-  cc_commands = []
-  for ln in cc_output.split('\n'):
-      # Filter out known garbage.
-      if (ln == 'Using built-in specs.' or
-          ln.startswith('Configured with:') or
-          ln.startswith('Target:') or
-          ln.startswith('Thread model:') or
-          ln.startswith('InstalledDir:') or
-          ln.startswith('LLVM Profile Note') or
-          ln.startswith(' (in-process)') or
-          ' version ' in ln):
-          continue
-      cc_commands.append(ln)
-
-  if len(cc_commands) != 1:
-      print('Fatal error: unable to determine cc1 command: %r' % cc_output)
-      exit(1)
-
-  cc1_cmd = shlex.split(cc_commands[0])
-  if not cc1_cmd:
-      print('Fatal error: unable to determine cc1 command: %r' % cc_output)
-      exit(1)
-
-  return cc1_cmd
+    # Find the cc1 command used by the compiler. To do this we execute the
+    # compiler with '-###' to figure out what it wants to do.
+    cmd = cmd + ["-###"]
+    cc_output = subprocess.check_output(
+        cmd, stderr=subprocess.STDOUT, env=env, universal_newlines=True
+    ).strip()
+    cc_commands = []
+    for ln in cc_output.split("\n"):
+        # Filter out known garbage.
+        if (
+            ln == "Using built-in specs."
+            or ln.startswith("Configured with:")
+            or ln.startswith("Target:")
+            or ln.startswith("Thread model:")
+            or ln.startswith("InstalledDir:")
+            or ln.startswith("LLVM Profile Note")
+            or ln.startswith(" (in-process)")
+            or " version " in ln
+        ):
+            continue
+        cc_commands.append(ln)
+
+    if len(cc_commands) != 1:
+        print("Fatal error: unable to determine cc1 command: %r" % cc_output)
+        exit(1)
+
+    cc1_cmd = shlex.split(cc_commands[0])
+    if not cc1_cmd:
+        print("Fatal error: unable to determine cc1 command: %r" % cc_output)
+        exit(1)
+
+    return cc1_cmd
+
 
 def cc1(args):
-  parser = argparse.ArgumentParser(prog='perf-helper cc1',
-    description='cc1 wrapper for order file generation')
-  parser.add_argument('cmd', nargs='*', help='')
-
-  # Use python's arg parser to handle all leading option arguments, but pass
-  # everything else through to dtrace
-  first_cmd = next(arg for arg in args if not arg.startswith("--"))
-  last_arg_idx = args.index(first_cmd)
-
-  opts = parser.parse_args(args[:last_arg_idx])
-  cmd = args[last_arg_idx:]
-
-  # clear the profile file env, so that we don't generate profdata
-  # when capturing the cc1 command
-  cc1_env = test_env
-  cc1_env["LLVM_PROFILE_FILE"] = os.devnull
-  cc1_cmd = get_cc1_command_for_args(cmd, cc1_env)
-
-  subprocess.check_call(cc1_cmd)
-  return 0
-
-def parse_dtrace_symbol_file(path, all_symbols, all_symbols_set,
-                             missing_symbols, opts):
-  def fix_mangling(symbol):
-    if sys.platform == "darwin":
-      if symbol[0] != '_' and symbol != 'start':
-          symbol = '_' + symbol
-    return symbol
-
-  def get_symbols_with_prefix(symbol):
-    start_index = bisect.bisect_left(all_symbols, symbol)
-    for s in all_symbols[start_index:]:
-      if not s.startswith(symbol):
-        break
-      yield s
-
-  # Extract the list of symbols from the given file, which is assumed to be
-  # the output of a dtrace run logging either probefunc or ustack(1) and
-  # nothing else. The dtrace -xdemangle option needs to be used.
-  #
-  # This is particular to OS X at the moment, because of the '_' handling.
-  with open(path) as f:
-    current_timestamp = None
-    for ln in f:
-      # Drop leading and trailing whitespace.
-      ln = ln.strip()
-      if not ln.startswith("dtrace-"):
-        continue
-
-      # If this is a timestamp specifier, extract it.
-      if ln.startswith("dtrace-TS: "):
-        _,data = ln.split(': ', 1)
-        if not data.isdigit():
-          print("warning: unrecognized timestamp line %r, ignoring" % ln,
-            file=sys.stderr)
-          continue
-        current_timestamp = int(data)
-        continue
-      elif ln.startswith("dtrace-Symbol: "):
-
-        _,ln = ln.split(': ', 1)
-        if not ln:
-          continue
-
-        # If there is a '`' in the line, assume it is a ustack(1) entry in
-        # the form of <modulename>`<modulefunc>, where <modulefunc> is never
-        # truncated (but does need the mangling patched).
-        if '`' in ln:
-          yield (current_timestamp, fix_mangling(ln.split('`',1)[1]))
-          continue
-
-        # Otherwise, assume this is a probefunc printout. DTrace on OS X
-        # seems to have a bug where it prints the mangled version of symbols
-        # which aren't C++ mangled. We just add a '_' to anything but start
-        # which doesn't already have a '_'.
-        symbol = fix_mangling(ln)
-
-        # If we don't know all the symbols, or the symbol is one of them,
-        # just return it.
-        if not all_symbols_set or symbol in all_symbols_set:
-          yield (current_timestamp, symbol)
-          continue
-
-        # Otherwise, we have a symbol name which isn't present in the
-        # binary. We assume it is truncated, and try to extend it.
-
-        # Get all the symbols with this prefix.
-        possible_symbols = list(get_symbols_with_prefix(symbol))
-        if not possible_symbols:
-          continue
-
-        # If we found too many possible symbols, ignore this as a prefix.
-        if len(possible_symbols) > 100:
-          print( "warning: ignoring symbol %r " % symbol +
-            "(no match and too many possible suffixes)", file=sys.stderr)
-          continue
-
-        # Report that we resolved a missing symbol.
-        if opts.show_missing_symbols and symbol not in missing_symbols:
-          print("warning: resolved missing symbol %r" % symbol, file=sys.stderr)
-          missing_symbols.add(symbol)
-
-        # Otherwise, treat all the possible matches as having occurred. This
-        # is an over-approximation, but it should be ok in practice.
-        for s in possible_symbols:
-          yield (current_timestamp, s)
+    parser = argparse.ArgumentParser(
+        prog="perf-helper cc1", description="cc1 wrapper for order file generation"
+    )
+    parser.add_argument("cmd", nargs="*", help="")
+
+    # Use python's arg parser to handle all leading option arguments, but pass
+    # everything else through to the compiler invocation
+    first_cmd = next(arg for arg in args if not arg.startswith("--"))
+    last_arg_idx = args.index(first_cmd)
+
+    opts = parser.parse_args(args[:last_arg_idx])
+    cmd = args[last_arg_idx:]
+
+    # clear the profile file env, so that we don't generate profdata
+    # when capturing the cc1 command
+    cc1_env = test_env
+    cc1_env["LLVM_PROFILE_FILE"] = os.devnull
+    cc1_cmd = get_cc1_command_for_args(cmd, cc1_env)
+
+    subprocess.check_call(cc1_cmd)
+    return 0
+
+
+def parse_dtrace_symbol_file(path, all_symbols, all_symbols_set, missing_symbols, opts):
+    def fix_mangling(symbol):
+        if sys.platform == "darwin":
+            if symbol[0] != "_" and symbol != "start":
+                symbol = "_" + symbol
+        return symbol
+
+    def get_symbols_with_prefix(symbol):
+        start_index = bisect.bisect_left(all_symbols, symbol)
+        for s in all_symbols[start_index:]:
+            if not s.startswith(symbol):
+                break
+            yield s
+
+    # Extract the list of symbols from the given file, which is assumed to be
+    # the output of a dtrace run logging either probefunc or ustack(1) and
+    # nothing else. The dtrace -xdemangle option needs to be used.
+    #
+    # This is particular to OS X at the moment, because of the '_' handling.
+    with open(path) as f:
+        current_timestamp = None
+        for ln in f:
+            # Drop leading and trailing whitespace.
+            ln = ln.strip()
+            if not ln.startswith("dtrace-"):
+                continue
+
+            # If this is a timestamp specifier, extract it.
+            if ln.startswith("dtrace-TS: "):
+                _, data = ln.split(": ", 1)
+                if not data.isdigit():
+                    print(
+                        "warning: unrecognized timestamp line %r, ignoring" % ln,
+                        file=sys.stderr,
+                    )
+                    continue
+                current_timestamp = int(data)
+                continue
+            elif ln.startswith("dtrace-Symbol: "):
+
+                _, ln = ln.split(": ", 1)
+                if not ln:
+                    continue
+
+                # If there is a '`' in the line, assume it is a ustack(1) entry in
+                # the form of <modulename>`<modulefunc>, where <modulefunc> is never
+                # truncated (but does need the mangling patched).
+                if "`" in ln:
+                    yield (current_timestamp, fix_mangling(ln.split("`", 1)[1]))
+                    continue
+
+                # Otherwise, assume this is a probefunc printout. DTrace on OS X
+                # seems to have a bug where it prints the mangled version of symbols
+                # which aren't C++ mangled. We just add a '_' to anything but start
+                # which doesn't already have a '_'.
+                symbol = fix_mangling(ln)
+
+                # If we don't know all the symbols, or the symbol is one of them,
+                # just return it.
+                if not all_symbols_set or symbol in all_symbols_set:
+                    yield (current_timestamp, symbol)
+                    continue
+
+                # Otherwise, we have a symbol name which isn't present in the
+                # binary. We assume it is truncated, and try to extend it.
+
+                # Get all the symbols with this prefix.
+                possible_symbols = list(get_symbols_with_prefix(symbol))
+                if not possible_symbols:
+                    continue
+
+                # If we found too many possible symbols, ignore this as a prefix.
+                if len(possible_symbols) > 100:
+                    print(
+                        "warning: ignoring symbol %r " % symbol
+                        + "(no match and too many possible suffixes)",
+                        file=sys.stderr,
+                    )
+                    continue
+
+                # Report that we resolved a missing symbol.
+                if opts.show_missing_symbols and symbol not in missing_symbols:
+                    print(
+                        "warning: resolved missing symbol %r" % symbol, file=sys.stderr
+                    )
+                    missing_symbols.add(symbol)
+
+                # Otherwise, treat all the possible matches as having occurred. This
+                # is an over-approximation, but it should be ok in practice.
+                for s in possible_symbols:
+                    yield (current_timestamp, s)
+
 
 def uniq(list):
-  seen = set()
-  for item in list:
-    if item not in seen:
-      yield item
-      seen.add(item)
+    seen = set()
+    for item in list:
+        if item not in seen:
+            yield item
+            seen.add(item)
+
 
 def form_by_call_order(symbol_lists):
-  # Simply strategy, just return symbols in order of occurrence, even across
-  # multiple runs.
-  return uniq(s for symbols in symbol_lists for s in symbols)
+    # Simple strategy: just return symbols in order of occurrence, even across
+    # multiple runs.
+    return uniq(s for symbols in symbol_lists for s in symbols)
+
 
 def form_by_call_order_fair(symbol_lists):
-  # More complicated strategy that tries to respect the call order across all
-  # of the test cases, instead of giving a huge preference to the first test
-  # case.
-
-  # First, uniq all the lists.
-  uniq_lists = [list(uniq(symbols)) for symbols in symbol_lists]
-
-  # Compute the successors for each list.
-  succs = {}
-  for symbols in uniq_lists:
-    for a,b in zip(symbols[:-1], symbols[1:]):
-      succs[a] = items = succs.get(a, [])
-      if b not in items:
-        items.append(b)
-
-  # Emit all the symbols, but make sure to always emit all successors from any
-  # call list whenever we see a symbol.
-  #
-  # There isn't much science here, but this sometimes works better than the
-  # more naive strategy. Then again, sometimes it doesn't so more research is
-  # probably needed.
-  return uniq(s
-    for symbols in symbol_lists
-    for node in symbols
-    for s in ([node] + succs.get(node,[])))
+    # More complicated strategy that tries to respect the call order across all
+    # of the test cases, instead of giving a huge preference to the first test
+    # case.
+
+    # First, uniq all the lists.
+    uniq_lists = [list(uniq(symbols)) for symbols in symbol_lists]
+
+    # Compute the successors for each list.
+    succs = {}
+    for symbols in uniq_lists:
+        for a, b in zip(symbols[:-1], symbols[1:]):
+            succs[a] = items = succs.get(a, [])
+            if b not in items:
+                items.append(b)
+
+    # Emit all the symbols, but make sure to always emit all successors from any
+    # call list whenever we see a symbol.
+    #
+    # There isn't much science here, but this sometimes works better than the
+    # more naive strategy. Then again, sometimes it doesn't so more research is
+    # probably needed.
+    return uniq(
+        s
+        for symbols in symbol_lists
+        for node in symbols
+        for s in ([node] + succs.get(node, []))
+    )
+
 
 def form_by_frequency(symbol_lists):
-  # Form the order file by just putting the most commonly occurring symbols
-  # first. This assumes the data files didn't use the oneshot dtrace method.
+    # Form the order file by just putting the most commonly occurring symbols
+    # first. This assumes the data files didn't use the oneshot dtrace method.
 
-  counts = {}
-  for symbols in symbol_lists:
-    for a in symbols:
-      counts[a] = counts.get(a,0) + 1
+    counts = {}
+    for symbols in symbol_lists:
+        for a in symbols:
+            counts[a] = counts.get(a, 0) + 1
+
+    by_count = list(counts.items())
+    by_count.sort(key=lambda __n: -__n[1])
+    return [s for s, n in by_count]
 
-  by_count = list(counts.items())
-  by_count.sort(key = lambda __n: -__n[1])
-  return [s for s,n in by_count]
 
 def form_by_random(symbol_lists):
-  # Randomize the symbols.
-  merged_symbols = uniq(s for symbols in symbol_lists
-                          for s in symbols)
-  random.shuffle(merged_symbols)
-  return merged_symbols
+    # Randomize the symbols.
+    merged_symbols = list(uniq(s for symbols in symbol_lists for s in symbols))
+    random.shuffle(merged_symbols)
+    return merged_symbols
+
 
 def form_by_alphabetical(symbol_lists):
-  # Alphabetize the symbols.
-  merged_symbols = list(set(s for symbols in symbol_lists for s in symbols))
-  merged_symbols.sort()
-  return merged_symbols
+    # Alphabetize the symbols.
+    merged_symbols = list(set(s for symbols in symbol_lists for s in symbols))
+    merged_symbols.sort()
+    return merged_symbols
+
+
+methods = dict(
+    (name[len("form_by_") :], value)
+    for name, value in locals().items()
+    if name.startswith("form_by_")
+)
 
-methods = dict((name[len("form_by_"):],value)
-  for name,value in locals().items() if name.startswith("form_by_"))
 
 def genOrderFile(args):
-  parser = argparse.ArgumentParser(
-    "%prog  [options] <dtrace data file directories>]")
-  parser.add_argument('input', nargs='+', help='')
-  parser.add_argument("--binary", metavar="PATH", type=str, dest="binary_path",
-    help="Path to the binary being ordered (for getting all symbols)",
-    default=None)
-  parser.add_argument("--output", dest="output_path",
-    help="path to output order file to write", default=None, required=True,
-    metavar="PATH")
-  parser.add_argument("--show-missing-symbols", dest="show_missing_symbols",
-    help="show symbols which are 'fixed up' to a valid name (requires --binary)",
-    action="store_true", default=None)
-  parser.add_argument("--output-unordered-symbols",
-    dest="output_unordered_symbols_path",
-    help="write a list of the unordered symbols to PATH (requires --binary)",
-    default=None, metavar="PATH")
-  parser.add_argument("--method", dest="method",
-    help="order file generation method to use", choices=list(methods.keys()),
-    default='call_order')
-  opts = parser.parse_args(args)
-
-  # If the user gave us a binary, get all the symbols in the binary by
-  # snarfing 'nm' output.
-  if opts.binary_path is not None:
-     output = subprocess.check_output(['nm', '-P', opts.binary_path], universal_newlines=True)
-     lines = output.split("\n")
-     all_symbols = [ln.split(' ',1)[0]
-                    for ln in lines
-                    if ln.strip()]
-     print("found %d symbols in binary" % len(all_symbols))
-     all_symbols.sort()
-  else:
-     all_symbols = []
-  all_symbols_set = set(all_symbols)
-
-  # Compute the list of input files.
-  input_files = []
-  for dirname in opts.input:
-    input_files.extend(findFilesWithExtension(dirname, "dtrace"))
-
-  # Load all of the input files.
-  print("loading from %d data files" % len(input_files))
-  missing_symbols = set()
-  timestamped_symbol_lists = [
-      list(parse_dtrace_symbol_file(path, all_symbols, all_symbols_set,
-                                    missing_symbols, opts))
-      for path in input_files]
-
-  # Reorder each symbol list.
-  symbol_lists = []
-  for timestamped_symbols_list in timestamped_symbol_lists:
-    timestamped_symbols_list.sort()
-    symbol_lists.append([symbol for _,symbol in timestamped_symbols_list])
-
-  # Execute the desire order file generation method.
-  method = methods.get(opts.method)
-  result = list(method(symbol_lists))
-
-  # Report to the user on what percentage of symbols are present in the order
-  # file.
-  num_ordered_symbols = len(result)
-  if all_symbols:
-    print("note: order file contains %d/%d symbols (%.2f%%)" % (
-      num_ordered_symbols, len(all_symbols),
-      100.*num_ordered_symbols/len(all_symbols)), file=sys.stderr)
-
-  if opts.output_unordered_symbols_path:
-    ordered_symbols_set = set(result)
-    with open(opts.output_unordered_symbols_path, 'w') as f:
-      f.write("\n".join(s for s in all_symbols if s not in ordered_symbols_set))
-
-  # Write the order file.
-  with open(opts.output_path, 'w') as f:
-    f.write("\n".join(result))
-    f.write("\n")
-
-  return 0
-
-commands = {'clean' : clean,
-  'merge' : merge,
-  'dtrace' : dtrace,
-  'cc1' : cc1,
-  'gen-order-file' : genOrderFile,
-  'merge-fdata' : merge_fdata,
-  }
+    parser = argparse.ArgumentParser("%prog [options] <dtrace data file directories>")
+    parser.add_argument("input", nargs="+", help="")
+    parser.add_argument(
+        "--binary",
+        metavar="PATH",
+        type=str,
+        dest="binary_path",
+        help="Path to the binary being ordered (for getting all symbols)",
+        default=None,
+    )
+    parser.add_argument(
+        "--output",
+        dest="output_path",
+        help="path to output order file to write",
+        default=None,
+        required=True,
+        metavar="PATH",
+    )
+    parser.add_argument(
+        "--show-missing-symbols",
+        dest="show_missing_symbols",
+        help="show symbols which are 'fixed up' to a valid name (requires --binary)",
+        action="store_true",
+        default=None,
+    )
+    parser.add_argument(
+        "--output-unordered-symbols",
+        dest="output_unordered_symbols_path",
+        help="write a list of the unordered symbols to PATH (requires --binary)",
+        default=None,
+        metavar="PATH",
+    )
+    parser.add_argument(
+        "--method",
+        dest="method",
+        help="order file generation method to use",
+        choices=list(methods.keys()),
+        default="call_order",
+    )
+    opts = parser.parse_args(args)
+
+    # If the user gave us a binary, get all the symbols in the binary by
+    # snarfing 'nm' output.
+    if opts.binary_path is not None:
+        output = subprocess.check_output(
+            ["nm", "-P", opts.binary_path], universal_newlines=True
+        )
+        lines = output.split("\n")
+        all_symbols = [ln.split(" ", 1)[0] for ln in lines if ln.strip()]
+        print("found %d symbols in binary" % len(all_symbols))
+        all_symbols.sort()
+    else:
+        all_symbols = []
+    all_symbols_set = set(all_symbols)
+
+    # Compute the list of input files.
+    input_files = []
+    for dirname in opts.input:
+        input_files.extend(findFilesWithExtension(dirname, "dtrace"))
+
+    # Load all of the input files.
+    print("loading from %d data files" % len(input_files))
+    missing_symbols = set()
+    timestamped_symbol_lists = [
+        list(
+            parse_dtrace_symbol_file(
+                path, all_symbols, all_symbols_set, missing_symbols, opts
+            )
+        )
+        for path in input_files
+    ]
+
+    # Reorder each symbol list.
+    symbol_lists = []
+    for timestamped_symbols_list in timestamped_symbol_lists:
+        timestamped_symbols_list.sort()
+        symbol_lists.append([symbol for _, symbol in timestamped_symbols_list])
+
+    # Execute the desired order file generation method.
+    method = methods.get(opts.method)
+    result = list(method(symbol_lists))
+
+    # Report to the user on what percentage of symbols are present in the order
+    # file.
+    num_ordered_symbols = len(result)
+    if all_symbols:
+        print(
+            "note: order file contains %d/%d symbols (%.2f%%)"
+            % (
+                num_ordered_symbols,
+                len(all_symbols),
+                100.0 * num_ordered_symbols / len(all_symbols),
+            ),
+            file=sys.stderr,
+        )
+
+    if opts.output_unordered_symbols_path:
+        ordered_symbols_set = set(result)
+        with open(opts.output_unordered_symbols_path, "w") as f:
+            f.write("\n".join(s for s in all_symbols if s not in ordered_symbols_set))
+
+    # Write the order file.
+    with open(opts.output_path, "w") as f:
+        f.write("\n".join(result))
+        f.write("\n")
+
+    return 0
+
+
+commands = {
+    "clean": clean,
+    "merge": merge,
+    "dtrace": dtrace,
+    "cc1": cc1,
+    "gen-order-file": genOrderFile,
+    "merge-fdata": merge_fdata,
+}
+
 
 def main():
-  f = commands[sys.argv[1]]
-  sys.exit(f(sys.argv[2:]))
+    f = commands[sys.argv[1]]
+    sys.exit(f(sys.argv[2:]))
+
 
-if __name__ == '__main__':
-  main()
+if __name__ == "__main__":
+    main()
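
One pattern in perf-helper.py worth calling out: both the order-file
strategies (the methods dict) and the top-level commands table are plain
name-to-function mappings, and methods is built by scanning the module
namespace for the "form_by_" prefix, so defining a new form_by_* function
automatically registers it as a --method choice. A standalone sketch of
that trick, with invented placeholder strategies:

    # Collect every function whose name starts with a prefix into a
    # dispatch table; the two strategies here are illustrative only.
    def form_by_length(symbols):
        return sorted(symbols, key=len)

    def form_by_alpha(symbols):
        return sorted(symbols)

    methods = dict(
        (name[len("form_by_") :], value)
        for name, value in list(globals().items())
        if name.startswith("form_by_")
    )

    print(sorted(methods))                        # -> ['alpha', 'length']
    print(methods["length"](["bb", "a", "ccc"]))  # -> ['a', 'bb', 'ccc']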

diff --git a/clang/utils/token-delta.py b/clang/utils/token-delta.py
index 62b4eb3c776c3..7c2375c03013f 100755
--- a/clang/utils/token-delta.py
+++ b/clang/utils/token-delta.py
@@ -9,6 +9,7 @@
 
 ###
 
+
 class DeltaAlgorithm(object):
     def __init__(self):
         self.cache = set()
@@ -38,7 +39,7 @@ def run(self, changes, force=False):
         # O(N^2) case unless user requests it.
         if not force:
             if not self.getTestResult(changes):
-                raise ValueError('Initial test passed to delta fails.')
+                raise ValueError("Initial test passed to delta fails.")
 
         # Check empty set first to quickly find poor test functions.
         if self.getTestResult(set()):
@@ -55,19 +56,19 @@ def split(self, S):
         # There are many ways to split, we could do a better job with more
         # context information (but then the API becomes grosser).
         L = list(S)
-        mid = len(L)//2
-        if mid==0:
-            return L,
+        mid = len(L) // 2
+        if mid == 0:
+            return (L,)
         else:
-            return L[:mid],L[mid:]
-    
+            return L[:mid], L[mid:]
+
     def delta(self, c, sets):
         # assert(reduce(set.union, sets, set()) == c)
 
         # If there is nothing left we can remove, we are done.
         if len(sets) <= 1:
             return c
-        
+
         # Look for a passing subset.
         res = self.search(c, sets)
         if res is not None:
@@ -77,11 +78,11 @@ def delta(self, c, sets):
         refined = sum(map(list, map(self.split, sets)), [])
         if len(refined) == len(sets):
             return c
-        
+
         return self.delta(c, refined)
 
     def search(self, c, sets):
-        for i,S in enumerate(sets):
+        for i, S in enumerate(sets):
             # If test passes on this subset alone, recurse.
             if self.getTestResult(S):
                 return self.delta(S, self.split(S))
@@ -89,71 +90,79 @@ def search(self, c, sets):
             # Otherwise if we have more than two sets, see if test
             # passes without this subset.
             if len(sets) > 2:
-                complement = sum(sets[:i] + sets[i+1:],[])
+                complement = sum(sets[:i] + sets[i + 1 :], [])
                 if self.getTestResult(complement):
-                    return self.delta(complement, sets[:i] + sets[i+1:])
+                    return self.delta(complement, sets[:i] + sets[i + 1 :])
+
 
 ###
 
+
 class Token(object):
     def __init__(self, type, data, flags, file, line, column):
-        self.type   = type
-        self.data   = data
-        self.flags  = flags
-        self.file   = file
-        self.line   = line
+        self.type = type
+        self.data = data
+        self.flags = flags
+        self.file = file
+        self.line = line
         self.column = column
-        
-kTokenRE = re.compile(r"""([a-z_]+) '(.*)'\t(.*)\tLoc=<(.*):(.*):(.*)>""",
-                      re.DOTALL | re.MULTILINE)
+
+
+kTokenRE = re.compile(
+    r"""([a-z_]+) '(.*)'\t(.*)\tLoc=<(.*):(.*):(.*)>""", re.DOTALL | re.MULTILINE
+)
+
 
 def getTokens(path):
-    p = subprocess.Popen(['clang','-dump-raw-tokens',path],
-                         stdin=subprocess.PIPE,
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.PIPE)
-    out,err = p.communicate()
+    p = subprocess.Popen(
+        ["clang", "-dump-raw-tokens", path],
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
+    out, err = p.communicate()
 
     tokens = []
     collect = None
-    for ln in err.split('\n'):
+    for ln in err.split("\n"):
         # Silly programmers refuse to print in simple machine readable
         # formats. Whatever.
         if collect is None:
             collect = ln
         else:
-            collect = collect + '\n' + ln
-        if 'Loc=<' in ln and ln.endswith('>'):
-            ln,collect = collect,None
+            collect = collect + "\n" + ln
+        if "Loc=<" in ln and ln.endswith(">"):
+            ln, collect = collect, None
             tokens.append(Token(*kTokenRE.match(ln).groups()))
 
     return tokens
 
+
 ###
 
+
 class TMBDDelta(DeltaAlgorithm):
     def __init__(self, testProgram, tokenLists, log):
         def patchName(name, suffix):
-            base,ext = os.path.splitext(name)
-            return base + '.' + suffix + ext
+            base, ext = os.path.splitext(name)
+            return base + "." + suffix + ext
+
         super(TMBDDelta, self).__init__()
         self.testProgram = testProgram
         self.tokenLists = tokenLists
-        self.tempFiles = [patchName(f,'tmp')
-                            for f,_ in self.tokenLists]
-        self.targetFiles = [patchName(f,'ok')
-                            for f,_ in self.tokenLists]
+        self.tempFiles = [patchName(f, "tmp") for f, _ in self.tokenLists]
+        self.targetFiles = [patchName(f, "ok") for f, _ in self.tokenLists]
         self.log = log
         self.numTests = 0
 
     def writeFiles(self, changes, fileNames):
         assert len(fileNames) == len(self.tokenLists)
         byFile = [[] for i in self.tokenLists]
-        for i,j in changes:
+        for i, j in changes:
             byFile[i].append(j)
 
-        for i,(file,tokens) in enumerate(self.tokenLists):
-            f = open(fileNames[i],'w')
+        for i, (file, tokens) in enumerate(self.tokenLists):
+            f = open(fileNames[i], "w")
             for j in byFile[i]:
                 f.write(tokens[j])
             f.close()
@@ -166,27 +175,35 @@ def test(self, changes):
         byFile = self.writeFiles(changes, self.tempFiles)
 
         if self.log:
-            print('TEST - ', end=' ', file=sys.stderr)
+            print("TEST - ", end=" ", file=sys.stderr)
             if self.log > 1:
-                for i,(file,_) in enumerate(self.tokenLists):
+                for i, (file, _) in enumerate(self.tokenLists):
                     indices = byFile[i]
                     if i:
-                        sys.stderr.write('\n      ')
-                    sys.stderr.write('%s:%d tokens: [' % (file,len(byFile[i])))
+                        sys.stderr.write("\n      ")
+                    sys.stderr.write("%s:%d tokens: [" % (file, len(byFile[i])))
                     prev = None
                     for j in byFile[i]:
                         if prev is None or j != prev + 1:
                             if prev:
-                                sys.stderr.write('%d][' % prev)
+                                sys.stderr.write("%d][" % prev)
                             sys.stderr.write(str(j))
-                            sys.stderr.write(':')
+                            sys.stderr.write(":")
                         prev = j
                     if byFile[i]:
                         sys.stderr.write(str(byFile[i][-1]))
-                    sys.stderr.write('] ')
+                    sys.stderr.write("] ")
             else:
-                print(', '.join(['%s:%d tokens' % (file, len(byFile[i]))
-                                               for i,(file,_) in enumerate(self.tokenLists)]), end=' ', file=sys.stderr)
+                print(
+                    ", ".join(
+                        [
+                            "%s:%d tokens" % (file, len(byFile[i]))
+                            for i, (file, _) in enumerate(self.tokenLists)
+                        ]
+                    ),
+                    end=" ",
+                    file=sys.stderr,
+                )
 
         p = subprocess.Popen([self.testProgram] + self.tempFiles)
         res = p.wait() == 0
@@ -195,58 +212,72 @@ def test(self, changes):
             self.writeFiles(changes, self.targetFiles)
 
         if self.log:
-            print('=> %s' % res, file=sys.stderr)
+            print("=> %s" % res, file=sys.stderr)
         else:
             if res:
-                print('\nSUCCESS (%d tokens)' % len(changes))
-            else:                
-                sys.stderr.write('.')
+                print("\nSUCCESS (%d tokens)" % len(changes))
+            else:
+                sys.stderr.write(".")
 
         return res
 
     def run(self):
-        res = super(TMBDDelta, self).run([(i,j)
-                                          for i,(file,tokens) in enumerate(self.tokenLists)
-                                          for j in range(len(tokens))])
+        res = super(TMBDDelta, self).run(
+            [
+                (i, j)
+                for i, (file, tokens) in enumerate(self.tokenLists)
+                for j in range(len(tokens))
+            ]
+        )
         self.writeFiles(res, self.targetFiles)
         if not self.log:
             print(file=sys.stderr)
         return res
 
-def tokenBasedMultiDelta(program, files, log):            
+
+def tokenBasedMultiDelta(program, files, log):
     # Read in the lists of tokens.
-    tokenLists = [(file, [t.data for t in getTokens(file)])
-                  for file in files]
+    tokenLists = [(file, [t.data for t in getTokens(file)]) for file in files]
+
+    numTokens = sum([len(tokens) for _, tokens in tokenLists])
+    print("Delta on %s with %d tokens." % (", ".join(files), numTokens))
 
-    numTokens = sum([len(tokens) for _,tokens in tokenLists])
-    print("Delta on %s with %d tokens." % (', '.join(files), numTokens))
-    
     tbmd = TMBDDelta(program, tokenLists, log)
 
     res = tbmd.run()
 
-    print("Finished %s with %d tokens (in %d tests)." % (', '.join(tbmd.targetFiles),
-                                                         len(res),
-                                                         tbmd.numTests))
-        
+    print(
+        "Finished %s with %d tokens (in %d tests)."
+        % (", ".join(tbmd.targetFiles), len(res), tbmd.numTests)
+    )
+
+
 def main():
     from optparse import OptionParser, OptionGroup
+
     parser = OptionParser("%prog <test program> {files+}")
-    parser.add_option("", "--debug", dest="debugLevel",
-                     help="set debug level [default %default]",
-                     action="store", type=int, default=0)
+    parser.add_option(
+        "",
+        "--debug",
+        dest="debugLevel",
+        help="set debug level [default %default]",
+        action="store",
+        type=int,
+        default=0,
+    )
     (opts, args) = parser.parse_args()
 
     if len(args) <= 1:
-        parser.error('Invalid number of arguments.')
-        
-    program,files = args[0],args[1:]
+        parser.error("Invalid number of arguments.")
+
+    program, files = args[0], args[1:]
 
     md = tokenBasedMultiDelta(program, files, log=opts.debugLevel)
-        
-if __name__ == '__main__':
+
+
+if __name__ == "__main__":
     try:
         main()
     except KeyboardInterrupt:
-        print('Interrupted.', file=sys.stderr)
-        os._exit(1) # Avoid freeing our giant cache.
+        print("Interrupted.", file=sys.stderr)
+        os._exit(1)  # Avoid freeing our giant cache.
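
token-delta.py's DeltaAlgorithm is a textbook delta-debugging loop:
search for a single passing subset or a passing complement, and if
neither exists, split every subset in half and retry at finer
granularity. Stripped of the clang token plumbing, the core recursion
looks like the sketch below, run against an in-memory predicate (a
change set is "interesting" if it still contains both 3 and 7); it
illustrates the idea and is not the class from the patch.

    def split(s):
        mid = len(s) // 2
        return (s,) if mid == 0 else (s[:mid], s[mid:])

    def delta(changes, sets, test):
        if len(sets) <= 1:     # nothing left that could be removed
            return changes
        for i, s in enumerate(sets):
            if test(s):        # a subset alone still fails: recurse into it
                return delta(s, split(s), test)
            if len(sets) > 2:  # the complement still fails: drop this subset
                rest = sets[:i] + sets[i + 1 :]
                complement = [c for subset in rest for c in subset]
                if test(complement):
                    return delta(complement, rest, test)
        # No luck: refine the partition and try again.
        refined = [half for s in sets for half in split(s)]
        if len(refined) == len(sets):
            return changes
        return delta(changes, refined, test)

    interesting = lambda xs: 3 in xs and 7 in xs
    tokens = list(range(10))
    print(delta(tokens, list(split(tokens)), interesting))  # -> [3, 7]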

diff --git a/clang/www/builtins.py b/clang/www/builtins.py
index f0bcf1962abb7..0c2e181a8cfa5 100755
--- a/clang/www/builtins.py
+++ b/clang/www/builtins.py
@@ -2,162 +2,171 @@
 
 import sys, fileinput
 
-err=0
+err = 0
 
 # Giant associative set of builtin->intrinsic mappings where clang doesn't
 # implement the builtin since the vector operation works by default.
 
 repl_map = {
-'__builtin_ia32_addps': '_mm_add_ps',
-'__builtin_ia32_addsd': '_mm_add_sd',
-'__builtin_ia32_addpd': '_mm_add_pd',
-'__builtin_ia32_addss': '_mm_add_ss',
-'__builtin_ia32_paddb128': '_mm_add_epi8',
-'__builtin_ia32_paddw128': '_mm_add_epi16',
-'__builtin_ia32_paddd128': '_mm_add_epi32',
-'__builtin_ia32_paddq128': '_mm_add_epi64',
-'__builtin_ia32_subps': '_mm_sub_ps',
-'__builtin_ia32_subsd': '_mm_sub_sd',
-'__builtin_ia32_subpd': '_mm_sub_pd',
-'__builtin_ia32_subss': '_mm_sub_ss',
-'__builtin_ia32_psubb128': '_mm_sub_epi8',
-'__builtin_ia32_psubw128': '_mm_sub_epi16',
-'__builtin_ia32_psubd128': '_mm_sub_epi32',
-'__builtin_ia32_psubq128': '_mm_sub_epi64',
-'__builtin_ia32_mulsd': '_mm_mul_sd',
-'__builtin_ia32_mulpd': '_mm_mul_pd',
-'__builtin_ia32_mulps': '_mm_mul_ps',
-'__builtin_ia32_mulss': '_mm_mul_ss',
-'__builtin_ia32_pmullw128': '_mm_mullo_epi16',
-'__builtin_ia32_divsd': '_mm_div_sd',
-'__builtin_ia32_divpd': '_mm_div_pd',
-'__builtin_ia32_divps': '_mm_div_ps',
-'__builtin_ia32_subss': '_mm_div_ss',
-'__builtin_ia32_andpd': '_mm_and_pd',
-'__builtin_ia32_andps': '_mm_and_ps',
-'__builtin_ia32_pand128': '_mm_and_si128',
-'__builtin_ia32_andnpd': '_mm_andnot_pd',
-'__builtin_ia32_andnps': '_mm_andnot_ps',
-'__builtin_ia32_pandn128': '_mm_andnot_si128',
-'__builtin_ia32_orpd': '_mm_or_pd',
-'__builtin_ia32_orps': '_mm_or_ps',
-'__builtin_ia32_por128': '_mm_or_si128',
-'__builtin_ia32_xorpd': '_mm_xor_pd',
-'__builtin_ia32_xorps': '_mm_xor_ps',
-'__builtin_ia32_pxor128': '_mm_xor_si128',
-'__builtin_ia32_cvtps2dq': '_mm_cvtps_epi32',
-'__builtin_ia32_cvtsd2ss': '_mm_cvtsd_ss',
-'__builtin_ia32_cvtsi2sd': '_mm_cvtsi32_sd',
-'__builtin_ia32_cvtss2sd': '_mm_cvtss_sd',
-'__builtin_ia32_cvttsd2si': '_mm_cvttsd_si32',
-'__builtin_ia32_vec_ext_v2df': '_mm_cvtsd_f64',
-'__builtin_ia32_loadhpd': '_mm_loadh_pd',
-'__builtin_ia32_loadlpd': '_mm_loadl_pd',
-'__builtin_ia32_loadlv4si': '_mm_loadl_epi64',
-'__builtin_ia32_cmpeqps': '_mm_cmpeq_ps',
-'__builtin_ia32_cmpltps': '_mm_cmplt_ps',
-'__builtin_ia32_cmpleps': '_mm_cmple_ps',
-'__builtin_ia32_cmpgtps': '_mm_cmpgt_ps',
-'__builtin_ia32_cmpgeps': '_mm_cmpge_ps',
-'__builtin_ia32_cmpunordps': '_mm_cmpunord_ps',
-'__builtin_ia32_cmpneqps': '_mm_cmpneq_ps',
-'__builtin_ia32_cmpnltps': '_mm_cmpnlt_ps',
-'__builtin_ia32_cmpnleps': '_mm_cmpnle_ps',
-'__builtin_ia32_cmpngtps': '_mm_cmpngt_ps',
-'__builtin_ia32_cmpordps': '_mm_cmpord_ps',
-'__builtin_ia32_cmpeqss': '_mm_cmpeq_ss',
-'__builtin_ia32_cmpltss': '_mm_cmplt_ss',
-'__builtin_ia32_cmpless': '_mm_cmple_ss',
-'__builtin_ia32_cmpunordss': '_mm_cmpunord_ss',
-'__builtin_ia32_cmpneqss': '_mm_cmpneq_ss',
-'__builtin_ia32_cmpnltss': '_mm_cmpnlt_ss',
-'__builtin_ia32_cmpnless': '_mm_cmpnle_ss',
-'__builtin_ia32_cmpngtss': '_mm_cmpngt_ss',
-'__builtin_ia32_cmpngess': '_mm_cmpnge_ss',
-'__builtin_ia32_cmpordss': '_mm_cmpord_ss',
-'__builtin_ia32_movss': '_mm_move_ss',
-'__builtin_ia32_movsd': '_mm_move_sd',
-'__builtin_ia32_movhlps': '_mm_movehl_ps',
-'__builtin_ia32_movlhps': '_mm_movelh_ps',
-'__builtin_ia32_movqv4si': '_mm_move_epi64',
-'__builtin_ia32_unpckhps': '_mm_unpackhi_ps',
-'__builtin_ia32_unpckhpd': '_mm_unpackhi_pd',
-'__builtin_ia32_punpckhbw128': '_mm_unpackhi_epi8',
-'__builtin_ia32_punpckhwd128': '_mm_unpackhi_epi16',
-'__builtin_ia32_punpckhdq128': '_mm_unpackhi_epi32',
-'__builtin_ia32_punpckhqdq128': '_mm_unpackhi_epi64',
-'__builtin_ia32_unpcklps': '_mm_unpacklo_ps',
-'__builtin_ia32_unpcklpd': '_mm_unpacklo_pd',
-'__builtin_ia32_punpcklbw128': '_mm_unpacklo_epi8',
-'__builtin_ia32_punpcklwd128': '_mm_unpacklo_epi16',
-'__builtin_ia32_punpckldq128': '_mm_unpacklo_epi32',
-'__builtin_ia32_punpcklqdq128': '_mm_unpacklo_epi64',
-'__builtin_ia32_cmpeqpd': '_mm_cmpeq_pd',
-'__builtin_ia32_cmpltpd': '_mm_cmplt_pd',
-'__builtin_ia32_cmplepd': '_mm_cmple_pd',
-'__builtin_ia32_cmpgtpd': '_mm_cmpgt_pd',
-'__builtin_ia32_cmpgepd': '_mm_cmpge_pd',
-'__builtin_ia32_cmpunordpd': '_mm_cmpunord_pd',
-'__builtin_ia32_cmpneqpd': '_mm_cmpneq_pd',
-'__builtin_ia32_cmpnltpd': '_mm_cmpnlt_pd',
-'__builtin_ia32_cmpnlepd': '_mm_cmpnle_pd',
-'__builtin_ia32_cmpngtpd': '_mm_cmpngt_pd',
-'__builtin_ia32_cmpngepd': '_mm_cmpnge_pd',
-'__builtin_ia32_cmpordpd': '_mm_cmpord_pd',
-'__builtin_ia32_cmpeqsd': '_mm_cmpeq_sd',
-'__builtin_ia32_cmpltsd': '_mm_cmplt_sd',
-'__builtin_ia32_cmplesd': '_mm_cmple_sd',
-'__builtin_ia32_cmpunordsd': '_mm_cmpunord_sd',
-'__builtin_ia32_cmpneqsd': '_mm_cmpneq_sd',
-'__builtin_ia32_cmpnltsd': '_mm_cmpnlt_sd',
-'__builtin_ia32_cmpnlesd': '_mm_cmpnle_sd',
-'__builtin_ia32_cmpordsd': '_mm_cmpord_sd',
-'__builtin_ia32_cvtsi642ss': '_mm_cvtsi64_ss',
-'__builtin_ia32_cvttss2si64': '_mm_cvtss_si64',
-'__builtin_ia32_shufps': '_mm_shuffle_ps',
-'__builtin_ia32_shufpd': '_mm_shuffle_pd',
-'__builtin_ia32_pshufhw': '_mm_shufflehi_epi16',
-'__builtin_ia32_pshuflw': '_mm_shufflelo_epi16',
-'__builtin_ia32_pshufd': '_mm_shuffle_epi32',
-'__builtin_ia32_movshdup': '_mm_movehdup_ps',
-'__builtin_ia32_movsldup': '_mm_moveldup_ps',
-'__builtin_ia32_maxps': '_mm_max_ps',
-'__builtin_ia32_pslldi128': '_mm_slli_epi32',
-'__builtin_ia32_vec_set_v16qi': '_mm_insert_epi8',
-'__builtin_ia32_vec_set_v8hi': '_mm_insert_epi16',
-'__builtin_ia32_vec_set_v4si': '_mm_insert_epi32',
-'__builtin_ia32_vec_set_v2di': '_mm_insert_epi64',
-'__builtin_ia32_vec_set_v4hi': '_mm_insert_pi16',
-'__builtin_ia32_vec_ext_v16qi': '_mm_extract_epi8',
-'__builtin_ia32_vec_ext_v8hi': '_mm_extract_epi16',
-'__builtin_ia32_vec_ext_v4si': '_mm_extract_epi32',
-'__builtin_ia32_vec_ext_v2di': '_mm_extract_epi64',
-'__builtin_ia32_vec_ext_v4hi': '_mm_extract_pi16',
-'__builtin_ia32_vec_ext_v4sf': '_mm_extract_ps'
+    "__builtin_ia32_addps": "_mm_add_ps",
+    "__builtin_ia32_addsd": "_mm_add_sd",
+    "__builtin_ia32_addpd": "_mm_add_pd",
+    "__builtin_ia32_addss": "_mm_add_ss",
+    "__builtin_ia32_paddb128": "_mm_add_epi8",
+    "__builtin_ia32_paddw128": "_mm_add_epi16",
+    "__builtin_ia32_paddd128": "_mm_add_epi32",
+    "__builtin_ia32_paddq128": "_mm_add_epi64",
+    "__builtin_ia32_subps": "_mm_sub_ps",
+    "__builtin_ia32_subsd": "_mm_sub_sd",
+    "__builtin_ia32_subpd": "_mm_sub_pd",
+    "__builtin_ia32_subss": "_mm_sub_ss",
+    "__builtin_ia32_psubb128": "_mm_sub_epi8",
+    "__builtin_ia32_psubw128": "_mm_sub_epi16",
+    "__builtin_ia32_psubd128": "_mm_sub_epi32",
+    "__builtin_ia32_psubq128": "_mm_sub_epi64",
+    "__builtin_ia32_mulsd": "_mm_mul_sd",
+    "__builtin_ia32_mulpd": "_mm_mul_pd",
+    "__builtin_ia32_mulps": "_mm_mul_ps",
+    "__builtin_ia32_mulss": "_mm_mul_ss",
+    "__builtin_ia32_pmullw128": "_mm_mullo_epi16",
+    "__builtin_ia32_divsd": "_mm_div_sd",
+    "__builtin_ia32_divpd": "_mm_div_pd",
+    "__builtin_ia32_divps": "_mm_div_ps",
+    "__builtin_ia32_subss": "_mm_div_ss",
+    "__builtin_ia32_andpd": "_mm_and_pd",
+    "__builtin_ia32_andps": "_mm_and_ps",
+    "__builtin_ia32_pand128": "_mm_and_si128",
+    "__builtin_ia32_andnpd": "_mm_andnot_pd",
+    "__builtin_ia32_andnps": "_mm_andnot_ps",
+    "__builtin_ia32_pandn128": "_mm_andnot_si128",
+    "__builtin_ia32_orpd": "_mm_or_pd",
+    "__builtin_ia32_orps": "_mm_or_ps",
+    "__builtin_ia32_por128": "_mm_or_si128",
+    "__builtin_ia32_xorpd": "_mm_xor_pd",
+    "__builtin_ia32_xorps": "_mm_xor_ps",
+    "__builtin_ia32_pxor128": "_mm_xor_si128",
+    "__builtin_ia32_cvtps2dq": "_mm_cvtps_epi32",
+    "__builtin_ia32_cvtsd2ss": "_mm_cvtsd_ss",
+    "__builtin_ia32_cvtsi2sd": "_mm_cvtsi32_sd",
+    "__builtin_ia32_cvtss2sd": "_mm_cvtss_sd",
+    "__builtin_ia32_cvttsd2si": "_mm_cvttsd_si32",
+    "__builtin_ia32_vec_ext_v2df": "_mm_cvtsd_f64",
+    "__builtin_ia32_loadhpd": "_mm_loadh_pd",
+    "__builtin_ia32_loadlpd": "_mm_loadl_pd",
+    "__builtin_ia32_loadlv4si": "_mm_loadl_epi64",
+    "__builtin_ia32_cmpeqps": "_mm_cmpeq_ps",
+    "__builtin_ia32_cmpltps": "_mm_cmplt_ps",
+    "__builtin_ia32_cmpleps": "_mm_cmple_ps",
+    "__builtin_ia32_cmpgtps": "_mm_cmpgt_ps",
+    "__builtin_ia32_cmpgeps": "_mm_cmpge_ps",
+    "__builtin_ia32_cmpunordps": "_mm_cmpunord_ps",
+    "__builtin_ia32_cmpneqps": "_mm_cmpneq_ps",
+    "__builtin_ia32_cmpnltps": "_mm_cmpnlt_ps",
+    "__builtin_ia32_cmpnleps": "_mm_cmpnle_ps",
+    "__builtin_ia32_cmpngtps": "_mm_cmpngt_ps",
+    "__builtin_ia32_cmpordps": "_mm_cmpord_ps",
+    "__builtin_ia32_cmpeqss": "_mm_cmpeq_ss",
+    "__builtin_ia32_cmpltss": "_mm_cmplt_ss",
+    "__builtin_ia32_cmpless": "_mm_cmple_ss",
+    "__builtin_ia32_cmpunordss": "_mm_cmpunord_ss",
+    "__builtin_ia32_cmpneqss": "_mm_cmpneq_ss",
+    "__builtin_ia32_cmpnltss": "_mm_cmpnlt_ss",
+    "__builtin_ia32_cmpnless": "_mm_cmpnle_ss",
+    "__builtin_ia32_cmpngtss": "_mm_cmpngt_ss",
+    "__builtin_ia32_cmpngess": "_mm_cmpnge_ss",
+    "__builtin_ia32_cmpordss": "_mm_cmpord_ss",
+    "__builtin_ia32_movss": "_mm_move_ss",
+    "__builtin_ia32_movsd": "_mm_move_sd",
+    "__builtin_ia32_movhlps": "_mm_movehl_ps",
+    "__builtin_ia32_movlhps": "_mm_movelh_ps",
+    "__builtin_ia32_movqv4si": "_mm_move_epi64",
+    "__builtin_ia32_unpckhps": "_mm_unpackhi_ps",
+    "__builtin_ia32_unpckhpd": "_mm_unpackhi_pd",
+    "__builtin_ia32_punpckhbw128": "_mm_unpackhi_epi8",
+    "__builtin_ia32_punpckhwd128": "_mm_unpackhi_epi16",
+    "__builtin_ia32_punpckhdq128": "_mm_unpackhi_epi32",
+    "__builtin_ia32_punpckhqdq128": "_mm_unpackhi_epi64",
+    "__builtin_ia32_unpcklps": "_mm_unpacklo_ps",
+    "__builtin_ia32_unpcklpd": "_mm_unpacklo_pd",
+    "__builtin_ia32_punpcklbw128": "_mm_unpacklo_epi8",
+    "__builtin_ia32_punpcklwd128": "_mm_unpacklo_epi16",
+    "__builtin_ia32_punpckldq128": "_mm_unpacklo_epi32",
+    "__builtin_ia32_punpcklqdq128": "_mm_unpacklo_epi64",
+    "__builtin_ia32_cmpeqpd": "_mm_cmpeq_pd",
+    "__builtin_ia32_cmpltpd": "_mm_cmplt_pd",
+    "__builtin_ia32_cmplepd": "_mm_cmple_pd",
+    "__builtin_ia32_cmpgtpd": "_mm_cmpgt_pd",
+    "__builtin_ia32_cmpgepd": "_mm_cmpge_pd",
+    "__builtin_ia32_cmpunordpd": "_mm_cmpunord_pd",
+    "__builtin_ia32_cmpneqpd": "_mm_cmpneq_pd",
+    "__builtin_ia32_cmpnltpd": "_mm_cmpnlt_pd",
+    "__builtin_ia32_cmpnlepd": "_mm_cmpnle_pd",
+    "__builtin_ia32_cmpngtpd": "_mm_cmpngt_pd",
+    "__builtin_ia32_cmpngepd": "_mm_cmpnge_pd",
+    "__builtin_ia32_cmpordpd": "_mm_cmpord_pd",
+    "__builtin_ia32_cmpeqsd": "_mm_cmpeq_sd",
+    "__builtin_ia32_cmpltsd": "_mm_cmplt_sd",
+    "__builtin_ia32_cmplesd": "_mm_cmple_sd",
+    "__builtin_ia32_cmpunordsd": "_mm_cmpunord_sd",
+    "__builtin_ia32_cmpneqsd": "_mm_cmpneq_sd",
+    "__builtin_ia32_cmpnltsd": "_mm_cmpnlt_sd",
+    "__builtin_ia32_cmpnlesd": "_mm_cmpnle_sd",
+    "__builtin_ia32_cmpordsd": "_mm_cmpord_sd",
+    "__builtin_ia32_cvtsi642ss": "_mm_cvtsi64_ss",
+    "__builtin_ia32_cvttss2si64": "_mm_cvtss_si64",
+    "__builtin_ia32_shufps": "_mm_shuffle_ps",
+    "__builtin_ia32_shufpd": "_mm_shuffle_pd",
+    "__builtin_ia32_pshufhw": "_mm_shufflehi_epi16",
+    "__builtin_ia32_pshuflw": "_mm_shufflelo_epi16",
+    "__builtin_ia32_pshufd": "_mm_shuffle_epi32",
+    "__builtin_ia32_movshdup": "_mm_movehdup_ps",
+    "__builtin_ia32_movsldup": "_mm_moveldup_ps",
+    "__builtin_ia32_maxps": "_mm_max_ps",
+    "__builtin_ia32_pslldi128": "_mm_slli_epi32",
+    "__builtin_ia32_vec_set_v16qi": "_mm_insert_epi8",
+    "__builtin_ia32_vec_set_v8hi": "_mm_insert_epi16",
+    "__builtin_ia32_vec_set_v4si": "_mm_insert_epi32",
+    "__builtin_ia32_vec_set_v2di": "_mm_insert_epi64",
+    "__builtin_ia32_vec_set_v4hi": "_mm_insert_pi16",
+    "__builtin_ia32_vec_ext_v16qi": "_mm_extract_epi8",
+    "__builtin_ia32_vec_ext_v8hi": "_mm_extract_epi16",
+    "__builtin_ia32_vec_ext_v4si": "_mm_extract_epi32",
+    "__builtin_ia32_vec_ext_v2di": "_mm_extract_epi64",
+    "__builtin_ia32_vec_ext_v4hi": "_mm_extract_pi16",
+    "__builtin_ia32_vec_ext_v4sf": "_mm_extract_ps",
 }
 
 # Special unhandled cases:
 #   __builtin_ia32_vec_ext_*(__P, idx) -> _mm_store_sd/_mm_storeh_pd
 #     depending on the index; oddly, there is no generic insert/extract for these.
 unhandled = [
-'__builtin_ia32_vec_ext_v2df',
-'__builtin_ia32_vec_ext_v2si',
+    "__builtin_ia32_vec_ext_v2df",
+    "__builtin_ia32_vec_ext_v2si",
 ]
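The right replacement for these two depends on the literal index argument, so
the name-for-name repl_map above cannot express them and the script only
reports them. A minimal sketch of how a rewrite could dispatch on the index
(the regex and helper name are illustrative assumptions, not part of this
patch):

    import re

    # Hypothetical helper: index 0 selects the low double (_mm_store_sd),
    # index 1 the high double (_mm_storeh_pd). A real rewrite would also
    # have to restructure the surrounding store expression, which is why
    # the script leaves these cases to a human.
    _VEC_EXT = re.compile(r"__builtin_ia32_vec_ext_v2df\s*\(\s*[^,]+,\s*([01])\s*\)")

    def suggest_vec_ext_repl(line):
        m = _VEC_EXT.search(line)
        if m is None:
            return None
        return "_mm_store_sd" if m.group(1) == "0" else "_mm_storeh_pd"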
 
+
 def report_repl(builtin, repl):
-  sys.stderr.write("%s:%d: x86 builtin %s used, replaced with %s\n" % (fileinput.filename(), fileinput.filelineno(), builtin, repl))
+    sys.stderr.write(
+        "%s:%d: x86 builtin %s used, replaced with %s\n"
+        % (fileinput.filename(), fileinput.filelineno(), builtin, repl)
+    )
+
 
 def report_cant(builtin):
-  sys.stderr.write("%s:%d: x86 builtin %s used, too many replacements\n" % (fileinput.filename(), fileinput.filelineno(), builtin))
+    sys.stderr.write(
+        "%s:%d: x86 builtin %s used, too many replacements\n"
+        % (fileinput.filename(), fileinput.filelineno(), builtin)
+    )
+
 
 for line in fileinput.input(inplace=1):
-  for builtin, repl in repl_map.items():
-    if builtin in line:
-      line = line.replace(builtin, repl)
-      report_repl(builtin, repl)
-  for unh in unhandled:
-    if unh in line:
-        report_cant(unh)
-  sys.stdout.write(line)
+    for builtin, repl in repl_map.items():
+        if builtin in line:
+            line = line.replace(builtin, repl)
+            report_repl(builtin, repl)
+    for unh in unhandled:
+        if unh in line:
+            report_cant(unh)
+    sys.stdout.write(line)
 
 sys.exit(err)
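For context, fileinput.input(inplace=1) means the script is invoked with the
files to rewrite as command-line arguments: stdout is redirected back into
each file, while the report_* diagnostics go to stderr. A self-contained
sketch of the replacement step on a single line (the one-entry map and the
sample line are illustrative):

    # Excerpt of repl_map; the real table above covers all handled builtins.
    repl_map = {"__builtin_ia32_addps": "_mm_add_ps"}

    line = "v = __builtin_ia32_addps(a, b);\n"
    for builtin, repl in repl_map.items():
        if builtin in line:
            # Plain substring replacement, exactly as in the main loop.
            line = line.replace(builtin, repl)
    print(line, end="")  # prints: v = _mm_add_ps(a, b);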